Ksjsjjdj committed (verified)
Commit 1cb2e39 · Parent(s): 3f30ea1

Update app.py

Files changed (1):
  1. app.py +108 -58

app.py CHANGED
@@ -18,7 +18,8 @@ import transformers
 import datasets
 from dotenv import load_dotenv
 from datasets import load_dataset, get_dataset_config_names, IterableDataset
-from transformers import AutoModelForCausalLM, AutoTokenizer, TrainingArguments, Trainer, TrainerCallback, AutoConfig, DataCollatorForLanguageModeling
+from transformers import AutoModelForCausalLM, AutoTokenizer, TrainingArguments, Trainer, TrainerCallback
+from peft import LoraConfig, get_peft_model, PeftModel
 from huggingface_hub import login, whoami, create_repo, upload_folder
 import spaces
 
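Note on the new import: `peft` is not pulled in by `transformers` automatically, so the Space's requirements must list it separately (this commit does not show a requirements.txt change; that is an assumption). A minimal sketch of a guard that would make a missing install obvious in the job logs:

```python
# Sketch (not part of the commit): fail fast with a readable error if peft is absent.
try:
    from peft import LoraConfig, get_peft_model, PeftModel
except ImportError as err:
    raise ImportError(
        "LoRA training requires the `peft` package; add `peft` to requirements.txt"
    ) from err
```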
@@ -62,10 +63,12 @@ class JobStatus:
         self.add_log(msg)
 
 class CustomTrainerCallback(TrainerCallback):
-    def __init__(self, job_id, hf_token, repo_id):
+    def __init__(self, job_id, hf_token, repo_id, model_name, tokenizer):
         self.job_id = job_id
         self.hf_token = hf_token
         self.repo_id = repo_id
+        self.model_name = model_name
+        self.tokenizer = tokenizer
 
     def on_step_end(self, args, state, control, **kwargs):
         if self.job_id in JOBS:
@@ -82,35 +85,58 @@ class CustomTrainerCallback(TrainerCallback):
         if self.job_id in JOBS:
             job = JOBS[self.job_id]
             step = state.global_step
-            ckpt_name = f"checkpoint-{step}"
-            ckpt_path = os.path.join(args.output_dir, ckpt_name)
+            ckpt_path = os.path.join(args.output_dir, f"checkpoint-{step}")
 
-            job.add_log(f"System: 100-Step Snapshot saved ({ckpt_name})")
+            job.add_log(f"System: Adapter Snapshot saved ({ckpt_path})")
 
-            def _upload_bg():
+            def _merge_and_upload_bg():
                 try:
+                    job.add_log(f"Merge: Fusing weights for step {step}...")
+
+                    base_model = AutoModelForCausalLM.from_pretrained(
+                        self.model_name,
+                        torch_dtype=torch.float16,
+                        device_map="cpu",
+                        trust_remote_code=True
+                    )
+
+                    merged_model = PeftModel.from_pretrained(base_model, ckpt_path)
+                    merged_model = merged_model.merge_and_unload()
+
+                    temp_merge_path = f"merged_tmp_{self.job_id}"
+                    merged_model.save_pretrained(temp_merge_path)
+                    self.tokenizer.save_pretrained(temp_merge_path)
+
+                    del base_model
+                    del merged_model
+                    gc.collect()
+
+                    job.add_log(f"Cloud: Uploading Merged Model (Step {step})...")
                     upload_folder(
-                        folder_path=ckpt_path,
+                        folder_path=temp_merge_path,
                         path_in_repo=".",
                         repo_id=self.repo_id,
                         token=self.hf_token,
-                        commit_message=f"Live Checkpoint Step {step}"
+                        commit_message=f"Live Update Step {step}"
                     )
-                    job.add_log(f"Cloud: Synced Checkpoint {step} to Root")
-                except:
-                    pass
+                    job.add_log("Cloud: Success. Root updated.")
+
+                    shutil.rmtree(temp_merge_path, ignore_errors=True)
+
+                except Exception as e:
+                    job.add_log(f"Merge Error: {str(e)}")
 
-            threading.Thread(target=_upload_bg, daemon=True).start()
+            threading.Thread(target=_merge_and_upload_bg, daemon=True).start()
         return control
 
 @spaces.GPU(duration=300)
-def background_train_task(job_id, hf_token, model_name, new_repo_name,
+def background_train_task(job_id, hf_token, model_name, new_repo_name, lora_r, lora_alpha, lora_dropout,
                           train_steps, learning_rate, batch_size, datasets_text,
                           reasoning_mode, c_conf, c_tok, c_gen):
 
     job = JOBS[job_id]
     job.status = "RUNNING"
-    job.add_log("System: initializing Scratch Training Protocol...")
+    job.add_log("System: Engaging LoRA Neural Engine...")
 
     try:
         if not hf_token.startswith("hf_"):
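The background merge in `_merge_and_upload_bg` above is the standard PEFT pattern: reload the base weights, attach the saved adapter, fuse, save. A condensed sketch of just that flow (the checkpoint path and output directory are illustrative, not from the commit):

```python
# Sketch: fuse a saved LoRA adapter into its base model on CPU, then persist it.
import torch
from transformers import AutoModelForCausalLM
from peft import PeftModel

base = AutoModelForCausalLM.from_pretrained(
    "Qwen/Qwen2.5-0.5B",                 # same base the adapter was trained on
    torch_dtype=torch.float16,
    device_map="cpu",                    # merge on CPU so the GPU stays free for training
)
model = PeftModel.from_pretrained(base, "checkpoints/<job>/checkpoint-100")
model = model.merge_and_unload()         # folds the low-rank deltas into the base weights
model.save_pretrained("merged_tmp")      # plain checkpoint, loadable without peft
```

Running this in a daemon thread, as the callback does, lets the Trainer keep stepping while the merge and upload happen.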
@@ -180,40 +206,47 @@ def background_train_task(job_id, hf_token, model_name, new_repo_name,
         def process_stream_generator():
             iterator = chain.from_iterable(streams)
             batch_buffer = []
-
             for item in iterator:
                 try:
                     text = str(item.get("text", item.get("content", str(item))))
-                    if len(text) < 5: continue
+                    if len(text) < 10: continue
                     batch_buffer.append(text)
-
-                    if len(batch_buffer) >= 100:
+                    if len(batch_buffer) >= 200:
                         encoded_batch = tokenizer(batch_buffer, truncation=True, max_length=2048, padding=False)
                         for input_ids in encoded_batch["input_ids"]:
-                            yield {"input_ids": input_ids}
+                            yield {"input_ids": input_ids, "labels": input_ids}
                         batch_buffer = []
                 except:
                     continue
 
-        job.set_progress(0.15, "Model: Initializing Architecture from Scratch...")
+        job.set_progress(0.15, "Model: Fast-Load Weights...")
 
         torch.cuda.empty_cache()
         gc.collect()
 
-        config = AutoConfig.from_pretrained(model_name, trust_remote_code=True)
-
-        original_model = AutoModelForCausalLM.from_config(
-            config,
-            trust_remote_code=True,
+        original_model = AutoModelForCausalLM.from_pretrained(
+            model_name,
+            trust_remote_code=True,
+            torch_dtype=torch.float16 if torch.cuda.is_available() else torch.float32
         )
 
         if torch.cuda.is_available():
-            original_model = original_model.to(torch.float16).cuda()
+            original_model = original_model.cuda()
+
+        peft_config = LoraConfig(
+            r=int(lora_r),
+            lora_alpha=int(lora_alpha),
+            target_modules=["q_proj", "k_proj", "v_proj", "dense", "fc1", "fc2", "o_proj"],
+            bias="none",
+            lora_dropout=lora_dropout,
+            task_type="CAUSAL_LM"
+        )
+
+        peft_model = get_peft_model(original_model, peft_config)
+        peft_model.config.use_cache = False
 
         output_dir = f"checkpoints/{job_id}"
 
-        data_collator = DataCollatorForLanguageModeling(tokenizer=tokenizer, mlm=False)
-
         training_args = TrainingArguments(
             output_dir=output_dir,
             per_device_train_batch_size=int(batch_size),
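The `target_modules` list above mixes layer names from different architectures (`q_proj`/`k_proj`/`v_proj`/`o_proj` in Llama- and Qwen-style blocks, `dense`/`fc1`/`fc2` in Phi-style blocks), presumably as a cross-model catch-all; PEFT generally only complains if none of the entries match anything. A quick way to see which linear layers a given base model actually exposes (a sketch, not in the commit):

```python
# Sketch: enumerate nn.Linear leaf names to choose sensible LoRA target_modules.
import torch.nn as nn
from transformers import AutoModelForCausalLM

model = AutoModelForCausalLM.from_pretrained("Qwen/Qwen2.5-0.5B")
names = {name.rsplit(".", 1)[-1]
         for name, module in model.named_modules()
         if isinstance(module, nn.Linear)}
print(sorted(names))   # Qwen2-style: q/k/v/o_proj plus gate_proj, up_proj, down_proj
```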
@@ -224,54 +257,64 @@ def background_train_task(job_id, hf_token, model_name, new_repo_name,
             logging_steps=1,
             save_strategy="steps",
             save_steps=100,
-            save_total_limit=1,
+            save_total_limit=2,
             report_to="none",
             fp16=True if torch.cuda.is_available() else False,
             disable_tqdm=True,
             dataloader_num_workers=4,
             dataloader_pin_memory=True,
-            gradient_checkpointing=True,
             torch_compile=False
         )
 
         dataset_iterable = IterableDataset.from_generator(process_stream_generator)
 
         trainer = Trainer(
-            model=original_model,
+            model=peft_model,
             train_dataset=dataset_iterable,
             args=training_args,
-            data_collator=data_collator,
-            callbacks=[CustomTrainerCallback(job_id, hf_token, full_repo_id)]
+            callbacks=[CustomTrainerCallback(job_id, hf_token, full_repo_id, model_name, tokenizer)]
         )
 
-        job.set_progress(0.2, "Training: Full Gradient Descent Initiated...")
+        job.set_progress(0.2, "Training: LoRA Optimization Initiated...")
         trainer.train()
         trainer.save_model(output_dir)
-        tokenizer.save_pretrained(output_dir)
 
-        job.set_progress(0.9, "Processing: Finalizing Artifacts...")
+        job.set_progress(0.9, "Processing: Final Merge...")
+        del peft_model
         del original_model
        torch.cuda.empty_cache()
         gc.collect()
 
+        base_reload = AutoModelForCausalLM.from_pretrained(
+            model_name,
+            return_dict=True,
+            torch_dtype=torch.float16,
+            trust_remote_code=True
+        )
+
+        if torch.cuda.is_available():
+            base_reload = base_reload.cuda()
+
+        model_to_merge = PeftModel.from_pretrained(base_reload, output_dir)
+        final_model = model_to_merge.merge_and_unload()
+
+        final_path = f"merged/{job_id}"
+        final_model.save_pretrained(final_path, safe_serialization=True)
+        tokenizer.save_pretrained(final_path)
+
         def inject_json(content, fname):
             if content and content.strip():
                 try:
                     data = json.loads(content)
-                    file_path = os.path.join(output_dir, fname)
-
+                    file_path = os.path.join(final_path, fname)
                     if os.path.exists(file_path):
                         with open(file_path, 'r', encoding='utf-8') as f:
-                            try:
-                                existing_data = json.load(f)
-                                existing_data.update(data)
-                                data = existing_data
-                            except:
-                                pass
+                            existing_data = json.load(f)
+                            existing_data.update(data)
+                            data = existing_data
 
                     with open(file_path, 'w', encoding='utf-8') as f:
                         json.dump(data, f, indent=2)
-                    job.add_log(f"Config: Overwritten {fname} with user settings")
                 except:
                     pass
 
@@ -281,12 +324,17 @@ def background_train_task(job_id, hf_token, model_name, new_repo_name,
 
         job.set_progress(0.95, "Network: Uploading Final Model...")
 
+        if os.path.exists(os.path.join(final_path, "adapter_model.bin")):
+            os.remove(os.path.join(final_path, "adapter_model.bin"))
+        if os.path.exists(os.path.join(final_path, "adapter_config.json")):
+            os.remove(os.path.join(final_path, "adapter_config.json"))
+
         upload_folder(
-            folder_path=output_dir,
+            folder_path=final_path,
             path_in_repo=".",
             repo_id=full_repo_id,
             token=hf_token,
-            commit_message="Scratch Trained Model"
+            commit_message="Final Merged Model Release"
         )
 
         job.repo_url = f"https://huggingface.co/{full_repo_id}"
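Deleting `adapter_model.bin` and `adapter_config.json` by hand works, but newer PEFT versions save `adapter_model.safetensors`, which the two `os.remove` calls would miss. `upload_folder` also accepts an `ignore_patterns` argument that filters at upload time; a sketch of the same upload with a glob filter (same variables as in the hunk above):

```python
# Sketch: filter adapter artifacts at upload time instead of removing them on disk.
from huggingface_hub import upload_folder

upload_folder(
    folder_path=final_path,
    path_in_repo=".",
    repo_id=full_repo_id,
    token=hf_token,
    commit_message="Final Merged Model Release",
    ignore_patterns=["adapter_*"],   # covers .bin, .safetensors, and adapter_config.json
)
```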
@@ -299,7 +347,7 @@ def background_train_task(job_id, hf_token, model_name, new_repo_name,
         job.add_log(f"FATAL ERROR: {str(e)}")
         torch.cuda.empty_cache()
 
-def start_training_wrapper(hf_token, model_name, new_repo_name,
+def start_training_wrapper(hf_token, model_name, new_repo_name, lora_r, lora_alpha, lora_dropout,
                            train_steps, learning_rate, batch_size, datasets_text,
                            reasoning_mode, c_conf, c_tok, c_gen):
 
@@ -311,7 +359,7 @@ def start_training_wrapper(hf_token, model_name, new_repo_name,
 
     thread = threading.Thread(
         target=background_train_task,
-        args=(new_job.id, hf_token, model_name, new_repo_name,
+        args=(new_job.id, hf_token, model_name, new_repo_name, lora_r, lora_alpha, lora_dropout,
               train_steps, learning_rate, batch_size, datasets_text, reasoning_mode, c_conf, c_tok, c_gen)
     )
     thread.daemon = True
@@ -332,7 +380,7 @@ def get_job_update(job_id):
 
     result_comp = gr.update(visible=False)
     if job.status == "COMPLETED" and job.repo_url:
-        result_comp = gr.update(visible=True, value=f"✅ Full Model Published: {job.repo_url}")
+        result_comp = gr.update(visible=True, value=f"✅ Model Published: {job.repo_url}")
 
     return job.status, job.created_at, job.progress, log_text, result_comp
 
@@ -346,10 +394,10 @@ def load_from_url(request: gr.Request):
         pass
     return gr.update(selected="launch_tab"), ""
 
-with gr.Blocks(title="Nucleus Enterprise") as demo:
+with gr.Blocks(title="Nucleus Enterprise", theme=gr.themes.Base()) as demo:
     with gr.Column():
         gr.Markdown("# ⚛️ NUCLEUS ENTERPRISE")
-        gr.Markdown("Autonomous LLM Foundry | V7.0 Scratch Edition")
+        gr.Markdown("Autonomous LLM Foundry | V8.5 LoRA-Merge Edition")
 
     with gr.Tabs() as main_tabs:
         with gr.TabItem("🚀 LAUNCHPAD", id="launch_tab"):
@@ -357,24 +405,26 @@ with gr.Blocks(title="Nucleus Enterprise") as demo:
             with gr.Column(scale=2):
                 with gr.Row():
                     hf_token = gr.Textbox(label="HuggingFace Token", type="password", value=os.getenv("HF_TOKEN", ""))
-                    model_name = gr.Textbox(label="Architecture Config Source", value="Qwen/Qwen2.5-0.5B")
+                    model_name = gr.Textbox(label="Base Model", value="Qwen/Qwen2.5-0.5B")
 
-                repo_name = gr.Textbox(label="Output Repository", value="nucleus-scratch-v1")
+                repo_name = gr.Textbox(label="Output Repository", value="nucleus-model-v1")
                 datasets = gr.Textbox(label="Datasets (CSV)", value="Salesforce/fineweb_deduplicated", lines=3)
-
                 reasoning = gr.Checkbox(label="Inject Reasoning (CoT/Math)", value=False)
 
             with gr.Column(scale=1):
                 steps = gr.Number(label="Steps", value=100)
-                lr = gr.Number(label="Learning Rate", value=1e-4)
+                lr = gr.Number(label="Learning Rate", value=2e-4)
                 batch = gr.Number(label="Batch Size", value=1)
+                r = gr.Slider(8, 256, 32, step=8, label="LoRA Rank")
+                a = gr.Slider(8, 512, 64, step=8, label="LoRA Alpha")
+                d = gr.Slider(0, 0.5, 0.05, label="Dropout")
 
             with gr.Accordion("Advanced Config", open=False):
                 c_conf = gr.Code(label="config.json", language="json")
                 c_tok = gr.Code(label="tokenizer_config.json", language="json")
                 c_gen = gr.Code(label="generation_config.json", language="json")
 
-            btn_launch = gr.Button("INITIALIZE SCRATCH TRAINING", variant="primary", size="lg")
+            btn_launch = gr.Button("INITIALIZE LORA TRAINING", variant="primary", size="lg")
 
         with gr.TabItem("📡 TELEMETRY", id="monitor_tab"):
             with gr.Row():
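For the slider defaults above (rank 32, alpha 64), the effective LoRA scaling is alpha / r = 2.0: LoRA adds (alpha / r) · B·A on top of each targeted weight, so alpha is only meaningful relative to the chosen rank. A one-line check (sketch):

```python
# LoRA update: W_eff = W + (lora_alpha / r) * (B @ A); with the UI defaults:
r, lora_alpha = 32, 64
print(lora_alpha / r)   # 2.0 -- doubling r while keeping alpha halves the update scale
```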
@@ -395,7 +445,7 @@ with gr.Blocks(title="Nucleus Enterprise") as demo:
 
    btn_launch.click(
        start_training_wrapper,
-        inputs=[hf_token, model_name, repo_name, steps, lr, batch, datasets, reasoning, c_conf, c_tok, c_gen],
+        inputs=[hf_token, model_name, repo_name, r, a, d, steps, lr, batch, datasets, reasoning, c_conf, c_tok, c_gen],
        outputs=[job_id_input, main_tabs]
    ).then(
        None, [job_id_input], None,
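After the final upload, the output repository holds a fully merged checkpoint, so consumers load it like any other causal LM, no adapters required (sketch; the repo id is hypothetical and depends on the account and the Output Repository field):

```python
# Sketch: consume the published merged model; no peft dependency needed.
from transformers import AutoModelForCausalLM, AutoTokenizer

repo = "<user>/nucleus-model-v1"     # hypothetical repo id
tok = AutoTokenizer.from_pretrained(repo)
model = AutoModelForCausalLM.from_pretrained(repo, torch_dtype="auto")
ids = tok("Hello, world", return_tensors="pt")
out = model.generate(**ids, max_new_tokens=20)
print(tok.decode(out[0], skip_special_tokens=True))
```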