Ksjsjjdj committed on
Commit 7c75ddd · verified · 1 Parent(s): db13e7d

Update app.py

Files changed (1)
  1. app.py +98 -110
app.py CHANGED
@@ -8,19 +8,21 @@ import sys
  import gc
  import multiprocessing
  import shutil
  from datetime import datetime
  from concurrent.futures import ThreadPoolExecutor, as_completed
  from itertools import chain

  import torch
  import gradio as gr
  import transformers
  import datasets
  from dotenv import load_dotenv
  from datasets import load_dataset, get_dataset_config_names, IterableDataset
- from transformers import AutoModelForCausalLM, AutoTokenizer, TrainingArguments, Trainer, TrainerCallback
- from peft import LoraConfig, get_peft_model, PeftModel
- from huggingface_hub import login, whoami, create_repo, upload_folder
  import spaces

  try:
@@ -42,6 +44,34 @@ if torch.cuda.is_available():

  JOBS = {}

  class JobStatus:
  def __init__(self):
  self.id = str(uuid.uuid4())
@@ -63,12 +93,10 @@ class JobStatus:
  self.add_log(msg)

  class CustomTrainerCallback(TrainerCallback):
- def __init__(self, job_id, hf_token, repo_id, model_name, tokenizer):
  self.job_id = job_id
  self.hf_token = hf_token
  self.repo_id = repo_id
- self.model_name = model_name
- self.tokenizer = tokenizer

  def on_step_end(self, args, state, control, **kwargs):
  if self.job_id in JOBS:
@@ -85,58 +113,35 @@ class CustomTrainerCallback(TrainerCallback):
  if self.job_id in JOBS:
  job = JOBS[self.job_id]
  step = state.global_step
- ckpt_path = os.path.join(args.output_dir, f"checkpoint-{step}")

- job.add_log(f"System: Adapter Snapshot saved ({ckpt_path})")

- def _merge_and_upload_bg():
  try:
- job.add_log(f"Merge: Fusing weights for step {step}...")
-
- base_model = AutoModelForCausalLM.from_pretrained(
- self.model_name,
- torch_dtype=torch.float16,
- device_map="cpu",
- trust_remote_code=True
- )
-
- merged_model = PeftModel.from_pretrained(base_model, ckpt_path)
- merged_model = merged_model.merge_and_unload()
-
- temp_merge_path = f"merged_tmp_{self.job_id}"
- merged_model.save_pretrained(temp_merge_path)
- self.tokenizer.save_pretrained(temp_merge_path)
-
- del base_model
- del merged_model
- gc.collect()
-
- job.add_log(f"Cloud: Uploading Merged Model (Step {step})...")
  upload_folder(
- folder_path=temp_merge_path,
  path_in_repo=".",
  repo_id=self.repo_id,
  token=self.hf_token,
- commit_message=f"Live Update Step {step}"
  )
- job.add_log(f"Cloud: Success. Root updated.")
-
- shutil.rmtree(temp_merge_path, ignore_errors=True)
-
- except Exception as e:
- job.add_log(f"Merge Error: {str(e)}")

- threading.Thread(target=_merge_and_upload_bg, daemon=True).start()
  return control

  @spaces.GPU(duration=300)
- def background_train_task(job_id, hf_token, model_name, new_repo_name, lora_r, lora_alpha, lora_dropout,
  train_steps, learning_rate, batch_size, datasets_text,
  reasoning_mode, c_conf, c_tok, c_gen):

  job = JOBS[job_id]
  job.status = "RUNNING"
- job.add_log("System: Engaging LoRA Neural Engine...")

  try:
  if not hf_token.startswith("hf_"):
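
For context, the _merge_and_upload_bg helper removed above follows the standard PEFT merge pattern: reload the base model, attach the saved LoRA adapter checkpoint, fuse the adapter into the base weights, and save the result. A minimal sketch of that pattern (model id, checkpoint path, and output directory are placeholders, not the app's real values):

import torch
from transformers import AutoModelForCausalLM, AutoTokenizer
from peft import PeftModel

base_id = "Qwen/Qwen2.5-0.5B"                   # placeholder base model
ckpt_path = "checkpoints/<job>/checkpoint-100"  # placeholder adapter checkpoint

# Load the frozen base weights on CPU, attach the adapter, then fuse the LoRA deltas.
base = AutoModelForCausalLM.from_pretrained(base_id, torch_dtype=torch.float16, device_map="cpu")
merged = PeftModel.from_pretrained(base, ckpt_path).merge_and_unload()

# The fused model is a plain transformers model again and can be saved or uploaded as such.
merged.save_pretrained("merged_tmp")
AutoTokenizer.from_pretrained(base_id).save_pretrained("merged_tmp")
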
@@ -206,47 +211,45 @@ def background_train_task(job_id, hf_token, model_name, new_repo_name, lora_r, l
  def process_stream_generator():
  iterator = chain.from_iterable(streams)
  batch_buffer = []
  for item in iterator:
  try:
  text = str(item.get("text", item.get("content", str(item))))
- if len(text) < 10: continue
  batch_buffer.append(text)
- if len(batch_buffer) >= 200:
  encoded_batch = tokenizer(batch_buffer, truncation=True, max_length=2048, padding=False)
  for input_ids in encoded_batch["input_ids"]:
- yield {"input_ids": input_ids, "labels": input_ids}
  batch_buffer = []
  except:
  continue

- job.set_progress(0.15, "Model: Fast-Load Weights...")

  torch.cuda.empty_cache()
  gc.collect()

- original_model = AutoModelForCausalLM.from_pretrained(
- model_name,
- trust_remote_code=True,
- torch_dtype=torch.float16 if torch.cuda.is_available() else torch.float32
- )

- if torch.cuda.is_available():
- original_model = original_model.cuda()
-
- peft_config = LoraConfig(
- r=int(lora_r),
- lora_alpha=int(lora_alpha),
- target_modules=["q_proj", "k_proj", "v_proj", "dense", "fc1", "fc2", "o_proj"],
- bias="none",
- lora_dropout=lora_dropout,
- task_type="CAUSAL_LM"
  )

- peft_model = get_peft_model(original_model, peft_config)
- peft_model.config.use_cache = False

  output_dir = f"checkpoints/{job_id}"

  training_args = TrainingArguments(
  output_dir=output_dir,
  per_device_train_batch_size=int(batch_size),
@@ -257,64 +260,56 @@ def background_train_task(job_id, hf_token, model_name, new_repo_name, lora_r, l
  logging_steps=1,
  save_strategy="steps",
  save_steps=100,
- save_total_limit=2,
  report_to="none",
  fp16=True if torch.cuda.is_available() else False,
  disable_tqdm=True,
  dataloader_num_workers=4,
  dataloader_pin_memory=True,
- torch_compile=False
  )

  dataset_iterable = IterableDataset.from_generator(process_stream_generator)

  trainer = Trainer(
- model=peft_model,
  train_dataset=dataset_iterable,
  args=training_args,
- callbacks=[CustomTrainerCallback(job_id, hf_token, full_repo_id, model_name, tokenizer)]
  )

- job.set_progress(0.2, "Training: LoRA Optimization Initiated...")
  trainer.train()
  trainer.save_model(output_dir)

- job.set_progress(0.9, "Processing: Final Merge...")
- del peft_model
  del original_model
  torch.cuda.empty_cache()
  gc.collect()

- base_reload = AutoModelForCausalLM.from_pretrained(
- model_name,
- return_dict=True,
- torch_dtype=torch.float16,
- trust_remote_code=True
- )
-
- if torch.cuda.is_available():
- base_reload = base_reload.cuda()
-
- model_to_merge = PeftModel.from_pretrained(base_reload, output_dir)
- final_model = model_to_merge.merge_and_unload()
-
- final_path = f"merged/{job_id}"
- final_model.save_pretrained(final_path, safe_serialization=True)
- tokenizer.save_pretrained(final_path)
-
  def inject_json(content, fname):
  if content and content.strip():
  try:
  data = json.loads(content)
- file_path = os.path.join(final_path, fname)
  if os.path.exists(file_path):
  with open(file_path, 'r', encoding='utf-8') as f:
- existing_data = json.load(f)
- existing_data.update(data)
- data = existing_data

  with open(file_path, 'w', encoding='utf-8') as f:
  json.dump(data, f, indent=2)
  except:
  pass
@@ -322,19 +317,14 @@ def background_train_task(job_id, hf_token, model_name, new_repo_name, lora_r, l
  inject_json(c_tok, "tokenizer_config.json")
  inject_json(c_gen, "generation_config.json")

- job.set_progress(0.95, "Network: Uploading Final Model...")

- if os.path.exists(os.path.join(final_path, "adapter_model.bin")):
- os.remove(os.path.join(final_path, "adapter_model.bin"))
- if os.path.exists(os.path.join(final_path, "adapter_config.json")):
- os.remove(os.path.join(final_path, "adapter_config.json"))
-
  upload_folder(
- folder_path=final_path,
  path_in_repo=".",
  repo_id=full_repo_id,
  token=hf_token,
- commit_message="Final Merged Model Release"
  )

  job.repo_url = f"https://huggingface.co/{full_repo_id}"
@@ -347,7 +337,7 @@ def background_train_task(job_id, hf_token, model_name, new_repo_name, lora_r, l
  job.add_log(f"FATAL ERROR: {str(e)}")
  torch.cuda.empty_cache()

- def start_training_wrapper(hf_token, model_name, new_repo_name, lora_r, lora_alpha, lora_dropout,
  train_steps, learning_rate, batch_size, datasets_text,
  reasoning_mode, c_conf, c_tok, c_gen):
@@ -359,7 +349,7 @@ def start_training_wrapper(hf_token, model_name, new_repo_name, lora_r, lora_alp

  thread = threading.Thread(
  target=background_train_task,
- args=(new_job.id, hf_token, model_name, new_repo_name, lora_r, lora_alpha, lora_dropout,
  train_steps, learning_rate, batch_size, datasets_text, reasoning_mode, c_conf, c_tok, c_gen)
  )
  thread.daemon = True
@@ -380,7 +370,7 @@ def get_job_update(job_id):

  result_comp = gr.update(visible=False)
  if job.status == "COMPLETED" and job.repo_url:
- result_comp = gr.update(visible=True, value=f"✅ Model Published: {job.repo_url}")

  return job.status, job.created_at, job.progress, log_text, result_comp
@@ -397,7 +387,7 @@ def load_from_url(request: gr.Request):
  with gr.Blocks(title="Nucleus Enterprise") as demo:
  with gr.Column():
  gr.Markdown("# ⚛️ NUCLEUS ENTERPRISE")
- gr.Markdown("Autonomous LLM Foundry | V8.5 LoRA-Merge Edition")

  with gr.Tabs() as main_tabs:
  with gr.TabItem("🚀 LAUNCHPAD", id="launch_tab"):
@@ -405,26 +395,24 @@ with gr.Blocks(title="Nucleus Enterprise") as demo:
  with gr.Column(scale=2):
  with gr.Row():
  hf_token = gr.Textbox(label="HuggingFace Token", type="password", value=os.getenv("HF_TOKEN", ""))
- model_name = gr.Textbox(label="Base Model", value="Qwen/Qwen2.5-0.5B")

- repo_name = gr.Textbox(label="Output Repository", value="nucleus-model-v1")
  datasets = gr.Textbox(label="Datasets (CSV)", value="Salesforce/fineweb_deduplicated", lines=3)
  reasoning = gr.Checkbox(label="Inject Reasoning (CoT/Math)", value=False)

  with gr.Column(scale=1):
  steps = gr.Number(label="Steps", value=100)
- lr = gr.Number(label="Learning Rate", value=2e-4)
  batch = gr.Number(label="Batch Size", value=1)
- r = gr.Slider(8, 256, 32, step=8, label="LoRA Rank")
- a = gr.Slider(8, 512, 64, step=8, label="LoRA Alpha")
- d = gr.Slider(0, 0.5, 0.05, label="Dropout")

  with gr.Accordion("Advanced Config", open=False):
  c_conf = gr.Code(label="config.json", language="json")
  c_tok = gr.Code(label="tokenizer_config.json", language="json")
  c_gen = gr.Code(label="generation_config.json", language="json")

- btn_launch = gr.Button("INITIALIZE LORA TRAINING", variant="primary", size="lg")

  with gr.TabItem("📡 TELEMETRY", id="monitor_tab"):
  with gr.Row():
@@ -445,7 +433,7 @@ with gr.Blocks(title="Nucleus Enterprise") as demo:

  btn_launch.click(
  start_training_wrapper,
- inputs=[hf_token, model_name, repo_name, r, a, d, steps, lr, batch, datasets, reasoning, c_conf, c_tok, c_gen],
  outputs=[job_id_input, main_tabs]
  ).then(
  None, [job_id_input], None,
 
app.py (updated version; the hunks above show the file before this commit)

  import gc
  import multiprocessing
  import shutil
+ import math
  from datetime import datetime
  from concurrent.futures import ThreadPoolExecutor, as_completed
  from itertools import chain

  import torch
+ import torch.nn as nn
+ import torch.nn.functional as F
  import gradio as gr
  import transformers
  import datasets
  from dotenv import load_dotenv
  from datasets import load_dataset, get_dataset_config_names, IterableDataset
+ from transformers import AutoModelForCausalLM, AutoTokenizer, TrainingArguments, Trainer, TrainerCallback, AutoConfig, DataCollatorForLanguageModeling
+ from huggingface_hub import login, whoami, create_repo, upload_folder, HfApi
  import spaces

  try:
 

  JOBS = {}

+ def activation_quant(x):
+ scale = 127.0 / x.abs().max(dim=-1, keepdim=True).values.clamp_(min=1e-5)
+ y = (x * scale).round().clamp_(-128, 127) / scale
+ return y + x - x.detach()
+
+ def weight_quant(w):
+ scale = 1.0 / w.abs().mean().clamp_(min=1e-5)
+ u = (w * scale).round().clamp_(-1, 1) / scale
+ return u + w - w.detach()
+
+ class BitLinear(nn.Linear):
+ def forward(self, x):
+ w = weight_quant(self.weight)
+ x = activation_quant(x)
+ return F.linear(x, w, self.bias)
+
+ def convert_to_bitnet(model, copy_weights=False):
+ for name, module in model.named_children():
+ if isinstance(module, nn.Linear):
+ bit_linear = BitLinear(module.in_features, module.out_features, module.bias is not None)
+ if copy_weights:
+ bit_linear.weight.data = module.weight.data.clone()
+ if module.bias is not None:
+ bit_linear.bias.data = module.bias.data.clone()
+ setattr(model, name, bit_linear)
+ else:
+ convert_to_bitnet(module, copy_weights=copy_weights)
+
  class JobStatus:
  def __init__(self):
  self.id = str(uuid.uuid4())
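
A quick sanity check for the helpers added above (a minimal sketch; the toy network and sizes are arbitrary): after convert_to_bitnet every nn.Linear should be a BitLinear, the forward pass should still run, and the effective weights should sit on a ternary grid, i.e. values in {-1, 0, +1} divided by a per-tensor scale.

import torch
import torch.nn as nn

toy = nn.Sequential(nn.Linear(16, 32), nn.ReLU(), nn.Linear(32, 4))
convert_to_bitnet(toy, copy_weights=True)

# Every Linear (BitLinear subclasses nn.Linear) should now be a BitLinear.
assert all(isinstance(m, BitLinear) for m in toy.modules() if isinstance(m, nn.Linear))

# Forward still works with quantized weights and activations.
print(toy(torch.randn(2, 16)).shape)  # torch.Size([2, 4])

# weight_quant snaps each weight to {-1, 0, +1} / scale; the "+ w - w.detach()" term is a
# straight-through-style trick so optimizer updates still reach the latent fp weights.
with torch.no_grad():
    w = toy[0].weight
    scale = 1.0 / w.abs().mean().clamp(min=1e-5)
    print(torch.unique((weight_quant(w) * scale).round()))  # subset of {-1., 0., 1.}
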
 
  self.add_log(msg)

  class CustomTrainerCallback(TrainerCallback):
+ def __init__(self, job_id, hf_token, repo_id):
  self.job_id = job_id
  self.hf_token = hf_token
  self.repo_id = repo_id

  def on_step_end(self, args, state, control, **kwargs):
  if self.job_id in JOBS:
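
The callback above relies on two standard TrainerCallback hooks: on_step_end fires after every optimizer step and on_save fires after each checkpoint write. A minimal sketch of the hook pattern (SimpleProgress and PROGRESS are illustrative stand-ins, not part of the app):

from transformers import TrainerCallback

PROGRESS = {}  # illustrative stand-in for the JOBS registry

class SimpleProgress(TrainerCallback):
    def __init__(self, job_id):
        self.job_id = job_id

    def on_step_end(self, args, state, control, **kwargs):
        # state.global_step / state.max_steps gives a rough completion fraction.
        PROGRESS[self.job_id] = state.global_step / max(state.max_steps or 1, 1)
        return control

    def on_save(self, args, state, control, **kwargs):
        # Fires whenever the Trainer writes a checkpoint (save_strategy="steps", save_steps=N).
        print(f"checkpoint written at step {state.global_step} in {args.output_dir}")
        return control
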
 
  if self.job_id in JOBS:
  job = JOBS[self.job_id]
  step = state.global_step
+ ckpt_name = f"checkpoint-{step}"
+ ckpt_path = os.path.join(args.output_dir, ckpt_name)

+ job.add_log(f"System: 100-Step Snapshot saved ({ckpt_name})")

+ def _upload_bg():
  try:
  upload_folder(
+ folder_path=ckpt_path,
  path_in_repo=".",
  repo_id=self.repo_id,
  token=self.hf_token,
+ commit_message=f"Live Checkpoint Step {step}"
  )
+ job.add_log(f"Cloud: Synced Checkpoint {step} to Root")
+ except:
+ pass

+ threading.Thread(target=_upload_bg, daemon=True).start()
  return control

  @spaces.GPU(duration=300)
+ def background_train_task(job_id, hf_token, model_name, new_repo_name,
  train_steps, learning_rate, batch_size, datasets_text,
  reasoning_mode, c_conf, c_tok, c_gen):

  job = JOBS[job_id]
  job.status = "RUNNING"
+ job.add_log("System: Initializing BitNet Scratch Protocol...")

  try:
  if not hf_token.startswith("hf_"):
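
Later hunks pass full_repo_id to the callback and to upload_folder, but its construction sits in unchanged code outside the diff. A minimal sketch of the usual huggingface_hub pattern (the helper name is hypothetical):

from huggingface_hub import create_repo, whoami

def resolve_repo_id(hf_token: str, new_repo_name: str) -> str:
    # Resolve the authenticated user's namespace and make sure the target repo exists.
    username = whoami(token=hf_token)["name"]
    full_repo_id = f"{username}/{new_repo_name}"
    create_repo(full_repo_id, token=hf_token, exist_ok=True)
    return full_repo_id
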
 
  def process_stream_generator():
  iterator = chain.from_iterable(streams)
  batch_buffer = []
+
  for item in iterator:
  try:
  text = str(item.get("text", item.get("content", str(item))))
+ if len(text) < 5: continue
  batch_buffer.append(text)
+
+ if len(batch_buffer) >= 100:
  encoded_batch = tokenizer(batch_buffer, truncation=True, max_length=2048, padding=False)
  for input_ids in encoded_batch["input_ids"]:
+ yield {"input_ids": input_ids}
  batch_buffer = []
  except:
  continue

+ job.set_progress(0.15, "Model: Initializing Architecture & Converting to BitNet...")

  torch.cuda.empty_cache()
  gc.collect()

+ config = AutoConfig.from_pretrained(model_name, trust_remote_code=True)

+ original_model = AutoModelForCausalLM.from_config(
+ config,
+ trust_remote_code=True,
  )

+ convert_to_bitnet(original_model, copy_weights=False)
+
+ model_size = sum(t.numel() for t in original_model.parameters())
+ job.add_log(f"Model Size: {model_size/1000**2:.1f}M Parameters (1.58-bit)")
+
+ if torch.cuda.is_available():
+ original_model = original_model.to(torch.float16).cuda()

  output_dir = f"checkpoints/{job_id}"

+ data_collator = DataCollatorForLanguageModeling(tokenizer=tokenizer, mlm=False)
+
  training_args = TrainingArguments(
  output_dir=output_dir,
  per_device_train_batch_size=int(batch_size),
 
  logging_steps=1,
  save_strategy="steps",
  save_steps=100,
+ save_total_limit=1,
  report_to="none",
  fp16=True if torch.cuda.is_available() else False,
  disable_tqdm=True,
  dataloader_num_workers=4,
  dataloader_pin_memory=True,
+ gradient_checkpointing=True,
+ torch_compile=False,
+ lr_scheduler_type="cosine",
+ warmup_ratio=0.1
  )

  dataset_iterable = IterableDataset.from_generator(process_stream_generator)

  trainer = Trainer(
+ model=original_model,
+ tokenizer=tokenizer,
  train_dataset=dataset_iterable,
  args=training_args,
+ data_collator=data_collator,
+ callbacks=[CustomTrainerCallback(job_id, hf_token, full_repo_id)]
  )

+ job.set_progress(0.2, "Training: BitNet Gradient Descent Initiated...")
  trainer.train()
  trainer.save_model(output_dir)

+ job.set_progress(0.9, "Processing: Finalizing Artifacts...")
  del original_model
  torch.cuda.empty_cache()
  gc.collect()

  def inject_json(content, fname):
  if content and content.strip():
  try:
  data = json.loads(content)
+ file_path = os.path.join(output_dir, fname)
+
  if os.path.exists(file_path):
  with open(file_path, 'r', encoding='utf-8') as f:
+ try:
+ existing_data = json.load(f)
+ existing_data.update(data)
+ data = existing_data
+ except:
+ pass

  with open(file_path, 'w', encoding='utf-8') as f:
  json.dump(data, f, indent=2)
+ job.add_log(f"Config: Overwrote {fname} with user settings")
  except:
  pass

  inject_json(c_tok, "tokenizer_config.json")
  inject_json(c_gen, "generation_config.json")

+ job.set_progress(0.95, "Network: Uploading Final BitNet Model...")

  upload_folder(
+ folder_path=output_dir,
  path_in_repo=".",
  repo_id=full_repo_id,
  token=hf_token,
+ commit_message="BitNet Scratch Trained Model"
  )

  job.repo_url = f"https://huggingface.co/{full_repo_id}"
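
The stream generator above yields only input_ids; with mlm=False, DataCollatorForLanguageModeling pads each batch and derives labels from input_ids (padding positions set to -100), which is what makes the causal-LM loss work without explicit labels. A minimal self-contained sketch (the gpt2 tokenizer is used purely for illustration):

from datasets import IterableDataset
from transformers import AutoTokenizer, DataCollatorForLanguageModeling

tok = AutoTokenizer.from_pretrained("gpt2")
tok.pad_token = tok.eos_token  # gpt2 ships without a pad token

def toy_stream():
    for text in ["hello world", "bitnet trains from scratch"]:
        yield {"input_ids": tok(text, truncation=True, max_length=32)["input_ids"]}

ds = IterableDataset.from_generator(toy_stream)
collator = DataCollatorForLanguageModeling(tokenizer=tok, mlm=False)
batch = collator(list(ds))
print(batch["input_ids"].shape, batch["labels"].shape)  # labels mirror input_ids; pads become -100
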
 
  job.add_log(f"FATAL ERROR: {str(e)}")
  torch.cuda.empty_cache()

+ def start_training_wrapper(hf_token, model_name, new_repo_name,
  train_steps, learning_rate, batch_size, datasets_text,
  reasoning_mode, c_conf, c_tok, c_gen):
 

  thread = threading.Thread(
  target=background_train_task,
+ args=(new_job.id, hf_token, model_name, new_repo_name,
  train_steps, learning_rate, batch_size, datasets_text, reasoning_mode, c_conf, c_tok, c_gen)
  )
  thread.daemon = True
 

  result_comp = gr.update(visible=False)
  if job.status == "COMPLETED" and job.repo_url:
+ result_comp = gr.update(visible=True, value=f"✅ Full Model Published: {job.repo_url}")

  return job.status, job.created_at, job.progress, log_text, result_comp
 
  with gr.Blocks(title="Nucleus Enterprise") as demo:
  with gr.Column():
  gr.Markdown("# ⚛️ NUCLEUS ENTERPRISE")
+ gr.Markdown("Autonomous LLM Foundry | V9.0 BitNet Edition")

  with gr.Tabs() as main_tabs:
  with gr.TabItem("🚀 LAUNCHPAD", id="launch_tab"):
 
  with gr.Column(scale=2):
  with gr.Row():
  hf_token = gr.Textbox(label="HuggingFace Token", type="password", value=os.getenv("HF_TOKEN", ""))
+ model_name = gr.Textbox(label="Architecture Config Source", value="Qwen/Qwen2.5-0.5B")

+ repo_name = gr.Textbox(label="Output Repository", value="nucleus-bitnet-v1")
  datasets = gr.Textbox(label="Datasets (CSV)", value="Salesforce/fineweb_deduplicated", lines=3)
+
  reasoning = gr.Checkbox(label="Inject Reasoning (CoT/Math)", value=False)

  with gr.Column(scale=1):
  steps = gr.Number(label="Steps", value=100)
+ lr = gr.Number(label="Learning Rate", value=1e-4)
  batch = gr.Number(label="Batch Size", value=1)

  with gr.Accordion("Advanced Config", open=False):
  c_conf = gr.Code(label="config.json", language="json")
  c_tok = gr.Code(label="tokenizer_config.json", language="json")
  c_gen = gr.Code(label="generation_config.json", language="json")

+ btn_launch = gr.Button("INITIALIZE BITNET TRAINING", variant="primary", size="lg")

  with gr.TabItem("📡 TELEMETRY", id="monitor_tab"):
  with gr.Row():
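
The final hunk below rewires btn_launch.click so the inputs list matches the new start_training_wrapper signature; Gradio reads each component's value and passes them positionally to the handler, and return values map onto outputs. A minimal sketch of the pattern (all names hypothetical):

import gradio as gr

def start_stub(token, model, repo):
    return f"job for {repo} queued"

with gr.Blocks() as sketch:
    token = gr.Textbox(label="Token")
    model = gr.Textbox(label="Model")
    repo = gr.Textbox(label="Repo")
    status = gr.Textbox(label="Status")
    launch = gr.Button("Launch")
    # Component values are passed to start_stub in the order listed in inputs.
    launch.click(start_stub, inputs=[token, model, repo], outputs=[status])
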
 

  btn_launch.click(
  start_training_wrapper,
+ inputs=[hf_token, model_name, repo_name, steps, lr, batch, datasets, reasoning, c_conf, c_tok, c_gen],
  outputs=[job_id_input, main_tabs]
  ).then(
  None, [job_id_input], None,