Update app.py
app.py CHANGED
@@ -49,7 +49,7 @@ idx_to_word_global = None
 device_global = torch.device("cuda" if torch.cuda.is_available() else "cpu")
 model_load_status_global = "Model not loaded."

-CHECKPOINT_FILENAME = "
+CHECKPOINT_FILENAME = "swck_model_conceptual_app_fulldebug.pth.tar" # New checkpoint name

 MAIN_LOSS_WEIGHT_APP = 1.0
 BLOCK_TARGET_ENTROPY_LOSS_WEIGHT_APP = 0.02
@@ -57,7 +57,6 @@ OVERALL_OUTPUT_ENTROPY_REG_WEIGHT_APP = 0.01
 GATE_SPARSITY_LOSS_WEIGHT_APP = 0.001
 WIRING_PHASE_EPOCHS_APP = 1

-# --- Helper to toggle all debug prints in the model ---
 def set_model_debug_prints(model, seed_parser_debug, block_debug, model_debug):
     if model:
         model.debug_prints_enabled = model_debug
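The diff shows only the first lines of this helper. A minimal sketch of what a complete version would look like, assuming SWCKModel exposes `seed_parser` and `adaptive_blocks` with per-component `debug_prints_enabled` flags (as the lines added later in this commit do); the committed body may differ:

def set_model_debug_prints(model, seed_parser_debug, block_debug, model_debug):
    # Toggle the model-level flag first, then the sub-component flags if present.
    if model:
        model.debug_prints_enabled = model_debug
        if hasattr(model, 'seed_parser'):
            model.seed_parser.debug_prints_enabled = seed_parser_debug
        for block in getattr(model, 'adaptive_blocks', []):
            block.debug_prints_enabled = block_debug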
@@ -85,7 +84,7 @@ def build_vocab_from_corpus_text_app(corpus_text):
     print(f"App: Built vocab of size {VOCAB_SIZE_APP}")
     return temp_word_to_idx, temp_idx_to_word

-def initialize_or_load_model_app(enable_initial_debug=True): # Control initial d
+def initialize_or_load_model_app(enable_initial_debug=False): # param now unused; kept so the startup call below still works
     global swck_model_global, optimizer_global, word_to_idx_global, idx_to_word_global, \
            VOCAB_SIZE_APP, model_load_status_global
@@ -104,15 +103,17 @@ def initialize_or_load_model_app(enable_initial_debug=True): # Control initial d
         'num_sub_modules_per_block': NUM_SUB_MODULES_PER_BLOCK_APP
     }

-
-    # The SeedParser within SWCKModel will print if its own flag is True
-
+    print("App: Initializing SWCKModel with FULL DEBUG ON by default for init...")
     swck_model_global = SWCKModel(**model_args).to(device_global)
-    #
-    set_model_debug_prints
-
-
-
+    # Debug is on by default in SWCKModel and sub-components as per their class __init__.
+    # We can use set_model_debug_prints to confirm or change it if needed later.
+    # For now, rely on their internal defaults being True.
+    # If SeedParser or AdaptiveBlock have their debug_prints_enabled=False by default in model.py,
+    # you would explicitly set them here:
+    if hasattr(swck_model_global, 'seed_parser'): swck_model_global.seed_parser.debug_prints_enabled = True
+    for block in swck_model_global.adaptive_blocks: block.debug_prints_enabled = True
+    swck_model_global.debug_prints_enabled = True
+    print("App: All model component debugs are intended to be ON by default from their init.")


     if os.path.exists(CHECKPOINT_FILENAME):
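The loading branch itself (old lines 119-139) falls between these hunks and is untouched by the commit. A sketch of its presumed shape, assuming a standard torch checkpoint dict; the actual key names are not visible in this diff:

import torch
import torch.optim as optim

def load_checkpoint_into(model, path, device, lr=0.001):
    # Hypothetical helper mirroring the elided load: restore weights, rebuild optimizer.
    checkpoint = torch.load(path, map_location=device)
    model.load_state_dict(checkpoint['model_state_dict'])  # assumed key
    optimizer = optim.AdamW(model.parameters(), lr=lr)
    if 'optimizer_state_dict' in checkpoint:  # assumed key
        optimizer.load_state_dict(checkpoint['optimizer_state_dict'])
    return model, optimizer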
@@ -140,27 +141,19 @@ def initialize_or_load_model_app(enable_initial_debug=True): # Control initial d
             model_load_status_global = f"Model loaded successfully from {CHECKPOINT_FILENAME}."
             print(model_load_status_global)
         except Exception as e:
-            print(f"App: Error loading model from checkpoint: {e}. Re-initializing new model.")
-            # Re-initialize if loading failed, ensuring debug flags are set again
+            print(f"App: Error loading model from checkpoint: {e}. Re-initializing new model with debug ON.")
             swck_model_global = SWCKModel(**model_args).to(device_global)
-
-
-
-                                   model_debug=enable_initial_debug)
+            if hasattr(swck_model_global, 'seed_parser'): swck_model_global.seed_parser.debug_prints_enabled = True
+            for block in swck_model_global.adaptive_blocks: block.debug_prints_enabled = True
+            swck_model_global.debug_prints_enabled = True
             optimizer_global = optim.AdamW(swck_model_global.parameters(), lr=0.001)
-            model_load_status_global = "Error loading checkpoint. Using new (untrained) model."
+            model_load_status_global = "Error loading checkpoint. Using new (untrained) model with debug ON."
     else:
-        print(f"App: Checkpoint {CHECKPOINT_FILENAME} not found. Initializing new model.")
+        print(f"App: Checkpoint {CHECKPOINT_FILENAME} not found. Initializing new model with debug ON.")
         optimizer_global = optim.AdamW(swck_model_global.parameters(), lr=0.001)
-        model_load_status_global = "Initialized a new (untrained) model."
+        model_load_status_global = "Initialized a new (untrained) model with debug ON."

-    swck_model_global.eval()
-    # After loading or initializing, ensure debug prints are set based on desire for startup logs
-    # If enable_initial_debug was False, they are off. If True, they were on during init.
-    # For operations like training/generation, we'll toggle them explicitly.
-    if not enable_initial_debug: # Turn them off if they weren't meant to be on for init
-        set_model_debug_prints(swck_model_global, False, False, False)
-
+    swck_model_global.eval()
     return model_load_status_global

@@ -195,12 +188,16 @@ def run_short_training_session(num_epochs_app, batch_size_app, learning_rate_app
     if swck_model_global is None or word_to_idx_global is None:
         return "Model not initialized. Cannot train."

-    print("\n--- App: Starting Short Training Session ---")
+    print("\n--- App: Starting Short Training Session (Full Debug ON for ALL batches/epochs) ---")
     progress(0, desc="Preparing training data...")

+    # Ensure debug prints are ON for the entire training session
+    set_model_debug_prints(swck_model_global, True, True, True)
+
     training_corpus = SEED_PHRASE_APP + " " + EXTENDED_TEXT_FOR_TRAINING_APP
     app_dataset = AppSWCKDataset(training_corpus, word_to_idx_global, SEQ_LEN_APP, SOS_TOKEN, EOS_TOKEN, PAD_TOKEN)
     if not app_dataset.samples:
+        set_model_debug_prints(swck_model_global, False, False, False) # Turn off if error
         return "App Training Error: No samples created from the corpus."

     app_dataloader = DataLoader(app_dataset, batch_size=int(batch_size_app), shuffle=True, collate_fn=app_swck_collate_fn)
@@ -213,24 +210,17 @@ def run_short_training_session(num_epochs_app, batch_size_app, learning_rate_app

     criterion_main_app = nn.CrossEntropyLoss(ignore_index=PAD_TOKEN)

-    training_log_output = f"Starting training for {num_epochs_app} epochs...\n"
+    training_log_output = f"Starting training for {num_epochs_app} epochs (Full Debug ON)...\n"
     swck_model_global.train()

     for epoch in progress.tqdm(range(int(num_epochs_app)), desc="Training Epochs"):
         swck_model_global.set_wiring_phase(epoch < WIRING_PHASE_EPOCHS_APP)
         epoch_loss = 0.0
-
-        # Enable full debug for the first batch of the first "wiring" epoch
-        # This will give detailed insight into the "self-wiring roll" on the first piece of data
-        is_first_wiring_batch = (epoch < WIRING_PHASE_EPOCHS_APP and epoch == 0)
+        print(f"\n>>> EPOCH {epoch+1} - Starting with Full Debug for all batches <<<")

         for batch_idx, (src_batch, tgt_batch) in enumerate(app_dataloader):
-
-
-                set_model_debug_prints(swck_model_global, True, True, True)
-            else: # Otherwise, keep debug prints minimal or off for speed
-                set_model_debug_prints(swck_model_global, False, False, False)
-
+            # Debug prints are already set for the whole session by set_model_debug_prints above
+            print(f"\n--- Training Batch {batch_idx+1}/{len(app_dataloader)} ---")

             src_batch, tgt_batch = src_batch.to(device_global), tgt_batch.to(device_global)
             decoder_input_tokens = src_batch[:, :-1]
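set_wiring_phase is defined in model.py and is not part of this commit. A plausible reading, given how it is called here (true only while epoch < WIRING_PHASE_EPOCHS_APP), is that it simply forwards a flag to each adaptive block; the attribute name below is an assumption, not the committed code:

def set_wiring_phase(self, active):
    # Assumed implementation: tell each block whether it may still re-wire itself.
    self.wiring_phase_active = active
    for block in self.adaptive_blocks:
        block.wiring_phase_active = active  # hypothetical attribute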
@@ -278,19 +268,20 @@ def run_short_training_session(num_epochs_app, batch_size_app, learning_rate_app
             epoch_loss += combined_loss.item()

             log_line = f" Epoch {epoch+1}, Batch {batch_idx+1}/{len(app_dataloader)}, Loss: {combined_loss.item():.4f}"
+            # Print every batch to console due to full debug, but maybe less often to UI
+            print(log_line)
             if batch_idx % max(1, len(app_dataloader)//2) == 0 or batch_idx == len(app_dataloader)-1 :
-                print(log_line)
                 training_log_output += log_line + "\n"

-            # Ensure debug is off after the first special batch
-            set_model_debug_prints(swck_model_global, False, False, False)
-
         avg_epoch_loss = epoch_loss / len(app_dataloader) if len(app_dataloader) > 0 else epoch_loss
         epoch_summary = f"Epoch {epoch+1}/{num_epochs_app} - Avg Loss: {avg_epoch_loss:.4f}\n"
         print(epoch_summary)
         training_log_output += epoch_summary

-
+    # Set debug prints OFF after the entire training session for subsequent operations (like generation)
+    # unless generation itself re-enables them.
+    print("--- App: Training Session Finished. Setting debug prints OFF by default. ---")
+    set_model_debug_prints(swck_model_global, False, False, False)
     swck_model_global.eval()

     try:
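The forward pass and loss assembly (old lines roughly 237-277) sit between these hunks. A hedged reconstruction of how the weight constants from the top of app.py plausibly combine with the model's entropy report; the report keys mirror the ones read during generation in this diff, while the function name and `gate_sparsity_term` are assumptions:

import torch

def combine_losses(main_loss, entropy_report, gate_sparsity_term=0.0,
                   w_main=1.0, w_block_ent=0.02, w_overall_ent=0.01, w_gate=0.001):
    # Defaults mirror MAIN_LOSS_WEIGHT_APP, BLOCK_TARGET_ENTROPY_LOSS_WEIGHT_APP,
    # OVERALL_OUTPUT_ENTROPY_REG_WEIGHT_APP and GATE_SPARSITY_LOSS_WEIGHT_APP above.
    block_ent = torch.stack(entropy_report['block_output_entropies']).mean()
    return (w_main * main_loss
            + w_block_ent * block_ent
            + w_overall_ent * entropy_report['overall_output_entropy']
            + w_gate * gate_sparsity_term)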
@@ -316,7 +307,7 @@ def run_short_training_session(num_epochs_app, batch_size_app, learning_rate_app

     return training_log_output

-def generate_text_for_app(prompt_str, max_len_gen, temperature_gen, enable_gen_debug):
+def generate_text_for_app(prompt_str, max_len_gen, temperature_gen): # Removed debug toggle, always ON
    global model_load_status_global
    if swck_model_global is None or word_to_idx_global is None or idx_to_word_global is None:
        return "Model not loaded. Please check server logs or try training.", "Model not available."
@@ -324,17 +315,19 @@ def generate_text_for_app(prompt_str, max_len_gen, temperature_gen, enable_gen_d
    swck_model_global.eval()
    swck_model_global.set_wiring_phase(False)

-    #
-
+    # FULL DEBUG ON for generation
+    print("\n--- App: Generating Text (Full Debug ON) ---")
+    set_model_debug_prints(swck_model_global, True, True, True)

-    print(f"App: Generating for prompt: '{prompt_str}', max_len: {max_len_gen}, temp: {temperature_gen}
+    print(f"App: Generating for prompt: '{prompt_str}', max_len: {max_len_gen}, temp: {temperature_gen}")

    tokens = [SOS_TOKEN] + [word_to_idx_global.get(w, UNK_TOKEN) for w in prompt_str.lower().split()]
    generated_ids_app = list(tokens)
-    debug_info_lines = [f"Prompt tokens: {generated_ids_app}"]
+    debug_info_lines = [f"Prompt tokens: {generated_ids_app}"] # For UI

    with torch.no_grad():
        for i in range(int(max_len_gen)):
+            print(f"\n--- Generation Step {i+1} ---") # Console log for each step
            context_start_idx = max(0, len(generated_ids_app) - SEQ_LEN_APP)
            current_context_ids = generated_ids_app[context_start_idx:]
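The model call and the token draw (old lines roughly 341-354) are unchanged and elided between these hunks. A self-contained sketch of the standard temperature-sampling step this loop presumably performs; the function and variable names are illustrative, not the committed ones:

import torch

def sample_next_token(last_step_logits, temperature):
    # Greedy pick at (near-)zero temperature, otherwise softmax + multinomial draw.
    if temperature <= 0.0:
        return int(torch.argmax(last_step_logits).item())
    probs = torch.softmax(last_step_logits / float(temperature), dim=-1)
    return int(torch.multinomial(probs, num_samples=1).item())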
@@ -355,11 +348,14 @@ def generate_text_for_app(prompt_str, max_len_gen, temperature_gen, enable_gen_d

            if next_token_id == EOS_TOKEN:
                debug_info_lines.append(f"Step {i+1}: EOS token encountered.")
+                print(f"Step {i+1}: EOS token encountered.")
                break
            generated_ids_app.append(next_token_id)

-
-
+            current_word = idx_to_word_global.get(next_token_id, UNK_TOKEN_STR)
+            print(f" ==> Generated token {i+1}: '{current_word}' (ID: {next_token_id})") # Console log
+
+            if i < 10 : # UI debug info is still limited
                overall_ent = entropy_report_infer['overall_output_entropy'].item()
                if entropy_report_infer['block_output_entropies'] and len(entropy_report_infer['block_output_entropies']) > 0:
                    b0_ent = entropy_report_infer['block_output_entropies'][0].item()
@@ -380,11 +376,12 @@ def generate_text_for_app(prompt_str, max_len_gen, temperature_gen, enable_gen_d

    debug_output_str = "\n".join(debug_info_lines)

-
-    set_model_debug_prints(swck_model_global, False, False, False)
+    print("--- App: Generation Finished. Setting debug prints OFF by default. ---")
+    set_model_debug_prints(swck_model_global, False, False, False) # Turn off after this call
    return final_text, debug_output_str

-#
+# Initialize model with debug OFF for initial startup to keep logs clean,
+# will be turned ON by training/generation functions.
 initial_load_status = initialize_or_load_model_app(enable_initial_debug=False)

 with gr.Blocks(title="SWCK Conceptual Demo") as demo:
@@ -392,7 +389,7 @@ with gr.Blocks(title="SWCK Conceptual Demo") as demo:

    gr.Markdown(f"""
    # Self-Wired Conscious Kernel (SWCK) - Conceptual Demo
-    This demo showcases a conceptual text generation model.
+    This demo showcases a conceptual text generation model with **FULL KERNEL DEBUGGING ON by default** for training and generation (output to Space console logs).
    Seed Phrase: "{SEED_PHRASE_APP[:100]}..." | Seed Number: "{SEED_NUMBER_STR_APP}".
    (Note: If checkpoint is not found or fails to load, an *untrained* model is used.)
    """)
@@ -401,7 +398,7 @@ with gr.Blocks(title="SWCK Conceptual Demo") as demo:
        with gr.TabItem("Generate Text"):
            with gr.Row():
                prompt_input = gr.Textbox(label="Enter your prompt:", placeholder="e.g., the meaning of existence is", scale=3)
-
+                # Removed debug checkbox as it's on by default for console
            with gr.Row():
                generate_button = gr.Button("Generate", scale=1)
            with gr.Row():
@@ -412,21 +409,21 @@ with gr.Blocks(title="SWCK Conceptual Demo") as demo:
            debug_text_area = gr.Textbox(label="Generation Debug Info (first few steps to UI):", lines=8, interactive=False)

        with gr.TabItem("In-App Training (Conceptual Test)"):
-            gr.Markdown("WARNING: In-app training is EXTREMELY slow
+            gr.Markdown("WARNING: In-app training is EXTREMELY slow. **Full Kernel Debug will be printed to console for ALL batches/epochs.** Model state persists only for this session unless saved manually.")
            with gr.Row():
-                train_epochs_slider = gr.Slider(minimum=1, maximum=
-                train_batch_size_slider = gr.Slider(minimum=1, maximum=
+                train_epochs_slider = gr.Slider(minimum=1, maximum=2, value=1, step=1, label="Number of Training Epochs (1-2 for demo)")
+                train_batch_size_slider = gr.Slider(minimum=1, maximum=2, value=1, step=1, label="Training Batch Size (1-2 for demo)")
                train_lr_slider = gr.Slider(minimum=1e-5, maximum=1e-3, value=5e-4, step=1e-5, label="Learning Rate")

-            start_training_button = gr.Button("Start Short Training Session")
-            training_status_output = gr.Textbox(label="Training Log / Status (summary):", lines=10, interactive=False, show_label=True)
+            start_training_button = gr.Button("Start Short Training Session (Full Debug to Console)")
+            training_status_output = gr.Textbox(label="Training Log / Status (summary to UI):", lines=10, interactive=False, show_label=True)

    def update_status_text_for_ui():
        return f"**Model Status:** {model_load_status_global}"

    generate_button.click(
-        fn=generate_text_for_app,
-        inputs=[prompt_input, max_len_slider, temp_slider
+        fn=generate_text_for_app, # Removed enable_gen_debug from inputs
+        inputs=[prompt_input, max_len_slider, temp_slider],
        outputs=[output_text, debug_text_area]
    )
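The click wiring for the training button and the status refresh fall in the lines these hunks skip. It presumably mirrors the generate wiring above; a sketch using only components named in this diff, with the exact committed call unconfirmed:

start_training_button.click(
    fn=run_short_training_session,
    inputs=[train_epochs_slider, train_batch_size_slider, train_lr_slider],
    outputs=[training_status_output]
)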
@@ -438,6 +435,4 @@ with gr.Blocks(title="SWCK Conceptual Demo") as demo:


 if __name__ == "__main__":
-
-    # The model's internal debug prints are controlled by set_model_debug_prints().
-    demo.launch(debug=True)
+    demo.launch(debug=True) # Gradio server debug
|