kulia-moon committed · verified
Commit 02a357c · 1 Parent(s): 348750a

Update app.py

Files changed (1):
  1. app.py +39 -34
app.py CHANGED
@@ -185,14 +185,12 @@ def push_file_to_huggingface_dataset(file_path, path_in_repo, commit_message_pre
     print(log_message)
     return log_message
 
-# --- Main Generation and Push Function (Now a generator for streaming) ---
+# --- Main Generation and Push Function ---
 def generate_and_display_conversations(num_conversations_input, custom_prompts_input, custom_system_prompt_input,
                                        commit_subject, commit_body, selected_model_name_input):
     num_conversations = int(num_conversations_input)
     if num_conversations <= 0:
-        # Use gr.NoOp() for the output that shouldn't be updated
-        yield gr.NoOp(), "Please enter a number of conversations greater than zero.\n"
-        return
+        return "Please enter a number of conversations greater than zero.", ""
 
     os.makedirs(OUTPUT_DIR, exist_ok=True)
 
@@ -204,47 +202,52 @@ def generate_and_display_conversations(num_conversations_input, custom_prompts_i
             try:
                 existing_conversations.append(json.loads(line.strip()))
             except json.JSONDecodeError as e:
-                # Use gr.NoOp() for the output that shouldn't be updated
-                yield gr.NoOp(), f"Skipping malformed JSON line in {DATA_FILE}: {line.strip()} - {e}\n"
-
+                print(f"Skipping malformed JSON line in {DATA_FILE}: {line.strip()} - {e}")
+
     # Deduplicate existing conversations
     seen_conversations = set()
     cleaned_existing_conversations = []
     for conv_entry in existing_conversations:
+        # Use a string representation of the whole entry for deduplication
         conv_str = json.dumps(conv_entry, sort_keys=True)
         if conv_str not in seen_conversations:
             cleaned_existing_conversations.append(conv_entry)
             seen_conversations.add(conv_str)
 
     # Validate and filter existing conversations for completeness (expected length)
-    expected_msg_len = lambda n_exchanges: 1 + 1 + n_exchanges * 2 # System + initial human + (AI_turn + human_followup) * n_exchanges
+    expected_msg_len = lambda n_exchanges: 1 + 1 + n_exchanges + (n_exchanges - 1) # System + initial human + AI turns + human follow-ups
 
     validated_existing_conversations = []
     initial_cleaned_count = len(cleaned_existing_conversations)
     for conv_entry in cleaned_existing_conversations:
         conv_list = conv_entry.get("conversations", [])
-        if len(conv_list) == expected_msg_len(5): # Fixed to 5 exchanges for generation
+        # Assume num_exchanges was 5 for old conversations if not stored
+        # Or more robustly, infer from length.
+        # Given the fixed num_exchanges=5 for generation, we can check for this.
+        if len(conv_list) == expected_msg_len(5):
             validated_existing_conversations.append(conv_entry)
         else:
-            # Use gr.NoOp() for the output that shouldn't be updated
-            yield gr.NoOp(), f"Skipping incomplete/malformed existing conversation (length {len(conv_list)} != {expected_msg_len(5)}): {conv_entry}\n"
+            print(f"Skipping incomplete/malformed existing conversation (length {len(conv_list)} != {expected_msg_len(5)}): {conv_entry}")
 
     all_conversations = list(validated_existing_conversations) # Start with clean existing ones
 
+    generation_log = []
     current_time_loc = datetime.datetime.now().strftime('%Y-%m-%d %H:%M:%S') + " (An Nhơn, Binh Dinh, Vietnam)"
-    # Use gr.NoOp() for the output that shouldn't be updated
-    yield gr.NoOp(), f"Starting conversation generation at {current_time_loc}\n"
-    yield gr.NoOp(), f"Loaded and cleaned {len(validated_existing_conversations)} existing conversations (initially {initial_cleaned_count} before validation).\n"
-    yield gr.NoOp(), f"Generating {num_conversations} *new* conversations.\n"
+    generation_log.append(f"Starting conversation generation at {current_time_loc}")
+    generation_log.append(f"Loaded and cleaned {len(validated_existing_conversations)} existing conversations (initially {initial_cleaned_count} before validation).")
+    generation_log.append(f"Generating {num_conversations} *new* conversations.")
 
     model_names_to_use = list(AVAILABLE_MODELS.keys())
     if selected_model_name_input and selected_model_name_input in model_names_to_use:
-        model_selection_info = f"Specific model selected for all new conversations: '{selected_model_name_input}'\n"
+        # If a specific model is selected, all conversations in this batch will use that model
+        model_selection_info = f"Specific model selected for all new conversations: '{selected_model_name_input}'"
+        # Determine the model to use for this specific conversation
         selected_model_for_this_conv_batch = selected_model_name_input
     else:
-        model_selection_info = f"No specific model selected or invalid. Models will be chosen randomly per conversation from: {', '.join(model_names_to_use)}\n"
+        # If no specific model or invalid model, random models will be picked per conversation
+        model_selection_info = f"No specific model selected or invalid. Models will be chosen randomly per conversation from: {', '.join(model_names_to_use)}"
         selected_model_for_this_conv_batch = None # Indicate random selection per loop
-    yield gr.NoOp(), model_selection_info
+    generation_log.append(model_selection_info)
 
 
     current_prompts = DEFAULT_INITIAL_PROMPTS
@@ -256,7 +259,7 @@ def generate_and_display_conversations(num_conversations_input, custom_prompts_i
     new_conversations_generated = []
     expected_conversation_length = expected_msg_len(5) # Always 5 exchanges for new generations
 
-    for i in range(num_conversations): # tqdm not directly compatible with yielding, so manually handle progress
+    for i in tqdm(range(num_conversations), desc="Generating conversations"):
         seed = random.randint(0, 1000000)
 
         if custom_system_prompt_input:
@@ -268,44 +271,46 @@ def generate_and_display_conversations(num_conversations_input, custom_prompts_i
         prompt_template = random.choice(current_prompts)
         prompt = prompt_template.replace("[NAME]", random_name)
 
+        # Determine the model to use for this specific conversation
         selected_model_for_this_conv = selected_model_for_this_conv_batch if selected_model_for_this_conv_batch else random.choice(model_names_to_use)
 
-        yield gr.NoOp(), f"[{datetime.datetime.now().strftime('%H:%M:%S')}] Generating conv {i+1}/{num_conversations} with '{selected_model_for_this_conv}' (System: '{system[:50]}...')\n"
+        generation_log.append(f"[{datetime.datetime.now().strftime('%H:%M:%S')}] Generating conv {i+1}/{num_conversations} with '{selected_model_for_this_conv}' (System: '{system[:50]}...')")
 
         conversation = chat(system, prompt, selected_model_for_this_conv, seed=seed, num_exchanges=5)
 
         if len(conversation) == expected_conversation_length and not any(d.get("from") == "error" for d in conversation):
             new_conv_entry = {"model_used": selected_model_for_this_conv, "conversations": conversation}
+            # Add to all_conversations and new_conversations_generated only if not a duplicate of what's already *in memory*
+            # This handles duplicates from current batch or newly generated identical to existing
             new_conv_str = json.dumps(new_conv_entry, sort_keys=True)
             if new_conv_str not in seen_conversations:
                 all_conversations.append(new_conv_entry)
                 new_conversations_generated.append(new_conv_entry)
-                seen_conversations.add(new_conv_str)
-                yield gr.NoOp(), f"[{datetime.datetime.now().strftime('%H:%M:%S')}] Successfully generated and added conv {i+1}/{num_conversations}.\n"
+                seen_conversations.add(new_conv_str) # Mark as seen
+                generation_log.append(f"[{datetime.datetime.now().strftime('%H:%M:%S')}] Successfully generated and added conv {i+1}/{num_conversations}.")
             else:
-                yield gr.NoOp(), f"[{datetime.datetime.now().strftime('%H:%M:%S')}] Skipped conv {i+1}/{num_conversations} as it's a duplicate.\n"
+                generation_log.append(f"[{datetime.datetime.now().strftime('%H:%M:%S')}] Skipped conv {i+1}/{num_conversations} as it's a duplicate.")
         else:
-            yield gr.NoOp(), f"[{datetime.datetime.now().strftime('%H:%M:%S')}] Skipping conv {i+1}/{num_conversations} due to error or incorrect length ({len(conversation)} messages, expected {expected_conversation_length}).\n"
+            generation_log.append(f"[{datetime.datetime.now().strftime('%H:%M:%S')}] Skipping conv {i+1}/{num_conversations} due to error or incorrect length ({len(conversation)} messages, expected {expected_conversation_length}).")
             if conversation and conversation[-1].get("from") == "error":
-                yield gr.NoOp(), f"  Error details: {conversation[-1]['value']}\n"
+                generation_log.append(f"  Error details: {conversation[-1]['value']}")
 
     # Save all (cleaned existing + newly generated unique) conversations to JSONL
     with open(DATA_FILE, "w") as f:
         for conv in all_conversations:
             f.write(json.dumps(conv) + "\n")
 
-    yield gr.NoOp(), f"Saved {len(new_conversations_generated)} *new unique* conversations to {DATA_FILE} (total unique and validated: {len(all_conversations)}).\n"
-    yield gr.NoOp(), "Attempting to push main conversations file to Hugging Face Dataset...\n"
+    generation_log.append(f"Saved {len(new_conversations_generated)} *new unique* conversations to {DATA_FILE} (total unique and validated: {len(all_conversations)}).")
+    generation_log.append("Attempting to push main conversations file to Hugging Face Dataset...")
 
     # --- Auto-push main conversations to Hugging Face Dataset ---
     # Use the custom commit message
    commit_message = f"{commit_subject.strip()}\n\n{commit_body.strip()}" if commit_body.strip() else commit_subject.strip()
     push_status = push_file_to_huggingface_dataset(DATA_FILE, "conversations.jsonl", commit_message)
-    yield gr.NoOp(), push_status + "\n"
-    final_log_message = f"Process complete at {datetime.datetime.now().strftime('%Y-%m-%d %H:%M:%S')} (An Nhơn, Binh Dinh, Vietnam)\n"
-
-    # Final return for both outputs: The first output (JSON) is now updated, the second (log) gets the final message.
-    yield json.dumps(all_conversations, indent=2), final_log_message
+    generation_log.append(push_status)
+    generation_log.append(f"Process complete at {datetime.datetime.now().strftime('%Y-%m-%d %H:%M:%S')} (An Nhơn, Binh Dinh, Vietnam)")
+
+    return json.dumps(all_conversations, indent=2), "\n".join(generation_log)
 
 
 # --- Community Prompts Functions ---
 def load_community_prompts():
@@ -513,8 +518,8 @@ with gr.Blocks() as demo:
             generate_button = gr.Button("Generate & Push Conversations")
 
         output_conversations = gr.JSON(label="Generated Conversations (Content of conversations.jsonl)")
-        # Set streaming=True for the output_log Textbox
        output_log = gr.Textbox(label="Process Log", interactive=False, lines=10, max_lines=20)
+
        # Link commit template dropdown to update fields
        commit_template_dropdown.change(
            fn=update_commit_fields,
@@ -539,7 +544,7 @@ with gr.Blocks() as demo:
                model_selector_dropdown # Pass selected model name
            ],
            outputs=[output_conversations, output_log],
-            # show_progress=True is handled internally by yielding a generator
+            show_progress=True
        )
 
    with gr.Tab("Community Prompts"):
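What changed, in short: generate_and_display_conversations is no longer a generator. The old version streamed progress by yielding gr.NoOp() for the JSON output alongside each log line; the new one collects lines in a generation_log list, tracks progress with tqdm in the server console, and returns both outputs once at the end, which is also why show_progress=True comes back on the click handler. As far as I can tell, gr.NoOp does not appear in Gradio's public API (gr.update() with no arguments is the documented way to leave an output untouched), which would explain the revert. A minimal sketch of the two handler styles; the names (run_plain, run_streaming, demo) are illustrative, not taken from app.py:

import gradio as gr

def run_plain(n):
    # Plain function: Gradio updates both outputs once, when it returns.
    log = "\n".join(f"step {i + 1}" for i in range(int(n)))
    return [f"item {i + 1}" for i in range(int(n))], log

def run_streaming(n):
    # Generator: every yield must supply a value for each output;
    # gr.update() with no arguments leaves a component unchanged.
    log = ""
    for i in range(int(n)):
        log += f"step {i + 1}\n"
        yield gr.update(), log  # stream the log, leave the JSON as-is
    yield [f"item {i + 1}" for i in range(int(n))], log  # final update

with gr.Blocks() as demo:
    n = gr.Number(value=3, label="Steps")
    json_out = gr.JSON(label="Result")
    log_out = gr.Textbox(label="Log", lines=5)
    gr.Button("Run").click(run_plain, inputs=n, outputs=[json_out, log_out])

if __name__ == "__main__":
    demo.launch()

The trade-off the commit accepts: with the plain function, users see no log output until the whole batch (generation plus the dataset push) has finished.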
 
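The expected-length formula changes too. The old 1 + 1 + n_exchanges * 2 counts a human follow-up after every AI turn, so 5 exchanges meant 12 messages; the new 1 + 1 + n_exchanges + (n_exchanges - 1) ends the transcript on the model's final reply, so 5 exchanges now means 11. A quick check of the arithmetic:

# Old: system + initial human + (AI turn + human follow-up) per exchange
old_len = lambda n_exchanges: 1 + 1 + n_exchanges * 2
# New: system + initial human + n AI turns + (n - 1) human follow-ups
new_len = lambda n_exchanges: 1 + 1 + n_exchanges + (n_exchanges - 1)

assert old_len(5) == 12
assert new_len(5) == 11  # transcript now ends on the model's fifth reply

One practical consequence: 12-message entries written under the old formula will fail the expected_msg_len(5) validation on the next run and be dropped from conversations.jsonl.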
 
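The deduplication strategy is unchanged but worth spelling out: dict entries are unhashable, so they cannot sit in a set directly, while json.dumps(entry, sort_keys=True) produces one canonical string per entry (keys sorted recursively) that can. The same seen_conversations set then guards both passes, cleaning the existing file and admitting new generations. A minimal sketch of the pattern, with made-up entries:

import json

entries = [
    {"model_used": "model-a", "conversations": [{"from": "system", "value": "hi"}]},
    # Same entry, different key order: canonicalization catches it.
    {"conversations": [{"from": "system", "value": "hi"}], "model_used": "model-a"},
    {"model_used": "model-b", "conversations": [{"from": "system", "value": "yo"}]},
]

seen, unique = set(), []
for entry in entries:
    key = json.dumps(entry, sort_keys=True)  # canonical form: key order no longer matters
    if key not in seen:
        seen.add(key)
        unique.append(entry)

print(len(unique))  # 2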