| | |
| | """ |
| | Create clean training dataset with only valid examples |
| | """ |
| |
|
| | import json |
| |
|
def create_clean_dataset(input_path, output_path):
    """Filter a JSONL chat dataset down to valid, tool-free examples.

    Reads ``input_path`` line by line and keeps an example only when:
      * the line parses as JSON,
      * no message's text contains the substring 'tool_call',
      * at least two messages are dicts carrying both 'role' and 'content'.

    Surviving examples are written to ``output_path`` as JSONL and also
    returned as a list of ``{'messages': [...]}`` dicts.

    Args:
        input_path: path to the source JSONL file.
        output_path: path the cleaned JSONL file is written to.

    Returns:
        list[dict]: the cleaned examples, in input order.
    """
    clean_data = []

    # Pin the encoding so the dataset round-trips identically on any platform
    # (the output is written with ensure_ascii=False, so this matters).
    with open(input_path, 'r', encoding='utf-8') as f:
        for line in f:
            # Keep the try body minimal: only the parse can raise here.
            try:
                data = json.loads(line)
            except json.JSONDecodeError:
                continue  # skip malformed (or blank) lines

            messages = data.get('messages', [])

            # Drop any example that references tool calls anywhere in its text.
            if any('tool_call' in str(msg) for msg in messages):
                continue

            # Keep only well-formed messages; coerce content to str.
            valid_messages = [
                {'role': msg['role'], 'content': str(msg['content'])}
                for msg in messages
                if isinstance(msg, dict) and 'role' in msg and 'content' in msg
            ]

            # Need at least a prompt/response pair to be trainable.
            # (This also covers the original len(messages) >= 2 pre-check.)
            if len(valid_messages) >= 2:
                clean_data.append({'messages': valid_messages})

    with open(output_path, 'w', encoding='utf-8') as f:
        for entry in clean_data:
            f.write(json.dumps(entry, ensure_ascii=False) + '\n')

    print(f"✅ Created clean dataset with {len(clean_data)} examples")
    print(f"💾 Saved to: {output_path}")
    return clean_data
| |
|
# Guarded entry point: importing this module no longer triggers file I/O
# against hard-coded paths; the cleanup only runs when executed as a script.
if __name__ == "__main__":
    # NOTE(review): paths are environment-specific — edit when relocating data.
    clean_train = create_clean_dataset(
        "/home/x/adaptai/aiml/e-train-1/elizabeth_tooluse_minipack_v1.jsonl",
        "/home/x/adaptai/aiml/e-train-1/clean_training_data.jsonl",
    )
    print(f"Total clean examples: {len(clean_train)}")