File size: 3,012 Bytes
81801a6
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
71
72
73
74
75
76
77
78
79
80
81
82
83
84
85
86
#!/usr/bin/env python3

import os
import pandas as pd
import json
from pathlib import Path
import glob

def process_language(language_dir):
    """Convert all parquet shards for one language into a single train.jsonl.

    Reads every parquet file under ``<language_dir>/data/<dir name>/``,
    extracts the text content plus per-file code metadata from each row,
    and writes one JSON object per row to ``<language_dir>/train/train.jsonl``.

    Args:
        language_dir: Path (relative or absolute) to a language directory.

    Returns:
        int: Number of records written (0 when no parquet files were found).
        Previously this function returned ``None`` on the no-files path,
        which made the return type inconsistent; callers using ``if records:``
        are unaffected by the change to 0.
    """
    print(f"Processing {language_dir}...")

    language_path = Path(language_dir)

    # Create the output directory; parents=True makes this robust if the
    # caller passes a nested path whose intermediate dirs don't exist yet.
    train_dir = language_path / "train"
    train_dir.mkdir(parents=True, exist_ok=True)

    # Shards live at <language_dir>/data/<language name>/*.parquet
    parquet_pattern = f"{language_dir}/data/{language_path.name}/*.parquet"
    parquet_files = glob.glob(parquet_pattern)

    if not parquet_files:
        print(f"  No parquet files found in {parquet_pattern}")
        return 0  # consistent int return so callers can sum unconditionally

    print(f"  Found {len(parquet_files)} parquet files")

    output_file = train_dir / "train.jsonl"

    total_records = 0

    with open(output_file, 'w', encoding='utf-8') as f:
        # sorted() makes the output order deterministic across runs.
        for parquet_file in sorted(parquet_files):
            print(f"  Processing {parquet_file}")

            df = pd.read_parquet(parquet_file)

            for _, row in df.iterrows():
                # NOTE(review): column names (content/lang/size/ext/...)
                # look like The Stack dataset schema — confirm against the
                # actual parquet files before relying on this elsewhere.
                record = {
                    'text': row['content'],
                    'language': row['lang'],
                    'size': int(row['size']),
                    'ext': row['ext'],
                    # Float stats may be NaN in the source data; emit JSON
                    # null rather than NaN (which json.dumps would reject
                    # only with allow_nan=False, but null is cleaner).
                    'avg_line_length': float(row['avg_line_length']) if pd.notna(row['avg_line_length']) else None,
                    'max_line_length': int(row['max_line_length']),
                    'alphanum_fraction': float(row['alphanum_fraction']) if pd.notna(row['alphanum_fraction']) else None,
                    'hexsha': row['hexsha']
                }

                # Repo metadata is optional: only attach it when present.
                if pd.notna(row['max_stars_repo_name']):
                    record['repo_name'] = row['max_stars_repo_name']
                    record['stars_count'] = int(row['max_stars_count']) if pd.notna(row['max_stars_count']) else None

                # One JSON object per line (JSONL); ensure_ascii=False keeps
                # non-ASCII source code readable instead of \uXXXX-escaped.
                f.write(json.dumps(record, ensure_ascii=False) + '\n')
                total_records += 1

    print(f"  Created {output_file} with {total_records} records")
    return total_records

def main():
    """Discover language directories in the CWD and convert each one.

    Hidden directories (dot-prefixed) are skipped.  Failures in a single
    language are logged and do not abort the remaining directories.
    """
    # Every visible subdirectory of the current directory is treated as a
    # language directory.
    language_dirs = [
        entry
        for entry in os.listdir('.')
        if os.path.isdir(entry) and not entry.startswith('.')
    ]

    print(f"Found language directories: {language_dirs}")

    total_processed = 0

    # Deterministic processing order; one bad directory must not stop the rest.
    for lang_dir in sorted(language_dirs):
        try:
            records = process_language(lang_dir)
        except Exception as e:
            print(f"Error processing {lang_dir}: {e}")
            continue
        if records:
            total_processed += records

    print(f"\nTotal records processed: {total_processed}")

if __name__ == "__main__":
    main()