import random
from glob import glob

from datasets import Dataset, DatasetDict

# Get all the token files
token_files = glob('tokenized/*.tokens')
total_files = len(token_files)

print(f"Found {total_files} token files")

# Set the split sizes
train_size = 23
dev_size = 8
test_size = 8

# Ensure we have enough files
if total_files < (train_size + dev_size + test_size):
    print(f"Warning: Not enough files ({total_files}) for the requested split sizes.")
    # Adjust sizes proportionally if needed
    total_requested = train_size + dev_size + test_size
    train_size = int(total_files * (train_size / total_requested))
    dev_size = int(total_files * (dev_size / total_requested))
    test_size = total_files - train_size - dev_size

# Randomly shuffle the files
random.seed(42)  # For reproducibility
random.shuffle(token_files)

# Split the files
train_files = token_files[:train_size]
dev_files = token_files[train_size:train_size + dev_size]
test_files = token_files[train_size + dev_size:train_size + dev_size + test_size]

# Function to process a list of files and return data points
def process_files(file_list):
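    """Read CoNLL-style token files and return one record per document.

    Assumed input format (column layout inferred from the parsing below;
    the tag values shown are placeholders, not an assumed tag set): one
    whitespace-separated line per token, with documents separated by
    blank lines:

        <token> <POS-tag> <NER-tag>
        <token> <POS-tag> <NER-tag>

        <token> <POS-tag> <NER-tag>

    Only the token (column 1) and the NER tag (column 3) are kept.
    """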
    result = []
    for file in file_list:
        tokens = []
        ner_tags = []
        
        with open(file, 'r') as f:
            for line in f:
                line = line.strip()
                
                # If empty line, append current document and reset
                if not line:
                    if tokens:  # Only append if there are tokens (avoid empty entries)
                        result.append({
                            "tokens": tokens,
                            "ner_tags": ner_tags,
                            "file_name": file  # Include file name for reference
                        })
                        tokens = []
                        ner_tags = []
                    continue
                
                # Split line into components
                parts = line.split()
                
                # Ensure we have at least 3 parts (token, POS, NER)
                if len(parts) >= 3:
                    token = parts[0]
                    ner_tag = parts[2]  # NER tag is the 3rd column
                    
                    tokens.append(token)
                    ner_tags.append(ner_tag)
            
            # Don't forget to add the last document if it exists
            if tokens:
                result.append({
                    "tokens": tokens,
                    "ner_tags": ner_tags,
                    "file_name": file  # Include file name for reference
                })
    
    return result

# Process each split
train_data = process_files(train_files)
dev_data = process_files(dev_files)
test_data = process_files(test_files)

# Create datasets for each split
train_dataset = Dataset.from_list(train_data)
dev_dataset = Dataset.from_list(dev_data)
test_dataset = Dataset.from_list(test_data)

# Create a DatasetDict with train, dev, and test splits
dataset_dict = DatasetDict({
    "train": train_dataset,
    "validation": dev_dataset,  # Using 'validation' as it's more standard
    "test": test_dataset
})

# Print some statistics
print(f"Train split: {len(train_data)} files")
print(f"Validation split: {len(dev_data)} files")
print(f"Test split: {len(test_data)} files")
print(f"Dataset features: {train_dataset.features}")

# Push the dataset to the Hub
dataset_dict.push_to_hub('extraordinarylab/malware-text-db')
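
# Optional verification (a sketch, assuming the push above succeeded and the
# repository name is unchanged): reload the dataset from the Hub and inspect it.
#
#   from datasets import load_dataset
#   ds = load_dataset('extraordinarylab/malware-text-db')
#   print(ds)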