# malware-text-db / prepare.py
import random
from glob import glob
from datasets import load_dataset, Dataset, DatasetDict
# Get all the token files
token_files = glob('tokenized/*.tokens')
total_files = len(token_files)
print(f"Found {total_files} token files")
# Set the split sizes
train_size = 23
dev_size = 8
test_size = 8
# Ensure we have enough files
if total_files < (train_size + dev_size + test_size):
print(f"Warning: Not enough files ({total_files}) for the requested split sizes.")
# Adjust sizes proportionally if needed
total_requested = train_size + dev_size + test_size
train_size = int(total_files * (train_size / total_requested))
dev_size = int(total_files * (dev_size / total_requested))
test_size = total_files - train_size - dev_size
# Randomly shuffle the files
random.seed(42) # For reproducibility
random.shuffle(token_files)
# Split the files
train_files = token_files[:train_size]
dev_files = token_files[train_size:train_size + dev_size]
test_files = token_files[train_size + dev_size:train_size + dev_size + test_size]
# Function to process a list of files and return data points
def process_files(file_list):
    result = []
    for file in file_list:
        tokens = []
        ner_tags = []
        with open(file, 'r') as f:
            for line in f:
                line = line.strip()
                # If empty line, append current document and reset
                if not line:
                    if tokens:  # Only append if there are tokens (avoid empty entries)
                        result.append({
                            "tokens": tokens,
                            "ner_tags": ner_tags,
                            "file_name": file  # Include file name for reference
                        })
                    tokens = []
                    ner_tags = []
                    continue
                # Split line into components
                parts = line.split()
                # Ensure we have at least 3 parts (token, POS, NER)
                if len(parts) >= 3:
                    token = parts[0]
                    ner_tag = parts[2]  # NER tag is the 3rd column
                    tokens.append(token)
                    ner_tags.append(ner_tag)
        # Don't forget to add the last document if it exists
        if tokens:
            result.append({
                "tokens": tokens,
                "ner_tags": ner_tags,
                "file_name": file  # Include file name for reference
            })
    return result
# Process each split
train_data = process_files(train_files)
dev_data = process_files(dev_files)
test_data = process_files(test_files)
# Create datasets for each split
train_dataset = Dataset.from_list(train_data)
dev_dataset = Dataset.from_list(dev_data)
test_dataset = Dataset.from_list(test_data)
# Create a DatasetDict with train, dev, and test splits
dataset_dict = DatasetDict({
    "train": train_dataset,
    "validation": dev_dataset,  # use 'validation' as the more standard split name
    "test": test_dataset
})
# Print some statistics
print(f"Train split: {len(train_data)} files")
print(f"Validation split: {len(dev_data)} files")
print(f"Test split: {len(test_data)} files")
print(f"Dataset features: {train_dataset.features}")
# Push the dataset to the Hugging Face Hub (requires authentication)
dataset_dict.push_to_hub('extraordinarylab/malware-text-db')
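# Example: reload the pushed dataset from the Hub to verify the upload
# (assumes the push above succeeded and you are authenticated);
# load_dataset is already imported at the top of this script.
# dataset = load_dataset('extraordinarylab/malware-text-db')
# print(dataset)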