# PKU_NLPDL_Assignment1 / task3 / dataset_init.py
# (Uploaded via huggingface_hub by Antoinegg1, revision 0ee6a96.)
import random
from sklearn.model_selection import train_test_split
import MeCab
# Shared MeCab tagger; "-Owakati" makes parse() return space-separated tokens
# (wakati-gaki mode), so Japanese text can be tokenized with a plain split().
mecab = MeCab.Tagger("-Owakati")
def build_vocab(corpus, language="en"):
    """Collect the sorted set of unique tokens appearing in *corpus*.

    English sentences are tokenized by whitespace; any other language is
    tokenized with the module-level MeCab tagger (wakati output + split).

    Args:
        corpus: iterable of sentence strings.
        language: "en" for whitespace tokenization, anything else for MeCab.

    Returns:
        Sorted list of unique tokens.
    """
    if language == "en":
        tokenize = lambda text: text.strip().split()
    else:
        tokenize = lambda text: mecab.parse(text).strip().split()
    vocab = set()
    for sentence in corpus:
        vocab.update(tokenize(sentence))
    return sorted(vocab)
def preprocess_data(filepath):
    """Read a tab-separated Japanese/English parallel corpus.

    Each usable line has the form "<japanese>\t<english>". Lines without a
    tab are skipped.

    Args:
        filepath: path to the UTF-8 corpus file.

    Returns:
        (english_sentences, japanese_sentences) as two parallel lists.
    """
    english_sentences = []
    japanese_sentences = []
    with open(filepath, "r", encoding="utf-8") as f:
        for line in f:
            if '\t' in line:
                # maxsplit=1 keeps this from raising ValueError when the
                # English side itself contains a tab character; the original
                # unbounded split crashed on such lines.
                jp_sentence, en_sentence = line.strip().split("\t", 1)
                english_sentences.append(en_sentence)
                japanese_sentences.append(jp_sentence)
    return english_sentences, japanese_sentences
def split_dataset(english_sentences, japanese_sentences, train_size=0.8, valid_size=0.1):
    """Split parallel EN/JP sentence lists into train/valid/test portions.

    First carves off the test fraction (1 - train_size - valid_size), then
    splits the remainder so that the validation set is valid_size of the
    whole dataset. Both splits use random_state=42 for reproducibility.

    Args:
        english_sentences: list of English sentences.
        japanese_sentences: parallel list of Japanese sentences.
        train_size: fraction of the data used for training (default 0.8).
        valid_size: fraction of the data used for validation (default 0.1).

    Returns:
        (en_train, en_valid, en_test, jp_train, jp_valid, jp_test)
    """
    kept = train_size + valid_size
    en_keep, en_test, jp_keep, jp_test = train_test_split(
        english_sentences,
        japanese_sentences,
        test_size=1 - kept,
        random_state=42,
    )
    # Within the kept portion, valid_size/kept of it equals valid_size overall.
    en_train, en_valid, jp_train, jp_valid = train_test_split(
        en_keep,
        jp_keep,
        test_size=valid_size / kept,
        random_state=42,
    )
    return en_train, en_valid, en_test, jp_train, jp_valid, jp_test
if __name__ == "__main__":
    def _write_pairs(path, jp_sents, en_sents):
        """Write parallel sentences to *path* as "jp<TAB>en" lines (UTF-8)."""
        with open(path, "w", encoding="utf-8") as f:
            for jp_sentence, en_sentence in zip(jp_sents, en_sents):
                f.write(jp_sentence + "\t" + en_sentence + "\n")

    english_sentences, japanese_sentences = preprocess_data("eng_jpn.txt")
    en_vocab = build_vocab(english_sentences, language="en")
    jp_vocab = build_vocab(japanese_sentences, language="jp")
    print(f"English Vocabulary Size: {len(en_vocab)}")
    print(f"Japanese Vocabulary Size: {len(jp_vocab)}")
    en_train, en_valid, en_test, jp_train, jp_valid, jp_test = split_dataset(english_sentences, japanese_sentences)
    # One helper call per split replaces three duplicated write loops.
    _write_pairs("train.txt", jp_train, en_train)
    _write_pairs("valid.txt", jp_valid, en_valid)
    _write_pairs("test.txt", jp_test, en_test)
    print("Dataset splitting complete!")
#English Vocabulary Size: 19920
#Japanese Vocabulary Size: 15086