from gensim.models import Word2Vec
import MeCab


def read_training_data(filepath, tagger):
    """Read tab-separated Japanese/English sentence pairs and tokenize them."""
    english_sentences = []
    japanese_sentences = []

    with open(filepath, "r", encoding="utf-8") as f:
        for line in f:
            # Each line holds a Japanese sentence and its English translation,
            # separated by a tab.
            jp_sentence, en_sentence = line.strip().split("\t")
            # English is tokenized on whitespace; Japanese is segmented with MeCab.
            english_sentences.append(en_sentence.strip().split())
            japanese_sentences.append(tagger.parse(jp_sentence).strip().split())

    return english_sentences, japanese_sentences


def train_word2vec(sentences, language="en", embedding_size=300, window=5, min_count=1, sg=1):
    """Train a Word2Vec model on the tokenized sentences and save it to disk."""
    model = Word2Vec(sentences, vector_size=embedding_size, window=window, min_count=min_count, sg=sg)
    model.save(f"word2vec_{language}.model")
    return model


if __name__ == "__main__":
    # "-Owakati" makes MeCab output space-separated surface forms.
    mecab = MeCab.Tagger("-Owakati")

    english_sentences, japanese_sentences = read_training_data("./data/train.txt", mecab)

    # sg=0 selects the CBOW architecture; sg=1 would select skip-gram.
    en_model = train_word2vec(english_sentences, language="en", embedding_size=300, window=5, min_count=1, sg=0)
    print(f"Trained English word embeddings with {len(en_model.wv)} words.")

    jp_model = train_word2vec(japanese_sentences, language="jp", embedding_size=300, window=5, min_count=1, sg=0)
    print(f"Trained Japanese word embeddings with {len(jp_model.wv)} words.")
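
# --- Optional usage sketch (an assumption, not part of the original script) ---
# Once the two models have been saved by the script above, they can be reloaded
# and queried with gensim's standard API. The query word "student" below is only
# an illustrative token; whether it appears in the vocabulary depends on the
# actual contents of ./data/train.txt.
#
#     from gensim.models import Word2Vec
#
#     en_model = Word2Vec.load("word2vec_en.model")
#     print(en_model.wv.most_similar("student", topn=5))  # nearest neighbours by cosine similarity
#
#     jp_model = Word2Vec.load("word2vec_jp.model")
#     print(len(jp_model.wv))  # vocabulary size of the Japanese model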