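"""Train monolingual word2vec embeddings from a tab-separated Japanese-English parallel corpus.

Each line of the training file is expected to contain a Japanese sentence and its
English translation separated by a tab. English text is tokenized on whitespace;
Japanese text is segmented with MeCab in wakati (-Owakati) mode. One gensim
Word2Vec model is trained and saved per language.
"""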
from gensim.models import Word2Vec
import MeCab

def read_training_data(filepath, tagger):
    """Read "japanese<TAB>english" lines and return (english_sentences, japanese_sentences) as token lists."""
    english_sentences = []
    japanese_sentences = []

    with open(filepath, "r", encoding="utf-8") as f:
        for line in f:
            line = line.strip()
            if not line:
                continue
            jp_sentence, en_sentence = line.split("\t")
            english_sentences.append(en_sentence.strip().split())  # whitespace tokenization for English
            japanese_sentences.append(tagger.parse(jp_sentence).strip().split())  # MeCab wakati segmentation for Japanese

    return english_sentences, japanese_sentences


def train_word2vec(sentences, language="en", embedding_size=300, window=5, min_count=1, sg=1):
    """Train a Word2Vec model on tokenized sentences and save it as word2vec_<language>.model."""
    # sg=1: skip-gram, sg=0: CBOW
    model = Word2Vec(sentences, vector_size=embedding_size, window=window, min_count=min_count, sg=sg)
    model.save(f"word2vec_{language}.model")
    return model

if __name__ == "__main__":
    # MeCab tagger in wakati mode: outputs space-separated surface forms
    mecab = MeCab.Tagger("-Owakati")

    english_sentences, japanese_sentences = read_training_data("./data/train.txt", mecab)

    # sg=0 trains CBOW models for both languages
    en_model = train_word2vec(english_sentences, language="en", embedding_size=300, window=5, min_count=1, sg=0)
    print(f"Trained English word embeddings with {len(en_model.wv)} words.")

    jp_model = train_word2vec(japanese_sentences, language="jp", embedding_size=300, window=5, min_count=1, sg=0)
    print(f"Trained Japanese word embeddings with {len(jp_model.wv)} words.")