File size: 1,575 Bytes
d39027f
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function

from underthesea import word_tokenize
# Punctuation tokens to drop from tokenized captions (same inventory the
# COCO PTBTokenizer removes, including Penn Treebank bracket codes).
PUNCTUATIONS = [
    "''", "'", "``", "`", "-LRB-", "-RRB-", "-LCB-", "-RCB-",
    ".", "?", "!", ",", ":", "-", "--", "...", ";",
]


class VITokenizer:
    """Vietnamese caption tokenizer backed by underthesea's word_tokenize.

    Drop-in analogue of the COCO evaluation PTBTokenizer: takes a mapping of
    image id -> list of {'caption': str} dicts and returns a mapping of
    image id -> list of lowercased, punctuation-stripped, space-joined
    token strings, preserving per-image caption order.
    """

    def __init__(self):
        # underthesea.word_tokenize; with format="text" it returns the
        # segmented sentence as a single string.
        self.model = word_tokenize

    def tokenize(self, captions_for_image):
        """Tokenize every caption for every image.

        Args:
            captions_for_image: dict mapping image id to a list of dicts,
                each carrying the raw caption under the 'caption' key.

        Returns:
            dict mapping the same image ids to lists of tokenized caption
            strings (lowercased, punctuation removed), in input order.
        """
        final_tokenized_captions_for_image = {}
        # Single pass over the input: no need to build parallel id/sentence
        # lists and zip them back together as the PTB-style code does.
        for image_id, captions in captions_for_image.items():
            for entry in captions:
                text = self.model(entry['caption'], format="text").lower()
                tokens = [w for w in text.rstrip().split(' ')
                          if w not in PUNCTUATIONS]
                final_tokenized_captions_for_image.setdefault(
                    image_id, []).append(' '.join(tokens))
        return final_tokenized_captions_for_image