# NOTE: dataset-viewer header artifact removed here (was: "text / stringlengths 1 / 93.6k" table residue).
# --- Training / evaluation tail of the captioning script ---
# NOTE(review): relies on names defined earlier in the file (steps_train,
# steps_val, batch_size, num_of_epochs, X1train/X2train, X1val/X2val,
# tokenizer, max_length, model, callbacks, config, data_generator,
# evaluate_model_beam_search) -- confirm against the full script.

print('steps_train: {}, steps_val: {}'.format(steps_train, steps_val))
print('Batch Size: {}'.format(batch_size))
print('Total Number of Epochs = {}'.format(num_of_epochs))

# Shuffle the training captions: dicts preserve insertion order (Py3.7+),
# so rebuilding the dict from the shuffled key list reorders the samples.
ids_train = list(X2train.keys())
random.shuffle(ids_train)
X2train = {_id: X2train[_id] for _id in ids_train}

# Each generator yields [[img_features, text_features], out_word] batches.
generator_train = data_generator(X1train, X2train, tokenizer, max_length,
                                 batch_size, config['random_seed'])
generator_val = data_generator(X1val, X2val, tokenizer, max_length,
                               batch_size, config['random_seed'])

# Train for the configured number of epochs.
# NOTE(review): Model.fit_generator is deprecated in TF2 (Model.fit accepts
# generators since TF 2.1); kept for compatibility with the Keras version
# this project pins -- confirm before migrating.
model.fit_generator(generator_train,
                    epochs=num_of_epochs,
                    steps_per_epoch=steps_train,
                    validation_data=generator_val,
                    validation_steps=steps_val,
                    callbacks=callbacks,
                    verbose=1)

"""
*Evaluate the model on validation data and output BLEU score
"""

print('Model trained successfully. Running model on validation set for '
      'calculating BLEU score using BEAM search with k={}'.format(
          config['beam_search_k']))
evaluate_model_beam_search(model, X1val, X2val, tokenizer, max_length,
                           beam_index=config['beam_search_k'])
|
# <FILESEP>
|
import os
|
import cv2
|
import glob
|
import random
|
import progressbar
|
import numpy as np
|
import matplotlib.pyplot as plt
|
def rand_color():
    """Return a random BGR color as a 3-tuple of ints, each in [0, 255]."""
    # PEP 8 (E731): named def instead of a lambda assigned to a name.
    return (random.randint(0, 255),
            random.randint(0, 255),
            random.randint(0, 255))


def rand_pos(a, b):
    """Return a random (x, y) pair with both coordinates in [a, b-1]."""
    return (random.randint(a, b - 1), random.randint(a, b - 1))
|
# Output crop size and number of crops generated per background image.
target_size = 256
imgs_per_back = 30

# Source assets: background photos and font glyph masks.
backs = glob.glob('./dataset/backs/*.png')
fonts = glob.glob('./dataset/font_mask/*.png')

# Ensure the train/val output directory trees exist
# (I, Itegt, Mm, Msgt subdirectories under each split).
for split in ('train', 'val'):
    for sub in ('I', 'Itegt', 'Mm', 'Msgt'):
        os.makedirs('./dataset/{}/{}'.format(split, sub), exist_ok=True)

# Resume numbering after whatever was generated on a previous run.
t_idx = len(os.listdir('./dataset/train/I'))
v_idx = len(os.listdir('./dataset/val/I'))

# One progress tick per (background, crop) pair.
bar = progressbar.ProgressBar(maxval=len(backs) * imgs_per_back)
bar.start()
|
for back in backs:
|
back_img = cv2.imread(back)
|
bh, bw, _ = back_img.shape
|
if bh < target_size or bw < target_size:
|
back_img = cv2.resize(back_img, (target_size, target_size), interpolation=cv2.INTER_CUBIC)
|
bh, bw, _ = back_img.shape
|
for bi in range(imgs_per_back):
|
sx, sy = random.randint(0, bw-target_size), random.randint(0, bh-target_size)
|
Itegt = back_img[sy:sy+target_size, sx:sx+target_size, :].copy()
|
I = Itegt.copy()
|
Mm = np.zeros_like(I)
|
Msgt = np.zeros_like(I)
|
hist = []
|
for font in random.sample(fonts, random.randint(2, 4)):
|
font_img = cv2.imread(font)
|
mask_img = np.ones_like(font_img, dtype=np.uint8)*255
|
height, width, _ = font_img.shape
|
angle = random.randint(-30, +30)
|
fs = random.randint(90, 120)
|
ratio = fs / height - 0.2
|
matrix = cv2.getRotationMatrix2D((width/2, height/2), angle, ratio)
|
font_rot = cv2.warpAffine(font_img, matrix, (width, height), cv2.INTER_CUBIC)
|
mask_rot = cv2.warpAffine(mask_img, matrix, (width, height), cv2.INTER_CUBIC)
|
h, w, _ = font_rot.shape
|
font_in_I = np.zeros_like(I)
|
mask_in_I = np.zeros_like(I)
|
# NOTE: dataset-viewer footer artifact removed here (was: "Subsets and Splits / No community queries yet ..." website residue).