code
stringlengths
17
6.64M
def init_devices(device_type=None):
    """Configure a TensorFlow 1.x session for CPU or GPU use and install it in Keras.

    Args:
        device_type: 'cpu' (default when None) or 'gpu'.
    """
    if device_type is None:
        device_type = 'cpu'
    num_cores = 4
    # One CPU either way; a GPU is made visible only when explicitly requested.
    cpu_count = 1
    gpu_count = 1 if device_type == 'gpu' else 0
    session_config = tf.ConfigProto(
        intra_op_parallelism_threads=num_cores,
        inter_op_parallelism_threads=num_cores,
        allow_soft_placement=True,
        device_count={'CPU': cpu_count, 'GPU': gpu_count},
    )
    K.set_session(tf.Session(config=session_config))
def reporthook(block_num, block_size, total_size):
    """Progress callback for urllib.request.urlretrieve: render progress on stderr.

    Args:
        block_num: number of blocks transferred so far.
        block_size: size of each block in bytes.
        total_size: total file size, or <= 0 when the server did not report it.
    """
    downloaded = block_num * block_size
    if total_size <= 0:
        # Unknown total size: just report the running byte count.
        sys.stderr.write('read %d\n' % (downloaded,))
        return
    percent = (downloaded * 100.0) / total_size
    width = len(str(total_size))
    sys.stderr.write('\r%5.1f%% %*d / %d' % (percent, width, downloaded, total_size))
    if downloaded >= total_size:
        # Finished: terminate the carriage-return progress line.
        sys.stderr.write('\n')
def download_glove(data_dir_path=None):
    """Ensure the GloVe 6B embedding text file exists locally.

    Downloads and unzips the archive into ``data_dir_path`` (default
    'very_large_data') only when the target txt file is missing.
    """
    if data_dir_path is None:
        data_dir_path = 'very_large_data'
    glove_model_path = data_dir_path + '/glove.6B.' + str(GLOVE_EMBEDDING_SIZE) + 'd.txt'
    if not os.path.exists(glove_model_path):
        glove_zip = data_dir_path + '/glove.6B.zip'
        # exist_ok avoids the check-then-create race of the original.
        os.makedirs(data_dir_path, exist_ok=True)
        if not os.path.exists(glove_zip):
            print('glove file does not exist, downloading from internet')
            urllib.request.urlretrieve(url='http://nlp.stanford.edu/data/glove.6B.zip',
                                       filename=glove_zip,
                                       reporthook=reporthook)
        print('unzipping glove file')
        # Context manager guarantees the archive handle is closed even if
        # extraction raises (the original leaked it on error).
        with zipfile.ZipFile(glove_zip, 'r') as zip_ref:
            zip_ref.extractall(data_dir_path)
def load_glove(data_dir_path=None):
    """Load GloVe embeddings into a dict mapping word -> np.float32 vector.

    Downloads the embedding file first if necessary (via download_glove).
    """
    if data_dir_path is None:
        data_dir_path = 'very_large_data'
    download_glove(data_dir_path)
    _word2em = {}
    glove_model_path = data_dir_path + '/glove.6B.' + str(GLOVE_EMBEDDING_SIZE) + 'd.txt'
    # `with` closes the file even if parsing raises (the original leaked the
    # handle on error and only closed on the happy path).
    with open(glove_model_path, mode='rt', encoding='utf8') as glove_file:
        for line in glove_file:
            words = line.strip().split()
            # First token is the word; the remainder is its embedding vector.
            _word2em[words[0]] = np.array(words[1:], dtype=np.float32)
    return _word2em
def glove_zero_emb():
    """Return an all-zero vector with the GloVe embedding dimensionality."""
    return np.zeros(shape=GLOVE_EMBEDDING_SIZE)
class Glove(object):
    # Thin wrapper that eagerly loads the full GloVe word->embedding table
    # on construction (a potentially slow, memory-heavy operation).
    word2em = None  # dict: word -> np.float32 vector; populated in __init__
    GLOVE_EMBEDDING_SIZE = GLOVE_EMBEDDING_SIZE  # expose the module constant on the class

    def __init__(self):
        self.word2em = load_glove()
def in_white_list(_word):
    """Return True when at least one character of *_word* appears in WHITELIST."""
    return any(ch in WHITELIST for ch in _word)
def absTokenizer1(regex, abstracts):
    """Tokenize each abstract with an nltk RegexpTokenizer built from *regex*.

    Args:
        regex: regular expression defining a token (e.g. r'\\w+').
        abstracts: iterable of abstract strings.

    Returns:
        A list of token lists, one per abstract, preserving order.
    """
    # NOTE(review): the original also built an English stop-word set here that
    # was never used; that dead code has been removed.
    tokenizer = RegexpTokenizer(regex)
    tokened = [tokenizer.tokenize(abstract) for abstract in abstracts]
    print('Abstracts are tokenized...')
    return tokened
def df2model(path_to_json):
    # Build the two-column (input_text, target_text) DataFrame used for model
    # training: tokenized-then-rejoined abstracts as inputs, titles as targets.
    # NOTE(review): json2list appears to return a mapping with 'abstracts' and
    # 'titles' entries — confirm against its definition.
    data = json2list(path_to_json)
    df_to_model = pd.DataFrame({'Abstracts': absTokenizer1('\\w+', data['abstracts']), 'Titles': data['titles']})
    df_to_model.columns = ['input_text', 'target_text']
    # Re-join each token list into a single space-separated string, in place.
    for i in range(len(df_to_model)):
        df_to_model.iloc[(i, 0)] = ' '.join(df_to_model.iloc[(i, 0)])
    print('df_to_model.csv is prepared...')
    return df_to_model
def fit_text(X, Y, input_seq_max_length=None, target_seq_max_length=None):
    """Scan source texts X and target texts Y to build the model's vocabulary config.

    Args:
        X: iterable of source text strings.
        Y: iterable of target text strings (wrapped in START/END markers).
        input_seq_max_length: clip length for sources (defaults to MAX_INPUT_SEQ_LENGTH).
        target_seq_max_length: clip length for targets (defaults to MAX_TARGET_SEQ_LENGTH).

    Returns:
        dict with word<->index tables, vocabulary sizes and the observed
        maximum (clipped) sequence lengths.
    """
    if input_seq_max_length is None:
        input_seq_max_length = MAX_INPUT_SEQ_LENGTH
    if target_seq_max_length is None:
        target_seq_max_length = MAX_TARGET_SEQ_LENGTH

    input_counter = Counter()
    target_counter = Counter()
    max_input_seq_length = 0
    max_target_seq_length = 0

    for line in X:
        # Lower-case and clip each source text before counting word frequencies.
        tokens = [w.lower() for w in line.split(' ')][:input_seq_max_length]
        input_counter.update(tokens)
        max_input_seq_length = max(max_input_seq_length, len(tokens))

    for line in Y:
        # Targets get explicit START/END markers before clipping.
        tokens = ('START ' + line.lower() + ' END').split(' ')[:target_seq_max_length]
        target_counter.update(tokens)
        max_target_seq_length = max(max_target_seq_length, len(tokens))

    # Source vocabulary: indices 0 and 1 are reserved for PAD and UNK.
    input_word2idx = {w: i + 2 for i, (w, _) in enumerate(input_counter.most_common(MAX_INPUT_VOCAB_SIZE))}
    input_word2idx['PAD'] = 0
    input_word2idx['UNK'] = 1
    input_idx2word = {i: w for (w, i) in input_word2idx.items()}

    # Target vocabulary: index 0 is reserved for UNK.
    target_word2idx = {w: i + 1 for i, (w, _) in enumerate(target_counter.most_common(MAX_TARGET_VOCAB_SIZE))}
    target_word2idx['UNK'] = 0
    target_idx2word = {i: w for (w, i) in target_word2idx.items()}

    return {
        'input_word2idx': input_word2idx,
        'input_idx2word': input_idx2word,
        'target_word2idx': target_word2idx,
        'target_idx2word': target_idx2word,
        'num_input_tokens': len(input_word2idx),
        'num_target_tokens': len(target_word2idx),
        'max_input_seq_length': max_input_seq_length,
        'max_target_seq_length': max_target_seq_length,
    }
def getCategoryVocab(df_raw):
    """Read the raw CSV and flatten its space-separated 'Category' column.

    Args:
        df_raw: path (or file-like object) readable by pandas.read_csv.

    Returns:
        A flat list of every category token, in row order.
    """
    frame = pd.read_csv(df_raw)
    vocab = []
    for category in frame['Category']:
        vocab.extend(category.split(' '))
    return vocab
def getCategoryVocabByYear(df_raw):
    """Return [category, year] pairs, one per category token per row of the raw CSV.

    The last column holds space-separated category tags; the second-to-last
    column holds the year.

    BUG FIX: the original reset ``pair = []`` *before* appending it to the
    result, so every returned element was an empty list (which is why the
    downstream counter had to guard against IndexError). Pairs are now built
    correctly.
    """
    frame = pd.read_csv(df_raw)
    category_year_vocab = []
    for i in range(len(frame)):
        year = frame.iloc[i, -2]
        for tag in frame.iloc[i, -1].split(' '):
            category_year_vocab.append([tag, year])
    return category_year_vocab
def countCategoryVocabByYear(category_vocab_by_year):
    """Count category occurrences per year.

    Args:
        category_vocab_by_year: iterable of [category, year] pairs.

    Returns:
        defaultdict mapping str(year) -> {category: count}.

    BUG FIX: the original guarded malformed pairs with ``if (e is IndexError)``,
    which compares an exception *instance* to the exception *class* and is
    always False — so every error was silently swallowed and the diagnostic
    never printed. Short pairs are now detected explicitly, reported once,
    and skipped, and the needless two-pass init/count is a single pass.
    """
    count = collections.defaultdict(dict)
    for pair in category_vocab_by_year:
        if len(pair) < 2:
            # Malformed pair (e.g. empty list) — report and skip.
            print('This exception is not possible. But it happens.')
            continue
        year_key = str(pair[1])
        count[year_key][pair[0]] = count[year_key].get(pair[0], 0) + 1
    return count
def countCategories(category_vocab, k):
    """Return the k most frequent categories as a {category: count} dict.

    BUG FIX: the original sliced the first k entries of
    ``OrderedDict(countCat)`` — OrderedDict preserves insertion order, it does
    not sort — so it returned the first k categories *encountered*, not the
    most popular ones, despite being used for "top by popularity" reports.
    ``Counter.most_common(k)`` returns the genuinely most frequent k.
    """
    return dict(Counter(category_vocab).most_common(k))
def populars(df_csv_raw, k=10):
    """Print the k most popular categories found in the raw CSV.

    Args:
        df_csv_raw: path to the raw CSV file.
        k: number of categories to report (default 10).
    """
    # NOTE(review): the original also did an unused pd.read_csv(df_csv_raw)
    # here; getCategoryVocab reads the file itself, so that read was dropped.
    category_vocab = getCategoryVocab(df_csv_raw)
    # BUG FIX: the original ignored *k* and always asked for 10.
    topk = countCategories(category_vocab, k)
    print('TOP 10 CATEGORY BY POPULARITY')  # NOTE(review): header hard-codes 10 even when k != 10
    for (key, value) in topk.items():
        print(' {}: {}'.format(key, value))
def popularsbar(df_csv_raw, k):
    # Render a vertical bar chart of the k most frequent categories, with
    # human-readable legends resolved from ../data/categories.json.
    df_raw = pd.read_csv(df_csv_raw)  # NOTE(review): unused — getCategoryVocab re-reads the path itself
    category_vocab = getCategoryVocab(df_csv_raw)
    topk = countCategories(category_vocab, k)
    cat_dir = '../data/categories.json'
    with open(cat_dir) as json_file:
        categories = json.load(json_file)
    # Map category codes to display names, silently skipping unknown codes
    # (which can leave fewer legends than bar values).
    legends = []
    for a in list(topk.keys()):
        if (a in categories):
            legends.append(categories[a])
    plt.figure(num=None, figsize=(13, 7), dpi=80, facecolor='w', edgecolor='k')
    plt.bar(legends, list(topk.values()))
    plt.tight_layout()
    plt.xticks(rotation='vertical')
    plt.xlabel('categories')
    plt.ylabel('Count')
    plt.title('Top 10 Categories')  # NOTE(review): title hard-codes 10 even when k != 10
    plt.show()
def get(seed=0, pc_valid=0.1):
    # Build (or reload from cache) the 10-task split-CIFAR100 benchmark:
    # the 100 fine labels are bucketed into 10 tasks of 10 classes each.
    # Returns (data, taskcla, size): per-task train/valid/test tensors, the
    # (task, n_classes) list, and the input shape [C, H, W].
    data = {}
    taskcla = []
    size = [3, 32, 32]
    if (not os.path.isdir(file_dir)):
        # First run: download CIFAR100, bucket images by task, and cache the
        # tensors as .bin files under file_dir.
        os.makedirs(file_dir)
        mean = [(x / 255) for x in [125.3, 123.0, 113.9]]
        std = [(x / 255) for x in [63.0, 62.1, 66.7]]
        dat = {}
        dat['train'] = datasets.CIFAR100(cf100_dir, train=True, download=True, transform=transforms.Compose([transforms.ToTensor(), transforms.Normalize(mean, std)]))
        dat['test'] = datasets.CIFAR100(cf100_dir, train=False, download=True, transform=transforms.Compose([transforms.ToTensor(), transforms.Normalize(mean, std)]))
        for n in range(10):
            data[n] = {}
            data[n]['name'] = 'cifar100'
            data[n]['ncla'] = 10
            data[n]['train'] = {'x': [], 'y': []}
            data[n]['test'] = {'x': [], 'y': []}
        for s in ['train', 'test']:
            loader = torch.utils.data.DataLoader(dat[s], batch_size=1, shuffle=False)
            for (image, target) in loader:
                n = target.numpy()[0]
                # Task index = fine label // 10; within-task label = label % 10.
                nn = (n // 10)
                data[nn][s]['x'].append(image)
                data[nn][s]['y'].append((n % 10))
        for t in data.keys():
            for s in ['train', 'test']:
                data[t][s]['x'] = torch.stack(data[t][s]['x']).view((- 1), size[0], size[1], size[2])
                data[t][s]['y'] = torch.LongTensor(np.array(data[t][s]['y'], dtype=int)).view((- 1))
                torch.save(data[t][s]['x'], os.path.join(os.path.expanduser(file_dir), ((('data' + str(t)) + s) + 'x.bin')))
                torch.save(data[t][s]['y'], os.path.join(os.path.expanduser(file_dir), ((('data' + str(t)) + s) + 'y.bin')))
    # Always reload from the cached .bin files (fixed task order 0..9).
    data = {}
    ids = list(np.arange(10))
    print('Task order =', ids)
    for i in range(10):
        data[i] = dict.fromkeys(['name', 'ncla', 'train', 'test'])
        for s in ['train', 'test']:
            data[i][s] = {'x': [], 'y': []}
            data[i][s]['x'] = torch.load(os.path.join(os.path.expanduser(file_dir), ((('data' + str(ids[i])) + s) + 'x.bin')))
            data[i][s]['y'] = torch.load(os.path.join(os.path.expanduser(file_dir), ((('data' + str(ids[i])) + s) + 'y.bin')))
        data[i]['ncla'] = len(np.unique(data[i]['train']['y'].numpy()))
        # Name reflects the observed class count (2-way tasks labelled cifar10-*).
        if (data[i]['ncla'] == 2):
            data[i]['name'] = ('cifar10-' + str(ids[i]))
        else:
            data[i]['name'] = ('cifar100-' + str(ids[i]))
    # Carve a seeded, shuffled validation split off each task's training set.
    for t in data.keys():
        r = np.arange(data[t]['train']['x'].size(0))
        r = np.array(shuffle(r, random_state=seed), dtype=int)
        nvalid = int((pc_valid * len(r)))
        ivalid = torch.LongTensor(r[:nvalid])
        itrain = torch.LongTensor(r[nvalid:])
        data[t]['valid'] = {}
        data[t]['valid']['x'] = data[t]['train']['x'][ivalid].clone()
        data[t]['valid']['y'] = data[t]['train']['y'][ivalid].clone()
        data[t]['train']['x'] = data[t]['train']['x'][itrain].clone()
        data[t]['train']['y'] = data[t]['train']['y'][itrain].clone()
    # Total class count across all tasks, stored alongside the task entries.
    n = 0
    for t in data.keys():
        taskcla.append((t, data[t]['ncla']))
        n += data[t]['ncla']
    data['ncla'] = n
    return (data, taskcla, size)
def cifar100_superclass_python(task_order, group=5, validation=False, val_ratio=0.05, flat=False, one_hot=True, seed=0):
    # Build CIFAR100 "superclass" tasks: 20 groups of 5 semantically related
    # fine labels, each group becoming one 5-way task. Labels are remapped to
    # 0..4 within their group. Returns (data, taskcla).
    # NOTE(review): parameters `group` and `one_hot` are accepted but never used.
    CIFAR100_LABELS_LIST = ['apple', 'aquarium_fish', 'baby', 'bear', 'beaver', 'bed', 'bee', 'beetle', 'bicycle', 'bottle', 'bowl', 'boy', 'bridge', 'bus', 'butterfly', 'camel', 'can', 'castle', 'caterpillar', 'cattle', 'chair', 'chimpanzee', 'clock', 'cloud', 'cockroach', 'couch', 'crab', 'crocodile', 'cup', 'dinosaur', 'dolphin', 'elephant', 'flatfish', 'forest', 'fox', 'girl', 'hamster', 'house', 'kangaroo', 'keyboard', 'lamp', 'lawn_mower', 'leopard', 'lion', 'lizard', 'lobster', 'man', 'maple_tree', 'motorcycle', 'mountain', 'mouse', 'mushroom', 'oak_tree', 'orange', 'orchid', 'otter', 'palm_tree', 'pear', 'pickup_truck', 'pine_tree', 'plain', 'plate', 'poppy', 'porcupine', 'possum', 'rabbit', 'raccoon', 'ray', 'road', 'rocket', 'rose', 'sea', 'seal', 'shark', 'shrew', 'skunk', 'skyscraper', 'snail', 'snake', 'spider', 'squirrel', 'streetcar', 'sunflower', 'sweet_pepper', 'table', 'tank', 'telephone', 'television', 'tiger', 'tractor', 'train', 'trout', 'tulip', 'turtle', 'wardrobe', 'whale', 'willow_tree', 'wolf', 'woman', 'worm']
    # The 20 CIFAR100 superclasses, each listing its 5 member fine labels.
    # Matching below is done via the substring ' <label>,'.
    sclass = []
    sclass.append(' beaver, dolphin, otter, seal, whale,')
    sclass.append(' aquarium_fish, flatfish, ray, shark, trout,')
    sclass.append(' orchid, poppy, rose, sunflower, tulip,')
    sclass.append(' bottle, bowl, can, cup, plate,')
    sclass.append(' apple, mushroom, orange, pear, sweet_pepper,')
    sclass.append(' clock, computer keyboard, lamp, telephone, television,')
    sclass.append(' bed, chair, couch, table, wardrobe,')
    sclass.append(' bee, beetle, butterfly, caterpillar, cockroach,')
    sclass.append(' bear, leopard, lion, tiger, wolf,')
    sclass.append(' bridge, castle, house, road, skyscraper,')
    sclass.append(' cloud, forest, mountain, plain, sea,')
    sclass.append(' camel, cattle, chimpanzee, elephant, kangaroo,')
    sclass.append(' fox, porcupine, possum, raccoon, skunk,')
    sclass.append(' crab, lobster, snail, spider, worm,')
    sclass.append(' baby, boy, girl, man, woman,')
    sclass.append(' crocodile, dinosaur, lizard, snake, turtle,')
    sclass.append(' hamster, mouse, rabbit, shrew, squirrel,')
    sclass.append(' maple_tree, oak_tree, palm_tree, pine_tree, willow_tree,')
    sclass.append(' bicycle, bus, motorcycle, pickup_truck, train,')
    sclass.append(' lawn_mower, rocket, streetcar, tank, tractor,')
    # Trigger the torchvision download so the raw pickle files exist on disk.
    dataset_train = datasets.CIFAR100('./data/', train=True, download=True)
    dataset_test = datasets.CIFAR100('./data/', train=False, download=True)
    # validation=True reads the train pickle (and later splits off a valid set);
    # otherwise the test pickle is read.
    if (validation == True):
        data_path = './data/cifar-100-python/train'
    else:
        data_path = './data/cifar-100-python/test'
    n_classes = 100
    size = [3, 32, 32]
    data = {}
    taskcla = []
    mean = np.array([(x / 255) for x in [125.3, 123.0, 113.9]])
    std = np.array([(x / 255) for x in [63.0, 62.1, 66.7]])
    # NOTE(review): `files` is never closed, and `dict` shadows the builtin.
    files = open(data_path, 'rb')
    dict = pickle.load(files, encoding='bytes')
    images = dict[b'data']
    images = (np.float32(images) / 255)
    labels = dict[b'fine_labels']
    # labels_pair[kk] = the 5 fine-label indices belonging to superclass kk.
    labels_pair = [[jj for jj in range(100) if ((' %s,' % CIFAR100_LABELS_LIST[jj]) in sclass[kk])] for kk in range(20)]
    # argsort_sup[j] = indices of all images whose fine label is in superclass j.
    argsort_sup = [[] for _ in range(20)]
    for _i in range(len(images)):
        for _j in range(20):
            if (labels[_i] in labels_pair[_j]):
                argsort_sup[_j].append(_i)
    argsort_sup_c = np.concatenate(argsort_sup)
    train_split = []
    val_split = []
    # Equal-sized slice boundaries into the superclass-ordered index array.
    position = [_k for _k in range(0, (len(images) + 1), int((len(images) / 20)))]
    if (validation == True):
        s_train = 'train'
        s_valid = 'valid'
    else:
        s_train = 'test'
    for idx in task_order:
        data[idx] = {}
        data[idx]['name'] = 'cifar100'
        data[idx]['ncla'] = 5
        data[idx][s_train] = {'x': [], 'y': []}
        # Gather this task's images via its slice of the superclass ordering.
        gimages = np.take(images, argsort_sup_c[position[idx]:position[(idx + 1)]], axis=0)
        if (not flat):
            # Reshape the flat 3072-vectors to image tensors in channel-first order.
            gimages = gimages.reshape([gimages.shape[0], 32, 32, 3])
            gimages = gimages.swapaxes(2, 3).swapaxes(1, 2)
        glabels = np.take(labels, argsort_sup_c[position[idx]:position[(idx + 1)]])
        # Remap each fine label to its 0..4 position within the superclass.
        for (_si, swap) in enumerate(labels_pair[idx]):
            glabels = [(('%d' % _si) if (x == swap) else x) for x in glabels]
        data[idx][s_train]['x'] = torch.FloatTensor(gimages)
        data[idx][s_train]['y'] = torch.LongTensor(np.array([np.int32(glabels)], dtype=int)).view((- 1))
        if (validation == True):
            # Seeded shuffle, then split off the first val_ratio fraction.
            r = np.arange(data[idx][s_train]['x'].size(0))
            r = np.array(shuffle(r, random_state=seed), dtype=int)
            nvalid = int((val_ratio * len(r)))
            ivalid = torch.LongTensor(r[:nvalid])
            itrain = torch.LongTensor(r[nvalid:])
            data[idx]['valid'] = {}
            data[idx]['valid']['x'] = data[idx]['train']['x'][ivalid].clone()
            data[idx]['valid']['y'] = data[idx]['train']['y'][ivalid].clone()
            data[idx]['train']['x'] = data[idx]['train']['x'][itrain].clone()
            data[idx]['train']['y'] = data[idx]['train']['y'][itrain].clone()
    # Total class count across tasks.
    n = 0
    for t in data.keys():
        taskcla.append((t, data[t]['ncla']))
        n += data[t]['ncla']
    data['ncla'] = n
    return (data, taskcla)
def imshow(img):
    """Display a channel-first (C, H, W) image array with matplotlib."""
    # matplotlib expects channel-last (H, W, C).
    plt.imshow(np.transpose(img, (1, 2, 0)))
    plt.show()
def get(seed=1, fixed_order=False, pc_valid=0.05):
    # Build (or reload from cache) the 5-task "mixture" benchmark: CIFAR10,
    # MNIST, SVHN, FashionMNIST and notMNIST, each normalized and brought to
    # a common 3x32x32 shape. Returns (data, taskcla, size).
    # NOTE(review): `fixed_order` is accepted but never used — task order is fixed.
    data = {}
    taskcla = []
    size = [3, 32, 32]
    idata = np.arange(5)
    print('Task order =', idata)
    if (not os.path.isdir('./data/Five_data/binary_mixture_5_Data/')):
        # First run: download each dataset, convert to tensors, cache as .bin.
        os.makedirs('./data/Five_data/binary_mixture_5_Data')
        for (n, idx) in enumerate(idata):
            if (idx == 0):
                # CIFAR10 — already 3x32x32.
                mean = [(x / 255) for x in [125.3, 123.0, 113.9]]
                std = [(x / 255) for x in [63.0, 62.1, 66.7]]
                dat = {}
                dat['train'] = datasets.CIFAR10('./data/Five_data/', train=True, download=True, transform=transforms.Compose([transforms.ToTensor(), transforms.Normalize(mean, std)]))
                dat['test'] = datasets.CIFAR10('./data/Five_data/', train=False, download=True, transform=transforms.Compose([transforms.ToTensor(), transforms.Normalize(mean, std)]))
                data[n] = {}
                data[n]['name'] = 'cifar10'
                data[n]['ncla'] = 10
                for s in ['train', 'test']:
                    loader = torch.utils.data.DataLoader(dat[s], batch_size=1, shuffle=False)
                    data[n][s] = {'x': [], 'y': []}
                    for (image, target) in loader:
                        data[n][s]['x'].append(image)
                        data[n][s]['y'].append(target.numpy()[0])
            elif (idx == 1):
                # MNIST — padded 28->32 and channel-expanded 1->3.
                mean = (0.1,)
                std = (0.2752,)
                dat = {}
                dat['train'] = datasets.MNIST('./data/Five_data/', train=True, download=True, transform=transforms.Compose([transforms.Pad(padding=2, fill=0), transforms.ToTensor(), transforms.Normalize(mean, std)]))
                dat['test'] = datasets.MNIST('./data/Five_data/', train=False, download=True, transform=transforms.Compose([transforms.Pad(padding=2, fill=0), transforms.ToTensor(), transforms.Normalize(mean, std)]))
                data[n] = {}
                data[n]['name'] = 'mnist'
                data[n]['ncla'] = 10
                for s in ['train', 'test']:
                    loader = torch.utils.data.DataLoader(dat[s], batch_size=1, shuffle=False)
                    data[n][s] = {'x': [], 'y': []}
                    for (image, target) in loader:
                        image = image.expand(1, 3, image.size(2), image.size(3))
                        data[n][s]['x'].append(image)
                        data[n][s]['y'].append(target.numpy()[0])
            elif (idx == 2):
                # SVHN — already 3x32x32.
                mean = [0.4377, 0.4438, 0.4728]
                std = [0.198, 0.201, 0.197]
                dat = {}
                dat['train'] = datasets.SVHN('./data/Five_data/', split='train', download=True, transform=transforms.Compose([transforms.ToTensor(), transforms.Normalize(mean, std)]))
                dat['test'] = datasets.SVHN('./data/Five_data/', split='test', download=True, transform=transforms.Compose([transforms.ToTensor(), transforms.Normalize(mean, std)]))
                data[n] = {}
                data[n]['name'] = 'svhn'
                data[n]['ncla'] = 10
                for s in ['train', 'test']:
                    loader = torch.utils.data.DataLoader(dat[s], batch_size=1, shuffle=False)
                    data[n][s] = {'x': [], 'y': []}
                    for (image, target) in loader:
                        data[n][s]['x'].append(image)
                        data[n][s]['y'].append(target.numpy()[0])
            elif (idx == 3):
                # FashionMNIST — padded and channel-expanded like MNIST.
                mean = (0.219,)
                std = (0.3318,)
                dat = {}
                dat['train'] = FashionMNIST('./data/Five_data/fashion_mnist', train=True, download=True, transform=transforms.Compose([transforms.Pad(padding=2, fill=0), transforms.ToTensor(), transforms.Normalize(mean, std)]))
                dat['test'] = FashionMNIST('./data/Five_data/fashion_mnist', train=False, download=True, transform=transforms.Compose([transforms.Pad(padding=2, fill=0), transforms.ToTensor(), transforms.Normalize(mean, std)]))
                data[n] = {}
                data[n]['name'] = 'fashion-mnist'
                data[n]['ncla'] = 10
                for s in ['train', 'test']:
                    loader = torch.utils.data.DataLoader(dat[s], batch_size=1, shuffle=False)
                    data[n][s] = {'x': [], 'y': []}
                    for (image, target) in loader:
                        image = image.expand(1, 3, image.size(2), image.size(3))
                        data[n][s]['x'].append(image)
                        data[n][s]['y'].append(target.numpy()[0])
            elif (idx == 4):
                # notMNIST — channel-expanded 1->3 (no padding applied here).
                mean = (0.4254,)
                std = (0.4501,)
                dat = {}
                dat['train'] = notMNIST('./data/Five_data/notmnist', train=True, download=True, transform=transforms.Compose([transforms.ToTensor(), transforms.Normalize(mean, std)]))
                dat['test'] = notMNIST('./data/Five_data/notmnist', train=False, download=True, transform=transforms.Compose([transforms.ToTensor(), transforms.Normalize(mean, std)]))
                data[n] = {}
                data[n]['name'] = 'notmnist'
                data[n]['ncla'] = 10
                for s in ['train', 'test']:
                    loader = torch.utils.data.DataLoader(dat[s], batch_size=1, shuffle=False)
                    data[n][s] = {'x': [], 'y': []}
                    for (image, target) in loader:
                        image = image.expand(1, 3, image.size(2), image.size(3))
                        data[n][s]['x'].append(image)
                        data[n][s]['y'].append(target.numpy()[0])
            else:
                print('ERROR: Undefined data set', n)
                sys.exit()
            # Cache this task's stacked tensors to disk.
            for s in ['train', 'test']:
                data[n][s]['x'] = torch.stack(data[n][s]['x']).view((- 1), size[0], size[1], size[2])
                data[n][s]['y'] = torch.LongTensor(np.array(data[n][s]['y'], dtype=int)).view((- 1))
                torch.save(data[n][s]['x'], os.path.join(os.path.expanduser('./data/Five_data/binary_mixture_5_Data'), ((('data' + str(idx)) + s) + 'x.bin')))
                torch.save(data[n][s]['y'], os.path.join(os.path.expanduser('./data/Five_data/binary_mixture_5_Data'), ((('data' + str(idx)) + s) + 'y.bin')))
    else:
        # Binary cache exists: just reload the tensors.
        for (n, idx) in enumerate(idata):
            data[n] = dict.fromkeys(['name', 'ncla', 'train', 'test'])
            if (idx == 0):
                data[n]['name'] = 'cifar10'
                data[n]['ncla'] = 10
            elif (idx == 1):
                data[n]['name'] = 'mnist'
                data[n]['ncla'] = 10
            elif (idx == 2):
                data[n]['name'] = 'svhn'
                data[n]['ncla'] = 10
            elif (idx == 3):
                data[n]['name'] = 'fashion-mnist'
                data[n]['ncla'] = 10
            elif (idx == 4):
                data[n]['name'] = 'notmnist'
                data[n]['ncla'] = 10
            else:
                print('ERROR: Undefined data set', n)
                sys.exit()
            for s in ['train', 'test']:
                data[n][s] = {'x': [], 'y': []}
                data[n][s]['x'] = torch.load(os.path.join(os.path.expanduser('./data/Five_data/binary_mixture_5_Data'), ((('data' + str(idx)) + s) + 'x.bin')))
                data[n][s]['y'] = torch.load(os.path.join(os.path.expanduser('./data/Five_data/binary_mixture_5_Data'), ((('data' + str(idx)) + s) + 'y.bin')))
    # Carve a seeded, shuffled validation split off each task's training set.
    for t in data.keys():
        r = np.arange(data[t]['train']['x'].size(0))
        r = np.array(shuffle(r, random_state=seed), dtype=int)
        nvalid = int((pc_valid * len(r)))
        ivalid = torch.LongTensor(r[:nvalid])
        itrain = torch.LongTensor(r[nvalid:])
        data[t]['valid'] = {}
        data[t]['valid']['x'] = data[t]['train']['x'][ivalid].clone()
        data[t]['valid']['y'] = data[t]['train']['y'][ivalid].clone()
        data[t]['train']['x'] = data[t]['train']['x'][itrain].clone()
        data[t]['train']['y'] = data[t]['train']['y'][itrain].clone()
    # Total class count across all tasks.
    n = 0
    for t in data.keys():
        taskcla.append((t, data[t]['ncla']))
        n += data[t]['ncla']
    data['ncla'] = n
    return (data, taskcla, size)
class FashionMNIST(datasets.MNIST):
    """`Fashion MNIST <https://github.com/zalandoresearch/fashion-mnist>`_ Dataset.

    Reuses torchvision's MNIST loader unchanged — FashionMNIST ships the same
    idx-ubyte file layout, so only the download URLs need overriding.
    """
    urls = ['http://fashion-mnist.s3-website.eu-central-1.amazonaws.com/train-images-idx3-ubyte.gz', 'http://fashion-mnist.s3-website.eu-central-1.amazonaws.com/train-labels-idx1-ubyte.gz', 'http://fashion-mnist.s3-website.eu-central-1.amazonaws.com/t10k-images-idx3-ubyte.gz', 'http://fashion-mnist.s3-website.eu-central-1.amazonaws.com/t10k-labels-idx1-ubyte.gz']
class TrafficSigns(torch.utils.data.Dataset):
    """`German Traffic Signs <http://benchmark.ini.rub.de/?section=gtsrb&subsection=dataset>`_ Dataset.

    Args:
        root (string): Root directory of dataset where directory ``Traffic signs`` exists.
        train (bool): Load the train split when True, else the test split.
        transform (callable, optional): A function/transform that takes in an PIL image
            and returns a transformed version. E.g, ``transforms.RandomCrop``.
        download (bool, optional): If true, downloads the dataset from the internet and
            puts it in root directory. If dataset is already downloaded, it is not
            downloaded again.
    """

    def __init__(self, root, train=True, transform=None, download=False):
        self.root = os.path.expanduser(root)
        self.transform = transform
        self.filename = 'traffic_signs_dataset.zip'
        self.url = 'https://d17h27t6h515a5.cloudfront.net/topher/2016/October/580d53ce_traffic-sign-data/traffic-sign-data.zip'
        fpath = os.path.join(root, self.filename)
        if (not os.path.isfile(fpath)):
            if (not download):
                raise RuntimeError('Dataset not found. You can use download=True to download it')
            else:
                print(('Downloading from ' + self.url))
                self.download()
        # The archive contains pre-pickled train/test splits.
        training_file = 'lab 2 data/train.p'
        testing_file = 'lab 2 data/test.p'
        if train:
            with open(os.path.join(root, training_file), mode='rb') as f:
                train = pickle.load(f)
            self.data = train['features']
            self.labels = train['labels']
        else:
            with open(os.path.join(root, testing_file), mode='rb') as f:
                test = pickle.load(f)
            self.data = test['features']
            self.labels = test['labels']
        # Store images channel-first (N, C, H, W); __getitem__ converts back.
        self.data = np.transpose(self.data, (0, 3, 1, 2))

    def __getitem__(self, index):
        """
        Args: index (int): Index
        Returns: tuple: (image, target) where target is index of the target class.
        """
        (img, target) = (self.data[index], self.labels[index])
        # Back to HWC so PIL can consume it.
        img = Image.fromarray(np.transpose(img, (1, 2, 0)))
        if (self.transform is not None):
            img = self.transform(img)
        return (img, target)

    def __len__(self):
        # Number of samples in the loaded split.
        return len(self.data)

    def download(self):
        # Fetch the zip archive into root and extract it in place.
        import errno
        root = os.path.expanduser(self.root)
        fpath = os.path.join(root, self.filename)
        try:
            os.makedirs(root)
        except OSError as e:
            # Directory already existing is fine; anything else propagates.
            if (e.errno == errno.EEXIST):
                pass
            else:
                raise
        urllib.request.urlretrieve(self.url, fpath)
        import zipfile
        zip_ref = zipfile.ZipFile(fpath, 'r')
        zip_ref.extractall(root)
        zip_ref.close()
class Facescrub(torch.utils.data.Dataset):
    """Subset of the Facescrub cropped from the official Megaface challenge page:
    http://megaface.cs.washington.edu/participate/challenge.html, resized to 38x38.

    Args:
        root (string): Root directory of dataset where the archive is placed.
        train (bool): Load the train split when True, else the test split.
        transform (callable, optional): A function/transform that takes in an PIL image
            and returns a transformed version. E.g, ``transforms.RandomCrop``.
        download (bool, optional): If true, downloads the dataset from the internet and
            puts it in root directory. If dataset is already downloaded, it is not
            downloaded again.
    """

    def __init__(self, root, train=True, transform=None, download=False):
        self.root = os.path.expanduser(root)
        self.transform = transform
        self.filename = 'facescrub_100.zip'
        self.url = 'https://github.com/nkundiushuti/facescrub_subset/blob/master/data/facescrub_100.zip?raw=true'
        fpath = os.path.join(root, self.filename)
        if (not os.path.isfile(fpath)):
            if (not download):
                raise RuntimeError('Dataset not found. You can use download=True to download it')
            else:
                print(('Downloading from ' + self.url))
                self.download()
        training_file = 'facescrub_train_100.pkl'
        testing_file = 'facescrub_test_100.pkl'
        if train:
            with open(os.path.join(root, training_file), 'rb') as f:
                train = pickle.load(f)
            self.data = train['features'].astype(np.uint8)
            self.labels = train['labels'].astype(np.uint8)
            # Leftover commented-out debug code preserved as the original's
            # no-op string expression (behaviorally inert).
            '\n            print(self.data.shape)\n            print(self.data.mean())\n            print(self.data.std())\n            print(self.labels.max())\n            #'
        else:
            with open(os.path.join(root, testing_file), 'rb') as f:
                test = pickle.load(f)
            self.data = test['features'].astype(np.uint8)
            self.labels = test['labels'].astype(np.uint8)

    def __getitem__(self, index):
        """
        Args: index (int): Index
        Returns: tuple: (image, target) where target is index of the target class.
        """
        (img, target) = (self.data[index], self.labels[index])
        # Stored channel-first; convert to HWC for PIL.
        img = Image.fromarray(np.transpose(img, (1, 2, 0)))
        if (self.transform is not None):
            img = self.transform(img)
        return (img, target)

    def __len__(self):
        # Number of samples in the loaded split.
        return len(self.data)

    def download(self):
        # Fetch the zip archive into root and extract it in place.
        import errno
        root = os.path.expanduser(self.root)
        fpath = os.path.join(root, self.filename)
        try:
            os.makedirs(root)
        except OSError as e:
            # Directory already existing is fine; anything else propagates.
            if (e.errno == errno.EEXIST):
                pass
            else:
                raise
        urllib.request.urlretrieve(self.url, fpath)
        import zipfile
        zip_ref = zipfile.ZipFile(fpath, 'r')
        zip_ref.extractall(root)
        zip_ref.close()
class notMNIST(torch.utils.data.Dataset):
    """The notMNIST dataset is a image recognition dataset of font glypyhs for the
    letters A through J useful with simple neural networks. It is quite similar to
    the classic MNIST dataset of handwritten digits 0 through 9.

    Args:
        root (string): Root directory of dataset where the archive is placed.
        train (bool): Load the train split when True, else the test split.
        transform (callable, optional): A function/transform that takes in an PIL image
            and returns a transformed version. E.g, ``transforms.RandomCrop``.
        download (bool, optional): If true, downloads the dataset from the internet and
            puts it in root directory. If dataset is already downloaded, it is not
            downloaded again.
    """

    def __init__(self, root, train=True, transform=None, download=False):
        self.root = os.path.expanduser(root)
        self.transform = transform
        self.filename = 'notmnist.zip'
        self.url = 'https://github.com/nkundiushuti/notmnist_convert/blob/master/notmnist.zip?raw=true'
        fpath = os.path.join(root, self.filename)
        if (not os.path.isfile(fpath)):
            if (not download):
                raise RuntimeError('Dataset not found. You can use download=True to download it')
            else:
                print(('Downloading from ' + self.url))
                self.download()
        training_file = 'notmnist_train.pkl'
        testing_file = 'notmnist_test.pkl'
        if train:
            with open(os.path.join(root, training_file), 'rb') as f:
                train = pickle.load(f)
            self.data = train['features'].astype(np.uint8)
            self.labels = train['labels'].astype(np.uint8)
        else:
            with open(os.path.join(root, testing_file), 'rb') as f:
                test = pickle.load(f)
            self.data = test['features'].astype(np.uint8)
            self.labels = test['labels'].astype(np.uint8)

    def __getitem__(self, index):
        """
        Args: index (int): Index
        Returns: tuple: (image, target) where target is index of the target class.
        """
        (img, target) = (self.data[index], self.labels[index])
        # img[0]: drop the leading channel axis — samples are stored (1, H, W).
        img = Image.fromarray(img[0])
        if (self.transform is not None):
            img = self.transform(img)
        return (img, target)

    def __len__(self):
        # Number of samples in the loaded split.
        return len(self.data)

    def download(self):
        # Fetch the zip archive into root and extract it in place.
        import errno
        root = os.path.expanduser(self.root)
        fpath = os.path.join(root, self.filename)
        try:
            os.makedirs(root)
        except OSError as e:
            # Directory already existing is fine; anything else propagates.
            if (e.errno == errno.EEXIST):
                pass
            else:
                raise
        urllib.request.urlretrieve(self.url, fpath)
        import zipfile
        zip_ref = zipfile.ZipFile(fpath, 'r')
        zip_ref.extractall(root)
        zip_ref.close()
def get(seed=0, fixed_order=False, pc_valid=0.1):
    # Build (or reload from cache) the 10-task permuted-MNIST benchmark: each
    # task applies a fixed random pixel permutation to all MNIST images.
    # Returns (data, taskcla, size).
    data = {}
    taskcla = []
    size = [1, 28, 28]
    nperm = 10  # number of permutation tasks
    seeds = np.array(list(range(nperm)), dtype=int)
    if (not fixed_order):
        # Randomize which permutation seed maps to which task slot.
        seeds = shuffle(seeds, random_state=seed)
    if (not os.path.isdir(pmnist_dir)):
        # First run: download MNIST, build each permuted task, cache as .bin.
        os.makedirs(pmnist_dir)
        mean = (0.1307,)
        std = (0.3081,)
        dat = {}
        dat['train'] = datasets.MNIST(mnist_dir, train=True, download=True, transform=transforms.Compose([transforms.ToTensor(), transforms.Normalize(mean, std)]))
        dat['test'] = datasets.MNIST(mnist_dir, train=False, download=True, transform=transforms.Compose([transforms.ToTensor(), transforms.Normalize(mean, std)]))
        for (i, r) in enumerate(seeds):
            print(i, end=',')
            sys.stdout.flush()
            data[i] = {}
            data[i]['name'] = 'pmnist-{:d}'.format(i)
            data[i]['ncla'] = 10
            for s in ['train', 'test']:
                loader = torch.utils.data.DataLoader(dat[s], batch_size=1, shuffle=False)
                data[i][s] = {'x': [], 'y': []}
                for (image, target) in loader:
                    aux = image.view((- 1)).numpy()
                    # Fixed per-task permutation: random_state depends only on
                    # (r, i), so every image in the task is permuted identically.
                    aux = shuffle(aux, random_state=((r * 100) + i))
                    image = torch.FloatTensor(aux).view(size)
                    data[i][s]['x'].append(image)
                    data[i][s]['y'].append(target.numpy()[0])
            for s in ['train', 'test']:
                data[i][s]['x'] = torch.stack(data[i][s]['x']).view((- 1), size[0], size[1], size[2])
                data[i][s]['y'] = torch.LongTensor(np.array(data[i][s]['y'], dtype=int)).view((- 1))
                torch.save(data[i][s]['x'], os.path.join(os.path.expanduser(pmnist_dir), ((('data' + str(r)) + s) + 'x.bin')))
                torch.save(data[i][s]['y'], os.path.join(os.path.expanduser(pmnist_dir), ((('data' + str(r)) + s) + 'y.bin')))
        print()
    else:
        # Binary cache exists: just reload the tensors for each task.
        for (i, r) in enumerate(seeds):
            data[i] = dict.fromkeys(['name', 'ncla', 'train', 'test'])
            data[i]['ncla'] = 10
            data[i]['name'] = 'pmnist-{:d}'.format(i)
            for s in ['train', 'test']:
                data[i][s] = {'x': [], 'y': []}
                data[i][s]['x'] = torch.load(os.path.join(os.path.expanduser(pmnist_dir), ((('data' + str(r)) + s) + 'x.bin')))
                data[i][s]['y'] = torch.load(os.path.join(os.path.expanduser(pmnist_dir), ((('data' + str(r)) + s) + 'y.bin')))
    # Carve a validation split off each task's training set.
    # NOTE(review): unlike the sibling loaders, the index array is NOT shuffled
    # here, so the validation set is the first pc_valid fraction in dataset
    # order — confirm whether this is intended.
    for t in data.keys():
        r = np.arange(data[t]['train']['x'].size(0))
        r = np.array(r, dtype=int)
        nvalid = int((pc_valid * len(r)))
        ivalid = torch.LongTensor(r[:nvalid])
        itrain = torch.LongTensor(r[nvalid:])
        data[t]['valid'] = {}
        data[t]['valid']['x'] = data[t]['train']['x'][ivalid].clone()
        data[t]['valid']['y'] = data[t]['train']['y'][ivalid].clone()
        data[t]['train']['x'] = data[t]['train']['x'][itrain].clone()
        data[t]['train']['y'] = data[t]['train']['y'][itrain].clone()
    # Total class count across all tasks.
    n = 0
    for t in data.keys():
        taskcla.append((t, data[t]['ncla']))
        n += data[t]['ncla']
    data['ncla'] = n
    return (data, taskcla, size)
class LanguageIdentification(object):
    """Token-level language identification for code-switched text.

    Loads the LinCE-trained LID checkpoint matching the requested language pair.
    """

    # Supported language pairs and their pretrained Hugging Face checkpoints.
    _CHECKPOINTS = {
        'spa-eng': 'sagorsarker/codeswitch-spaeng-lid-lince',
        'hin-eng': 'sagorsarker/codeswitch-hineng-lid-lince',
        'nep-eng': 'sagorsarker/codeswitch-nepeng-lid-lince',
    }

    def __init__(self, language):
        self.language = language
        model_name = self._CHECKPOINTS.get(self.language)
        if model_name is None:
            raise Exception("No such language found! Try with spa-eng, hin-eng or nep-eng with inverted comman like 'hin-eng' ")
        print('Downloading pretrained model. It will take time according to model size and your internet speed')
        self.tokenizer = AutoTokenizer.from_pretrained(model_name)
        self.model = AutoModelForTokenClassification.from_pretrained(model_name)
        print('Model Download Completed!')

    def identify(self, text):
        """Run the token-classification pipeline and return per-token language tags."""
        lid_model = pipeline('ner', model=self.model, tokenizer=self.tokenizer)
        return lid_model(text)
class POS(object):
    """Part-of-speech tagging for code-switched text.

    Wraps a pretrained HuggingFace token-classification model for the
    requested language pair and exposes it through ``tag``.

    Raises:
        Exception: if ``language`` is not one of the supported pairs.
    """

    # Supported language pair -> pretrained POS checkpoint on the HF hub.
    # (Replaces duplicated if/elif branches that differed only in the name.)
    _MODEL_NAMES = {
        'spa-eng': 'sagorsarker/codeswitch-spaeng-pos-lince',
        'hin-eng': 'sagorsarker/codeswitch-hineng-pos-lince',
    }

    def __init__(self, language):
        """Download tokenizer + model for ``language`` (network I/O)."""
        self.language = language
        if language not in self._MODEL_NAMES:
            raise Exception("No such language found! Try with spa-eng, hin-eng or nep-eng with inverted comman like 'hin-eng' ")
        model_name = self._MODEL_NAMES[language]
        print('Downloading pretrained model. It will take time according to model size and your internet speed')
        self.tokenizer = AutoTokenizer.from_pretrained(model_name)
        self.model = AutoModelForTokenClassification.from_pretrained(model_name)
        print('Model Download Completed!')

    def tag(self, text):
        """Return per-token POS tags for ``text`` via a transformers NER pipeline."""
        pos_model = pipeline('ner', model=self.model, tokenizer=self.tokenizer)
        results = pos_model(text)
        return results
class NER(object):
    """Named-entity recognition for code-switched text.

    Wraps a pretrained HuggingFace token-classification model for the
    requested language pair and exposes it through ``tag``.

    Raises:
        Exception: if ``language`` is not one of the supported pairs.
    """

    # Supported language pair -> pretrained NER checkpoint on the HF hub.
    # (Replaces duplicated if/elif branches that differed only in the name.)
    _MODEL_NAMES = {
        'spa-eng': 'sagorsarker/codeswitch-spaeng-ner-lince',
        'hin-eng': 'sagorsarker/codeswitch-hineng-ner-lince',
    }

    def __init__(self, language):
        """Download tokenizer + model for ``language`` (network I/O)."""
        self.language = language
        if language not in self._MODEL_NAMES:
            raise Exception("No such language found! Try with spa-eng, hin-eng with inverted comman like 'hin-eng' ")
        model_name = self._MODEL_NAMES[language]
        print('Downloading pretrained model. It will take time according to model size and your internet speed')
        self.tokenizer = AutoTokenizer.from_pretrained(model_name)
        self.model = AutoModelForTokenClassification.from_pretrained(model_name)
        print('Model Download Completed!')

    def tag(self, text):
        """Return per-token entity tags for ``text`` via a transformers NER pipeline."""
        pos_model = pipeline('ner', model=self.model, tokenizer=self.tokenizer)
        results = pos_model(text)
        return results
class SentimentAnalysis(object):
    """Sentence-level sentiment analysis for Spanish-English code-switched text.

    Only the ``'spa-eng'`` pair is supported; any other value raises.
    """

    def __init__(self, language):
        """Load the pretrained sentiment checkpoint for ``language`` (network I/O)."""
        # Guard clause: fail fast on unsupported pairs, then load.
        if language != 'spa-eng':
            raise Exception("No such language found! Try with spa-eng with inverted comman like 'spa-eng' ")
        checkpoint = 'sagorsarker/codeswitch-spaeng-sentiment-analysis-lince'
        self.tokenizer = AutoTokenizer.from_pretrained(checkpoint)
        self.model = AutoModelForSequenceClassification.from_pretrained(checkpoint)

    def analyze(self, sentence):
        """Return the sentiment prediction for ``sentence``."""
        classifier = pipeline('sentiment-analysis', model=self.model, tokenizer=self.tokenizer)
        return classifier(sentence)
class Config(object):
    """Base configuration class. For custom configurations, create a
    sub-class that inherits from this one and override properties
    that need to be changed.
    """

    # Name of the configuration (override in sub-classes).
    NAME = None
    GPU_COUNT = 1
    IMAGES_PER_GPU = 1
    STEPS_PER_EPOCH = 1000
    VALIDATION_STEPS = 10
    BACKBONE = 'resnet101'
    COMPUTE_BACKBONE_SHAPE = None
    BACKBONE_STRIDES = [4, 8, 16, 32, 64]
    FPN_CLASSIF_FC_LAYERS_SIZE = 1024
    TOP_DOWN_PYRAMID_SIZE = 256
    NUM_CLASSES = 1
    RPN_ANCHOR_SCALES = (8, 16, 32, 64, 128)
    RPN_ANCHOR_RATIOS = [0.5, 1, 2]
    RPN_ANCHOR_STRIDE = 1
    RPN_NMS_THRESHOLD = 0.0001
    RPN_TRAIN_ANCHORS_PER_IMAGE = 768
    PRE_NMS_LIMIT = 6000
    POST_NMS_ROIS_TRAINING = 3500
    POST_NMS_ROIS_INFERENCE = 2500
    USE_MINI_MASK = True
    MINI_MASK_SHAPE = (56, 56)
    IMAGE_RESIZE_MODE = 'square'
    IMAGE_MIN_DIM = 800
    IMAGE_MAX_DIM = 1024
    IMAGE_MIN_SCALE = 0
    IMAGE_CHANNEL_COUNT = 3
    MEAN_PIXEL = np.array([123.7, 116.8, 103.9])
    TRAIN_ROIS_PER_IMAGE = 512
    SAMPLES_PER_VERTEX = 10
    LSTM_DEPTH = 256
    GRAPH_NEIGHBORS = 10
    ROI_POSITIVE_RATIO = 0.6
    POOL_SIZE = 7
    MASK_POOL_SIZE = 14
    MASK_SHAPE = [28, 28]
    MAX_GT_INSTANCES = 2000
    RPN_BBOX_STD_DEV = np.array([0.1, 0.1, 0.2, 0.2])
    BBOX_STD_DEV = np.array([0.1, 0.1, 0.2, 0.2])
    DETECTION_MAX_INSTANCES = 2000
    DETECTION_MIN_CONFIDENCE = 0.3
    DETECTION_NMS_THRESHOLD = 0.25
    LEARNING_RATE = 0.001
    LEARNING_MOMENTUM = 0.9
    WEIGHT_DECAY = 0.0001
    LOSS_WEIGHTS = {'rpn_class_loss': 1.0, 'rpn_bbox_loss': 1.0, 'mrcnn_class_loss': 1.0, 'mrcnn_bbox_loss': 1.0, 'mrcnn_mask_loss': 1.0, 'roi_alignment_loss': 1, 'row_adj_loss': 1, 'col_adj_loss': 1}
    USE_RPN_ROIS = True
    TRAIN_BN = False
    GRADIENT_CLIP_NORM = 5.0

    def __init__(self):
        """Set values of computed attributes."""
        # Effective batch size spans all GPUs.
        self.BATCH_SIZE = self.GPU_COUNT * self.IMAGES_PER_GPU
        # 'crop' mode fixes the input at MIN_DIM; every other mode pads/resizes
        # to a square of MAX_DIM.
        side = self.IMAGE_MIN_DIM if self.IMAGE_RESIZE_MODE == 'crop' else self.IMAGE_MAX_DIM
        self.IMAGE_SHAPE = np.array([side, side, self.IMAGE_CHANNEL_COUNT])
        # Length of the image-meta vector
        # (presumably id + original shape + shape + window + scale + class ids
        # — TODO confirm against the compose_image_meta helper).
        self.IMAGE_META_SIZE = 1 + 3 + 3 + 4 + 1 + self.NUM_CLASSES

    def display(self):
        """Display Configuration values."""
        print('\nConfigurations:')
        attributes = (a for a in dir(self)
                      if not a.startswith('__') and not callable(getattr(self, a)))
        for attr in attributes:
            print('{:30} {}'.format(attr, getattr(self, attr)))
        print('\n')
class ParallelModel(KM.Model):
    """Subclasses the standard Keras Model and adds multi-GPU support.

    It works by creating a copy of the model on each GPU. Then it slices
    the inputs and sends a slice to each copy of the model, and then
    merges the outputs together and applies the loss on the combined
    outputs.
    """

    def __init__(self, keras_model, gpu_count):
        """Class constructor.

        keras_model: The Keras model to parallelize
        gpu_count: Number of GPUs. Must be > 1
        """
        # First super() call initializes the Model machinery so that
        # make_parallel() can build the replicated graph; the second call
        # re-initializes with the actual inputs/outputs of the wrapper.
        super(ParallelModel, self).__init__()
        self.inner_model = keras_model
        self.gpu_count = gpu_count
        merged_outputs = self.make_parallel()
        super(ParallelModel, self).__init__(inputs=self.inner_model.inputs, outputs=merged_outputs)

    def __getattribute__(self, attrname):
        """Redirect loading and saving methods to the inner model. That's where
        the weights are stored."""
        # Any attribute whose name contains 'load' or 'save' is resolved on
        # the wrapped model instead of the wrapper.
        if (('load' in attrname) or ('save' in attrname)):
            return getattr(self.inner_model, attrname)
        return super(ParallelModel, self).__getattribute__(attrname)

    def summary(self, *args, **kwargs):
        """Override summary() to display summaries of both, the wrapper
        and inner models."""
        super(ParallelModel, self).summary(*args, **kwargs)
        self.inner_model.summary(*args, **kwargs)

    def make_parallel(self):
        """Creates a new wrapper model that consists of multiple replicas of
        the original model placed on different GPUs.
        """
        # Split every model input into gpu_count slices along the batch axis.
        input_slices = {name: tf.split(x, self.gpu_count) for (name, x) in zip(self.inner_model.input_names, self.inner_model.inputs)}
        output_names = self.inner_model.output_names
        # One accumulator list per model output.
        outputs_all = []
        for i in range(len(self.inner_model.outputs)):
            outputs_all.append([])
        # Build one replica ("tower") of the model per GPU; each replica
        # consumes its own batch slice. Note the lambdas close over `name`
        # and `i`, but the layers are applied immediately inside the
        # comprehension, so the current values are used at graph build time.
        for i in range(self.gpu_count):
            with tf.device(('/gpu:%d' % i)):
                with tf.name_scope(('tower_%d' % i)):
                    zipped_inputs = zip(self.inner_model.input_names, self.inner_model.inputs)
                    inputs = [KL.Lambda((lambda s: input_slices[name][i]), output_shape=(lambda s: ((None,) + s[1:])))(tensor) for (name, tensor) in zipped_inputs]
                    outputs = self.inner_model(inputs)
                    if (not isinstance(outputs, list)):
                        outputs = [outputs]
                    # Collect the i-th replica's outputs per output index.
                    for (l, o) in enumerate(outputs):
                        outputs_all[l].append(o)
        # Merge on the CPU: scalar outputs (e.g. losses) are averaged across
        # towers, tensor outputs are concatenated back along the batch axis.
        with tf.device('/cpu:0'):
            merged = []
            for (outputs, name) in zip(outputs_all, output_names):
                if (K.int_shape(outputs[0]) == ()):
                    m = KL.Lambda((lambda o: (tf.add_n(o) / len(outputs))), name=name)(outputs)
                else:
                    m = KL.Concatenate(axis=0, name=name)(outputs)
                merged.append(m)
        return merged
class Config(object):
    """Base configuration class. For custom configurations, create a
    sub-class that inherits from this one and override properties
    that need to be changed.
    """

    # Name of the configuration (override in sub-classes).
    NAME = None
    GPU_COUNT = 1
    IMAGES_PER_GPU = 1
    STEPS_PER_EPOCH = 500
    VALIDATION_STEPS = 10
    BACKBONE = 'resnet101'
    COMPUTE_BACKBONE_SHAPE = None
    BACKBONE_STRIDES = [4, 8, 16, 32, 64]
    FPN_CLASSIF_FC_LAYERS_SIZE = 1024
    TOP_DOWN_PYRAMID_SIZE = 256
    NUM_CLASSES = 1
    RPN_ANCHOR_SCALES = (8, 16, 32, 64, 128)
    RPN_ANCHOR_RATIOS = [0.5, 1, 2]
    RPN_ANCHOR_STRIDE = 1
    RPN_NMS_THRESHOLD = 0.75
    RPN_TRAIN_ANCHORS_PER_IMAGE = 1024
    PRE_NMS_LIMIT = 10000
    POST_NMS_ROIS_TRAINING = 7500
    POST_NMS_ROIS_INFERENCE = 7500
    USE_MINI_MASK = True
    MINI_MASK_SHAPE = (56, 56)
    IMAGE_RESIZE_MODE = 'square'
    IMAGE_MIN_DIM = 800
    IMAGE_MAX_DIM = 1024
    IMAGE_MIN_SCALE = 0
    IMAGE_CHANNEL_COUNT = 3
    MEAN_PIXEL = np.array([123.7, 116.8, 103.9])
    TRAIN_ROIS_PER_IMAGE = 768
    SAMPLES_PER_VERTEX = 6
    LSTM_DEPTH = 256
    GRAPH_NEIGHBORS = 20
    ROI_POSITIVE_RATIO = 0.75
    POOL_SIZE = 7
    MASK_POOL_SIZE = 14
    MASK_SHAPE = [28, 28]
    MAX_GT_INSTANCES = 2000
    RPN_BBOX_STD_DEV = np.array([0.1, 0.1, 0.2, 0.2])
    BBOX_STD_DEV = np.array([0.1, 0.1, 0.2, 0.2])
    DETECTION_MAX_INSTANCES = 2000
    DETECTION_MIN_CONFIDENCE = 0.75
    DETECTION_NMS_THRESHOLD = 0.1
    LEARNING_RATE = 0.001
    LEARNING_MOMENTUM = 0.9
    WEIGHT_DECAY = 0.0001
    LOSS_WEIGHTS = {'rpn_class_loss': 1.0, 'rpn_bbox_loss': 1.0, 'mrcnn_class_loss': 1.0, 'mrcnn_bbox_loss': 1.0, 'mrcnn_mask_loss': 1.0, 'roi_alignment_loss': 1, 'row_adj_loss': 1, 'col_adj_loss': 1}
    USE_RPN_ROIS = True
    TRAIN_BN = False
    GRADIENT_CLIP_NORM = 5.0

    def __init__(self):
        """Set values of computed attributes."""
        # Effective batch size spans all GPUs.
        self.BATCH_SIZE = self.GPU_COUNT * self.IMAGES_PER_GPU
        # 'crop' mode fixes the input at MIN_DIM; every other mode pads/resizes
        # to a square of MAX_DIM.
        side = self.IMAGE_MIN_DIM if self.IMAGE_RESIZE_MODE == 'crop' else self.IMAGE_MAX_DIM
        self.IMAGE_SHAPE = np.array([side, side, self.IMAGE_CHANNEL_COUNT])
        # Length of the image-meta vector
        # (presumably id + original shape + shape + window + scale + class ids
        # — TODO confirm against the compose_image_meta helper).
        self.IMAGE_META_SIZE = 1 + 3 + 3 + 4 + 1 + self.NUM_CLASSES

    def display(self):
        """Display Configuration values."""
        print('\nConfigurations:')
        attributes = (a for a in dir(self)
                      if not a.startswith('__') and not callable(getattr(self, a)))
        for attr in attributes:
            print('{:30} {}'.format(attr, getattr(self, attr)))
        print('\n')
class ParallelModel(KM.Model):
    """Subclasses the standard Keras Model and adds multi-GPU support.

    It works by creating a copy of the model on each GPU. Then it slices
    the inputs and sends a slice to each copy of the model, and then
    merges the outputs together and applies the loss on the combined
    outputs.
    """

    def __init__(self, keras_model, gpu_count):
        """Class constructor.

        keras_model: The Keras model to parallelize
        gpu_count: Number of GPUs. Must be > 1
        """
        # First super() call initializes the Model machinery so that
        # make_parallel() can build the replicated graph; the second call
        # re-initializes with the actual inputs/outputs of the wrapper.
        super(ParallelModel, self).__init__()
        self.inner_model = keras_model
        self.gpu_count = gpu_count
        merged_outputs = self.make_parallel()
        super(ParallelModel, self).__init__(inputs=self.inner_model.inputs, outputs=merged_outputs)

    def __getattribute__(self, attrname):
        """Redirect loading and saving methods to the inner model. That's where
        the weights are stored."""
        # Any attribute whose name contains 'load' or 'save' is resolved on
        # the wrapped model instead of the wrapper.
        if (('load' in attrname) or ('save' in attrname)):
            return getattr(self.inner_model, attrname)
        return super(ParallelModel, self).__getattribute__(attrname)

    def summary(self, *args, **kwargs):
        """Override summary() to display summaries of both, the wrapper
        and inner models."""
        super(ParallelModel, self).summary(*args, **kwargs)
        self.inner_model.summary(*args, **kwargs)

    def make_parallel(self):
        """Creates a new wrapper model that consists of multiple replicas of
        the original model placed on different GPUs.
        """
        # Split every model input into gpu_count slices along the batch axis.
        input_slices = {name: tf.split(x, self.gpu_count) for (name, x) in zip(self.inner_model.input_names, self.inner_model.inputs)}
        output_names = self.inner_model.output_names
        # One accumulator list per model output.
        outputs_all = []
        for i in range(len(self.inner_model.outputs)):
            outputs_all.append([])
        # Build one replica ("tower") of the model per GPU; each replica
        # consumes its own batch slice. Note the lambdas close over `name`
        # and `i`, but the layers are applied immediately inside the
        # comprehension, so the current values are used at graph build time.
        for i in range(self.gpu_count):
            with tf.device(('/gpu:%d' % i)):
                with tf.name_scope(('tower_%d' % i)):
                    zipped_inputs = zip(self.inner_model.input_names, self.inner_model.inputs)
                    inputs = [KL.Lambda((lambda s: input_slices[name][i]), output_shape=(lambda s: ((None,) + s[1:])))(tensor) for (name, tensor) in zipped_inputs]
                    outputs = self.inner_model(inputs)
                    if (not isinstance(outputs, list)):
                        outputs = [outputs]
                    # Collect the i-th replica's outputs per output index.
                    for (l, o) in enumerate(outputs):
                        outputs_all[l].append(o)
        # Merge on the CPU: scalar outputs (e.g. losses) are averaged across
        # towers, tensor outputs are concatenated back along the batch axis.
        with tf.device('/cpu:0'):
            merged = []
            for (outputs, name) in zip(outputs_all, output_names):
                if (K.int_shape(outputs[0]) == ()):
                    m = KL.Lambda((lambda o: (tf.add_n(o) / len(outputs))), name=name)(outputs)
                else:
                    m = KL.Concatenate(axis=0, name=name)(outputs)
                merged.append(m)
        return merged
def _parse_requirements(file_path):
    """Parse a pip requirements file into a list of requirement strings.

    pip >= 6 requires an explicit session object for parse_requirements;
    older versions do not accept one.

    NOTE(review): relies on pip internals (pip.req / pip.download) that were
    removed in pip 10 — presumably this project pins an old pip; verify.
    """
    version_text = pkg_resources.get_distribution('pip').version
    major_minor = [int(part) for part in version_text.split('.')[:2]]
    if major_minor >= [6, 0]:
        parsed = pip.req.parse_requirements(file_path, session=pip.download.PipSession())
    else:
        parsed = pip.req.parse_requirements(file_path)
    return [str(item.req) for item in parsed]
def create_root(file_prefix, width, height, depth):
    """Build the skeleton of a Pascal-VOC-style XML annotation document.

    file_prefix: image file name written into <filename>/<path>
    width/height/depth: image dimensions for the <size> element
    Returns the root <annotations> Element. Relies on the module-level
    `output_images_dir` for the <path> value.
    """
    root = ET.Element('annotations')

    def _child(parent, tag, text=None):
        # Small helper: append a sub-element and optionally set its text.
        node = ET.SubElement(parent, tag)
        if text is not None:
            node.text = text
        return node

    _child(root, 'folder', 'images')
    _child(root, 'filename', '{}'.format(file_prefix))
    _child(root, 'path', output_images_dir + '{}'.format(file_prefix))
    source = _child(root, 'source')
    _child(source, 'database', 'Unknown')
    size = _child(root, 'size')
    _child(size, 'width', str(width))
    _child(size, 'height', str(height))
    _child(size, 'depth', str(depth))
    _child(root, 'segmentated', '0')
    return root
def create_object_annotation(root, table_list, table_information_list):
    """Append one <object> per table to ``root``, including per-cell data.

    table_list: one list of cell tuples per table; each cell tuple is
        (dont_care, end_col, end_row, start_col, start_row, x0, x1, y0, y1).
    table_information_list: one bbox tuple per table; indexed as
        [0]->xmin, [2]->ymin, [1]->xmax, [3]->ymax
        (presumably stored as (xmin, xmax, ymin, ymax) — TODO confirm).
    Returns the mutated ``root``.
    """
    print('length_table_list==>', len(table_list))
    # Tag order mirrors the positional layout of each cell tuple.
    cell_tags = ('dont_care', 'end_col', 'end_row',
                 'start_col', 'start_row', 'x0', 'x1', 'y0', 'y1')
    for idx, table_cells in enumerate(table_list):
        info = table_information_list[idx]
        obj = ET.SubElement(root, 'object')
        ET.SubElement(obj, 'name').text = 'table'
        ET.SubElement(obj, 'pose').text = 'Unspecified'
        ET.SubElement(obj, 'truncated').text = str(0)
        ET.SubElement(obj, 'difficult').text = str(0)
        bbox = ET.SubElement(obj, 'bndbox')
        ET.SubElement(bbox, 'xmin').text = str(info[0])
        ET.SubElement(bbox, 'ymin').text = str(info[2])
        ET.SubElement(bbox, 'xmax').text = str(info[1])
        ET.SubElement(bbox, 'ymax').text = str(info[3])
        cells = ET.SubElement(obj, 'cells')
        for detail in table_cells:
            cell = ET.SubElement(cells, 'tablecell')
            for pos, tag in enumerate(cell_tags):
                ET.SubElement(cell, tag).text = str(detail[pos])
    return root
def match_ann(fileName):
    """Classify the right-hand pose stored in an OpenPose keypoints JSON file.

    fileName: path to an OpenPose *_keypoints.json file.
    Returns the predicted label string, or 'no confidence' when the summed
    keypoint confidence is too low. Relies on module-level `helper`, `scale`,
    `move` utilities and a trained `model` (Keras-style, has .predict).
    """
    js = json.loads(open(fileName).read())
    for items in js['people']:
        # (x, y, confidence) triplets for the 21 right-hand keypoints.
        handRight = items['hand_right_keypoints_2d']
        confPoints = helper.confidencePoints(handRight)
        confidence = helper.confidence(confPoints)
        # Only classify reasonably confident detections.
        if (confidence > 10.2):
            handPoints = helper.removePoints(handRight)
            '\n experimenting with scaling \n '
            # Wrist (keypoint 0) to middle-finger base (keypoint 9) distance,
            # used as the hand-size reference for scaling.
            p1 = [handPoints[0], handPoints[1]]
            p2 = [handPoints[18], handPoints[19]]
            distance = math.sqrt((((p1[0] - p2[0]) ** 2) + ((p1[1] - p2[1]) ** 2)))
            # NOTE(review): the scaled Result/Points are computed but unused —
            # the prediction below is made on the merely re-centered points.
            (Result, Points) = scale.scalePoints(handPoints, distance)
            (handRightResults, handRightPoints) = move.centerPoints(handPoints)
            '\n extracting data from db\n '
            connection = sqlite3.connect('data\\db\\main_dataset.db')
            crsr = connection.cursor()
            # Build "SELECT x1,y1,x2,y2,...,x21,y21 FROM rightHandDataset".
            sql = 'SELECT x1,y1'
            for x in range(2, 22):
                sql = ((((sql + ',x') + str(x)) + ',y') + str(x))
            sql = (sql + ' FROM rightHandDataset WHERE 1')
            crsr.execute(sql)
            feature_res = crsr.fetchall()
            feature_res = np.asarray(feature_res)
            features = []
            for x in feature_res:
                features.append(x)
            crsr.execute('SELECT label FROM rightHandDataset WHERE 1')
            label_res = crsr.fetchall()
            labels = []
            for x in label_res:
                labels.append(x)
            # Fit the encoder/scaler on the stored dataset so the prediction
            # index can be mapped back to a label string.
            le = preprocessing.LabelEncoder()
            label_encoded = le.fit_transform(labels)
            label_encoded = to_categorical(label_encoded)
            # NOTE(review): the split/X_test/y_* values are unused here —
            # only the scaler fitted on X_train matters for inference.
            (X_train, X_test, y_train, y_test) = train_test_split(features, label_encoded, test_size=0.2)
            scaler = StandardScaler().fit(X_train)
            X_train = scaler.transform(X_train)
            X_test = scaler.transform(X_test)
            y_pred = model.predict(scaler.transform(np.array([handRightResults])))
            C = np.argmax(y_pred)
            result = le.inverse_transform([C])
            return result[0]
        else:
            return 'no confidence'
def signal_handler(signal, frame):
    """SIGINT handler: remove all temporary working folders and exit cleanly."""
    temp_dirs = ('Keypoints', 'gui\\captured_images', 'gui\\temp_images')
    for temp_dir in temp_dirs:
        shutil.rmtree(temp_dir, ignore_errors=True, onerror=handleRemoveReadonly)
    print('All done')
    sys.exit(0)
def handleRemoveReadonly(func, path, exc):
    """shutil.rmtree onerror callback: retry deletions blocked by read-only bits.

    func: the os function that failed (e.g. os.remove / os.rmdir)
    path: the path it failed on
    exc: the (type, value, traceback) triple from sys.exc_info()

    For permission errors on remove/rmdir, clear the read-only attribute and
    retry; anything else is re-raised.
    """
    excvalue = exc[1]
    if ((func in (os.rmdir, os.remove)) and (excvalue.errno == errno.EACCES)):
        # Grant rwx to user/group/other so the retry can succeed (0777).
        os.chmod(path, ((stat.S_IRWXU | stat.S_IRWXG) | stat.S_IRWXO))
        func(path)
    else:
        # Re-raise the original error instead of a bare `raise Exception`,
        # which discarded the failure details.
        raise excvalue
def plotPose(posePoints, handRightPoints, handLeftPoints):
    """Render the upper-body skeleton plus both hands on a black background.

    Each argument is a list of (x, y) tuples (OpenPose ordering); missing
    body keypoints are encoded as (0, 0) and their limbs are skipped.
    Returns the drawn frame (cv2 image).
    """
    POSE_PAIRS = [[1, 0], [1, 2], [1, 5], [2, 3], [3, 4], [5, 6], [6, 7], [1, 8], [0, 15], [15, 17], [0, 16], [16, 18]]
    HAND_PAIRS = [[0, 1], [1, 2], [2, 3], [3, 4], [0, 5], [5, 6], [6, 7], [7, 8], [0, 9], [9, 10], [10, 11], [11, 12], [0, 13], [13, 14], [14, 15], [15, 16], [0, 17], [17, 18], [18, 19], [19, 20]]
    colors = [[0, 0, 130], [0, 0, 175], [0, 0, 210], [0, 0, 250], [0, 200, 160], [0, 180, 150], [0, 230, 186], [0, 255, 255], [82, 201, 8], [82, 204, 0], [92, 230, 0], [102, 252, 6], [197, 88, 17], [204, 82, 0], [179, 71, 0], [227, 94, 5], [204, 0, 163], [200, 0, 163], [196, 0, 163], [230, 0, 184]]
    frame = cv2.imread('PSL\\BLACK_background.jpg')
    # Body limbs: thick lines, larger joint dots; skip any limb whose
    # endpoint has a zero coordinate (OpenPose "not detected" marker).
    # NOTE(review): colors are indexed per pair position — confirm the
    # original advanced the color index on skipped limbs as well.
    for idx, (a, b) in enumerate(POSE_PAIRS):
        pa, pb = posePoints[a], posePoints[b]
        if (pa and pb and pa[0] != 0 and pa[1] != 0 and pb[0] != 0 and pb[1] != 0):
            cv2.line(frame, pa, pb, colors[idx], 10)
            cv2.circle(frame, pa, 5, (0, 0, 255), thickness=10, lineType=cv2.FILLED)
            cv2.circle(frame, pb, 5, (255, 255, 255), thickness=15, lineType=cv2.FILLED)
    # Both hands use the same pair topology and thinner joint dots.
    for hand in (handRightPoints, handLeftPoints):
        for idx, (a, b) in enumerate(HAND_PAIRS):
            pa, pb = hand[a], hand[b]
            if (pa and pb):
                cv2.line(frame, pa, pb, colors[idx], 10)
                cv2.circle(frame, pa, 5, (0, 0, 255), thickness=3, lineType=cv2.FILLED)
                cv2.circle(frame, pb, 5, (255, 255, 255), thickness=4, lineType=cv2.FILLED)
    return frame
@eel.expose
def capture_alphabet_dataset(sec):
    """Record `sec` seconds of OpenPose right-hand keypoints for dataset capture.

    Launches OpenPoseDemo.exe, collects per-frame *_keypoints.json files into
    ./Keypoints, drops low-confidence frames, renders a preview image per
    remaining frame into gui/captured_images, and remembers the kept file
    names in the module-level `remfileNames` for later deletion.
    """
    global remfileNames
    '\n ----------------------Start OpenPoseDemo.exe----------------------\n --render_pose 0 --display 0\n '
    # OpenPose must be started from its own folder; restore cwd afterwards.
    os.chdir('bin\\openpose')
    print('Starting OpenPose')
    subprocess.Popen('bin\\OpenPoseDemo.exe --hand --write_json ..\\..\\Keypoints --number_people_max 1', shell=True)
    os.chdir('..\\..')
    '\n ----------------------Creating temp folder----------------------\n '
    dirName = 'Keypoints'
    init_file = 'PSL\\000000000000_keypoints.json'
    try:
        os.mkdir(dirName)
        os.mkdir('gui\\captured_images')
        os.mkdir('gui\\temp_images')
        # Seed file so the folder is never empty while OpenPose spins up.
        shutil.copy(init_file, dirName)
        print('Directory ', dirName, ' Created ')
    except FileExistsError:
        print('Directory ', dirName, ' already exists')
    '\n ----------------------Live View----------------------\n '
    # Keep the eel event loop responsive while OpenPose records for `sec` s.
    t = (time.time() + sec)
    while (time.time() <= t):
        eel.sleep(0.05)
    # Windows-specific: force-stop the recorder.
    os.system('taskkill /f /im OpenPoseDemo.exe')
    '\n ---------------------- Auto Remove files----------------------\n '
    # Drop frames whose summed right-hand keypoint confidence is too low.
    conf_thershold = 10
    fileNames = []
    for entry in os.scandir('Keypoints'):
        if entry.is_file():
            if (os.path.splitext(entry)[1] == '.json'):
                fileNames.append(entry.name)
    for x in range(len(fileNames)):
        js = json.loads(open(('Keypoints\\' + fileNames[x])).read())
        for items in js['people']:
            handRight = items['hand_right_keypoints_2d']
            confPoints = helper.confidencePoints(handRight)
            confidence = helper.confidence(confPoints)
            print(confidence)
            if (confidence < conf_thershold):
                os.remove(('Keypoints\\' + fileNames[x]))
    '\n ----------------------plot and save----------------------\n '
    # Re-scan (some files were removed) and render a normalized preview
    # image per surviving frame for the GUI.
    background = 'big_background.png'
    fileNames = []
    for entry in os.scandir('Keypoints'):
        if entry.is_file():
            if (os.path.splitext(entry)[1] == '.json'):
                fileNames.append(entry.name)
    frame = cv2.imread(background)
    i = 1
    for x in range(len(fileNames)):
        js = json.loads(open(('Keypoints\\' + fileNames[x])).read())
        for items in js['people']:
            handRight = items['hand_right_keypoints_2d']
            handPoints = helper.removePoints(handRight)
            # Wrist-to-middle-finger-base distance as scaling reference.
            p1 = [handPoints[0], handPoints[1]]
            p2 = [handPoints[18], handPoints[19]]
            distance = math.sqrt((((p1[0] - p2[0]) ** 2) + ((p1[1] - p2[1]) ** 2)))
            (Result, Points) = scale.dummy_scalePoints(handPoints, distance)
            (handRightResults, handRightPoints) = move.dummy_centerPoints(Result)
            frame = plot.plot_dataset(handRightPoints, 'black')
            cv2.imwrite((('gui\\captured_images\\' + str(i)) + '.jpg'), frame)
            i += 1
    '\n ----------------------get ref to delete files----------------------\n '
    # Remember kept file names so delete_Image() can map preview index ->
    # keypoints JSON file.
    for entry in os.scandir('Keypoints'):
        if entry.is_file():
            if (os.path.splitext(entry)[1] == '.json'):
                remfileNames.append(entry.name)
    '\n ----------------------end capture_alphabet_dataset(sec)----------------------\n '
@eel.expose
def getFileCount():
    """Return the number of captured preview images as a string (for the JS UI)."""
    count = sum(1 for _ in os.scandir('gui\\captured_images'))
    return str(count)
@eel.expose
def delete_Image(i):
    """Delete the i-th captured frame (1-based): its keypoints JSON and preview JPG.

    i: index as shown in the GUI; maps into the module-level `remfileNames`.
    Missing files or an out-of-range index are reported, not raised.
    """
    global remfileNames
    print(remfileNames)
    try:
        os.remove('Keypoints\\' + remfileNames[(i - 1)])
        os.remove('gui\\captured_images\\' + str(i) + '.jpg')
    except (IndexError, OSError):
        # Was a bare `except:`, which also swallowed KeyboardInterrupt and
        # SystemExit; only missing-file / bad-index errors are expected here.
        print('file not found')
@eel.expose
def getlabel(a):
    """Archive the captured keypoints under the dataset folder for label `a`.

    Finds the dataset subfolder whose name matches the (stripped) label,
    copies everything from ./Keypoints into a new timestamped folder inside
    it, then removes all temporary capture folders.
    """
    label = a.strip()
    print(label)
    # Traverse the alphabets dataset, find the subfolder matching `label`,
    # create a timestamped folder inside it and copy ./Keypoints there.
    for entry in os.scandir('data\\datasets\\alphabets_dataset'):
        if (entry.name == label):
            now = datetime.now()
            timestamp = str(datetime.timestamp(now))
            dir_name = ((('data\\datasets\\alphabets_dataset\\' + entry.name) + '\\') + timestamp)
            try:
                os.mkdir(dir_name)
                print('Directory ', dir_name, ' Created ')
            except FileExistsError:
                print('Directory ', dir_name, ' already exists')
            copy_tree('Keypoints', dir_name)
    # Remove temp folders.
    try:
        shutil.rmtree('Keypoints', ignore_errors=True, onerror=handleRemoveReadonly)
        shutil.rmtree('gui\\captured_images', ignore_errors=True, onerror=handleRemoveReadonly)
        shutil.rmtree('gui\\temp_images', ignore_errors=True, onerror=handleRemoveReadonly)
        print('Keypoints_temp folder removed')
    except OSError:
        # Was a bare `except:`; only filesystem errors are expected here.
        print('not removed')
@eel.expose
def db_train():
    """Front-end hook: trigger model retraining; delegates to retrain.re_train.

    The argument 1 is passed straight through — presumably a mode/flag for
    re_train; confirm against the retrain module.
    """
    retrain.re_train(1)
def signal_handler(signal, frame):
    """SIGINT handler: delete the temporary Keypoints folder, then exit."""
    keypoints_dir = 'Keypoints'
    shutil.rmtree(keypoints_dir, ignore_errors=True, onerror=handleRemoveReadonly)
    print('All done')
    sys.exit(0)
def handleRemoveReadonly(func, path, exc):
    """shutil.rmtree onerror callback: retry deletions blocked by read-only bits.

    func: the os function that failed (e.g. os.remove / os.rmdir)
    path: the path it failed on
    exc: the (type, value, traceback) triple from sys.exc_info()

    For permission errors on remove/rmdir, clear the read-only attribute and
    retry; anything else is re-raised.
    """
    excvalue = exc[1]
    if ((func in (os.rmdir, os.remove)) and (excvalue.errno == errno.EACCES)):
        # Grant rwx to user/group/other so the retry can succeed (0777).
        os.chmod(path, ((stat.S_IRWXU | stat.S_IRWXG) | stat.S_IRWXO))
        func(path)
    else:
        # Re-raise the original error instead of a bare `raise Exception`,
        # which discarded the failure details.
        raise excvalue
def json_files(Dir):
    """Collect .json files up to three directory levels below `Dir`.

    Returns (files, fileNames, folders):
      files     - full paths of .json files found at depth 1..3
      fileNames - their base names, in the same order
      folders   - every directory seen at depth 1..3 (pre-order)
    """
    folders = []
    files = []
    fileNames = []

    def _scan(directory, depth):
        # Mirrors the original hand-unrolled traversal: directories are
        # recorded at every level, but recursion stops at depth 3, so files
        # deeper than three levels are never examined.
        for entry in os.scandir(directory):
            if entry.is_dir():
                folders.append(entry.path)
                if depth < 3:
                    _scan(entry.path, depth + 1)
            elif entry.is_file():
                if os.path.splitext(entry)[1] == '.json':
                    files.append(entry.path)
                    fileNames.append(entry.name)

    _scan(Dir, 1)
    return (files, fileNames, folders)
def removePoints(handRight):
    """Strip confidences from an OpenPose (x, y, c) triplet stream.

    handRight: flat [x0, y0, c0, x1, y1, c1, ...] list.
    Returns the flat coordinate list [x0, y0, x1, y1, ...].
    """
    return [handRight[i + off]
            for i in range(0, len(handRight), 3)
            for off in (0, 1)]
def getCoordPoints(handRight):
    """Convert an OpenPose (x, y, c) triplet stream to integer (x, y) tuples.

    handRight: flat [x0, y0, c0, x1, y1, c1, ...] list.
    Returns [(int(x0), int(y0)), (int(x1), int(y1)), ...].
    """
    return [(int(handRight[i]), int(handRight[i + 1]))
            for i in range(0, len(handRight), 3)]
def confidencePoints(handRight):
    """Extract only the confidence values from an OpenPose (x, y, c) stream.

    handRight: flat [x0, y0, c0, x1, y1, c1, ...] list.
    Returns [c0, c1, ...] as a new list.
    """
    # Every third element starting at index 2 is a confidence score.
    return list(handRight[2::3])
def confidence(handRight):
    """Sum the per-keypoint confidence scores.

    handRight: sequence of confidence values (see confidencePoints).
    Returns their total; 0 for an empty sequence (the original indexed
    handRight[0] first and raised IndexError on empty input, and also
    shadowed the builtin `sum`).
    """
    return sum(handRight)
def seperate_points(handRight):
    """Flatten a list of (x, y) pairs into [x0, y0, x1, y1, ...]."""
    flat = []
    for point in handRight:
        flat.extend((point[0], point[1]))
    return flat
def join_points(handRight):
    """Inverse of seperate_points: group a flat [x0, y0, ...] list into
    integer (x, y) tuples."""
    return [(int(handRight[i]), int(handRight[i + 1]))
            for i in range(0, len(handRight), 2)]
def isolatePoints(handRight):
    """Translate the hand so it sits 10 px inside the top-left corner.

    handRight: flat [x0, y0, x1, y1, ...] list.
    Returns (flat_coords, int_point_tuples) after shifting all points so the
    minimum x and y each become 10.
    """
    xs = list(handRight[0::2])
    ys = list(handRight[1::2])
    # Offset so the smallest coordinate on each axis lands at 10.
    shift_x = min(xs, key=float) - 10
    shift_y = min(ys, key=float) - 10
    xs = [x - shift_x for x in xs]
    ys = [y - shift_y for y in ys]
    flat = []
    points = []
    for idx in range(len(xs)):
        points.append((int(xs[idx]), int(ys[idx])))
        flat.append(xs[idx])
        flat.append(ys[idx])
    return (flat, points)
def centerPoints(handRight):
    """Translate the hand so the first keypoint (wrist) lands on (150, 150).

    handRight: flat [x0, y0, x1, y1, ...] list.
    Returns (flat_coords, int_point_tuples) with every point shifted by the
    same offset that moves point 0 onto the fixed reference.
    """
    REF = 150
    xs = list(handRight[0::2])
    ys = list(handRight[1::2])
    dx = xs[0] - REF
    dy = ys[0] - REF
    xs = [x - dx for x in xs]
    ys = [y - dy for y in ys]
    flat = []
    points = []
    for idx in range(len(xs)):
        points.append((int(xs[idx]), int(ys[idx])))
        flat.append(xs[idx])
        flat.append(ys[idx])
    return (flat, points)
def dummy_centerPoints(handRight):
    """Translate the hand so the first keypoint (wrist) lands on (600, 600).

    Same as centerPoints but with the larger preview reference point.
    Returns (flat_coords, int_point_tuples).
    """
    REF = 600
    xs = list(handRight[0::2])
    ys = list(handRight[1::2])
    dx = xs[0] - REF
    dy = ys[0] - REF
    xs = [x - dx for x in xs]
    ys = [y - dy for y in ys]
    flat = []
    points = []
    for idx in range(len(xs)):
        points.append((int(xs[idx]), int(ys[idx])))
        flat.append(xs[idx])
        flat.append(ys[idx])
    return (flat, points)
def movePoints(handRight, addX, addY):
    """Shift every keypoint by (addX, addY).

    handRight: flat [x0, y0, x1, y1, ...] list.
    Returns (flat_coords, int_point_tuples). The original computed a
    reference offset from the first point, which reduces exactly to a
    uniform (+addX, +addY) translation.
    """
    xs = [handRight[i] + addX for i in range(0, len(handRight), 2)]
    ys = [handRight[i] + addY for i in range(1, len(handRight), 2)]
    flat = []
    points = []
    for idx in range(len(xs)):
        points.append((int(xs[idx]), int(ys[idx])))
        flat.append(xs[idx])
        flat.append(ys[idx])
    return (flat, points)
def moveBothHands(handRight, handLeft, addX, addY):
    """Shift both hands by (addX, addY).

    The right hand is shifted unconditionally. For the left hand, a 0
    x-coordinate marks a missing OpenPose keypoint: such x values are left
    untouched, and the matching y is only shifted when the (already shifted)
    x is non-zero — exactly mirroring the original's two-pass update order.
    Returns (rightFlat, rightPoints, leftFlat, leftPoints).
    """
    # --- right hand: plain uniform translation ---
    rxs = [handRight[i] + addX for i in range(0, len(handRight), 2)]
    rys = [handRight[i] + addY for i in range(1, len(handRight), 2)]
    rightFlat = []
    rightPts = []
    for idx in range(len(rxs)):
        rightPts.append((int(rxs[idx]), int(rys[idx])))
        rightFlat.append(rxs[idx])
        rightFlat.append(rys[idx])
    # --- left hand: guarded translation ---
    lxs = list(handLeft[0::2])
    lys = list(handLeft[1::2])
    for idx in range(len(lxs)):
        if lxs[idx] != 0:
            lxs[idx] += addX
    # NOTE: the guard deliberately re-checks the (already shifted) x values,
    # as the original did.
    for idx in range(len(lys)):
        if lxs[idx] != 0:
            lys[idx] += addY
    leftFlat = []
    leftPts = []
    for idx in range(len(lxs)):
        leftPts.append((int(lxs[idx]), int(lys[idx])))
        leftFlat.append(lxs[idx])
        leftFlat.append(lys[idx])
    return (rightFlat, rightPts, leftFlat, leftPts)
def move_to_wrist(handRight, wristX, wristY):
    """Translate the hand so its first keypoint lands on (wristX, wristY).

    handRight: flat [x0, y0, x1, y1, ...] list.
    Returns (flat_coords, int_point_tuples).
    """
    xs = list(handRight[0::2])
    ys = list(handRight[1::2])
    dx = xs[0] - wristX
    dy = ys[0] - wristY
    xs = [x - dx for x in xs]
    ys = [y - dy for y in ys]
    flat = []
    points = []
    for idx in range(len(xs)):
        points.append((int(xs[idx]), int(ys[idx])))
        flat.append(xs[idx])
        flat.append(ys[idx])
    return (flat, points)
def scaleBody(handRight, distance):
    """Uniformly rescale all points by the factor 200 / distance.

    handRight: flat [x0, y0, x1, y1, ...] list.
    distance: reference length to normalize against.
    Returns (flat_coords, int_point_tuples).
    """
    factor = 200 / distance
    xs = [handRight[i] * factor for i in range(0, len(handRight), 2)]
    ys = [handRight[i] * factor for i in range(1, len(handRight), 2)]
    flat = []
    points = []
    for idx in range(len(xs)):
        points.append((int(xs[idx]), int(ys[idx])))
        flat.append(xs[idx])
        flat.append(ys[idx])
    return (flat, points)
def moveBody(handRight):
    """Translate the body so keypoint 1 (neck) lands on (1000, 400).

    handRight: flat [x0, y0, x1, y1, ...] list. Zero coordinates mark
    missing keypoints and are preserved untouched (checked per axis).
    Returns (flat_coords, int_point_tuples).
    """
    REF_X = 1000
    REF_Y = 400
    xs = list(handRight[0::2])
    ys = list(handRight[1::2])
    # Offset computed from keypoint index 1, not 0.
    dx = xs[1] - REF_X
    dy = ys[1] - REF_Y
    for idx in range(len(xs)):
        if xs[idx] != 0:
            xs[idx] -= dx
    for idx in range(len(ys)):
        if ys[idx] != 0:
            ys[idx] -= dy
    flat = []
    points = []
    for idx in range(len(xs)):
        points.append((int(xs[idx]), int(ys[idx])))
        flat.append(xs[idx])
        flat.append(ys[idx])
    return (flat, points)
def dummyMoveBody(handRight):
    """Translate the body so keypoint 1 (neck) lands on (400, 200).

    Same as moveBody but with the smaller preview reference point; zero
    coordinates (missing keypoints) are preserved untouched.
    Returns (flat_coords, int_point_tuples).
    """
    REF_X = 400
    REF_Y = 200
    xs = list(handRight[0::2])
    ys = list(handRight[1::2])
    dx = xs[1] - REF_X
    dy = ys[1] - REF_Y
    for idx in range(len(xs)):
        if xs[idx] != 0:
            xs[idx] -= dx
    for idx in range(len(ys)):
        if ys[idx] != 0:
            ys[idx] -= dy
    flat = []
    points = []
    for idx in range(len(xs)):
        points.append((int(xs[idx]), int(ys[idx])))
        flat.append(xs[idx])
        flat.append(ys[idx])
    return (flat, points)
def dummyScaleBody(handRight, distance):
    """Scale a flat [x0, y0, ...] list to a reference length of 500, then
    double every coordinate (net factor 1000 / distance).

    Args:
        handRight: flat list of alternating x/y coordinates.
        distance: reference length used to derive the scale factor
            (must be non-zero).

    Returns:
        Tuple of (flat scaled coordinate list, list of (int(x), int(y))
        tuples).
    """
    REF_LENGTH = 500
    factor = REF_LENGTH / distance
    # Scale, then apply the extra 2x enlargement as a separate step
    # (matches the original two-pass arithmetic).
    xs = [(v * factor) * 2 for v in handRight[0::2]]
    ys = [(v * factor) * 2 for v in handRight[1::2]]
    flat = []
    points = []
    for px, py in zip(xs, ys):
        points.append((int(px), int(py)))
        flat.append(px)
        flat.append(py)
    return (flat, points)
def plot_skeleton(fileName, background, isMove, isScale):
    """Read an OpenPose keypoint JSON file and draw the right-hand skeleton
    onto a background image.

    Args:
        fileName: path to the OpenPose JSON output file.
        background: image filename, resolved against a hard-coded local
            directory (machine-specific path — portability concern).
        isMove: when truthy, re-center the points via move.centerPoints.
        isScale: when truthy, normalize hand size via scale.scalePoints.

    Returns:
        The frame with the hand skeleton drawn for the last person in the
        file (each iteration overwrites `frame`).
    """
    js = json.loads(open(fileName).read())
    for items in js['people']:
        handRight = items['hand_right_keypoints_2d']
        handCoord = helper.getCoordPoints(handRight)
        handPoints = helper.removePoints(handRight)
        # Reference length: distance between keypoint 0 (flat indices 0/1)
        # and keypoint 9 (flat indices 18/19) of the stripped point list.
        p1 = [handPoints[0], handPoints[1]]
        p2 = [handPoints[18], handPoints[19]]
        distance = math.sqrt((((p1[0] - p2[0]) ** 2) + ((p1[1] - p2[1]) ** 2)))
        if isScale:
            (handRightResult, handRightPoints) = scale.scalePoints(handPoints, distance)
        else:
            # No scaling: keep the raw values and their coordinate tuples.
            handRightResult = handPoints
            handRightPoints = handCoord
        if isMove:
            (handRightResult, handRightPoints) = move.centerPoints(handRightResult)
        # Distance recomputed after normalization; the result is not used
        # below (NOTE(review): dead computation — confirm before removing).
        p1 = [handRightResult[0], handRightResult[1]]
        p2 = [handRightResult[18], handRightResult[19]]
        distance = math.sqrt((((p1[0] - p2[0]) ** 2) + ((p1[1] - p2[1]) ** 2)))
        frame = cv2.imread(('C:\\123Drive\\Python\\Sign_Language_Interpreter\\' + background))
        # POSE_PAIRS is the module-level hand connectivity table.
        for pair in POSE_PAIRS:
            partA = pair[0]
            partB = pair[1]
            if (handRightPoints[partA] and handRightPoints[partB]):
                cv2.line(frame, handRightPoints[partA], handRightPoints[partB], (0, 255, 255), 2)
                cv2.circle(frame, handRightPoints[partA], 5, (0, 0, 255), thickness=(- 1), lineType=cv2.FILLED)
                cv2.circle(frame, handRightPoints[partB], 5, (0, 0, 255), thickness=(- 1), lineType=cv2.FILLED)
    return frame
def plot_points(points, background):
    """Render a hand skeleton from a flat [x0, y0, x1, y1, ...] list.

    Draws every bone in the module-level POSE_PAIRS table on top of the
    image file named by *background* and returns the annotated frame.
    """
    # Convert the interleaved coordinate stream into (int x, int y) tuples.
    coords = [(int(points[i]), int(points[i + 1]))
              for i in range(0, len(points), 2)]
    frame = cv2.imread('' + background)
    for a, b in POSE_PAIRS:
        if coords[a] and coords[b]:
            cv2.line(frame, coords[a], coords[b], (0, 255, 255), 2)
            cv2.circle(frame, coords[a], 5, (0, 0, 255), thickness=-1,
                       lineType=cv2.FILLED)
    return frame
def plot_db():
    """Render every sample stored in the rightHandDataset table.

    Fetches the 21 (x, y) right-hand keypoint columns for every row, draws
    the skeleton on a fresh copy of the background image, and collects each
    frame converted to RGB.

    Returns:
        List of RGB frames, one per database row.
    """
    ret_frame = []
    # Hand connectivity: each pair is a bone between two keypoint indices.
    POSE_PAIRS = [[0, 1], [1, 2], [2, 3], [3, 4], [0, 5], [5, 6], [6, 7], [7, 8], [0, 9], [9, 10], [10, 11], [11, 12], [0, 13], [13, 14], [14, 15], [15, 16], [0, 17], [17, 18], [18, 19], [19, 20]]
    background = 'big_background.png'
    connection = sqlite3.connect('db\\main_dataset.db')
    crsr = connection.cursor()
    # Build the fixed column list x1,y1 .. x21,y21 by concatenation.
    sql = 'SELECT x1,y1'
    for x in range(2, 22):
        sql = ((((sql + ',x') + str(x)) + ',y') + str(x))
    sql = (sql + ' FROM rightHandDataset WHERE 1')
    crsr.execute(sql)
    feature_res = crsr.fetchall()
    # NOTE(review): the inner loops below reuse `x` as their index, shadowing
    # the outer loop variable; this works only because the outer `for`
    # rebinds `x` from its own iterator on every iteration.
    for x in range(len(feature_res)):
        points = feature_res[x]
        handRight = points
        handRightPoints = []
        handRightX = []
        handRightY = []
        # Split the interleaved row into x and y sequences.
        for x in range(0, len(handRight), 2):
            handRightX.append(handRight[x])
        for x in range(1, len(handRight), 2):
            handRightY.append(handRight[x])
        for x in range(len(handRightX)):
            handRightPoints.append((int(handRightX[x]), int(handRightY[x])))
        frame = cv2.imread(background)
        for pair in POSE_PAIRS:
            partA = pair[0]
            partB = pair[1]
            if (handRightPoints[partA] and handRightPoints[partB]):
                cv2.line(frame, handRightPoints[partA], handRightPoints[partB], (0, 255, 255), 2)
                cv2.circle(frame, handRightPoints[partA], 5, (0, 0, 255), thickness=(- 1), lineType=cv2.FILLED)
                cv2.circle(frame, handRightPoints[partB], 5, (0, 0, 255), thickness=(- 1), lineType=cv2.FILLED)
        # cv2 loads BGR; convert so downstream display sees RGB.
        frame = cv2.cvtColor(frame, cv2.COLOR_BGR2RGB)
        ret_frame.append(frame)
        # Reload a clean background (the reloaded frame is immediately
        # replaced at the top of the next iteration).
        frame = cv2.imread(background)
    return ret_frame
def plot_db_label(label):
    """Render every rightHandDataset sample carrying the given label.

    Fetches the 21 (x, y) right-hand keypoint columns for each matching row
    and draws the skeleton on a fresh copy of the background image.

    Args:
        label: class label to filter on; leading/trailing whitespace is
            ignored.

    Returns:
        List of RGB frames, one per matching database row.
    """
    ret_frame = []
    # Hand connectivity: each pair is a bone between two keypoint indices.
    POSE_PAIRS = [[0, 1], [1, 2], [2, 3], [3, 4], [0, 5], [5, 6], [6, 7], [7, 8], [0, 9], [9, 10], [10, 11], [11, 12], [0, 13], [13, 14], [14, 15], [15, 16], [0, 17], [17, 18], [18, 19], [19, 20]]
    background = 'big_background.png'
    connection = sqlite3.connect('db\\main_dataset.db')
    crsr = connection.cursor()
    # Fixed column list x1,y1 .. x21,y21 (trusted, generated — not user input).
    columns = ','.join('x%d,y%d' % (i, i) for i in range(1, 22))
    # Parameterized query: the original concatenated the user-supplied label
    # straight into the SQL string (injection / quoting bug for labels
    # containing quotes).
    crsr.execute('SELECT ' + columns + ' FROM rightHandDataset WHERE label = ?',
                 (label.strip(),))
    feature_res = crsr.fetchall()
    for row in feature_res:
        # De-interleave the flat row into (int x, int y) keypoint tuples.
        points = [(int(row[i]), int(row[i + 1])) for i in range(0, len(row), 2)]
        frame = cv2.imread(background)
        for a, b in POSE_PAIRS:
            if points[a] and points[b]:
                cv2.line(frame, points[a], points[b], (0, 255, 255), 2)
                cv2.circle(frame, points[a], 5, (0, 0, 255), thickness=-1, lineType=cv2.FILLED)
                cv2.circle(frame, points[b], 5, (0, 0, 255), thickness=-1, lineType=cv2.FILLED)
        # cv2 loads BGR; convert so downstream display sees RGB.
        ret_frame.append(cv2.cvtColor(frame, cv2.COLOR_BGR2RGB))
    return ret_frame
def plot_dataset(handRightPoints, color):
    """Draw a 21-keypoint hand skeleton onto a colored background image.

    Args:
        handRightPoints: list of 21 (x, y) tuples, indexed by keypoint id.
        color: background color name; 'white' (any case) selects an
            alternate style with per-bone colors and dark point outlines.

    Returns:
        The annotated frame as loaded by cv2.imread (BGR).
    """
    # NOTE(review): ret_frame is appended to but never returned — the single
    # `frame` is returned instead; confirm whether the list was intended.
    ret_frame = []
    # Hand connectivity: each pair is a bone between two keypoint indices.
    POSE_PAIRS = [[0, 1], [1, 2], [2, 3], [3, 4], [0, 5], [5, 6], [6, 7], [7, 8], [0, 9], [9, 10], [10, 11], [11, 12], [0, 13], [13, 14], [14, 15], [15, 16], [0, 17], [17, 18], [18, 19], [19, 20]]
    # One BGR color per bone in POSE_PAIRS.
    colors = [[0, 0, 130], [0, 0, 175], [0, 0, 210], [0, 0, 250], [0, 200, 160], [0, 180, 150], [0, 230, 186], [0, 255, 255], [82, 201, 8], [82, 204, 0], [92, 230, 0], [102, 252, 6], [197, 88, 17], [204, 82, 0], [179, 71, 0], [227, 94, 5], [204, 0, 163], [200, 0, 163], [196, 0, 163], [230, 0, 184]]
    color = color.capitalize()
    background = (color + '_background.jpg')
    frame = cv2.imread(('PSL\\' + background))
    count = 0  # index into `colors`, one per bone
    for pair in POSE_PAIRS:
        partA = pair[0]
        partB = pair[1]
        if (handRightPoints[partA] and handRightPoints[partB]):
            if (color == 'White'):
                cv2.line(frame, handRightPoints[partA], handRightPoints[partB], colors[count], 10)
                cv2.circle(frame, handRightPoints[partA], 5, colors[count], thickness=10, lineType=cv2.FILLED)
                cv2.circle(frame, handRightPoints[partB], 15, (0, 0, 0), thickness=5, lineType=(- 1))
            else:
                cv2.line(frame, handRightPoints[partA], handRightPoints[partB], colors[count], 10)
                cv2.circle(frame, handRightPoints[partA], 5, (0, 0, 255), thickness=10, lineType=cv2.FILLED)
                cv2.circle(frame, handRightPoints[partB], 5, (255, 255, 255), thickness=15, lineType=cv2.FILLED)
        count += 1
    ret_frame.append(frame)
    return frame
def save_old_dataset(handRightPoints, color, name):
    """Draw a 21-keypoint hand skeleton and save it as <name>.png inside the
    temp_old_dataset_processing directory.

    Args:
        handRightPoints: list of 21 (x, y) tuples, indexed by keypoint id.
        color: background color name; 'white' (any case) selects an
            alternate style with per-bone colors and dark point outlines.
        name: output filename stem (without extension).
    """
    # Hand connectivity: each pair is a bone between two keypoint indices.
    POSE_PAIRS = [[0, 1], [1, 2], [2, 3], [3, 4], [0, 5], [5, 6], [6, 7], [7, 8], [0, 9], [9, 10], [10, 11], [11, 12], [0, 13], [13, 14], [14, 15], [15, 16], [0, 17], [17, 18], [18, 19], [19, 20]]
    # One BGR color per bone in POSE_PAIRS.
    colors = [[0, 0, 130], [0, 0, 175], [0, 0, 210], [0, 0, 250], [0, 200, 160], [0, 180, 150], [0, 230, 186], [0, 255, 255], [82, 201, 8], [82, 204, 0], [92, 230, 0], [102, 252, 6], [197, 88, 17], [204, 82, 0], [179, 71, 0], [227, 94, 5], [204, 0, 163], [200, 0, 163], [196, 0, 163], [230, 0, 184]]
    color = color.capitalize()
    background = (color + '_background.jpg')
    frame = cv2.imread(background)
    count = 0  # index into `colors`, one per bone
    for pair in POSE_PAIRS:
        partA = pair[0]
        partB = pair[1]
        if (handRightPoints[partA] and handRightPoints[partB]):
            if (color == 'White'):
                cv2.line(frame, handRightPoints[partA], handRightPoints[partB], colors[count], 10)
                cv2.circle(frame, handRightPoints[partA], 5, colors[count], thickness=10, lineType=cv2.FILLED)
                cv2.circle(frame, handRightPoints[partB], 15, (0, 0, 0), thickness=5, lineType=(- 1))
            else:
                cv2.line(frame, handRightPoints[partA], handRightPoints[partB], colors[count], 10)
                cv2.circle(frame, handRightPoints[partA], 5, (0, 0, 255), thickness=10, lineType=cv2.FILLED)
                cv2.circle(frame, handRightPoints[partB], 5, (255, 255, 255), thickness=15, lineType=cv2.FILLED)
        count += 1
    # NOTE(review): os.chdir mutates the process-wide working directory and
    # is not restored on exception — consider writing to a joined path.
    os.chdir('temp_old_dataset_processing')
    cv2.imwrite((name + '.png'), frame)
    os.chdir('..')
def plotPose(posePoints, handRightPoints, handLeftPoints):
    """Draw the upper-body pose plus both hand skeletons on a black background.

    Args:
        posePoints: list of (x, y) body keypoints.
        handRightPoints: 21 (x, y) right-hand keypoints.
        handLeftPoints: 21 (x, y) left-hand keypoints.

    Returns:
        The composed frame converted from BGR to RGB.
    """
    # Body connectivity (bones between body keypoint indices).
    POSE_PAIRS = [[1, 0], [1, 2], [1, 5], [2, 3], [3, 4], [5, 6], [6, 7], [1, 8], [0, 15], [15, 17], [0, 16], [16, 18]]
    # Hand connectivity (shared by both hands).
    HAND_PAIRS = [[0, 1], [1, 2], [2, 3], [3, 4], [0, 5], [5, 6], [6, 7], [7, 8], [0, 9], [9, 10], [10, 11], [11, 12], [0, 13], [13, 14], [14, 15], [15, 16], [0, 17], [17, 18], [18, 19], [19, 20]]
    # One BGR color per bone.
    colors = [[0, 0, 130], [0, 0, 175], [0, 0, 210], [0, 0, 250], [0, 200, 160], [0, 180, 150], [0, 230, 186], [0, 255, 255], [82, 201, 8], [82, 204, 0], [92, 230, 0], [102, 252, 6], [197, 88, 17], [204, 82, 0], [179, 71, 0], [227, 94, 5], [204, 0, 163], [200, 0, 163], [196, 0, 163], [230, 0, 184]]
    color = 'black'
    color = color.capitalize()
    background = (color + '_background.jpg')  # resolves to 'Black_background.jpg'
    frame = cv2.imread(background)
    count = 0
    # --- body skeleton: skip any bone whose endpoint has a 0 coordinate ---
    for pair in POSE_PAIRS:
        partA = pair[0]
        partB = pair[1]
        if (posePoints[partA] and posePoints[partB] and (posePoints[partA][0] != 0) and (posePoints[partA][1] != 0) and (posePoints[partB][0] != 0) and (posePoints[partB][1] != 0)):
            if (color == 'White'):  # dead branch: `color` is always 'Black' here
                cv2.line(frame, posePoints[partA], posePoints[partB], colors[count], 10)
                cv2.circle(frame, posePoints[partA], 5, colors[count], thickness=10, lineType=cv2.FILLED)
                cv2.circle(frame, posePoints[partB], 15, (0, 0, 0), thickness=5, lineType=(- 1))
            else:
                cv2.line(frame, posePoints[partA], posePoints[partB], colors[count], 10)
                cv2.circle(frame, posePoints[partA], 5, (0, 0, 255), thickness=10, lineType=cv2.FILLED)
                cv2.circle(frame, posePoints[partB], 5, (255, 255, 255), thickness=15, lineType=cv2.FILLED)
        count += 1
    count = 0
    # --- right hand ---
    for pair in HAND_PAIRS:
        partA = pair[0]
        partB = pair[1]
        if (handRightPoints[partA] and handRightPoints[partB]):
            if (color == 'White'):  # dead branch, see above
                cv2.line(frame, handRightPoints[partA], handRightPoints[partB], colors[count], 10)
                cv2.circle(frame, handRightPoints[partA], 5, colors[count], thickness=10, lineType=cv2.FILLED)
                cv2.circle(frame, handRightPoints[partB], 15, (0, 0, 0), thickness=5, lineType=(- 1))
            else:
                cv2.line(frame, handRightPoints[partA], handRightPoints[partB], colors[count], 10)
                cv2.circle(frame, handRightPoints[partA], 5, (0, 0, 255), thickness=3, lineType=cv2.FILLED)
                cv2.circle(frame, handRightPoints[partB], 5, (255, 255, 255), thickness=4, lineType=cv2.FILLED)
        count += 1
    count = 0
    # --- left hand ---
    for pair in HAND_PAIRS:
        partA = pair[0]
        partB = pair[1]
        if (handLeftPoints[partA] and handLeftPoints[partB]):
            if (color == 'White'):  # dead branch, see above
                cv2.line(frame, handLeftPoints[partA], handLeftPoints[partB], colors[count], 10)
                cv2.circle(frame, handLeftPoints[partA], 5, colors[count], thickness=10, lineType=cv2.FILLED)
                cv2.circle(frame, handLeftPoints[partB], 15, (0, 0, 0), thickness=5, lineType=(- 1))
            else:
                cv2.line(frame, handLeftPoints[partA], handLeftPoints[partB], colors[count], 10)
                cv2.circle(frame, handLeftPoints[partA], 5, (0, 0, 255), thickness=3, lineType=cv2.FILLED)
                cv2.circle(frame, handLeftPoints[partB], 5, (255, 255, 255), thickness=4, lineType=cv2.FILLED)
        count += 1
    # cv2 loads BGR; convert for RGB consumers (e.g. matplotlib).
    frame = cv2.cvtColor(frame, cv2.COLOR_BGR2RGB)
    return frame
def plotPoseDataset():
    """Render every sample in the poseDataset table (right hand, left hand
    and upper-body pose) and display each frame with matplotlib.

    Reads 42 right-hand, 42 left-hand and 26 pose values per row, rebuilds
    the (x, y) keypoints and draws the skeletons on a black background.
    """
    # Body connectivity (bones between body keypoint indices).
    POSE_PAIRS = [[1, 0], [1, 2], [1, 5], [2, 3], [3, 4], [5, 6], [6, 7], [1, 8], [0, 9], [9, 11], [0, 10], [10, 12]]
    # Hand connectivity (shared by both hands).
    HAND_PAIRS = [[0, 1], [1, 2], [2, 3], [3, 4], [0, 5], [5, 6], [6, 7], [7, 8], [0, 9], [9, 10], [10, 11], [11, 12], [0, 13], [13, 14], [14, 15], [15, 16], [0, 17], [17, 18], [18, 19], [19, 20]]
    # One BGR color per bone.
    colors = [[0, 0, 130], [0, 0, 175], [0, 0, 210], [0, 0, 250], [0, 200, 160], [0, 180, 150], [0, 230, 186], [0, 255, 255], [82, 201, 8], [82, 204, 0], [92, 230, 0], [102, 252, 6], [197, 88, 17], [204, 82, 0], [179, 71, 0], [227, 94, 5], [204, 0, 163], [200, 0, 163], [196, 0, 163], [230, 0, 184]]
    '\n    extracting data from db\n    '
    connection = sqlite3.connect('..\\data\\db\\main_dataset.db')
    crsr = connection.cursor()
    # Column order: Rx1..Ry21 (right hand), Lx1..Ly21 (left hand),
    # Px1..Py13 (pose) — 110 values per row.
    sql = 'SELECT Rx1,Ry1'
    for x in range(2, 22):
        sql = ((((sql + ',Rx') + str(x)) + ',Ry') + str(x))
    for x in range(1, 22):
        sql = ((((sql + ',Lx') + str(x)) + ',Ly') + str(x))
    for x in range(1, 14):
        sql = ((((sql + ',Px') + str(x)) + ',Py') + str(x))
    sql = (sql + ' FROM poseDataset WHERE 1')
    crsr.execute(sql)
    feature_res = crsr.fetchall()
    feature_res = np.asarray(feature_res)
    features = []
    for x in feature_res:
        features.append(x)
    # NOTE(review): debug leftover — raises IndexError on an empty table.
    print(features[0][22])
    for i in range(len(features)):
        # Pose occupies flat columns 84..109.
        posePoints = []
        for x in range(84, 110, 2):
            posePoints.append((int(features[i][x]), int(features[i][(x + 1)])))
        # Right hand occupies flat columns 0..41.
        handRightPoints = []
        for x in range(0, 42, 2):
            handRightPoints.append((int(features[i][x]), int(features[i][(x + 1)])))
        # NOTE(review): this re-reads columns 0..41 (the right-hand range);
        # per the SELECT order the left hand lives at 42..83 — likely a bug,
        # confirm before relying on the left-hand rendering.
        handLeftPoints = []
        for x in range(0, 42, 2):
            handLeftPoints.append((int(features[i][x]), int(features[i][(x + 1)])))
        color = 'black'
        color = color.capitalize()
        background = (color + '_background.jpg')  # 'Black_background.jpg'
        frame = cv2.imread(background)
        count = 0
        # --- body skeleton: skip bones whose endpoint has a 0 coordinate ---
        for pair in POSE_PAIRS:
            partA = pair[0]
            partB = pair[1]
            if (posePoints[partA] and posePoints[partB] and (posePoints[partA][0] != 0) and (posePoints[partA][1] != 0) and (posePoints[partB][0] != 0) and (posePoints[partB][1] != 0)):
                if (color == 'White'):  # dead branch: `color` is always 'Black'
                    cv2.line(frame, posePoints[partA], posePoints[partB], colors[count], 10)
                    cv2.circle(frame, posePoints[partA], 5, colors[count], thickness=10, lineType=cv2.FILLED)
                    cv2.circle(frame, posePoints[partB], 15, (0, 0, 0), thickness=5, lineType=(- 1))
                else:
                    cv2.line(frame, posePoints[partA], posePoints[partB], colors[count], 10)
                    cv2.circle(frame, posePoints[partA], 5, (0, 0, 255), thickness=10, lineType=cv2.FILLED)
                    cv2.circle(frame, posePoints[partB], 5, (255, 255, 255), thickness=15, lineType=cv2.FILLED)
            count += 1
        count = 0
        # --- right hand ---
        for pair in HAND_PAIRS:
            partA = pair[0]
            partB = pair[1]
            if (handRightPoints[partA] and handRightPoints[partB]):
                if (color == 'White'):  # dead branch, see above
                    cv2.line(frame, handRightPoints[partA], handRightPoints[partB], colors[count], 10)
                    cv2.circle(frame, handRightPoints[partA], 5, colors[count], thickness=10, lineType=cv2.FILLED)
                    cv2.circle(frame, handRightPoints[partB], 15, (0, 0, 0), thickness=5, lineType=(- 1))
                else:
                    cv2.line(frame, handRightPoints[partA], handRightPoints[partB], colors[count], 10)
                    cv2.circle(frame, handRightPoints[partA], 5, (0, 0, 255), thickness=3, lineType=cv2.FILLED)
                    cv2.circle(frame, handRightPoints[partB], 5, (255, 255, 255), thickness=4, lineType=cv2.FILLED)
            count += 1
        count = 0
        # --- left hand ---
        for pair in HAND_PAIRS:
            partA = pair[0]
            partB = pair[1]
            if (handLeftPoints[partA] and handLeftPoints[partB]):
                if (color == 'White'):  # dead branch, see above
                    cv2.line(frame, handLeftPoints[partA], handLeftPoints[partB], colors[count], 10)
                    cv2.circle(frame, handLeftPoints[partA], 5, colors[count], thickness=10, lineType=cv2.FILLED)
                    cv2.circle(frame, handLeftPoints[partB], 15, (0, 0, 0), thickness=5, lineType=(- 1))
                else:
                    cv2.line(frame, handLeftPoints[partA], handLeftPoints[partB], colors[count], 10)
                    cv2.circle(frame, handLeftPoints[partA], 5, (0, 0, 255), thickness=3, lineType=cv2.FILLED)
                    cv2.circle(frame, handLeftPoints[partB], 5, (255, 255, 255), thickness=4, lineType=cv2.FILLED)
            count += 1
        # Convert BGR -> RGB for matplotlib and show the sample.
        frame = cv2.cvtColor(frame, cv2.COLOR_BGR2RGB)
        fig2 = plt.figure(figsize=(10, 10))
        ax3 = fig2.add_subplot(111)
        ax3.imshow(frame, interpolation='none')
        plt.imshow(frame)
        plt.show()
def rotate(point, angle, center_point=(0, 0)):
    """Rotate *point* counter-clockwise by *angle* degrees about
    *center_point* (origin by default).

    Returns:
        The rotated point as a tuple with coordinates truncated to int.
    """
    theta = radians(angle % 360)
    cos_t = cos(theta)
    sin_t = sin(theta)
    # Translate into the pivot's frame, apply the rotation matrix,
    # translate back.
    dx = point[0] - center_point[0]
    dy = point[1] - center_point[1]
    rx = dx * cos_t - dy * sin_t
    ry = dx * sin_t + dy * cos_t
    return (int(rx + center_point[0]), int(ry + center_point[1]))
def rotate_file(fileName):
    """Load an OpenPose JSON file, normalize the right hand (scale then
    center), and rotate every point -60 degrees about the first keypoint.

    Returns:
        Flat [x0, y0, ...] list of the rotated points, produced by
        helper.seperate_points; reflects the last person in the file.
    """
    js = json.loads(open(fileName).read())
    for items in js['people']:
        handRight = items['hand_right_keypoints_2d']
        handPoints = helper.removePoints(handRight)
        # Reference length between keypoint 0 (flat indices 0/1) and
        # keypoint 9 (flat indices 18/19) drives the scale normalization.
        p1 = [handPoints[0], handPoints[1]]
        p2 = [handPoints[18], handPoints[19]]
        distance = math.sqrt((((p1[0] - p2[0]) ** 2) + ((p1[1] - p2[1]) ** 2)))
        (Result, Points) = scale.scalePoints(handPoints, distance)
        (handRightResults, handRightPoints) = move.centerPoints(Result)
        # Rotate all points except the pivot itself (index 0).
        newPoints = [handRightPoints[0]]
        for x in range(1, len(handRightPoints)):
            newPoints.append(rotate(handRightPoints[x], (- 60), handRightPoints[0]))
        newPoints = helper.seperate_points(newPoints)
    return newPoints
def rotate_points(points, angle):
    """Rotate every keypoint about the first one by *angle* degrees.

    Args:
        points: flat [x0, y0, x1, y1, ...] list, converted to coordinate
            pairs via helper.join_points.
        angle: rotation angle in degrees (counter-clockwise).

    Returns:
        List of (x, y) tuples: the unrotated pivot first, followed by the
        rotated remaining points.
    """
    coords = helper.join_points(points)
    pivot = coords[0]
    return [pivot] + [rotate(pt, angle, pivot) for pt in coords[1:]]
def rotate_line(origin, point, angle):
    """Rotate *point* counterclockwise by *angle* radians around *origin*.

    Unlike rotate(), this keeps full float precision (no int truncation)
    and takes the angle in radians.

    Returns:
        The rotated point as an (x, y) float tuple.
    """
    ox, oy = origin
    px, py = point
    cos_a = math.cos(angle)
    sin_a = math.sin(angle)
    dx = px - ox
    dy = py - oy
    qx = ox + cos_a * dx - sin_a * dy
    qy = oy + sin_a * dx + cos_a * dy
    return (qx, qy)
def scalePoints(handRight, distance):
    """Scale a flat [x0, y0, ...] hand-keypoint list to a reference length
    of 50, then double every coordinate (net factor 100 / distance).

    Args:
        handRight: flat list of alternating x/y coordinates.
        distance: reference length used to derive the scale factor
            (must be non-zero).

    Returns:
        Tuple of (flat scaled coordinate list, list of (int(x), int(y))
        tuples).
    """
    REF_LENGTH = 50
    factor = REF_LENGTH / distance
    # Scale, then apply the extra 2x enlargement as a separate step
    # (matches the original two-pass arithmetic).
    xs = [(v * factor) * 2 for v in handRight[0::2]]
    ys = [(v * factor) * 2 for v in handRight[1::2]]
    flat = []
    points = []
    for px, py in zip(xs, ys):
        points.append((int(px), int(py)))
        flat.append(px)
        flat.append(py)
    return (flat, points)
def dummy_scalePoints(handRight, distance):
    """Scale a flat [x0, y0, ...] hand-keypoint list to a reference length
    of 200, then double every coordinate (net factor 400 / distance).

    Args:
        handRight: flat list of alternating x/y coordinates.
        distance: reference length used to derive the scale factor
            (must be non-zero).

    Returns:
        Tuple of (flat scaled coordinate list, list of (int(x), int(y))
        tuples).
    """
    REF_LENGTH = 200
    factor = REF_LENGTH / distance
    # Scale, then apply the extra 2x enlargement as a separate step
    # (matches the original two-pass arithmetic).
    xs = [(v * factor) * 2 for v in handRight[0::2]]
    ys = [(v * factor) * 2 for v in handRight[1::2]]
    flat = []
    points = []
    for px, py in zip(xs, ys):
        points.append((int(px), int(py)))
        flat.append(px)
        flat.append(py)
    return (flat, points)
def synthesize(angle):
    """Augment rightHandDataset by inserting two rotated copies of every
    sample (rotated by -angle and +angle degrees about the first keypoint,
    via rotate.rotate_points).

    Args:
        angle: rotation magnitude in degrees.
    """
    # Single connection for both the read and the write phase (the original
    # opened a second connection and never closed the first).
    connection = sqlite3.connect('data\\db\\main_dataset.db')
    crsr = connection.cursor()
    # Fetch the 21 (x, y) keypoint columns for every sample.
    columns = ','.join('x%d,y%d' % (i, i) for i in range(1, 22))
    crsr.execute('SELECT ' + columns + ' FROM rightHandDataset WHERE 1')
    features = crsr.fetchall()
    # Labels fetched separately, relying on identical row order (same
    # assumption the original code made).
    crsr.execute('SELECT label FROM rightHandDataset WHERE 1')
    labels = crsr.fetchall()
    # NULL id (autoincrement) + 42 coordinates + 1 label = 44 columns.
    insert_sql = ('INSERT INTO rightHandDataset VALUES (NULL,'
                  + ','.join(['?'] * 43) + ')')
    for feature, label_row in zip(features, labels):
        label = str(label_row[0])
        for signed_angle in (-angle, angle):
            rotated = rotate.rotate_points(feature, signed_angle)
            flat = helper.seperate_points(rotated)
            # Parameterized insert: replaces the original's huge
            # string-concatenated SQL (injection/quoting hazard).
            crsr.execute(insert_sql, list(flat[:42]) + [label])
    connection.commit()
    connection.close()
def synthesize_multiple(angle1, angle2):
    """Augment rightHandDataset by inserting four rotated copies of every
    sample: rotated by -angle1, +angle1, -angle2 and +angle2 degrees about
    the first keypoint (via rotate.rotate_points).

    Args:
        angle1: first rotation magnitude in degrees.
        angle2: second rotation magnitude in degrees.
    """
    # Single connection for both the read and the write phase (the original
    # opened a second connection and never closed the first).
    connection = sqlite3.connect('..\\..\\data\\db\\main_dataset.db')
    crsr = connection.cursor()
    # Fetch the 21 (x, y) keypoint columns for every sample.
    columns = ','.join('x%d,y%d' % (i, i) for i in range(1, 22))
    crsr.execute('SELECT ' + columns + ' FROM rightHandDataset WHERE 1')
    features = crsr.fetchall()
    # Labels fetched separately, relying on identical row order (same
    # assumption the original code made).
    crsr.execute('SELECT label FROM rightHandDataset WHERE 1')
    labels = crsr.fetchall()
    # NULL id (autoincrement) + 42 coordinates + 1 label = 44 columns.
    insert_sql = ('INSERT INTO rightHandDataset VALUES (NULL,'
                  + ','.join(['?'] * 43) + ')')
    for feature, label_row in zip(features, labels):
        label = str(label_row[0])
        for signed_angle in (-angle1, angle1, -angle2, angle2):
            rotated = rotate.rotate_points(feature, signed_angle)
            flat = helper.seperate_points(rotated)
            # Parameterized insert: replaces the original's huge
            # string-concatenated SQL (injection/quoting hazard).
            crsr.execute(insert_sql, list(flat[:42]) + [label])
    connection.commit()
    connection.close()
def re_train(mode):
    """Rebuild a dataset table and retrain the corresponding model.

    Args:
        mode: 0 — recreate and populate the right-hand table, synthesize
              rotated samples (angle 20), then train the alphabet model;
              1 — recreate and populate the pose table, then train the
              word model. Any other value is a no-op.
    """
    if mode == 0:
        dbh.create_table()
        dbh.populate_db()
        synth.synthesize(20)
        alphabet_model.train_alphabets()
    elif mode == 1:
        dbh.create_pose_table()
        dbh.populate_words()
        word_model.train_words()
def match_ann(fileName):
    """Classify the sign captured in one OpenPose keypoint JSON file.

    Normalizes the detected body and hand keypoints, rebuilds the feature
    scaler from the SQLite pose dataset, and predicts a label with the
    module-level `model`. Returns the predicted label string, or
    'no confidence' when hand detection is too unreliable to classify.
    """
    js = json.loads(open(fileName).read())
    for items in js['people']:
        # Raw OpenPose keypoint streams (flat x, y, confidence triplets).
        pose = items['pose_keypoints_2d']
        handRight = items['hand_right_keypoints_2d']
        handLeft = items['hand_left_keypoints_2d']
        # Aggregate per-hand confidence scores via project helpers.
        RightConfPoints = helper.confidencePoints(handRight)
        LeftConfPoints = helper.confidencePoints(handLeft)
        RightConfidence = helper.confidence(RightConfPoints)
        LeftConfidence = helper.confidence(LeftConfPoints)
        if (RightConfidence > 12):
            # Left hand must be either clearly present (> 12) or clearly
            # absent (< 2); in-between scores are treated as unreliable.
            if ((LeftConfidence > 12) or (LeftConfidence < 2)):
                # --- body: scale by the nose-to-neck distance, re-center ---
                pose_points = helper.removePoints(pose)
                p1 = [pose_points[0], pose_points[1]]
                p2 = [pose_points[2], pose_points[3]]
                distance = math.sqrt((((p1[0] - p2[0]) ** 2) + ((p1[1] - p2[1]) ** 2)))
                (scaled_results, scaled_points) = norm.scaleBody(pose_points, distance)
                (poseResults, posePoints) = norm.moveBody(scaled_results)
                # --- right hand: scale by points 0->9 distance, anchor at wrist ---
                hand_right_points = helper.removePoints(handRight)
                p1 = [hand_right_points[0], hand_right_points[1]]
                p2 = [hand_right_points[18], hand_right_points[19]]
                distance = math.sqrt((((p1[0] - p2[0]) ** 2) + ((p1[1] - p2[1]) ** 2)))
                (RightResult, Points) = scale.scalePoints(hand_right_points, distance)
                (handRightResults, handRightPoints) = norm.move_to_wrist(RightResult, poseResults[8], poseResults[9])
                if (LeftConfidence > 3):
                    # --- left hand: same treatment, guarding against a
                    # zero reference distance (degenerate detection) ---
                    hand_left_points = helper.removePoints(handLeft)
                    p1 = [hand_left_points[0], hand_left_points[1]]
                    p2 = [hand_left_points[18], hand_left_points[19]]
                    distance = math.sqrt((((p1[0] - p2[0]) ** 2) + ((p1[1] - p2[1]) ** 2)))
                    if (distance != 0):
                        (LeftResult, Points) = scale.scalePoints(hand_left_points, distance)
                        (handLeftResults, handLeftPoints) = norm.move_to_wrist(LeftResult, poseResults[14], poseResults[15])
                    else:
                        (handLeftResults, handLeftPoints) = norm.move_to_wrist(hand_left_points, poseResults[14], poseResults[15])
                else:
                    # Left hand absent: 42 zeros (21 keypoints x 2 coords).
                    handLeftResults = [0.0, 0.0, 0.0, 0.0, 0.0, 0.0,
                                       0.0, 0.0, 0.0, 0.0, 0.0, 0.0,
                                       0.0, 0.0, 0.0, 0.0, 0.0, 0.0,
                                       0.0, 0.0, 0.0, 0.0, 0.0, 0.0,
                                       0.0, 0.0, 0.0, 0.0, 0.0, 0.0,
                                       0.0, 0.0, 0.0, 0.0, 0.0, 0.0,
                                       0.0, 0.0, 0.0, 0.0, 0.0, 0.0]
                # Select the pose coordinates used as features
                # (indices 0-17 and 30-37 of the normalized pose vector).
                posePoints = []
                for x in range(18):
                    posePoints.append(poseResults[x])
                for x in range(30, 38):
                    posePoints.append(poseResults[x])
                # Feature vector: right hand + left hand + selected pose.
                results = ((handRightResults + handLeftResults) + posePoints)
                # Build the SELECT column list: Rx/Ry 1-21, Lx/Ly 1-21, Px/Py 1-13.
                connection = sqlite3.connect('data\\db\\main_dataset.db')
                crsr = connection.cursor()
                sql = 'SELECT Rx1,Ry1'
                for x in range(2, 22):
                    sql = ((((sql + ',Rx') + str(x)) + ',Ry') + str(x))
                for x in range(1, 22):
                    sql = ((((sql + ',Lx') + str(x)) + ',Ly') + str(x))
                for x in range(1, 14):
                    sql = ((((sql + ',Px') + str(x)) + ',Py') + str(x))
                sql = (sql + ' FROM poseDataset WHERE 1')
                crsr.execute(sql)
                feature_res = crsr.fetchall()
                feature_res = np.asarray(feature_res)
                features = []
                for x in feature_res:
                    features.append(x)
                crsr.execute('SELECT label FROM poseDataset WHERE 1')
                label_res = crsr.fetchall()
                labels = []
                for x in label_res:
                    labels.append(x)
                # Re-fit the label encoder so predicted class indices can be
                # mapped back to label strings.
                le = preprocessing.LabelEncoder()
                label_encoded = le.fit_transform(labels)
                label_encoded = to_categorical(label_encoded)
                # NOTE(review): the split is only used to fit the scaler; the
                # X_test/y_train/y_test results are dead, and the random split
                # makes the scaler slightly nondeterministic per call — verify
                # this matches the scaler used when `model` was trained.
                (X_train, X_test, y_train, y_test) = train_test_split(features, label_encoded, test_size=0.2)
                scaler = StandardScaler().fit(X_train)
                X_train = scaler.transform(X_train)
                X_test = scaler.transform(X_test)
                # Predict with the global pre-trained model and decode the
                # argmax class index back to its label string.
                y_pred = model.predict(scaler.transform(np.array([results])))
                C = np.argmax(y_pred)
                result = le.inverse_transform([C])
                return result[0]
            else:
                return 'no confidence'
        else:
            return 'no confidence'
def signal_handler(signal, frame):
    """Interrupt handler: delete temp folders, stop OpenPose, and exit."""
    temp_dirs = ('Keypoints', 'gui\\Learn_images')
    for temp_dir in temp_dirs:
        shutil.rmtree(temp_dir, ignore_errors=True, onerror=handleRemoveReadonly)
    os.system('taskkill /f /im OpenPoseDemo.exe')
    print('All done')
    sys.exit(0)
def handleRemoveReadonly(func, path, exc):
    """`shutil.rmtree` onerror callback that retries read-only deletions.

    Parameters
    ----------
    func : callable
        The os function that failed (e.g. ``os.remove`` / ``os.rmdir``).
    path : str
        The path that could not be deleted.
    exc : tuple
        ``sys.exc_info()``-style triple describing the failure.

    On Windows, files marked read-only raise EACCES on delete; grant full
    permissions and retry. Any other failure is re-raised unchanged.
    """
    excvalue = exc[1]
    if func in (os.rmdir, os.remove) and excvalue.errno == errno.EACCES:
        # Clear the read-only attribute (rwx for user/group/other) and retry.
        os.chmod(path, stat.S_IRWXU | stat.S_IRWXG | stat.S_IRWXO)
        func(path)
    else:
        # Bug fix: re-raise the ORIGINAL exception. The previous
        # `raise Exception` discarded the real error type and message,
        # making rmtree failures impossible to diagnose.
        raise
@eel.expose
def skip_Sign():
    """GUI hook: flag the learning loop to skip past the current sign."""
    global skip_sign
    skip_sign = True
    print('skip_sign')
@eel.expose
def openposelearn():
    """Launch OpenPoseDemo.exe for learn mode.

    Keypoint JSON files are written to the temporary Keypoints folder.
    """
    print('Starting OpenPose')
    os.chdir('bin\\openpose')
    launch_cmd = ('bin\\OpenPoseDemo.exe --hand --write_json ..\\..\\Keypoints '
                  '--net_resolution 128x128 --number_people_max 1')
    subprocess.Popen(launch_cmd, shell=True)
    os.chdir('..\\..')
def plotPose(posePoints, handRightPoints, handLeftPoints):
    """Draw the body skeleton and both hand skeletons on a black background.

    Each *Points argument is a list of (x, y) keypoints (or falsy entries
    for missing points). Returns the rendered BGR frame.
    """
    # Skeleton edges: body joints, then the shared 21-keypoint hand topology.
    POSE_PAIRS = [[1, 0], [1, 2], [1, 5], [2, 3], [3, 4], [5, 6], [6, 7], [1, 8], [0, 15], [15, 17], [0, 16], [16, 18]]
    HAND_PAIRS = [[0, 1], [1, 2], [2, 3], [3, 4], [0, 5], [5, 6], [6, 7], [7, 8], [0, 9], [9, 10], [10, 11], [11, 12], [0, 13], [13, 14], [14, 15], [15, 16], [0, 17], [17, 18], [18, 19], [19, 20]]
    # One BGR color per skeleton edge (20 entries, matching HAND_PAIRS).
    colors = [[0, 0, 130], [0, 0, 175], [0, 0, 210], [0, 0, 250], [0, 200, 160], [0, 180, 150], [0, 230, 186], [0, 255, 255], [82, 201, 8], [82, 204, 0], [92, 230, 0], [102, 252, 6], [197, 88, 17], [204, 82, 0], [179, 71, 0], [227, 94, 5], [204, 0, 163], [200, 0, 163], [196, 0, 163], [230, 0, 184]]
    background = 'PSL\\BLACK_background.jpg'
    frame = cv2.imread(background)
    count = 0
    for pair in POSE_PAIRS:
        partA = pair[0]
        partB = pair[1]
        # Draw only when both endpoints exist and neither coordinate is 0
        # (OpenPose reports undetected keypoints as zeros).
        if (posePoints[partA] and posePoints[partB] and (posePoints[partA][0] != 0) and (posePoints[partA][1] != 0) and (posePoints[partB][0] != 0) and (posePoints[partB][1] != 0)):
            cv2.line(frame, posePoints[partA], posePoints[partB], colors[count], 10)
            cv2.circle(frame, posePoints[partA], 5, (0, 0, 255), thickness=10, lineType=cv2.FILLED)
            cv2.circle(frame, posePoints[partB], 5, (255, 255, 255), thickness=15, lineType=cv2.FILLED)
        # NOTE(review): color index assumed to advance per pair (not only per
        # drawn edge) — confirm against the original formatting.
        count += 1
    count = 0
    # Right-hand skeleton.
    for pair in HAND_PAIRS:
        partA = pair[0]
        partB = pair[1]
        if (handRightPoints[partA] and handRightPoints[partB]):
            cv2.line(frame, handRightPoints[partA], handRightPoints[partB], colors[count], 10)
            cv2.circle(frame, handRightPoints[partA], 5, (0, 0, 255), thickness=3, lineType=cv2.FILLED)
            cv2.circle(frame, handRightPoints[partB], 5, (255, 255, 255), thickness=4, lineType=cv2.FILLED)
        count += 1
    count = 0
    # Left-hand skeleton (same topology and colors as the right hand).
    for pair in HAND_PAIRS:
        partA = pair[0]
        partB = pair[1]
        if (handLeftPoints[partA] and handLeftPoints[partB]):
            cv2.line(frame, handLeftPoints[partA], handLeftPoints[partB], colors[count], 10)
            cv2.circle(frame, handLeftPoints[partA], 5, (0, 0, 255), thickness=3, lineType=cv2.FILLED)
            cv2.circle(frame, handLeftPoints[partB], 5, (255, 255, 255),
                       thickness=4, lineType=cv2.FILLED)
        count += 1
    return frame
@eel.expose
def learning():
    """Interactive learn mode: loop over signs until the user performs each one.

    For every target sign, continuously renders the live OpenPose keypoints
    to images for the GUI and classifies them until the predicted label
    matches the expected one (or the user presses skip).
    """
    global skip_sign
    '\n    storing json files to temporary folder [Keypoints]\n    Creating temp folder and initializing with zero padded json file\n    '
    dirName = 'Keypoints'
    fileName = 'PSL\\000000000000_keypoints.json'
    try:
        # Seed the temp folder with a zero-padded keypoint file so the first
        # scan always finds at least one JSON.
        os.mkdir(dirName)
        shutil.copy(fileName, dirName)
        print('Directory ', dirName, ' Created ')
    except FileExistsError:
        print('Directory ', dirName, ' already exists')
    label = ''
    # `fileNames` / `labels` are module-level: one entry per sign to learn.
    for x in range(len(fileNames)):
        skip_sign = False
        # Tell the GUI which sign (1-based) the user should perform next.
        eel.get_Alphabet((x + 1))
        while (label != labels[x]):
            # Pass 1: render every pending keypoint JSON to a preview image.
            for entry in os.scandir('Keypoints'):
                if entry.is_file():
                    if (os.path.splitext(entry)[1] == '.json'):
                        filePlotName = entry.name
                        try:
                            js = json.loads(open(('Keypoints\\' + filePlotName)).read())
                        except ValueError:
                            # NOTE(review): on a decode failure `js` keeps its
                            # previous value (or is unbound on the first file).
                            print('Decoding JSON has failed')
                            pass
                        for items in js['people']:
                            pose = items['pose_keypoints_2d']
                            handRight = items['hand_right_keypoints_2d']
                            handLeft = items['hand_left_keypoints_2d']
                            pose_points = helper.removePoints(pose)
                            posePoints = helper.join_points(pose_points)
                            hand_right_Points = helper.removePoints(handRight)
                            handRightPoints = helper.join_points(hand_right_Points)
                            hand_left_points = helper.removePoints(handLeft)
                            handLeftPoints = helper.join_points(hand_left_points)
                            frame = plotPose(posePoints, handRightPoints, handLeftPoints)
                            # Only push a preview when the right hand was
                            # actually detected.
                            if (hand_right_Points[0] != 0):
                                cv2.imwrite((('gui\\Learn_images\\' + filePlotName) + '.jpg'), frame)
                                frame = cv2.imread('PSL\\BLACK_background.jpg')
                                eel.get_fileName(filePlotName)
            # Yield to the eel event loop so GUI callbacks (e.g. skip) run.
            eel.sleep(0.05)
            if (skip_sign == True):
                break
            # Pass 2: classify the latest keypoint file to update `label`.
            try:
                for entry in os.scandir('Keypoints'):
                    if entry.is_file():
                        if (os.path.splitext(entry)[1] == '.json'):
                            # NOTE(review): shadows the outer `fileName`.
                            fileName = entry.name
                            try:
                                label = alphabet.match_ann(('Keypoints\\' + fileName))
                            except:
                                pass
            except UnboundLocalError:
                print('UnboundLocalError')
        # Sign matched (or skipped): notify the GUI to advance.
        eel.get_status()
    print('end while')
    return True
def on_close(page, sockets):
    """Eel close callback: log the closed page and any surviving sockets."""
    print(page, 'closed')
    print('Still have sockets open to', sockets)
def signal_handler(signal, frame):
    """Interrupt handler: drop the Keypoints temp folder, stop OpenPose, exit."""
    temp_dir = 'Keypoints'
    shutil.rmtree(temp_dir, ignore_errors=True, onerror=handleRemoveReadonly)
    os.system('taskkill /f /im OpenPoseDemo.exe')
    print('All done')
    sys.exit(0)
def handleRemoveReadonly(func, path, exc):
    """`shutil.rmtree` onerror callback that retries read-only deletions.

    Parameters
    ----------
    func : callable
        The os function that failed (e.g. ``os.remove`` / ``os.rmdir``).
    path : str
        The path that could not be deleted.
    exc : tuple
        ``sys.exc_info()``-style triple describing the failure.

    On Windows, files marked read-only raise EACCES on delete; grant full
    permissions and retry. Any other failure is re-raised unchanged.
    """
    excvalue = exc[1]
    if func in (os.rmdir, os.remove) and excvalue.errno == errno.EACCES:
        # Clear the read-only attribute (rwx for user/group/other) and retry.
        os.chmod(path, stat.S_IRWXU | stat.S_IRWXG | stat.S_IRWXO)
        func(path)
    else:
        # Bug fix: re-raise the ORIGINAL exception. The previous
        # `raise Exception` discarded the real error type and message,
        # making rmtree failures impossible to diagnose.
        raise
def plotPose(posePoints, handRightPoints, handLeftPoints):
    """Draw the body skeleton and both hand skeletons on a black background.

    Duplicate of the earlier plotPose definition (this later one shadows
    it at import time). Returns the rendered BGR frame.
    """
    # Skeleton edges: body joints, then the shared 21-keypoint hand topology.
    POSE_PAIRS = [[1, 0], [1, 2], [1, 5], [2, 3], [3, 4], [5, 6], [6, 7], [1, 8], [0, 15], [15, 17], [0, 16], [16, 18]]
    HAND_PAIRS = [[0, 1], [1, 2], [2, 3], [3, 4], [0, 5], [5, 6], [6, 7], [7, 8], [0, 9], [9, 10], [10, 11], [11, 12], [0, 13], [13, 14], [14, 15], [15, 16], [0, 17], [17, 18], [18, 19], [19, 20]]
    # One BGR color per skeleton edge (20 entries, matching HAND_PAIRS).
    colors = [[0, 0, 130], [0, 0, 175], [0, 0, 210], [0, 0, 250], [0, 200, 160], [0, 180, 150], [0, 230, 186], [0, 255, 255], [82, 201, 8], [82, 204, 0], [92, 230, 0], [102, 252, 6], [197, 88, 17], [204, 82, 0], [179, 71, 0], [227, 94, 5], [204, 0, 163], [200, 0, 163], [196, 0, 163], [230, 0, 184]]
    background = 'PSL\\BLACK_background.jpg'
    frame = cv2.imread(background)
    count = 0
    for pair in POSE_PAIRS:
        partA = pair[0]
        partB = pair[1]
        # Draw only when both endpoints exist and neither coordinate is 0
        # (OpenPose reports undetected keypoints as zeros).
        if (posePoints[partA] and posePoints[partB] and (posePoints[partA][0] != 0) and (posePoints[partA][1] != 0) and (posePoints[partB][0] != 0) and (posePoints[partB][1] != 0)):
            cv2.line(frame, posePoints[partA], posePoints[partB], colors[count], 10)
            cv2.circle(frame, posePoints[partA], 5, (0, 0, 255), thickness=10, lineType=cv2.FILLED)
            cv2.circle(frame, posePoints[partB], 5, (255, 255, 255), thickness=15, lineType=cv2.FILLED)
        # NOTE(review): color index assumed to advance per pair (not only per
        # drawn edge) — confirm against the original formatting.
        count += 1
    count = 0
    # Right-hand skeleton.
    for pair in HAND_PAIRS:
        partA = pair[0]
        partB = pair[1]
        if (handRightPoints[partA] and handRightPoints[partB]):
            cv2.line(frame, handRightPoints[partA], handRightPoints[partB], colors[count], 10)
            cv2.circle(frame, handRightPoints[partA], 5, (0, 0, 255), thickness=3, lineType=cv2.FILLED)
            cv2.circle(frame, handRightPoints[partB], 5, (255, 255, 255), thickness=4, lineType=cv2.FILLED)
        count += 1
    count = 0
    # Left-hand skeleton (same topology and colors as the right hand).
    for pair in HAND_PAIRS:
        partA = pair[0]
        partB = pair[1]
        if (handLeftPoints[partA] and handLeftPoints[partB]):
            cv2.line(frame, handLeftPoints[partA], handLeftPoints[partB], colors[count], 10)
            cv2.circle(frame, handLeftPoints[partA], 5, (0, 0, 255), thickness=3, lineType=cv2.FILLED)
            cv2.circle(frame, handLeftPoints[partB], 5, (255, 255, 255),
                       thickness=4, lineType=cv2.FILLED)
        count += 1
    return frame
@eel.expose
def exit_openpose():
    """GUI hook: force-kill any running OpenPoseDemo.exe process."""
    kill_cmd = 'taskkill /f /im OpenPoseDemo.exe'
    os.system(kill_cmd)
@eel.expose
def openpose():
    """Launch OpenPoseDemo.exe and prepare the Keypoints temp folder.

    The detector writes keypoint JSON files into Keypoints; the folder is
    seeded with a zero-padded JSON so the first scan finds a file.
    """
    print('Starting OpenPose')
    os.chdir('bin\\openpose')
    launch_cmd = ('bin\\OpenPoseDemo.exe --hand --write_json ..\\..\\Keypoints '
                  '--net_resolution 128x128 --number_people_max 1')
    subprocess.Popen(launch_cmd, shell=True)
    os.chdir('..\\..')
    # Create the temp folder and initialize it with the zero-padded file.
    dirName = 'Keypoints'
    fileName = 'PSL\\000000000000_keypoints.json'
    try:
        os.mkdir(dirName)
        shutil.copy(fileName, dirName)
        print('Directory ', dirName, ' Created ')
    except FileExistsError:
        print('Directory ', dirName, ' already exists')
@eel.expose
def match(speech, mode):
    """Classify pending keypoint files and optionally speak the result.

    speech=1 plays the matching mp3 for a newly recognized label;
    mode=0 uses the alphabet classifier, mode=1 the word classifier.
    Returns the last predicted label (a module-level global).
    """
    global label, lastLabel
    '\n    Load each .json file from Keypoints folder and\n    predict the label\n    '
    for entry in os.scandir('Keypoints'):
        if entry.is_file():
            if (os.path.splitext(entry)[1] == '.json'):
                filePlotName = entry.name
                try:
                    js = json.loads(open(('Keypoints\\' + filePlotName)).read())
                    for items in js['people']:
                        pose = items['pose_keypoints_2d']
                        handRight = items['hand_right_keypoints_2d']
                        handLeft = items['hand_left_keypoints_2d']
                        pose_points = helper.removePoints(pose)
                        posePoints = helper.join_points(pose_points)
                        hand_right_Points = helper.removePoints(handRight)
                        handRightPoints = helper.join_points(hand_right_Points)
                        hand_left_points = helper.removePoints(handLeft)
                        handLeftPoints = helper.join_points(hand_left_points)
                        # Render the skeleton preview for the GUI.
                        frame = plotPose(posePoints, handRightPoints, handLeftPoints)
                        cv2.imwrite((('gui\\Learn_images\\' + filePlotName) + '.jpg'), frame)
                        frame = cv2.imread('PSL\\BLACK_background.jpg')
                        eel.get_fileName(filePlotName)
                except:
                    # NOTE(review): bare except also swallows plotting errors,
                    # not only JSON decode failures.
                    print('Decoding JSON has failed')
                    pass
                # Classify with the model selected by `mode`.
                try:
                    if (mode == 0):
                        label = alphabet.match_ann(('Keypoints\\' + filePlotName))
                    if (mode == 1):
                        label = word.match_ann(('Keypoints\\' + filePlotName))
                    print(label)
                except Exception:
                    pass
    # Announce a label only when it is a real, newly seen prediction.
    if ((label != 'no match') and (label != 'no confidence') and (label != lastLabel)):
        lastLabel = label
        if (speech == 1):
            # Best-effort audio playback; missing mp3s are ignored.
            try:
                mp3 = (('data\\speech\\' + label) + '.mp3')
                mixer.init()
                mixer.music.load(mp3)
                mixer.music.play()
            except:
                pass
    return label
def test_file1_method1():
    """Demo pytest case: the second assertion intentionally fails (5 != 6)."""
    x, y = 5, 6
    assert x + 1 == y, 'test failed'
    assert x == y, 'test failed'
def test_file1_method2():
    """Demo pytest case that passes: 5 + 1 == 6."""
    x, y = 5, 6
    assert x + 1 == y, 'test failed'
def call_html():
    """Inject the RequireJS config (d3 3.5.8 + jQuery 2.0.0) into the notebook."""
    import IPython
    html_snippet = '\n    <script src="/static/components/requirejs/require.js"></script>\n    <script>\n      requirejs.config({\n        paths: {\n          base: \'/static/base\',\n          "d3": "https://cdnjs.cloudflare.com/ajax/libs/d3/3.5.8/d3.min",\n          jquery: \'//ajax.googleapis.com/ajax/libs/jquery/2.0.0/jquery.min\',\n        },\n      });\n    </script>\n    '
    display(IPython.core.display.HTML(html_snippet))
def call_html():
    """Inject the RequireJS config (d3 5.7.0 + jQuery 2.0.0) into the notebook.

    Shadows the earlier d3 3.5.8 variant when both are defined.
    """
    import IPython
    html_snippet = '\n    <script src="/static/components/requirejs/require.js"></script>\n    <script>\n      requirejs.config({\n        paths: {\n          base: \'/static/base\',\n          "d3": "https://cdnjs.cloudflare.com/ajax/libs/d3/5.7.0/d3.min",\n          jquery: \'//ajax.googleapis.com/ajax/libs/jquery/2.0.0/jquery.min\',\n        },\n      });\n    </script>\n    '
    display(IPython.core.display.HTML(html_snippet))
def branch2num(branch, init_root=0):
    """Map a branch path of 'L'/'R' moves to binary-heap node numbers.

    Starting from `init_root`, each 'L' step goes to child 2*n + 1 and
    each 'R' step to child 2*n + 2; other characters are ignored.
    Returns the list of visited node numbers, root included.
    """
    offsets = {'L': 1, 'R': 2}
    nodes = [init_root]
    for step in branch:
        if step in offsets:
            nodes.append(nodes[-1] * 2 + offsets[step])
    return nodes