def extract_translations(translations: List[str], texts: List[str], translate_args: Dict[str, Any]) -> List[str]:
    """
    Extract the translation from the output of the translation model.

    Args:
    - translations: A list containing the translations to be extracted.
    - texts: A list containing the texts to be extracted.
    - translate_args: A dictionary containing the translation configurations.

    Returns:
    - A list containing the extracted translations.
    """
    for i, text in enumerate(texts):
        if 'xglm' in translate_args['model_name']:
            translations[i] = translations[i].split('English:')[-1].strip()
        elif 'bloom' in translate_args['model_name']:
            translations[i] = translations[i][len(text):].split('\\n')[0].strip()
        elif 'llama' in translate_args['model_name'].lower():
            translations[i] = translations[i][len(text):].split('\\n')[0].strip()
            if ': ' in translations[i]:
                translations[i] = translations[i].split(': ')[1]
        elif 'RedPajama' in translate_args['model_name']:
            translations[i] = translations[i][len(text):].split('\\n')[0].strip()
        else:
            translations[i] = translations[i][len(text):].split('\\n')[0].strip()
    return translations
def translate_texts(dataset: DatasetDict, texts: Dict[(str, Dict[(str, List[str])])], translate_args: Dict[(str, Any)], dataset_args: Dict[(str, Any)]) -> None: '\n Translate the texts.\n\n Args:\n - dataset: A DatasetDict object containing the dataset.\n - texts: A dictionary containing the texts to be translated.\n - translate_args: A dictionary containing the translation configurations.\n - dataset_args: A dictionary containing the dataset configurations.\n\n Returns:\n - None\n ' translations = {} for config in dataset_args['dataset_configs']: translations[config] = dataset[config].to_dict() translate_args['source_lang'] = dataset_args['lang_codes'][config] print(f'Translating from {config}') for field in dataset_args['dataset_fields']: translations[config][field] = translate_few_shot.main(sentences_list=texts[config][field], return_output=True, **translate_args) translations[config][field] = extract_translations(translations[config][field], texts[config][field], translate_args) save_file(translations[config], config, translate_args, dataset_args)
def save_file(translations: Dict[(str, List[str])], config: str, translate_args: Dict[(str, Any)], dataset_args: Dict[(str, Any)]) -> None: '\n Save the translations to a file.\n\n Args:\n - translations: A dictionary containing the translations to be saved.\n - config: A string representing the configuration.\n - translate_args: A dictionary containing the translation configurations.\n - dataset_args: A dictionary containing the dataset configurations.\n\n Returns:\n - None\n ' name = translate_args['model_name'].split('/')[(- 1)] if ('LLaMA' in translate_args['model_name']): name = f'llama-{name}' dirname = f"{dataset_args['file_path']}/{name}" if (not os.path.exists(dirname)): os.makedirs(dirname) translated_df = pd.DataFrame(translations) filename = f"{dirname}/{dataset_args['filename'].format(config=config)}" if filename.endswith('.tsv'): translated_df.to_csv(filename, sep='\t', index=False) elif filename.endswith('.jsonl'): translated_df.to_json(filename, orient='records', lines=True) else: raise ValueError('Unknown file format')
def main(translate_args: Dict[(str, Any)], dataset_args: Dict[(str, Any)]) -> None: '\n Main function to translate the dataset.\n\n Args:\n - translate_args: A dictionary containing the translation configurations.\n - dataset_args: A dictionary containing the dataset configurations.\n\n Returns:\n - None\n ' dataset = get_dataset(dataset_args) texts = get_texts(dataset, dataset_args) few_shot_dataset = get_few_shot_dataset(dataset_args) prompts = get_few_shot_prompts(few_shot_dataset, dataset_args, translate_args, shots=4) texts_with_prompts = map_texts_with_prompts(texts, prompts, translate_args=translate_args) translate_texts(dataset, texts_with_prompts, translate_args, dataset_args)
def get_dataset(dataset_args): dataset = DatasetDict() for config in dataset_args['dataset_configs']: dataset[config] = load_dataset(dataset_args['dataset'], config, split=dataset_args['dataset_split']) return dataset
def get_texts(dataset, dataset_args): texts = defaultdict(dict) for config in dataset_args['dataset_configs']: for field in dataset_args['dataset_fields']: texts[config][field] = dataset[config][field] return texts
def translate_texts(dataset, texts, translate_args, dataset_args): translations = {} for config in dataset_args['dataset_configs']: translations[config] = dataset[config].to_dict() translate_args['source_lang'] = dataset_args['lang_codes'][config] print(f'Translating from {config}') for field in dataset_args['dataset_fields']: translations[config][field] = translate.main(sentences_list=texts[config][field], return_output=True, **translate_args) save_file(translations[config], config, translate_args, dataset_args)
def save_file(translations, config, translate_args, dataset_args): name = translate_args['model_name'].split('/')[(- 1)] dirname = f"{dataset_args['file_path']}/{name}" if (not os.path.exists(dirname)): os.makedirs(dirname) translated_df = pd.DataFrame(translations) filename = f"{dirname}/{dataset_args['filename'].format(config=config)}" if filename.endswith('.tsv'): translated_df.to_csv(filename, sep='\t', index=False) elif filename.endswith('.jsonl'): translated_df.to_json(filename, orient='records', lines=True) else: raise ValueError('Unknown file format')
def main(translate_args, dataset_args): dataset = get_dataset(dataset_args) texts = get_texts(dataset, dataset_args) translate_texts(dataset, texts, translate_args, dataset_args)
def encode_string(text): return text.replace('\r', '\\r').replace('\n', '\\n').replace('\t', '\\t')
def get_dataloader(accelerator: Accelerator, translate_data, tokenizer: PreTrainedTokenizerBase, batch_size: int, max_length: int) -> DataLoader: dataset = DatasetReader(translate_data, tokenizer, max_length) if (accelerator.distributed_type == DistributedType.TPU): data_collator = DataCollatorForSeq2Seq(tokenizer, padding='max_length', max_length=max_length, label_pad_token_id=tokenizer.pad_token_id, return_tensors='pt') else: data_collator = DataCollatorForSeq2Seq(tokenizer, padding=True, label_pad_token_id=tokenizer.pad_token_id, pad_to_multiple_of=8, return_tensors='pt') return DataLoader(dataset, batch_size=batch_size, collate_fn=data_collator, num_workers=0)
def main(source_lang: str, target_lang: str, starting_batch_size: int,
         model_name: str = 'facebook/m2m100_1.2B', cache_dir: str = None, precision: str = '32',
         max_length: int = 128, max_new_tokens: int = 128, num_beams: int = 4,
         num_return_sequences: int = 1, do_sample: bool = False, temperature: float = 1.0,
         top_k: int = 50, top_p: float = 1.0, keep_special_tokens: bool = False,
         eos_token: str = '</s>', sentences_path: str = None, output_path: str = None,
         sentences_list: list = None, return_output: bool = False):
    if not return_output:
        os.makedirs(os.path.abspath(os.path.dirname(output_path)), exist_ok=True)

    accelerator = Accelerator(mixed_precision=precision if precision != '32' else 'no',
                              split_batches=False, dispatch_batches=False)

    print(f'Loading tokenizer {model_name}...')
    tokenizer = AutoTokenizer.from_pretrained(pretrained_model_name_or_path=model_name,
                                              cache_dir=cache_dir,
                                              trust_remote_code=('xgen' in model_name),
                                              use_fast=('polylm' not in model_name))
    tokenizer.padding_side = 'left'
    if tokenizer.pad_token_id is None:
        tokenizer.pad_token_id = tokenizer.eos_token_id

    print(f'Loading model {model_name}...')
    model = AutoModelForCausalLM.from_pretrained(pretrained_model_name_or_path=model_name,
                                                 cache_dir=cache_dir,
                                                 trust_remote_code=('falcon' in model_name))
    model.eval()

    print('Preparing data...\n')
    if precision == '32':
        model = model.float()
    elif precision == 'fp16':
        model = model.half()
    elif precision == 'bf16':
        model = model.bfloat16()
    else:
        raise ValueError('Precision not supported. Supported values: 32, fp16, bf16')

    gen_kwargs = {'max_new_tokens': max_new_tokens, 'num_beams': num_beams,
                  'num_return_sequences': num_return_sequences, 'do_sample': do_sample,
                  'temperature': temperature, 'top_k': top_k, 'top_p': top_p}

    total_lines: int = count_lines(sentences_path) if sentences_list is None else len(sentences_list)

    if accelerator.is_main_process:
        print(f"""** Translation **
Input file: {sentences_path}
Output file: {output_path}
Source language: {source_lang}
Target language: {target_lang}
Starting batch size: {starting_batch_size}
Device: {str(accelerator.device).split(':')[0]}
Num. Devices: {accelerator.num_processes}
Distributed_type: {accelerator.distributed_type}
Max length: {max_length}
Precision: {model.dtype}
Model: {model_name}
""")
        print('** Generation parameters **')
        print('\n'.join(f'{k}: {v}' for k, v in gen_kwargs.items()))
        print('\n')

    def save_sentences(tgt_text: list):
        nonlocal return_output, output_path
        if return_output:
            save_sentences.sentences.extend(tgt_text)
        else:
            print('\n'.join(tgt_text), file=save_sentences.f)

    if not return_output:
        save_sentences.f = open(output_path, 'w', encoding='utf-8')

    @find_executable_batch_size(starting_batch_size=starting_batch_size)
    def inference(batch_size):
        nonlocal model, tokenizer, sentences_path, max_length, output_path, gen_kwargs, precision, sentences_list, return_output
        print(f'Translating with batch size {batch_size}')
        translate_data = sentences_path if sentences_list is None else sentences_list
        data_loader = get_dataloader(accelerator=accelerator, translate_data=translate_data,
                                     tokenizer=tokenizer, batch_size=batch_size, max_length=max_length)
        model, data_loader = accelerator.prepare(model, data_loader)

        samples_seen: int = 0
        save_sentences.sentences = []

        with tqdm(total=total_lines, desc='Dataset translation', leave=True, ascii=True,
                  disable=not accelerator.is_main_process) as pbar:
            with torch.no_grad():
                for step, batch in enumerate(data_loader):
                    batch = {k: v for k, v in batch.items() if k != 'token_type_ids'}
                    generated_tokens = accelerator.unwrap_model(model).generate(**batch, **gen_kwargs)
                    generated_tokens = accelerator.pad_across_processes(
                        generated_tokens, dim=1, pad_index=tokenizer.pad_token_id)
                    generated_tokens = accelerator.gather(generated_tokens).cpu().numpy()
                    tgt_text = tokenizer.batch_decode(generated_tokens,
                                                      skip_special_tokens=not keep_special_tokens)
                    if accelerator.is_main_process:
                        # the last gathered batch may contain padded duplicates; truncate to
                        # the number of sentences actually requested
                        if step == math.ceil(math.ceil(total_lines / batch_size) / accelerator.num_processes) - 1:
                            tgt_text = tgt_text[:total_lines * num_return_sequences - samples_seen]
                        else:
                            samples_seen += len(tgt_text)
                        save_sentences([encode_string(sentence) for sentence in tgt_text])
                        pbar.update(len(tgt_text) // gen_kwargs['num_return_sequences'])

    inference()
    print('Translation done.\n')

    if return_output:
        return save_sentences.sentences
def fixed_point(x, k, fraclength=None, signed=True): if (fraclength != None): f = fraclength n = float((2.0 ** f)) mn = (- (2.0 ** ((k - f) - 1))) mx = ((- mn) - (2.0 ** (- f))) if (not signed): mx -= mn mn = 0 x = tf.clip_by_value(x, mn, mx) else: n = float(((2 ** k) - 1)) return (x + tf.stop_gradient(((tf.floor(((x * n) + 0.5)) / n) - x)))
def quantize(x, bit_width, frac_bits=None, signed=None): if (bit_width is None): return x elif (bit_width == 1): return (x + tf.stop_gradient((tf.sign(x) - x))) elif (bit_width == 2): ones = tf.ones_like(x) zeros = (ones * 0) mask = tf.where((x < 0.33), zeros, ones) binary = (x + tf.stop_gradient((tf.sign(x) - x))) ternary = (binary * mask) return ternary else: x = tf.clip_by_value(x, (- 1), 1) x = ((x * 0.5) + 0.5) return ((2 * fixed_point(x, bit_width)) - 1)
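# A minimal usage sketch (not from the source) of the straight-through quantizers
# above: the forward pass yields quantized values while gradients flow unchanged to
# the full-precision weights via tf.stop_gradient. With TF 2.x eager execution the
# quantized tensors can be printed directly.
import tensorflow as tf

w = tf.constant([-0.8, -0.2, 0.1, 0.6])
print(quantize(w, 1))   # binary: sign(w)
print(quantize(w, 2))   # ternary: sign(w) zeroed where w < 0.33 (per the mask above)
print(quantize(w, 8))   # clipped to [-1, 1], then rounded on an 8-bit fixed-point grid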
class SYQ(Conv2D): def __init__(self, bit_width, *args, **kwargs): self.bit_width = bit_width super(SYQ, self).__init__(*args, **kwargs) def get_config(self): config = super().get_config() config['bit_width'] = self.bit_width return config def build(self, input_shape): input_shape = tensor_shape.TensorShape(input_shape) if (self.data_format == 'channels_first'): channel_axis = 1 else: channel_axis = (- 1) if (input_shape.dims[channel_axis].value is None): raise ValueError('The channel dimension of the inputs should be defined. Found `None`.') input_dim = int(input_shape[channel_axis]) kernel_shape = (self.kernel_size + (input_dim, self.filters)) self.kernel = self.add_weight(name='kernel', shape=kernel_shape, initializer=self.kernel_initializer, regularizer=self.kernel_regularizer, constraint=self.kernel_constraint, trainable=True, dtype=self.dtype) if self.use_bias: self.bias = self.add_weight(name='bias', shape=(self.filters,), initializer=self.bias_initializer, regularizer=self.bias_regularizer, constraint=self.bias_constraint, trainable=True, dtype=self.dtype) else: self.bias = None self.scale = self.add_weight('scale', shape=kernel_shape, initializer=keras.initializers.Ones(), dtype=self.dtype, trainable=True) self.kernel = (quantize(self.kernel, self.bit_width) * self.scale) self.input_spec = InputSpec(ndim=(self.rank + 2), axes={channel_axis: input_dim}) if (self.padding == 'causal'): op_padding = 'valid' else: op_padding = self.padding if (not isinstance(op_padding, (list, tuple))): op_padding = op_padding.upper() self._convolution_op = nn_ops.Convolution(input_shape, filter_shape=self.kernel.shape, dilation_rate=self.dilation_rate, strides=self.strides, padding=op_padding, data_format=conv_utils.convert_data_format(self.data_format, (self.rank + 2))) self.built = True
class SYQ_Dense(Dense): def __init__(self, bit_width, *args, **kwargs): self.bit_width = bit_width super(SYQ_Dense, self).__init__(*args, **kwargs) def get_config(self): config = super().get_config() config['bit_width'] = self.bit_width return config def build(self, input_shape): dtype = dtypes.as_dtype((self.dtype or K.floatx())) if (not (dtype.is_floating or dtype.is_complex)): raise TypeError(('Unable to build `Dense` layer with non-floating point dtype %s' % (dtype,))) input_shape = tensor_shape.TensorShape(input_shape) if (tensor_shape.dimension_value(input_shape[(- 1)]) is None): raise ValueError('The last dimension of the inputs to `Dense` should be defined. Found `None`.') last_dim = tensor_shape.dimension_value(input_shape[(- 1)]) self.input_spec = InputSpec(min_ndim=2, axes={(- 1): last_dim}) self.kernel = self.add_weight('kernel', shape=[last_dim, self.units], initializer=self.kernel_initializer, regularizer=self.kernel_regularizer, constraint=self.kernel_constraint, dtype=self.dtype, trainable=True) self.scale = self.add_weight('scale', shape=[1], initializer=keras.initializers.Ones(), dtype=self.dtype, trainable=True) self.kernel = (quantize(self.kernel, self.bit_width) * self.scale) if self.use_bias: self.bias = self.add_weight('bias', shape=[self.units], initializer=self.bias_initializer, regularizer=self.bias_regularizer, constraint=self.bias_constraint, dtype=self.dtype, trainable=True) else: self.bias = None self.built = True
class Model():
    def __init__(self, bit_width=None, model_name=None, load=None):
        self.bit_width = bit_width
        self.load = load
        self.model_name = model_name
        self.model = keras.Sequential([
            SYQ(self.bit_width, 32, (3, 3), activation='relu', input_shape=(28, 28, 1)),
            SYQ(self.bit_width, 32, (3, 3), activation='relu'),
            Flatten(),
            SYQ_Dense(self.bit_width, 128, activation=tf.nn.relu),
            SYQ_Dense(self.bit_width, 128, activation=tf.nn.relu),
            Dense(10, activation=tf.nn.softmax),
        ])
        print(self.model.get_config())

    def train_model(self):
        if self.load is not None:
            self.model = load_model(self.load)
        assert self.model_name is not None
        self.model.compile(optimizer='adam', loss='sparse_categorical_crossentropy', metrics=['accuracy'])
        self.model.fit(train_images, train_labels, epochs=1)
        self.model.save(self.model_name + '.h5')

    def evaluate_model(self):
        if self.load is not None:
            self.model = load_model(self.load, custom_objects={'SYQ': SYQ, 'SYQ_Dense': SYQ_Dense})
        test_loss, test_acc = self.model.evaluate(test_images, test_labels)
        print('Test accuracy:', test_acc)
        predictions = self.model.predict(test_images)
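# Hypothetical usage sketch for the Model wrapper above (names and bit width are
# illustrative, not from the source). It assumes MNIST-style arrays train_images,
# train_labels, test_images, test_labels are already loaded in the surrounding
# script, shaped (N, 28, 28, 1) and scaled to [0, 1].
m = Model(bit_width=2, model_name='syq_mnist')
m.train_model()      # compile, fit for one epoch, then save 'syq_mnist.h5'
m.evaluate_model()   # report test accuracy on test_images / test_labels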
def skip(app, what, name, obj, skip, options): if (name == '__init__'): return False return skip
def process_signature(app, what, name, obj, options, signature, return_annotation): if signature: signature = re.sub("<Mock name='([^']+)'.*>", '\\g<1>', signature) signature = re.sub('tensorflow', 'tf', signature) return (signature, return_annotation)
def setup(app): from recommonmark.transform import AutoStructify app.connect('autodoc-process-signature', process_signature) app.connect('autodoc-skip-member', skip) app.add_config_value('recommonmark_config', {'url_resolver': (lambda url: ('https://github.com/ppwwyyxx/tensorpack/blob/master/tensorpack/' + url)), 'auto_toc_tree_section': 'Contents', 'enable_math': True, 'enable_inline_math': True}, True) app.add_transform(AutoStructify)
def get_args():
    description = 'plot points into graph.'
    parser = argparse.ArgumentParser(description=description)
    parser.add_argument('-i', '--input',
                        help='input data file, use "-" for stdin. Default stdin. '
                             'Input format is many rows of DELIMITER-separated data',
                        default='-')
    parser.add_argument('-o', '--output', help='output image', default='')
    parser.add_argument('--show', help='show the figure after rendered', action='store_true')
    parser.add_argument('-c', '--column',
                        help="describe each column in data, for example 'x,y,y'. "
                             "Default to 'y' for one column and 'x,y' for two columns. "
                             "Plot attributes can be appended after 'y', like 'ythick;cr'. "
                             "By default, assume all columns are y.")
    parser.add_argument('-t', '--title', help='title of the graph', default='')
    parser.add_argument('--xlabel', help='x label', type=six.text_type)
    parser.add_argument('--ylabel', help='y label', type=six.text_type)
    parser.add_argument('--xlim', help='x lim', type=float, nargs=2)
    parser.add_argument('--ylim', help='y lim', type=float, nargs=2)
    parser.add_argument('-s', '--scale', help='scale of each y, separated by comma')
    parser.add_argument('--annotate-maximum', help='annotate maximum value in graph', action='store_true')
    parser.add_argument('--annotate-minimum', help='annotate minimum value in graph', action='store_true')
    parser.add_argument('--xkcd', help='xkcd style', action='store_true')
    parser.add_argument('--decay', help='exponential decay rate to smooth Y', type=float, default=0)
    parser.add_argument('-l', '--legend', help='legend for each y')
    parser.add_argument('-d', '--delimeter', help='column delimiter', default='\t')
    global args
    args = parser.parse_args()
    if not args.show and not args.output:
        args.show = True
def filter_valid_range(points, rect): 'rect = (min_x, max_x, min_y, max_y)' ret = [] for (x, y) in points: if ((x >= rect[0]) and (x <= rect[1]) and (y >= rect[2]) and (y <= rect[3])): ret.append((x, y)) if (len(ret) == 0): ret.append(points[0]) return ret
def exponential_smooth(data, alpha):
    """ Smooth data by alpha. Returns a smoothed version. """
    ret = np.copy(data)
    now = data[0]
    for k in range(len(data)):
        ret[k] = now * alpha + data[k] * (1 - alpha)
        now = ret[k]
    return ret
def annotate_min_max(data_x, data_y, ax):
    max_x, min_x = max(data_x), min(data_x)
    max_y, min_y = max(data_y), min(data_y)
    x_range = max_x - min_x
    y_range = max_y - min_y
    # track the coordinates of the extreme points (x from data_x, y from data_y)
    x_max, y_max = data_x[0], data_y[0]
    x_min, y_min = data_x[0], data_y[0]
    for i in range(1, len(data_x)):
        if data_y[i] > y_max:
            y_max = data_y[i]
            x_max = data_x[i]
        if data_y[i] < y_min:
            y_min = data_y[i]
            x_min = data_x[i]

    rect = ax.axis()
    if args.annotate_maximum:
        text_x, text_y = filter_valid_range(
            [(x_max + 0.05 * x_range, y_max + 0.025 * y_range),
             (x_max - 0.05 * x_range, y_max + 0.025 * y_range),
             (x_max + 0.05 * x_range, y_max - 0.025 * y_range),
             (x_max - 0.05 * x_range, y_max - 0.025 * y_range)], rect)[0]
        ax.annotate('maximum ({:d},{:.3f})'.format(int(x_max), y_max),
                    xy=(x_max, y_max), xytext=(text_x, text_y),
                    arrowprops=dict(arrowstyle='->'))
    if args.annotate_minimum:
        text_x, text_y = filter_valid_range(
            [(x_min + 0.05 * x_range, y_min - 0.025 * y_range),
             (x_min - 0.05 * x_range, y_min - 0.025 * y_range),
             (x_min + 0.05 * x_range, y_min + 0.025 * y_range),
             (x_min - 0.05 * x_range, y_min + 0.025 * y_range)], rect)[0]
        ax.annotate('minimum ({:d},{:.3f})'.format(int(x_min), y_min),
                    xy=(x_min, y_min), xytext=(text_x, text_y),
                    arrowprops=dict(arrowstyle='->'))
def plot_args_from_column_desc(desc): if (not desc): return {} ret = {} desc = desc.split(';') if ('thick' in desc): ret['lw'] = 5 if ('dash' in desc): ret['ls'] = '--' for v in desc: if v.startswith('c'): ret['color'] = v[1:] return ret
def do_plot(data_xs, data_ys):
    """
    data_xs: list of 1d array, either of size 1 or size len(data_ys)
    data_ys: list of 1d array
    """
    fig = plt.figure(figsize=(16.18 / 1.2, 10 / 1.2))
    ax = fig.add_axes((0.1, 0.2, 0.8, 0.7))
    nr_y = len(data_ys)
    y_column = args.y_column

    if args.legend:
        legends = args.legend.split(',')
        assert len(legends) == nr_y
    else:
        legends = None

    if args.scale:
        # map() returns an iterator in Python 3; materialize it so it can be
        # length-checked and indexed below
        scale = list(map(float, args.scale.split(',')))
        assert len(scale) == nr_y
    else:
        scale = [1.0] * nr_y

    for yidx in range(nr_y):
        plotargs = plot_args_from_column_desc(y_column[yidx][1:])
        now_scale = scale[yidx]
        data_y = data_ys[yidx] * now_scale
        leg = legends[yidx] if legends else None
        if now_scale != 1:
            leg = '{}*{}'.format(now_scale if int(now_scale) != now_scale else int(now_scale), leg)
        data_x = data_xs[0] if len(data_xs) == 1 else data_xs[yidx]
        assert len(data_x) >= len(data_y), \
            'x column is shorter than y column! {} < {}'.format(len(data_x), len(data_y))
        truncate_data_x = data_x[:len(data_y)]
        p = plt.plot(truncate_data_x, data_y, label=leg, **plotargs)
        c = p[0].get_color()
        plt.fill_between(truncate_data_x, data_y, alpha=0.1, facecolor=c)

        if args.annotate_maximum or args.annotate_minimum:
            annotate_min_max(truncate_data_x, data_y, ax)

    if args.xlabel:
        plt.xlabel(args.xlabel, fontsize='xx-large')
    if args.ylabel:
        plt.ylabel(args.ylabel, fontsize='xx-large')
    if args.xlim:
        plt.xlim(args.xlim[0], args.xlim[1])
    if args.ylim:
        plt.ylim(args.ylim[0], args.ylim[1])

    plt.legend(loc='best', fontsize='xx-large')

    minx, maxx = min(data_x), max(data_x)
    new_maxx = maxx + (maxx - minx) * 0.05
    plt.xlim(minx, new_maxx)

    for label in chain.from_iterable([ax.get_xticklabels(), ax.get_yticklabels()]):
        label.set_fontproperties(fontm.FontProperties(size=15))
    ax.grid(color='gray', linestyle='dashed')

    plt.title(args.title, fontdict={'fontsize': '20'})
    if args.output != '':
        plt.savefig(args.output, bbox_inches='tight')
    if args.show:
        plt.show()
def main(): get_args() if (args.input == STDIN_FNAME): fin = sys.stdin else: fin = open(args.input) all_inputs = fin.readlines() if (args.input != STDIN_FNAME): fin.close() nr_column = len(all_inputs[0].rstrip('\n').split(args.delimeter)) if (args.column is None): column = (['y'] * nr_column) else: column = args.column.strip().split(',') for k in column: assert (k[0] in ['x', 'y']) assert (nr_column == len(column)), "Column and data doesn't have same length. {}!={}".format(nr_column, len(column)) args.y_column = [v for v in column if (v[0] == 'y')] args.y_column_idx = [idx for (idx, v) in enumerate(column) if (v[0] == 'y')] args.x_column = [v for v in column if (v[0] == 'x')] args.x_column_idx = [idx for (idx, v) in enumerate(column) if (v[0] == 'x')] nr_x_column = len(args.x_column) nr_y_column = len(args.y_column) if (nr_x_column > 1): assert (nr_x_column == nr_y_column), 'If multiple x columns are used, nr_x_column must equals to nr_y_column' x_column_set = set(args.x_column) data = [[] for _ in range(nr_column)] ended = defaultdict(bool) data_format = (- 1) for (lineno, line) in enumerate(all_inputs): line = line.rstrip('\n').split(args.delimeter) assert (len(line) <= nr_column), 'One row have too many columns (separated by {})!\nLine: {}'.format(repr(args.delimeter), line) for (idx, val) in enumerate(line): if (val == ''): ended[idx] = True continue else: val = float(val) assert (not ended[idx]), 'Column {} has hole!'.format(idx) data[idx].append(val) data_ys = [data[k] for k in args.y_column_idx] length_ys = [len(t) for t in data_ys] print('Length of each column:', length_ys) max_ysize = max(length_ys) if nr_x_column: data_xs = [data[k] for k in args.x_column_idx] else: data_xs = [list(range(1, (max_ysize + 1)))] for (idx, data_y) in enumerate(data_ys): data_ys[idx] = np.asarray(data_y) if (args.decay != 0): data_ys[idx] = exponential_smooth(data_y, args.decay) for (idx, data_x) in enumerate(data_xs): data_xs[idx] = np.asarray(data_x) if args.xkcd: with plt.xkcd(): do_plot(data_xs, data_ys) else: do_plot(data_xs, data_ys)
def _global_import(name): p = __import__(name, globals(), locals(), level=1) lst = (p.__all__ if ('__all__' in dir(p)) else dir(p)) del globals()[name] for k in lst: globals()[k] = p.__dict__[k]
class PreventStuckPlayer(ProxyPlayer): " Prevent the player from getting stuck (repeating a no-op)\n by inserting a different action. Useful in games such as Atari Breakout\n where the agent needs to press the 'start' button to start playing.\n " def __init__(self, player, nr_repeat, action): "\n :param nr_repeat: trigger the 'action' after this many of repeated action\n :param action: the action to be triggered to get out of stuck\n Does auto-reset, but doesn't auto-restart the underlying player.\n " super(PreventStuckPlayer, self).__init__(player) self.act_que = deque(maxlen=nr_repeat) self.trigger_action = action def action(self, act): self.act_que.append(act) if (self.act_que.count(self.act_que[0]) == self.act_que.maxlen): act = self.trigger_action (r, isOver) = self.player.action(act) if isOver: self.act_que.clear() return (r, isOver) def restart_episode(self): super(PreventStuckPlayer, self).restart_episode() self.act_que.clear()
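# Hedged usage sketch (values are illustrative, not from the source): wrap an
# existing environment `base_player` so that if the same action repeats 30 times in
# a row, action 1 -- assumed here to be the 'FIRE'/start button of a game like
# Breakout -- is substituted and the episode does not stall.
player = PreventStuckPlayer(base_player, nr_repeat=30, action=1)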
class LimitLengthPlayer(ProxyPlayer): ' Limit the total number of actions in an episode.\n Will auto restart the underlying player on timeout\n ' def __init__(self, player, limit): super(LimitLengthPlayer, self).__init__(player) self.limit = limit self.cnt = 0 def action(self, act): (r, isOver) = self.player.action(act) self.cnt += 1 if (self.cnt >= self.limit): isOver = True self.finish_episode() self.restart_episode() if isOver: self.cnt = 0 return (r, isOver) def restart_episode(self): self.player.restart_episode() self.cnt = 0
class AutoRestartPlayer(ProxyPlayer): " Auto-restart the player on episode ends,\n in case some player wasn't designed to do so. " def action(self, act): (r, isOver) = self.player.action(act) if isOver: self.player.finish_episode() self.player.restart_episode() return (r, isOver)
class MapPlayerState(ProxyPlayer): def __init__(self, player, func): super(MapPlayerState, self).__init__(player) self.func = func def current_state(self): return self.func(self.player.current_state())
@six.add_metaclass(ABCMeta) class RLEnvironment(object): def __init__(self): self.reset_stat() @abstractmethod def current_state(self): '\n Observe, return a state representation\n ' @abstractmethod def action(self, act): '\n Perform an action. Will automatically start a new episode if isOver==True\n :param act: the action\n :returns: (reward, isOver)\n ' def restart_episode(self): " Start a new episode, even if the current hasn't ended " raise NotImplementedError() def finish_episode(self): ' get called when an episode finished' pass def get_action_space(self): ' return an `ActionSpace` instance' raise NotImplementedError() def reset_stat(self): ' reset all statistics counter' self.stats = defaultdict(list) def play_one_episode(self, func, stat='score'): ' play one episode for eval.\n :param func: call with the state and return an action\n :param stat: a key or list of keys in stats\n :returns: the stat(s) after running this episode\n ' if (not isinstance(stat, list)): stat = [stat] while True: s = self.current_state() act = func(s) (r, isOver) = self.action(act) if isOver: s = [self.stats[k] for k in stat] self.reset_stat() return (s if (len(s) > 1) else s[0])
class ActionSpace(object): def __init__(self): self.rng = get_rng(self) @abstractmethod def sample(self): pass def num_actions(self): raise NotImplementedError()
class DiscreteActionSpace(ActionSpace): def __init__(self, num): super(DiscreteActionSpace, self).__init__() self.num = num def sample(self): return self.rng.randint(self.num) def num_actions(self): return self.num def __repr__(self): return 'DiscreteActionSpace({})'.format(self.num) def __str__(self): return 'DiscreteActionSpace({})'.format(self.num)
class NaiveRLEnvironment(RLEnvironment): ' for testing only' def __init__(self): self.k = 0 def current_state(self): self.k += 1 return self.k def action(self, act): self.k = act return (self.k, (self.k > 10))
class ProxyPlayer(RLEnvironment):
    """ Serve as a proxy to another player """
    def __init__(self, player):
        self.player = player

    def reset_stat(self):
        self.player.reset_stat()

    def current_state(self):
        return self.player.current_state()

    def action(self, act):
        return self.player.action(act)

    @property
    def stats(self):
        return self.player.stats

    def restart_episode(self):
        self.player.restart_episode()

    def finish_episode(self):
        self.player.finish_episode()

    def get_action_space(self):
        return self.player.get_action_space()
class GymEnv(RLEnvironment): '\n An OpenAI/gym wrapper. Can optionally auto restart.\n Only support discrete action space now\n ' def __init__(self, name, dumpdir=None, viz=False, auto_restart=True): with _ENV_LOCK: self.gymenv = gym.make(name) if dumpdir: mkdir_p(dumpdir) self.gymenv.monitor.start(dumpdir) self.use_dir = dumpdir self.reset_stat() self.rwd_counter = StatCounter() self.restart_episode() self.auto_restart = auto_restart self.viz = viz def restart_episode(self): self.rwd_counter.reset() self._ob = self.gymenv.reset() def finish_episode(self): if (self.use_dir is not None): self.gymenv.monitor.flush() self.stats['score'].append(self.rwd_counter.sum) def current_state(self): if self.viz: self.gymenv.render() time.sleep(self.viz) return self._ob def action(self, act): (self._ob, r, isOver, info) = self.gymenv.step(act) self.rwd_counter.feed(r) if isOver: self.finish_episode() if self.auto_restart: self.restart_episode() return (r, isOver) def get_action_space(self): spc = self.gymenv.action_space assert isinstance(spc, gym.spaces.discrete.Discrete) return DiscreteActionSpace(spc.n)
class HistoryFramePlayer(ProxyPlayer): ' Include history frames in state, or use black images\n Assume player will do auto-restart.\n ' def __init__(self, player, hist_len): '\n :param hist_len: total length of the state, including the current\n and `hist_len-1` history\n ' super(HistoryFramePlayer, self).__init__(player) self.history = deque(maxlen=hist_len) s = self.player.current_state() self.history.append(s) def current_state(self): assert (len(self.history) != 0) diff_len = (self.history.maxlen - len(self.history)) if (diff_len == 0): return np.concatenate(self.history, axis=2) zeros = [np.zeros_like(self.history[0]) for k in range(diff_len)] for k in self.history: zeros.append(k) assert (len(zeros) == self.history.maxlen) return np.concatenate(zeros, axis=2) def action(self, act): (r, isOver) = self.player.action(act) s = self.player.current_state() self.history.append(s) if isOver: self.history.clear() self.history.append(s) return (r, isOver) def restart_episode(self): super(HistoryFramePlayer, self).restart_episode() self.history.clear() self.history.append(self.player.current_state())
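# Hedged sketch (illustrative, not from the source): stack the 4 most recent
# observations along the channel axis, zero-padding right after a reset. Assumes an
# existing environment `base_player` that auto-restarts, as the docstring requires.
player = HistoryFramePlayer(base_player, hist_len=4)
state = player.current_state()   # channel dimension is 4x that of a single frame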
class TransitionExperience(object): ' A transition of state, or experience' def __init__(self, state, action, reward, **kwargs): ' kwargs: whatever other attribute you want to save' self.state = state self.action = action self.reward = reward for (k, v) in six.iteritems(kwargs): setattr(self, k, v)
@six.add_metaclass(ABCMeta) class SimulatorProcessBase(mp.Process): def __init__(self, idx): super(SimulatorProcessBase, self).__init__() self.idx = int(idx) self.name = u'simulator-{}'.format(self.idx) self.identity = self.name.encode('utf-8') @abstractmethod def _build_player(self): pass
class SimulatorProcessStateExchange(SimulatorProcessBase): '\n A process that simulates a player and communicates to master to\n send states and receive the next action\n ' def __init__(self, idx, pipe_c2s, pipe_s2c): '\n :param idx: idx of this process\n ' super(SimulatorProcessStateExchange, self).__init__(idx) self.c2s = pipe_c2s self.s2c = pipe_s2c def run(self): player = self._build_player() context = zmq.Context() c2s_socket = context.socket(zmq.PUSH) c2s_socket.setsockopt(zmq.IDENTITY, self.identity) c2s_socket.set_hwm(2) c2s_socket.connect(self.c2s) s2c_socket = context.socket(zmq.DEALER) s2c_socket.setsockopt(zmq.IDENTITY, self.identity) s2c_socket.connect(self.s2c) state = player.current_state() (reward, isOver) = (0, False) while True: c2s_socket.send(dumps((self.identity, state, reward, isOver)), copy=False) action = loads(s2c_socket.recv(copy=False).bytes) (reward, isOver) = player.action(action) state = player.current_state()
class SimulatorMaster(threading.Thread): ' A base thread to communicate with all StateExchangeSimulatorProcess.\n It should produce action for each simulator, as well as\n defining callbacks when a transition or an episode is finished.\n ' class ClientState(object): def __init__(self): self.memory = [] def __init__(self, pipe_c2s, pipe_s2c): super(SimulatorMaster, self).__init__() self.daemon = True self.name = 'SimulatorMaster' self.context = zmq.Context() self.c2s_socket = self.context.socket(zmq.PULL) self.c2s_socket.bind(pipe_c2s) self.c2s_socket.set_hwm(10) self.s2c_socket = self.context.socket(zmq.ROUTER) self.s2c_socket.bind(pipe_s2c) self.s2c_socket.set_hwm(10) self.send_queue = queue.Queue(maxsize=100) def f(): msg = self.send_queue.get() self.s2c_socket.send_multipart(msg, copy=False) self.send_thread = LoopThread(f) self.send_thread.daemon = True self.send_thread.start() def clean_context(soks, context): for s in soks: s.close() context.term() import atexit atexit.register(clean_context, [self.c2s_socket, self.s2c_socket], self.context) def run(self): self.clients = defaultdict(self.ClientState) while True: msg = loads(self.c2s_socket.recv(copy=False).bytes) (ident, state, reward, isOver) = msg client = self.clients[ident] if (len(client.memory) > 0): client.memory[(- 1)].reward = reward if isOver: self._on_episode_over(ident) else: self._on_datapoint(ident) self._on_state(state, ident) @abstractmethod def _on_state(self, state, ident): 'response to state sent by ident. Preferrably an async call' @abstractmethod def _on_episode_over(self, client): " callback when the client just finished an episode.\n You may want to clear the client's memory in this callback.\n " def _on_datapoint(self, client): ' callback when the client just finished a transition\n ' def __del__(self): self.context.destroy(linger=0)
class SimulatorProcessDF(SimulatorProcessBase): ' A simulator which contains a forward model itself, allowing\n it to produce data points directly ' def __init__(self, idx, pipe_c2s): super(SimulatorProcessDF, self).__init__(idx) self.pipe_c2s = pipe_c2s def run(self): self.player = self._build_player() self.ctx = zmq.Context() self.c2s_socket = self.ctx.socket(zmq.PUSH) self.c2s_socket.setsockopt(zmq.IDENTITY, self.identity) self.c2s_socket.set_hwm(5) self.c2s_socket.connect(self.pipe_c2s) self._prepare() for dp in self.get_data(): self.c2s_socket.send(dumps(dp), copy=False) @abstractmethod def _prepare(self): pass @abstractmethod def get_data(self): pass
class SimulatorProcessSharedWeight(SimulatorProcessDF): ' A simulator process with an extra thread waiting for event,\n and take shared weight from shm.\n\n Start me under some CUDA_VISIBLE_DEVICES set!\n ' def __init__(self, idx, pipe_c2s, condvar, shared_dic, pred_config): super(SimulatorProcessSharedWeight, self).__init__(idx, pipe_c2s) self.condvar = condvar self.shared_dic = shared_dic self.pred_config = pred_config def _prepare(self): disable_layer_logging() self.predictor = OfflinePredictor(self.pred_config) with self.predictor.graph.as_default(): vars_to_update = self._params_to_update() self.sess_updater = SessionUpdate(self.predictor.session, vars_to_update) self.predictor.graph.finalize() self.weight_lock = threading.Lock() def func(): self.condvar.acquire() while True: self.condvar.wait() self._trigger_evt() self.evt_th = threading.Thread(target=func) self.evt_th.daemon = True self.evt_th.start() def _trigger_evt(self): with self.weight_lock: self.sess_updater.update(self.shared_dic['params']) logger.info('Updated.') def _params_to_update(self): return tf.trainable_variables()
class WeightSync(Callback): ' Sync weight from main process to shared_dic and notify' def __init__(self, condvar, shared_dic): self.condvar = condvar self.shared_dic = shared_dic def _setup_graph(self): self.vars = self._params_to_update() def _params_to_update(self): return tf.trainable_variables() def _before_train(self): self._sync() def _trigger_epoch(self): self._sync() def _sync(self): logger.info('Updating weights ...') dic = {v.name: v.eval() for v in self.vars} self.shared_dic['params'] = dic self.condvar.acquire() self.condvar.notify_all() self.condvar.release()
def _global_import(name): p = __import__(name, globals(), locals(), level=1) lst = (p.__all__ if ('__all__' in dir(p)) else dir(p)) del globals()[name] for k in lst: globals()[k] = p.__dict__[k] __all__.append(k)
@six.add_metaclass(ABCMeta) class Callback(object): ' Base class for all callbacks ' def before_train(self): '\n Called right before the first iteration.\n ' self._before_train() def _before_train(self): pass def setup_graph(self, trainer): '\n Called before finalizing the graph.\n Use this callback to setup some ops used in the callback.\n\n :param trainer: :class:`train.Trainer` instance\n ' self.trainer = trainer self.graph = tf.get_default_graph() self.epoch_num = (self.trainer.config.starting_epoch - 1) with tf.name_scope(type(self).__name__): self._setup_graph() def _setup_graph(self): pass def after_train(self): '\n Called after training.\n ' self._after_train() def _after_train(self): pass def trigger_step(self): '\n Callback to be triggered after every step (every backpropagation)\n\n Could be useful to apply some tricks on parameters (clipping, low-rank, etc)\n ' def trigger_epoch(self): '\n Triggered after every epoch.\n\n In this function, self.epoch_num would be the number of epoch finished.\n ' self.epoch_num += 1 self._trigger_epoch() def _trigger_epoch(self): pass def __str__(self): return type(self).__name__
class ProxyCallback(Callback): def __init__(self, cb): self.cb = cb def _before_train(self): self.cb.before_train() def _setup_graph(self): self.cb.setup_graph(self.trainer) def _after_train(self): self.cb.after_train() def _trigger_epoch(self): self.cb.trigger_epoch() def __str__(self): return ('Proxy-' + str(self.cb))
class PeriodicCallback(ProxyCallback): "\n A callback to be triggered after every `period` epochs.\n Doesn't work for trigger_step\n " def __init__(self, cb, period): '\n :param cb: a `Callback`\n :param period: int\n ' super(PeriodicCallback, self).__init__(cb) self.period = int(period) def _trigger_epoch(self): if ((self.epoch_num % self.period) == 0): self.cb.epoch_num = (self.epoch_num - 1) self.cb.trigger_epoch() def __str__(self): return ('Periodic-' + str(self.cb))
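# Hedged usage sketch: run a wrapped callback only every 3 epochs. ModelSaver
# (defined further below) is just one example of a Callback that can be wrapped.
cb = PeriodicCallback(ModelSaver(), period=3)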
class StartProcOrThread(Callback): def __init__(self, procs_threads): '\n Start extra threads and processes before training\n :param procs_threads: list of processes or threads\n ' if (not isinstance(procs_threads, list)): procs_threads = [procs_threads] self._procs_threads = procs_threads def _before_train(self): logger.info(('Starting ' + ', '.join([k.name for k in self._procs_threads]))) start_proc_mask_signal(self._procs_threads)
class OutputTensorDispatcer(object): def __init__(self): self._names = [] self._idxs = [] def add_entry(self, names): v = [] for n in names: tensorname = get_op_tensor_name(n)[1] if (tensorname in self._names): v.append(self._names.index(tensorname)) else: self._names.append(tensorname) v.append((len(self._names) - 1)) self._idxs.append(v) def get_all_names(self): return self._names def get_idx_for_each_entry(self): return self._idxs
class DumpParamAsImage(Callback): '\n Dump a variable to image(s) after every epoch to logger.LOG_DIR.\n ' def __init__(self, var_name, prefix=None, map_func=None, scale=255, clip=False): '\n :param var_name: the name of the variable.\n :param prefix: the filename prefix for saved images. Default is the op name.\n :param map_func: map the value of the variable to an image or list of\n images of shape [h, w] or [h, w, c]. If None, will use identity\n :param scale: a multiplier on pixel values, applied after map_func. default to 255\n :param clip: whether to clip the result to [0, 255]\n ' (op_name, self.var_name) = get_op_var_name(var_name) self.func = map_func if (prefix is None): self.prefix = op_name else: self.prefix = prefix self.log_dir = logger.LOG_DIR self.scale = scale self.clip = clip def _before_train(self): self.var = self.graph.get_tensor_by_name(self.var_name) def _trigger_epoch(self): val = self.trainer.sess.run(self.var) if (self.func is not None): val = self.func(val) if isinstance(val, list): for (idx, im) in enumerate(val): self._dump_image(im, idx) else: self._dump_image(val) def _dump_image(self, im, idx=None): assert (im.ndim in [2, 3]), str(im.ndim) fname = os.path.join(self.log_dir, (self.prefix + '-ep{:03d}{}.png'.format(self.epoch_num, (('-' + str(idx)) if idx else '')))) res = (im * self.scale) if self.clip: res = np.clip(res, 0, 255) cv2.imwrite(fname, res.astype('uint8'))
class RunOp(Callback): ' Run an op periodically' def __init__(self, setup_func, run_before=True, run_epoch=True): '\n :param setup_func: a function that returns the op in the graph\n :param run_before: run the op before training\n :param run_epoch: run the op on every epoch trigger\n ' self.setup_func = setup_func self.run_before = run_before self.run_epoch = run_epoch def _setup_graph(self): self._op = self.setup_func() def _before_train(self): if self.run_before: self._op.run() def _trigger_epoch(self): if self.run_epoch: self._op.run()
class CallbackTimeLogger(object): def __init__(self): self.times = [] self.tot = 0 def add(self, name, time): self.tot += time self.times.append((name, time)) @contextmanager def timed_callback(self, name): s = time.time() (yield) self.add(name, (time.time() - s)) def log(self): ' log the time of some heavy callbacks ' if (self.tot < 3): return msgs = [] for (name, t) in self.times: if (((t / self.tot) > 0.3) and (t > 1)): msgs.append('{}: {:.3f}sec'.format(name, t)) logger.info('Callbacks took {:.3f} sec in total. {}'.format(self.tot, '; '.join(msgs)))
class Callbacks(Callback): '\n A container to hold all callbacks, and execute them in the right order and proper session.\n ' def __init__(self, cbs): '\n :param cbs: a list of `Callbacks`\n ' for cb in cbs: assert isinstance(cb, Callback), cb.__class__ for cb in cbs: if isinstance(cb, StatPrinter): sp = cb cbs.remove(sp) cbs.append(sp) break else: raise ValueError('Callbacks must contain StatPrinter for stat and writer to work properly!') self.cbs = cbs def _setup_graph(self): with tf.name_scope(None): for cb in self.cbs: cb.setup_graph(self.trainer) def _before_train(self): for cb in self.cbs: cb.before_train() def _after_train(self): for cb in self.cbs: cb.after_train() def trigger_step(self): for cb in self.cbs: cb.trigger_step() def _trigger_epoch(self): tm = CallbackTimeLogger() test_sess_restored = False for cb in self.cbs: display_name = str(cb) with tm.timed_callback(display_name): cb.trigger_epoch() tm.log() def append(self, cb): assert isinstance(cb, Callback) self.cbs.append(cb)
@six.add_metaclass(ABCMeta) class Inferencer(object): def before_inference(self): '\n Called before a new round of inference starts.\n ' self._before_inference() def _before_inference(self): pass def datapoint(self, output): '\n Called after complete running every data point\n ' self._datapoint(output) @abstractmethod def _datapoint(self, output): pass def after_inference(self): '\n Called after a round of inference ends.\n Returns a dict of statistics which will be logged by the InferenceRunner.\n The inferencer needs to handle other kind of logging by their own.\n ' return self._after_inference() def _after_inference(self): pass def get_output_tensors(self): '\n Return a list of tensor names needed for this inference\n ' return self._get_output_tensors() @abstractmethod def _get_output_tensors(self): pass
class ScalarStats(Inferencer): '\n Write some scalar tensor to both stat and summary.\n The output of the given Ops must be a scalar.\n The value will be averaged over all data points in the inference dataflow.\n ' def __init__(self, names_to_print, prefix='validation'): '\n :param names_to_print: list of names of tensors, or just a name\n :param prefix: an optional prefix for logging\n ' if (not isinstance(names_to_print, list)): self.names = [names_to_print] else: self.names = names_to_print self.prefix = prefix def _get_output_tensors(self): return self.names def _before_inference(self): self.stats = [] def _datapoint(self, output): self.stats.append(output) def _after_inference(self): self.stats = np.mean(self.stats, axis=0) assert (len(self.stats) == len(self.names)) ret = {} for (stat, name) in zip(self.stats, self.names): (opname, _) = get_op_var_name(name) name = ('{}_{}'.format(self.prefix, opname) if self.prefix else opname) ret[name] = stat return ret
class ClassificationError(Inferencer):
    """
    Compute classification error in batch mode, from a `wrong` variable.

    The `wrong` tensor is supposed to be a 0/1 integer vector containing
    whether each sample in the batch is incorrectly classified.
    You can use `tf.nn.in_top_k` to produce this vector to record top-k error as well.

    This callback produces the "true" error, taking into account the fact that
    batches might not have the same size in testing (because the size of the test
    set might not be a multiple of the batch size).
    Therefore the result is different from averaging the error rate of each batch.
    """
    def __init__(self, wrong_var_name='incorrect_vector', summary_name='val_error'):
        """
        :param wrong_var_name: name of the `wrong` variable
        :param summary_name: the name for logging
        """
        self.wrong_var_name = wrong_var_name
        self.summary_name = summary_name

    def _get_output_tensors(self):
        return [self.wrong_var_name]

    def _before_inference(self):
        self.err_stat = RatioCounter()

    def _datapoint(self, outputs):
        vec = outputs[0]
        if vec.ndim == 0:
            logger.error("[DEPRECATED] use a 'wrong vector' for ClassificationError instead of nr_wrong")
            sys.exit(1)
        else:
            assert vec.ndim == 1, '{} is not a vector!'.format(self.wrong_var_name)
            batch_size = len(vec)
            wrong = np.sum(vec)
            self.err_stat.feed(wrong, batch_size)

    def _after_inference(self):
        return {self.summary_name: self.err_stat.ratio}
class BinaryClassificationStats(Inferencer): ' Compute precision/recall in binary classification, given the\n prediction vector and the label vector.\n ' def __init__(self, pred_var_name, label_var_name, summary_prefix='val'): '\n :param pred_var_name: name of the 0/1 prediction tensor.\n :param label_var_name: name of the 0/1 label tensor.\n ' self.pred_var_name = pred_var_name self.label_var_name = label_var_name self.prefix = summary_prefix def _get_output_tensors(self): return [self.pred_var_name, self.label_var_name] def _before_inference(self): self.stat = BinaryStatistics() def _datapoint(self, outputs): (pred, label) = outputs self.stat.feed(pred, label) def _after_inference(self): return {(self.prefix + '_precision'): self.stat.precision, (self.prefix + '_recall'): self.stat.recall}
def summary_inferencer(trainer, infs): for inf in infs: ret = inf.after_inference() for (k, v) in six.iteritems(ret): try: v = float(v) except: logger.warn('{} returns a non-scalar statistics!'.format(type(inf).__name__)) continue trainer.write_scalar_summary(k, v)
class InferenceRunner(Callback): '\n A callback that runs different kinds of inferencer.\n ' IOTensor = namedtuple('IOTensor', ['index', 'isOutput']) def __init__(self, ds, infs, inf_epochs, input_tensors=None): '\n :param ds: inference dataset. a `DataFlow` instance.\n :param infs: a list of `Inferencer` instance.\n :param input_tensor_names: list of tensors to feed the dataflow to.\n default to all the input placeholders.\n ' assert isinstance(ds, DataFlow), ds self.ds = ds if (not isinstance(infs, list)): self.infs = [infs] else: self.infs = infs for v in self.infs: assert isinstance(v, Inferencer), v self.input_tensors = input_tensors self.inf_epochs = inf_epochs def _setup_graph(self): self._find_input_tensors() self._find_output_tensors() self.pred_func = self.trainer.get_predict_func(self.input_tensors, self.output_tensors) def _find_input_tensors(self): if (self.input_tensors is None): input_vars = self.trainer.model.get_reuse_placehdrs() def get_name(x): if isinstance(x, tf.SparseTensor): return x.op.name.split('/')[0] return x.name self.input_tensors = [get_name(x) for x in input_vars] def _find_output_tensors(self): dispatcer = OutputTensorDispatcer() for inf in self.infs: dispatcer.add_entry(inf.get_output_tensors()) all_names = dispatcer.get_all_names() IOTensor = InferenceRunner.IOTensor self.output_tensors = list(filter((lambda x: (x not in self.input_tensors)), all_names)) def find_oid(idxs): ret = [] for idx in idxs: name = all_names[idx] if (name in self.input_tensors): ret.append(IOTensor(self.input_tensors.index(name), False)) else: ret.append(IOTensor(self.output_tensors.index(name), True)) return ret self.inf_to_tensors = [find_oid(t) for t in dispatcer.get_idx_for_each_entry()] def _trigger_epoch(self): if np.any((self.inf_epochs[:] == self.epoch_num)): for inf in self.infs: inf.before_inference() sess = tf.get_default_session() self.ds.reset_state() with get_tqdm(total=self.ds.size()) as pbar: for dp in self.ds.get_data(): outputs = self.pred_func(dp) for (inf, tensormap) in zip(self.infs, self.inf_to_tensors): inf_output = [(outputs if k.isOutput else dp)[k.index] for k in tensormap] inf.datapoint(inf_output) pbar.update() self._write_summary_after_inference() def _write_summary_after_inference(self): summary_inferencer(self.trainer, self.infs)
class FeedfreeInferenceRunner(Callback): IOTensor = namedtuple('IOTensor', ['index', 'isOutput']) def __init__(self, input, infs, input_tensors=None): assert isinstance(input, FeedfreeInput), input self._input_data = input if (not isinstance(infs, list)): self.infs = [infs] else: self.infs = infs for v in self.infs: assert isinstance(v, Inferencer), v self.input_tensor_names = input_tensors def _setup_graph(self): self._find_input_tensors() self._find_output_tensors() def _find_input_tensors(self): self._input_data._setup(self.trainer) self._input_tensors = self._input_data.get_input_tensors() model_placehdrs = self.trainer.model.get_reuse_placehdrs() assert (len(self._input_tensors) == len(model_placehdrs)), "FeedfreeInput doesn't produce correct number of output tensors" if (self.input_tensor_names is not None): assert isinstance(self.input_tensor_names, list) self._input_tensors = [k for (idx, k) in enumerate(self._input_tensors) if (model_placehdrs[idx].name in self.input_tensor_names)] assert (len(self._input_tensors) == len(self.input_tensor_names)), 'names of input tensors are not defined in the Model' def _find_output_tensors(self): dispatcer = OutputTensorDispatcer() for inf in self.infs: dispatcer.add_entry(inf.get_output_tensors()) all_names = dispatcer.get_all_names() IOTensor = InferenceRunner.IOTensor self.output_tensors = all_names def find_oid(idxs): ret = [] for idx in idxs: name = all_names[idx] ret.append(IOTensor(self.output_tensors.index(name), True)) return ret self.inf_to_tensors = [find_oid(t) for t in dispatcer.get_idx_for_each_entry()] def _trigger_epoch(self): for inf in self.infs: inf.before_inference() sess = tf.get_default_session() sz = self._input_data.size() with get_tqdm(total=sz) as pbar: for _ in range(sz): pbar.update() self._write_summary_after_inference() def _write_summary_after_inference(self): summary_inferencer(self.trainer, self.infs)
@six.add_metaclass(ABCMeta) class HyperParam(object): ' Base class for a hyper param' def setup_graph(self): ' setup the graph in `setup_graph` callback stage, if necessary' pass @abstractmethod def set_value(self, v): ' define how the value of the param will be set' pass @property def readable_name(self): ' A name to display' return self._readable_name
class GraphVarParam(HyperParam): ' a variable in the graph can be a hyperparam' def __init__(self, name, shape=[]): self.name = name self.shape = shape (self._readable_name, self.var_name) = get_op_var_name(name) def setup_graph(self): try: all_vars = tf.global_variables() except: all_vars = tf.all_variables() for v in all_vars: if (v.name == self.var_name): self.var = v break else: raise ValueError('{} is not a VARIABLE in the graph!'.format(self.var_name)) self.val_holder = tf.placeholder(tf.float32, shape=self.shape, name=(self._readable_name + '_feed')) self.assign_op = self.var.assign(self.val_holder) def set_value(self, v): self.assign_op.eval(feed_dict={self.val_holder: v}) def get_value(self): return self.var.eval()
class ObjAttrParam(HyperParam): ' an attribute of an object can be a hyperparam' def __init__(self, obj, attrname, readable_name=None): ' :param readable_name: default to be attrname.' self.obj = obj self.attrname = attrname if (readable_name is None): self._readable_name = attrname else: self._readable_name = readable_name def set_value(self, v): setattr(self.obj, self.attrname, v) def get_value(self, v): return getattr(self.obj, self.attrname)
class HyperParamSetter(Callback): '\n Base class to set hyperparameters after every epoch.\n ' def __init__(self, param): '\n :param param: a `HyperParam` instance, or a string (assumed to be a scalar `GraphVarParam`)\n ' if isinstance(param, six.string_types): param = GraphVarParam(param) assert isinstance(param, HyperParam), type(param) self.param = param self.last_value = None def _setup_graph(self): self.param.setup_graph() def get_value_to_set(self): '\n :returns: the value to assign to the variable now.\n ' ret = self._get_value_to_set() if ((ret is not None) and (ret != self.last_value)): logger.info('{} at epoch {} will change to {:.8f}'.format(self.param.readable_name, (self.epoch_num + 1), ret)) self.last_value = ret return ret def get_current_value(self): return self.param.get_value() @abstractmethod def _get_value_to_set(self): pass def _trigger_epoch(self): self._set_param() def _before_train(self): self._set_param() def _set_param(self): v = self.get_value_to_set() if (v is not None): self.param.set_value(v)
class HumanHyperParamSetter(HyperParamSetter): '\n Set hyperparameters by loading the value from a file each time it get called.\n ' def __init__(self, param, file_name='hyper.txt'): '\n :param file_name: a file containing the value of the variable.\n Each line in the file is a k:v pair, where k is\n param.readable_name, and v is the value\n ' super(HumanHyperParamSetter, self).__init__(param) self.file_name = os.path.join(logger.LOG_DIR, file_name) logger.info('Use {} to control hyperparam {}.'.format(self.file_name, self.param.readable_name)) def _get_value_to_set(self): if (not os.path.isfile(self.file_name)): return None try: with open(self.file_name) as f: lines = f.readlines() lines = [s.strip().split(':') for s in lines] dic = {str(k): float(v) for (k, v) in lines} ret = dic[self.param.readable_name] return ret except: logger.warn('Cannot find {} in {}'.format(self.param.readable_name, self.file_name)) return None
class ScheduledHyperParamSetter(HyperParamSetter): '\n Set hyperparameters by a predefined schedule.\n ' def __init__(self, param, schedule, interp=None): "\n :param schedule: [(epoch1, val1), (epoch2, val2), (epoch3, val3), ...]\n (ep, val) means set the param to `val` after the `ep`th epoch.\n If epoch == 0, the value is set before training.\n :param interp: None: no interpolation. 'linear': linear interpolation\n " schedule = [(int(a), float(b)) for (a, b) in schedule] self.schedule = sorted(schedule, key=operator.itemgetter(0)) if (interp is not None): assert (interp == 'linear') self.interp = interp super(ScheduledHyperParamSetter, self).__init__(param) def _get_value_to_set(self): if (self.interp is None): for (e, v) in self.schedule: if (e == self.epoch_num): return v return None else: (laste, lastv) = (None, None) for (e, v) in self.schedule: if (e == self.epoch_num): return v if (e > self.epoch_num): break (laste, lastv) = (e, v) if ((laste is None) or (laste == e)): return None v = (((((self.epoch_num - laste) * 1.0) / (e - laste)) * (v - lastv)) + lastv) return v
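# Hedged usage sketch following the schedule format in the docstring (epochs and
# values are illustrative, not from the source): set the graph variable
# 'learning_rate' to 1e-3 after epoch 1, 1e-4 after epoch 80 and 1e-5 after epoch 120.
lr_schedule = ScheduledHyperParamSetter('learning_rate', [(1, 1e-3), (80, 1e-4), (120, 1e-5)])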
class HyperParamSetterWithFunc(HyperParamSetter):
    """ Set hyperparameter by a func: new_value = f(epoch_num, old_value) """
    def __init__(self, param, func):
        super(HyperParamSetterWithFunc, self).__init__(param)
        self.f = func

    def _get_value_to_set(self):
        return self.f(self.epoch_num, self.get_current_value())
class StatMonitorParamSetter(HyperParamSetter):
    def __init__(self, param, stat_name, value_func, threshold, last_k, reverse=False):
        """
        Set hyperparameter by a func, when a specific stat wasn't
        decreasing/increasing enough in the last $k$ epochs.
        Change param by `new_value = value_func(old_value)`, if:
        min(stats) >= stats[0] - threshold, where
        stats = [`stat_name` in latest `last_k` epochs]

        For example, if error wasn't decreasing, anneal the learning rate, e.g.:
            StatMonitorParamSetter('learning_rate', 'val-error',
                                   lambda x: x * 0.2, threshold=0, last_k=5)

        If reverse == True, use 'increasing' instead of 'decreasing'.
        """
        super(StatMonitorParamSetter, self).__init__(param)
        self.stat_name = stat_name
        self.value_func = value_func
        self.last_k = last_k
        self.threshold = threshold
        self.reverse = reverse
        self.last_changed_epoch = 0

    def _get_value_to_set(self):
        holder = self.trainer.stat_holder
        hist = holder.get_stat_history(self.stat_name)
        if len(hist) < self.last_k + 1 or \
                self.epoch_num - self.last_changed_epoch < self.last_k:
            return None
        hist = hist[-self.last_k - 1:]    # len == last_k + 1
        hist_first = hist[0]
        if not self.reverse:
            hist_min = min(hist)
            if hist_min < hist_first - self.threshold:
                return None    # the stat decreased enough; don't trigger
        else:
            hist_max = max(hist)
            if hist_max > hist_first + self.threshold:
                return None    # the stat increased enough; don't trigger
        self.last_changed_epoch = self.epoch_num
        logger.info("[StatMonitorParamSetter] Triggered, history: " +
                    ','.join(map(str, hist)))
        return self.value_func(self.get_current_value())
class ModelSaver(Callback):
    """
    Save the model to logger directory.
    """
    def __init__(self, keep_recent=10, keep_freq=0.5, var_collections=None):
        """
        :param keep_recent: see `tf.train.Saver` documentation.
        :param keep_freq: see `tf.train.Saver` documentation.
        """
        self.keep_recent = keep_recent
        self.keep_freq = keep_freq
        if var_collections is None:
            try:
                var_collections = tf.GraphKeys.GLOBAL_VARIABLES
            except AttributeError:
                # fallback for older TF versions
                var_collections = tf.GraphKeys.VARIABLES
        if not isinstance(var_collections, list):
            var_collections = [var_collections]
        self.var_collections = var_collections

    def _setup_graph(self):
        vars = []
        for key in self.var_collections:
            vars.extend(tf.get_collection(key))
        self.path = os.path.join(logger.LOG_DIR, 'model')
        try:
            self.saver = tf.train.Saver(
                var_list=ModelSaver._get_var_dict(vars),
                max_to_keep=self.keep_recent,
                keep_checkpoint_every_n_hours=self.keep_freq,
                write_version=tf.train.SaverDef.V2)
        except Exception:
            # older TF doesn't support the V2 checkpoint format
            self.saver = tf.train.Saver(
                var_list=ModelSaver._get_var_dict(vars),
                max_to_keep=self.keep_recent,
                keep_checkpoint_every_n_hours=self.keep_freq)
        self.meta_graph_written = False

    @staticmethod
    def _get_var_dict(vars):
        var_dict = {}
        for v in vars:
            name = get_savename_from_varname(v.name)
            if name not in var_dict:
                if name != v.name:
                    logger.info("[ModelSaver] {} renamed to {} when saving model.".format(
                        v.name, name))
                var_dict[name] = v
            else:
                logger.info("[ModelSaver] Variable {} won't be saved because {} will be "
                            "saved instead (an alternative in a different tower).".format(
                                v.name, var_dict[name].name))
        return var_dict

    def _trigger_epoch(self):
        try:
            if not self.meta_graph_written:
                self.saver.export_meta_graph(
                    os.path.join(logger.LOG_DIR,
                                 'graph-{}.meta'.format(logger.get_time_str())),
                    collection_list=self.graph.get_all_collection_keys())
                self.meta_graph_written = True
            self.saver.save(
                tf.get_default_session(), self.path,
                global_step=get_global_step(), write_meta_graph=False)
        except (OSError, IOError):
            logger.exception("Exception in ModelSaver.trigger_epoch!")
class MinSaver(Callback):
    """ Save the model with the minimum value of some statistics. """
    def __init__(self, monitor_stat, reverse=False, filename=None):
        self.monitor_stat = monitor_stat
        self.reverse = reverse
        self.filename = filename
        self.min = None

    def _get_stat(self):
        try:
            v = self.trainer.stat_holder.get_stat_now(self.monitor_stat)
        except KeyError:
            v = None
        return v

    def _need_save(self):
        v = self._get_stat()
        if v is None:
            return False
        return v > self.min if self.reverse else v < self.min

    def _trigger_epoch(self):
        if self.min is None or self._need_save():
            self.min = self._get_stat()
            if self.min is not None:
                self._save()

    def _save(self):
        ckpt = tf.train.get_checkpoint_state(logger.LOG_DIR)
        if ckpt is None:
            raise RuntimeError(
                "Cannot find a checkpoint state. Do you forget to use ModelSaver?")
        path = ckpt.model_checkpoint_path
        newname = os.path.join(
            logger.LOG_DIR,
            self.filename or
            ('max-' if self.reverse else 'min-') + self.monitor_stat + '.tfmodel')
        shutil.copy(path, newname)
        logger.info("Model with {} '{}' saved.".format(
            'maximum' if self.reverse else 'minimum', self.monitor_stat))
class MaxSaver(MinSaver):
    """ Save the model with the maximum value of some statistics. """
    def __init__(self, monitor_stat):
        super(MaxSaver, self).__init__(monitor_stat, True)
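A hedged usage sketch: since MinSaver/MaxSaver copy the checkpoint most recently written by ModelSaver, ModelSaver should come first in the callback list. The list below is illustrative; how callbacks are registered depends on the trainer configuration.

callbacks = [
    ModelSaver(),
    MinSaver('val-error'),     # snapshot the checkpoint with the lowest val-error
    MaxSaver('val-accuracy'),  # snapshot the checkpoint with the highest val-accuracy
]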
class StatHolder(object):
    """
    A holder to keep all statistics aside from tensorflow events.
    """
    def __init__(self, log_dir):
        """
        :param log_dir: directory to save the stats.
        """
        self.set_print_tag([])
        self.blacklist_tag = set()
        self.stat_now = {}
        self.log_dir = log_dir
        self.filename = os.path.join(log_dir, 'stat.json')
        if os.path.isfile(self.filename):
            logger.info("Found stats at {}, will append to it.".format(self.filename))
            with open(self.filename) as f:
                self.stat_history = json.load(f)
        else:
            self.stat_history = []

    def add_stat(self, k, v):
        """
        Add a stat.
        :param k: name
        :param v: value
        """
        self.stat_now[k] = float(v)

    def set_print_tag(self, print_tag):
        """
        Set name of stats to print.
        """
        self.print_tag = None if print_tag is None else set(print_tag)

    def add_blacklist_tag(self, blacklist_tag):
        """ Disable printing for some tags. """
        self.blacklist_tag |= set(blacklist_tag)

    def get_stat_now(self, key):
        """
        Return the value of a stat in the current epoch.
        """
        return self.stat_now[key]

    def get_stat_history(self, key):
        ret = []
        for h in self.stat_history:
            v = h.get(key, None)
            if v is not None:
                ret.append(v)
        v = self.stat_now.get(key, None)
        if v is not None:
            ret.append(v)
        return ret

    def finalize(self):
        """
        Called after finishing adding stats for this epoch.
        Will print and write stats to disk.
        """
        self._print_stat()
        self.stat_history.append(self.stat_now)
        self.stat_now = {}
        self._write_stat()

    def _print_stat(self):
        for k, v in sorted(self.stat_now.items(), key=operator.itemgetter(0)):
            if self.print_tag is None or k in self.print_tag:
                if k not in self.blacklist_tag:
                    logger.info('{}: {:.5g}'.format(k, v))

    def _write_stat(self):
        # write atomically: dump to a tmp file, then rename over the old one
        tmp_filename = self.filename + '.tmp'
        try:
            with open(tmp_filename, 'w') as f:
                json.dump(self.stat_history, f)
            os.rename(tmp_filename, self.filename)
        except IOError:
            logger.exception("Exception in StatHolder.finalize()!")
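A minimal sketch of the per-epoch protocol StatHolder implements. Normally the trainer drives this; the directory name here is hypothetical and must already exist, otherwise the write is logged and skipped.

holder = StatHolder('/tmp/train_log')
holder.add_stat('val-error', 0.17)     # collect stats during the epoch
holder.finalize()                      # print, archive into stat.json, reset stat_now
history = holder.get_stat_history('val-error')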
class StatPrinter(Callback):
    """
    Control what stats to print.
    """
    def __init__(self, print_tag=None):
        """
        :param print_tag: a list of regex to match scalar summary to print.
            If None, will print all scalar tags.
        """
        self.print_tag = print_tag

    def _before_train(self):
        self._stat_holder = self.trainer.stat_holder
        self._stat_holder.set_print_tag(self.print_tag)
        self._stat_holder.add_blacklist_tag(['global_step', 'epoch_num'])
        self._stat_holder.add_stat('epoch_num', self.epoch_num + 1)

    def _trigger_epoch(self):
        self._stat_holder.add_stat('global_step', get_global_step())
        self._stat_holder.finalize()
        self._stat_holder.add_stat('epoch_num', self.epoch_num + 1)
class SendStat(Callback):
    """
    Execute a command with some specific stats.
    For example, send the stats to your phone through pushbullet:

        SendStat('curl -u your_id: https://api.pushbullet.com/v2/pushes '
                 '-d type=note -d title="validation error" '
                 '-d body={validation_error} > /dev/null 2>&1',
                 'validation_error')
    """
    def __init__(self, command, stats):
        self.command = command
        if not isinstance(stats, list):
            stats = [stats]
        self.stats = stats

    def _trigger_epoch(self):
        holder = self.trainer.stat_holder
        v = {k: holder.get_stat_now(k) for k in self.stats}
        cmd = self.command.format(**v)
        ret = os.system(cmd)
        if ret != 0:
            logger.error("Command {} failed with ret={}!".format(cmd, ret))
def _global_import(name):
    p = __import__(name, globals(), locals(), level=1)
    lst = p.__all__ if '__all__' in dir(p) else dir(p)
    del globals()[name]
    for k in lst:
        globals()[k] = p.__dict__[k]
@six.add_metaclass(ABCMeta)
class DataFlow(object):
    """ Base class for all DataFlow """

    class Infinity:
        pass

    @abstractmethod
    def get_data(self):
        """
        A generator to generate data as a list.
        Datapoint should be a mutable list.
        Each component should be assumed immutable.
        """

    def size(self):
        """
        Size of this data flow.
        """
        raise NotImplementedError()

    def reset_state(self):
        """
        Reset state of the dataflow. Will always be called before consuming data points.
        For example, RNG **HAS** to be reset here if used in the DataFlow.
        Otherwise it may not work well with prefetching, because different
        processes will have the same RNG state.
        """
        pass
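To make the interface concrete, a minimal custom DataFlow, assuming nothing beyond the base class above:

class CounterData(DataFlow):
    """ A toy DataFlow yielding the datapoints [0], [1], ..., [n-1]. """
    def __init__(self, n):
        self.n = n

    def size(self):
        return self.n

    def get_data(self):
        for i in range(self.n):
            yield [i]   # a datapoint is a mutable list of components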
class RNGDataFlow(DataFlow):
    """ A DataFlow with RNG """
    def reset_state(self):
        self.rng = get_rng(self)
class ProxyDataFlow(DataFlow):
    """ Base class for DataFlow that proxies another """
    def __init__(self, ds):
        """
        :param ds: a :mod:`DataFlow` instance to proxy
        """
        self.ds = ds

    def reset_state(self):
        """
        Will reset state of the proxied DataFlow
        """
        self.ds.reset_state()

    def size(self):
        return self.ds.size()
class TestDataSpeed(ProxyDataFlow):
    """ Test the speed of a DataFlow. """
    def __init__(self, ds, size=1000):
        super(TestDataSpeed, self).__init__(ds)
        self.test_size = size

    def get_data(self):
        """ Run the speed test once, then produce data normally. """
        self.start_test()
        for dp in self.ds.get_data():
            yield dp

    def start_test(self):
        self.ds.reset_state()
        with get_tqdm(total=self.test_size) as pbar:
            for idx, dp in enumerate(self.ds.get_data()):
                pbar.update()
                if idx == self.test_size - 1:
                    break
class BatchData(ProxyDataFlow):
    def __init__(self, ds, batch_size, remainder=False):
        """
        Group data in `ds` into batches.

        :param ds: a DataFlow instance. Its component must be either a scalar or a numpy array
        :param remainder: whether to return the remaining data smaller than a batch_size.
            If set True, will possibly return a data point of a smaller 1st dimension.
            Otherwise, all generated data are guaranteed to have the same size.
        """
        super(BatchData, self).__init__(ds)
        if not remainder:
            try:
                assert batch_size <= ds.size()
            except NotImplementedError:
                pass
        self.batch_size = batch_size
        self.remainder = remainder

    def size(self):
        ds_size = self.ds.size()
        div = ds_size // self.batch_size
        rem = ds_size % self.batch_size
        if rem == 0:
            return div
        return div + int(self.remainder)

    def get_data(self):
        """
        :returns: produce batched data by stacking datapoints on an extra 0th dimension.
        """
        holder = []
        for data in self.ds.get_data():
            holder.append(data)
            if len(holder) == self.batch_size:
                yield BatchData._aggregate_batch(holder)
                del holder[:]
        if self.remainder and len(holder) > 0:
            yield BatchData._aggregate_batch(holder)

    @staticmethod
    def _aggregate_batch(data_holder):
        size = len(data_holder[0])
        result = []
        for k in range(size):
            dt = data_holder[0][k]
            if type(dt) in [int, bool]:
                tp = 'int32'
            elif type(dt) == float:
                tp = 'float32'
            else:
                tp = dt.dtype
            try:
                result.append(np.array([x[k] for x in data_holder], dtype=tp))
            except KeyboardInterrupt:
                raise
            except Exception:
                logger.exception("Cannot batch data. Perhaps they are of inconsistent shape?")
                # drop into a shell to inspect the offending data
                import IPython as IP
                IP.embed(config=IP.terminal.ipapp.load_default_config())
        return result
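A sketch of BatchData on the toy CounterData above: each yielded datapoint component gains a 0th batch dimension.

ds = BatchData(CounterData(10), batch_size=4, remainder=True)
ds.reset_state()
for dp in ds.get_data():
    print(dp[0].shape)   # (4,), (4,), then (2,) for the remainder batch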
class BatchDataByShape(BatchData):
    def __init__(self, ds, batch_size, idx):
        """
        Group datapoints of the same shape together into batches.

        :param ds: a DataFlow instance. Its component must be either a scalar or a numpy array
        :param idx: dp[idx] will be used to group datapoints. Other components
            in dp are assumed to have the same shape.
        """
        super(BatchDataByShape, self).__init__(ds, batch_size, remainder=False)
        self.idx = idx

    def reset_state(self):
        super(BatchDataByShape, self).reset_state()
        self.holder = defaultdict(list)

    def get_data(self):
        for dp in self.ds.get_data():
            shp = dp[self.idx].shape
            holder = self.holder[shp]
            holder.append(dp)
            if len(holder) == self.batch_size:
                yield BatchData._aggregate_batch(holder)
                del holder[:]
class FixedSizeData(ProxyDataFlow):
    """
    Generate data from another DataFlow, but with a fixed epoch size.
    The state of the underlying DataFlow is maintained among each epoch.
    """
    def __init__(self, ds, size):
        """
        :param ds: a :mod:`DataFlow` to produce data
        :param size: an int
        """
        super(FixedSizeData, self).__init__(ds)
        self._size = int(size)
        self.itr = None

    def size(self):
        return self._size

    def get_data(self):
        """
        Produce data from ds, stop at size
        """
        if self.itr is None:
            self.itr = self.ds.get_data()
        cnt = 0
        while True:
            try:
                dp = next(self.itr)
            except StopIteration:
                # restart the underlying iterator transparently
                self.itr = self.ds.get_data()
                dp = next(self.itr)
            cnt += 1
            yield dp
            if cnt == self._size:
                return
class RepeatedData(ProxyDataFlow):
    """
    Take data points from another `DataFlow` and produce them until
    it's exhausted a certain number of times.
    """
    def __init__(self, ds, nr):
        """
        :param ds: a :mod:`DataFlow` instance.
        :param nr: number of times to repeat ds.
            If nr == -1, repeat ds infinitely many times.
        """
        if nr == -1:
            nr = DataFlow.Infinity
        self.nr = nr
        super(RepeatedData, self).__init__(ds)

    def size(self):
        if self.nr == DataFlow.Infinity:
            raise RuntimeError("size() is unavailable for infinite dataflow")
        return self.ds.size() * self.nr

    def get_data(self):
        if self.nr == DataFlow.Infinity:
            while True:
                for dp in self.ds.get_data():
                    yield dp
        else:
            for _ in range(self.nr):
                for dp in self.ds.get_data():
                    yield dp
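A sketch of the infinite mode, which is how a dataflow is typically kept alive for queue-based input (reusing the CounterData toy from above):

ds = RepeatedData(CounterData(10), nr=-1)
ds.reset_state()
itr = ds.get_data()
points = [next(itr) for _ in range(25)]   # wraps around every 10 datapoints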
class MapData(ProxyDataFlow):
    """ Apply a map/filter function on the datapoint. """
    def __init__(self, ds, func):
        """
        :param ds: a :mod:`DataFlow` instance.
        :param func: a function that takes an original datapoint and returns a new
            datapoint. Return None to skip this data point.
            Note that if you use the filter feature, ds.size() won't be correct.
        """
        super(MapData, self).__init__(ds)
        self.func = func

    def get_data(self):
        for dp in self.ds.get_data():
            ret = self.func(dp)
            if ret is not None:
                yield ret
class MapDataComponent(ProxyDataFlow):
    """ Apply a map/filter function on the given index of the datapoint. """
    def __init__(self, ds, func, index=0):
        """
        :param ds: a :mod:`DataFlow` instance.
        :param func: a function that takes a datapoint component dp[index] and returns a
            new value of dp[index]. Return None to skip this datapoint.
            Note that if you use the filter feature, ds.size() won't be correct.
        """
        super(MapDataComponent, self).__init__(ds)
        self.func = func
        self.index = index

    def get_data(self):
        for dp in self.ds.get_data():
            repl = self.func(dp[self.index])
            if repl is not None:
                dp[self.index] = repl
                yield dp
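A sketch contrasting the two map classes. ds_images and IGNORE_LABEL are hypothetical names; the pattern is what matters.

# Transform only component 0 (the image) of an [image, label] datapoint:
ds = MapDataComponent(ds_images, lambda img: img / 255.0, index=0)
# MapData receives and returns the whole datapoint; returning None drops it:
ds = MapData(ds, lambda dp: None if dp[1] == IGNORE_LABEL else dp)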
class RandomChooseData(RNGDataFlow):
    """
    Randomly choose from several DataFlow. Stop producing when any of them is
    exhausted.
    """
    def __init__(self, df_lists):
        """
        :param df_lists: list of dataflows, or list of (dataflow, probability) tuples
        """
        super(RandomChooseData, self).__init__()
        if isinstance(df_lists[0], (tuple, list)):
            assert sum([v[1] for v in df_lists]) == 1.0
            self.df_lists = df_lists
        else:
            prob = 1.0 / len(df_lists)
            self.df_lists = [(k, prob) for k in df_lists]

    def reset_state(self):
        super(RandomChooseData, self).reset_state()
        for d in self.df_lists:
            if isinstance(d, tuple):
                d[0].reset_state()
            else:
                d.reset_state()

    def get_data(self):
        itrs = [v[0].get_data() for v in self.df_lists]
        probs = np.array([v[1] for v in self.df_lists])
        try:
            while True:
                itr = self.rng.choice(itrs, p=probs)
                yield next(itr)
        except StopIteration:
            return
class RandomMixData(RNGDataFlow):
    """
    Randomly choose from several dataflows, and eventually exhaust all of them.
    So it's a perfect mix.
    """
    def __init__(self, df_lists):
        """
        :param df_lists: list of dataflows.
            All DataFlow in `df_lists` must have :func:`size()` implemented
        """
        super(RandomMixData, self).__init__()
        self.df_lists = df_lists
        self.sizes = [k.size() for k in self.df_lists]

    def reset_state(self):
        super(RandomMixData, self).reset_state()
        for d in self.df_lists:
            d.reset_state()

    def size(self):
        return sum(self.sizes)

    def get_data(self):
        # assign each global index to a dataflow, then shuffle the assignment
        sums = np.cumsum(self.sizes)
        idxs = np.arange(self.size())
        self.rng.shuffle(idxs)
        idxs = np.array(list(map(lambda x: np.searchsorted(sums, x, 'right'), idxs)))
        itrs = [k.get_data() for k in self.df_lists]
        assert idxs.max() == len(itrs) - 1, "{}!={}".format(idxs.max(), len(itrs) - 1)
        for k in idxs:
            yield next(itrs[k])
class ConcatData(DataFlow):
    """
    Concatenate several dataflows.
    """
    def __init__(self, df_lists):
        """
        :param df_lists: list of :mod:`DataFlow` instances
        """
        self.df_lists = df_lists

    def reset_state(self):
        for d in self.df_lists:
            d.reset_state()

    def size(self):
        return sum([x.size() for x in self.df_lists])

    def get_data(self):
        for d in self.df_lists:
            for dp in d.get_data():
                yield dp
class JoinData(DataFlow):
    """
    Join the components from each DataFlow.

    .. code-block:: none

        e.g.: df1: [dp1, dp2]
              df2: [dp3, dp4]
              join: [dp1, dp2, dp3, dp4]
    """
    def __init__(self, df_lists):
        """
        :param df_lists: list of :mod:`DataFlow` instances
        """
        self.df_lists = df_lists
        self._size = self.df_lists[0].size()
        for d in self.df_lists:
            assert d.size() == self._size, \
                "All DataFlow must have the same size! {} != {}".format(d.size(), self._size)

    def reset_state(self):
        for d in self.df_lists:
            d.reset_state()

    def size(self):
        return self._size

    def get_data(self):
        itrs = [k.get_data() for k in self.df_lists]
        try:
            while True:
                dp = []
                for itr in itrs:
                    dp.extend(next(itr))
                yield dp
        except StopIteration:
            pass
        finally:
            for itr in itrs:
                del itr
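A sketch contrasting JoinData with ConcatData, using hypothetical equal-sized dataflows:

zipped = JoinData([images_ds, labels_ds])   # each dp: [image, label]
chained = ConcatData([train_a, train_b])    # all dp from train_a, then all from train_b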
class LocallyShuffleData(ProxyDataFlow, RNGDataFlow):
    def __init__(self, ds, cache_size, nr_reuse=1):
        """
        Cache a number of datapoints and shuffle them.

        :param cache_size: size of the cache
        :param nr_reuse: reuse each datapoint several times
        """
        ProxyDataFlow.__init__(self, ds)
        self.q = deque(maxlen=cache_size)
        self.nr_reuse = nr_reuse

    def reset_state(self):
        ProxyDataFlow.reset_state(self)
        RNGDataFlow.reset_state(self)
        self.ds_itr = self.ds.get_data()
        self.current_cnt = 0

    def get_data(self):
        def add_next():
            dp = next(self.ds_itr)
            for _ in range(self.nr_reuse):
                self.q.append(dp)

        try:
            # fill the cache
            while self.q.maxlen > len(self.q):
                add_next()
        except StopIteration:
            logger.error("LocallyShuffleData: cache_size is larger than the size of ds!")
        while True:
            self.rng.shuffle(self.q)
            for _ in range(self.q.maxlen):
                for _ in range(self.nr_reuse):
                    yield self.q.popleft()
                try:
                    add_next()
                except StopIteration:
                    # produce the rest of the cache
                    self.rng.shuffle(self.q)
                    for v in self.q:
                        yield v
                    return
def SelectComponent(ds, idxs):
    """
    :param ds: a :mod:`DataFlow` instance
    :param idxs: a list of datapoint component indices of the original dataflow
    """
    return MapData(ds, lambda dp: [dp[i] for i in idxs])
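A sketch of SelectComponent on a hypothetical [image, label] dataflow; listing indices in a different order also reorders components:

images_only = SelectComponent(ds, [0])
label_first = SelectComponent(ds, [1, 0])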
def global_import(name):
    p = __import__(name, globals(), locals(), level=1)
    lst = p.__all__ if '__all__' in dir(p) else dir(p)
    for k in lst:
        globals()[k] = p.__dict__[k]
class BSDS500(RNGDataFlow):
    """
    `Berkeley Segmentation Data Set and Benchmarks 500
    <http://www.eecs.berkeley.edu/Research/Projects/CS/vision/grouping/resources.html#bsds500>`_.

    Produce (image, label) pair, where image has shape (321, 481, 3) and
    ranges in [0,255]. Label has shape (321, 481) and is the fraction of
    annotators who marked each pixel as a boundary, in [0,1].
    This is used in `Holistically-Nested Edge Detection
    <http://arxiv.org/abs/1504.06375>`_.
    """
    def __init__(self, name, data_dir=None, shuffle=True):
        """
        :param name: 'train', 'test', 'val'
        :param data_dir: a directory containing the original 'BSR' directory.
        """
        if data_dir is None:
            data_dir = get_dataset_path('bsds500_data')
        if not os.path.isdir(os.path.join(data_dir, 'BSR')):
            download(DATA_URL, data_dir)
            filename = DATA_URL.split('/')[-1]
            filepath = os.path.join(data_dir, filename)
            import tarfile
            tarfile.open(filepath, 'r:gz').extractall(data_dir)
        self.data_root = os.path.join(data_dir, 'BSR', 'BSDS500', 'data')
        assert os.path.isdir(self.data_root)
        self.shuffle = shuffle
        assert name in ['train', 'test', 'val']
        self._load(name)

    def _load(self, name):
        image_glob = os.path.join(self.data_root, 'images', name, '*.jpg')
        image_files = glob.glob(image_glob)
        gt_dir = os.path.join(self.data_root, 'groundTruth', name)
        self.data = np.zeros((len(image_files), IMG_H, IMG_W, 3), dtype='uint8')
        self.label = np.zeros((len(image_files), IMG_H, IMG_W), dtype='float32')
        for idx, f in enumerate(image_files):
            im = cv2.imread(f, cv2.IMREAD_COLOR)
            assert im is not None
            if im.shape[0] > im.shape[1]:
                # normalize orientation to landscape
                im = np.transpose(im, (1, 0, 2))
            assert im.shape[:2] == (IMG_H, IMG_W), \
                "{} != {}".format(im.shape[:2], (IMG_H, IMG_W))
            imgid = os.path.basename(f).split('.')[0]
            gt_file = os.path.join(gt_dir, imgid)
            gt = loadmat(gt_file)['groundTruth'][0]
            n_annot = gt.shape[0]
            # average the boundary maps over all annotators
            gt = sum(gt[k]['Boundaries'][0][0] for k in range(n_annot))
            gt = gt.astype('float32')
            gt *= 1.0 / n_annot
            if gt.shape[0] > gt.shape[1]:
                gt = gt.transpose()
            assert gt.shape == (IMG_H, IMG_W)
            self.data[idx] = im
            self.label[idx] = gt

    def size(self):
        return self.data.shape[0]

    def get_data(self):
        idxs = np.arange(self.data.shape[0])
        if self.shuffle:
            self.rng.shuffle(idxs)
        for k in idxs:
            yield [self.data[k], self.label[k]]
def maybe_download_and_extract(dest_directory, cifar_classnum):
    """Download and extract the tarball from Alex Krizhevsky's website.
    Copied from the tensorflow example."""
    assert cifar_classnum == 10 or cifar_classnum == 100
    if cifar_classnum == 10:
        cifar_foldername = 'cifar-10-batches-py'
    else:
        cifar_foldername = 'cifar-100-python'
    if os.path.isdir(os.path.join(dest_directory, cifar_foldername)):
        logger.info("Found cifar{} data in {}.".format(cifar_classnum, dest_directory))
        return
    else:
        DATA_URL = DATA_URL_CIFAR_10 if cifar_classnum == 10 else DATA_URL_CIFAR_100
        download(DATA_URL, dest_directory)
        filename = DATA_URL.split('/')[-1]
        filepath = os.path.join(dest_directory, filename)
        import tarfile
        tarfile.open(filepath, 'r:gz').extractall(dest_directory)
def read_cifar(filenames, cifar_classnum):
    assert cifar_classnum == 10 or cifar_classnum == 100
    ret = []
    for fname in filenames:
        with open(fname, 'rb') as fo:
            if six.PY3:
                dic = pickle.load(fo, encoding='bytes')
            else:
                dic = pickle.load(fo)
        data = dic[b'data']
        if cifar_classnum == 10:
            label = dic[b'labels']
            IMG_NUM = 10000    # cifar10 data are split into blocks of 10000
        elif cifar_classnum == 100:
            label = dic[b'fine_labels']
            IMG_NUM = 50000 if 'train' in fname else 10000
        for k in range(IMG_NUM):
            img = data[k].reshape(3, 32, 32)
            img = np.transpose(img, [1, 2, 0])
            ret.append([img, label[k]])
    return ret
def get_filenames(dir, cifar_classnum):
    assert cifar_classnum == 10 or cifar_classnum == 100
    if cifar_classnum == 10:
        filenames = [os.path.join(dir, 'cifar-10-batches-py', 'data_batch_%d' % i)
                     for i in range(1, 6)]
        filenames.append(os.path.join(dir, 'cifar-10-batches-py', 'test_batch'))
    elif cifar_classnum == 100:
        filenames = [os.path.join(dir, 'cifar-100-python', 'train'),
                     os.path.join(dir, 'cifar-100-python', 'test')]
    return filenames
class CifarBase(RNGDataFlow):
    """
    Return [image, label],
    image is 32x32x3 in the range [0,255]
    """
    def __init__(self, train_or_test, shuffle=True, dir=None, cifar_classnum=10):
        """
        Args:
            train_or_test: string either 'train' or 'test'
            shuffle: defaults to True
        """
        assert train_or_test in ['train', 'test']
        assert cifar_classnum == 10 or cifar_classnum == 100
        self.cifar_classnum = cifar_classnum
        if dir is None:
            dir = get_dataset_path('cifar{}_data'.format(cifar_classnum))
        maybe_download_and_extract(dir, self.cifar_classnum)
        fnames = get_filenames(dir, cifar_classnum)
        if train_or_test == 'train':
            self.fs = fnames[:-1]
        else:
            self.fs = [fnames[-1]]
        for f in self.fs:
            if not os.path.isfile(f):
                raise ValueError('Failed to find file: ' + f)
        self.train_or_test = train_or_test
        self.data = read_cifar(self.fs, cifar_classnum)
        self.dir = dir
        self.shuffle = shuffle

    def size(self):
        return 50000 if self.train_or_test == 'train' else 10000

    def get_data(self):
        idxs = np.arange(len(self.data))
        if self.shuffle:
            self.rng.shuffle(idxs)
        for k in idxs:
            # shallow-copy so consumers don't mutate the cached datapoint
            yield copy.copy(self.data[k])

    def get_per_pixel_mean(self):
        """
        return a mean image of all (train and test) images of size 32x32x3
        """
        fnames = get_filenames(self.dir, self.cifar_classnum)
        all_imgs = [x[0] for x in read_cifar(fnames, self.cifar_classnum)]
        arr = np.array(all_imgs, dtype='float32')
        mean = np.mean(arr, axis=0)
        return mean

    def get_per_channel_mean(self):
        """
        return three values as mean of each channel
        """
        mean = self.get_per_pixel_mean()
        return np.mean(mean, axis=(0, 1))
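Putting the pieces together, a hedged end-to-end sketch of a CIFAR-10 input pipeline built from the classes above:

ds = CifarBase('train', shuffle=True, cifar_classnum=10)
mean = ds.get_per_pixel_mean()
ds = MapDataComponent(ds, lambda img: img - mean, index=0)  # mean-subtract images
ds = BatchData(ds, 128, remainder=False)
ds.reset_state()
for images, labels in ds.get_data():
    break   # images: (128, 32, 32, 3); labels: (128,)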