code stringlengths 101 5.91M |
|---|
class BoxSpaceSensor(Sensor):
    """Sensor whose observations live in a fixed-shape, bounded box space.

    Scalar bounds are broadcast to ``shape``; array-like bounds are stored
    as given (converted to a numpy array, dtype left as numpy infers it).
    """

    def __init__(self, name: typing.Text, shape: typing.Tuple[(int, ...)],
                 lower_bound: _FLOAT_OR_ARRAY = (- np.pi),
                 upper_bound: _FLOAT_OR_ARRAY = np.pi,
                 dtype=np.float64) -> None:
        super(BoxSpaceSensor, self).__init__(name)
        self._shape = shape
        self._dtype = dtype
        # Broadcast scalar bounds to the full observation shape.
        if isinstance(lower_bound, (float, int)):
            self._lower_bound = np.full(shape, lower_bound, dtype=dtype)
        else:
            self._lower_bound = np.array(lower_bound)
        if isinstance(upper_bound, (float, int)):
            self._upper_bound = np.full(shape, upper_bound, dtype=dtype)
        else:
            self._upper_bound = np.array(upper_bound)

    def get_shape(self) -> typing.Tuple[(int, ...)]:
        return self._shape

    def get_dimension(self) -> int:
        # Number of axes of the observation, not its total element count.
        return len(self._shape)

    def get_dtype(self):
        # BUG FIX: previously the body was a bare ``pass`` and the method
        # returned None; return the dtype used for observations.
        return self._dtype

    def get_observation_datatype(self) -> _DATATYPE_LIST:
        return [(self._name, self._dtype, self._shape)]

    def get_lower_bound(self) -> _ARRAY:
        return self._lower_bound

    def get_upper_bound(self) -> _ARRAY:
        return self._upper_bound

    def _get_observation(self) -> _ARRAY:
        # Subclasses supply the raw observation.
        raise NotImplementedError()

    def get_observation(self) -> np.ndarray:
        # Normalize the subclass's raw observation to the declared dtype.
        return np.asarray(self._get_observation(), dtype=self._dtype)
@_GENERATOR_REGISTRY.register()  # BUG FIX: the decorator lost its leading '@'
class DefaultAnchorGenerator(nn.Module):
    """Computes anchor boxes over a set of feature maps.

    Per feature level, anchors are defined by ``sizes`` (sqrt of anchor
    area, see generate_cell_anchors) and ``aspect_ratios`` (h / w), placed
    on a grid with the level's ``stride`` shifted by ``offset`` * stride.
    """

    # Dimension of each anchor box: (x0, y0, x1, y1).
    box_dim: int = 4

    def __init__(self, *, sizes, aspect_ratios, strides, offset=0.5):
        super().__init__()
        self.strides = strides
        self.num_features = len(self.strides)
        # Broadcast parameters so each feature level has its own
        # (sizes, aspect_ratios) pair.
        sizes = _broadcast_params(sizes, self.num_features, 'sizes')
        aspect_ratios = _broadcast_params(aspect_ratios, self.num_features, 'aspect_ratios')
        self.cell_anchors = self._calculate_anchors(sizes, aspect_ratios)
        self.offset = offset
        assert (0.0 <= self.offset < 1.0), self.offset

    @classmethod  # BUG FIX: takes ``cls`` but was missing @classmethod
    def from_config(cls, cfg, input_shape: List[ShapeSpec]):
        """Build the constructor kwargs from a config object."""
        return {'sizes': cfg.MODEL.ANCHOR_GENERATOR.SIZES, 'aspect_ratios': cfg.MODEL.ANCHOR_GENERATOR.ASPECT_RATIOS, 'strides': [x.stride for x in input_shape], 'offset': cfg.MODEL.ANCHOR_GENERATOR.OFFSET}

    def _calculate_anchors(self, sizes, aspect_ratios):
        # One base-anchor tensor per feature level, registered as buffers.
        cell_anchors = [self.generate_cell_anchors(s, a).float() for (s, a) in zip(sizes, aspect_ratios)]
        return BufferList(cell_anchors)

    @property  # BUG FIX: reads ``self.num_anchors`` as an attribute below
    def num_cell_anchors(self):
        """Alias of :attr:`num_anchors`."""
        return self.num_anchors

    @property  # BUG FIX: without @property, num_cell_anchors returned a bound method
    def num_anchors(self):
        """Number of anchors per grid location, one entry per feature level."""
        return [len(cell_anchors) for cell_anchors in self.cell_anchors]

    def _grid_anchors(self, grid_sizes: List[List[int]]):
        """Shift each level's base anchors across its full feature grid."""
        anchors = []
        buffers: List[torch.Tensor] = [x[1] for x in self.cell_anchors.named_buffers()]
        for (size, stride, base_anchors) in zip(grid_sizes, self.strides, buffers):
            (shift_x, shift_y) = _create_grid_offsets(size, stride, self.offset, base_anchors.device)
            # Same (x, y) shift is applied to both corners of each box.
            shifts = torch.stack((shift_x, shift_y, shift_x, shift_y), dim=1)
            anchors.append((shifts.view(-1, 1, 4) + base_anchors.view(1, -1, 4)).reshape(-1, 4))
        return anchors

    def generate_cell_anchors(self, sizes=(32, 64, 128, 256, 512), aspect_ratios=(0.5, 1, 2)):
        """Anchors centered at (0, 0) in XYXY format, one per (size, ratio)."""
        anchors = []
        for size in sizes:
            area = size ** 2.0
            for aspect_ratio in aspect_ratios:
                # Solve w * h = area with h / w = aspect_ratio.
                w = math.sqrt(area / aspect_ratio)
                h = aspect_ratio * w
                (x0, y0, x1, y1) = ((-w) / 2.0, (-h) / 2.0, w / 2.0, h / 2.0)
                anchors.append([x0, y0, x1, y1])
        return torch.tensor(anchors)

    def forward(self, features: List[torch.Tensor]):
        """Return one Boxes of anchors per input feature map."""
        grid_sizes = [feature_map.shape[-2:] for feature_map in features]
        anchors_over_all_feature_maps = self._grid_anchors(grid_sizes)
        return [Boxes(x) for x in anchors_over_all_feature_maps]
def imagenet_vit_small_pretrained(output_dim):
    """ViT-Small/16 pretrained on ImageNet, head replaced for ``output_dim`` outputs."""
    backbone = timm.create_model('vit_small_patch16_224', pretrained=True)
    return _vit_replace_fc(backbone, output_dim)
def main(_):
    """Entry point: load the experiment config named by FLAGS and launch a run."""
    logging.info('Start')
    experiments_path = FLAGS.experiments_path
    config_name = FLAGS.config_name
    config = common.load_config(os.path.join(experiments_path, config_name))
    # Pull every hyper-parameter out of the config in one pass; missing keys
    # still raise KeyError exactly as the original per-line lookups did.
    (dataset, classes, channels, epochs, batch_size, lr, lr_step, lr_decay,
     weight_decay, dropout_rate, model_name) = (
        config[key] for key in (
            'dataset', 'num_classes', 'channels', 'epochs', 'batch_size',
            'lr', 'lr_step', 'lr_decay', 'weight_decay', 'dropout_rate',
            'model_name'))
    run(epochs, dataset, classes, channels, batch_size, lr, lr_step, lr_decay,
        weight_decay, dropout_rate, model_name, experiments_path)
    logging.info('Finish')
def run_inference(filepaths, IFrameCompressor: nn.Module, outputdir: Path, entropy_estimation: bool=False, trained_net: str='', description: str='', **args: Any):
    """Evaluate ``IFrameCompressor`` over ``filepaths`` without gradients.

    Only the entropy-estimation path is handled here; for any other mode the
    function falls through and returns None.  ``args['half']`` toggles
    autocast mixed precision (KeyError if the caller omits it).
    """
    # One combined context: optional mixed precision plus no-grad inference.
    with amp.autocast(enabled=args['half']), torch.no_grad():
        if entropy_estimation:
            return eval_model_entropy_estimation(IFrameCompressor, filepaths, **args)
def create_row(time, data, metricname):
    """Wrap ``(time, data[metricname])`` into nested named tuples.

    The result is shaped like ``row.monitor.l`` / ``row.monitor.r`` so it
    mimics the row layout expected downstream.
    """
    DataRow = namedtuple('DataRow', 'monitor')
    DataRowMonitor = namedtuple('DataRowMonitor', ('l', 'r'))
    return DataRow(DataRowMonitor(time, data[metricname]))
class TorchCPUOpBuilder(CUDAOpBuilder):
    """Op builder for CPU targets that links CUDA libraries when not building for CPU."""

    def extra_ldflags(self):
        # CPU-only builds need just OpenMP; CUDA builds link curand.
        return ['-fopenmp'] if self.build_for_cpu else ['-lcurand']

    def cxx_args(self):
        import torch
        compile_args = []
        if not self.build_for_cpu:
            # Point the linker at the CUDA runtime/cuBLAS shipped with torch.
            cuda_lib64 = os.path.join(torch.utils.cpp_extension.CUDA_HOME, 'lib64')
            compile_args += super().cxx_args()
            compile_args += [f'-L{cuda_lib64}', '-lcudart', '-lcublas', '-g']
        # Architecture/SIMD/CUDA-enable flags come from builder hooks.
        compile_args += [self.cpu_arch(), '-fopenmp', self.simd_width(), self.is_cuda_enable()]
        return compile_args
class Bert4FnFunction(BaseFunction):
    """BERT-based classifier over a token span.

    ``forward`` mean-pools the BERT hidden states between ``word_pos[i][0]``
    and ``word_pos[i][1]`` for each example, then classifies the pooled
    vector.  Relies on ``self.bert``, ``self.dropout``, ``self.classifier``,
    ``self.device`` and ``self.label_size`` being provided by the base class
    (not visible here — confirm against BaseFunction).
    """

    def __init__(self):
        super().__init__()

    def forward(self, batch=None):
        """Return classifier logits for a batch.

        ``batch`` is (input_ids, attention_mask, word_pos, label_ids);
        label_ids is unused here (consumed by loss/evaluate).
        """
        (input_ids, attention_mask, word_pos, label_ids) = batch
        # [0] selects the sequence of hidden states from the BERT output.
        sequence_output = self.bert(input_ids=input_ids, attention_mask=attention_mask)[0]
        (batch_size, max_len, feat_dim) = sequence_output.shape
        ave_output = torch.zeros(batch_size, feat_dim, dtype=torch.float, device=self.device)
        for i in range(batch_size):
            # Mean-pool hidden states over [word_pos[i][0], word_pos[i][1]).
            # NOTE(review): an empty span would make torch.mean yield NaN —
            # assumes spans are non-empty; confirm upstream.
            ave_output[i] = torch.mean(sequence_output[i][word_pos[i][0].item():word_pos[i][1].item()], dim=0)
        ave_output = self.dropout(ave_output)
        output = self.classifier(ave_output)
        return output

    def predict(self, batch=None):
        """Return the argmax class index per example."""
        output = self.forward(batch)
        prediction = torch.argmax(f.log_softmax(output, dim=1), dim=1)
        return prediction

    def loss(self, batch=None, loss_function=None):
        """Apply ``loss_function`` to flattened logits vs. flattened labels."""
        (input_ids, attention_mask, word_pos, label_ids) = batch
        output = self.forward(batch)
        loss = loss_function(output.view((- 1), self.label_size), label_ids.view((- 1)))
        return loss

    def evaluate(self, batch=None, metrics=None):
        """Feed predictions and gold labels into the ``metrics`` accumulator."""
        (input_ids, attention_mask, word_pos, label_ids) = batch
        output = self.forward(batch)
        prediction = torch.argmax(f.log_softmax(output, dim=1), dim=1)
        metrics.evaluate(prediction, label_ids)
class XHead(BaseModule):
    """Conv head predicting 'flow', 'mask' or 'tradeoff' maps.

    A stack of 3x3 ConvModules (channel widths from ``feat_channels``)
    followed by a prediction conv whose kernel size depends on ``x``.
    """

    def __init__(self, in_channels: int, feat_channels: Sequence[int], x_channels: int, x: str) -> None:
        super().__init__()
        conv_layers = []
        for ch in feat_channels:
            conv_layers.append(ConvModule(in_channels=in_channels, out_channels=ch, kernel_size=3, padding=1))
            in_channels = ch
        self.layers = nn.Sequential(*conv_layers)
        # 'flow' and 'tradeoff' share an identical 3x3 prediction conv;
        # 'mask' uses a 1x1 conv.
        if x in ('flow', 'tradeoff'):
            self.predict_layer = nn.Conv2d(feat_channels[-1], x_channels, kernel_size=3, padding=1)
        elif x == 'mask':
            self.predict_layer = nn.Conv2d(feat_channels[-1], x_channels, kernel_size=1, padding=0)
        else:
            # BUG FIX: the message omitted the supported 'tradeoff' option.
            raise ValueError(f"x must be 'flow', 'mask' or 'tradeoff', but got {x}")

    def forward(self, x: torch.Tensor) -> torch.Tensor:
        x = self.layers(x)
        return self.predict_layer(x)
class KerasModel(BaseModel):
    """Wrapper around a tf.keras model given as an instance or a saved-model path."""

    def __init__(self, model, **kwargs):
        self.component = None
        self._model = model
        # ``model`` may be a path to a saved model or an in-memory keras model.
        if not isinstance(model, tf.keras.Model):
            self._model_object = tf.keras.models.load_model(self._model)
        else:
            self._model_object = self._model
        self._q_config = None

    @property
    def q_config(self):
        """Quantization configuration attached to this model (or None)."""
        return self._q_config

    # BUG FIX: this was the bare statement ``_config.setter`` (a NameError at
    # class creation); restore the property setter pair.
    @q_config.setter
    def q_config(self, q_config):
        self._q_config = q_config

    @property
    def model(self):
        """The underlying tf.keras.Model.

        BUG FIX: declared as a property — methods below read
        ``self.model.layers`` / ``self.model.input_names`` as an attribute.
        """
        return self._model_object

    def graph_info(self):
        # No graph metadata is tracked for keras models.
        return None

    def save(self, root, *args, **kwargs):
        """Save the wrapped keras model under ``root``."""
        self._model_object.save(root)

    def _export(self, save_path: str, conf):
        pass

    def framework(self):
        return 'keras'

    def get_all_weight_names(self):
        """Indices of layers that own at least one weight tensor."""
        names = []
        for (index, layer) in enumerate(self.model.layers):
            if len(layer.weights):
                names.append(index)
        return names

    def report_sparsity(self):
        """Tabulate per-layer and total sparsity of 2-D/4-D weights.

        Returns (DataFrame, total sparsity percentage).
        """
        import numpy as np
        import pandas as pd
        import tensorflow as tf
        df = pd.DataFrame(columns=['Name', 'Shape', 'NNZ (dense)', 'NNZ (sparse)', 'Sparsity(%)'])
        pd.set_option('display.precision', 2)
        # Only weights shaped like dense (2-D) or conv (4-D) kernels count.
        param_dims = [2, 4]
        params_size = 0
        sparse_params_size = 0
        for (index, layer) in enumerate(self.model.layers):
            if not len(layer.weights):
                continue
            weights = layer.get_weights()[0]
            if weights.ndim in param_dims:
                (param_size, sparse_param_size, dense_param_size) = compute_sparsity(weights)
                density = dense_param_size / param_size
                params_size += param_size
                sparse_params_size += sparse_param_size
                df.loc[len(df.index)] = [index, list(weights.shape), dense_param_size, sparse_param_size, (1 - density) * 100]
        total_sparsity = (sparse_params_size / params_size) * 100
        df.loc[len(df.index)] = ['Total sparsity:', '-', params_size, sparse_params_size, total_sparsity]
        return (df, total_sparsity)

    def input_node_names(self):
        return self.model.input_names

    def output_node_names(self):
        return self.model.output_names
class TestUnroll3qOrMore(QiskitTestCase):
    """Unroll3qOrMore should decompose 3+-qubit gates into 1- and 2-qubit ones."""

    def test_ccx(self):
        """A Toffoli unrolls to 15 h/t/tdg/cx operations."""
        controls = QuantumRegister(2, 'qr1')
        target = QuantumRegister(1, 'qr2')
        circuit = QuantumCircuit(controls, target)
        circuit.ccx(controls[0], controls[1], target[0])
        after_dag = Unroll3qOrMore().run(circuit_to_dag(circuit))
        op_nodes = after_dag.op_nodes()
        self.assertEqual(len(op_nodes), 15)
        for node in op_nodes:
            self.assertIn(node.name, ['h', 't', 'tdg', 'cx'])

    def test_cswap(self):
        """A controlled-swap unrolls to 17 h/t/tdg/cx operations."""
        controls = QuantumRegister(2, 'qr1')
        target = QuantumRegister(1, 'qr2')
        circuit = QuantumCircuit(controls, target)
        circuit.cswap(controls[0], controls[1], target[0])
        after_dag = Unroll3qOrMore().run(circuit_to_dag(circuit))
        op_nodes = after_dag.op_nodes()
        self.assertEqual(len(op_nodes), 17)
        for node in op_nodes:
            self.assertIn(node.name, ['h', 't', 'tdg', 'cx'])

    def test_decompose_conditional(self):
        """The classical condition must survive unrolling on every node."""
        qr = QuantumRegister(3, 'qr')
        cr = ClassicalRegister(1, 'cr')
        circuit = QuantumCircuit(qr, cr)
        circuit.ccx(qr[0], qr[1], qr[2]).c_if(cr, 0)
        after_dag = Unroll3qOrMore().run(circuit_to_dag(circuit))
        op_nodes = after_dag.op_nodes()
        self.assertEqual(len(op_nodes), 15)
        for node in op_nodes:
            self.assertIn(node.name, ['h', 't', 'tdg', 'cx'])
            self.assertEqual(node.condition, (cr, 0))
def main():
    """Fine-tune / evaluate / predict a token-classification (NER-style) model.

    Arguments come from a single JSON file (when the only CLI argument ends
    in .json) or from regular command-line flags parsed into dataclasses.
    Returns the dict of evaluation results.
    """
    parser = HfArgumentParser((ModelArguments, DataTrainingArguments, TrainingArguments))
    if (len(sys.argv) == 2) and sys.argv[1].endswith('.json'):
        (model_args, data_args, training_args) = parser.parse_json_file(json_file=os.path.abspath(sys.argv[1]))
    else:
        (model_args, data_args, training_args) = parser.parse_args_into_dataclasses()
    # Refuse to overwrite a non-empty output dir unless explicitly allowed.
    if os.path.exists(training_args.output_dir) and os.listdir(training_args.output_dir) and training_args.do_train and (not training_args.overwrite_output_dir):
        raise ValueError(f'Output directory ({training_args.output_dir}) already exists and is not empty. Use --overwrite_output_dir to overcome.')
    # Resolve the task implementation (a TokenClassificationTask subclass) by name.
    module = import_module('tasks')
    try:
        token_classification_task_clazz = getattr(module, model_args.task_type)
        token_classification_task: TokenClassificationTask = token_classification_task_clazz()
    except AttributeError:
        raise ValueError(f'Task {model_args.task_type} needs to be defined as a TokenClassificationTask subclass in {module}. Available tasks classes are: {TokenClassificationTask.__subclasses__()}')
    logging.basicConfig(format='%(asctime)s - %(levelname)s - %(name)s - %(message)s', datefmt='%m/%d/%Y %H:%M:%S', level=(logging.INFO if (training_args.local_rank in [-1, 0]) else logging.WARN))
    logger.warning('Process rank: %s, device: %s, n_gpu: %s, distributed training: %s, 16-bits training: %s', training_args.local_rank, training_args.device, training_args.n_gpu, bool(training_args.local_rank != -1), training_args.fp16)
    if is_main_process(training_args.local_rank):
        transformers.utils.logging.set_verbosity_info()
        transformers.utils.logging.enable_default_handler()
        transformers.utils.logging.enable_explicit_format()
    logger.info('Training/evaluation parameters %s', training_args)
    set_seed(training_args.seed)
    # Label vocabulary and its id <-> name mappings.
    labels = token_classification_task.get_labels(data_args.labels)
    label_map: Dict[(int, str)] = {i: label for (i, label) in enumerate(labels)}
    num_labels = len(labels)
    config = AutoConfig.from_pretrained((model_args.config_name if model_args.config_name else model_args.model_name_or_path), num_labels=num_labels, id2label=label_map, label2id={label: i for (i, label) in enumerate(labels)}, cache_dir=model_args.cache_dir)
    tokenizer = AutoTokenizer.from_pretrained((model_args.tokenizer_name if model_args.tokenizer_name else model_args.model_name_or_path), cache_dir=model_args.cache_dir, use_fast=model_args.use_fast)
    model = AutoModelForTokenClassification.from_pretrained(model_args.model_name_or_path, from_tf=bool(('.ckpt' in model_args.model_name_or_path)), config=config, cache_dir=model_args.cache_dir)
    train_dataset = (TokenClassificationDataset(token_classification_task=token_classification_task, data_dir=data_args.data_dir, tokenizer=tokenizer, labels=labels, model_type=config.model_type, max_seq_length=data_args.max_seq_length, overwrite_cache=data_args.overwrite_cache, mode=Split.train) if training_args.do_train else None)
    eval_dataset = (TokenClassificationDataset(token_classification_task=token_classification_task, data_dir=data_args.data_dir, tokenizer=tokenizer, labels=labels, model_type=config.model_type, max_seq_length=data_args.max_seq_length, overwrite_cache=data_args.overwrite_cache, mode=Split.dev) if training_args.do_eval else None)

    def align_predictions(predictions: np.ndarray, label_ids: np.ndarray) -> Tuple[(List[int], List[int])]:
        """Map token-level logits back to label names, skipping padding tokens."""
        preds = np.argmax(predictions, axis=2)
        (batch_size, seq_len) = preds.shape
        out_label_list = [[] for _ in range(batch_size)]
        preds_list = [[] for _ in range(batch_size)]
        # PERF FIX: hoist the padding sentinel — the original constructed a
        # fresh nn.CrossEntropyLoss() for every token in the batch.
        ignore_index = nn.CrossEntropyLoss().ignore_index
        for i in range(batch_size):
            for j in range(seq_len):
                if label_ids[(i, j)] != ignore_index:
                    out_label_list[i].append(label_map[label_ids[i][j]])
                    preds_list[i].append(label_map[preds[i][j]])
        return (preds_list, out_label_list)

    def compute_metrics(p: EvalPrediction) -> Dict:
        """seqeval-style accuracy/precision/recall/F1 over aligned predictions."""
        (preds_list, out_label_list) = align_predictions(p.predictions, p.label_ids)
        return {'accuracy_score': accuracy_score(out_label_list, preds_list), 'precision': precision_score(out_label_list, preds_list), 'recall': recall_score(out_label_list, preds_list), 'f1': f1_score(out_label_list, preds_list)}

    trainer = Trainer(model=model, args=training_args, train_dataset=train_dataset, eval_dataset=eval_dataset, compute_metrics=compute_metrics)
    if training_args.do_train:
        trainer.train(model_path=(model_args.model_name_or_path if os.path.isdir(model_args.model_name_or_path) else None))
        trainer.save_model()
        # Only the main process writes the tokenizer alongside the model.
        if trainer.is_world_process_zero():
            tokenizer.save_pretrained(training_args.output_dir)
    results = {}
    if training_args.do_eval:
        logger.info('*** Evaluate ***')
        result = trainer.evaluate()
        output_eval_file = os.path.join(training_args.output_dir, 'eval_results.txt')
        if trainer.is_world_process_zero():
            with open(output_eval_file, 'w') as writer:
                logger.info('***** Eval results *****')
                for (key, value) in result.items():
                    logger.info(' %s = %s', key, value)
                    writer.write(('%s = %s\n' % (key, value)))
            results.update(result)
    if training_args.do_predict:
        test_dataset = TokenClassificationDataset(token_classification_task=token_classification_task, data_dir=data_args.data_dir, tokenizer=tokenizer, labels=labels, model_type=config.model_type, max_seq_length=data_args.max_seq_length, overwrite_cache=data_args.overwrite_cache, mode=Split.test)
        (predictions, label_ids, metrics) = trainer.predict(test_dataset)
        (preds_list, _) = align_predictions(predictions, label_ids)
        output_test_results_file = os.path.join(training_args.output_dir, 'test_results.txt')
        if trainer.is_world_process_zero():
            with open(output_test_results_file, 'w') as writer:
                for (key, value) in metrics.items():
                    logger.info(' %s = %s', key, value)
                    writer.write(('%s = %s\n' % (key, value)))
        output_test_predictions_file = os.path.join(training_args.output_dir, 'test_predictions.txt')
        if trainer.is_world_process_zero():
            with open(output_test_predictions_file, 'w') as writer:
                with open(os.path.join(data_args.data_dir, 'test.txt'), 'r') as f:
                    token_classification_task.write_predictions_to_file(writer, f, preds_list)
    return results
def test_warning_when_missing_initializer():
    """Trainer should warn when initializers reference a missing component."""
    deeptext = BasicRNN(vocab_size=vocab_size, embed_dim=32, padding_idx=0)
    deeptabular = TabMlp(column_idx=column_idx, cat_embed_input=embed_input, continuous_cols=colnames[-5:], mlp_hidden_dims=[32, 16], mlp_dropout=[0.5, 0.5])
    wide = Wide(100, 1)
    model = WideDeep(wide=wide, deeptabular=deeptabular, deeptext=deeptext, pred_dim=1)
    with pytest.warns(UserWarning):
        Trainer(model, objective='binary', verbose=True, initializers=initializers_3)
def isolate_glossary(word, glossary):
    """Split ``word`` around every occurrence of ``glossary``.

    Returns the list of stripped, non-empty segments with each glossary
    occurrence kept as its own element, e.g. ('lowest', 'low') ->
    ['low', 'est'].  If the word equals the glossary or does not contain
    it, returns [word] unchanged.
    """
    if word == glossary or glossary not in word:
        return [word]
    parts = word.split(glossary)
    segments = []
    # Re-insert the glossary term after every part except the last; the
    # emptiness test runs BEFORE stripping, matching the original behavior.
    for part in parts[:-1]:
        for piece in (part, glossary):
            if piece != '':
                segments.append(piece.strip())
    tail = parts[-1].strip()
    if parts[-1] != '':
        segments.append(tail)
    return segments
def build_graph(graph):
    """Build a name -> node-info map from a protobuf graph and wire up edges.

    Input names are normalised by dropping any ':port' suffix and a leading
    '^' (control-dependency marker) before looking them up.
    """
    node_map = {}
    for proto_node in graph.node:
        node_map[proto_node.name] = init_node(proto_node)
    for name, info in node_map.items():
        for input_name in info['node'].input:
            colon = input_name.find(':')
            if colon != -1:
                input_name = input_name[:colon]
            input_name = input_name.lstrip('^')
            if input_name in node_map:
                connect_nodes(node_map[input_name], info)
            else:
                print('node {} not found'.format(input_name))
    return node_map
def add_hist_seq(df: 'SparkDataFrame', cols: List[str], user_col: str, sort_col: str, min_len: int, max_len: int, num_seqs: int) -> 'SparkDataFrame':
    """Add history-sequence columns to ``df`` via the JVM-side 'addHistSeq'.

    Thin wrapper over callZooFunc; presumably rows are grouped by
    ``user_col``, ordered by ``sort_col``, and for each column in ``cols``
    sequences of length between ``min_len`` and ``max_len`` (up to
    ``num_seqs`` per user) are produced — the exact semantics live in the
    Scala implementation; verify there.
    """
    return callZooFunc('float', 'addHistSeq', df, cols, user_col, sort_col, min_len, max_len, num_seqs)
class Struc2VecTrainer(VecTrainer):
    """VecTrainer specialisation that embeds nodes with Struc2Vec random walks."""

    def __init__(self, embed_dim, train_data, city, tester):
        super().__init__(embed_dim, train_data, city, tester)
        self.vec_model = Struc2Vec(num_walks=200)

    def save_model(self, model):
        """Pickle the trained model plus metadata under data_/struc2vec/models/."""
        obj = {'embed_dim': self.embed_dim, 'city': self.city, 'distmult': model}
        path = data_dir + 'data_/struc2vec/models/' + self.city + '_distmult.pkl'
        # BUG FIX: the file handle from open() was never closed; use a
        # context manager so the dump is flushed and the handle released.
        with open(path, 'wb') as fh:
            pickle.dump(obj, fh)
def process_reference_line(working_line, journals_matches, pprint_repnum_len, pprint_repnum_matchtext, publishers_matches, removed_spaces, standardised_titles, kbs):
    """Tag journals, report numbers and publishers found in a reference line.

    The three ``*_matches`` dicts map character positions in ``working_line``
    to match information; ``removed_spaces`` records whitespace stripped
    earlier so positions can be corrected.  Walks the match positions in
    order, rebuilding the line chunk by chunk with tags inserted, then
    applies volume/author/collaboration tagging passes.  Returns the tagged
    line with newlines removed.
    """
    # Fast path: nothing matched, pass the line through untouched.
    if (((len(journals_matches) + len(pprint_repnum_len)) + len(publishers_matches)) == 0):
        tagged_line = working_line
    else:
        startpos = 0
        previous_match = {}
        replacement_types = {}
        # NOTE(review): replacement_types is immediately reassigned below —
        # this initialisation is dead.
        journals_keys = sorted(journals_matches.keys())
        reports_keys = sorted(pprint_repnum_matchtext.keys())
        publishers_keys = sorted(publishers_matches.keys())
        spaces_keys = sorted(removed_spaces.keys())
        replacement_types = get_replacement_types(journals_keys, reports_keys, publishers_keys)
        replacement_locations = sorted(replacement_types.keys())
        tagged_line = u''
        # Rebuild the line left-to-right; each helper returns the rebuilt
        # chunk plus the new scan position so chunks never overlap.
        for replacement_index in replacement_locations:
            (true_replacement_index, extras) = account_for_stripped_whitespace(spaces_keys, removed_spaces, replacement_types, pprint_repnum_len, journals_matches, replacement_index)
            if (replacement_types[replacement_index] == u'journal'):
                (rebuilt_chunk, startpos, previous_match) = add_tagged_journal(reading_line=working_line, journal_info=journals_matches[replacement_index], previous_match=previous_match, startpos=startpos, true_replacement_index=true_replacement_index, extras=extras, standardised_titles=standardised_titles)
                tagged_line += rebuilt_chunk
            elif (replacement_types[replacement_index] == u'reportnumber'):
                (rebuilt_chunk, startpos) = add_tagged_report_number(reading_line=working_line, len_reportnum=pprint_repnum_len[replacement_index], reportnum=pprint_repnum_matchtext[replacement_index], startpos=startpos, true_replacement_index=true_replacement_index, extras=extras)
                tagged_line += rebuilt_chunk
            elif (replacement_types[replacement_index] == u'publisher'):
                (rebuilt_chunk, startpos) = add_tagged_publisher(reading_line=working_line, matched_publisher=publishers_matches[replacement_index], startpos=startpos, true_replacement_index=true_replacement_index, extras=extras, kb_publishers=kbs['publishers'])
                tagged_line += rebuilt_chunk
        # Append whatever remained after the last match.
        tagged_line += working_line[startpos:]
    # Post-passes: normalise volume tags, then tag authors/collaborations
    # using the knowledge bases.
    tagged_line = wash_volume_tag(tagged_line)
    tagged_line = identify_and_tag_authors(tagged_line, kbs['authors'])
    tagged_line = identify_and_tag_collaborations(tagged_line, kbs['collaborations'])
    return tagged_line.replace('\n', '')
class Node(object):
    """A node in a layer graph: named, typed, with parent/child links."""

    def __init__(self, name, kind, layer=None):
        self.name = name
        self.kind = kind
        # Wrap the raw layer (when given) so kind-specific access is uniform.
        self.layer = LayerAdapter(layer, kind) if layer else None
        self.parents = []
        self.children = []
        self.data = None
        self.output_shape = None
        self.metadata = {}

    def add_parent(self, parent_node):
        """Link ``parent_node`` above this node, back-linking it as a child."""
        assert parent_node not in self.parents
        self.parents.append(parent_node)
        if self not in parent_node.children:
            parent_node.children.append(self)

    def add_child(self, child_node):
        """Link ``child_node`` below this node, back-linking it as a parent."""
        assert child_node not in self.children
        self.children.append(child_node)
        if self not in child_node.parents:
            child_node.parents.append(self)

    def get_only_parent(self):
        """Return the single parent; KaffeError if there isn't exactly one."""
        if len(self.parents) != 1:
            raise KaffeError('Node (%s) expected to have 1 parent. Found %s.' % (self, len(self.parents)))
        return self.parents[0]

    def parameters(self):
        """The wrapped layer's parameters, or None for layer-less nodes."""
        return self.layer.parameters if self.layer is not None else None

    def __str__(self):
        return '[%s] %s' % (self.kind, self.name)

    def __repr__(self):
        return '%s (0x%x)' % (self.name, id(self))
def skintone_mad(data_file):
    """Print the mean per-prompt MAD of predicted skin-tone distributions.

    ``data_file`` is a JSON file mapping prompt -> list of per-sample tone
    values.  NOTE(review): values appear to be integers in [1, 10] with -10
    (or NaN) marking a failed prediction — confirm with the data producer.
    Prints the average MAD and returns None.
    """
    with open(data_file, 'r') as f:
        data = json.load(f)
    mads = []
    all_values = []
    for prompt in data:
        model_values = data[prompt]
        scores = []
        avg_tone = []
        # One count bucket per tone value 1..10.
        for skintone in range(1, 11):
            scores.append(0)
        total_tones = 0
        for i in range(len(model_values)):
            # Skip the -10 sentinel and NaNs (failed predictions).
            if ((model_values[i] != (- 10)) and (not np.isnan(model_values[i]))):
                all_values.append(model_values[i])
                total_tones += 1
                scores[(model_values[i] - 1)] += 1
                avg_tone.append(model_values[i])
        # NOTE(review): ``scores`` always has 10 entries, so only the
        # avg_tone emptiness check can actually trigger this skip.
        if ((len(scores) == 0) or (len(avg_tone) == 0)):
            continue
        # Normalise bucket counts into a probability distribution.
        for i in range(len(scores)):
            scores[i] /= total_tones
        avg_tone = np.average(avg_tone)
        mad = np_mad(scores)
        mads.append(mad)
    c = Counter(all_values)
    d = {}
    # NOTE(review): ``d`` (per-tone relative frequency) and ``avg_tone`` are
    # computed but never used or printed — apparent dead code, kept as-is.
    for tone in c.keys():
        d[tone] = round((c[tone] / len(all_values)), 2)
    print('Average Skintone MAD', np.mean(mads))
def create_conv2_model(input_dim, input_channels=1, num_kernels=None, kernel_size=4, pool_size=2, n=1):
    """Two conv/RePU/maxpool stages followed by a 10-way linear classifier.

    ``input_dim`` is the (square) input spatial size; the linear layer's
    fan-in is derived from the size after two valid convs and two pools.
    NOTE(review): the fan-in formula divides by 2 regardless of
    ``pool_size`` — only exact for pool_size=2; confirm intent.
    """
    if num_kernels is None:
        num_kernels = [8, 16]
    # Spatial size after a valid conv (shrinks by kernel_size-1) then a pool.
    after_stage1 = (input_dim - (kernel_size - 1)) / 2
    after_stage2 = (after_stage1 - (kernel_size - 1)) / 2
    flat_features = num_kernels[1] * int(after_stage2) ** 2
    modules = [
        ('conv1', nn.Conv2d(input_channels, num_kernels[0], kernel_size, bias=False)),
        ('repu1', RePU(n)),
        ('pool1', nn.MaxPool2d(pool_size)),
        ('conv2', nn.Conv2d(num_kernels[0], num_kernels[1], kernel_size, bias=False)),
        ('repu2', RePU(n)),
        ('pool2', nn.MaxPool2d(pool_size)),
        ('flatten', Flatten()),
        ('linear1', nn.Linear(flat_features, 10)),
    ]
    return nn.Sequential(OrderedDict(modules))
# BUG FIX: the decorator was truncated to ".skipif(...)" (a syntax error);
# restore the full pytest.mark.skipif marker so the test is skipped when
# the binding was built without std::monostate support.
@pytest.mark.skipif(not hasattr(m, 'load_monostate_variant'), reason='no std::monostate')
def test_variant_monostate(doc):
    """std::monostate maps to Python None and the variant round-trips all alternatives."""
    assert m.load_monostate_variant(None) == 'std::monostate'
    assert m.load_monostate_variant(1) == 'int'
    assert m.load_monostate_variant('1') == 'std::string'
    assert m.cast_monostate_variant() == (None, 5, 'Hello')
    assert doc(m.load_monostate_variant) == 'load_monostate_variant(arg0: Union[None, int, str]) -> str'
class InceptConv(nn.Module):
    """Conv -> BatchNorm -> ReLU block as used in Inception-style networks."""

    def __init__(self, in_channels, out_channels, kernel_size, stride, padding):
        super(InceptConv, self).__init__()
        # Bias is omitted: BatchNorm's affine shift makes it redundant.
        self.conv = nn.Conv2d(in_channels=in_channels, out_channels=out_channels, kernel_size=kernel_size, stride=stride, padding=padding, bias=False)
        self.bn = nn.BatchNorm2d(num_features=out_channels, eps=0.001, momentum=0.1)
        self.activ = nn.ReLU(inplace=True)

    def forward(self, x):
        # Single fused pipeline instead of three sequential reassignments.
        return self.activ(self.bn(self.conv(x)))
def make_data_loader(dataset, batch_size, args):
    """Build a DataLoader whose batch sampler is data-parallel aware.

    With shuffling, samples are drawn with replacement for exactly
    ``batch_size * args.train_iters`` draws.  When the data-parallel world
    size exceeds 1, each rank gets its own shard and incomplete batches are
    dropped so all ranks step the same number of times.
    """
    if args.shuffle:
        base_sampler = data_utils.samplers.RandomSampler(dataset, replacement=True, num_samples=batch_size * args.train_iters)
    else:
        base_sampler = torch.utils.data.SequentialSampler(dataset)
    data_parallel_group = mpu.get_data_parallel_group()
    world_size = torch.distributed.get_world_size(group=data_parallel_group)
    rank = torch.distributed.get_rank(group=data_parallel_group)
    if world_size > 1:
        # drop_last=True keeps per-rank batch counts identical.
        batch_sampler = data_utils.samplers.DistributedBatchSampler(base_sampler, batch_size, True, rank, world_size)
    else:
        batch_sampler = torch.utils.data.BatchSampler(base_sampler, batch_size, False)
    return torch.utils.data.DataLoader(dataset, batch_sampler=batch_sampler, num_workers=args.num_workers, pin_memory=True)
def overlap(dataset0, dataset1, args):
    """Vocabulary overlap between two datasets as (f1, precision, recall).

    Precision is measured against ``dataset0``'s vocabulary and recall
    against ``dataset1``'s.  ``args['type']`` selects whether examples carry
    a single 'sentence' field or a 'sentence1'/'sentence2' pair.

    NOTE(review): returns the bare int 0 (not a tuple) when the vocabularies
    are disjoint — kept for backward compatibility, but the return type is
    inconsistent for callers that unpack three values.
    """
    def _vocabulary(dataset):
        # Collect the set of non-punctuation tokens across the dataset.
        words = set()
        for example in dataset:
            if args['type'] == 'single':
                sentence = example['sentence']
            elif args['type'] == 'pair':
                sentence = example['sentence1'] + example['sentence2']
            else:
                raise ValueError('type not specified')
            for token in word_tokenize(sentence):
                if token not in punc:
                    words.add(token)
        return words

    # DECOMPOSITION: the original duplicated this loop for each dataset.
    word_set0 = _vocabulary(dataset0)
    word_set1 = _vocabulary(dataset1)
    intersect = word_set0.intersection(word_set1)
    if len(intersect) == 0:
        return 0
    precision = len(intersect) / len(word_set0)
    recall = len(intersect) / len(word_set1)
    f1 = (2 * precision * recall) / (precision + recall)
    return (f1, precision, recall)
def wrap_module(module, *module_args, **module_kwargs):
    """Return a callable that builds a fresh ``module`` instance per call.

    Every invocation constructs ``module(*module_args, **module_kwargs)``
    and immediately applies it to the call's own arguments.
    """
    def wrap(*args, **kwargs):
        instance = module(*module_args, **module_kwargs)
        return instance(*args, **kwargs)
    return wrap
def _get_city_pairs(folder, split='train'):
def get_path_pairs(img_folder, mask_folder):
img_paths = []
mask_paths = []
for (root, _, files) in os.walk(img_folder):
for filename in files:
if filename.startswith('._'):
continue
if filename.endswith('.png'):
imgpath = os.path.join(root, filename)
foldername = os.path.basename(os.path.dirname(imgpath))
maskname = filename.replace('leftImg8bit', 'gtFine_labelTrainIds')
maskpath = os.path.join(mask_folder, foldername, maskname)
if (os.path.isfile(imgpath) and os.path.isfile(maskpath)):
img_paths.append(imgpath)
mask_paths.append(maskpath)
else:
logging.info('cannot find the mask or image:', imgpath, maskpath)
logging.info('Found {} images in the folder {}'.format(len(img_paths), img_folder))
return (img_paths, mask_paths)
if (split in ('train', 'val')):
img_folder = os.path.join(folder, ('leftImg8bit/' + split))
mask_folder = os.path.join(folder, ('gtFine/' + split))
(img_paths, mask_paths) = get_path_pairs(img_folder, mask_folder)
return (img_paths, mask_paths)
else:
assert (split == 'test')
logging.info('test set, but only val set')
val_img_folder = os.path.join(folder, 'leftImg8bit/val')
val_mask_folder = os.path.join(folder, 'gtFine/val')
(img_paths, mask_paths) = get_path_pairs(val_img_folder, val_mask_folder)
return (img_paths, mask_paths) |
class TestGraphInputOutputDetection(unittest.TestCase):
    """Checks GraphAnalyzer's automatic graph input/output detection."""

    tf.compat.v1.disable_v2_behavior()
    # BUG FIX: this URL literal was truncated to a bare quote (syntax error).
    # NOTE(review): restored to the frozen MobileNet v1 graph these tests
    # expect — confirm the exact URL against version control.
    mb_fp32_pb_url = 'https://storage.googleapis.com/intel-optimized-tensorflow/models/v1_6/mobilenet_v1_1.0_224_frozen.pb'
    pb_path = '/tmp/.neural_compressor/mobilenet_fp32.pb'
    platform = platform.system().lower()
    if (platform == 'windows'):
        pb_path = 'C:\\tmp\\.neural_compressor\\mobilenet_fp32.pb'
    inputs = ['input']
    outputs = ['MobilenetV1/Predictions/Reshape_1']

    # BUG FIX: setUpClass/tearDownClass must be classmethods — unittest
    # invokes them on the class, not on an instance.
    @classmethod
    def setUpClass(cls):
        build_fake_yaml()
        build_fake_yaml_2()
        if not os.path.exists(cls.pb_path):
            if cls.platform == 'linux':
                os.system('mkdir -p /tmp/.neural_compressor && wget {} -O {} '.format(cls.mb_fp32_pb_url, cls.pb_path))
            elif cls.platform == 'windows':
                os.system('md C:\\tmp\\.neural_compressor && cd C:\\tmp\\.neural_compressor')
                from urllib import request
                # BUG FIX: urlretrieve was missing the destination filename,
                # so the download never landed at pb_path.
                request.urlretrieve(cls.mb_fp32_pb_url, cls.pb_path)
        cls.input_graph = tf.compat.v1.GraphDef()
        with open(cls.pb_path, 'rb') as f:
            cls.input_graph.ParseFromString(f.read())
        build_fake_model_1()
        build_fake_model_2()
        build_fake_model_3()

    @classmethod
    def tearDownClass(cls):
        os.remove('fake_yaml.yaml')
        os.remove('fake_yaml_2.yaml')
        os.remove('model_1.pb')
        os.remove('model_2.pb')
        os.remove('model_3.pb')

    def test_identify_input_output(self):
        """Both GraphAnalyzer and the helper agree on inputs/outputs."""
        g = GraphAnalyzer()
        g.graph = self.input_graph
        g.parse_graph()
        (inputs, outputs) = g.get_graph_input_output()
        self.assertEqual(inputs, self.inputs)
        self.assertEqual(outputs, self.outputs)
        (inputs, outputs) = get_input_output_node_names(self.input_graph)
        self.assertEqual(inputs, self.inputs)
        self.assertEqual(outputs, self.outputs)
        # model_1: a simple graph with known endpoints.
        input_graph = tf.compat.v1.GraphDef()
        with open('model_1.pb', 'rb') as f:
            input_graph.ParseFromString(f.read())
        g = GraphAnalyzer()
        g.graph = input_graph
        g.parse_graph()
        (inputs, outputs) = g.get_graph_input_output()
        self.assertEqual(inputs, ['sub'])
        self.assertEqual(outputs, ['op_to_store'])
        (inputs, outputs) = get_input_output_node_names(input_graph)
        self.assertEqual(inputs, ['sub'])
        self.assertEqual(outputs, ['op_to_store'])
        # model_2 and model_3: graphs with no detectable endpoints.
        input_graph = tf.compat.v1.GraphDef()
        with open('model_2.pb', 'rb') as f:
            input_graph.ParseFromString(f.read())
        g = GraphAnalyzer()
        g.graph = input_graph
        g.parse_graph()
        (inputs, outputs) = g.get_graph_input_output()
        self.assertEqual(inputs, [])
        self.assertEqual(outputs, [])
        (inputs, outputs) = get_input_output_node_names(input_graph)
        self.assertEqual(inputs, [])
        self.assertEqual(outputs, [])
        input_graph = tf.compat.v1.GraphDef()
        with open('model_3.pb', 'rb') as f:
            input_graph.ParseFromString(f.read())
        g = GraphAnalyzer()
        g.graph = input_graph
        g.parse_graph()
        (inputs, outputs) = g.get_graph_input_output()
        self.assertEqual(inputs, [])
        self.assertEqual(outputs, [])
        (inputs, outputs) = get_input_output_node_names(input_graph)
        self.assertEqual(inputs, [])
        self.assertEqual(outputs, [])

    def test_no_input_output_config(self):
        """Quantization works when the yaml omits input/output names."""
        g = GraphAnalyzer()
        g.graph = self.input_graph
        g.parse_graph()
        float_graph_def = g.dump_graph()
        from neural_compressor.experimental import Quantization, common
        quantizer = Quantization('fake_yaml.yaml')
        dataset = quantizer.dataset('dummy', shape=(20, 224, 224, 3), label=True)
        quantizer.calib_dataloader = common.DataLoader(dataset, batch_size=2)
        quantizer.eval_dataloader = common.DataLoader(dataset, batch_size=2)
        quantizer.model = float_graph_def
        output_graph = quantizer.fit()
        self.assertGreater(len(output_graph.graph_def.node), 0)

    def test_invalid_input_output_config(self):
        """Invalid configured names are replaced by the detected ones."""
        g = GraphAnalyzer()
        g.graph = self.input_graph
        g.parse_graph()
        float_graph_def = g.dump_graph()
        from neural_compressor.experimental import Quantization, common
        quantizer = Quantization('fake_yaml_2.yaml')
        dataset = quantizer.dataset('dummy', shape=(20, 224, 224, 3), label=True)
        quantizer.calib_dataloader = common.DataLoader(dataset, batch_size=2)
        quantizer.eval_dataloader = common.DataLoader(dataset, batch_size=2)
        quantizer.model = float_graph_def
        model = quantizer.fit()
        self.assertNotEqual(model.input_node_names, ['x'])
        self.assertNotEqual(model.output_node_names, ['op_to_store'])
def setup_args():
    """Build the CLI parser: shared image/target options plus per-codec subcommands.

    Returns (parser, subparsers) so callers can register codec subcommands
    before parsing.
    """
    parser = argparse.ArgumentParser(description='Collect codec metrics and performances.')
    # A codec subcommand is mandatory; callers attach one parser per codec.
    subparsers = parser.add_subparsers(dest='codec', help='Select codec')
    subparsers.required = True
    parser.add_argument('image', type=str, help='image filepath')
    parser.add_argument('target', type=float, help='target value to match')
    parser.add_argument('-m', '--metric', type=str, choices=['bpp', 'psnr', 'ms-ssim'], default='bpp')
    parser.add_argument('--save', action='store_true', help='Save reconstructed image to disk')
    return (parser, subparsers)
def set_user_categories(user_id, user):
    """Replace ``user_id``'s category links with ``user.categories``.

    Deletes all existing user_categories rows for the user, then inserts one
    (user_id, category_id) row per category.  NOTE(review): there is no
    explicit conn.commit() here — assumes the connection autocommits or the
    caller commits; confirm, otherwise the change is silently lost.
    """
    conn = getDb()
    # closing() guarantees the cursor is released even on error.
    with closing(conn.cursor()) as cur:
        cur.execute('DELETE FROM user_categories WHERE user_ID = %s', [user_id])
        data = [(user_id, category_id) for category_id in user.categories]
        cur.executemany('INSERT INTO user_categories VALUES(%s, %s)', data)
(version='2.0')
def check_config(prune_config):
    """Validate a pruning-schedule config dict; raises AssertionError on bad values.

    Checks step ranges, sparsity bounds, prune domain, and the sparsity
    pattern string ('NxM' or 'N:M'). For 'N:M' patterns this also caps
    prune_config['max_sparsity_ratio_per_layer'] at N/M in place.
    """
    # fixed: the assertion messages below previously described the wrong bounds/keys
    assert prune_config['start_step'] >= 0, 'start_step should be >= 0'
    assert prune_config['end_step'] >= -1, 'end_step should be >= -1'
    assert prune_config['end_step'] >= prune_config['start_step'], 'end_step should be greater than start_step'
    assert 0 <= prune_config['target_sparsity'] < 1.0, 'target_sparsity should be in range [0,1)'
    assert prune_config['update_frequency_on_step'] > 0, 'update_frequency_on_step should be greater than 0'
    assert 0 <= prune_config['max_sparsity_ratio_per_layer'] < 1, 'max_sparsity_ratio_per_layer should be in range [0,1)'
    assert prune_config['prune_domain'] in ('global', 'local'), "only support 'global' and 'local' prune domain"
    if 'x' in prune_config['pattern']:
        pattern = prune_config['pattern'].split('_')[-1].split('x')
        # 'channelxN' / 'Nxchannel' block patterns need no numeric validation
        if pattern[0] != 'channel' and pattern[1] != 'channel':
            try:
                N = int(pattern[0])
                M = int(pattern[1])
            except (ValueError, IndexError):
                raise AssertionError("N or M can't convert to int")
            assert N > 0, 'N should be greater than 0'
            assert M > 0, 'M should be greater than 0'
    if ':' in prune_config['pattern']:
        pattern = prune_config['pattern'].split('_')[-1].split(':')
        try:
            N = int(pattern[0])
            M = int(pattern[1])
        except (ValueError, IndexError):
            raise AssertionError("N or M can't convert to int")
        assert N > 0, 'N should be greater than 0'
        assert M > N, 'M should be greater than N'
        # with N:M sparsity at most N out of every M weights can be pruned
        max_ratio = float(N) / M
        assert prune_config['target_sparsity'] <= max_ratio, 'in N:M pattern, the max sparsity is N/M={}'.format(max_ratio)
        prune_config['max_sparsity_ratio_per_layer'] = min(max_ratio, prune_config['max_sparsity_ratio_per_layer'])
class Aggregation(torch.autograd.Function):
    """Autograd op computing a one-hop graph aggregation: A @ (X @ W).

    backward deliberately returns no gradients, so this op detaches A, X
    and W from the autograd graph.
    """

    @staticmethod
    def forward(ctx, A, X, W):
        """Project node features X by W, then aggregate with adjacency A."""
        # forward/backward must be @staticmethod for Function.apply to work
        out = torch.mm(X, W)
        out = torch.mm(A, out)
        return out

    @staticmethod
    def backward(ctx, d_output):
        """Propagate no gradients to A, X or W."""
        return (None, None, None)
def get_mol(smiles):
    """Parse a SMILES string into a kekulized RDKit molecule.

    Returns None when the SMILES string cannot be parsed.
    """
    molecule = Chem.MolFromSmiles(smiles)
    if molecule is None:
        return None
    Chem.Kekulize(molecule)
    return molecule
def chamfer_loss_separate(output, target, weight=10000.0, phase='train', debug=False):
    """Bidirectional chamfer distance between two point sets.

    In 'train' phase returns the raw per-point distances and index maps
    (model2scan, scan2model, idx1, idx2); in any other phase returns the two
    per-sample mean distances, each scaled by `weight`.
    """
    from chamferdist.chamferdist import ChamferDistance
    model2scan, scan2model, idx1, idx2 = ChamferDistance()(output, target)
    if phase == 'train':
        return (model2scan, scan2model, idx1, idx2)
    forward_term = torch.mean(model2scan, dim=-1) * weight
    backward_term = torch.mean(scan2model, dim=-1) * weight
    return (forward_term, backward_term)
class MultiscaleDiscriminator(nn.Module):
    """SPADE-style multiscale discriminator: `opt.num_D` copies of
    SPADENLayerDiscriminator, each applied to a progressively 2x-downsampled
    input; forward returns one output per scale."""

    @staticmethod
    def modify_commandline_options(parser, is_train):
        """Register multiscale options plus the per-scale subnet's options.

        Declared static (it takes neither self nor cls): it is invoked on the
        class before any instance exists.
        """
        assert isinstance(parser, argparse.ArgumentParser)
        parser.add_argument('--num_D', type=int, default=2, help='number of discriminators to be used in multiscale')
        parser.add_argument('--norm_D', type=str, default='spectralinstance', help='instance normalization or batch normalization')
        (opt, _) = parser.parse_known_args()
        # let the per-scale discriminator register its own options
        subnetD = SPADENLayerDiscriminator
        subnetD.modify_commandline_options(parser, is_train)
        parser.set_defaults(n_layers_D=4)
        return parser

    def __init__(self, opt):
        super().__init__()
        self.opt = opt
        # register one discriminator per scale so their parameters are tracked
        for i in range(opt.num_D):
            subnetD = SPADENLayerDiscriminator(opt)
            self.add_module(('discriminator_%d' % i), subnetD)

    def downsample(self, input):
        """Halve the spatial resolution with a 3x3 / stride-2 average pool."""
        return F.avg_pool2d(input, kernel_size=3, stride=2, padding=[1, 1], count_include_pad=False)

    def forward(self, input):
        """Run every scale's discriminator, downsampling between scales;
        returns the list of per-scale outputs."""
        result = []
        for (name, D) in self.named_children():
            out = D(input)
            result.append(out)
            input = self.downsample(input)
        return result
def generate_signature(features, predictions):
    """Build a TF SavedModel SignatureDef from feature/prediction tensors.

    Args:
        features: dict mapping input names to tensors.
        predictions: dict mapping output names to tensors, or a single tensor
            (wrapped as {'prediction': tensor}).

    Returns:
        The built SignatureDef. As a side effect its serialized form is
        appended to the 'FEATURE_INPUTS' graph collection.

    Raises:
        ValueError: if `features` is not a dict.
    """
    if not isinstance(features, dict):
        # fixed typo: "excepted" -> "expected"
        raise ValueError('generate_signature expected features to be dict, but got %s' % features)
    inputs = {name: utils.build_tensor_info(tensor) for (name, tensor) in features.items()}
    if not isinstance(predictions, dict):
        predictions = {'prediction': predictions}
    outputs = {name: utils.build_tensor_info(tensor) for (name, tensor) in predictions.items()}
    signature = signature_def_utils.build_signature_def(inputs, outputs)
    ops.get_collection_ref('FEATURE_INPUTS').append(signature.SerializeToString())
    return signature
def make_handler(base_url, wiki_version, models, tagger_ner, argss, logger):
    """Build a BaseHTTPRequestHandler subclass wired to the given entity-linking
    models: GET serves a status badge JSON, POST runs mention detection and
    entity linking on the posted document."""
    class GetHandler(BaseHTTPRequestHandler):
        def __init__(self, *args, **kwargs):
            self.model = models
            self.tagger_ner = tagger_ner
            self.argss = argss
            self.logger = logger
            self.base_url = base_url
            self.wiki_version = wiki_version
            # a non-SequenceTagger NER is treated as a custom/external tagger
            self.custom_ner = (not isinstance(tagger_ner, SequenceTagger))
            self.mention_detection = MentionDetection(base_url, wiki_version)
            super().__init__(*args, **kwargs)

        def do_GET(self):
            """Health check: respond with a shields.io-style status JSON."""
            self.send_response(200)
            self.end_headers()
            self.wfile.write(bytes(json.dumps({'schemaVersion': 1, 'label': 'status', 'message': 'up', 'color': 'green'}), 'utf-8'))
            return

        def do_POST(self):
            """Read a JSON payload ({'text': ..., 'spans': ...}) and answer
            with the entity-linking predictions."""
            content_length = int(self.headers['Content-Length'])
            print(content_length)
            post_data = self.rfile.read(content_length)
            self.send_response(200)
            self.end_headers()
            (text, spans) = self.read_json(post_data)
            response = self.generate_response(text, spans)
            print(response)
            print('')
            self.wfile.write(bytes(json.dumps(response), 'utf-8'))
            return

        def read_json(self, post_data):
            """Decode the POST body; returns (text, spans) with HTML-escaped
            ampersands restored."""
            data = json.loads(post_data.decode('utf-8'))
            text = data['text']
            # fixed: was replace('&', '&'), a no-op; the intent is to undo HTML escaping
            text = text.replace('&amp;', '&')
            try:
                spans = [list(d.values()) for d in data['spans']]
            except Exception:
                # spans already given as plain lists (not dicts)
                spans = data['spans']
                pass
            return (text, spans)

        def generate_response(self, text, spans):
            """Run mention detection (or use the caller-provided spans) then
            entity linking; returns the list of results for the document."""
            if (len(text) == 0):
                return []
            if (len(spans) > 0):
                # spans supplied by the caller: skip NER and format them directly
                processed = {API_DOC: [text, spans]}
                (mentions_dataset, total_ment) = self.mention_detection.format_spans(processed)
            else:
                processed = {API_DOC: [text, spans]}
                (mentions_dataset, total_ment) = self.mention_detection.find_mentions(processed, self.tagger_ner)
            data_to_link = []
            temp_m = mentions_dataset[API_DOC]
            for (i, m) in enumerate(temp_m):
                temp = {'id': i, 'label': 'unknown', 'label_id': (- 1), 'context_left': m['context'][0].lower(), 'mention': m['ngram'].lower(), 'context_right': m['context'][1].lower()}
                data_to_link.append(temp)
            (_, _, _, _, _, predictions, scores) = main_dense.run(self.argss, self.logger, *self.model, test_data=data_to_link)
            predictions = {API_DOC: [{'prediction': x[0].replace(' ', '_')} for x in predictions]}
            result = process_results(mentions_dataset, predictions, processed, include_offset=(False if ((len(spans) > 0) or self.custom_ner) else True))
            if (len(result) > 0):
                return [*result.values()][0]
            return []
    return GetHandler
class InProcessCommunicator(Communicator):
    """Communicator whose parties are threads of a single process.

    Tensors are exchanged through one shared Queue ("mailbox") per rank and
    collectives synchronize on a shared Barrier; both are allocated lazily by
    the first instance and shared via class attributes, while each thread
    keeps its own instance in thread-local storage.
    """

    BYTES_PER_ELEMENT = 8
    tls = threading.local()   # per-thread slot holding this thread's instance
    mailbox = None            # list of Queues, one per rank (shared)
    barrier = None            # Barrier sized to world_size (shared)
    lock = threading.Lock()   # guards lazy creation of mailbox/barrier

    @classmethod
    def initialize(cls, rank, world_size, init_ttp=False):
        """Create the communicator for the calling thread and stash it in `tls`."""
        cls.tls.instance = cls(rank, world_size)

    def __init__(self, rank, world_size, init_ttp=False):
        self.world_size = world_size
        self.rank = rank
        self.reset_communication_stats()
        self._name = f'rank{rank}'
        # first thread to arrive allocates the shared queues and barrier
        with InProcessCommunicator.lock:
            if (InProcessCommunicator.mailbox is None):
                InProcessCommunicator.mailbox = [Queue() for _ in range(self.world_size)]
                InProcessCommunicator.barrier = threading.Barrier(self.world_size)
        # temporarily force INFO level so the setup banner is always logged
        level = logging.getLogger().level
        logging.getLogger().setLevel(logging.INFO)
        logging.info('')
        logging.info(('InProcessCommunicator with rank %d' % self.rank))
        logging.info('')
        logging.info(('World size = %d' % self.get_world_size()))
        logging.getLogger().setLevel(level)

    @classmethod
    def get(cls):
        """Return the calling thread's communicator, or None before initialize()."""
        if (not hasattr(cls.tls, 'instance')):
            return None
        return cls.tls.instance

    @classmethod
    def is_initialized(cls):
        """True once initialize() has run on the calling thread."""
        return hasattr(cls.tls, 'instance')

    def send(self, tensor, dst):
        """Deposit a clone of `tensor` (tagged with our rank) in dst's mailbox."""
        self.mailbox[dst].put((self.rank, tensor.clone()))

    def recv(self, tensor, src=None):
        """Blocking receive; messages must arrive in order when `src` is given."""
        (rank, result) = self.mailbox[self.rank].get()
        if ((src is not None) and (rank != src)):
            raise NotImplementedError("Can't receive messages out of order yet")
        return result

    def isend(self, tensor, dst):
        """'Asynchronous' send: the queue put is immediate, so the returned
        handle is always complete."""
        self.send(tensor, dst)

        class Result():
            def is_completed(self):
                return True

            def wait(self):
                pass
        return Result()

    def irecv(self, tensor, src=None):
        """Asynchronous receive: the actual dequeue and copy into `tensor`
        happen in the returned handle's wait()."""
        class Result():
            def __init__(self, mailbox, rank):
                # NOTE(review): `completed` is never set to True by wait() — confirm intended
                self.completed = False
                self.mailbox = mailbox
                self.rank = rank

            def is_completed(self):
                return self.completed

            def wait(self):
                (rank, result) = self.mailbox[self.rank].get()
                if ((src is not None) and (rank != src)):
                    raise NotImplementedError("Can't receive messages out of order yet")
                tensor.copy_(result)
        return Result(self.mailbox, self.rank)

    def scatter(self, scatter_list, src, size=None, async_op=False):
        """Rank `src` deposits one tensor per rank; everyone gets its own."""
        if async_op:
            raise NotImplementedError()
        if (src == self.rank):
            for i in range(self.world_size):
                self.mailbox[i].put(scatter_list[i].clone())
        self.barrier.wait()
        return self.mailbox[self.rank].get()

    def reduce(self, tensor, dst, op=ReduceOp.SUM, async_op=False):
        """Gather to `dst` and reduce there; other ranks return None."""
        tensors = self.gather(tensor, dst)
        if (self.rank == dst):
            reduce_fn = self._reduce_op_to_function(op)
            return reduce_fn(torch.stack(tensors), dim=0)

    @classmethod
    def shutdown(cls):
        """Drop all shared state so a fresh world can be initialized later."""
        cls.tls = threading.local()
        cls.mailbox = None
        cls.barrier = None

    def _reduce_op_to_function(self, op):
        """Map a ReduceOp to the torch reduction implementing it (SUM only)."""
        if (op == ReduceOp.SUM):
            return torch.sum
        raise NotImplementedError()

    def all_reduce(self, tensor, op=ReduceOp.SUM, async_op=False):
        """All-gather then reduce locally, so every rank gets the result."""
        if async_op:
            raise NotImplementedError()
        ag = self.all_gather(tensor)
        reduce_fn = self._reduce_op_to_function(op)
        return reduce_fn(torch.stack(ag), dim=0)

    def gather(self, tensor, dst, async_op=False):
        """Collect every rank's tensor at `dst`, sorted by rank; others get None."""
        if async_op:
            raise NotImplementedError()
        self.mailbox[dst].put((self.rank, tensor.clone()))
        self.barrier.wait()
        if (self.rank == dst):
            result = [self.mailbox[dst].get() for _ in range(self.world_size)]
            return [tensor for (rank, tensor) in sorted(result, key=itemgetter(0))]

    def all_gather(self, tensor, async_op=False):
        """Every rank deposits its tensor in every mailbox; each rank then
        drains its own mailbox and returns the tensors sorted by rank."""
        if async_op:
            raise NotImplementedError()
        for i in range(self.world_size):
            self.mailbox[i].put((self.rank, tensor.clone()))
        self.barrier.wait()
        result = sorted((self.mailbox[self.rank].get() for _ in range(self.world_size)), key=itemgetter(0))
        return [tensor for (rank, tensor) in result]

    def broadcast(self, tensor, src, async_op=False):
        """Rank `src` deposits a clone in every mailbox; all ranks dequeue it."""
        if async_op:
            raise NotImplementedError()
        if (self.rank == src):
            for i in range(self.get_world_size()):
                self.mailbox[i].put(tensor.clone())
        return self.mailbox[self.rank].get()

    def get_world_size(self):
        """Number of parties in this world."""
        return self.world_size

    def get_rank(self):
        """This party's rank."""
        return self.rank

    def set_name(self, name):
        """Assign a human-readable name to this party."""
        assert isinstance(name, str), f'Improper name provided to process on rank {self.get_rank()}'
        self._name = name

    def get_name(self):
        """Human-readable name of this party (defaults to 'rank<N>')."""
        return self._name
def specificity(classify=(lambda document: False), documents=None):
    """Return the specificity TN / (TN + FP) of `classify` over `documents`.

    Args:
        classify: predicate mapping a document to a boolean prediction.
        documents: iterable of (document, gold-label) items accepted by
            confusion_matrix; defaults to an empty collection.

    Returns:
        float in [0, 1]; 0.0 when there are no true negatives (the
        `or 1` guard avoids division by zero).
    """
    # fixed: avoid a mutable default argument
    documents = [] if documents is None else documents
    (TP, TN, FP, FN) = confusion_matrix(classify, documents)
    return (float(TN) / ((TN + FP) or 1))
class GATSummarizeModel(nn.Module):
    """GAT encoder over a batched graph followed by an LSTM decoder that
    greedily emits a token summary sequence (with teacher forcing in train)."""

    def __init__(self, config):
        super(GATSummarizeModel, self).__init__()
        self.config = config
        self.use_nfeat = self.config.node_emb_layer['use_nfeature']
        self.use_cuda = self.config.use_cuda
        self.graph_config = getattr(self.config, 'gat')
        # probability of feeding the gold token (teacher forcing) during training
        self.forcing_ratio = 0.75
        self.in_dim = self.graph_config['in_dim']
        self.out_dim = self.graph_config['out_dim']
        self.vocab_len = self.config.token_vocab_dict.vocab_size()
        if (self.use_nfeat == 'structure'):
            # fallback token embedding, used in greedy_decode when the node
            # embed layer provides no token embedding of its own
            self.embedding = nn.Embedding(self.vocab_len, self.config.word_emb_dims)
        self.node_emb_layer = NodeEmbedFactory().get_node_embed_technique(self.config)(self.config)
        self.gat_layers = nn.ModuleList([GATLayer(config) for _ in range(self.graph_config['layers'])])
        # projections turning encoder features into decoder initial states
        self.g_repr = nn.Linear(self.graph_config['out_dim'], self.graph_config['out_dim'])
        self.node_repr = nn.Linear(self.graph_config['out_dim'], self.graph_config['out_dim'])
        self.hid_fc = nn.Linear(self.graph_config['out_dim'], self.graph_config['out_dim'])
        self.cell_fc = nn.Linear(self.graph_config['out_dim'], self.graph_config['out_dim'])
        self.word_emb_dim = self.config.word_emb_dims
        self.lstm_dims = self.config.lstm['dims']
        self.lstm_dropout = self.config.lstm['dropout']
        self.decoder = DecoderLSTM(self.vocab_len, input_dim=self.config.word_emb_dims, dec_hid_dim=self.lstm_dims, use_cuda=self.use_cuda, bidir=False)

    def forward(self, batch_dict, running_mode, loss_fn):
        """Encode the batched graph and decode a summary.

        Returns (src, model_output, loss): src is '' (unused placeholder),
        model_output is a per-example list of predicted token ids, and loss
        is the summed decoder loss (0 when no target is available).
        """
        g = batch_dict['graphs']
        tgt = to_cuda(batch_dict['tgt_tensors'], use_cuda=self.use_cuda)
        h = to_cuda(g.ndata['node_feat'], self.use_cuda)
        node_len = g.ndata['node_len'].cpu().tolist()
        h = self.node_emb_layer(h, node_len)
        for gat in self.gat_layers:
            h = gat(g, h)
        # mean over dim 1 — presumably averaging GAT attention heads; confirm h is (nodes, heads, dim)
        h = torch.mean(h, dim=1)
        g.ndata['h'] = h
        mean_feats = graph_readout(g, self.graph_config['graph_agg'])
        batch_size = mean_feats.shape[0]
        (graph_repr, node_reprs, hidden, cell) = self.get_representations(g, mean_feats, h)
        (src, model_output, loss) = self.decoding_phase(running_mode, tgt, batch_size, node_reprs, hidden, cell, loss_fn)
        return (src, model_output, loss)

    def decoding_phase(self, running_mode, target, bsz, node_reprs, hidden, cell, loss_fn):
        """Choose the decode length (gold length in train/val, the configured
        maximum in test) and run greedy decoding."""
        if (running_mode in ['train', 'val']):
            tgt = target.transpose(1, 0)
            tgt_len = tgt.size(0)
        else:
            tgt = None
            tgt_len = self.config.max_sequence_length
        (logits, model_output, loss) = self.greedy_decode(bsz, tgt_len, hidden, cell, node_reprs, tgt=tgt, loss_fn=loss_fn, running_mode=running_mode)
        return ('', model_output.transpose(1, 0).tolist(), loss)

    def greedy_decode(self, batch_size, tgt_len, hidden, cell, encoder_outputs, tgt=None, loss_fn=None, running_mode=''):
        """Step the LSTM decoder token by token, starting from token id 0.

        In train mode the gold token is fed back with probability
        forcing_ratio; otherwise the argmax prediction is fed back. Index 0
        of the output tensors stays zero (decoding starts at t=1).
        """
        loss = 0
        if (running_mode == 'test'):
            assert (tgt is None)
        input_tensor = to_cuda(torch.zeros(batch_size).long(), self.use_cuda)
        logits_output = to_cuda(torch.zeros(tgt_len, batch_size, self.vocab_len), self.use_cuda)
        model_output = to_cuda(torch.zeros(tgt_len, batch_size), self.use_cuda)
        (token_emb, node_type_emb) = self.node_emb_layer.get_embedding_layer()
        for t in range(1, tgt_len):
            # prefer the node-embed layer's token embedding, else the local one
            if token_emb:
                embed_itenser = token_emb(input_tensor)
            else:
                embed_itenser = self.embedding(input_tensor)
            (output, attn, hidden, cell) = self.decoder(embed_itenser, hidden, cell, encoder_outputs)
            logits_output[t] = output
            top1 = output.argmax(1)
            model_output[t] = top1
            if (running_mode == 'train'):
                teacher_force = (random.random() < self.forcing_ratio)
                input_tensor = (tgt[t] if teacher_force else top1)
            elif ((running_mode == 'val') or (running_mode == 'test')):
                input_tensor = top1
            if (tgt is not None):
                cur_loss = loss_fn(output, tgt[t])
                loss += cur_loss
        return (logits_output, model_output, loss)

    def get_representations(self, g, mean_feats, h):
        """Project graph/node features into decoder init states via get_reprs."""
        return get_reprs(g, mean_feats, h, self.g_repr, self.node_repr, self.hid_fc, self.cell_fc)
def test_prediction_with_dataframe(model, data_with_covariates):
model.predict(data_with_covariates, fast_dev_run=True) |
def build_densepose_head(cfg: CfgNode, input_channels: int):
    """Instantiate the DensePose head class named by cfg.MODEL.ROI_DENSEPOSE_HEAD.NAME."""
    from .roi_heads.registry import ROI_DENSEPOSE_HEAD_REGISTRY
    head_cls = ROI_DENSEPOSE_HEAD_REGISTRY.get(cfg.MODEL.ROI_DENSEPOSE_HEAD.NAME)
    return head_cls(cfg, input_channels)
class TestJumanjiSpecsToGymSpaces():
    """Tests for specs.jumanji_specs_to_gym_spaces: each jumanji spec type must
    convert to the matching gym space with equal shape/dtype/bounds."""

    def test_array(self) -> None:
        """An unbounded Array becomes a Box spanning (-inf, inf)."""
        jumanji_spec = specs.Array((1, 2), jnp.int32)
        gym_space = gym.spaces.Box((- np.inf), np.inf, (1, 2), jnp.int32)
        converted_spec = specs.jumanji_specs_to_gym_spaces(jumanji_spec)
        assert (type(converted_spec) == type(gym_space))
        assert_trees_all_equal(converted_spec.low, gym_space.low)
        assert_trees_all_equal(converted_spec.high, gym_space.high)
        assert (converted_spec.shape == gym_space.shape)
        assert (converted_spec.dtype == gym_space.dtype)

    def test_bounded_array(self) -> None:
        """A BoundedArray becomes a Box with the same min/max bounds."""
        jumanji_spec = specs.BoundedArray(shape=(1, 2), dtype=jnp.float32, minimum=0.0, maximum=1.0)
        gym_space = gym.spaces.Box(low=0.0, high=1.0, shape=(1, 2), dtype=jnp.float32)
        converted_spec = specs.jumanji_specs_to_gym_spaces(jumanji_spec)
        assert (type(converted_spec) == type(gym_space))
        assert (converted_spec.shape == gym_space.shape)
        assert (converted_spec.dtype == gym_space.dtype)
        assert_trees_all_equal(converted_spec.low, gym_space.low)
        assert_trees_all_equal(converted_spec.high, gym_space.high)

    def test_discrete_array(self) -> None:
        """A DiscreteArray becomes a Discrete space with the same cardinality."""
        jumanji_spec = specs.DiscreteArray(num_values=5, dtype=jnp.int32)
        gym_space = gym.spaces.Discrete(n=5)
        converted_spec = specs.jumanji_specs_to_gym_spaces(jumanji_spec)
        assert (type(converted_spec) == type(gym_space))
        assert (converted_spec.shape == gym_space.shape)
        assert (converted_spec.dtype == gym_space.dtype)
        assert (converted_spec.n == gym_space.n)

    def test_multi_discrete_array(self) -> None:
        """A MultiDiscreteArray becomes a MultiDiscrete with the same nvec."""
        jumanji_spec = specs.MultiDiscreteArray(num_values=jnp.array([5, 6], dtype=jnp.int32))
        gym_space = gym.spaces.MultiDiscrete(nvec=[5, 6])
        converted_spec = specs.jumanji_specs_to_gym_spaces(jumanji_spec)
        assert (type(converted_spec) == type(gym_space))
        assert (converted_spec.shape == gym_space.shape)
        assert (converted_spec.dtype == gym_space.dtype)
        assert jnp.array_equal(converted_spec.nvec, gym_space.nvec)

    def test_triply_nested_spec(self, triply_nested_spec: specs.Spec) -> None:
        """Nested specs convert recursively into nested gym Dict spaces."""
        converted_spec = specs.jumanji_specs_to_gym_spaces(triply_nested_spec)
        assert isinstance(converted_spec, gym.spaces.Dict)
        assert isinstance(converted_spec['doubly_nested'], gym.spaces.Dict)
        assert isinstance(converted_spec['doubly_nested']['singly_nested'], gym.spaces.Dict)
        assert isinstance(converted_spec['doubly_nested']['singly_nested']['array'], gym.spaces.Box)
        assert isinstance(converted_spec['doubly_nested']['singly_nested']['bounded_array'], gym.spaces.Box)
        assert isinstance(converted_spec['doubly_nested']['singly_nested']['multi_discrete_array'], gym.spaces.MultiDiscrete)
        assert isinstance(converted_spec['doubly_nested']['discrete_array'], gym.spaces.Discrete)
        assert isinstance(converted_spec['bounded_array'], gym.spaces.Box)
        assert isinstance(converted_spec['discrete_array'], gym.spaces.Discrete)

    def test_mixed_spec(self, mixed_spec: specs.Spec) -> None:
        """Non-jumanji sub-specs are dropped from the converted Dict."""
        converted_spec = specs.jumanji_specs_to_gym_spaces(mixed_spec)
        assert isinstance(converted_spec, gym.spaces.Dict)
        assert isinstance(converted_spec['singly_nested'], gym.spaces.Dict)
        assert_tree_with_leaves_of_type(converted_spec['singly_nested'].spaces, gym.spaces.Space)
        # the non-jumanji entry exists in the input but converts to an empty space
        assert (not converted_spec['not_jumanji_type'])
        assert mixed_spec['not_jumanji_type']

    def test_not_jumanji_type_spec(self, not_jumanji_type_spec: specs.Spec) -> None:
        """A spec with no jumanji leaves converts to an empty Dict space."""
        converted_spec = specs.jumanji_specs_to_gym_spaces(not_jumanji_type_spec)
        assert isinstance(converted_spec, gym.spaces.Dict)
        assert (not converted_spec)
def exponential_fit(counts, mode, target_day=np.array([1])):
    """Forecast each count series with a simple exponential / Poisson-GLM fit.

    Args:
        counts: sequence of 1-D time series of counts.
        mode: 'eval_mode' (hold out the last target_day[-1] points) or
            'predict_future' (fit on the full series).
        target_day: 1-based day offsets to predict, relative to the end of
            the training window.

    Returns:
        list with one np.array of len(target_day) predictions per series.

    Raises:
        ValueError: if `mode` is neither of the two known modes.
    """
    predicted_counts = []
    for i in range(len(counts)):
        if (mode == 'eval_mode'):
            num_days_back = target_day[(- 1)]
            train_ts = counts[i][:(- num_days_back)]
        elif (mode == 'predict_future'):
            train_ts = counts[i]
        else:
            print('Unknown mode')
            raise ValueError
        # active window starts after the last zero count
        # (assumes a zero exists when the last value is >= 1 — TODO confirm)
        if (train_ts[(- 1)] >= 1):
            start = (np.where((train_ts == 0))[0][(- 1)] + 1)
        else:
            start = len(train_ts)
        active_day = (len(train_ts) - start)
        # fit on at most the 5 most recent active days
        if (active_day > 5):
            active_day = 5
            start = (len(train_ts) - active_day)
        if ((active_day <= 2) or (min(train_ts[start:]) == max(train_ts[start:]))):
            # too little signal, or a flat window: repeat the last value
            predicted_counts.append(np.array(([train_ts[(- 1)]] * len(target_day))))
        elif ((min(train_ts[start:]) > 0) and (min(np.diff(np.log(train_ts[start:]))) == max(np.diff(np.log(train_ts[start:]))))):
            # exactly exponential window: extrapolate the constant growth rate
            rate = ((1.0 * train_ts[(- 1)]) / train_ts[(- 2)])
            predicted_counts.append(np.array((train_ts[(- 1)] * np.array([(rate ** i) for i in target_day]))))
        else:
            # Poisson GLM on (day index, intercept).
            # NOTE(review): freq_weights of 1**i are all ones — a decaying
            # base other than 1 was presumably intended; confirm.
            X_train = np.transpose(np.vstack((np.array(range(active_day)), np.ones(active_day))))
            m = sm.GLM(train_ts[start:], X_train, family=sm.families.Poisson(), freq_weights=np.array([(1 ** i) for i in range(active_day)])[::(- 1)])
            try:
                m = m.fit()
                X_test = np.transpose(np.vstack((((target_day + active_day) - 1), np.ones(len(target_day)))))
                predicted_counts.append(np.array(m.predict(X_test)))
            except PerfectSeparationError as e:
                # fall back to constant-rate extrapolation when the GLM cannot fit
                print('Warning: PerfectSeparationError detected')
                rate = ((1.0 * train_ts[(- 1)]) / train_ts[(- 2)])
                predicted_counts.append(np.array((train_ts[(- 1)] * np.array([(rate ** i) for i in target_day]))))
    return predicted_counts
def string_find(args):
    """Lua-style string.find: 1-based position of `target` in `source`, 0 if absent.

    Params (via functionParams): 'source' and 'target' strings, 'start' as a
    1-based start offset, and 'plain' selecting literal (truthy) vs regex
    search.
    """
    params = functionParams(args, ('source', 'target', 'start', 'plain'))
    source = params.get('source', '')
    pattern = params.get('target', '')
    # fixed: defaults must be strings — these values are prefixed with '0'
    # before int(), so an int default raised TypeError
    start = int('0' + params.get('start', '1')) - 1
    plain = int('0' + params.get('plain', '1'))
    if source == '' or pattern == '':
        return 0
    if plain:
        return source.find(pattern, start) + 1
    # fixed: the old code added 1 to the Match object itself (TypeError);
    # return the 1-based match position, or 0 when there is no match
    match = re.compile(pattern).search(source, start)
    return (match.start() + 1) if match else 0
def stl10_root(_extracted=False):
    """Fixture generator: fabricate a minimal STL10 archive in a temp dir and
    yield (root, info) for testing torchvision.datasets.STL10.

    Patches the STL10 class attributes (train_list / test_list / tgz_md5) so
    the dataset's integrity checks accept the fabricated files; the info dict
    carries the per-fold and per-split image counts plus the archive path.
    """
    CLASS_NAMES = ('airplane', 'bird')
    ARCHIVE_NAME = 'stl10_binary'
    NUM_FOLDS = 10

    def mock_target(attr, partial='torchvision.datasets.stl10.STL10'):
        # dotted path of the STL10 class attribute to patch
        return f'{partial}.{attr}'

    def make_binary_file(num_elements, root, name):
        # write num_elements zero bytes; return (name, md5) for the checksum lists
        file = os.path.join(root, name)
        np.zeros(num_elements, dtype=np.uint8).tofile(file)
        return (name, compute_md5(file))

    def make_image_file(num_images, root, name, num_channels=3, height=96, width=96):
        return make_binary_file((((num_images * num_channels) * height) * width), root, name)

    def make_label_file(num_images, root, name):
        # one label byte per image
        return make_binary_file(num_images, root, name)

    def make_class_names_file(root, name='class_names.txt'):
        # NOTE(review): the loop variable shadows the `name` parameter after the
        # file is opened — harmless here, but confusing
        with open(os.path.join(root, name), 'w') as fh:
            for name in CLASS_NAMES:
                fh.write(f'''{name}
''')

    def make_fold_indices_file(root):
        # fold k gets k+1 consecutive indices; returns the per-fold sizes (1..NUM_FOLDS)
        offset = 0
        with open(os.path.join(root, 'fold_indices.txt'), 'w') as fh:
            for fold in range(NUM_FOLDS):
                line = ' '.join([str(idx) for idx in range(offset, ((offset + fold) + 1))])
                fh.write(f'''{line}
''')
                offset += (fold + 1)
        return tuple(range(1, (NUM_FOLDS + 1)))

    def make_train_files(stack, root, num_unlabeled_images=1):
        num_images_in_fold = make_fold_indices_file(root)
        num_train_images = sum(num_images_in_fold)
        train_list = [list(make_image_file(num_train_images, root, 'train_X.bin')), list(make_label_file(num_train_images, root, 'train_y.bin')), list(make_image_file(1, root, 'unlabeled_X.bin'))]
        mock_class_attribute(stack, target=mock_target('train_list'), new=train_list)
        return (num_images_in_fold, dict(train=num_train_images, unlabeled=num_unlabeled_images))

    def make_test_files(stack, root, num_images=2):
        test_list = [list(make_image_file(num_images, root, 'test_X.bin')), list(make_label_file(num_images, root, 'test_y.bin'))]
        mock_class_attribute(stack, target=mock_target('test_list'), new=test_list)
        return dict(test=num_images)

    def make_archive(stack, root, name):
        # tar the folder and patch the expected md5 to match
        (archive, md5) = make_tar(root, name, name, compression='gz')
        mock_class_attribute(stack, target=mock_target('tgz_md5'), new=md5)
        return archive

    with contextlib.ExitStack() as stack, get_tmp_dir() as root:
        archive_folder = os.path.join(root, ARCHIVE_NAME)
        os.mkdir(archive_folder)
        (num_images_in_folds, num_images_in_split) = make_train_files(stack, archive_folder)
        num_images_in_split.update(make_test_files(stack, archive_folder))
        make_class_names_file(archive_folder)
        archive = make_archive(stack, root, ARCHIVE_NAME)
        # only the tarball should remain on disk for the dataset to extract
        dir_util.remove_tree(archive_folder)
        data = dict(num_images_in_folds=num_images_in_folds, num_images_in_split=num_images_in_split, archive=archive)
        (yield (root, data))
class CIFAR10ReinitServer(ReinitServer):
    """ReinitServer specialization for CIFAR-10 federated experiments."""

    def init_test_loader(self):
        """Create the shared test loader (batches of 1000)."""
        self.test_loader = get_data_loader(EXP_NAME, data_type='test', batch_size=1000, num_workers=8, pin_memory=True)

    def init_clients(self):
        """Shuffle the training indices, split them evenly into `num_slices`
        contiguous slices, and hand every client the same server model."""
        shuffled = torch.randperm(NUM_TRAIN_DATA).tolist()
        slice_len = NUM_TRAIN_DATA // num_slices
        indices = [shuffled[k * slice_len:(k + 1) * slice_len] for k in range(num_slices)]
        models = [self.model] * NUM_CLIENTS
        return (models, indices)
class BitextOutput(object):
    """Parse a fairseq generate/rescore output file into aligned per-sentence
    source / hypothesis / target strings, scores and token lengths.

    When `backwards` is True the model scored the reverse direction, so the
    roles of source and hypothesis are swapped; `right_to_left` reverses
    token order before BPE removal.
    """

    def __init__(self, output_file, backwards, right_to_left, bpe_symbol, prefix_len=None, target_prefix_frac=None, source_prefix_frac=None):
        (source, hypo, score, target, pos_score) = reprocess(output_file)
        # the prefix fraction applies to whichever side holds the hypothesis
        if backwards:
            self.hypo_fracs = source_prefix_frac
        else:
            self.hypo_fracs = target_prefix_frac
        (score, num_bpe_tokens) = get_score_from_pos(pos_score, prefix_len, hypo, bpe_symbol, self.hypo_fracs, backwards)
        source_lengths = {}
        target_lengths = {}
        assert (hypo.keys() == source.keys()), 'key mismatch'
        if backwards:
            # swap roles: the file's hypothesis column is actually the source
            tmp = hypo
            hypo = source
            source = tmp
        for i in source:
            if backwards:
                # when the split length matches count-1, one token (presumably
                # EOS) is dropped from the BPE count — confirm
                len_src = len(source[i][0].split())
                if (len_src == (num_bpe_tokens[i][0] - 1)):
                    source_lengths[i] = (num_bpe_tokens[i][0] - 1)
                else:
                    source_lengths[i] = num_bpe_tokens[i][0]
                target_lengths[i] = len(hypo[i].split())
                source[i] = remove_bpe(source[i][0], bpe_symbol)
                target[i] = remove_bpe(target[i], bpe_symbol)
                hypo[i] = remove_bpe(hypo[i], bpe_symbol)
                score[i] = float(score[i][0])
                pos_score[i] = pos_score[i][0]
            else:
                len_tgt = len(hypo[i][0].split())
                if (len_tgt == (num_bpe_tokens[i][0] - 1)):
                    target_lengths[i] = (num_bpe_tokens[i][0] - 1)
                else:
                    target_lengths[i] = num_bpe_tokens[i][0]
                source_lengths[i] = len(source[i].split())
                if right_to_left:
                    # un-reverse tokens before stripping BPE markers
                    source[i] = remove_bpe(make_right_to_left(source[i]), bpe_symbol)
                    target[i] = remove_bpe(make_right_to_left(target[i]), bpe_symbol)
                    hypo[i] = remove_bpe(make_right_to_left(hypo[i][0]), bpe_symbol)
                    score[i] = float(score[i][0])
                    pos_score[i] = pos_score[i][0]
                else:
                    assert (len(hypo[i]) == 1), 'expected only one hypothesis per source sentence'
                    source[i] = remove_bpe(source[i], bpe_symbol)
                    target[i] = remove_bpe(target[i], bpe_symbol)
                    hypo[i] = remove_bpe(hypo[i][0], bpe_symbol)
                    score[i] = float(score[i][0])
                    pos_score[i] = pos_score[i][0]
        # per-sentence dicts keyed by sentence id, ready for rescoring
        self.rescore_source = source
        self.rescore_hypo = hypo
        self.rescore_score = score
        self.rescore_target = target
        self.rescore_pos_score = pos_score
        self.backwards = backwards
        self.right_to_left = right_to_left
        self.target_lengths = target_lengths
        self.source_lengths = source_lengths
def to_md(comment_dict):
    """Render a parsed docstring dict to a markdown string.

    Recognized keys: 'short_description', 'long_description', 'Args',
    'Attributes', 'Returns' (str or mapping) and 'Example' (str with '<sep>'
    separated snippets). Missing or None sections are skipped.
    """
    doc = ''
    if 'short_description' in comment_dict:
        doc += comment_dict['short_description'] + '\n\n'
    if 'long_description' in comment_dict:
        doc += md_parse_line_break(comment_dict['long_description']) + '\n'
    # Args and Attributes render identically, just under different headers
    doc += _md_named_section(comment_dict, 'Args')
    doc += _md_named_section(comment_dict, 'Attributes')
    if 'Returns' in comment_dict and comment_dict['Returns'] is not None:
        doc += '##### Returns\n'
        returns = comment_dict['Returns']
        if isinstance(returns, str):
            doc += returns + '\n'
        else:
            doc += _md_items(returns)
    if 'Example' in comment_dict and comment_dict['Example'] is not None:
        doc += '##### Example usage\n'
        doc += '```python\n'
        if isinstance(comment_dict['Example'], str):
            for snippet in comment_dict['Example'].split('<sep>'):
                doc += snippet + '\n'
        doc += '```\n'
    return doc


def _md_items(mapping):
    """One '* **name**: description' bullet per item, each followed by a blank line."""
    return ''.join(f'* **{arg}**: {des}\n\n' for (arg, des) in mapping.items())


def _md_named_section(comment_dict, section):
    """Render '##### <section>' plus its bullets, or '' when absent/None."""
    if section not in comment_dict or comment_dict[section] is None:
        return ''
    return f'##### {section}\n' + _md_items(comment_dict[section])
class upBlock(nn.Module):
    """Upsampling block: one stride-2 transposed conv that doubles the spatial
    size, followed by `conv_num` padded stride-1 refinement stages that keep
    the spatial size unchanged.

    Args:
        in_c: number of input channels.
        out_c: number of output channels.
        conv_num: number of extra refinement stages after the upsample.
    """

    def __init__(self, in_c, out_c, conv_num=2):
        super().__init__()
        # removed unused local `layer_length`; loop index was also unused
        refinement = []
        for _ in range(conv_num):
            # pad (l=2, r=1, t=2, b=1) adds 3 to H and W; the k4/s1/p3 deconv
            # removes 3 again, so each refinement stage is size-preserving
            refinement += [
                nn.ConstantPad2d((2, 1, 2, 1), 0),
                nn.ConvTranspose2d(out_c, out_c, kernel_size=4, stride=1, padding=3, bias=False),
                nn.BatchNorm2d(out_c, eps=0.001, momentum=0.001),
                nn.ReLU(inplace=True),
            ]
        self.main = nn.Sequential(
            nn.ConvTranspose2d(in_c, out_c, kernel_size=4, stride=2, padding=1, bias=False),
            nn.BatchNorm2d(out_c, eps=0.001, momentum=0.001),
            nn.ReLU(inplace=True),
            *refinement,
        )

    def forward(self, x):
        """Apply the upsample + refinement pipeline."""
        x = self.main(x)
        return x
def setup_multi_processes(cfg):
    """Configure multiprocessing/threading knobs (start method, OpenCV, OMP
    and MKL thread counts) from the config before dataloader workers spawn."""
    logger = get_root_logger()
    # set the multiprocessing start method (fork is unavailable on Windows)
    if (platform.system() != 'Windows'):
        mp_start_method = cfg.get('mp_start_method', None)
        current_method = mp.get_start_method(allow_none=True)
        if (mp_start_method in ('fork', 'spawn', 'forkserver')):
            logger.info(f'Multi-processing start method `{mp_start_method}` is different from the previous setting `{current_method}`.It will be force set to `{mp_start_method}`.')
            mp.set_start_method(mp_start_method, force=True)
        else:
            logger.info(f'Multi-processing start method is `{mp_start_method}`')
    # cap OpenCV's internal threading when configured
    opencv_num_threads = cfg.get('opencv_num_threads', None)
    if isinstance(opencv_num_threads, int):
        logger.info(f'OpenCV num_threads is `{opencv_num_threads}`')
        cv2.setNumThreads(opencv_num_threads)
    else:
        # NOTE(review): missing closing backtick in this log message
        logger.info(f'OpenCV num_threads is `{cv2.getNumThreads()}')
    # with several dataloader workers per GPU, cap OMP/MKL threading too
    if (cfg.data.workers_per_gpu > 1):
        omp_num_threads = cfg.get('omp_num_threads', None)
        if ('OMP_NUM_THREADS' not in os.environ):
            if isinstance(omp_num_threads, int):
                logger.info(f'OMP num threads is {omp_num_threads}')
                os.environ['OMP_NUM_THREADS'] = str(omp_num_threads)
            else:
                # NOTE(review): this reads OMP_NUM_THREADS inside the branch
                # where it is known to be absent — looks like a KeyError;
                # confirm the intended nesting of this else.
                logger.info(f"OMP num threads is {os.environ['OMP_NUM_THREADS']}")
        if ('MKL_NUM_THREADS' not in os.environ):
            mkl_num_threads = cfg.get('mkl_num_threads', None)
            if isinstance(mkl_num_threads, int):
                logger.info(f'MKL num threads is {mkl_num_threads}')
                os.environ['MKL_NUM_THREADS'] = str(mkl_num_threads)
            else:
                # NOTE(review): same potential KeyError as the OMP branch above
                logger.info(f"MKL num threads is {os.environ['MKL_NUM_THREADS']}")
class MQF2Distribution(Distribution):
    """Multi-horizon quantile function (MQF2) distribution parameterized by a
    partially input-convex network (`picnn`) conditioned on an RNN hidden state.

    The loss is either the energy score (is_energy_score=True) or the negative
    log-likelihood; sampling pushes standard-normal quantile levels through the
    picnn via `quantile`.
    """

    def __init__(self, picnn: torch.nn.Module, hidden_state: torch.Tensor, prediction_length: int, is_energy_score: bool=True, es_num_samples: int=50, beta: float=1.0, threshold_input: float=100.0, validate_args: bool=False) -> None:
        self.picnn = picnn
        self.hidden_state = hidden_state
        self.prediction_length = prediction_length
        self.is_energy_score = is_energy_score
        self.es_num_samples = es_num_samples
        self.beta = beta
        self.threshold_input = threshold_input
        # batch_shape must be a property for this call to receive a Size
        super().__init__(batch_shape=self.batch_shape, validate_args=validate_args)
        self.context_length = (self.hidden_state.shape[(- 2)] if (len(self.hidden_state.shape) > 2) else 1)
        self.numel_batch = self.get_numel(self.batch_shape)
        mu = torch.tensor(0, dtype=hidden_state.dtype, device=hidden_state.device)
        sigma = torch.ones_like(mu)
        self.standard_normal = Normal(mu, sigma)

    def stack_sliding_view(self, z: torch.Tensor) -> torch.Tensor:
        """Unfold z into overlapping windows of prediction_length (step 1) and
        flatten them to (num_windows, prediction_length)."""
        z = z.unfold(dimension=(- 1), size=self.prediction_length, step=1)
        z = z.reshape((- 1), z.shape[(- 1)])
        return z

    def loss(self, z: torch.Tensor) -> torch.Tensor:
        """Energy score or negative log-likelihood of z, per is_energy_score."""
        if self.is_energy_score:
            return self.energy_score(z)
        else:
            return (- self.log_prob(z))

    def log_prob(self, z: torch.Tensor) -> torch.Tensor:
        """Log-density of z, clamped to ±threshold_input for stability."""
        z = torch.clamp(z, min=(- self.threshold_input), max=self.threshold_input)
        z = self.stack_sliding_view(z)
        loss = self.picnn.logp(z, self.hidden_state.reshape((- 1), self.hidden_state.shape[(- 1)]))
        return loss

    def energy_score(self, z: torch.Tensor) -> torch.Tensor:
        """Sample-based energy score of z under the picnn."""
        es_num_samples = self.es_num_samples
        beta = self.beta
        z = self.stack_sliding_view(z)
        reshaped_hidden_state = self.hidden_state.reshape((- 1), self.hidden_state.shape[(- 1)])
        loss = self.picnn.energy_score(z, reshaped_hidden_state, es_num_samples=es_num_samples, beta=beta)
        return loss

    def rsample(self, sample_shape: torch.Size=torch.Size()) -> torch.Tensor:
        """Draw reparameterized samples shaped sample_shape + batch_shape
        + (prediction_length,) by mapping uniform levels through `quantile`."""
        numel_batch = self.numel_batch
        prediction_length = self.prediction_length
        num_samples_per_batch = MQF2Distribution.get_numel(sample_shape)
        num_samples = (num_samples_per_batch * numel_batch)
        hidden_state_repeat = self.hidden_state.repeat_interleave(repeats=num_samples_per_batch, dim=0)
        # clamp away from 0/1 to keep icdf finite
        alpha = torch.rand((num_samples, prediction_length), dtype=self.hidden_state.dtype, device=self.hidden_state.device, layout=self.hidden_state.layout).clamp(min=0.0001, max=(1 - 0.0001))
        samples = self.quantile(alpha, hidden_state_repeat).reshape((((numel_batch,) + sample_shape) + (prediction_length,))).transpose(0, 1)
        return samples

    def quantile(self, alpha: torch.Tensor, hidden_state: Optional[torch.Tensor]=None) -> torch.Tensor:
        """Map quantile levels `alpha` to values, conditioned on `hidden_state`
        (defaults to the distribution's own hidden state)."""
        if (hidden_state is None):
            hidden_state = self.hidden_state
        normal_quantile = self.standard_normal.icdf(alpha)
        if self.is_energy_score:
            result = self.picnn(normal_quantile, context=hidden_state)
        else:
            result = self.picnn.reverse(normal_quantile, context=hidden_state)
        return result

    @staticmethod
    def get_numel(tensor_shape: torch.Size) -> int:
        """Number of elements in a shape (product of its dims).

        Static: it is called both unbound (MQF2Distribution.get_numel(...))
        and through self, so it must not receive an implicit first argument.
        """
        return torch.prod(torch.tensor(tensor_shape)).item()

    @property
    def batch_shape(self) -> torch.Size:
        # one batch entry per hidden-state row; the last dim is the feature dim
        return self.hidden_state.shape[:(- 1)]

    @property
    def event_shape(self) -> Tuple:
        return (self.prediction_length,)

    @property
    def event_dim(self) -> int:
        return 1
def shufflenet_v2_x2_0(pretrained: bool=False, progress: bool=True, **kwargs: Any) -> ShuffleNetV2:
    """Construct a ShuffleNetV2 with the 2.0x channel configuration.

    Args:
        pretrained: load pretrained weights when True.
        progress: show a download progress bar when True.
        **kwargs: forwarded to the ShuffleNetV2 constructor.
    """
    stage_repeats = [4, 8, 4]
    stage_out_channels = [24, 244, 488, 976, 2048]
    return _shufflenetv2('shufflenetv2_x2.0', pretrained, progress, stage_repeats, stage_out_channels, **kwargs)
def resnet101_largefov(x, num_cls, momentum=0.9, eps=1e-05, use_global_stats=False, name=None, lr_mult=10, reuse=None):
    """ResNet-101 backbone with a dilated 3x3 'large field-of-view' classifier head."""
    prefix = name if name is not None else ''
    # Dilated stages keep spatial resolution (output stride 8) for dense prediction.
    features = _Resnet(
        x, (3, 4, 23, 3), (64, 256, 512, 1024, 2048), True,
        momentum, eps, use_global_stats,
        strides=(1, 2, 1, 1), dilates=(1, 1, 2, 4),
        name=prefix, lr_mult=1, reuse=reuse,
    )
    return Conv(features, num_cls, kernel=(3, 3), dilate=(12, 12), pad=(12, 12),
                name=prefix + 'fc1', lr_mult=lr_mult, reuse=reuse)
class nnUNetTrainerV2_warmup(nnUNetTrainerV2):
    """nnUNetTrainerV2 with a 50-epoch linear learning-rate warmup.

    Training is extended to 1050 epochs so the post-warmup schedule still
    spans the usual 1000 epochs.
    """

    def __init__(self, plans_file, fold, output_folder=None, dataset_directory=None, batch_dice=True, stage=None, unpack_data=True, deterministic=True, fp16=False):
        super().__init__(plans_file, fold, output_folder, dataset_directory, batch_dice, stage, unpack_data, deterministic, fp16)
        self.max_num_epochs = 1050

    def maybe_update_lr(self, epoch=None):
        """Linearly ramp the LR over the first 50 epochs, then defer to the parent schedule."""
        if self.epoch < 50:
            # Warmup: fraction of the initial LR grows from 1/50 to 50/50.
            warmup_lr = ((self.epoch + 1) / 50) * self.initial_lr
            self.optimizer.param_groups[0]['lr'] = warmup_lr
            self.print_to_log_file('epoch:', self.epoch, 'lr:', warmup_lr)
        else:
            # Shift the epoch counter so the parent schedule starts at 1.
            shifted = (epoch if epoch is not None else self.epoch) - 49
            assert shifted > 0, 'epoch must be >0'
            return super().maybe_update_lr(shifted)
def _unordered_query_matcher(request1, request2):
if (request1.query == request2.query):
return True
dict1 = dict(request1.query)
dict2 = dict(request2.query)
if (dict1 == dict2):
return True
if (dict1.keys() != dict2.keys()):
return False
for (key, value) in dict1.items():
with suppress(ValueError):
dict1[key] = json.loads(value)
dict2[key] = json.loads(dict2[key])
return (dict1 == dict2) |
class Node():
    """Search-graph node: a discretized pose plus the continuous path samples
    leading to it, with bookkeeping for path reconstruction and cost."""

    def __init__(self, x_ind, y_ind, yaw_ind, direction, x_list, y_list, yaw_list, directions, steer=0.0, parent_index=None, cost=None):
        # Discretized grid indices of the node's pose.
        (self.x_index, self.y_index, self.yaw_index) = (x_ind, y_ind, yaw_ind)
        self.direction = direction
        # Continuous trajectory samples leading to this node.
        (self.x_list, self.y_list, self.yaw_list, self.directions) = (x_list, y_list, yaw_list, directions)
        self.steer = steer
        self.parent_index = parent_index
        self.cost = cost
class MLPDir(BaseDir):
    """Direction predictor implemented as a plain MLP over per-point features."""

    def __init__(self, in_channels, hidden_channels, n_mods, out_channels, **kwargs):
        super().__init__(**kwargs)
        # Six extra input channels are appended to the point features.
        in_channels += 6
        layers = []
        width = in_channels
        for _ in range(n_mods - 1):
            layers.extend([nn.Linear(width, hidden_channels), nn.ReLU()])
            width = hidden_channels
        layers.append(nn.Linear(width, out_channels))
        self.mlp = nn.Sequential(*layers)

    def _net_forward(self, feat, point_key, point_edges):
        # Keys/edges are unused: the MLP acts on each feature row independently.
        return self.mlp(feat)
class ActionHistory(object):
    """Mutable record of actions taken inside one search episode."""

    def __init__(self, history: List[Action], action_space_size: int):
        # Copy so later mutation does not leak back into the caller's list.
        self.history = list(history)
        self.action_space_size = action_space_size

    def clone(self):
        """Return an independent copy of this history."""
        return ActionHistory(self.history, self.action_space_size)

    def add_action(self, action: Action):
        self.history.append(action)

    def last_action(self) -> Action:
        return self.history[-1]

    def action_space(self) -> List[Action]:
        """All actions, indexed 0 .. action_space_size - 1."""
        return [Action(index) for index in range(self.action_space_size)]

    def to_play(self) -> Player:
        # Single-player setting: always the same player.
        return Player()
def remove_page_boundary_lines(docbody):
    """Strip repeated headers, footers and page-break lines from *docbody*.

    Returns *docbody* unchanged when it contains no text.
    """
    if not document_contains_text(docbody):
        return docbody
    breaks = get_page_break_positions(docbody)
    n_header = get_number_header_lines(docbody, breaks)
    n_footer = get_number_footer_lines(docbody, breaks)
    return strip_headers_footers_pagebreaks(docbody, breaks, n_header, n_footer)
def run_app():
    """Launch the Qt application maximized and block until it exits."""
    app = QtWidgets.QApplication(sys.argv)
    main_window = QtWidgets.QMainWindow()
    # Keep a reference so the UI object is not garbage-collected while running.
    ui = Ui_MainWindow(main_window)
    main_window.showMaximized()
    sys.exit(app.exec_())
def model_fn(features, mode, params):
    """Build an EstimatorSpec/TPUEstimatorSpec for fine-tuning a TF-Hub module.

    A zero-initialized linear classification head is trained on top of the
    module's `finetune_layer` representation.

    Args:
        features: dict with 'image' (batch of images) and 'label' (int labels).
        mode: a tf.estimator.ModeKeys value; only TRAIN and EVAL are supported.
        params: dict of settings (hub module path/signature, layer name, class
            count, LR-schedule settings; 'tpu_name' selects the TPU spec type).

    Returns:
        tf.estimator.EstimatorSpec or tf.contrib.tpu.TPUEstimatorSpec.

    Raises:
        ValueError: for an unsupported mode or a representation rank not in {2, 4}.
    """
    hub_module = params['hub_module']
    finetune_layer = params['finetune_layer']
    num_classes = params['num_classes']
    initial_learning_rate = params['initial_learning_rate']
    momentum = params['momentum']
    lr_decay_factor = params['lr_decay_factor']
    decay_steps = params['decay_steps']
    warmup_steps = params['warmup_steps']
    is_training = (mode == tf.estimator.ModeKeys.TRAIN)
    # Legacy TF1 hub modules ship a tfhub_module.pb file; TF2 SavedModels do not.
    module_path = hub.resolve(hub_module)
    is_legacy_hub_module = tf.io.gfile.exists(os.path.join(module_path, 'tfhub_module.pb'))
    if is_legacy_hub_module:
        module = hub.Module(hub_module, trainable=is_training, tags=({'train'} if is_training else None))
        pre_logits = module(features['image'], signature=params['hub_module_signature'], as_dict=True)[finetune_layer]
    else:
        module = hub.load(hub_module)
        # hub.load does not register variables in TF1 collections; add them
        # manually so the optimizer can see them.
        tf.get_collection_ref(tf.GraphKeys.TRAINABLE_VARIABLES).extend(module.trainable_variables)
        pre_logits = module(features['image'], training=is_training)
    num_dim_pre_logits = len(pre_logits.get_shape().as_list())
    if (num_dim_pre_logits == 4):
        # Collapse the 1x1 spatial dimensions of conv-style feature maps.
        pre_logits = tf.squeeze(pre_logits, [1, 2])
    elif (num_dim_pre_logits != 2):
        raise ValueError('Invalid number of dimensions in the representation layer: {}, but only 2 or 4 are allowed'.format(num_dim_pre_logits))
    # Zero-initialized head: training starts from the module's representation alone.
    logits = tf.layers.dense(pre_logits, units=num_classes, kernel_initializer=tf.zeros_initializer(), name='linear_head')
    loss = tf.nn.sparse_softmax_cross_entropy_with_logits(logits=logits, labels=features['label'])
    loss = tf.reduce_mean(loss)
    def accuracy_metric(logits, labels):
        # Metric fn in the (fn, args) format required by TPUEstimatorSpec.
        return {'accuracy': tf.metrics.accuracy(labels=labels, predictions=tf.argmax(logits, axis=(- 1)))}
    eval_metrics = (accuracy_metric, [logits, features['label']])
    if (mode == tf.estimator.ModeKeys.EVAL):
        if (params['tpu_name'] is not None):
            return tf.contrib.tpu.TPUEstimatorSpec(mode=mode, loss=loss, eval_metrics=eval_metrics)
        else:
            # Non-TPU estimators take the already-evaluated metric ops.
            return tf.estimator.EstimatorSpec(mode=mode, loss=loss, eval_metric_ops=eval_metrics[0](*eval_metrics[1]))
    elif (mode == tf.estimator.ModeKeys.TRAIN):
        train_op = trainer.get_train_op(loss, initial_learning_rate, momentum, lr_decay_factor, decay_steps, warmup_steps, use_tpu=(params['tpu_name'] is not None))
        spec_type = (tf.contrib.tpu.TPUEstimatorSpec if (params['tpu_name'] is not None) else tf.estimator.EstimatorSpec)
        return spec_type(mode=mode, loss=loss, train_op=train_op)
    else:
        raise ValueError(('Unsupported mode %s' % mode))
# The bare `.parametrize(...)` lines were syntactically invalid (their
# `@pytest.mark` prefix was lost); restore the decorators.
@pytest.mark.parametrize('experiment_name', ['chem1', 'chem2', 'chem3'])
@pytest.mark.parametrize('model_name', ['compfs1', 'lasso'])
def test_chem(experiment_name: str, model_name: str) -> None:
    """Run every chemistry experiment/model combination through the shared helper."""
    experiment_no = '1'
    experiment_helper(experiment_name=experiment_name, model_name=model_name, experiment_no=experiment_no)
def lstsq(A, b):
    """Least-squares solve of A x = b via the pseudo-inverse, mapped over
    the leading (batch) dimensions of A."""
    batch_dims = A.ndim - 2
    pinv = array_map(np.linalg.pinv, [A], batch_dims)
    return array_map(np.dot, [pinv, b], batch_dims)
def _assert_valid_results(results):
assert isinstance(results, dict)
assert len(results)
model = list(results.keys())[0]
assert ('epochs' in results[model])
assert isinstance(results[model]['epochs'], dict)
assert len(results[model]['epochs']) |
class Ya(BaseBow):
    """A 'ya' bow: weight 1, d7 damage dice, metal material, no hit bonus."""

    def __init__(self):
        super().__init__(
            'ya',
            weight=1,
            damage=D.Dice.from_str('d7'),
            material=M.Metal,
            hit=0,
        )
class LabelEncoder(object):
    """Wrapper around sklearn's LabelEncoder that can grow its class list on
    unseen labels and round-trip its class mapping through JSON.

    When ``try_to_fit_numeric`` is set, classes that parse as numbers are
    re-sorted numerically after fitting (so e.g. '2' sorts before '10').
    """

    def __init__(self, try_to_fit_numeric=False):
        self.lbl = sk_preproc.LabelEncoder()
        self._try_to_fit_numeric = try_to_fit_numeric

    def fit(self, x):
        """Fit on *x*; optionally re-sort classes by their numeric value."""
        self.lbl.fit(x)
        if self._try_to_fit_numeric:
            logger.debug('Try to fit numeric in LabelEncoder')
            try:
                # Map Decimal(value) -> original class, sort by the numeric
                # key, and keep the original (string) representations.
                numeric_map = {Decimal(c): c for c in self.lbl.classes_}
                ordered = dict(sorted(numeric_map.items()))
                self.lbl.classes_ = np.array(list(ordered.values()), dtype=self.lbl.classes_.dtype)
            except Exception:
                # Best effort: non-numeric classes keep sklearn's lexical
                # order. Previously swallowed silently; now logged.
                logger.debug('LabelEncoder numeric sort failed; keeping original order', exc_info=True)

    def transform(self, x):
        """Transform *x*, extending the known classes with any unseen labels."""
        try:
            return self.lbl.transform(x)
        except ValueError:
            # Unseen labels: append them to classes_ and retry once.
            unseen = np.setdiff1d(np.unique(x), self.lbl.classes_)
            self.lbl.classes_ = np.concatenate((self.lbl.classes_, unseen))
            return self.lbl.transform(x)

    def inverse_transform(self, x):
        return self.lbl.inverse_transform(x)

    def to_json(self):
        """Serialize the class -> index mapping with stringified keys."""
        return {str(cl): i for (i, cl) in enumerate(self.lbl.classes_)}

    def from_json(self, data_json):
        """Restore classes_ from a ``to_json`` mapping (keys arrive as strings)."""
        keys = np.array(list(data_json.keys()))
        # Booleans were stringified on save; map them back.
        if ((len(keys) == 2) and ('False' in keys) and ('True' in keys)):
            keys = [False, True]
        self.lbl.classes_ = keys
class EncodingModel(object):
    """A named bundle of features that jointly encode mentions into
    per-feature matrices, with link/no-link constraint rules attached."""

    def __init__(self, feature_list, name, aux, feature_types, centroid_types, must_link_rules, must_not_link_rules):
        self.feature_list = feature_list
        self.name = name
        self.aux = aux
        self.feature_types = feature_types
        self.centroid_types = centroid_types
        self.must_link_rules = must_link_rules
        self.must_not_link_rules = must_not_link_rules
        feature_names = ', '.join(feature.name for feature in feature_list)
        logging.info('Creating model %s with %s features = [%s]', name, len(feature_list), feature_names)

    def encode(self, mentions):
        """Encode *mentions* with every feature.

        Returns a list of tuples:
        (name, is_dense, width, encoding, feature_type, centroid_type).
        """
        encoded = []
        for idx, feature in enumerate(self.feature_list):
            enc = feature.encode(mentions)
            dense = not scipy.sparse.issparse(enc)
            encoded.append((feature.name, dense, enc.shape[1], enc, self.feature_types[idx], self.centroid_types[idx]))
        return encoded
class ResNet_FeatureExtractor(nn.Module):
    """ResNet feature extractor with BasicBlock counts [1, 2, 5, 3]."""

    def __init__(self, input_channel, output_channel=512):
        super().__init__()
        self.ConvNet = ResNet(input_channel, output_channel, BasicBlock, [1, 2, 5, 3])

    def forward(self, input):
        """Run the backbone over *input* and return its feature map."""
        return self.ConvNet(input)
class DiscriminatorModel2(nn.Module):
    """Conditional MNIST discriminator: a flattened image (784) concatenated
    with a 100-dim label embedding is scored as real/fake via a sigmoid."""

    def __init__(self):
        super().__init__()
        self.label_embedding = nn.Embedding(10, 100)
        # 784 flattened pixels + 100-dim label embedding.
        self.hidden_layer1 = nn.Sequential(nn.Linear(784 + 100, 1024), nn.LeakyReLU(0.2), nn.Dropout(0.3))
        self.hidden_layer2 = nn.Sequential(nn.Linear(1024, 512), nn.LeakyReLU(0.2), nn.Dropout(0.3))
        self.hidden_layer3 = nn.Sequential(nn.Linear(512, 256), nn.LeakyReLU(0.2), nn.Dropout(0.3))
        self.hidden_layer4 = nn.Sequential(nn.Linear(256, 1), nn.Sigmoid())

    def forward(self, x, labels):
        """Score (x, labels); the result is moved to the module-level *device*."""
        embedded = self.label_embedding(labels)
        out = torch.cat([x, embedded], 1)
        for layer in (self.hidden_layer1, self.hidden_layer2, self.hidden_layer3, self.hidden_layer4):
            out = layer(out)
        return out.to(device)
class TestOptimizerInterface(TfGraphTestCase):
    """Tests for make_optimizer with both TF1 and torch optimizers.

    NOTE(review): these assertions reach into private attributes (`_name`,
    `_lr`) and rely on isinstance() short-circuiting when handed a
    (type, dict) tuple — both are implementation details.
    """

    def test_tf_make_optimizer_with_type(self):
        """A bare optimizer class plus kwargs yields a configured optimizer."""
        optimizer_type = tf.compat.v1.train.AdamOptimizer
        lr = 0.123
        optimizer = make_optimizer(optimizer_type, learning_rate=lr, name='testOptimizer')
        assert isinstance(optimizer, optimizer_type)
        self.sess.run(tf.compat.v1.global_variables_initializer())
        assert (optimizer._name == 'testOptimizer')
        assert np.allclose(optimizer._lr, lr)

    def test_tf_make_optimizer_with_tuple(self):
        """A (class, kwargs) tuple yields a configured optimizer."""
        lr = 0.123
        optimizer_type = (tf.compat.v1.train.AdamOptimizer, {'learning_rate': lr})
        optimizer = make_optimizer(optimizer_type)
        # isinstance() returns True on the first tuple element and never
        # inspects the (non-type) dict that follows it.
        assert isinstance(optimizer, optimizer_type)
        self.sess.run(tf.compat.v1.global_variables_initializer())
        assert np.allclose(optimizer._lr, lr)

    def test_tf_make_optimizer_raise_value_error(self):
        """Extra kwargs alongside a (class, kwargs) tuple are rejected."""
        lr = 0.123
        optimizer_type = (tf.compat.v1.train.AdamOptimizer, {'learning_rate': lr})
        with pytest.raises(ValueError):
            _ = make_optimizer(optimizer_type, learning_rate=lr)

    def test_torch_make_optimizer_with_type(self):
        """A bare torch optimizer class is bound to the module's parameters."""
        optimizer_type = torch.optim.Adam
        module = torch.nn.Linear(2, 1)
        lr = 0.123
        optimizer = make_optimizer(optimizer_type, module=module, lr=lr)
        assert isinstance(optimizer, optimizer_type)
        assert (optimizer.defaults['lr'] == lr)

    def test_torch_make_optimizer_with_tuple(self):
        """A (class, kwargs) tuple configures the torch optimizer."""
        optimizer_type = (torch.optim.Adam, {'lr': 0.1})
        module = torch.nn.Linear(2, 1)
        optimizer = make_optimizer(optimizer_type, module=module)
        assert isinstance(optimizer, optimizer_type)
        assert (optimizer.defaults['lr'] == optimizer_type[1]['lr'])

    def test_torch_make_optimizer_raise_value_error(self):
        """Extra kwargs with a (class, kwargs) tuple raise for torch too."""
        optimizer_type = (torch.optim.Adam, {'lr': 0.1})
        module = torch.nn.Linear(2, 1)
        with pytest.raises(ValueError):
            _ = make_optimizer(optimizer_type, module=module, lr=0.123)
class CIFDensity(Density):
    """Continuously-indexed flow (CIF) density: a bijection augmented with an
    auxiliary variable u, with generative p(u|z) and approximate posterior
    q(u|x), stacked on top of a (possibly also CIF) prior."""

    def __init__(self, prior, p_u_density, bijection, q_u_density):
        super().__init__()
        self.bijection = bijection
        self.prior = prior
        self.p_u_density = p_u_density
        self.q_u_density = q_u_density

    def p_parameters(self):
        """Parameters of the generative path (bijection, p(u|z), prior's p)."""
        return [*self.bijection.parameters(), *self.p_u_density.parameters(), *self.prior.p_parameters()]

    def q_parameters(self):
        """Parameters of the inference path; the bijection's parameters are
        included only when the prior itself carries q-parameters."""
        result = list(self.q_u_density.parameters())
        prior_q_params = list(self.prior.q_parameters())
        result += prior_q_params
        if prior_q_params:
            result += list(self.bijection.parameters())
        return result

    def _elbo(self, x, detach_q_params, detach_q_samples):
        """One-sample ELBO terms at x.

        Returns a dict with 'log-p' (generative terms), 'log-q' (inference
        terms), plus sub-results for diagnostics.
        """
        result = self.q_u_density.sample(cond_inputs=x, detach_params=detach_q_params, detach_samples=detach_q_samples)
        u = result['sample']
        log_q_u = result['log-prob']
        result = self.bijection.x_to_z(x, u=u)
        z = result['z']
        log_jac = result['log-jac']
        result = self.p_u_density.log_prob(inputs=u, cond_inputs=z)
        log_p_u = result['log-prob']
        prior_dict = self.prior('elbo', z, detach_q_params=detach_q_params, detach_q_samples=detach_q_samples)
        # NOTE(review): `result` was last rebound to the p_u_density.log_prob
        # dict, so 'bijection-info' below carries that dict rather than the
        # x_to_z output — confirm whether this is intended.
        return {'log-p': ((log_jac + log_p_u) + prior_dict['log-p']), 'log-q': (log_q_u + prior_dict['log-q']), 'bijection-info': result, 'prior-dict': prior_dict}

    def _fix_random_u(self):
        """Sample one u, freeze it into the bijection, and recurse into the prior.

        Returns the frozen FlowDensity plus the pushed-forward latent.
        """
        (fixed_prior, z) = self.prior._fix_random_u()
        z = z.unsqueeze(0)
        u = self.p_u_density.sample(z)['sample']
        fixed_bijection = self.bijection.condition(u.squeeze(0))
        new_z = fixed_bijection.z_to_x(z)['x'].squeeze(0)
        return (FlowDensity(prior=fixed_prior, bijection=fixed_bijection), new_z)

    def fix_u(self, u):
        """Freeze a given stack of u values (u[0] for this layer, u[1:] for the prior)."""
        fixed_prior = self.prior.fix_u(u=u[1:])
        fixed_bijection = self.bijection.condition(u[0])
        return FlowDensity(prior=fixed_prior, bijection=fixed_bijection)

    def _sample(self, num_samples):
        """Ancestral sampling: z ~ prior, u ~ p(u|z), x = f(z; u)."""
        z = self.prior.sample(num_samples)
        u = self.p_u_density.sample(z)['sample']
        return self.bijection.z_to_x(z, u=u)['x']

    def _fixed_sample(self, noise):
        """Like _sample, but the prior is driven by externally supplied noise."""
        z = self.prior.fixed_sample(noise=noise)
        u = self.p_u_density.sample(z)['sample']
        return self.bijection.z_to_x(z, u=u)['x']
def test_uniform_range_as_range():
    """Uniform((lo, hi)) must sample within [0, 1] and roughly span the interval."""
    from lasagne.init import Uniform
    draws = Uniform((0.0, 1.0)).sample((300, 400))
    assert draws.shape == (300, 400)
    # With 120k samples, the extremes should land close to the bounds.
    assert 0.0 <= draws.min() < 0.1
    assert 0.9 < draws.max() <= 1.0
def _demo_mm_inputs(input_shape, num_classes):
(N, C, H, W) = input_shape
rng = np.random.RandomState(0)
imgs = rng.rand(*input_shape)
segs = rng.randint(low=0, high=(num_classes - 1), size=(N, 1, H, W)).astype(np.uint8)
img_metas = [{'img_shape': (H, W, C), 'ori_shape': (H, W, C), 'pad_shape': (H, W, C), 'filename': '<demo>.png', 'scale_factor': 1.0, 'flip': False} for _ in range(N)]
mm_inputs = {'imgs': torch.FloatTensor(imgs).requires_grad_(True), 'img_metas': img_metas, 'gt_semantic_seg': torch.LongTensor(segs)}
return mm_inputs |
def resnet_l4(relu_end=True):
    """Return layer4 of a pretrained ResNet-101 with its stride collapsed to 1.

    When *relu_end* is False, the final block's trailing ReLU is disabled.
    """
    backbone = resnet101(pretrained=True)
    layer4 = backbone.layer4
    if not relu_end:
        layer4[-1].relu_end = False
    # Keep spatial resolution: undo the stride-2 downsampling in the first block.
    layer4[0].conv2.stride = (1, 1)
    layer4[0].downsample[0].stride = (1, 1)
    return layer4
class ImageNetSR(Dataset):
    """ImageNet super-resolution dataset: HR crops paired with degraded LR copies.

    Each item carries 'image' and 'LR_image', both float32 scaled to [-1, 1].
    """

    def __init__(self, size=None, degradation=None, downscale_f=4, min_crop_f=0.5, max_crop_f=1.0, random_crop=True):
        """
        Args:
            size: output HR side length; must be a multiple of downscale_f.
            degradation: 'bsrgan', 'bsrgan_light', or an interpolation key
                ('cv_*' for OpenCV, 'pil_*' for PIL) for plain downscaling.
            downscale_f: HR -> LR downscale factor.
            min_crop_f, max_crop_f: crop side as a fraction of the shorter
                image side (max_crop_f must be <= 1).
            random_crop: random crop when True, otherwise center crop.
        """
        self.base = self.get_base()
        assert size
        assert (size / downscale_f).is_integer()
        self.size = size
        self.LR_size = int((size / downscale_f))
        self.min_crop_f = min_crop_f
        self.max_crop_f = max_crop_f
        assert (max_crop_f <= 1.0)
        self.center_crop = (not random_crop)
        self.image_rescaler = albumentations.SmallestMaxSize(max_size=size, interpolation=cv2.INTER_AREA)
        self.pil_interpolation = False
        if (degradation == 'bsrgan'):
            self.degradation_process = partial(degradation_fn_bsr, sf=downscale_f)
        elif (degradation == 'bsrgan_light'):
            self.degradation_process = partial(degradation_fn_bsr_light, sf=downscale_f)
        else:
            # Plain interpolation-based downscaling; backend chosen by key prefix.
            interpolation_fn = {'cv_nearest': cv2.INTER_NEAREST, 'cv_bilinear': cv2.INTER_LINEAR, 'cv_bicubic': cv2.INTER_CUBIC, 'cv_area': cv2.INTER_AREA, 'cv_lanczos': cv2.INTER_LANCZOS4, 'pil_nearest': PIL.Image.NEAREST, 'pil_bilinear': PIL.Image.BILINEAR, 'pil_bicubic': PIL.Image.BICUBIC, 'pil_box': PIL.Image.BOX, 'pil_hamming': PIL.Image.HAMMING, 'pil_lanczos': PIL.Image.LANCZOS}[degradation]
            self.pil_interpolation = degradation.startswith('pil_')
            if self.pil_interpolation:
                self.degradation_process = partial(TF.resize, size=self.LR_size, interpolation=interpolation_fn)
            else:
                self.degradation_process = albumentations.SmallestMaxSize(max_size=self.LR_size, interpolation=interpolation_fn)

    def __len__(self):
        return len(self.base)

    def __getitem__(self, i):
        example = self.base[i]
        image = Image.open(example['file_path_'])
        if (not (image.mode == 'RGB')):
            image = image.convert('RGB')
        image = np.array(image).astype(np.uint8)
        # Crop side: a random fraction of the shorter image side.
        min_side_len = min(image.shape[:2])
        crop_side_len = (min_side_len * np.random.uniform(self.min_crop_f, self.max_crop_f, size=None))
        crop_side_len = int(crop_side_len)
        if self.center_crop:
            self.cropper = albumentations.CenterCrop(height=crop_side_len, width=crop_side_len)
        else:
            self.cropper = albumentations.RandomCrop(height=crop_side_len, width=crop_side_len)
        image = self.cropper(image=image)['image']
        image = self.image_rescaler(image=image)['image']
        if self.pil_interpolation:
            # PIL path works on PIL.Image objects rather than numpy arrays.
            image_pil = PIL.Image.fromarray(image)
            LR_image = self.degradation_process(image_pil)
            LR_image = np.array(LR_image).astype(np.uint8)
        else:
            LR_image = self.degradation_process(image=image)['image']
        # Scale uint8 [0, 255] to float32 [-1, 1].
        example['image'] = ((image / 127.5) - 1.0).astype(np.float32)
        example['LR_image'] = ((LR_image / 127.5) - 1.0).astype(np.float32)
        return example
class MolDataset(Dataset):
    """Dataset of pickled (ligand, ligand-UFF, protein, interaction) tuples,
    keyed by file name under *data_dir*."""

    def __init__(self, keys, data_dir, id_to_y, random_rotation=0.0, pos_noise_std=0.0):
        self.keys = keys
        self.data_dir = data_dir
        self.id_to_y = id_to_y  # key -> affinity label
        self.random_rotation = random_rotation
        self.pos_noise_std = pos_noise_std
        # Standard residue names (including the ambiguous ASX/GLX codes).
        self.amino_acids = ['ALA', 'ARG', 'ASN', 'ASP', 'ASX', 'CYS', 'GLU', 'GLN', 'GLX', 'GLY', 'HIS', 'ILE', 'LEU', 'LYS', 'MET', 'PHE', 'PRO', 'SER', 'THR', 'TRP', 'TYR', 'VAL']

    def __len__(self):
        return len(self.keys)

    def __getitem__(self, idx):
        key = self.keys[idx]
        with open(self.data_dir + '/' + key, 'rb') as handle:
            (m1, m1_uff, m2, interaction_data) = pickle.load(handle)
        sample = mol_to_feature(m1, m1_uff, m2, interaction_data, self.pos_noise_std)
        # Scale the label by -1.36 — presumably a pK -> binding-energy
        # conversion; confirm with the training objective.
        sample['affinity'] = self.id_to_y[key] * (-1.36)
        sample['key'] = key
        return sample
def calc_prf(match, gold, test):
    """Compute (precision, recall, f-score) from match/gold/test counts.

    Degenerate cases keep the original convention:
      * gold == 0 and test == 0 -> perfect (1, 1, 1)
      * gold == 0 and test > 0  -> precision 0, recall 1, f-score 0
      * test == 0 or match == 0 -> all zeros
    """
    if gold == 0:
        # Nothing to find: finding nothing is perfect; anything else is spurious.
        return (1.0, 1.0, 1.0) if test == 0 else (0.0, 1.0, 0.0)
    if test == 0 or match == 0:
        return (0.0, 0.0, 0.0)
    precision = match / float(test)
    recall = match / float(gold)
    # gold > 0 and test > 0 here, so test + gold can never be zero: the
    # original ZeroDivisionError handler was unreachable and has been removed.
    fscore = (2 * match) / float(test + gold)
    return (precision, recall, fscore)
def create_weightspace(value):
    """Draw ``value['num']`` non-negative 2-decimal weights summing to 1.0.

    The first ``num - 1`` weights are rejection-sampled so the running total
    stays below 1; the last weight is the rounded remainder. The random call
    sequence is unchanged from the original implementation.
    """
    num = value['num']
    space = []
    total = 0  # running sum; renamed from `sum`, which shadowed the builtin
    rand = 1
    for _ in range(num - 1):
        # Resample until the candidate keeps the running total below 1.
        while (total + rand) >= 1:
            rand = round(np.random.rand(), 2)
        space.append(rand)
        total += rand
        rand = 1
    space.append(round(1 - total, 2))
    return space
class AnyOf(SymbolMatcher):
    """Matches a symbol when at least one sub-matcher does, merging all bindings.

    On key collisions, later matchers' bindings overwrite earlier ones; the
    matched symbol itself is always bound under this matcher's name.
    """

    def __init__(self, bind_name: str, matchers: List[SymbolMatcher]) -> None:
        super().__init__(bind_name)
        self.matchers = matchers

    def matches(self, sym: Symbol) -> Optional[Dict[(str, Any)]]:
        merged = None
        for candidate in self.matchers:
            sub_bindings = candidate.matches(sym)
            if sub_bindings is None:
                continue
            if merged is None:
                merged = {}
            merged.update(sub_bindings)
            merged[self.bind_name] = sym
        # None signals "no sub-matcher matched".
        return merged
class SelectPercentileClassificationTest(unittest.TestCase):
    """Tests for the SelectPercentileClassification preprocessor (the default
    percentile keeps half of the input features)."""

    def _default_preprocessor(self, skip_none):
        """Instantiate the preprocessor from the default configuration.

        skip_none: drop hyperparameters whose default is None (matches the
        construction originally used in test_default_configuration).
        """
        configuration_space = SelectPercentileClassification.get_hyperparameter_search_space()
        default = configuration_space.get_default_configuration()
        if skip_none:
            kwargs = {hp_name: default[hp_name] for hp_name in default if (default[hp_name] is not None)}
        else:
            kwargs = {hp_name: default[hp_name] for hp_name in default}
        return SelectPercentileClassification(random_state=1, **kwargs)

    def test_default_configuration(self):
        """Default config halves the feature count on dense, sparse, and scaled data."""
        (transformation, original) = _test_preprocessing(SelectPercentileClassification)
        self.assertEqual(transformation.shape[0], original.shape[0])
        self.assertEqual(transformation.shape[1], int(original.shape[1] / 2))
        self.assertFalse((transformation == 0).all())
        # Sparse input path.
        (transformation, original) = _test_preprocessing(SelectPercentileClassification, make_sparse=True)
        self.assertTrue(scipy.sparse.issparse(transformation))
        self.assertEqual(transformation.shape[0], original.shape[0])
        self.assertEqual(transformation.shape[1], int(original.shape[1] / 2))
        # Standard-scaled dense input path.
        (X_train, Y_train, X_test, Y_test) = get_dataset(dataset='digits')
        original_X_train = X_train.copy()
        ss = sklearn.preprocessing.StandardScaler()
        X_train = ss.fit_transform(X_train)
        preprocessor = self._default_preprocessor(skip_none=True)
        transformer = preprocessor.fit(X_train, Y_train)
        (transformation, original) = (transformer.transform(X_train), original_X_train)
        self.assertEqual(transformation.shape[0], original.shape[0])
        self.assertEqual(transformation.shape[1], int(original.shape[1] / 2))

    def _check_dtype(self, dtype, make_sparse, cast):
        """Fit/transform iris and assert the dtype is preserved end-to-end.

        cast: astype the training data first; otherwise assert that the
        dataset already arrives with the expected dtype (this mirrors the
        four duplicated stanzas that this helper replaces).
        """
        (X_train, Y_train, X_test, Y_test) = get_dataset('iris', make_sparse=make_sparse)
        if cast:
            X_train = X_train.astype(dtype)
        else:
            self.assertEqual(X_train.dtype, dtype)
        preprocessor = self._default_preprocessor(skip_none=False)
        preprocessor.fit(X_train, Y_train)
        Xt = preprocessor.transform(X_train)
        self.assertEqual(Xt.dtype, dtype)

    def test_preprocessing_dtype(self):
        """float32/float64 x dense/sparse are all preserved through the transform."""
        for (dtype, make_sparse, cast) in [(np.float32, False, False), (np.float64, False, True), (np.float32, True, False), (np.float64, True, True)]:
            self._check_dtype(dtype, make_sparse, cast)
def main():
    """Convert a split's .txt annotation list into MXNet .lst format.

    Reads '{data_dir}/split{n}/<stem>.txt' whose lines are
    'path\\tlabel1\\tlabel2' and writes '{out_dir}/split{n}/<stem>.lst'
    with 'label1\\tlabel2\\tpath'. The stem depends on args.data_name and
    args.h_flip, exactly as the original eight copy-pasted branches did.

    Raises:
        ValueError: for an unsupported args.data_name.
    """
    # Base file stem per dataset part ('train' files carry a '_224' suffix).
    base_stems = {'train': 'train_224', 'test': 'test', 'Gallery': 'Gallery', 'Probe': 'Probe'}
    if args.data_name not in base_stems:
        raise ValueError('do not support {} yet'.format(args.data_name))
    stem = base_stems[args.data_name]
    if args.h_flip == 1:
        stem += '_h_flip'
    in_path = '{}/split{}/{}.txt'.format(args.data_dir, args.split_num, stem)
    out_path = '{}/split{}/{}.lst'.format(args.out_dir, args.split_num, stem)
    with open(in_path, 'r') as f:
        lines = f.readlines()
    # The output file is now closed deterministically via `with`.
    with open(out_path, 'w') as f_:
        for line in lines:
            xx = line.split('\t')
            xx[2] = xx[2].strip('\n').strip('\r')
            f_.write(('%d\t%d\t%s\n' % (int(xx[1]), int(xx[2]), xx[0])))
class RotationTransform():
    """Callable transform that rotates an image by a fixed angle (zero fill)."""

    def __init__(self, angle):
        # Rotation angle in degrees, fixed at construction time.
        self.angle = angle

    def __call__(self, x):
        return TorchVisionFunc.rotate(x, self.angle, fill=(0,))
def explained_variance_1d(ypred, y, valids=None):
    """Fraction of variance of *y* explained by *ypred*:
    1 - Var(y - ypred) / Var(y).

    If *valids* is given, only entries with a truthy mask value are scored.
    Degenerate case: a (numerically) constant *y* yields 1 when *ypred* is
    also constant, else 0.
    """
    if valids is not None:
        # np.bool was removed in NumPy 1.24; the builtin bool is equivalent.
        mask = valids.astype(bool)
        ypred = ypred[mask]
        y = y[mask]
    assert (y.ndim == 1) and (ypred.ndim == 1)
    vary = np.var(y)
    if np.isclose(vary, 0):
        # Constant target: any varying prediction explains nothing.
        if np.var(ypred) > 0:
            return 0
        return 1
    return 1 - (np.var(y - ypred) / (vary + 1e-08))
def evaluate(net_apply, params, net_state, train_set, test_set, predict_fn, metrics_fns, log_prior_fn):
    """Run predict_fn on both splits and compute metrics for each.

    Returns (net_state, test_predictions, train_predictions, test_stats,
    train_stats); train_stats additionally carries the log-prior of params.
    """
    # NOTE(review): onp.asarray is applied to the whole (net_state, predictions)
    # pair before unpacking; presumably only the predictions were meant to be
    # converted — confirm against predict_fn's return type.
    (net_state, test_predictions) = onp.asarray(predict_fn(net_apply, params, net_state, test_set))
    (net_state, train_predictions) = onp.asarray(predict_fn(net_apply, params, net_state, train_set))
    test_stats = train_utils.evaluate_metrics(test_predictions, test_set[1], metrics_fns)
    train_stats = train_utils.evaluate_metrics(train_predictions, train_set[1], metrics_fns)
    train_stats['prior'] = log_prior_fn(params)
    return (net_state, test_predictions, train_predictions, test_stats, train_stats)
def load_data(data_dir, partition, url):
    """Download (if needed) the ModelNet40 HDF5 archive and load one partition.

    Returns (points, labels) concatenated across all matching .h5 shards.
    """
    download_and_extract_archive(url, data_dir)
    pattern = os.path.join(data_dir, 'modelnet40_ply_hdf5_2048', ('ply_data_%s*.h5' % partition))
    data_parts = []
    label_parts = []
    for h5_name in glob.glob(pattern):
        with h5py.File(h5_name, 'r') as f:
            data_parts.append(f['data'][:].astype('float32'))
            label_parts.append(f['label'][:].astype('int64'))
    all_data = np.concatenate(data_parts, axis=0)
    all_label = np.concatenate(label_parts, axis=0).squeeze(-1)
    return (all_data, all_label)
class QXIconButton(QPushButton):
    """Icon-only button with an optional global keyboard shortcut and
    key-repeat behavior: the first press fires immediately, then the action
    auto-repeats while the button/key is held."""

    def __init__(self, icon, tooltip=None, shortcut=None, click_func=None, first_repeat_delay=300, repeat_delay=20):
        """
        Args:
            icon: QIcon to display.
            tooltip: hover text; the shortcut is appended when one is given.
            shortcut: key-sequence string registered on the main window.
            click_func: zero-argument callable fired on click / key repeat.
            first_repeat_delay: ms before auto-repeat starts.
            repeat_delay: ms between repeats once auto-repeat has started.
        """
        super().__init__(icon, '')
        self.setIcon(icon)
        if (shortcut is not None):
            tooltip = f"{tooltip} ( {StringsDB['S_HOT_KEY']}: {shortcut} )"
        self.setToolTip(tooltip)
        self.seq = (QKeySequence(shortcut) if (shortcut is not None) else None)
        # Global key listeners so the shortcut works regardless of focus.
        QXMainWindow.inst.add_keyPressEvent_listener(self.on_keyPressEvent)
        QXMainWindow.inst.add_keyReleaseEvent_listener(self.on_keyReleaseEvent)
        self.click_func = click_func
        self.first_repeat_delay = first_repeat_delay
        self.repeat_delay = repeat_delay
        self.repeat_timer = None
        self.op_device = None
        self.pressed.connect((lambda : self.action(is_pressed=True)))
        self.released.connect((lambda : self.action(is_pressed=False)))

    def action(self, is_pressed=None, op_device=None):
        """Fire click_func and manage the auto-repeat timer.

        Called with is_pressed=True/False on press/release, and with
        is_pressed=None by the repeat timer itself.
        """
        if (self.click_func is None):
            return
        if (is_pressed is not None):
            if is_pressed:
                if (self.repeat_timer is None):
                    self.click_func()
                    self.repeat_timer = QTimer()
                    self.repeat_timer.timeout.connect(self.action)
                    self.repeat_timer.start(self.first_repeat_delay)
            elif (self.repeat_timer is not None):
                self.repeat_timer.stop()
                self.repeat_timer = None
        else:
            self.click_func()
        # After the first timeout, switch to the faster repeat interval.
        if (self.repeat_timer is not None):
            self.repeat_timer.setInterval(self.repeat_delay)

    def on_keyPressEvent(self, ev):
        """Treat the configured shortcut key going down as a button press."""
        key = ev.nativeVirtualKey()
        if ev.isAutoRepeat():
            # OS-level auto-repeat is ignored; repetition is timer-driven.
            return
        if (self.seq is not None):
            if (key == self.seq[0]):
                self.action(is_pressed=True)

    def on_keyReleaseEvent(self, ev):
        """Stop repeating when the shortcut key is released."""
        key = ev.nativeVirtualKey()
        if ev.isAutoRepeat():
            return
        if (self.seq is not None):
            if (key == self.seq[0]):
                self.action(is_pressed=False)
class FlaxUpsample2D(nn.Module):
    """2x nearest-neighbor upsampling followed by a 3x3 same-padding convolution."""
    out_channels: int
    dtype: jnp.dtype = jnp.float32

    def setup(self):
        self.conv = nn.Conv(self.out_channels, kernel_size=(3, 3), strides=(1, 1), padding=((1, 1), (1, 1)), dtype=self.dtype)

    def __call__(self, hidden_states):
        batch, height, width, channels = hidden_states.shape
        # Double the spatial dimensions, then mix channels with the conv.
        upsampled = jax.image.resize(hidden_states, shape=(batch, height * 2, width * 2, channels), method='nearest')
        return self.conv(upsampled)
def load_tf_linear(weights, layer):
    """Copy TF-format weights (and optional bias) into a torch linear *layer* in place.

    *weights* is either a raw weight array or a [weight, bias] pair; the
    weight matrix is transposed from TF's (in, out) layout into torch's
    (out, in) layout before being viewed into the layer's shape.
    """
    if isinstance(weights, list):
        if len(weights) == 2:
            layer.bias.data = torch.tensor(weights[1]).view(layer.bias.data.shape)
        weights = weights[0]
    layer.weight.data = torch.tensor(weights).transpose(-1, 0).view(layer.weight.data.shape)
class Model(object):
    def __init__(self, config, scope, emb_mat, rep=True):
        """Build placeholders and the full graph for one model replica.

        Args:
            config: experiment configuration (sizes, flags, mode, ...).
            scope: variable-scope name used to collect this replica's summaries.
            emb_mat: pretrained word-embedding matrix.
            rep: build the variable EMA (the "representative" replica does).
        """
        self.scope = scope
        self.config = config
        self.emb_mat = emb_mat
        self.global_step = tf.get_variable('global_step', shape=[], dtype='int32', initializer=tf.constant_initializer(0), trainable=False)
        # Shape symbols: N=batch, M=#sents, JX=sent len, JQ=question len,
        # VW/VC=word/char vocab sizes, W=max word length.
        if (config.split_supports is True):
            # Supports are flattened into a single "paragraph" per example.
            (N, M, JX, JQ, VW, VC, W) = (config.batch_size, 1, config.max_para_size, config.max_ques_size, config.word_vocab_size, config.char_vocab_size, config.max_word_size)
            self.x = tf.placeholder('int32', [None, None, None], name='x')
            self.cx = tf.placeholder('int32', [None, None, None, W], name='cx')
            self.x_mask = tf.placeholder('bool', [None, None, None], name='x_mask')
            self.x_sents_len = tf.placeholder('int32', [None, M, 10], name='x_sents_len')
        else:
            (N, M, JX, JQ, VW, VC, W) = (config.batch_size, config.max_num_sents, config.max_sent_size, config.max_ques_size, config.word_vocab_size, config.char_vocab_size, config.max_word_size)
            self.x = tf.placeholder('int32', [N, None, None], name='x')
            self.cx = tf.placeholder('int32', [N, None, None, W], name='cx')
            self.x_mask = tf.placeholder('bool', [N, None, None], name='x_mask')
        # Question word/char ids, plus question-subject variants and masks.
        self.q = tf.placeholder('int32', [N, None], name='q')
        self.cq = tf.placeholder('int32', [N, None, W], name='cq')
        self.q_sub = tf.placeholder('int32', [N, None], name='q_sub')
        self.cq_sub = tf.placeholder('int32', [N, None, W], name='cq_sub')
        self.q_mask = tf.placeholder('bool', [N, None], name='q_mask')
        self.q_sub_mask = tf.placeholder('bool', [N, None], name='q_sub_mask')
        # Answer targets: y/y2 presumably span start/end masks and wy the
        # answer-word mask — confirm against the loss construction.
        self.y = tf.placeholder('bool', [N, None, None], name='y')
        self.y2 = tf.placeholder('bool', [N, None, None], name='y2')
        self.wy = tf.placeholder('bool', [N, None, None], name='wy')
        self.is_train = tf.placeholder('bool', [], name='is_train')
        self.new_emb_mat = tf.placeholder('float', [None, config.word_emb_size], name='new_emb_mat')
        self.na = tf.placeholder('bool', [N], name='na')  # presumably "no answer" flag — confirm
        if ((config.reasoning_layer is not None) and (config.mac_prediction == 'candidates')):
            # Candidate-answer mode: spans, gold candidate index, overflow counts.
            self.candidate_spans = tf.placeholder('int32', [N, None, None, 2], name='cand_spans')
            self.candidate_span_y = tf.placeholder('int32', [N, None], name='cand_span_y')
            self.num_exceed_cand = tf.placeholder('int32', [N, None, None], name='num_exceed_cand')
        self.x_group = tf.placeholder('int32', [N], name='x_group')
        if config.supervise_first_doc:
            self.first_doc_ids = tf.placeholder('int32', [N], name='first_doc_ids')
        if config.use_assembler:
            self.selected_sent_ids = tf.placeholder('int32', [config.batch_size, config.num_hops], name='selected_sent_ids')
        self.answer_doc_ids = tf.placeholder('int32', [N, None], name='answer_doc_ids')
        self.answer_word_ids = tf.placeholder('int32', [N, None], name='answer_word_ids')
        self.period_id = None
        self.tensor_dict = {}
        # Outputs populated later by _build_forward/_build_loss.
        self.logits = None
        self.yp = None
        self.var_list = None
        self.na_prob = None
        self.loss = None
        self._build_forward()
        self._build_loss()
        self.var_ema = None
        if rep:
            self._build_var_ema()
        if (config.mode == 'train'):
            self._build_ema()
        # NOTE(review): the merge_all result is immediately overwritten by the
        # scoped merge on the next line — the first assignment looks dead.
        self.summary = tf.summary.merge_all()
        self.summary = tf.summary.merge(tf.get_collection('summaries', scope=self.scope))
    def _build_forward(self):
        """Build the forward graph.

        Pipeline: char/word embeddings -> highway network -> BiLSTM encoders
        for context and query -> bi-attention -> either a MAC-style reasoning
        layer or BiDAF modeling layers -> answer logits.  Results are stored
        on self (``self.logits`` / ``self.yp``; span modes also set
        ``self.logits2`` / ``self.yp2`` / ``self.wyp``).  Intermediate tensors
        go into ``self.tensor_dict`` for debugging.
        """
        config = self.config
        # N: batch, M: #sents, JX: sent len, JQ: query len, VW/VC: word/char
        # vocab sizes, d: hidden size, W: max word length in chars.
        (N, M, JX, JQ, VW, VC, d, W) = (config.batch_size, config.max_num_sents, config.max_sent_size, config.max_ques_size, config.word_vocab_size, config.char_vocab_size, config.hidden_size, config.max_word_size)
        if config.split_supports:
            M = 1
        # Dynamic sizes taken from the fed placeholders.
        # NOTE(review): M is unconditionally re-assigned below, overriding the
        # M = 1 above — confirm this is intended.
        JX = tf.shape(self.x)[2]
        JQ = tf.shape(self.q)[1]
        JQ_sub = tf.shape(self.q_sub)[1]
        M = tf.shape(self.x)[1]
        (dc, dw, dco) = (config.char_emb_size, config.word_emb_size, config.char_out_size)
        with tf.variable_scope('emb'):
            # --- character-level embeddings via CNN over characters ---
            if config.use_char_emb:
                with tf.variable_scope('emb_var'), tf.device('/cpu:0'):
                    char_emb_mat = tf.get_variable('char_emb_mat', shape=[VC, dc], dtype='float')
                with tf.variable_scope('char'):
                    Acx = tf.nn.embedding_lookup(char_emb_mat, self.cx)
                    Acq = tf.nn.embedding_lookup(char_emb_mat, self.cq)
                    Acx = tf.reshape(Acx, [(- 1), JX, W, dc])
                    Acq = tf.reshape(Acq, [(- 1), JQ, W, dc])
                    if config.get_query_subject:
                        Acq_sub = tf.nn.embedding_lookup(char_emb_mat, self.cq_sub)
                        Acq_sub = tf.reshape(Acq_sub, [(- 1), JQ_sub, W, dc])
                    filter_sizes = list(map(int, config.out_channel_dims.split(',')))
                    heights = list(map(int, config.filter_heights.split(',')))
                    assert (sum(filter_sizes) == dco), (filter_sizes, dco)
                    with tf.variable_scope('conv'):
                        xx = multi_conv1d(Acx, filter_sizes, heights, 'VALID', self.is_train, config.keep_prob, scope='xx')
                        if config.share_cnn_weights:
                            # Reuse the context CNN ('xx') weights for the query side.
                            tf.get_variable_scope().reuse_variables()
                            qq = multi_conv1d(Acq, filter_sizes, heights, 'VALID', self.is_train, config.keep_prob, scope='xx')
                            if config.get_query_subject:
                                qq_sub = multi_conv1d(Acq_sub, filter_sizes, heights, 'VALID', self.is_train, config.keep_prob, scope='xx')
                        else:
                            qq = multi_conv1d(Acq, filter_sizes, heights, 'VALID', self.is_train, config.keep_prob, scope='qq')
                            if config.get_query_subject:
                                qq_sub = multi_conv1d(Acq_sub, filter_sizes, heights, 'VALID', self.is_train, config.keep_prob, scope='qq')
                    xx = tf.reshape(xx, [(- 1), M, JX, dco])
                    qq = tf.reshape(qq, [(- 1), JQ, dco])
                    if config.get_query_subject:
                        qq_sub = tf.reshape(qq_sub, [(- 1), JQ_sub, dco])
            # --- word-level embeddings (GloVe-initialized at train time) ---
            if config.use_word_emb:
                with tf.variable_scope('emb_var'), tf.device('/cpu:0'):
                    if (config.mode == 'train'):
                        word_emb_mat = tf.get_variable('word_emb_mat', dtype='float', shape=[VW, dw], initializer=get_initializer(self.emb_mat))
                    else:
                        word_emb_mat = tf.get_variable('word_emb_mat', shape=[VW, dw], dtype='float')
                    if config.use_glove_for_unk:
                        # Extend the trainable matrix with fixed vectors fed at runtime.
                        word_emb_mat = tf.concat(axis=0, values=[word_emb_mat, self.new_emb_mat])
                with tf.name_scope('word'):
                    Ax = tf.nn.embedding_lookup(word_emb_mat, self.x)
                    Aq = tf.nn.embedding_lookup(word_emb_mat, self.q)
                    if config.get_query_subject:
                        Aq_sub = tf.nn.embedding_lookup(word_emb_mat, self.q_sub)
                        self.tensor_dict['q_sub'] = Aq_sub
                    self.tensor_dict['x'] = Ax
                    self.tensor_dict['q'] = Aq
                if config.use_char_emb:
                    # Concatenate char-CNN and word embeddings on the feature axis.
                    xx = tf.concat(axis=3, values=[xx, Ax])
                    qq = tf.concat(axis=2, values=[qq, Aq])
                    if config.get_query_subject:
                        qq_sub = tf.concat(axis=2, values=[qq_sub, Aq_sub])
                else:
                    xx = Ax
                    qq = Aq
                    if config.get_query_subject:
                        qq_sub = Aq_sub
        # --- highway network, weights shared between context and query ---
        if config.highway:
            with tf.variable_scope('highway'):
                xx = highway_network(xx, config.highway_num_layers, True, wd=config.wd, is_train=self.is_train, input_keep_prob=config.highway_keep_prob)
                tf.get_variable_scope().reuse_variables()
                qq = highway_network(qq, config.highway_num_layers, True, wd=config.wd, is_train=self.is_train, input_keep_prob=config.highway_keep_prob)
                if config.get_query_subject:
                    qq_sub = highway_network(qq_sub, config.highway_num_layers, True, wd=config.wd, is_train=self.is_train, input_keep_prob=config.highway_keep_prob)
        self.tensor_dict['xx'] = xx
        self.tensor_dict['qq'] = qq
        # LSTM cells (with switchable dropout) used by the non-cudnn encoder
        # and modeling layers below.
        cell_fw = BasicLSTMCell(d, state_is_tuple=True)
        cell_bw = BasicLSTMCell(d, state_is_tuple=True)
        d_cell_fw = SwitchableDropoutWrapper(cell_fw, self.is_train, input_keep_prob=config.input_keep_prob)
        d_cell_bw = SwitchableDropoutWrapper(cell_bw, self.is_train, input_keep_prob=config.input_keep_prob)
        cell2_fw = BasicLSTMCell(d, state_is_tuple=True)
        cell2_bw = BasicLSTMCell(d, state_is_tuple=True)
        d_cell2_fw = SwitchableDropoutWrapper(cell2_fw, self.is_train, input_keep_prob=config.input_keep_prob)
        d_cell2_bw = SwitchableDropoutWrapper(cell2_bw, self.is_train, input_keep_prob=config.input_keep_prob)
        cell3_fw = BasicLSTMCell(d, state_is_tuple=True)
        cell3_bw = BasicLSTMCell(d, state_is_tuple=True)
        d_cell3_fw = SwitchableDropoutWrapper(cell3_fw, self.is_train, input_keep_prob=config.input_keep_prob)
        d_cell3_bw = SwitchableDropoutWrapper(cell3_bw, self.is_train, input_keep_prob=config.input_keep_prob)
        cell4_fw = BasicLSTMCell(d, state_is_tuple=True)
        cell4_bw = BasicLSTMCell(d, state_is_tuple=True)
        d_cell4_fw = SwitchableDropoutWrapper(cell4_fw, self.is_train, input_keep_prob=config.input_keep_prob)
        d_cell4_bw = SwitchableDropoutWrapper(cell4_bw, self.is_train, input_keep_prob=config.input_keep_prob)
        # Sequence lengths derived from the boolean masks.
        x_len = tf.reduce_sum(tf.cast(self.x_mask, 'int32'), 2)
        q_len = tf.reduce_sum(tf.cast(self.q_mask, 'int32'), 1)
        q_sub_len = tf.reduce_sum(tf.cast(self.q_sub_mask, 'int32'), 1)
        # --- contextual encoding of query (u) and context (h) ---
        with tf.variable_scope('prepro'):
            if config.cudnn_rnn:
                if ((config.reasoning_layer == 'mac_rnn') and (config.use_control_unit is False)):
                    # Without a control unit, the query summary u_st excludes
                    # the query-subject tokens (length q_len - q_sub_len).
                    with tf.variable_scope('u1'):
                        (u_bod, _) = bi_cudnn_rnn_encoder('lstm', config.hidden_size, 1, (1 - config.input_keep_prob), qq, (q_len - q_sub_len), self.is_train)
                        u_st = zhong_selfatt(tf.expand_dims(u_bod, axis=1), (config.hidden_size * 2), seq_len=(q_len - q_sub_len), transform='squeeze')
                        tf.get_variable_scope().reuse_variables()
                        (u, _) = bi_cudnn_rnn_encoder('lstm', config.hidden_size, 1, (1 - config.input_keep_prob), qq, q_len, self.is_train)
                else:
                    with tf.variable_scope('u1'):
                        (u, _) = bi_cudnn_rnn_encoder('lstm', config.hidden_size, 1, (1 - config.input_keep_prob), qq, q_len, self.is_train)
                        if (config.reasoning_layer == 'mac_rnn'):
                            u_st = zhong_selfatt(tf.expand_dims(u, axis=1), (config.hidden_size * 2), seq_len=q_len, transform='squeeze')
                q_sub_st = None
                if config.share_lstm_weights:
                    # Encode the context with the same 'u1' weights.
                    with tf.variable_scope('u1', reuse=True):
                        (h, _) = bi_cudnn_rnn_encoder('lstm', config.hidden_size, 1, (1 - config.input_keep_prob), tf.squeeze(xx, axis=1), tf.squeeze(x_len, axis=1), self.is_train)
                        h = tf.expand_dims(h, axis=1)
                        if (config.reasoning_layer == 'mac_rnn'):
                            h_st = zhong_selfatt(h, (config.hidden_size * 2), seq_len=tf.squeeze(x_len, axis=1), transform='squeeze')
                        else:
                            h_st = tf.reduce_mean(tf.squeeze(h, axis=1), axis=1)
                        if config.get_query_subject:
                            (q_sub, _) = bi_cudnn_rnn_encoder('lstm', config.hidden_size, 1, (1 - config.input_keep_prob), qq_sub, q_sub_len, self.is_train)
                            q_sub_st = zhong_selfatt(tf.expand_dims(q_sub, axis=1), (config.hidden_size * 2), seq_len=q_sub_len, transform='squeeze')
            else:
                # Non-cudnn path: plain bidirectional dynamic RNNs; summaries
                # come from the final LSTM cell states (.c) instead of
                # self-attention.
                if ((config.reasoning_layer == 'mac_rnn') and (config.use_control_unit is False)):
                    ((fw_u, bw_u), (fw_u_f_st, bw_u_f_st)) = bidirectional_dynamic_rnn(d_cell_fw, d_cell_bw, qq, (q_len - q_sub_len), dtype='float', scope='u1')
                    u_st = tf.concat(axis=1, values=[fw_u_f_st.c, bw_u_f_st.c])
                    ((fw_u, bw_u), _) = bidirectional_dynamic_rnn(d_cell_fw, d_cell_bw, qq, q_len, dtype='float', scope='u1')
                    u = tf.concat(axis=2, values=[fw_u, bw_u])
                else:
                    ((fw_u, bw_u), (fw_u_f_st, bw_u_f_st)) = bidirectional_dynamic_rnn(d_cell_fw, d_cell_bw, qq, q_len, dtype='float', scope='u1')
                    u = tf.concat(axis=2, values=[fw_u, bw_u])
                    if config.share_lstm_weights:
                        tf.get_variable_scope().reuse_variables()
                        ((fw_h, bw_h), (fw_h_f_st, bw_h_f_st)) = bidirectional_dynamic_rnn(cell_fw, cell_bw, xx, x_len, dtype='float', scope='u1')
                        h = tf.concat(axis=3, values=[fw_h, bw_h])
                        h_st = tf.concat(axis=1, values=[fw_h_f_st.c, bw_h_f_st.c])
                        if config.get_query_subject:
                            (_, (fw_u2_f_st, bw_u2_f_st)) = bidirectional_dynamic_rnn(cell_fw, cell_bw, qq_sub, q_sub_len, dtype='float', scope='u1')
                            q_sub_st = tf.concat(axis=1, values=[fw_u2_f_st.c, bw_u2_f_st.c])
                        else:
                            q_sub_st = None
                    else:
                        ((fw_h, bw_h), (fw_h_f_st, bw_h_f_st)) = bidirectional_dynamic_rnn(cell_fw, cell_bw, xx, x_len, dtype='float', scope='h1')
                        h = tf.concat(axis=3, values=[fw_h, bw_h])
                        h_st = tf.concat(axis=1, values=[fw_h_f_st.c, bw_h_f_st.c])
                        if config.get_query_subject:
                            tf.get_variable_scope().reuse_variables()
                            (_, (fw_u2_f_st, bw_u2_f_st)) = bidirectional_dynamic_rnn(cell_fw, cell_bw, qq_sub, q_sub_len, dtype='float', scope='u1')
                            q_sub_st = tf.concat(axis=2, values=[fw_u2_f_st.c, bw_u2_f_st.c])
                        else:
                            q_sub_st = None
            self.tensor_dict['u'] = u
            self.tensor_dict['h'] = h
        # --- attention + reasoning / modeling + output layers ---
        with tf.variable_scope('main'):
            context_dim = (config.hidden_size * 2)
            if config.split_supports:
                if (config.select_top_n_doc > 0):
                    # Keep only the first n documents per example; a padded
                    # "+1" row is appended so out-of-range gather indices hit
                    # an all-zero entry.
                    first_n_doc_idx = select_topn_doc_idx(N, config.select_top_n_doc, self.x_group)
                    h_plus_one = tf.concat([h, tf.expand_dims(tf.zeros_like(h[0], tf.float32), axis=0)], axis=0)
                    h_st_plus_one = tf.concat([h_st, tf.expand_dims(tf.zeros_like(h_st[0], tf.float32), axis=0)], axis=0)
                    x_len_plus_one = tf.concat([x_len, tf.expand_dims(tf.zeros_like(x_len[0], tf.int32), axis=0)], axis=0)
                    x_mask_plus_one = tf.concat([self.x_mask, tf.expand_dims(tf.zeros_like(self.x_mask[0], tf.bool), axis=0)], axis=0)
                    top_n_h = tf.gather(h_plus_one, first_n_doc_idx)
                    top_n_h_st = tf.gather(h_st_plus_one, first_n_doc_idx)
                    top_n_x_len = tf.gather(x_len_plus_one, first_n_doc_idx)
                    top_n_x_mask = tf.gather(x_mask_plus_one, first_n_doc_idx)
                if (config.hierarchical_attn is False):
                    # Flatten per-document rows back into per-example batches.
                    (h, x_len, x_mask) = reconstruct_batches(h, x_len, self.x_group, target_batch_size=N, max_para_size=config.max_para_size, model=self)
                else:
                    if config.bidaf:
                        context_dim = (config.hidden_size * 4)
                        # Tile the query encoding so each document row gets its
                        # example's query (x_group[i] docs belong to example i).
                        batch_nums = []
                        for i in range(config.batch_size):
                            batch_nums = tf.concat([batch_nums, tf.tile([i], [self.x_group[i]])], axis=0)
                        u_tiled = tf.gather(u, batch_nums)
                        q_mask_tiled = tf.gather(self.q_mask, batch_nums)
                        h = attention_layer(config, self.is_train, h, u_tiled, h_mask=self.x_mask, u_mask=q_mask_tiled, scope='p0', tensor_dict=self.tensor_dict)
                        # Project the 160-dim attention output down to 80 dims.
                        W = tf.get_variable('W', [160, 80])
                        b = tf.get_variable('b', [80])
                        h = (tf.einsum('ijkl,lm->ijkm', h, W) + b)
                        (h_reconstruct, _, _) = reconstruct_batches(h, x_len, self.x_group, target_batch_size=N, max_para_size=config.max_para_size, model=self, emb_dim=context_dim)
                    if (config.select_top_n_doc > 1):
                        top_n_x_group = []
                        for i in range(N):
                            to_append = tf.cond((self.x_group[i] > config.select_top_n_doc), (lambda : config.select_top_n_doc), (lambda : self.x_group[i]))
                            top_n_x_group.append(to_append)
                        top_n_x_group = tf.stack(top_n_x_group)
                        (h, p_st, x_mask, pdoc_mask, self.x_sents_len_reconstruct) = reconstruct_batchesV2(top_n_h, top_n_h_st, top_n_x_mask, top_n_x_group, self.x_sents_len, target_batch_size=N, max_para_size=config.max_para_size, model=self)
                    else:
                        (h, p_st, x_mask, pdoc_mask, self.x_sents_len_reconstruct) = reconstruct_batchesV2(h, h_st, self.x_mask, self.x_group, self.x_sents_len, target_batch_size=N, max_para_size=config.max_para_size, model=self)
                if (config.select_top_n_doc > 0):
                    x_len = top_n_x_len
            else:
                x_mask = self.x_mask
            if (config.bidaf and (config.hierarchical_attn is False)):
                # Bi-attention between context and query (BiDAF 'p0').
                context_dim = (config.hidden_size * 8)
                if ((config.use_control_unit is False) and (config.reasoning_layer == 'mac_rnn')):
                    if (config.select_top_n_doc > 0):
                        p0 = attention_layer(config, self.is_train, top_n_h, u, h_mask=top_n_x_mask, u_mask=self.q_mask, scope='p0', tensor_dict=self.tensor_dict)
                    else:
                        p0 = attention_layer(config, self.is_train, h, u, h_mask=x_mask, u_mask=self.q_mask, scope='p0', tensor_dict=self.tensor_dict)
                elif (config.select_top_n_doc > 0):
                    p0 = attention_layer(config, self.is_train, top_n_h, u, h_mask=top_n_x_mask, u_mask=self.q_mask, scope='p0', tensor_dict=self.tensor_dict)
                else:
                    p0 = attention_layer(config, self.is_train, h, u, h_mask=x_mask, u_mask=self.q_mask, scope='p0', tensor_dict=self.tensor_dict)
            else:
                p0 = h
            first_cell_fw = d_cell2_fw
            second_cell_fw = d_cell3_fw
            first_cell_bw = d_cell2_bw
            second_cell_bw = d_cell3_bw
            if (config.reasoning_layer == 'mac_rnn'):
                # --- MAC-RNN multi-hop reasoning ---
                query_dim = (config.hidden_size * 2)
                if config.hierarchical_attn:
                    mac_rnn_cell = HierarchicalAttnMACRnn(config.batch_size, context_dim, query_dim, num_hops=config.num_hops, reuse_cell=config.reuse_cell, is_train=self.is_train, use_control_unit=config.use_control_unit, mode=config.mode, read_strategy=config.mac_read_strategy, output_unit_type=config.mac_output_unit, answer_state_update_rule=config.mac_answer_state_update_rule, reasoning_unit=config.mac_reasoning_unit, memory_state_update_rule=config.mac_memory_state_update_rule, answer_doc_ids=(self.answer_doc_ids if (config.supervise_final_doc or (config.oracle is not None)) else None), sents_len=self.x_sents_len_reconstruct, oracle=config.oracle, input_keep_prob=config.input_keep_prob, attention_cell_dropout=config.attention_cell_dropout, read_topk_docs=config.read_topk_docs)
                    self.mac_rnn_cell = mac_rnn_cell
                    if (config.mac_prediction == 'candidates'):
                        # Candidate answers are mean-pooled span embeddings.
                        (cand_emb, cand_mask) = span_to_avg_emb(self.candidate_spans, h_reconstruct, config.batch_size, self)
                        g1 = dynamic_mac_rnn(mac_rnn_cell, p0, u, q_len, x_mask, self.q_mask, q_sub_st=q_sub_st, context_st=p_st, query_st=u_st, cdoc_mask=pdoc_mask, candidates=cand_emb, cand_mask=cand_mask)
                        self.doc_attn_logits = mac_rnn_cell.doc_attn_logits_lst
                        self.word_attn_logits = mac_rnn_cell.word_attn_logits_lst
                        self.doc_labels = mac_rnn_cell.doc_attn
                        self.g1 = g1
                        self.cand_mask = cand_mask
                        self.cand_emb = cand_emb
                        self.pdoc_mask = pdoc_mask
                        self.p_st = p_st
                        logits = get_logits([g1], d, True, wd=config.wd, input_keep_prob=config.input_keep_prob, mask=cand_mask, is_train=self.is_train, func=config.answer_func, scope='logits1')
                        JX = tf.shape(g1)[2]
                        self.JX = JX
                        self.g1_shape = tf.shape(g1)
                        flat_logits = tf.reshape(logits, [config.batch_size, (M * JX)])
                        flat_yp = tf.nn.softmax(flat_logits)
                        yp = tf.reshape(flat_yp, [config.batch_size, M, JX])
                        self.logits = flat_logits
                        self.yp = yp
                        if (config.use_assembler or config.attn_visualization):
                            # Per-hop predictions (reusing the 'logits1' weights).
                            self.yp_list = []
                            self.logits_list = []
                            for i in range(config.num_hops):
                                logits = get_logits([mac_rnn_cell.answer_list[i]], d, True, wd=config.wd, input_keep_prob=config.input_keep_prob, mask=cand_mask, is_train=self.is_train, func=config.answer_func, scope='logits1', reuse=True)
                                flat_logits = tf.reshape(logits, [config.batch_size, (M * JX)])
                                flat_yp = tf.nn.softmax(flat_logits)
                                yp = tf.reshape(flat_yp, [config.batch_size, M, JX])
                                self.yp_list.append(yp)
                                self.logits_list.append(flat_logits)
                        if config.use_assembler:
                            if (config.assembler_type == 'BiAttn'):
                                self.assembler = BiAttnAssembler(config, self.is_train, self, context_dim=context_dim)
                                self.assembler.build_forward(p0, x_mask, u, u_st, self.q_mask, cand_emb, cand_mask)
                            else:
                                raise NotImplementedError
                        return
                    else:
                        raise NotImplementedError
                else:
                    mac_rnn_cell = MACRnn(config.batch_size, p0.get_shape()[(- 1)], u.get_shape()[(- 1)], num_hops=config.num_hops, prediction=config.mac_prediction, reuse_cell=config.reuse_cell, is_train=self.is_train, use_control_unit=config.use_control_unit, mode=config.mode)
                    if (config.mac_prediction == 'candidates'):
                        (cand_emb, cand_mask) = span_to_avg_emb(self.candidate_spans, p0, config.batch_size, self)
                        g1 = dynamic_mac_rnn(mac_rnn_cell, p0, u, q_len, x_mask, self.q_mask, candidates=cand_emb, cand_mask=cand_mask, q_sub_st=q_sub_st)
                        self.g1 = g1
                        self.cand_mask = cand_mask
                        self.cand_emb = cand_emb
                        logits = get_logits([g1], d, True, wd=config.wd, input_keep_prob=config.input_keep_prob, mask=cand_mask, is_train=self.is_train, func=config.answer_func, scope='logits1')
                        JX = tf.shape(g1)[2]
                        flat_logits = tf.reshape(logits, [config.batch_size, (M * JX)])
                        flat_yp = tf.nn.softmax(flat_logits)
                        yp = tf.reshape(flat_yp, [config.batch_size, M, JX])
                        self.logits = flat_logits
                        self.yp = yp
                        return
                    elif (config.mac_prediction == 'span-dual'):
                        (g1, g2) = dynamic_mac_rnn(mac_rnn_cell, p0, qq, q_len)
                        if (config.split_supports is True):
                            M = 1
                            JX = config.max_para_size
                            N = config.batch_size
                        logits = get_logits([g1], d, True, wd=config.wd, input_keep_prob=config.input_keep_prob, mask=x_mask, is_train=self.is_train, func=config.answer_func, scope='logits1')
                        logits2 = get_logits([g2], d, True, wd=config.wd, input_keep_prob=config.input_keep_prob, mask=x_mask, is_train=self.is_train, func=config.answer_func, scope='logits2')
                    else:
                        assert (config.mac_prediction == 'span-single')
                        (g1, logits) = dynamic_mac_rnn(mac_rnn_cell, p0, qq, q_len, x_mask, self.q_mask)
                        if (config.split_supports is True):
                            M = 1
                            JX = config.max_para_size
                            N = config.batch_size
                        # Attended start representation, tiled for the end predictor.
                        a1i = softsel(tf.reshape(g1, [N, (M * JX), 80]), tf.reshape(logits, [N, (M * JX)]))
                        a1i = tf.tile(tf.expand_dims(tf.expand_dims(a1i, 1), 1), [1, M, JX, 1])
            else:
                # --- BiDAF modeling layers g0/g1 ---
                if config.cudnn_rnn:
                    with tf.variable_scope('g0'):
                        (g0, _) = bi_cudnn_rnn_encoder('lstm', config.hidden_size, 1, (1 - config.input_keep_prob), tf.squeeze(p0, axis=1), tf.squeeze(x_len, axis=1), self.is_train)
                        g0 = tf.expand_dims(g0, axis=1)
                else:
                    ((fw_g0, bw_g0), _) = bidirectional_dynamic_rnn(first_cell_fw, first_cell_bw, p0, x_len, dtype='float', scope='g0')
                    g0 = tf.concat(axis=3, values=[fw_g0, bw_g0])
                if config.cudnn_rnn:
                    with tf.variable_scope('g1'):
                        (g1, _) = bi_cudnn_rnn_encoder('lstm', config.hidden_size, 1, (1 - config.input_keep_prob), tf.squeeze(g0, axis=1), tf.squeeze(x_len, axis=1), self.is_train)
                        g1 = tf.expand_dims(g1, axis=1)
                else:
                    ((fw_g1, bw_g1), (fw_g1_f_st, bw_g1_f_st)) = bidirectional_dynamic_rnn(second_cell_fw, second_cell_bw, g0, x_len, dtype='float', scope='g1')
                    g1 = tf.concat(axis=3, values=[fw_g1, bw_g1])
            if ((config.reasoning_layer == 'bidaf') and (config.mac_prediction == 'candidates')):
                # BiDAF + candidate classification: pool an answer state from
                # g1's attention over the context, then score each candidate.
                logits = get_logits([g1], d, True, wd=config.wd, input_keep_prob=config.input_keep_prob, mask=x_mask, is_train=self.is_train, scope='a_state_logits')
                probs = tf.nn.softmax(logits)
                a_state = tf.einsum('ijkl,ijk->ijl', h, probs)
                a_state = tf.squeeze(a_state, axis=1)
                (cand_emb, cand_mask) = span_to_avg_emb(self.candidate_spans, h, config.batch_size, self)
                cand_emb = tf.squeeze(cand_emb, axis=1)
                cand_dim = (config.hidden_size * 2)
                with tf.variable_scope('output_unit'):
                    num_cand = tf.shape(cand_emb)[1]
                    similarity = tf.einsum('ik,ijk->ijk', a_state, cand_emb)
                    M = tf.tile(tf.expand_dims(a_state, axis=1), [1, num_cand, 1])
                    W1 = tf.get_variable('W1', [(3 * cand_dim), (2 * cand_dim)])
                    b1 = tf.get_variable('b1', [(2 * cand_dim)])
                    W2 = tf.get_variable('W2', [(2 * cand_dim), cand_dim])
                    b2 = tf.get_variable('b2', [cand_dim])
                    # Two-layer MLP over [answer_state; candidate; elementwise product].
                    concat_in = tf.concat(axis=(- 1), values=[tf.reshape(M, [(- 1), cand_dim]), tf.reshape(cand_emb, [(- 1), cand_dim]), tf.reshape(similarity, [(- 1), cand_dim])])
                    output = (tf.matmul(tf.nn.relu((tf.matmul(concat_in, W1) + b1)), W2) + b2)
                    g1 = tf.expand_dims(tf.reshape(output, [self.config.batch_size, (- 1), 40]), axis=1)
                logits = get_logits([g1], d, True, wd=config.wd, input_keep_prob=config.input_keep_prob, mask=cand_mask, is_train=self.is_train, func=config.answer_func, scope='logits1')
                JX = tf.shape(g1)[2]
                flat_logits = tf.reshape(logits, [config.batch_size, JX])
                flat_yp = tf.nn.softmax(flat_logits)
                yp = tf.reshape(flat_yp, [config.batch_size, 1, JX])
                self.logits = flat_logits
                self.yp = yp
                return
            # --- span prediction: start logits ---
            logits = get_logits([g1, p0], d, True, wd=config.wd, input_keep_prob=config.input_keep_prob, mask=x_mask, is_train=self.is_train, func=config.answer_func, scope='logits1')
            if (config.split_supports is True):
                M = 1
                JX = config.max_para_size
                N = config.batch_size
            a1i = softsel(tf.reshape(g1, [N, (M * JX), (2 * d)]), tf.reshape(logits, [N, (M * JX)]))
            a1i = tf.tile(tf.expand_dims(tf.expand_dims(a1i, 1), 1), [1, M, JX, 1])
            if ((config.reasoning_layer is None) or (config.mac_prediction == 'span-single')):
                # --- end logits conditioned on the attended start state ---
                if config.cudnn_rnn:
                    with tf.variable_scope('g2'):
                        g2_in = tf.squeeze(tf.concat(axis=3, values=[p0, g1, a1i, (g1 * a1i)]), axis=1)
                        (g2, _) = bi_cudnn_rnn_encoder('lstm', config.hidden_size, 1, (1 - config.input_keep_prob), g2_in, tf.squeeze(x_len, axis=1), self.is_train)
                        g2 = tf.expand_dims(g2, axis=1)
                else:
                    ((fw_g2, bw_g2), _) = bidirectional_dynamic_rnn(d_cell4_fw, d_cell4_bw, tf.concat(axis=3, values=[p0, g1, a1i, (g1 * a1i)]), x_len, dtype='float', scope='g2')
                    g2 = tf.concat(axis=3, values=[fw_g2, bw_g2])
                logits2 = get_logits([g2, p0], d, True, wd=config.wd, input_keep_prob=config.input_keep_prob, mask=x_mask, is_train=self.is_train, func=config.answer_func, scope='logits2')
                flat_logits = tf.reshape(logits, [(- 1), (M * JX)])
                flat_yp = tf.nn.softmax(flat_logits)
                flat_logits2 = tf.reshape(logits2, [(- 1), (M * JX)])
                flat_yp2 = tf.nn.softmax(flat_logits2)
                if config.na:
                    # Prepend a learned no-answer bias as class 0; na_prob is
                    # the probability mass on that class.
                    na_bias = tf.get_variable('na_bias', shape=[], dtype='float')
                    na_bias_tiled = tf.tile(tf.reshape(na_bias, [1, 1]), [N, 1])
                    concat_flat_logits = tf.concat(axis=1, values=[na_bias_tiled, flat_logits])
                    concat_flat_yp = tf.nn.softmax(concat_flat_logits)
                    na_prob = tf.squeeze(tf.slice(concat_flat_yp, [0, 0], [(- 1), 1]), [1])
                    flat_yp = tf.slice(concat_flat_yp, [0, 1], [(- 1), (- 1)])
                    concat_flat_logits2 = tf.concat(axis=1, values=[na_bias_tiled, flat_logits2])
                    concat_flat_yp2 = tf.nn.softmax(concat_flat_logits2)
                    na_prob2 = tf.squeeze(tf.slice(concat_flat_yp2, [0, 0], [(- 1), 1]), [1])
                    flat_yp2 = tf.slice(concat_flat_yp2, [0, 1], [(- 1), (- 1)])
                    self.concat_logits = concat_flat_logits
                    self.concat_logits2 = concat_flat_logits2
                    self.na_prob = (na_prob * na_prob2)
                yp = tf.reshape(flat_yp, [(- 1), M, JX])
                yp2 = tf.reshape(flat_yp2, [(- 1), M, JX])
                wyp = tf.nn.sigmoid(logits2)
                self.logits = flat_logits
                self.logits2 = flat_logits2
                self.yp = yp
                self.yp2 = yp2
                self.wyp = wyp
    def _build_loss(self):
        """Assemble the total training loss.

        Adds cross-entropy terms to the graph's 'losses' collection, sums the
        collection into ``self.loss``, and optionally adds document/word
        attention-supervision and assembler losses.  Also registers scalar
        summaries and EMA-tracked scalars.
        """
        config = self.config
        JX = tf.shape(self.x)[2]
        N = config.batch_size
        if (config.split_supports is True):
            # Documents are flattened into one fixed-size "sentence".
            M = 1
            JX = config.max_para_size
        else:
            M = tf.shape(self.x)[1]
            JQ = tf.shape(self.q)[1]
        # Zero-out the loss of examples whose query mask is entirely False.
        loss_mask = tf.reduce_max(tf.cast(self.q_mask, 'float'), 1)
        if config.wy:
            # Per-token sigmoid loss over answer-word indicators; negatives
            # are down-weighted by num_pos / num_neg to balance the classes.
            losses = tf.nn.sigmoid_cross_entropy_with_logits(logits=tf.reshape(self.logits2, [(- 1), M, JX]), labels=tf.cast(self.wy, 'float'))
            num_pos = tf.reduce_sum(tf.cast(self.wy, 'float'))
            num_neg = (tf.reduce_sum(tf.cast(self.x_mask, 'float')) - num_pos)
            damp_ratio = (num_pos / num_neg)
            dampened_losses = (losses * (((tf.cast(self.x_mask, 'float') - tf.cast(self.wy, 'float')) * damp_ratio) + tf.cast(self.wy, 'float')))
            new_losses = tf.reduce_sum(dampened_losses, [1, 2])
            ce_loss = tf.reduce_mean((loss_mask * new_losses))
            tf.add_to_collection('losses', ce_loss)
        elif ((config.reasoning_layer is not None) and (config.mac_prediction == 'candidates')):
            # Softmax classification over answer candidates.
            losses = tf.nn.sparse_softmax_cross_entropy_with_logits(logits=self.logits, labels=tf.cast(tf.reshape(self.candidate_span_y, [config.batch_size]), 'int32'))
            ce_loss = tf.reduce_mean((loss_mask * losses))
            tf.add_to_collection('losses', ce_loss)
        else:
            # Span prediction: start (logits) + end (logits2) cross-entropy,
            # optionally with a prepended no-answer class (config.na).
            if config.na:
                na = tf.reshape(self.na, [(- 1), 1])
                concat_y = tf.concat(axis=1, values=[na, tf.reshape(self.y, [(- 1), (M * JX)])])
                losses = tf.nn.softmax_cross_entropy_with_logits(logits=self.concat_logits, labels=tf.cast(concat_y, 'float'))
                concat_y2 = tf.concat(axis=1, values=[na, tf.reshape(self.y2, [(- 1), (M * JX)])])
                losses2 = tf.nn.softmax_cross_entropy_with_logits(logits=self.concat_logits2, labels=tf.cast(concat_y2, 'float'))
            else:
                losses = tf.nn.softmax_cross_entropy_with_logits(logits=self.logits, labels=tf.cast(tf.reshape(self.y, [(- 1), (M * JX)]), 'float'))
                losses2 = tf.nn.softmax_cross_entropy_with_logits(logits=self.logits2, labels=tf.cast(tf.reshape(self.y2, [(- 1), (M * JX)]), 'float'))
            ce_loss = tf.reduce_mean((loss_mask * losses))
            ce_loss2 = tf.reduce_mean((loss_mask * losses2))
            tf.add_to_collection('losses', ce_loss)
            tf.add_to_collection('losses', ce_loss2)
        # Both sums below cover the same 'losses' collection at this point;
        # ansProp_loss keeps the answer-prediction part before the extra
        # supervision terms are added to self.loss further down.
        self.loss = tf.add_n(tf.get_collection('losses', scope=self.scope), name='loss')
        self.ansProp_loss = tf.add_n(tf.get_collection('losses', scope=self.scope), name='ansProp_loss')
        self.docExpl_ansProp_loss = self.ansProp_loss
        tf.summary.scalar(self.loss.op.name, self.loss)
        tf.add_to_collection('ema/scalar', self.loss)
        if config.supervise_first_doc:
            # Supervise the first-hop document attention toward first_doc_ids.
            doc_first_attn_loss = tf.nn.sparse_softmax_cross_entropy_with_logits(logits=self.doc_attn_logits[0], labels=self.first_doc_ids)
            doc_first_attn_loss = tf.reduce_mean(doc_first_attn_loss, name='doc_first_attn_loss')
            tf.summary.scalar('doc_first_attn_loss', doc_first_attn_loss)
            tf.add_to_collection('ema/scalar', doc_first_attn_loss)
            self.loss = (self.loss + (config.first_attn_loss_coeff * doc_first_attn_loss))
            self.docExpl_loss = (config.first_attn_loss_coeff * doc_first_attn_loss)
        else:
            self.docExpl_loss = 0.0
        if config.supervise_final_doc:
            # Supervise the final-hop document and word attention with the
            # first (gold) answer document/word id of each example.
            answer_doc_ids = tf.squeeze(tf.slice(self.answer_doc_ids, [0, 0], [(- 1), 1]), axis=1)
            answer_word_ids = tf.squeeze(tf.slice(self.answer_word_ids, [0, 0], [(- 1), 1]), axis=1)
            if (config.mac_read_strategy == 'one_doc_per_it_and_repeat_2nd_step'):
                # Under this strategy the supervised hop is hop 1, not the last.
                doc_attn_logits = self.doc_attn_logits[1]
                if (config.mac_memory_state_update_rule is None):
                    # Pick each example's word logits for its gold document.
                    batch_nums = tf.range(0, limit=N)
                    doc_indices = tf.stack([batch_nums, answer_doc_ids], axis=1)
                    word_attn_logits = tf.gather_nd(self.word_attn_logits[1], doc_indices)
                else:
                    word_attn_logits = self.word_attn_logits[1]
            else:
                doc_attn_logits = self.doc_attn_logits[(- 1)]
                if (config.mac_memory_state_update_rule is None):
                    batch_nums = tf.range(0, limit=N)
                    doc_indices = tf.stack([batch_nums, answer_doc_ids], axis=1)
                    word_attn_logits = tf.gather_nd(self.word_attn_logits[(- 1)], doc_indices)
                else:
                    word_attn_logits = self.word_attn_logits[(- 1)]
            doc_final_attn_loss = tf.nn.sparse_softmax_cross_entropy_with_logits(logits=doc_attn_logits, labels=answer_doc_ids)
            word_attn_loss = tf.nn.sparse_softmax_cross_entropy_with_logits(logits=word_attn_logits, labels=answer_word_ids)
            doc_final_attn_loss = tf.reduce_mean(doc_final_attn_loss, name='doc_final_attn_loss')
            word_attn_loss = tf.reduce_mean(word_attn_loss, name='word_attn_loss')
            tf.summary.scalar('doc_final_attn_loss', doc_final_attn_loss)
            tf.summary.scalar('word_attn_loss', word_attn_loss)
            tf.add_to_collection('ema/scalar', word_attn_loss)
            tf.add_to_collection('ema/scalar', doc_final_attn_loss)
            self.docExpl_loss += (config.attn_loss_coeff * (doc_final_attn_loss + word_attn_loss))
            self.loss = ((self.loss + (config.attn_loss_coeff * doc_final_attn_loss)) + (config.attn_loss_coeff * word_attn_loss))
        self.docExpl_ansProp_loss += self.docExpl_loss
        tf.summary.scalar('total_loss', self.loss)
        if config.use_assembler:
            # Assembler predicts the same candidate label from its own logits.
            assembler_losses = tf.nn.sparse_softmax_cross_entropy_with_logits(logits=self.assembler.logits, labels=tf.cast(tf.reshape(self.candidate_span_y, [config.batch_size]), 'int32'))
            self.assembler_loss = tf.reduce_mean((loss_mask * assembler_losses), name='assembler_loss')
            self.loss += (config.assembler_loss_coeff * self.assembler_loss)
            tf.summary.scalar('assembler_loss', self.assembler_loss)
            tf.add_to_collection('ema/scalar', self.assembler_loss)
def _build_ema(self):
self.ema = tf.train.ExponentialMovingAverage(self.config.decay)
ema = self.ema
tensors = (tf.get_collection('ema/scalar', scope=self.scope) + tf.get_collection('ema/vector', scope=self.scope))
ema_op = ema.apply(tensors)
for var in tf.get_collection('ema/scalar', scope=self.scope):
ema_var = ema.average(var)
tf.summary.scalar(ema_var.op.name, ema_var)
for var in tf.get_collection('ema/vector', scope=self.scope):
ema_var = ema.average(var)
tf.summary.histogram(ema_var.op.name, ema_var)
with tf.control_dependencies([ema_op]):
self.loss = tf.identity(self.loss)
def _build_var_ema(self):
self.var_ema = tf.train.ExponentialMovingAverage(self.config.var_decay)
ema = self.var_ema
ema_op = ema.apply(tf.trainable_variables())
with tf.control_dependencies([ema_op]):
self.loss = tf.identity(self.loss)
def get_loss(self):
return self.loss
def get_global_step(self):
return self.global_step
def get_var_list(self, model_name):
if (model_name == 'expl+prop'):
self.var_list = [var for var in tf.trainable_variables() if ('assembler' not in var.name)]
elif (model_name == 'expl+prop_only'):
self.var_list = [var for var in tf.trainable_variables() if (('MACRnn' in var.name) or ('main/logits1' in var.name))]
elif (model_name == 'assembler'):
self.var_list = [var for var in tf.trainable_variables() if (('MACRnn' not in var.name) and ('main/logits1' not in var.name))]
elif (model_name == 'assembler_only'):
self.var_list = [var for var in tf.trainable_variables() if ('assembler' in var.name)]
elif ((model_name == 'model_network') or (model_name == 'all')):
self.var_list = [var for var in tf.trainable_variables()]
else:
raise NotImplementedError
assert (len(self.var_list) > 0)
return self.var_list
def get_feed_dict(self, batch, is_train, supervised=True):
return _get_feed_dict(self, batch, is_train, supervised) |
class TfExampleDecoderTest(tf.test.TestCase):
def _EncodeImage(self, image_tensor, encoding_type='jpeg'):
with self.test_session():
if (encoding_type == 'jpeg'):
image_encoded = tf.image.encode_jpeg(tf.constant(image_tensor)).eval()
elif (encoding_type == 'png'):
image_encoded = tf.image.encode_png(tf.constant(image_tensor)).eval()
else:
raise ValueError('Invalid encoding type.')
return image_encoded
def _DecodeImage(self, image_encoded, encoding_type='jpeg'):
with self.test_session():
if (encoding_type == 'jpeg'):
image_decoded = tf.image.decode_jpeg(tf.constant(image_encoded)).eval()
elif (encoding_type == 'png'):
image_decoded = tf.image.decode_png(tf.constant(image_encoded)).eval()
else:
raise ValueError('Invalid encoding type.')
return image_decoded
def _Int64Feature(self, value):
return tf.train.Feature(int64_list=tf.train.Int64List(value=value))
def _FloatFeature(self, value):
return tf.train.Feature(float_list=tf.train.FloatList(value=value))
def _BytesFeature(self, value):
if isinstance(value, list):
return tf.train.Feature(bytes_list=tf.train.BytesList(value=value))
return tf.train.Feature(bytes_list=tf.train.BytesList(value=[value]))
def testDecodeJpegImage(self):
image_tensor = np.random.randint(255, size=(4, 5, 3)).astype(np.uint8)
encoded_jpeg = self._EncodeImage(image_tensor)
decoded_jpeg = self._DecodeImage(encoded_jpeg)
example = tf.train.Example(features=tf.train.Features(feature={'image/encoded': self._BytesFeature(encoded_jpeg), 'image/format': self._BytesFeature('jpeg'), 'image/source_id': self._BytesFeature('image_id')})).SerializeToString()
example_decoder = tf_example_decoder.TfExampleDecoder()
tensor_dict = example_decoder.decode(tf.convert_to_tensor(example))
self.assertAllEqual(tensor_dict[fields.InputDataFields.image].get_shape().as_list(), [None, None, 3])
with self.test_session() as sess:
tensor_dict = sess.run(tensor_dict)
self.assertAllEqual(decoded_jpeg, tensor_dict[fields.InputDataFields.image])
self.assertEqual('image_id', tensor_dict[fields.InputDataFields.source_id])
def testDecodeImageKeyAndFilename(self):
image_tensor = np.random.randint(255, size=(4, 5, 3)).astype(np.uint8)
encoded_jpeg = self._EncodeImage(image_tensor)
example = tf.train.Example(features=tf.train.Features(feature={'image/encoded': self._BytesFeature(encoded_jpeg), 'image/key/sha256': self._BytesFeature('abc'), 'image/filename': self._BytesFeature('filename')})).SerializeToString()
example_decoder = tf_example_decoder.TfExampleDecoder()
tensor_dict = example_decoder.decode(tf.convert_to_tensor(example))
with self.test_session() as sess:
tensor_dict = sess.run(tensor_dict)
self.assertEqual('abc', tensor_dict[fields.InputDataFields.key])
self.assertEqual('filename', tensor_dict[fields.InputDataFields.filename])
def testDecodePngImage(self):
image_tensor = np.random.randint(255, size=(4, 5, 3)).astype(np.uint8)
encoded_png = self._EncodeImage(image_tensor, encoding_type='png')
decoded_png = self._DecodeImage(encoded_png, encoding_type='png')
example = tf.train.Example(features=tf.train.Features(feature={'image/encoded': self._BytesFeature(encoded_png), 'image/format': self._BytesFeature('png'), 'image/source_id': self._BytesFeature('image_id')})).SerializeToString()
example_decoder = tf_example_decoder.TfExampleDecoder()
tensor_dict = example_decoder.decode(tf.convert_to_tensor(example))
self.assertAllEqual(tensor_dict[fields.InputDataFields.image].get_shape().as_list(), [None, None, 3])
with self.test_session() as sess:
tensor_dict = sess.run(tensor_dict)
self.assertAllEqual(decoded_png, tensor_dict[fields.InputDataFields.image])
self.assertEqual('image_id', tensor_dict[fields.InputDataFields.source_id])
def testDecodeBoundingBox(self):
    """Per-corner coordinate lists decode into an [N, 4] groundtruth_boxes tensor."""
    image = np.random.randint(255, size=(4, 5, 3)).astype(np.uint8)
    jpeg_bytes = self._EncodeImage(image)
    ymins, xmins = [0.0, 4.0], [1.0, 5.0]
    ymaxs, xmaxs = [2.0, 6.0], [3.0, 7.0]
    features = {
        'image/encoded': self._BytesFeature(jpeg_bytes),
        'image/format': self._BytesFeature('jpeg'),
        'image/object/bbox/ymin': self._FloatFeature(ymins),
        'image/object/bbox/xmin': self._FloatFeature(xmins),
        'image/object/bbox/ymax': self._FloatFeature(ymaxs),
        'image/object/bbox/xmax': self._FloatFeature(xmaxs),
    }
    serialized = tf.train.Example(
        features=tf.train.Features(feature=features)).SerializeToString()
    decoder = tf_example_decoder.TfExampleDecoder()
    output = decoder.decode(tf.convert_to_tensor(serialized))
    # Number of boxes is unknown statically; each box has 4 coordinates.
    self.assertAllEqual(
        output[fields.InputDataFields.groundtruth_boxes].get_shape().as_list(),
        [None, 4])
    with self.test_session() as sess:
        output = sess.run(output)
        # Decoder stacks coordinates in [ymin, xmin, ymax, xmax] order per box.
        expected = np.vstack([ymins, xmins, ymaxs, xmaxs]).transpose()
        self.assertAllEqual(expected, output[fields.InputDataFields.groundtruth_boxes])
def testDecodeObjectLabel(self):
    """Integer class labels decode into groundtruth_classes unchanged."""
    image = np.random.randint(255, size=(4, 5, 3)).astype(np.uint8)
    jpeg_bytes = self._EncodeImage(image)
    labels = [0, 1]
    features = {
        'image/encoded': self._BytesFeature(jpeg_bytes),
        'image/format': self._BytesFeature('jpeg'),
        'image/object/class/label': self._Int64Feature(labels),
    }
    serialized = tf.train.Example(
        features=tf.train.Features(feature=features)).SerializeToString()
    decoder = tf_example_decoder.TfExampleDecoder()
    output = decoder.decode(tf.convert_to_tensor(serialized))
    self.assertAllEqual(
        output[fields.InputDataFields.groundtruth_classes].get_shape().as_list(),
        [None])
    with self.test_session() as sess:
        output = sess.run(output)
        self.assertAllEqual(labels, output[fields.InputDataFields.groundtruth_classes])
def testDecodeObjectLabelWithMapping(self):
    """Decodes text class names into numeric ids via a label map proto file."""
    image_tensor = np.random.randint(255, size=(4, 5, 3)).astype(np.uint8)
    encoded_jpeg = self._EncodeImage(image_tensor)
    bbox_classes_text = ['cat', 'dog']
    example = tf.train.Example(features=tf.train.Features(feature={'image/encoded': self._BytesFeature(encoded_jpeg), 'image/format': self._BytesFeature('jpeg'), 'image/object/class/text': self._BytesFeature(bbox_classes_text)})).SerializeToString()
    # Ids are deliberately non-consecutive (cat -> 3, dog -> 1) so the test
    # fails if the decoder merely enumerates names instead of using the map.
    label_map_string = "\n item {\n id:3\n name:'cat'\n }\n item {\n id:1\n name:'dog'\n }\n "
    label_map_path = os.path.join(self.get_temp_dir(), 'label_map.pbtxt')
    with tf.gfile.Open(label_map_path, 'wb') as f:
        f.write(label_map_string)
    example_decoder = tf_example_decoder.TfExampleDecoder(label_map_proto_file=label_map_path)
    tensor_dict = example_decoder.decode(tf.convert_to_tensor(example))
    self.assertAllEqual(tensor_dict[fields.InputDataFields.groundtruth_classes].get_shape().as_list(), [None])
    with self.test_session() as sess:
        # The text -> id lookup is a TF lookup table and must be initialized
        # before the graph is run.
        sess.run(tf.tables_initializer())
        tensor_dict = sess.run(tensor_dict)
        self.assertAllEqual([3, 1], tensor_dict[fields.InputDataFields.groundtruth_classes])
def testDecodeObjectArea(self):
    """Per-object area values decode into groundtruth_area."""
    image = np.random.randint(255, size=(4, 5, 3)).astype(np.uint8)
    jpeg_bytes = self._EncodeImage(image)
    areas = [100.0, 174.0]
    features = {
        'image/encoded': self._BytesFeature(jpeg_bytes),
        'image/format': self._BytesFeature('jpeg'),
        'image/object/area': self._FloatFeature(areas),
    }
    serialized = tf.train.Example(
        features=tf.train.Features(feature=features)).SerializeToString()
    decoder = tf_example_decoder.TfExampleDecoder()
    output = decoder.decode(tf.convert_to_tensor(serialized))
    self.assertAllEqual(
        output[fields.InputDataFields.groundtruth_area].get_shape().as_list(),
        [None])
    with self.test_session() as sess:
        output = sess.run(output)
        self.assertAllEqual(areas, output[fields.InputDataFields.groundtruth_area])
def testDecodeObjectIsCrowd(self):
    """Int64 is_crowd flags decode into a boolean groundtruth_is_crowd tensor."""
    image = np.random.randint(255, size=(4, 5, 3)).astype(np.uint8)
    jpeg_bytes = self._EncodeImage(image)
    is_crowd_flags = [0, 1]
    features = {
        'image/encoded': self._BytesFeature(jpeg_bytes),
        'image/format': self._BytesFeature('jpeg'),
        'image/object/is_crowd': self._Int64Feature(is_crowd_flags),
    }
    serialized = tf.train.Example(
        features=tf.train.Features(feature=features)).SerializeToString()
    decoder = tf_example_decoder.TfExampleDecoder()
    output = decoder.decode(tf.convert_to_tensor(serialized))
    self.assertAllEqual(
        output[fields.InputDataFields.groundtruth_is_crowd].get_shape().as_list(),
        [None])
    with self.test_session() as sess:
        output = sess.run(output)
        # The decoder casts the int64 flags to bool.
        expected = [bool(flag) for flag in is_crowd_flags]
        self.assertAllEqual(expected, output[fields.InputDataFields.groundtruth_is_crowd])
def testDecodeObjectDifficult(self):
    """Int64 difficult flags decode into a boolean groundtruth_difficult tensor."""
    image = np.random.randint(255, size=(4, 5, 3)).astype(np.uint8)
    jpeg_bytes = self._EncodeImage(image)
    difficult_flags = [0, 1]
    features = {
        'image/encoded': self._BytesFeature(jpeg_bytes),
        'image/format': self._BytesFeature('jpeg'),
        'image/object/difficult': self._Int64Feature(difficult_flags),
    }
    serialized = tf.train.Example(
        features=tf.train.Features(feature=features)).SerializeToString()
    decoder = tf_example_decoder.TfExampleDecoder()
    output = decoder.decode(tf.convert_to_tensor(serialized))
    self.assertAllEqual(
        output[fields.InputDataFields.groundtruth_difficult].get_shape().as_list(),
        [None])
    with self.test_session() as sess:
        output = sess.run(output)
        # The decoder casts the int64 flags to bool.
        expected = [bool(flag) for flag in difficult_flags]
        self.assertAllEqual(expected, output[fields.InputDataFields.groundtruth_difficult])
def testDecodeObjectGroupOf(self):
    """Int64 group_of flags decode into a boolean groundtruth_group_of tensor."""
    image = np.random.randint(255, size=(4, 5, 3)).astype(np.uint8)
    jpeg_bytes = self._EncodeImage(image)
    group_of_flags = [0, 1]
    features = {
        'image/encoded': self._BytesFeature(jpeg_bytes),
        'image/format': self._BytesFeature('jpeg'),
        'image/object/group_of': self._Int64Feature(group_of_flags),
    }
    serialized = tf.train.Example(
        features=tf.train.Features(feature=features)).SerializeToString()
    decoder = tf_example_decoder.TfExampleDecoder()
    output = decoder.decode(tf.convert_to_tensor(serialized))
    self.assertAllEqual(
        output[fields.InputDataFields.groundtruth_group_of].get_shape().as_list(),
        [None])
    with self.test_session() as sess:
        output = sess.run(output)
        # The decoder casts the int64 flags to bool.
        expected = [bool(flag) for flag in group_of_flags]
        self.assertAllEqual(expected, output[fields.InputDataFields.groundtruth_group_of])
def testDecodeInstanceSegmentation(self):
    """With load_instance_masks=True, flattened masks decode back to [N, H, W]."""
    num_instances = 4
    image_height = 5
    image_width = 3
    image_tensor = np.random.randint(255, size=(image_height, image_width, 3)).astype(np.uint8)
    encoded_jpeg = self._EncodeImage(image_tensor)
    # Binary masks, stored flattened in the example; the decoder uses the
    # image/height and image/width features to restore the [N, H, W] shape.
    instance_masks = np.random.randint(2, size=(num_instances, image_height, image_width)).astype(np.float32)
    instance_masks_flattened = np.reshape(instance_masks, [(- 1)])
    object_classes = np.random.randint(100, size=num_instances).astype(np.int64)
    example = tf.train.Example(features=tf.train.Features(feature={'image/encoded': self._BytesFeature(encoded_jpeg), 'image/format': self._BytesFeature('jpeg'), 'image/height': self._Int64Feature([image_height]), 'image/width': self._Int64Feature([image_width]), 'image/object/mask': self._FloatFeature(instance_masks_flattened), 'image/object/class/label': self._Int64Feature(object_classes)})).SerializeToString()
    example_decoder = tf_example_decoder.TfExampleDecoder(load_instance_masks=True)
    tensor_dict = example_decoder.decode(tf.convert_to_tensor(example))
    # All three mask dimensions are dynamic at graph-build time.
    self.assertAllEqual(tensor_dict[fields.InputDataFields.groundtruth_instance_masks].get_shape().as_list(), [None, None, None])
    self.assertAllEqual(tensor_dict[fields.InputDataFields.groundtruth_classes].get_shape().as_list(), [None])
    with self.test_session() as sess:
        tensor_dict = sess.run(tensor_dict)
        self.assertAllEqual(instance_masks.astype(np.float32), tensor_dict[fields.InputDataFields.groundtruth_instance_masks])
        self.assertAllEqual(object_classes, tensor_dict[fields.InputDataFields.groundtruth_classes])
def testInstancesNotAvailableByDefault(self):
    """Masks are absent from the output unless load_instance_masks is enabled."""
    count, height, width = 4, 5, 3
    image = np.random.randint(255, size=(height, width, 3)).astype(np.uint8)
    jpeg_bytes = self._EncodeImage(image)
    masks = np.random.randint(2, size=(count, height, width)).astype(np.float32)
    labels = np.random.randint(100, size=count).astype(np.int64)
    features = {
        'image/encoded': self._BytesFeature(jpeg_bytes),
        'image/format': self._BytesFeature('jpeg'),
        'image/height': self._Int64Feature([height]),
        'image/width': self._Int64Feature([width]),
        'image/object/mask': self._FloatFeature(masks.flatten()),
        'image/object/class/label': self._Int64Feature(labels),
    }
    serialized = tf.train.Example(
        features=tf.train.Features(feature=features)).SerializeToString()
    # Default decoder: load_instance_masks is off.
    decoder = tf_example_decoder.TfExampleDecoder()
    output = decoder.decode(tf.convert_to_tensor(serialized))
    self.assertTrue(fields.InputDataFields.groundtruth_instance_masks not in output)
def pack_innermost_dim_as_hex_string(ndarray, dtype, pad_to_nbits, reverse_inner=False, prefix='0x'):
    """Pack each innermost-dimension vector of *ndarray* into one hex string.

    Applies ``array2hexstring`` along the last axis, producing an array of
    strings whose shape equals ``ndarray.shape[:-1]``.

    Args:
        ndarray: array-like input; coerced to a float32 ndarray if needed.
        dtype: element datatype passed through to ``array2hexstring``.
        pad_to_nbits: total bit width each packed hex string is padded to.
        reverse_inner: if True, reverse the innermost dimension before packing.
        prefix: string prepended to every hex string (default '0x').

    Returns:
        np.ndarray of hex strings with the innermost dimension collapsed.
    """
    # Idiom fix: use isinstance rather than comparing type() to np.ndarray.
    # Coerce lists / non-float32 arrays once, up front.
    if not isinstance(ndarray, np.ndarray) or ndarray.dtype != np.float32:
        ndarray = np.asarray(ndarray, dtype=np.float32)

    def fun(x):
        return array2hexstring(x, dtype, pad_to_nbits, reverse=reverse_inner, prefix=prefix)

    return np.apply_along_axis(fun, ndarray.ndim - 1, ndarray)
def display_in_terminal(obj):
    """Render *obj* as sixel graphics and print it to a sixel-capable terminal.

    Converts *obj* to a list of PIL images (via ``convert_to_images``), pastes
    them side by side into one RGB canvas, sixel-encodes the canvas with
    libsixel, and prints the resulting escape sequence to stdout.

    Raises:
        ImportError: if Pillow or libsixel-python is not installed.
        RuntimeError: if the composed image has an unsupported mode.
    """
    try:
        import PIL
        # BUGFIX: SIXEL_BUILTIN_G8 / SIXEL_BUILTIN_G1 are used in the 'L' and
        # '1' branches below but were previously not imported (NameError).
        from libsixel import (
            sixel_output_new, sixel_dither_new, sixel_dither_initialize,
            sixel_dither_set_palette, sixel_dither_set_pixelformat,
            sixel_dither_get, sixel_encode, sixel_dither_unref,
            sixel_output_unref, SIXEL_PIXELFORMAT_RGBA8888,
            SIXEL_PIXELFORMAT_RGB888, SIXEL_PIXELFORMAT_PAL8,
            SIXEL_PIXELFORMAT_G8, SIXEL_PIXELFORMAT_G1,
            SIXEL_BUILTIN_G8, SIXEL_BUILTIN_G1)
    except ImportError as err:
        raise ImportError('Display in Terminal requires Pillow, libsixel and a libsixel compatible terminal. Please read info at and install with pip install Pillow libsixel-python') from err
    s = BytesIO()
    images = convert_to_images(obj)
    # Lay the images out horizontally on a single canvas.
    (widths, heights) = zip(*(i.size for i in images))
    output_width = sum(widths)
    output_height = max(heights)
    output_image = PIL.Image.new('RGB', (output_width, output_height))
    x_offset = 0
    for im in images:
        output_image.paste(im, (x_offset, 0))
        x_offset += im.size[0]
    try:
        data = output_image.tobytes()
    except NotImplementedError:
        # Very old Pillow/PIL versions only expose tostring().
        data = output_image.tostring()
    # libsixel streams encoded bytes through this callback into the buffer.
    output = sixel_output_new((lambda data, s: s.write(data)), s)
    try:
        # NOTE(review): the canvas is created in 'RGB' mode above, so only the
        # RGB branch should be taken; the others cover mode changes upstream.
        if (output_image.mode == 'RGBA'):
            dither = sixel_dither_new(256)
            sixel_dither_initialize(dither, data, output_width, output_height, SIXEL_PIXELFORMAT_RGBA8888)
        elif (output_image.mode == 'RGB'):
            dither = sixel_dither_new(256)
            sixel_dither_initialize(dither, data, output_width, output_height, SIXEL_PIXELFORMAT_RGB888)
        elif (output_image.mode == 'P'):
            palette = output_image.getpalette()
            dither = sixel_dither_new(256)
            sixel_dither_set_palette(dither, palette)
            sixel_dither_set_pixelformat(dither, SIXEL_PIXELFORMAT_PAL8)
        elif (output_image.mode == 'L'):
            dither = sixel_dither_get(SIXEL_BUILTIN_G8)
            sixel_dither_set_pixelformat(dither, SIXEL_PIXELFORMAT_G8)
        elif (output_image.mode == '1'):
            dither = sixel_dither_get(SIXEL_BUILTIN_G1)
            sixel_dither_set_pixelformat(dither, SIXEL_PIXELFORMAT_G1)
        else:
            raise RuntimeError('unexpected output_image mode')
        try:
            sixel_encode(data, output_width, output_height, 1, dither, output)
            print(s.getvalue().decode('ascii'))
        finally:
            sixel_dither_unref(dither)
    finally:
        sixel_output_unref(output)
def make_dataloaders(data_with_covariates, **kwargs):
    """Build train/val/test dataloaders from a covariate dataframe.

    Trains on rows with ``date`` before 2016-09-01 and validates/tests on a
    dataset derived from the training dataset. Extra ``kwargs`` are forwarded
    to ``TimeSeriesDataSet``; sensible defaults are filled in for the stallion
    fixture (volume target, agency/sku groups).
    """
    cutoff = '2016-09-01'
    encoder_len = 4
    prediction_len = 3
    defaults = dict(
        target='volume',
        group_ids=['agency', 'sku'],
        add_relative_time_idx=True,
        time_varying_unknown_reals=['volume'],
    )
    for key, value in defaults.items():
        kwargs.setdefault(key, value)
    training = TimeSeriesDataSet(
        data_with_covariates[lambda x: x.date < cutoff].copy(),
        time_idx='time_idx',
        max_encoder_length=encoder_len,
        max_prediction_length=prediction_len,
        **kwargs,
    )
    # Validation reuses the training normalization/encoders and only predicts
    # time steps after the last training index.
    validation = TimeSeriesDataSet.from_dataset(
        training,
        data_with_covariates.copy(),
        min_prediction_idx=training.index.time.max() + 1,
    )
    return dict(
        train=training.to_dataloader(train=True, batch_size=2, num_workers=0),
        val=validation.to_dataloader(train=False, batch_size=2, num_workers=0),
        test=validation.to_dataloader(train=False, batch_size=1, num_workers=0),
    )
def apply_pq_coupler_config_settings(schema, config):
    """Attach p/q coupler configs to schema layers that use u-channels.

    Walks the layer schema in order, tracking whether a 'flatten' layer has
    been seen so far (coupler configs differ before/after flattening), and
    returns a new schema list; layers with ``num_u_channels > 0`` are shallow-
    copied with 'p_coupler' and 'q_coupler' entries added, others pass through
    unchanged. The input schema is not mutated.
    """
    updated = []
    seen_flatten = False
    for entry in schema:
        if entry['type'] == 'flatten':
            seen_flatten = True
        if entry.get('num_u_channels', 0) > 0:
            entry = {
                **entry,
                'p_coupler': get_p_coupler_config(config, seen_flatten),
                'q_coupler': get_q_coupler_config(config, seen_flatten),
            }
        updated.append(entry)
    return updated
def aggregate_rank_corrs(full_df, task, num_layers, METRICS, sub_df_fn, list_layers=None):
    """Aggregate Spearman/Kendall rank correlations per metric over layers.

    For each layer depth in *list_layers*, selects a sub-dataframe via
    ``sub_df_fn(full_df, task, ref_depth)`` and computes rank correlations for
    every metric with ``get_rank_corrs``.

    Args:
        full_df: dataframe with per-layer results.
        task: task identifier forwarded to ``sub_df_fn`` / ``get_rank_corrs``.
        num_layers: number of layers; used only when *list_layers* is None.
        METRICS: iterable of metric names to aggregate.
        sub_df_fn: callable ``(full_df, task, ref_depth) -> sub_df``.
        list_layers: explicit layer indices; defaults to ``range(num_layers)``.

    Returns:
        Tuple ``(rho, rho_p, tau, tau_p, bad_fracs)`` of dicts mapping each
        metric to a per-layer list.
    """
    # Fix: identity comparison with None (was `list_layers == None`).
    if list_layers is None:
        list_layers = list(range(num_layers))
    rho = {metric: [] for metric in METRICS}
    rho_p = {metric: [] for metric in METRICS}
    tau = {metric: [] for metric in METRICS}
    tau_p = {metric: [] for metric in METRICS}
    bad_fracs = {metric: [] for metric in METRICS}
    for ref_depth in list_layers:
        sub_df = sub_df_fn(full_df, task, ref_depth)
        for metric in METRICS:
            (rho_corr, rho_os_p, tau_corr, tau_os_p, bad_frac) = get_rank_corrs(sub_df, metric, task)
            rho[metric].append(rho_corr)
            rho_p[metric].append(rho_os_p)
            tau[metric].append(tau_corr)
            tau_p[metric].append(tau_os_p)
            bad_fracs[metric].append(bad_frac)
    return (rho, rho_p, tau, tau_p, bad_fracs)
class RetinaNetModule(torch.nn.Module):
    """RetinaNet detector: classification/regression head, anchor generation,
    training loss and test-time post-processing.

    When ``BBAM`` is True, test-time forward additionally returns the raw
    proposals/logits/regression needed by BBAM attribution.
    """

    def __init__(self, cfg, in_channels, BBAM=False):
        super(RetinaNetModule, self).__init__()
        self.cfg = cfg.clone()
        self.BBAM = BBAM
        # Box coder weights follow the standard RetinaNet configuration.
        box_coder = BoxCoder(weights=(10.0, 10.0, 5.0, 5.0))
        self.anchor_generator = make_anchor_generator_retinanet(cfg)
        self.head = RetinaNetHead(cfg, in_channels)
        self.box_selector_test = make_retinanet_postprocessor(
            cfg, box_coder, is_train=False, BBAM=self.BBAM)
        self.loss_evaluator = make_retinanet_loss_evaluator(cfg, box_coder)

    def forward(self, images, features, targets=None):
        """Run the head over FPN features and dispatch to train/test paths."""
        box_cls, box_regression = self.head(features)
        anchors = self.anchor_generator(images, features)
        if self.training:
            return self._forward_train(anchors, box_cls, box_regression, targets)
        return self._forward_test(anchors, box_cls, box_regression)

    def _forward_train(self, anchors, box_cls, box_regression, targets):
        """Compute classification/regression losses against *targets*."""
        loss_cls, loss_reg = self.loss_evaluator(
            anchors, box_cls, box_regression, targets)
        return anchors, {'loss_retina_cls': loss_cls, 'loss_retina_reg': loss_reg}

    def _forward_test(self, anchors, box_cls, box_regression):
        """Post-process raw predictions into detections (plus BBAM extras)."""
        if not self.BBAM:
            boxes = self.box_selector_test(anchors, box_cls, box_regression)
            return boxes, {}
        bbam_outputs = {
            'proposals': anchors,
            'class_logits': box_cls,
            'box_regression': box_regression,
        }
        boxes, _ = self.box_selector_test(
            anchors, box_cls, box_regression, BBAM=self.BBAM)
        return boxes, {}, bbam_outputs
class Bottleneck(_Bottleneck):
    """ResNeSt bottleneck: replaces the parent's conv2 with Split-Attention
    convolution and (optionally) moves strided downsampling into an average
    pool placed after conv2 (``avg_down_stride``)."""

    # Output channels = planes * expansion, as in standard ResNet bottlenecks.
    expansion = 4

    def __init__(self, inplanes, planes, groups=1, base_width=4, base_channels=64, radix=2, reduction_factor=4, avg_down_stride=True, **kwargs):
        super(Bottleneck, self).__init__(inplanes, planes, **kwargs)
        if (groups == 1):
            width = self.planes
        else:
            # Grouped (ResNeXt-style) width scaling.
            width = (math.floor((self.planes * (base_width / base_channels))) * groups)
        # Only use avg-pool downsampling when this block actually strides.
        self.avg_down_stride = (avg_down_stride and (self.conv2_stride > 1))
        (self.norm1_name, norm1) = build_norm_layer(self.norm_cfg, width, postfix=1)
        (self.norm3_name, norm3) = build_norm_layer(self.norm_cfg, (self.planes * self.expansion), postfix=3)
        self.conv1 = build_conv_layer(self.conv_cfg, self.inplanes, width, kernel_size=1, stride=self.conv1_stride, bias=False)
        self.add_module(self.norm1_name, norm1)
        self.with_modulated_dcn = False
        # SplitAttentionConv2d handles its own normalization; when avg-pool
        # downsampling is active, conv2 runs at stride 1 and the avd_layer
        # below performs the spatial reduction instead.
        self.conv2 = SplitAttentionConv2d(width, width, kernel_size=3, stride=(1 if self.avg_down_stride else self.conv2_stride), padding=self.dilation, dilation=self.dilation, groups=groups, radix=radix, reduction_factor=reduction_factor, conv_cfg=self.conv_cfg, norm_cfg=self.norm_cfg, dcn=self.dcn)
        # The parent-created norm2 is unused (conv2 normalizes internally).
        delattr(self, self.norm2_name)
        if self.avg_down_stride:
            self.avd_layer = nn.AvgPool2d(3, self.conv2_stride, padding=1)
        self.conv3 = build_conv_layer(self.conv_cfg, width, (self.planes * self.expansion), kernel_size=1, bias=False)
        self.add_module(self.norm3_name, norm3)

    def forward(self, x):
        """Standard bottleneck forward with optional gradient checkpointing."""

        def _inner_forward(x):
            identity = x
            out = self.conv1(x)
            out = self.norm1(out)
            out = self.relu(out)
            if self.with_plugins:
                out = self.forward_plugin(out, self.after_conv1_plugin_names)
            out = self.conv2(out)
            if self.avg_down_stride:
                # Spatial downsampling moved here from conv2 (ResNeSt trick).
                out = self.avd_layer(out)
            if self.with_plugins:
                out = self.forward_plugin(out, self.after_conv2_plugin_names)
            out = self.conv3(out)
            out = self.norm3(out)
            if self.with_plugins:
                out = self.forward_plugin(out, self.after_conv3_plugin_names)
            if (self.downsample is not None):
                identity = self.downsample(x)
            out += identity
            return out

        if (self.with_cp and x.requires_grad):
            # Trade compute for memory via torch.utils.checkpoint.
            out = cp.checkpoint(_inner_forward, x)
        else:
            out = _inner_forward(x)
        out = self.relu(out)
        return out
class DownAttBlock(nn.Module):
    """Downsampling stage: 3x3 max-pool (stride 2) followed by a stack of
    residual blocks mapping *in_channels* to *out_channels*."""

    def __init__(self, in_channels, out_channels, length):
        super(DownAttBlock, self).__init__()
        # Halve the spatial resolution, then refine features residually.
        self.pool = nn.MaxPool2d(kernel_size=3, stride=2, padding=1)
        self.res_blocks = ResBlockSequence(
            in_channels=in_channels, out_channels=out_channels, length=length)

    def forward(self, x):
        return self.res_blocks(self.pool(x))
def m_elbo(model, x, K=1):
    """Multimodal mixture-of-experts ELBO for a multimodal VAE.

    Args:
        model: multimodal VAE; assumed contract (confirm against caller):
            ``model(x) -> (qz_xs, px_zs, zss)`` with per-modality posteriors,
            an MxM matrix of decoders ``px_zs``, and samples ``zss``; plus
            ``model.pz(*model.pz_params)`` prior and ``model.vaes[d].llik_scaling``.
        x: list of per-modality data batches.
        K: unused in this objective; kept for signature parity with the other
           objectives in this module.

    Returns:
        Scalar tensor: mean-over-samples, summed-over-batch objective.
    """
    (qz_xs, px_zs, zss) = model(x)
    (lpx_zs, klds) = ([], [])
    for (r, qz_x) in enumerate(qz_xs):
        # KL between modality-r posterior and the shared prior.
        kld = kl_divergence(qz_x, model.pz(*model.pz_params))
        klds.append(kld.sum((- 1)))
        for d in range(len(px_zs)):
            # Self-reconstruction term of modality d (diagonal decoder d->d).
            lpx_z = px_zs[d][d].log_prob(x[d]).view(*px_zs[d][d].batch_shape[:2], (- 1))
            lpx_z = (lpx_z * model.vaes[d].llik_scaling).sum((- 1))
            if (d == r):
                # log-weight 0 => weight 1 for the matching modality.
                lwt = torch.tensor(0.0)
            else:
                # Cross-modality importance log-weight; gradients are blocked
                # through the sampled z and through the d-th posterior.
                zs = zss[d].detach()
                lwt = (qz_x.log_prob(zs) - qz_xs[d].log_prob(zs).detach()).sum((- 1))
            lpx_zs.append((lwt.exp() * lpx_z))
    # Average over modalities: sum of weighted likelihoods minus sum of KLs.
    obj = ((1 / len(model.vaes)) * (torch.stack(lpx_zs).sum(0) - torch.stack(klds).sum(0)))
    return obj.mean(0).sum()
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.