code stringlengths 101 5.91M |
|---|
def _parse_prolog_expression(tree):
_features = []
for line in tree.splitlines():
if (':-' in line):
_rhs = line.split(':-')[1]
for _portion in _rhs.split(' '):
if ('(' in _portion):
_features += [_portion.split('(')[0]]
return _features |
def main(argv=None):
    """Re-compress FASTA/FASTQ files into one block-gzipped (BGZF) file.

    Each input file is read with ``screed``; records are re-emitted into a
    single BGZF-compressed output named by ``-o``.

    Parameters
    ----------
    argv : list of str, optional
        Command-line arguments; defaults to ``sys.argv[1:]`` at call time
        (the original evaluated ``sys.argv[1:]`` once at import time).

    Returns
    -------
    int
        0 on success.
    """
    if argv is None:
        argv = sys.argv[1:]
    p = argparse.ArgumentParser()
    p.add_argument('input_files', nargs='+')
    p.add_argument('-o', '--output')
    args = p.parse_args(argv)
    assert args.output, 'must specify -o'
    output_filename = args.output
    outfp = bgzf.open(output_filename, 'wb')
    print('output file will be {}'.format(output_filename))
    for input_file in args.input_files:
        print('turning {} into a block-gzipped (BGZF) file'.format(input_file))
        with screed.open(input_file) as records_iter:
            for (n, record) in enumerate(records_iter):
                offset = outfp.tell()
                if hasattr(record, 'quality'):
                    # BUG FIX: FASTQ headers must start with '@'; the original
                    # omitted it, producing unparseable output.
                    outfp.write('@{}\n{}\n+\n{}\n'.format(record.name, record.sequence, record.quality))
                else:
                    outfp.write('>{}\n{}\n'.format(record.name, record.sequence))
                if ((n % 100000) == 0):
                    # Progress on a single line (carriage return, no newline).
                    print('offset for {} is {}'.format(n, offset), end='\r')
    print('')
    outfp.close()
    print('done!')
    return 0
def add_jpeg_decoding(input_width, input_height, input_depth, input_mean, input_std):
    """Build TF-1 graph ops that decode and preprocess a single JPEG image.

    Returns ``(jpeg_data, mul_image)``: the string placeholder to feed raw
    JPEG bytes into, and the resized, mean/std-normalized float image tensor
    (shape [1, input_height, input_width, input_depth]).
    """
    jpeg_data = tf.placeholder(tf.string, name='DecodeJPGInput')
    image = tf.image.decode_jpeg(jpeg_data, channels=input_depth)
    image = tf.cast(image, dtype=tf.float32)
    # Add a batch dimension so resize_bilinear gets a 4-D tensor.
    batched = tf.expand_dims(image, 0)
    target_shape = tf.cast(tf.stack([input_height, input_width]), dtype=tf.int32)
    resized = tf.image.resize_bilinear(batched, target_shape)
    # Normalize: subtract the mean, then scale by 1/std.
    normalized = tf.multiply(tf.subtract(resized, input_mean), 1.0 / input_std)
    return (jpeg_data, normalized)
def knn(x: torch.Tensor, y: torch.Tensor, k: int, batch_x: Optional[torch.Tensor]=None, batch_y: Optional[torch.Tensor]=None, cosine: bool=False, num_workers: int=1, batch_size: Optional[int]=None) -> torch.Tensor:
    """For each element of ``y``, find its ``k`` nearest neighbors in ``x``.

    Thin front-end for the native ``torch_cluster`` op: normalizes inputs to
    2-D contiguous tensors, derives the number of example graphs from the
    optional ``batch_x``/``batch_y`` assignment vectors, converts them to
    pointer (offset) vectors, and dispatches to
    ``torch.ops.torch_cluster.knn``.  Returns an empty ``[2, 0]`` long tensor
    when either input is empty.
    """
    if ((x.numel() == 0) or (y.numel() == 0)):
        return torch.empty(2, 0, dtype=torch.long, device=x.device)
    # Promote 1-D inputs to [N, 1] feature matrices.
    x = (x.view((- 1), 1) if (x.dim() == 1) else x)
    y = (y.view((- 1), 1) if (y.dim() == 1) else y)
    (x, y) = (x.contiguous(), y.contiguous())
    if (batch_size is None):
        # Infer the number of graphs from the max batch index (+1).
        batch_size = 1
        if (batch_x is not None):
            assert (x.size(0) == batch_x.numel())
            batch_size = (int(batch_x.max()) + 1)
        if (batch_y is not None):
            assert (y.size(0) == batch_y.numel())
            batch_size = max(batch_size, (int(batch_y.max()) + 1))
    assert (batch_size > 0)
    ptr_x: Optional[torch.Tensor] = None
    ptr_y: Optional[torch.Tensor] = None
    if (batch_size > 1):
        assert (batch_x is not None)
        assert (batch_y is not None)
        # bucketize converts the batch-assignment vectors into offset vectors
        # of length batch_size + 1 (assumes batch_x/batch_y are sorted —
        # TODO confirm; the native op expects CSR-style pointers).
        arange = torch.arange((batch_size + 1), device=x.device)
        ptr_x = torch.bucketize(arange, batch_x)
        ptr_y = torch.bucketize(arange, batch_y)
    return torch.ops.torch_cluster.knn(x, y, ptr_x, ptr_y, k, cosine, num_workers)
def write_file(num_of_users, path='./darknet/data/train.txt'):
    """Write one 'data/dog.jpg' line per user to a darknet training list.

    Parameters
    ----------
    num_of_users : int
        Number of lines to write.
    path : str, optional
        Output file path; defaults to the originally hard-coded location,
        so existing callers are unaffected.
    """
    # Context manager guarantees the handle is closed even if a write fails
    # (the original left the file open on error); a single write replaces
    # many tiny per-line writes.
    with open(path, 'w') as f:
        f.write('data/dog.jpg\n' * num_of_users)
def index_in_saturation(A, proof=True):
    """Return the index of the row span of ``A`` in its saturation.

    Computed as the absolute determinant of a square Hermite normal form of
    ``A`` (reducing first when there are extra rows, and transposing plus
    re-reducing when the reduced matrix is still not square).  A rank-zero
    matrix yields 1.
    """
    rk = A.rank()
    if rk == 0:
        return ZZ.one()
    if rk < A.nrows():
        # Drop dependent rows via the Hermite normal form.
        A = A.hermite_form(proof=proof, include_zero_rows=False)
    if not A.is_square():
        A = A.transpose().hermite_form(proof=proof, include_zero_rows=False)
    return abs(A.determinant(proof=proof))
def main():
    """Convert a directory of MIDI files into train/valid/test JSON data.

    Files found under ``args.input_dir`` are assigned to splits with an
    8:1:1 weighting, processed (serially or via joblib depending on
    ``n_jobs``), and a random sample of test outputs is listed in
    ``samples.txt``.
    """
    args = parse_arguments()
    args.output_dir.mkdir(exist_ok=True)
    for subdir in ('train', 'valid', 'test'):
        (args.output_dir / subdir).mkdir(exist_ok=True)
    assert (args.n_jobs >= 1), '`n_jobs` must be a positive integer.'
    setup_loggers(filename=(args.output_dir / Path(__file__).with_suffix('.log').name), quiet=args.quiet)
    # Fixed seed so split assignment and sampling are reproducible.
    random.seed(0)
    logging.info('Collecting filenames...')
    filenames = list(args.input_dir.rglob('*.mid'))
    # Weighted random split assignment, one label per input file.
    splits = random.choices(('train', 'valid', 'test'), (8, 1, 1), k=len(filenames))
    assert filenames, 'No input files found.'
    logging.info('Start collecting data...')
    if (args.n_jobs == 1):
        count = 0
        filenames = tqdm.tqdm(filenames, disable=args.quiet, ncols=80)
        for (filename, split) in zip(filenames, splits):
            if process_and_save(filename, args.output_dir, split):
                count += 1
        logging.info(f'Successfully saved {count} files.')
    else:
        # Parallel path: process_and_save's truthy return marks success.
        results = joblib.Parallel(args.n_jobs, verbose=(0 if args.quiet else 5))((joblib.delayed(process_and_save)(filename, args.output_dir, split) for (filename, split) in zip(filenames, splits)))
        count = sum((bool(x) for x in results))
        logging.info(f'Successfully saved {count} files.')
    # Record a (bounded) random sample of test-set outputs by stem.
    sample_filenames = list(args.output_dir.glob('test/*.json'))
    if (len(sample_filenames) > args.samples):
        sample_filenames = random.sample(sample_filenames, args.samples)
    with open((args.output_dir / 'samples.txt'), 'w') as f:
        for sample_filename in sample_filenames:
            f.write(f'''{sample_filename.stem}
''')
    logging.info(f'Successfully sampled {len(sample_filenames)} test files.')
def ken_lm_abs_score_bpe_strings_dense(handle, bpe_merge_symbol, strings, labels):
    """Delegate to the native TF op scoring BPE strings with KenLM (dense output)."""
    op = get_tf_mod().ken_lm_abs_score_bpe_strings_dense
    return op(handle=handle, bpe_merge_symbol=bpe_merge_symbol, strings=strings, labels=labels)
class SennaVocab(EmbeddedVocab):
    """Vocabulary backed by the 50-dimensional SENNA word embeddings.

    NOTE(review): the embeddings/words URLs were truncated to a lone quote in
    this source (a syntax error); they are restored as empty placeholders and
    must be filled in with the real SENNA download locations.
    """

    embeddings_url = ''  # TODO: restore the SENNA embeddings.txt URL (truncated in source)
    words_url = ''  # TODO: restore the SENNA words.lst URL (truncated in source)
    n_dim = 50

    def __init__(self, unk='UNKNOWN'):
        super(SennaVocab, self).__init__(unk=unk)

    @classmethod
    def gen_word_list(cls, fname):
        """Yield one word per line of ``fname``, stripped of trailing newlines."""
        with open(fname) as f:
            for line in f:
                yield line.rstrip('\n\r')

    @classmethod
    def gen_embeddings(cls, fname):
        """Yield one embedding vector (1-D float ndarray) per line of ``fname``."""
        with open(fname) as f:
            for line in f:
                yield np.fromstring(line, sep=' ')

    def get_embeddings(self, rand=None, dtype='float32'):
        """Build the embedding matrix for this vocabulary.

        Words present in the SENNA data receive their pretrained vector; all
        other rows keep values from ``rand`` (uniform in [-0.1, 0.1) by
        default).  The unknown-word row is backfilled from the seen words.
        """
        rand = rand if rand else (lambda shape: np.random.uniform(-0.1, 0.1, size=shape))
        embeddings = get_data_or_download('senna', 'embeddings.txt', self.embeddings_url)
        words = get_data_or_download('senna', 'words.lst', self.words_url)
        E = rand((len(self), self.n_dim)).astype(dtype)
        seen = set()
        # zip replaces the Python-2-only itertools.izip used originally.
        for w, e in zip(self.gen_word_list(words), self.gen_embeddings(embeddings)):
            if w in self:
                seen.add(w)
                E[self[w]] = e
        self.backfill_unk_emb(E, seen)
        return E
def tf_efficientnet_es(pretrained=False, **kwargs):
    """EfficientNet-Edge Small matching the TF reference: 'SAME' padding and
    TensorFlow's BatchNorm epsilon."""
    kwargs.update(bn_eps=BN_EPS_TF_DEFAULT, pad_type='same')
    return _gen_efficientnet_edge('tf_efficientnet_es', channel_multiplier=1.0, depth_multiplier=1.0, pretrained=pretrained, **kwargs)
class ZoomWidget():
    """ImGui widget exposing a field-of-view slider with snap/reset controls,
    feeding the resulting focal length into the visualizer args."""

    def __init__(self, viz):
        self.viz = viz
        self.fov = 18.837  # current field of view, in degrees
        self.fov_default = 18.837

    # NOTE(review): this appeared as a bare expression statement in the
    # source; it is almost certainly a decorator that lost its '@' during
    # extraction — restored here.
    @_utils.scoped_by_object_id
    def __call__(self, show=True):
        viz = self.viz
        if show:
            imgui.text('FOV')
            imgui.same_line(viz.label_w)
            with imgui_utils.item_width(viz.font_size * 10):
                _changed, self.fov = imgui.slider_float('##fov', self.fov, 12, 45, format='%.2f Degrees')
            imgui.same_line(viz.label_w + viz.font_size * 13 + viz.button_w + viz.spacing * 3)
            snapped = round(self.fov)
            if imgui_utils.button('Snap', width=viz.button_w, enabled=(self.fov != snapped)):
                self.fov = snapped
            imgui.same_line()
            if imgui_utils.button('Reset', width=-1, enabled=(abs(self.fov - self.fov_default) > 0.01)):
                self.fov = self.fov_default
        # Convert FOV (degrees) to a focal length for the renderer.
        viz.args.focal_length = float(1 / (np.tan(self.fov * 3.14159 / 360) * 1.414))
def direct_mask_generation(rep_mask, direct, attn_self, name=None):
    """Build a boolean [bs, sl, sl] mask combining padding and directionality.

    ``direct`` selects a 'forward' (attend to earlier positions) or
    'backward' (attend to later positions) triangular pattern, and
    ``attn_self`` controls whether the diagonal (self-attention) is kept.
    """
    assert direct in ['forward', 'backward']
    with tf.name_scope(name or 'direct_mask_generation'):
        bs, sl = get_shape_list(rep_mask, 2)
        # Pair (i, j) is valid only if both tokens are real (non-padding).
        pair_mask = tf.logical_and(tf.expand_dims(rep_mask, 1), tf.expand_dims(rep_mask, 2))
        positions = tf.range(sl, dtype=tf.int32)
        cols, rows = tf.meshgrid(positions, positions)
        cmp = tf.greater_equal if attn_self else tf.greater
        if direct == 'forward':
            tri = cmp(rows, cols)
        elif direct == 'backward':
            tri = cmp(cols, rows)
        else:
            raise AttributeError
        # Broadcast the triangular pattern over the batch dimension.
        tri = tf.tile(tf.expand_dims(tri, 0), [bs, 1, 1])
        return tf.logical_and(pair_mask, tri)
class MLflowCallback(TrainerCallback):
    """``TrainerCallback`` that logs parameters, metrics and (optionally)
    checkpoint artifacts to MLflow.

    Behaviour is driven by environment variables: ``HF_MLFLOW_LOG_ARTIFACTS``,
    ``MLFLOW_NESTED_RUN``, ``MLFLOW_EXPERIMENT_NAME``,
    ``MLFLOW_FLATTEN_PARAMS``, ``MLFLOW_RUN_ID`` and ``MLFLOW_TAGS``.
    """

    def __init__(self):
        if (not is_mlflow_available()):
            raise RuntimeError('MLflowCallback requires mlflow to be installed. Run `pip install mlflow`.')
        import mlflow
        # MLflow's own validation limits, used below to truncate/batch logging.
        self._MAX_PARAM_VAL_LENGTH = mlflow.utils.validation.MAX_PARAM_VAL_LENGTH
        self._MAX_PARAMS_TAGS_PER_BATCH = mlflow.utils.validation.MAX_PARAMS_TAGS_PER_BATCH
        self._initialized = False
        self._auto_end_run = False
        self._log_artifacts = False
        self._ml_flow = mlflow

    def setup(self, args, state, model):
        """Read env config, start or attach to an MLflow run, and log the
        combined trainer + model-config parameters (world process zero only)."""
        self._log_artifacts = (os.getenv('HF_MLFLOW_LOG_ARTIFACTS', 'FALSE').upper() in ENV_VARS_TRUE_VALUES)
        self._nested_run = (os.getenv('MLFLOW_NESTED_RUN', 'FALSE').upper() in ENV_VARS_TRUE_VALUES)
        self._experiment_name = os.getenv('MLFLOW_EXPERIMENT_NAME', None)
        self._flatten_params = (os.getenv('MLFLOW_FLATTEN_PARAMS', 'FALSE').upper() in ENV_VARS_TRUE_VALUES)
        self._run_id = os.getenv('MLFLOW_RUN_ID', None)
        # NOTE(review): 'tags=' interpolates self._nested_run a second time —
        # looks like a copy/paste slip (likely meant the MLFLOW_TAGS value);
        # the log line is left unchanged here.
        logger.debug(f'MLflow experiment_name={self._experiment_name}, run_name={args.run_name}, nested={self._nested_run}, tags={self._nested_run}')
        if state.is_world_process_zero:
            if ((self._ml_flow.active_run() is None) or self._nested_run or self._run_id):
                if self._experiment_name:
                    self._ml_flow.set_experiment(self._experiment_name)
                self._ml_flow.start_run(run_name=args.run_name, nested=self._nested_run)
                logger.debug(f'MLflow run started with run_id={self._ml_flow.active_run().info.run_id}')
                # We started this run, so we are responsible for ending it.
                self._auto_end_run = True
            combined_dict = args.to_dict()
            if (hasattr(model, 'config') and (model.config is not None)):
                model_config = model.config.to_dict()
                combined_dict = {**model_config, **combined_dict}
            combined_dict = (flatten_dict(combined_dict) if self._flatten_params else combined_dict)
            # Drop values exceeding MLflow's per-parameter length limit.
            for (name, value) in list(combined_dict.items()):
                if (len(str(value)) > self._MAX_PARAM_VAL_LENGTH):
                    logger.warning(f"""Trainer is attempting to log a value of "{value}" for key "{name}" as a parameter. MLflow's log_param() only accepts values no longer than 250 characters so we dropped this attribute. You can use `MLFLOW_FLATTEN_PARAMS` environment variable to flatten the parameters and avoid this message.""")
                    del combined_dict[name]
            # Log parameters in MLflow-sized batches.
            combined_dict_items = list(combined_dict.items())
            for i in range(0, len(combined_dict_items), self._MAX_PARAMS_TAGS_PER_BATCH):
                self._ml_flow.log_params(dict(combined_dict_items[i:(i + self._MAX_PARAMS_TAGS_PER_BATCH)]))
            mlflow_tags = os.getenv('MLFLOW_TAGS', None)
            if mlflow_tags:
                mlflow_tags = json.loads(mlflow_tags)
                self._ml_flow.set_tags(mlflow_tags)
        self._initialized = True

    def on_train_begin(self, args, state, control, model=None, **kwargs):
        if (not self._initialized):
            self.setup(args, state, model)

    def on_log(self, args, state, control, logs, model=None, **kwargs):
        if (not self._initialized):
            self.setup(args, state, model)
        if state.is_world_process_zero:
            # MLflow only accepts numeric metrics; warn about anything else.
            metrics = {}
            for (k, v) in logs.items():
                if isinstance(v, (int, float)):
                    metrics[k] = v
                else:
                    logger.warning(f"""Trainer is attempting to log a value of "{v}" of type {type(v)} for key "{k}" as a metric. MLflow's log_metric() only accepts float and int types so we dropped this attribute.""")
            self._ml_flow.log_metrics(metrics=metrics, step=state.global_step)

    def on_train_end(self, args, state, control, **kwargs):
        if (self._initialized and state.is_world_process_zero):
            if (self._auto_end_run and self._ml_flow.active_run()):
                self._ml_flow.end_run()

    def on_save(self, args, state, control, **kwargs):
        """If artifact logging is enabled, push the just-saved checkpoint
        directory as an MLflow pyfunc model artifact."""
        if (self._initialized and state.is_world_process_zero and self._log_artifacts):
            ckpt_dir = f'checkpoint-{state.global_step}'
            artifact_path = os.path.join(args.output_dir, ckpt_dir)
            logger.info(f'Logging checkpoint artifacts in {ckpt_dir}. This may take time.')
            self._ml_flow.pyfunc.log_model(ckpt_dir, artifacts={'model_path': artifact_path}, python_model=self._ml_flow.pyfunc.PythonModel())

    def __del__(self):
        # End the auto-started run even during interpreter teardown, when
        # mlflow may be partially unloaded (hence the getattr/callable guards).
        if (self._auto_end_run and callable(getattr(self._ml_flow, 'active_run', None)) and (self._ml_flow.active_run() is not None)):
            self._ml_flow.end_run()
class CupyFrontend():
    """Adapts CuPy arrays for use as CUDA driver kernel arguments."""

    # NOTE(review): declared without 'self' in the source, so an instance call
    # would have bound the instance to `cupy_ndarray`.  Marked @staticmethod
    # so both `CupyFrontend.argument(a)` and instance calls behave correctly.
    @staticmethod
    def argument(cupy_ndarray: 'cp.ndarray'):
        """Return a ``CUdeviceptr`` wrapping the array's device pointer."""
        return cuda.CUdeviceptr(int(cupy_ndarray.data.ptr))
def attention_decoder(decoder_inputs, initial_state, attention_states, cell, output_size=None, num_heads=1, loop_function=None, dtype=None, scope=None, initial_state_attention=False):
    """RNN decoder with (multi-head) additive attention over ``attention_states``.

    Legacy TF-1 seq2seq decoder: at every step the cell input is the current
    token concatenated with the attention context vectors, and the output is
    a linear projection of [cell_output; attns].

    NOTE(review): the final ``return`` includes ``weights``, which is only
    assigned in the non-initial-attention branch of the loop — with
    ``initial_state_attention=True`` and a single decoder input (or an empty
    schedule) it would be unbound at return.  Left unchanged; confirm against
    callers.
    """
    if (not decoder_inputs):
        raise ValueError('Must provide at least 1 input to attention decoder.')
    if (num_heads < 1):
        raise ValueError('With less than 1 heads, use a non-attention decoder.')
    if (attention_states.get_shape()[2].value is None):
        raise ValueError(('Shape[2] of attention_states must be known: %s' % attention_states.get_shape()))
    if (output_size is None):
        output_size = cell.output_size
    with variable_scope.variable_scope((scope or 'attention_decoder'), dtype=dtype) as scope:
        dtype = scope.dtype
        batch_size = array_ops.shape(decoder_inputs[0])[0]
        attn_length = attention_states.get_shape()[1].value
        if (attn_length is None):
            attn_length = array_ops.shape(attention_states)[1]
        attn_size = attention_states.get_shape()[2].value
        # Reshape memory to 4-D so the attention projection can use conv2d.
        hidden = array_ops.reshape(attention_states, [(- 1), attn_length, 1, attn_size])
        hidden_features = []
        v = []
        attention_vec_size = attn_size
        # One projection (W) and score vector (v) per attention head.
        for a in xrange(num_heads):
            k = variable_scope.get_variable(('AttnW_%d' % a), [1, 1, attn_size, attention_vec_size])
            hidden_features.append(nn_ops.conv2d(hidden, k, [1, 1, 1, 1], 'SAME'))
            v.append(variable_scope.get_variable(('AttnV_%d' % a), [attention_vec_size]))
        state = initial_state
        def attention(query):
            """Additive attention: context vectors for ``query``, one per head.

            NOTE(review): the loop variable ``a`` is rebound to the softmax
            weights inside the loop, so the returned second element is the
            weights of the LAST head only.
            """
            ds = []
            if nest.is_sequence(query):
                # Flatten structured (e.g. LSTM tuple) states into one matrix.
                query_list = nest.flatten(query)
                for q in query_list:
                    ndims = q.get_shape().ndims
                    if ndims:
                        assert (ndims == 2)
                query = array_ops.concat(query_list, 1)
            for a in xrange(num_heads):
                with variable_scope.variable_scope(('Attention_%d' % a)):
                    y = linear(query, attention_vec_size, True)
                    y = array_ops.reshape(y, [(- 1), 1, 1, attention_vec_size])
                    # Score: v^T tanh(W_mem * memory + W_query * query).
                    s = math_ops.reduce_sum((v[a] * math_ops.tanh((hidden_features[a] + y))), [2, 3])
                    a = nn_ops.softmax(s)
                    # Context: attention-weighted sum of the memory.
                    d = math_ops.reduce_sum((array_ops.reshape(a, [(- 1), attn_length, 1, 1]) * hidden), [1, 2])
                    ds.append(array_ops.reshape(d, [(- 1), attn_size]))
            return (ds, a)
        outputs = []
        prev = None
        batch_attn_size = array_ops.stack([batch_size, attn_size])
        # Start from zero context vectors (static shape pinned for linear()).
        attns = [array_ops.zeros(batch_attn_size, dtype=dtype) for _ in xrange(num_heads)]
        for a in attns:
            a.set_shape([None, attn_size])
        if initial_state_attention:
            (attns, _) = attention(initial_state)
        for (i, inp) in enumerate(decoder_inputs):
            if (i > 0):
                variable_scope.get_variable_scope().reuse_variables()
            # When decoding (e.g. sampling), feed back the previous output.
            if ((loop_function is not None) and (prev is not None)):
                with variable_scope.variable_scope('loop_function', reuse=True):
                    inp = loop_function(prev, i)
            input_size = inp.get_shape().with_rank(2)[1]
            if (input_size.value is None):
                raise ValueError(('Could not infer input size from input: %s' % inp.name))
            # Cell input: current token merged with the attention contexts.
            x = linear(([inp] + attns), input_size, True)
            (cell_output, state) = cell(x, state)
            if ((i == 0) and initial_state_attention):
                # Reuse the attention variables already created above.
                with variable_scope.variable_scope(variable_scope.get_variable_scope(), reuse=True):
                    (attns, _) = attention(state)
            else:
                (attns, weights) = attention(state)
            with variable_scope.variable_scope('AttnOutputProjection'):
                output = linear(([cell_output] + attns), output_size, True)
            if (loop_function is not None):
                prev = output
            outputs.append(output)
        return (outputs, state, weights)
class VGG(nn.Module):
    """VGG-style CIFAR classifier built from a layer-configuration table.

    ``args.vgg_type`` selects the configuration; integers are conv output
    channel counts and 'M' inserts a 2x2 max-pooling layer.
    """

    def __init__(self, args, conv3x3=common.default_conv, conv1x1=None):
        super(VGG, self).__init__()
        norm = common.default_norm
        bias = not args.no_bias
        configs = {'A': [64, 'M', 128, 'M', 256, 256, 'M', 512, 512, 'M', 512, 512, 'M'], 'B': [64, 64, 'M', 128, 128, 'M', 256, 256, 'M', 512, 512, 'M', 512, 512, 'M'], '16': [64, 64, 'M', 128, 128, 'M', 256, 256, 256, 'M', 512, 512, 512, 'M', 512, 512, 512, 'M'], '19': [64, 64, 'M', 128, 128, 'M', 256, 256, 256, 256, 'M', 512, 512, 512, 512, 'M', 512, 512, 512, 512, 'M'], 'ef': [32, 64, 'M', 128, 128, 'M', 256, 256, 256, 'M', 256, 256, 256, 'M', 256, 256, 256, 'M']}
        body_list = []
        in_channels = args.n_colors
        for v in configs[args.vgg_type]:
            if v == 'M':
                body_list.append(nn.MaxPool2d(kernel_size=2, stride=2))
            else:
                body_list.append(common.BasicBlock(in_channels, v, args.kernel_size, bias=bias, conv3x3=conv3x3, norm=norm))
                in_channels = v
        # Only CIFAR datasets are supported; the class count is parsed from
        # the dataset name ('CIFAR10' -> 10, 'CIFAR100' -> 100).
        assert args.data_train.find('CIFAR') >= 0
        n_classes = int(args.data_train[5:])
        self.features = nn.Sequential(*body_list)
        self.classifier = nn.Linear(in_channels, n_classes)
        if conv3x3 == common.default_conv:
            if args.pretrained == 'download' or args.extend == 'download':
                # NOTE(review): the pretrained-weights URL was truncated to a
                # lone quote in this source (a syntax error); restored as an
                # empty placeholder.
                url = ''  # TODO: restore the pretrained-model download URL
                model_dir = os.path.join('..', 'models')
                os.makedirs(model_dir, exist_ok=True)
                state = torch.utils.model_zoo.load_url(url, model_dir=model_dir)
            elif args.extend:
                state = torch.load(args.extend)
            else:
                # No weights to load: random init and stop here.
                common.init_vgg(self)
                return
            self.load_state_dict(state)

    def forward(self, x):
        x = self.features(x)
        # Squeeze the trailing 1x1 spatial dims before the linear classifier.
        x = self.classifier(x.squeeze())
        return x
class CrossEntropyLoss2d(nn.Module):
    """Pixel-wise cross-entropy for segmentation: log-softmax over the
    channel dim followed by negative log-likelihood.

    Uses ``nn.NLLLoss`` instead of the deprecated/removed ``nn.NLLLoss2d``;
    for [N, C, H, W] inputs the two are equivalent.
    """

    def __init__(self, weight=None):
        super().__init__()
        # ``weight`` optionally rescales each class, as with NLLLoss2d.
        self.loss = nn.NLLLoss(weight)

    def forward(self, outputs, targets):
        # Softmax over the channel (class) dimension.
        return self.loss(F.log_softmax(outputs, 1), targets)
class THNNFunctionBackend(FunctionBackend):
    """Legacy THNN backend behaving as a shared singleton: pickling and
    copying all preserve the one instance."""

    def __reduce__(self):
        # Unpickle by fetching the shared backend rather than rebuilding state.
        return (_get_thnn_function_backend, ())

    def __deepcopy__(self, memo):
        # Deep copies return self; record in memo so nested references agree.
        memo[id(self)] = self
        return self

    def __copy__(self):
        return self
def mean_std_per_layer(convs):
    """Per-layer weight statistics.

    ``convs`` is a sequence of (name, module) pairs.  Each layer's weight is
    flattened to one row per output channel; the returned dict maps layer
    name to a DataFrame with per-row mean/std/range and derived ratios.
    """
    stats = {}
    for idx in tqdm(range(len(convs))):
        layer_name, layer = convs[idx][0], convs[idx][1]
        flat = layer.weight
        flat = flat.view(flat.shape[0], -1).detach().numpy()
        frame = pd.DataFrame()
        frame['mean'] = flat.mean(-1)
        frame['std'] = flat.std(-1)
        frame['range'] = flat.max(-1) - flat.min(-1)
        # Ratios of spread/center to the per-row value range.
        frame['s-r'] = frame['std'] / frame['range']
        frame['m-r'] = frame['mean'].abs() / frame['range']
        # Step size of a 16-bin histogram over the range.
        frame['bin'] = frame['range'] / 16
        stats[layer_name] = frame
    return stats
class FileTypes(enum.Enum):
    """Kinds of files/attributes in the dataset; values number 1 through 7."""
    T1 = enum.auto()
    T2 = enum.auto()
    GT = enum.auto()
    MASK = enum.auto()
    AGE = enum.auto()
    GPA = enum.auto()
    GENDER = enum.auto()
def get_idx_and_Y(df, task, split):
    """Split row indices by the ``split`` column and build log10 targets.

    Rows labelled 'Train'/'Test'/'Ext' in ``df[split]`` form the
    train/valid/test index sets.  ``df[task]`` values (floats, or strings
    using ',' as the decimal mark) are converted to float and
    log10-transformed with a small offset.
    """
    def _coerce(value):
        # Floats pass through; strings use a decimal comma.
        if type(value) == float:
            return value
        return float(value.replace(',', '.'))

    indices = tuple(df[df[split] == label].index for label in ('Train', 'Test', 'Ext'))
    Y = np.log10(df[task].apply(_coerce).to_frame().values + 1e-08)
    print(len(indices[0]), len(indices[1]), len(indices[2]))
    return (indices, Y)
class BaseResults():
    """Abstract store for benchmark predictions and fitted strategies.

    Subclasses implement persistence; this base tracks which strategy and
    dataset names have been seen and declares the abstract interface.
    """

    def __init__(self):
        self.strategy_names = []  # strategy names in first-seen order
        self.dataset_names = []  # dataset names in first-seen order
        self.cv = None  # cross-validation splitter, set by subclasses

    def save_predictions(self, strategy_name, dataset_name, y_true, y_pred, y_proba, index, cv_fold, train_or_test):
        """Persist predictions for one strategy/dataset/fold.  Abstract."""
        raise NotImplementedError()

    def load_predictions(self, cv_fold, train_or_test):
        """Load predictions for one fold.  Abstract."""
        raise NotImplementedError('abstract method')

    def check_predictions_exist(self, strategy, dataset_name, cv_fold, train_or_test):
        """Return whether predictions exist for the given key.  Abstract."""
        raise NotImplementedError()

    def save_fitted_strategy(self, strategy, dataset_name, cv_fold):
        """Persist a fitted strategy.  Abstract."""
        raise NotImplementedError()

    def load_fitted_strategy(self, strategy_name, dataset_name, cv_fold):
        """Load a fitted strategy.  Abstract."""
        raise NotImplementedError()

    def check_fitted_strategy_exists(self, strategy, dataset_name, cv_fold):
        """Return whether a fitted strategy was saved for the key.  Abstract."""
        raise NotImplementedError()

    def _append_key(self, strategy_name, dataset_name):
        """Record names, preserving first-seen order without duplicates."""
        if (strategy_name not in self.strategy_names):
            self.strategy_names.append(strategy_name)
        if (dataset_name not in self.dataset_names):
            self.dataset_names.append(dataset_name)

    def _generate_key(self, strategy_name, dataset_name, cv_fold, train_or_test):
        """Build the storage key for one result.  Abstract."""
        raise NotImplementedError()

    def __repr__(self):
        class_name = self.__class__.__name__
        return f'{class_name}(strategies={self.strategy_names}, datasets={self.dataset_names}, cv_folds={self.cv.get_n_splits()})'

    def save(self):
        """Persist the result registry.  Abstract.

        BUG FIX: the original called ``NotImplementedError()`` without
        ``raise``, silently returning None instead of signalling "abstract".
        """
        raise NotImplementedError()

    def _iter(self):
        """Yield every (strategy_name, dataset_name) combination seen so far."""
        for strategy_name in self.strategy_names:
            for dataset_name in self.dataset_names:
                (yield (strategy_name, dataset_name))
def make_conv_out_spatial_dims(in_spatial_dims: Sequence[Dim], *, filter_size: Union[(Sequence[Union[(int, Dim)]], int, Dim)], padding: str, strides: Union[(Sequence[int], int)]=1, dilation_rate: Union[(Sequence[int], int)]=1, description_prefix: Optional[str]=None) -> Sequence[Dim]:
    """Compute the output spatial dims of a convolution over ``in_spatial_dims``.

    Scalar ``filter_size``/``strides``/``dilation_rate`` are broadcast to all
    spatial axes.  Axes that cannot change size (1x1 stride-1, or stride 1
    with 'same' padding) are passed through unchanged; otherwise a new Dim is
    derived via ``_calc_out_dim``, including its dynamic size when present.
    """
    nd = len(in_spatial_dims)
    if isinstance(filter_size, (int, Dim)):
        filter_size = ([filter_size] * nd)
    # Normalize Dim-valued filter sizes to plain ints.
    filter_size = [(d.dimension if isinstance(d, Dim) else d) for d in filter_size]
    assert all((isinstance(s, int) for s in filter_size))
    if isinstance(strides, int):
        strides = ([strides] * nd)
    if isinstance(dilation_rate, int):
        dilation_rate = ([dilation_rate] * nd)
    assert (nd == len(in_spatial_dims) == len(filter_size) == len(strides) == len(dilation_rate))
    assert (padding.lower() in ('valid', 'same'))
    out_spatial_dims = []
    for i in range(nd):
        in_spatial_dim = in_spatial_dims[i]
        if ((filter_size[i] == strides[i] == 1) or ((strides[i] == 1) and (padding.lower() == 'same'))):
            # This axis cannot change size: reuse the input dim as-is.
            out_spatial_dims.append(in_spatial_dim)
        else:
            out_spatial_dim = _calc_out_dim(in_dim=in_spatial_dim, filter_size=filter_size[i], stride=strides[i], dilation_rate=dilation_rate[i], padding=padding)
            assert isinstance(out_spatial_dim, Dim)
            if (description_prefix and (out_spatial_dim != in_spatial_dim)):
                out_spatial_dim.name = f'{description_prefix}:spatial{i}'
            if (in_spatial_dim.dyn_size_ext and (not out_spatial_dim.dyn_size_ext)):
                # Derive the dynamic (per-sequence) size alongside the static dim.
                out_spatial_dim.dyn_size_ext = _calc_out_dim(in_dim=in_spatial_dim.dyn_size_ext, filter_size=filter_size[i], stride=strides[i], dilation_rate=dilation_rate[i], padding=padding)
            out_spatial_dims.append(out_spatial_dim)
    return out_spatial_dims
class AtomicOpsPlan(BenchmarkPlan):
    """Benchmark plan covering atomic operations on the given architecture."""

    def __init__(self, arch: str):
        super().__init__('atomic_ops', arch, basic_repeat_times=10)
        atomic_ops = AtomicOps()
        # These ops are excluded from this plan's measurements.
        atomic_ops.remove(['atomic_sub', 'atomic_and', 'atomic_xor', 'atomic_max'])
        self.create_plan(atomic_ops, Container(), DataType(), DataSize(), MetricType())
        # The same default reduction is used for both container kinds.
        self.add_func(['field'], reduction_default)
        self.add_func(['ndarray'], reduction_default)
# NOTE(review): module-level call — presumably runs this file's self-test when
# the module is loaded; confirm the import-time side effect is intentional.
_test()
def test_hardware_axpy_double_pump_vec2():
    """Run the double-pump AXPY hardware test with a vector length of 2."""
    veclen = 2
    return test_hardware_axpy_double_pump(veclen=veclen)
class CocoDistEvalRecallHook(DistEvalHook):
    """Distributed-eval hook computing COCO proposal average recall (AR)."""

    def __init__(self, dataset, proposal_nums=(100, 300, 1000), iou_thrs=np.arange(0.5, 0.96, 0.05)):
        super(CocoDistEvalRecallHook, self).__init__(dataset)
        # Proposal budgets and IoU thresholds for the recall computation.
        self.proposal_nums = np.array(proposal_nums, dtype=np.int32)
        self.iou_thrs = np.array(iou_thrs, dtype=np.float32)

    def evaluate(self, runner, results):
        """Compute AR for each proposal budget and push it to the runner's log buffer."""
        ar = fast_eval_recall(results, self.dataset.coco, self.proposal_nums, self.iou_thrs)
        for (i, num) in enumerate(self.proposal_nums):
            # One log entry per proposal budget, keyed by the budget itself.
            runner.log_buffer.output['{}'.format(num)] = ar[i]
        runner.log_buffer.ready = True
class FreeModuleTensor(ModuleElementWithMutability):
_fmodule: FiniteRankFreeModule
    def __init__(self, fmodule: FiniteRankFreeModule, tensor_type, name: Optional[str]=None, latex_name: Optional[str]=None, sym=None, antisym=None, parent=None):
        """Initialize a tensor of type ``tensor_type`` on the free module ``fmodule``.

        ``sym``/``antisym`` list index groups that are (anti)symmetric; they
        are canonicalized via ``CompWithSym._canonicalize_sym_antisym``.
        """
        if (parent is None):
            parent = fmodule.tensor_module(*tensor_type)
        ModuleElementWithMutability.__init__(self, parent)
        self._fmodule = fmodule
        self._tensor_type = tuple(tensor_type)
        # Total rank = contravariant + covariant valence.
        self._tensor_rank = (self._tensor_type[0] + self._tensor_type[1])
        self._is_zero = False
        self._name = name
        if (latex_name is None):
            self._latex_name = self._name
        else:
            self._latex_name = latex_name
        # Components are stored per basis and computed lazily.
        self._components: Dict[(FreeModuleBasis, Components)] = {}
        (self._sym, self._antisym) = CompWithSym._canonicalize_sym_antisym(self._tensor_rank, sym, antisym)
        FreeModuleTensor._init_derived(self)
def __bool__(self):
basis = self.pick_a_basis()
if (not self._components[basis].is_zero()):
self._is_zero = False
return True
self._is_zero = True
return False
def _repr_(self):
if ((self._tensor_type == (0, 2)) and (self._sym == ((0, 1),))):
description = 'Symmetric bilinear form '
else:
description = 'Type-({},{}) tensor'.format(self._tensor_type[0], self._tensor_type[1])
if (self._name is not None):
description += (' ' + self._name)
description += ' on the {}'.format(self._fmodule)
return description
def _latex_(self):
if (self._latex_name is None):
return (('\\mbox{' + str(self)) + '}')
return self._latex_name
    def _init_derived(self):
        """Initialize derived quantities (no-op here; hook for subclasses)."""
        pass

    def _del_derived(self):
        """Delete derived quantities (no-op here; hook for subclasses)."""
        pass

    def tensor_type(self):
        """Return the pair (contravariant rank, covariant rank)."""
        return self._tensor_type

    def tensor_rank(self):
        """Return the total rank (sum of the two valences)."""
        return self._tensor_rank

    def base_module(self):
        """Return the free module on which the tensor is defined."""
        return self._fmodule
def symmetries(self):
if (len(self._sym) == 0):
s = 'no symmetry; '
elif (len(self._sym) == 1):
s = 'symmetry: {}; '.format(self._sym[0])
else:
s = 'symmetries: {}; '.format(list(self._sym))
if (len(self._antisym) == 0):
a = 'no antisymmetry'
elif (len(self._antisym) == 1):
a = 'antisymmetry: {}'.format(self._antisym[0])
else:
a = 'antisymmetries: {}'.format(list(self._antisym))
print((s + a))
    def _preparse_display(self, basis=None, format_spec=None):
        """Normalize display arguments: default ``basis`` to the module's default basis."""
        if (basis is None):
            basis = self._fmodule._def_basis
        return (basis, format_spec)
    def display(self, basis=None, format_spec=None):
        """Display the tensor as its expansion on ``basis`` (text and LaTeX).

        Each nonzero component contributes one term ``coef e_i*...*e^j``
        (basis vectors for contravariant slots, dual-cobasis forms for
        covariant ones); the result is returned as a ``FormattedExpansion``.
        """
        from sage.misc.latex import latex
        from sage.typeset.unicode_characters import unicode_otimes
        from .format_utilities import is_atomic, FormattedExpansion
        (basis, format_spec) = self._preparse_display(basis=basis, format_spec=format_spec)
        cobasis = basis.dual_basis()
        comp = self.comp(basis)
        terms_txt = []
        terms_latex = []
        n_con = self._tensor_type[0]
        for ind in comp.index_generator():
            ind_arg = (ind + (format_spec,))
            coef = comp[ind_arg]
            # Guard against coefficients that cannot be compared to 0 directly.
            if hasattr(coef, 'is_trivial_zero'):
                zero_coef = coef.is_trivial_zero()
            else:
                zero_coef = (coef == 0)
            if (not zero_coef):
                bases_txt = []
                bases_latex = []
                # Contravariant slots use basis vectors, covariant slots the cobasis.
                for k in range(n_con):
                    bases_txt.append(basis[ind[k]]._name)
                    bases_latex.append(latex(basis[ind[k]]))
                for k in range(n_con, self._tensor_rank):
                    bases_txt.append(cobasis[ind[k]]._name)
                    bases_latex.append(latex(cobasis[ind[k]]))
                basis_term_txt = unicode_otimes.join(bases_txt)
                basis_term_latex = '\\otimes '.join(bases_latex)
                coef_txt = repr(coef)
                # Coefficients 1 and -1 are shown implicitly.
                if (coef_txt == '1'):
                    terms_txt.append(basis_term_txt)
                    terms_latex.append(basis_term_latex)
                elif (coef_txt == '-1'):
                    terms_txt.append(('-' + basis_term_txt))
                    terms_latex.append(('-' + basis_term_latex))
                else:
                    coef_latex = latex(coef)
                    # Parenthesize non-atomic coefficients.
                    if is_atomic(coef_txt):
                        terms_txt.append(((coef_txt + ' ') + basis_term_txt))
                    else:
                        terms_txt.append(((('(' + coef_txt) + ') ') + basis_term_txt))
                    if is_atomic(coef_latex):
                        terms_latex.append((coef_latex + basis_term_latex))
                    else:
                        terms_latex.append(((('\\left(' + coef_latex) + '\\right)') + basis_term_latex))
        # Join the text terms with ' + '/' - '.
        if (terms_txt == []):
            expansion_txt = '0'
        else:
            expansion_txt = terms_txt[0]
            for term in terms_txt[1:]:
                if (term[0] == '-'):
                    expansion_txt += (' - ' + term[1:])
                else:
                    expansion_txt += (' + ' + term)
        if (terms_latex == []):
            expansion_latex = '0'
        else:
            expansion_latex = terms_latex[0]
            for term in terms_latex[1:]:
                if (term[0] == '-'):
                    expansion_latex += term
                else:
                    expansion_latex += ('+' + term)
        if (self._name is None):
            resu_txt = expansion_txt
        else:
            resu_txt = ((self._name + ' = ') + expansion_txt)
        if (self._latex_name is None):
            resu_latex = expansion_latex
        else:
            resu_latex = ((latex(self) + ' = ') + expansion_latex)
        return FormattedExpansion(resu_txt, resu_latex)
    # Convenience alias.
    disp = display
    def display_comp(self, basis=None, format_spec=None, symbol=None, latex_symbol=None, index_labels=None, index_latex_labels=None, only_nonzero=True, only_nonredundant=False):
        """Display the tensor's components w.r.t. ``basis``, one per line.

        ``symbol``/``latex_symbol`` default to the tensor's (LaTeX) name, or
        'X' when unnamed.  Index positions are marked 'u' for contravariant
        slots and 'd' for covariant ones; rendering is delegated to the
        components object.
        """
        if (basis is None):
            basis = self._fmodule._def_basis
        if (symbol is None):
            if (self._name is not None):
                symbol = self._name
            else:
                symbol = 'X'
        if (latex_symbol is None):
            if (self._latex_name is not None):
                latex_symbol = self._latex_name
            else:
                latex_symbol = 'X'
        index_positions = ((self._tensor_type[0] * 'u') + (self._tensor_type[1] * 'd'))
        return self.comp(basis).display(symbol, latex_symbol=latex_symbol, index_positions=index_positions, index_labels=index_labels, index_latex_labels=index_latex_labels, format_spec=format_spec, only_nonzero=only_nonzero, only_nonredundant=only_nonredundant)
def set_name(self, name: Optional[str]=None, latex_name: Optional[str]=None):
if (name is not None):
self._name = name
if (latex_name is None):
self._latex_name = self._name
if (latex_name is not None):
self._latex_name = latex_name
    def _new_instance(self):
        """Create a tensor of the same type and symmetries on the same module."""
        return self.__class__(self._fmodule, self._tensor_type, sym=self._sym, antisym=self._antisym)

    def _new_comp(self, basis):
        """Create an empty component container matching this tensor's symmetries.

        Picks the most specific class: plain ``Components`` when there is no
        symmetry, fully (anti)symmetric containers when one group spans all
        indices, ``CompWithSym`` otherwise.
        """
        fmodule = self._fmodule
        if ((not self._sym) and (not self._antisym)):
            return Components(fmodule._ring, basis, self._tensor_rank, start_index=fmodule._sindex, output_formatter=fmodule._output_formatter)
        for isym in self._sym:
            if (len(isym) == self._tensor_rank):
                return CompFullySym(fmodule._ring, basis, self._tensor_rank, start_index=fmodule._sindex, output_formatter=fmodule._output_formatter)
        for isym in self._antisym:
            if (len(isym) == self._tensor_rank):
                return CompFullyAntiSym(fmodule._ring, basis, self._tensor_rank, start_index=fmodule._sindex, output_formatter=fmodule._output_formatter)
        return CompWithSym(fmodule._ring, basis, self._tensor_rank, start_index=fmodule._sindex, output_formatter=fmodule._output_formatter, sym=self._sym, antisym=self._antisym)
def components(self, basis=None, from_basis=None) -> Components:
fmodule = self._fmodule
if (basis is None):
basis = fmodule._def_basis
try:
basis = basis._base_module_basis
except AttributeError:
pass
if (basis not in self._components):
if (from_basis is None):
for known_basis in self._components:
if (((known_basis, basis) in self._fmodule._basis_changes) and ((basis, known_basis) in self._fmodule._basis_changes)):
from_basis = known_basis
break
if (from_basis is None):
raise ValueError(('no basis could be found for computing ' + 'the components in the {}'.format(basis)))
elif (from_basis not in self._components):
raise ValueError(('the tensor components are not known in ' + 'the {}'.format(from_basis)))
(n_con, n_cov) = self._tensor_type
pp = None
if (n_cov > 0):
if ((from_basis, basis) not in fmodule._basis_changes):
raise ValueError((('the change-of-basis matrix from the ' + '{} to the {}'.format(from_basis, basis)) + ' has not been set'))
pp = fmodule._basis_changes[(from_basis, basis)].comp(from_basis)
ppinv = None
if (n_con > 0):
if ((basis, from_basis) not in fmodule._basis_changes):
raise ValueError((('the change-of-basis matrix from the ' + '{} to the {}'.format(basis, from_basis)) + ' has not been set'))
ppinv = fmodule._basis_changes[(basis, from_basis)].comp(from_basis)
old_comp = self._components[from_basis]
new_comp = self._new_comp(basis)
rank = self._tensor_rank
nproc = Parallelism().get('tensor')
if (nproc != 1):
lol = (lambda lst, sz: [lst[i:(i + sz)] for i in range(0, len(lst), sz)])
ind_list = [ind for ind in new_comp.non_redundant_index_generator()]
ind_step = max(1, int(((len(ind_list) / nproc) / 2)))
local_list = lol(ind_list, ind_step)
listParalInput = [(old_comp, ppinv, pp, n_con, rank, ii) for ii in local_list]
(p_iter='multiprocessing', ncpus=nproc)
def paral_newcomp(old_comp, ppinv, pp, n_con, rank, local_list_ind):
partial = []
for ind in local_list_ind:
res = 0
for ind_old in old_comp.index_generator():
t = old_comp[[ind_old]]
for i in range(n_con):
t *= ppinv[[ind[i], ind_old[i]]]
for i in range(n_con, rank):
t *= pp[[ind_old[i], ind[i]]]
res += t
partial.append([ind, res])
return partial
for (ii, val) in paral_newcomp(listParalInput):
for jj in val:
new_comp[[jj[0]]] = jj[1]
else:
for ind_new in new_comp.non_redundant_index_generator():
res = 0
for ind_old in old_comp.index_generator():
t = old_comp[[ind_old]]
for i in range(n_con):
t *= ppinv[[ind_new[i], ind_old[i]]]
for i in range(n_con, rank):
t *= pp[[ind_old[i], ind_new[i]]]
res += t
new_comp[ind_new] = res
self._components[basis] = new_comp
return self._components[basis]
comp = components
def _set_comp_unsafe(self, basis=None):
if (basis is None):
basis = self._fmodule._def_basis
if (basis not in self._components):
if (basis not in self._fmodule._known_bases):
raise ValueError(('the {} has not been '.format(basis) + 'defined on the {}'.format(self._fmodule)))
self._components[basis] = self._new_comp(basis)
self._del_derived()
self.del_other_comp(basis)
return self._components[basis]
def set_comp(self, basis=None):
if self.is_immutable():
raise ValueError('the components of an immutable element cannot be changed')
self._is_zero = False
return self._set_comp_unsafe(basis)
def _add_comp_unsafe(self, basis=None):
if (basis is None):
basis = self._fmodule._def_basis
if (basis not in self._components):
if (basis not in self._fmodule._known_bases):
raise ValueError(('the {} has not been '.format(basis) + 'defined on the {}'.format(self._fmodule)))
self._components[basis] = self._new_comp(basis)
self._del_derived()
return self._components[basis]
def add_comp(self, basis=None):
    """Return writable components w.r.t. ``basis``, keeping the components
    in other bases.  Raises ``ValueError`` if the element is immutable.
    """
    if self.is_immutable():
        raise ValueError('the components of an immutable element cannot be changed')
    # the element is about to be modified: the zero flag is no longer valid
    self._is_zero = False
    return self._add_comp_unsafe(basis)
def del_other_comp(self, basis=None):
    """Delete the components in every basis except ``basis`` (the module's
    default basis if ``None``)."""
    if basis is None:
        basis = self._fmodule._def_basis
    if basis not in self._components:
        raise ValueError('the components w.r.t. the {} have not been defined'.format(basis))
    # collect first, then delete: no mutation while iterating the dict
    doomed = [bas for bas in self._components if bas != basis]
    for bas in doomed:
        del self._components[bas]
def __getitem__(self, args) -> Components:
    """Return a component of ``self``.

    ``args`` can be a string of abstract indices (delegated to
    ``TensorWithIndices``), a list/tuple whose first item is a basis followed
    by component indices, or the indices alone, in which case the free
    module's default basis is assumed.
    """
    if isinstance(args, str):
        # abstract-index notation, e.g. t['^a_b']
        return TensorWithIndices(self, args).update()
    if isinstance(args, list):
        # [i, j, ...] (default basis) or [basis, i, j, ...]
        if isinstance(args[0], (int, Integer, slice)):
            basis = self._fmodule._def_basis
        else:
            basis = args[0]
            args = args[1:]
    elif isinstance(args, (int, Integer, slice)):
        # a single index: default basis
        basis = self._fmodule._def_basis
    elif (not isinstance(args[0], (int, Integer, slice))):
        # tuple whose first item is a basis
        basis = args[0]
        args = args[1:]
        if (len(args) == 1):
            args = args[0]  # unwrap a single remaining index
    else:
        # plain tuple of indices: default basis
        basis = self._fmodule._def_basis
    return self.comp(basis)[args]
def __setitem__(self, args, value):
    """Set a component of ``self`` to ``value``.

    ``args`` is either a list/tuple whose first item is a basis followed by
    component indices, or the indices alone (the free module's default basis
    is then assumed).
    """
    if isinstance(args, list):
        # [i, j, ...] (default basis) or [basis, i, j, ...]
        if isinstance(args[0], (int, Integer, slice, tuple)):
            basis = self._fmodule._def_basis
        else:
            basis = args[0]
            args = args[1:]
    elif isinstance(args, (int, Integer, slice)):
        # a single index: default basis
        basis = self._fmodule._def_basis
    elif (not isinstance(args[0], (int, Integer, slice))):
        # tuple whose first item is a basis
        basis = args[0]
        args = args[1:]
        if (len(args) == 1):
            args = args[0]  # unwrap a single remaining index
    else:
        # plain tuple of indices: default basis
        basis = self._fmodule._def_basis
    # set_comp() performs the immutability check and drops other bases
    self.set_comp(basis)[args] = value
def copy_from(self, other):
    """Make ``self`` an exact copy of ``other`` (components in every known
    basis and the zero flag).  Raises if ``self`` is immutable or ``other``
    does not belong to the same parent.
    """
    if self.is_immutable():
        raise ValueError('the components of an immutable element cannot be changed')
    if other not in self.parent():
        raise TypeError('the original must be an element of {}'.format(self.parent()))
    self._del_derived()
    self._components.clear()
    # deep-copy the component objects so self and other stay independent
    self._components.update({bas: comp.copy() for bas, comp in other._components.items()})
    self._is_zero = other._is_zero
def copy(self, name=None, latex_name=None):
    """Return an exact copy of ``self``, optionally renamed."""
    duplicate = self._new_instance()
    duplicate.set_name(name=name, latex_name=latex_name)
    # copy the component objects so the two tensors stay independent
    for bas, comp in self._components.items():
        duplicate._components[bas] = comp.copy()
    duplicate._is_zero = self._is_zero
    return duplicate
def common_basis(self, other):
    """Return a basis in which both ``self`` and ``other`` have known
    components, computing new components via changes of basis if necessary;
    return ``None`` if no common basis can be found.

    Note: this may mutate ``self`` and/or ``other`` by adding components.
    """
    if (not isinstance(other, FreeModuleTensor)):
        raise TypeError('the argument must be a tensor on a free module')
    fmodule = self._fmodule
    if (other._fmodule != fmodule):
        raise TypeError(('the two tensors are not defined on the same ' + 'free module'))
    def_basis = fmodule._def_basis
    # 1) look for a basis both tensors already know, preferring the default
    if ((def_basis in self._components) and (def_basis in other._components)):
        return def_basis
    for basis1 in self._components:
        if (basis1 in other._components):
            return basis1
    # 2) try to convert one tensor to the default basis
    if (def_basis in self._components):
        for obasis in other._components:
            if ((obasis, def_basis) in fmodule._basis_changes):
                other.comp(def_basis, from_basis=obasis)
                return def_basis
    if (def_basis in other._components):
        for sbasis in self._components:
            if ((sbasis, def_basis) in fmodule._basis_changes):
                self.comp(def_basis, from_basis=sbasis)
                return def_basis
    # 3) try a single change of basis between any pair of known bases
    for sbasis in self._components:
        for obasis in other._components:
            if ((obasis, sbasis) in fmodule._basis_changes):
                other.comp(sbasis, from_basis=obasis)
                return sbasis
            if ((sbasis, obasis) in fmodule._basis_changes):
                self.comp(obasis, from_basis=sbasis)
                return obasis
    # 4) last resort: convert both tensors to a third basis
    for sbasis in self._components:
        for obasis in other._components:
            if (((sbasis, def_basis) in fmodule._basis_changes) and ((obasis, def_basis) in fmodule._basis_changes)):
                self.comp(def_basis, from_basis=sbasis)
                other.comp(def_basis, from_basis=obasis)
                return def_basis
            for basis in fmodule._known_bases:
                if (((sbasis, basis) in fmodule._basis_changes) and ((obasis, basis) in fmodule._basis_changes)):
                    self.comp(basis, from_basis=sbasis)
                    other.comp(basis, from_basis=obasis)
                    return basis
    return None
def pick_a_basis(self):
    """Return some basis in which ``self`` has known components, preferring
    the free module's default basis.
    """
    if (self._fmodule._def_basis in self._components):
        return self._fmodule._def_basis
    # otherwise any basis with known components will do; take the first.
    # (idiom fix: iterating the dict yields keys directly, no need to
    # build (key, value) pairs via .items() and discard the value)
    return next(iter(self._components))
def __eq__(self, other):
    """Equality operator.

    Two tensors compare equal iff they live on the same free module, have
    the same tensor type, and have equal components in some common basis.
    Comparison with the integer 0 tests for the zero tensor.
    """
    if (self is other):
        return True
    if (self._tensor_rank == 0):
        # rank-0 elements are handled elsewhere (scalars), not by this class
        raise NotImplementedError('scalar comparison not implemented')
    if isinstance(other, (int, Integer)):
        # comparison with a plain number: only 0 (zero tensor) is meaningful
        if (other == 0):
            return self.is_zero()
        else:
            return False
    elif (not isinstance(other, FreeModuleTensor)):
        return False
    else:
        if (other._fmodule != self._fmodule):
            return False
        if (other._tensor_type != self._tensor_type):
            return False
        # common_basis() may compute new components via a change of basis
        basis = self.common_basis(other)
        if (basis is None):
            raise ValueError('no common basis for the comparison')
        return bool((self._components[basis] == other._components[basis]))
def __ne__(self, other):
    """Return ``True`` iff ``self`` differs from ``other``."""
    return not (self == other)
def __pos__(self):
    """Unary plus: return a copy of ``self`` with names prefixed by '+'."""
    res = self._new_instance()
    for bas, comp in self._components.items():
        res._components[bas] = +comp
    if self._name is not None:
        res._name = '+' + self._name
    if self._latex_name is not None:
        res._latex_name = '+' + self._latex_name
    return res
def __neg__(self):
    """Unary minus: return the opposite tensor, names prefixed by '-'."""
    res = self._new_instance()
    for bas, comp in self._components.items():
        res._components[bas] = -comp
    if self._name is not None:
        res._name = '-' + self._name
    if self._latex_name is not None:
        res._latex_name = '-' + self._latex_name
    return res
def _add_(self, other):
    """Module addition: return ``self + other`` (same parent guaranteed by
    the coercion framework)."""
    # shortcuts when one operand is known to vanish
    if self._is_zero:
        return other
    if other._is_zero:
        return self
    basis = self.common_basis(other)
    if basis is None:
        raise ValueError('no common basis for the addition')
    comp_sum = self._components[basis] + other._components[basis]
    result = self._fmodule.tensor_from_comp(self._tensor_type, comp_sum)
    if self._name is not None and other._name is not None:
        result._name = self._name + '+' + other._name
    if self._latex_name is not None and other._latex_name is not None:
        result._latex_name = self._latex_name + '+' + other._latex_name
    return result
def _sub_(self, other):
    """Module subtraction: return ``self - other`` (same parent guaranteed
    by the coercion framework)."""
    # shortcuts when one operand is known to vanish
    if self._is_zero:
        return -other
    if other._is_zero:
        return self
    basis = self.common_basis(other)
    if basis is None:
        raise ValueError('no common basis for the subtraction')
    comp_diff = self._components[basis] - other._components[basis]
    result = self._fmodule.tensor_from_comp(self._tensor_type, comp_diff)
    if self._name is not None and other._name is not None:
        result._name = self._name + '-' + other._name
    if self._latex_name is not None and other._latex_name is not None:
        result._latex_name = self._latex_name + '-' + other._latex_name
    return result
def _rmul_(self, other):
    """Reflected scalar multiplication: return ``other * self`` for a ring
    element ``other``."""
    if isinstance(other, FreeModuleTensor):
        # tensor*tensor goes through __mul__, not the scalar action
        raise NotImplementedError('left tensor product not implemented')
    result = self._new_instance()
    for bas, comp in self._components.items():
        result._components[bas] = other * comp
    try:
        from .format_utilities import format_mul_txt, format_mul_latex
        # raises AttributeError when the scalar carries no name attributes,
        # in which case the result is simply left unnamed
        result.set_name(name=format_mul_txt(other._name, '*', self._name),
                        latex_name=format_mul_latex(other._latex_name, ' \\cdot ', self._latex_name))
    except AttributeError:
        pass
    return result
def __mul__(self, other):
    """Tensor product with another tensor, or multiplication by a scalar.

    For two tensors of types (k1,l1) and (k2,l2), returns a tensor of type
    (k1+k2, l1+l2), with indices reordered so that all contravariant indices
    come first.
    """
    from sage.typeset.unicode_characters import unicode_otimes
    from .format_utilities import format_mul_txt, format_mul_latex
    if isinstance(other, FreeModuleTensor):
        basis = self.common_basis(other)
        if (basis is None):
            raise ValueError('no common basis for the tensor product')
        comp_prov = (self._components[basis] * other._components[basis])
        (k1, l1) = self._tensor_type
        (k2, l2) = other._tensor_type
        if (l1 != 0):
            # move other's contravariant indices before self's covariant ones
            comp_result = comp_prov.swap_adjacent_indices(k1, self._tensor_rank, (self._tensor_rank + k2))
        else:
            # self has no covariant index: the provisional order is already fine
            comp_result = comp_prov
        result = self._fmodule.tensor_from_comp(((k1 + k2), (l1 + l2)), comp_result)
        result._name = format_mul_txt(self._name, unicode_otimes, other._name)
        result._latex_name = format_mul_latex(self._latex_name, '\\otimes ', other._latex_name)
        return result
    # not a tensor: fall back to scalar multiplication
    return FreeModuleTensor._rmul_(self, other)
def __truediv__(self, other):
    """Division of ``self`` by a scalar ``other``."""
    quotient = self._new_instance()
    for bas, comp in self._components.items():
        quotient._components[bas] = comp / other
    return quotient
def __call__(self, *args) -> Expression:
    """Evaluate the tensor on its arguments and return the resulting scalar.

    Expects exactly ``self._tensor_rank`` arguments: linear forms (type
    (0,1)) for the contravariant slots, followed by module elements (type
    (1,0)) for the covariant slots.
    """
    p = len(args)
    if (p != self._tensor_rank):
        raise TypeError((str(self._tensor_rank) + ' arguments must be provided'))
    # type checks: the first k arguments must be linear forms ...
    for i in range(self._tensor_type[0]):
        if (not isinstance(args[i], FreeModuleTensor)):
            raise TypeError((('the argument no. ' + str((i + 1))) + ' must be a linear form'))
        if (args[i]._tensor_type != (0, 1)):
            raise TypeError((('the argument no. ' + str((i + 1))) + ' must be a linear form'))
    # ... and the remaining ones module elements
    for i in range(self._tensor_type[0], p):
        if (not isinstance(args[i], FreeModuleTensor)):
            raise TypeError((('the argument no. ' + str((i + 1))) + ' must be a module element'))
        if (args[i]._tensor_type != (1, 0)):
            raise TypeError((('the argument no. ' + str((i + 1))) + ' must be a module element'))
    fmodule = self._fmodule
    if (self._tensor_type == (0, 1)):
        # special (frequent) case: a linear form acting on a single vector
        vector = args[0]
        basis = self.common_basis(vector)
        if (basis is None):
            raise ValueError('no common basis for the components')
        omega = self._components[basis]
        vv = vector._components[basis]
        resu = 0
        for i in fmodule.irange():
            resu += (omega[[i]] * vv[[i]])
        # name formatting, e.g. "a(v)" — only if the result supports names
        if hasattr(resu, '_name'):
            if ((self._name is not None) and (vector._name is not None)):
                resu._name = (((self._name + '(') + vector._name) + ')')
        if hasattr(resu, '_latex_name'):
            if ((self._latex_name is not None) and (vector._latex_name is not None)):
                resu._latex_name = (((self._latex_name + '\\left(') + vector._latex_name) + '\\right)')
        return resu
    # generic case: search a basis in which self and all arguments have
    # known components, preferring the default basis
    basis = None
    def_basis = fmodule._def_basis
    if (def_basis in self._components):
        basis = def_basis
        for arg in args:
            if (def_basis not in arg._components):
                basis = None
                break
    if (basis is None):
        # try any basis already known to self and to all arguments
        for bas in self._components:
            basis = bas
            for arg in args:
                if (bas not in arg._components):
                    basis = None
                    break
            if (basis is not None):
                break
    if (basis is None):
        # last resort: force changes of basis via common_basis(), then retry
        for arg in args:
            self.common_basis(arg)
        for bas in self._components:
            basis = bas
            for arg in args:
                if (bas not in arg._components):
                    basis = None
                    break
            if (basis is not None):
                break
    if (basis is None):
        raise ValueError('no common basis for the components')
    # full contraction of the tensor with its arguments
    t = self._components[basis]
    v = [args[i]._components[basis] for i in range(p)]
    res = 0
    for ind in t.index_generator():
        prod = t[[ind]]
        for i in range(p):
            prod *= v[i][[ind[i]]]
        res += prod
    # name formatting, e.g. "t(a,b,v)"; aborted if any argument is unnamed
    if hasattr(res, '_name'):
        res_name = None
        if (self._name is not None):
            res_name = (self._name + '(')
            for i in range((p - 1)):
                if (args[i]._name is not None):
                    res_name += (args[i]._name + ',')
                else:
                    res_name = None
                    break
            if (res_name is not None):
                if (args[(p - 1)]._name is not None):
                    res_name += (args[(p - 1)]._name + ')')
                else:
                    res_name = None
        res._name = res_name
    if hasattr(res, '_latex_name'):
        res_latex = None
        if (self._latex_name is not None):
            res_latex = (self._latex_name + '\\left(')
            for i in range((p - 1)):
                if (args[i]._latex_name is not None):
                    res_latex += (args[i]._latex_name + ',')
                else:
                    res_latex = None
                    break
            if (res_latex is not None):
                if (args[(p - 1)]._latex_name is not None):
                    res_latex += (args[(p - 1)]._latex_name + '\\right)')
                else:
                    res_latex = None
        res._latex_name = res_latex
    return res
def trace(self, pos1: int=0, pos2: int=1, using: Optional[Union[(PseudoRiemannianMetric, SymplecticForm, PoissonTensorField)]]=None):
    """Trace (contraction) of the tensor on two slots of opposite variance.

    ``pos1``/``pos2`` are 0-based positions (contravariant positions first).
    When ``using`` is given, the trace of a type-(0,2) tensor is taken with
    respect to that non-degenerate form by first raising an index.
    """
    if (using is not None):
        if (self.tensor_type() != (0, 2)):
            raise ValueError('trace with respect to a non-degenerate form is only defined for type-(0,2) tensor')
        # raise the second index with the given form, then take the plain trace
        return self.up(using, 1).trace()
    k_con = self._tensor_type[0]
    l_cov = self._tensor_type[1]
    # the two positions must have opposite variance
    if ((pos1 < k_con) and (pos2 < k_con)):
        raise IndexError(('contraction on two contravariant indices is ' + 'not allowed'))
    if ((pos1 >= k_con) and (pos2 >= k_con)):
        raise IndexError(('contraction on two covariant indices is ' + 'not allowed'))
    if (self._fmodule._def_basis in self._components):
        basis = self._fmodule._def_basis
    else:
        basis = self.pick_a_basis()
    resu_comp = self._components[basis].trace(pos1, pos2)
    if (self._tensor_rank == 2):
        # type (1,1): the result is a scalar, not a tensor
        return resu_comp
    else:
        return self._fmodule.tensor_from_comp(((k_con - 1), (l_cov - 1)), resu_comp)
def contract(self, *args):
    """Contraction with another tensor on one or more pairs of slots.

    ``args`` is ``[pos1..., other, pos2...]``: positions in ``self``, the
    other tensor, then positions in ``other``.  Omitted positions default to
    the last index of ``self`` and the first index of ``other``.  Each
    contracted pair must have opposite variance.
    """
    nargs = len(args)
    # locate the tensor among the arguments; what precedes/follows are positions
    for (i, arg) in enumerate(args):
        if isinstance(arg, FreeModuleTensor):
            other = arg
            it = i
            break
    else:
        raise TypeError('a tensor must be provided in the argument list')
    if (it == 0):
        # no position given for self: default to its last index
        pos1 = ((self._tensor_rank - 1),)
    else:
        pos1 = args[:it]
    if (it == (nargs - 1)):
        # no position given for other: default to its first index
        pos2 = (0,)
    else:
        pos2 = args[(it + 1):]
    ncontr = len(pos1)
    if (len(pos2) != ncontr):
        raise TypeError('different number of indices for the contraction')
    (k1, l1) = self._tensor_type
    (k2, l2) = other._tensor_type
    # each contracted pair must be (contravariant, covariant) or vice versa
    for i in range(ncontr):
        p1 = pos1[i]
        p2 = pos2[i]
        if ((p1 < k1) and (p2 < k2)):
            raise TypeError(('contraction on two contravariant indices ' + 'not permitted'))
        if ((p1 >= k1) and (p2 >= k2)):
            raise TypeError(('contraction on two covariant indices ' + 'not permitted'))
    basis = self.common_basis(other)
    if (basis is None):
        raise ValueError('no common basis for the contraction')
    args = ((pos1 + (other._components[basis],)) + pos2)
    cmp_res = self._components[basis].contract(*args)
    if (((self._tensor_rank + other._tensor_rank) - (2 * ncontr)) == 0):
        # full contraction: the result is a scalar
        return cmp_res
    # count self's remaining covariant and other's remaining contravariant
    # indices, to restore the contravariant-first index order if needed
    nb_cov_s = 0
    for pos in range(k1, (k1 + l1)):
        if (pos not in pos1):
            nb_cov_s += 1
    nb_con_o = 0
    for pos in range(0, k2):
        if (pos not in pos2):
            nb_con_o += 1
    if ((nb_cov_s != 0) and (nb_con_o != 0)):
        # swap the block of self's covariant indices with other's contravariant ones
        p2 = ((k1 + l1) - ncontr)
        p1 = (p2 - nb_cov_s)
        p3 = (p2 + nb_con_o)
        cmp_res = cmp_res.swap_adjacent_indices(p1, p2, p3)
    type_res = (((k1 + k2) - ncontr), ((l1 + l2) - ncontr))
    return self._fmodule.tensor_from_comp(type_res, cmp_res)
def symmetrize(self, *pos, **kwargs):
    """Return the tensor symmetrized over the argument positions ``pos``
    (all positions when none are given).

    All positions must be of the same variance; the keyword ``basis`` selects
    the basis used for the computation (default: any known basis).
    """
    if (not pos):
        pos = range(self._tensor_rank)
    # positions < pos_cov are contravariant; the rest are covariant
    pos_cov = self._tensor_type[0]
    pos0 = pos[0]
    if (pos0 < pos_cov):
        for k in range(1, len(pos)):
            if (pos[k] >= pos_cov):
                raise TypeError(((((str(pos[0]) + ' is a contravariant position, while ') + str(pos[k])) + ' is a covariant position; \nsymmetrization is meaningful only on tensor ') + 'arguments of the same type'))
    else:
        for k in range(1, len(pos)):
            if (pos[k] < pos_cov):
                raise TypeError(((((str(pos[0]) + ' is a covariant position, while ') + str(pos[k])) + ' is a contravariant position; \nsymmetrization is meaningful only on tensor ') + 'arguments of the same type'))
    if ('basis' in kwargs):
        basis = kwargs['basis']
    else:
        basis = self.pick_a_basis()
    res_comp = self._components[basis].symmetrize(*pos)
    return self._fmodule.tensor_from_comp(self._tensor_type, res_comp)
def antisymmetrize(self, *pos, **kwargs):
    """Return the tensor antisymmetrized over the argument positions ``pos``
    (all positions when none are given).

    All positions must be of the same variance; the keyword ``basis`` selects
    the basis used for the computation (default: any known basis).
    """
    if (not pos):
        pos = range(self._tensor_rank)
    # positions < pos_cov are contravariant; the rest are covariant
    pos_cov = self._tensor_type[0]
    pos0 = pos[0]
    if (pos0 < pos_cov):
        for k in range(1, len(pos)):
            if (pos[k] >= pos_cov):
                raise TypeError(((((str(pos[0]) + ' is a contravariant position, while ') + str(pos[k])) + ' is a covariant position; \nantisymmetrization is meaningful only on tensor ') + 'arguments of the same type'))
    else:
        for k in range(1, len(pos)):
            if (pos[k] < pos_cov):
                raise TypeError(((((str(pos[0]) + ' is a covariant position, while ') + str(pos[k])) + ' is a contravariant position; \nantisymmetrization is meaningful only on tensor ') + 'arguments of the same type'))
    if ('basis' in kwargs):
        basis = kwargs['basis']
    else:
        basis = self.pick_a_basis()
    res_comp = self._components[basis].antisymmetrize(*pos)
    return self._fmodule.tensor_from_comp(self._tensor_type, res_comp)
def create_learner(seed: int, observations: jnp.ndarray, actions: jnp.ndarray, value_def, actor_lr: float=0.0003, value_lr: float=0.0003, critic_lr: float=0.0003, value_tx=None, hidden_dims: Sequence[int]=(256, 256), discount: float=0.99, tau: float=0.005, expectile: float=0.8, temperature: float=0.1, dropout_rate: Optional[float]=None, max_steps: Optional[int]=None, opt_decay_schedule: str='cosine', **kwargs):
    """Build an IQL agent: actor policy, twin critic ensemble, value network
    and its Polyak-averaged target, each with its own optimizer.

    ``observations``/``actions`` are example batches used only to initialize
    the network parameters.
    """
    print('Extra kwargs:', kwargs)
    rng = jax.random.PRNGKey(seed)
    rng, actor_rng, critic_rng, value_rng = jax.random.split(rng, 4)
    action_dim = actions.shape[-1]
    policy_def = Policy(hidden_dims, action_dim=action_dim, log_std_min=-5.0, state_dependent_std=False, tanh_squash_distribution=False)
    if opt_decay_schedule == 'cosine':
        # negative lr with scale_by_adam + scale_by_schedule implements
        # gradient *descent* under a cosine decay
        lr_schedule = optax.cosine_decay_schedule(-actor_lr, max_steps)
        actor_tx = optax.chain(optax.scale_by_adam(), optax.scale_by_schedule(lr_schedule))
    else:
        actor_tx = optax.adam(learning_rate=actor_lr)
    actor = TrainState.create(policy_def, policy_def.init(actor_rng, observations)['params'], tx=actor_tx)
    critic_def = ensemblize(Critic, num_qs=2)(hidden_dims)
    critic = TrainState.create(critic_def, critic_def.init(critic_rng, observations, actions)['params'], tx=optax.adam(learning_rate=critic_lr))
    # value network and its target share the same initial parameters
    value_params = value_def.init(value_rng, observations)['params']
    if value_tx is None:
        value_tx = optax.adam(learning_rate=value_lr)
    value = TrainState.create(value_def, value_params, tx=value_tx)
    target_value = TrainState.create(value_def, value_params)
    config = flax.core.FrozenDict(dict(discount=discount, temperature=temperature, expectile=expectile, target_update_rate=tau))
    return IQLAgent(rng, critic=critic, value=value, target_value=target_value, actor=actor, config=config)
def parse_args():
    """Parse the command line for feature extraction and mirror
    ``--local_rank`` into the ``LOCAL_RANK`` environment variable
    (as expected when launched via ``torch.distributed.launch``).
    """
    parser = argparse.ArgumentParser(description='OpenSelfSup extract features of a model')
    parser.add_argument('config', help='test config file path')
    parser.add_argument('--checkpoint', default=None, help='checkpoint file')
    parser.add_argument('--pretrained', default='random', help='pretrained model file, exclusive to --checkpoint')
    parser.add_argument('--dataset-config', default='benchmarks/extract_info/voc07.py', help='extract dataset config file path')
    parser.add_argument('--layer-ind', type=str, help='layer indices, separated by comma, e.g., "0,1,2,3,4"')
    parser.add_argument('--work_dir', type=str, default=None, help='the dir to save logs and models')
    parser.add_argument('--launcher', choices=['none', 'pytorch', 'slurm', 'mpi'], default='none', help='job launcher')
    parser.add_argument('--local_rank', type=int, default=0)
    parser.add_argument('--port', type=int, default=29500, help='port only works when launcher=="slurm"')
    args = parser.parse_args()
    # propagate the rank to the environment unless the launcher already did
    os.environ.setdefault('LOCAL_RANK', str(args.local_rank))
    return args
def collect_env_info():
    """Return a human-readable table describing the runtime environment
    (platform, Python, numpy, PyTorch, CUDA/ROCm, GPUs, key libraries),
    suitable for inclusion in bug reports.
    """
    has_gpu = torch.cuda.is_available()
    torch_version = torch.__version__
    from torch.utils.cpp_extension import CUDA_HOME
    # ROCm builds (torch >= 1.5) expose torch.version.hip and ROCM_HOME
    has_rocm = False
    if (tuple(map(int, torch_version.split('.')[:2])) >= (1, 5)):
        from torch.utils.cpp_extension import ROCM_HOME
        if ((getattr(torch.version, 'hip', None) is not None) and (ROCM_HOME is not None)):
            has_rocm = True
    has_cuda = (has_gpu and (not has_rocm))
    data = []  # list of (label, value) rows for tabulate
    data.append(('sys.platform', sys.platform))
    data.append(('Python', sys.version.replace('\n', '')))
    data.append(('numpy', np.__version__))
    try:
        import fastreid
        data.append(('fastreid', ((fastreid.__version__ + ' ') + os.path.dirname(fastreid.__file__))))
    except ImportError:
        data.append(('fastreid', 'failed to import'))
    data.append(get_env_module())
    data.append(('PyTorch', ((torch_version + ' ') + os.path.dirname(torch.__file__))))
    data.append(('PyTorch debug build', torch.version.debug))
    data.append(('GPU available', has_gpu))
    if has_gpu:
        # group device ids by device name, e.g. ("GPU 0,1", "Tesla V100")
        devices = defaultdict(list)
        for k in range(torch.cuda.device_count()):
            devices[torch.cuda.get_device_name(k)].append(str(k))
        for (name, devids) in devices.items():
            data.append((('GPU ' + ','.join(devids)), name))
        if has_rocm:
            data.append(('ROCM_HOME', str(ROCM_HOME)))
        else:
            data.append(('CUDA_HOME', str(CUDA_HOME)))
        cuda_arch_list = os.environ.get('TORCH_CUDA_ARCH_LIST', None)
        if cuda_arch_list:
            data.append(('TORCH_CUDA_ARCH_LIST', cuda_arch_list))
    data.append(('Pillow', PIL.__version__))
    try:
        data.append(('torchvision', ((str(torchvision.__version__) + ' ') + os.path.dirname(torchvision.__file__))))
        if has_cuda:
            try:
                # report the compute capabilities torchvision was built for
                torchvision_C = importlib.util.find_spec('torchvision._C').origin
                msg = detect_compute_compatibility(CUDA_HOME, torchvision_C)
                data.append(('torchvision arch flags', msg))
            except ImportError:
                data.append(('torchvision._C', 'failed to find'))
    except AttributeError:
        data.append(('torchvision', 'unknown'))
    # optional dependencies: report only if importable
    try:
        import fvcore
        data.append(('fvcore', fvcore.__version__))
    except ImportError:
        pass
    try:
        import cv2
        data.append(('cv2', cv2.__version__))
    except ImportError:
        pass
    env_str = (tabulate(data) + '\n')
    env_str += collect_torch_env()
    return env_str
def main():
    """Benchmark the configured LSTM cell implementations on CPU and/or GPU
    and print a timing summary.

    Command line: ``opt=value`` pairs overriding ``base_settings``, plus
    ``--no-cpu``/``--no-gpu``/``--selected``/``--no-setup-tf-thread-pools``.
    """
    # rebound below when --selected restricts the cell types
    global LstmCellTypes
    print('Benchmarking LSTMs.')
    better_exchook.install()
    print('Args:', ' '.join(sys.argv))
    arg_parser = ArgumentParser()
    arg_parser.add_argument('cfg', nargs='*', help=('opt=value, opt in %r' % sorted(base_settings.keys())))
    arg_parser.add_argument('--no-cpu', action='store_true')
    arg_parser.add_argument('--no-gpu', action='store_true')
    arg_parser.add_argument('--selected', help=('comma-separated list from %r' % LstmCellTypes))
    arg_parser.add_argument('--no-setup-tf-thread-pools', action='store_true')
    args = arg_parser.parse_args()
    # apply opt=value overrides, coercing to the type of the current default
    for opt in args.cfg:
        (key, value) = opt.split('=', 1)
        assert (key in base_settings)
        value_type = type(base_settings[key])
        base_settings[key] = value_type(value)
    print('Settings:')
    pprint(base_settings)
    log.initialize(verbosity=[4])
    print('Returnn:', describe_returnn_version(), file=log.v3)
    print('TensorFlow:', describe_tensorflow_version(), file=log.v3)
    print('Python:', sys.version.replace('\n', ''), sys.platform)
    if (not args.no_setup_tf_thread_pools):
        setup_tf_thread_pools(log_file=log.v2)
    else:
        print('Not setting up the TF thread pools. Will be done automatically by TF to number of CPU cores.')
    if args.no_gpu:
        print('GPU will not be used.')
    else:
        print(('GPU available: %r' % is_gpu_available()))
    print_available_devices()
    if args.selected:
        LstmCellTypes = args.selected.split(',')
    benchmarks = {}  # maps "GPU:<cell>" / "CPU:<cell>" -> elapsed time
    if ((not args.no_gpu) and is_gpu_available()):
        for lstm_unit in LstmCellTypes:
            benchmarks[('GPU:' + lstm_unit)] = benchmark(lstm_unit=lstm_unit, use_gpu=True)
    if (not args.no_cpu):
        for lstm_unit in LstmCellTypes:
            if (lstm_unit in GpuOnlyCellTypes):
                continue
            benchmarks[('CPU:' + lstm_unit)] = benchmark(lstm_unit=lstm_unit, use_gpu=False)
    print(('-' * 20))
    print('Settings:')
    pprint(base_settings)
    print('Final results:')
    # sort by elapsed time (fastest first)
    for (t, lstm_unit) in sorted([(t, lstm_unit) for (lstm_unit, t) in sorted(benchmarks.items())]):
        print((' %s: %s' % (lstm_unit, hms_fraction(t))))
    print('Done.')
def init_train_step_run_ctx(*, train_flag: Union[(bool, Tensor)]=True, step: Union[(int, Tensor)]=0, epoch: Union[(int, Tensor)]=1):
    """Reset the global run context for a training step.

    :param train_flag: whether we are in training mode (may be a scalar tensor)
    :param step: global train step
    :param epoch: current epoch
    """
    global _run_ctx
    # replaces any previous context wholesale
    _run_ctx = RunCtx(stage='train_step', train_flag=train_flag, step=step, epoch=epoch)
def reject_location_related_install_options(requirements, options):
    """Raise ``CommandError`` if any requirement's ``--install-option`` (or a
    command-line install option) would change the install location; such
    options are unsupported and pip-level flags must be used instead.
    """
    def fmt(option_names):
        # "install_lib" -> "--install-lib"
        return ['--{}'.format(name.replace('_', '-')) for name in option_names]
    offenders = []
    for requirement in requirements:
        bad = parse_distutils_args(requirement.install_options)
        if bad:
            offenders.append('{!r} from {}'.format(fmt(bad.keys()), requirement))
    if options:
        bad = parse_distutils_args(options)
        if bad:
            offenders.append('{!r} from command line'.format(fmt(bad.keys())))
    if offenders:
        raise CommandError('Location-changing options found in --install-option: {}. This is unsupported, use pip-level options like --user, --prefix, --root, and --target instead.'.format('; '.join(offenders)))
@add_start_docstrings('\n MMBT Model with a sequence classification/regression head on top (a linear layer on top of the pooled output)\n ', MMBT_START_DOCSTRING, MMBT_INPUTS_DOCSTRING)
class MMBTForClassification(nn.Module):
    """Multimodal classifier: an ``MMBTModel`` backbone followed by dropout
    and a linear head.

    Uses MSE loss when ``config.num_labels == 1`` (regression), otherwise
    cross-entropy.
    """

    def __init__(self, config, transformer, encoder):
        super().__init__()
        # fix: keep the config — forward() consults self.config.use_return_dict,
        # which would otherwise raise AttributeError
        self.config = config
        self.num_labels = config.num_labels
        self.mmbt = MMBTModel(config, transformer, encoder)
        self.dropout = nn.Dropout(config.hidden_dropout_prob)
        self.classifier = nn.Linear(config.hidden_size, config.num_labels)

    def forward(self, input_modal, input_ids=None, modal_start_tokens=None, modal_end_tokens=None, attention_mask=None, token_type_ids=None, modal_token_type_ids=None, position_ids=None, modal_position_ids=None, head_mask=None, inputs_embeds=None, labels=None, return_dict=None):
        """Run the MMBT backbone and classify the pooled output.

        Returns a ``SequenceClassifierOutput`` (or a plain tuple when
        ``return_dict`` is falsy); ``loss`` is included when ``labels`` is
        given.
        """
        return_dict = return_dict if return_dict is not None else self.config.use_return_dict
        outputs = self.mmbt(input_modal=input_modal, input_ids=input_ids, modal_start_tokens=modal_start_tokens, modal_end_tokens=modal_end_tokens, attention_mask=attention_mask, token_type_ids=token_type_ids, modal_token_type_ids=modal_token_type_ids, position_ids=position_ids, modal_position_ids=modal_position_ids, head_mask=head_mask, inputs_embeds=inputs_embeds, return_dict=return_dict)
        pooled_output = outputs[1]
        pooled_output = self.dropout(pooled_output)
        logits = self.classifier(pooled_output)
        loss = None
        if labels is not None:
            if self.num_labels == 1:
                # single label: regression with mean-squared error
                loss_fct = MSELoss()
                loss = loss_fct(logits.view(-1), labels.view(-1))
            else:
                loss_fct = CrossEntropyLoss()
                loss = loss_fct(logits.view(-1, self.num_labels), labels.view(-1))
        if not return_dict:
            output = (logits,) + outputs[2:]
            return ((loss,) + output) if loss is not None else output
        return SequenceClassifierOutput(loss=loss, logits=logits, hidden_states=outputs.hidden_states, attentions=outputs.attentions)
def _simplify_operator(element: Union[(SparsePauliOp, OpTreeOperator)]) -> Union[(SparsePauliOp, OpTreeOperator)]:
    """Merge duplicate Pauli strings of an operator by summing their
    coefficients, preserving the wrapper type of the input.

    NOTE(review): returns ``None`` (not reflected in the annotation) when the
    operator has no Pauli terms; the ``pauli in pauli_list`` membership test
    is linear per term (quadratic overall) — presumably fine for small
    operators, confirm for large ones.
    """
    if isinstance(element, OpTreeOperator):
        # unwrap the tree leaf; remember to re-wrap the result
        operator = element.operator
        input_type = 'leaf'
    else:
        operator = element
        input_type = 'operator'
    pauli_list = []
    coeff_list = []
    for (i, pauli) in enumerate(operator.paulis):
        if (pauli in pauli_list):
            # duplicate Pauli string: accumulate its coefficient
            index = pauli_list.index(pauli)
            coeff_list[index] += operator.coeffs[i]
        else:
            pauli_list.append(pauli)
            coeff_list.append(operator.coeffs[i])
    if (len(pauli_list) > 0):
        operator_simp = SparsePauliOp(pauli_list, coeff_list)
        if (input_type == 'leaf'):
            # restore the OpTree wrapper to match the input
            return OpTreeOperator(operator_simp)
        return operator_simp
    else:
        return None
def RNNTanhCell(input, hidden, w_ih, w_hh, b_ih=None, b_hh=None):
    """One Elman RNN step with tanh: h' = tanh(W_ih·x + b_ih + W_hh·h + b_hh)."""
    input_gate = F.linear(input, w_ih, b_ih)
    hidden_gate = F.linear(hidden, w_hh, b_hh)
    return torch.tanh(input_gate + hidden_gate)
def register_Ns3UplinkLteGlobalPathlossDatabase_methods(root_module, cls):
    """Register constructors and methods of
    ``ns3::UplinkLteGlobalPathlossDatabase`` on its pybindgen class wrapper.

    (Auto-generated binding code; the declarations mirror the C++ API.)
    """
    # default constructor
    cls.add_constructor([])
    # copy constructor
    cls.add_constructor([param('ns3::UplinkLteGlobalPathlossDatabase const &', 'arg0')])
    cls.add_method('UpdatePathloss', 'void', [param('std::string', 'context'), param('ns3::Ptr< ns3::SpectrumPhy >', 'txPhy'), param('ns3::Ptr< ns3::SpectrumPhy >', 'rxPhy'), param('double', 'lossDb')], is_virtual=True)
    return
def infer_dim_tags(*, name, batch_dim_axis=NotSpecified, time_dim_axis=NotSpecified, feature_dim_axis=NotSpecified, dim_tags: Optional[Sequence[Dim]]=None, shape: Optional[Sequence[Optional[int]]]=None, sparse_dim: Optional[Dim]=None, dim=NotSpecified, size_placeholder=None, auto_create_placeholders=False, batch=None, **_other_kwargs) -> Tuple[(Dim, ...)]:
    """Derive the tuple of dim tags for a Data/Tensor from legacy keyword
    options (``shape``, ``batch_dim_axis``, ``time_dim_axis``, ...).

    If ``dim_tags`` is given explicitly, it is returned as-is; otherwise the
    tags are inferred from ``shape`` and the axis options.
    """
    if (dim_tags is not None):
        return tuple(dim_tags)
    if (batch_dim_axis is NotSpecified):
        # legacy default: batch is the first axis
        batch_dim_axis = 0
    if (shape is None):
        # no shape given: derive both a default shape and the time axis
        if (time_dim_axis is NotSpecified):
            time_dim_axis = _default_time_dim_axis_no_shape(batch_dim_axis=batch_dim_axis, feature_dim_axis=feature_dim_axis)
        (shape, time_dim_axis) = _infer_default_shape_and_time(batch_dim_axis=batch_dim_axis, feature_dim_axis=feature_dim_axis, time_dim_axis=time_dim_axis, sparse=bool(sparse_dim), dim=dim)
    elif (time_dim_axis is NotSpecified):
        time_dim_axis = _default_time_dim_axis(batch_dim_axis=batch_dim_axis, shape=shape)
    dims = _infer_dim_tags_tuple_from_shape(shape, batch_dim_axis=batch_dim_axis, time_dim_axis=time_dim_axis, feature_dim_axis=feature_dim_axis, size_placeholder=size_placeholder, name=name, extern_data=auto_create_placeholders, sparse=bool(sparse_dim), batch=batch)
    if (dim is not NotSpecified):
        # consistency checks between the legacy `dim` option and the dim tags
        if sparse_dim:
            assert (sparse_dim.dimension == dim)
        elif (feature_dim_axis is None):
            assert (dim is None)
        elif (feature_dim_axis is NotSpecified):
            pass
        else:
            assert (dims[feature_dim_axis].dimension == dim)
    return dims
class TransformerDecoderLayer(nn.Module):
    """Pre-norm transformer decoder layer: self-attention, cross-attention
    over ``memory``, then a feed-forward block, each with residual + dropout.
    ``no_norm=True`` replaces the LayerNorms with identities.
    """
    def __init__(self, d_model, nhead, dim_feedforward=2048, dropout=0.1, no_norm=False, activation='relu'):
        super().__init__()
        self.self_attn = nn.MultiheadAttention(d_model, nhead, dropout=dropout, bias=False)
        self.multihead_attn = nn.MultiheadAttention(d_model, nhead, dropout=dropout, bias=False)
        # position-wise feed-forward network
        self.linear1 = nn.Linear(d_model, dim_feedforward)
        self.dropout = nn.Dropout(dropout)
        self.linear2 = nn.Linear(dim_feedforward, d_model)
        # normalization can be disabled (Identity) via no_norm
        self.norm1 = (nn.LayerNorm(d_model) if (not no_norm) else nn.Identity())
        self.norm2 = (nn.LayerNorm(d_model) if (not no_norm) else nn.Identity())
        self.norm3 = (nn.LayerNorm(d_model) if (not no_norm) else nn.Identity())
        self.dropout1 = nn.Dropout(dropout)
        self.dropout2 = nn.Dropout(dropout)
        self.dropout3 = nn.Dropout(dropout)
        self.activation = _get_activation_fn(activation)
    def with_pos_embed(self, tensor, pos):
        """Add a positional embedding to ``tensor`` (no-op if ``pos`` is None)."""
        return (tensor if (pos is None) else (tensor + pos))
    def forward(self, tgt, memory, pos=None, query_pos=None):
        """Decode ``tgt`` attending to ``memory``; ``pos``/``query_pos`` are
        optional positional embeddings for memory and queries.
        NOTE(review): no attention masks are applied (non-causal) — confirm
        this is intended for the use case.
        """
        # self-attention (pre-norm): queries/keys carry the query positions
        tgt2 = self.norm1(tgt)
        q = k = self.with_pos_embed(tgt2, query_pos)
        tgt2 = self.self_attn(q, k, value=tgt2)[0]
        tgt = (tgt + self.dropout1(tgt2))
        # cross-attention over the encoder memory
        tgt2 = self.norm2(tgt)
        tgt2 = self.multihead_attn(query=self.with_pos_embed(tgt2, query_pos), key=self.with_pos_embed(memory, pos), value=memory)[0]
        tgt = (tgt + self.dropout2(tgt2))
        # position-wise feed-forward
        tgt2 = self.norm3(tgt)
        tgt2 = self.linear2(self.dropout(self.activation(self.linear1(tgt2))))
        tgt = (tgt + self.dropout3(tgt2))
        return tgt
def aggregate_meanpool(features, agg_transform_size, adj_with_self_loops_indices, degrees, num_nodes, num_features, name):
    """GraphSAGE-style mean aggregation (TF1 graph mode): transform node
    features with a ReLU FC layer, then average each node's neighbour
    features (self loops included via the adjacency indices).
    """
    with tf.name_scope(name):
        # (target, neighbour) index pairs of the adjacency with self loops
        (self_indices, neighbours_indices) = adj_with_self_loops_indices
        fc_weights = tf.get_variable(f'{name}-fc_weights', shape=[num_features, agg_transform_size], dtype=tf.float32, initializer=tf.glorot_uniform_initializer())
        # sparse input features need the sparse matmul variant
        if isinstance(features, tf.SparseTensor):
            transformed_features = tf.sparse_tensor_dense_matmul(features, fc_weights)
        else:
            transformed_features = tf.matmul(features, fc_weights)
        transformed_features = tf.nn.relu(transformed_features)
        # gather each edge's neighbour feature, sum per target node ...
        edge_features = tf.gather(transformed_features, neighbours_indices)
        output = scatter_add_tensor(edge_features, self_indices, out_shape=[num_nodes, agg_transform_size])
        # ... and divide by the node degree => mean over neighbours
        output = (output / degrees)
        return output
@weighted_loss
def distribution_focal_loss(pred, label):
    """Distribution Focal Loss (DFL) from `Generalized Focal Loss
    <https://arxiv.org/abs/2006.04388>`_.

    Fix: the line above this function was the garbled remnant ``_loss`` of
    the ``@weighted_loss`` decorator (which adds weight/reduction handling);
    it is restored here.

    Args:
        pred (Tensor): distribution logits over n+1 discrete bins, shape (N, n+1).
        label (Tensor): continuous regression target in [0, n], shape (N,).

    Returns:
        Tensor: per-sample loss, shape (N,).
    """
    # integer bins bracketing the continuous label
    dis_left = label.long()
    dis_right = dis_left + 1
    # linear interpolation weights between the two bins
    weight_left = dis_right.float() - label
    weight_right = label - dis_left.float()
    loss = (F.cross_entropy(pred, dis_left, reduction='none') * weight_left
            + F.cross_entropy(pred, dis_right, reduction='none') * weight_right)
    return loss
def eval_epoch(args, model, test_dataloader, device, n_gpu):
    """Run cross-modal retrieval evaluation over *test_dataloader* and return R@1.

    Encodes every text and image-pair batch, computes the full text-to-image
    similarity matrix (optionally split across *n_gpu* devices), logs
    retrieval metrics in both directions, and returns text-to-image R@1.
    """
    # Unwrap DataParallel/DDP so we can do the manual multi-GPU split below.
    if hasattr(model, 'module'):
        model = model.module.to(device)
    else:
        model = model.to(device)
    # Multi-sentence mode: several captions may share one image pair; the
    # dataset exposes cumulative cut-off points to regroup sentences later.
    multi_sentence_ = False
    (cut_off_points_, sentence_num_, pair_num_) = ([], (- 1), (- 1))
    if (hasattr(test_dataloader.dataset, 'multi_sentence_per_pair') and test_dataloader.dataset.multi_sentence_per_pair):
        multi_sentence_ = True
        cut_off_points_ = test_dataloader.dataset.cut_off_points
        sentence_num_ = test_dataloader.dataset.sentence_num
        pair_num_ = test_dataloader.dataset.image_num
        # Convert cumulative counts into 0-based indices of the last sentence
        # belonging to each pair.
        cut_off_points_ = [(itm - 1) for itm in cut_off_points_]
    if multi_sentence_:
        logger.warning('Eval under the multi-sentence per pair setting.')
        logger.warning('sentence num: {}, pair num: {}'.format(sentence_num_, pair_num_))
    model.eval()
    with torch.no_grad():
        batch_list_t = []
        batch_list_v = []
        (batch_sequence_output_list, batch_visual_output_list) = ([], [])
        total_pair_num = 0
        # ---- Pass 1: encode all batches, caching text and visual features. ----
        for (bid, batch) in enumerate(test_dataloader):
            batch = tuple((t.to(device) for t in batch))
            (input_ids, input_mask, segment_ids, bef_image, aft_image, image_mask) = batch
            # Before/after images are concatenated into one "image pair" tensor.
            image_pair = torch.cat([bef_image, aft_image], 1)
            if multi_sentence_:
                (b, *_t) = image_pair.shape
                (sequence_output, _) = model.get_sequence_output(input_ids, segment_ids, input_mask)
                batch_sequence_output_list.append(sequence_output)
                batch_list_t.append((input_mask, segment_ids))
                # Only encode each image pair once: keep the rows whose global
                # sentence index is a cut-off point inside this batch window.
                (s_, e_) = (total_pair_num, (total_pair_num + b))
                filter_inds = [(itm - s_) for itm in cut_off_points_ if ((itm >= s_) and (itm < e_))]
                if (len(filter_inds) > 0):
                    (image_pair, pair_mask) = (image_pair[(filter_inds, ...)], image_mask[(filter_inds, ...)])
                    (visual_output, _) = model.get_visual_output(image_pair, pair_mask)
                    batch_visual_output_list.append(visual_output)
                    batch_list_v.append((pair_mask,))
                total_pair_num += b
            print('{}/{}\r'.format(bid, len(test_dataloader)), end='')
        # ---- Pass 2: compute the similarity matrix, optionally sharded. ----
        if (n_gpu > 1):
            device_ids = list(range(n_gpu))
            batch_list_t_splits = []
            batch_list_v_splits = []
            batch_t_output_splits = []
            batch_v_output_splits = []
            bacth_len = len(batch_list_t)
            # Ceiling division: each GPU gets a contiguous slice of text batches.
            split_len = (((bacth_len + n_gpu) - 1) // n_gpu)
            for dev_id in device_ids:
                (s_, e_) = ((dev_id * split_len), ((dev_id + 1) * split_len))
                if (dev_id == 0):
                    # GPU 0 already holds the tensors; every GPU sees ALL visual
                    # features but only its own slice of text features.
                    batch_list_t_splits.append(batch_list_t[s_:e_])
                    batch_list_v_splits.append(batch_list_v)
                    batch_t_output_splits.append(batch_sequence_output_list[s_:e_])
                    batch_v_output_splits.append(batch_visual_output_list)
                else:
                    devc = torch.device('cuda:{}'.format(str(dev_id)))
                    devc_batch_list = [tuple((t.to(devc) for t in b)) for b in batch_list_t[s_:e_]]
                    batch_list_t_splits.append(devc_batch_list)
                    devc_batch_list = [tuple((t.to(devc) for t in b)) for b in batch_list_v]
                    batch_list_v_splits.append(devc_batch_list)
                    devc_batch_list = [b.to(devc) for b in batch_sequence_output_list[s_:e_]]
                    batch_t_output_splits.append(devc_batch_list)
                    devc_batch_list = [b.to(devc) for b in batch_visual_output_list]
                    batch_v_output_splits.append(devc_batch_list)
            parameters_tuple_list = [(batch_list_t_splits[dev_id], batch_list_v_splits[dev_id], batch_t_output_splits[dev_id], batch_v_output_splits[dev_id]) for dev_id in device_ids]
            parallel_outputs = parallel_apply(_run_on_single_gpu, model, parameters_tuple_list, device_ids)
            sim_matrix = []
            for idx in range(len(parallel_outputs)):
                sim_matrix += parallel_outputs[idx]
            sim_matrix = np.concatenate(tuple(sim_matrix), axis=0)
        else:
            sim_matrix = _run_on_single_gpu(model, batch_list_t, batch_list_v, batch_sequence_output_list, batch_visual_output_list)
            sim_matrix = np.concatenate(tuple(sim_matrix), axis=0)
        if multi_sentence_:
            # Regroup sentence rows by image pair, padding each group with -inf
            # up to the longest group so the matrix becomes 3-D:
            # (pairs, max sentences per pair, pairs).
            logger.info('before reshape, sim matrix size: {} x {}'.format(sim_matrix.shape[0], sim_matrix.shape[1]))
            cut_off_points2len_ = [(itm + 1) for itm in cut_off_points_]
            max_length = max([(e_ - s_) for (s_, e_) in zip(([0] + cut_off_points2len_[:(- 1)]), cut_off_points2len_)])
            sim_matrix_new = []
            for (s_, e_) in zip(([0] + cut_off_points2len_[:(- 1)]), cut_off_points2len_):
                sim_matrix_new.append(np.concatenate((sim_matrix[s_:e_], np.full((((max_length - e_) + s_), sim_matrix.shape[1]), (- np.inf))), axis=0))
            sim_matrix = np.stack(tuple(sim_matrix_new), axis=0)
            logger.info('after reshape, sim matrix size: {} x {} x {}'.format(sim_matrix.shape[0], sim_matrix.shape[1], sim_matrix.shape[2]))
        tv_metrics = tensor_text_to_video_metrics(sim_matrix)
        vt_metrics = compute_metrics(tensor_video_to_text_sim(sim_matrix))
        logger.info('Text-to-Image-Pair:')
        logger.info('\t>>> : {:.1f} - : {:.1f} - : {:.1f} - Median R: {:.1f} - Mean R: {:.1f}'.format(tv_metrics['R1'], tv_metrics['R5'], tv_metrics['R10'], tv_metrics['MR'], tv_metrics['MeanR']))
        logger.info('Image-Pair-to-Text:')
        logger.info('\t>>> V2T$: {:.1f} - V2T$: {:.1f} - V2T$: {:.1f} - V2T$Median R: {:.1f} - V2T$Mean R: {:.1f}'.format(vt_metrics['R1'], vt_metrics['R5'], vt_metrics['R10'], vt_metrics['MR'], vt_metrics['MeanR']))
        R1 = tv_metrics['R1']
    return R1
def list_pictures(directory, ext='jpg|jpeg|bmp|png|ppm'):
    """Recursively collect image file paths under *directory*, sorted.

    Args:
        directory: Root directory to walk; must already exist.
        ext: '|'-separated list of accepted (lowercase) file extensions.

    Returns:
        Sorted list of full paths whose basename is ``<word-chars>.<ext>``.
    """
    assert os.path.isdir(directory), 'dataset is not exists!{}'.format(directory)
    # BUG FIX: the original pattern was unanchored, so re.match accepted any
    # *prefix* match — e.g. 'photo.jpg.bak' and 'photo.jpgx' were listed as
    # pictures. fullmatch requires the whole basename to match. The pattern is
    # also compiled once instead of being re-parsed for every file.
    pattern = re.compile('([\\w]+\\.(?:' + ext + '))')
    return sorted([os.path.join(root, f)
                   for (root, _, files) in os.walk(directory)
                   for f in files
                   if pattern.fullmatch(f)])
class AverageMeter(object):
    """Tracks the latest value and the running mean of a stream of updates."""

    def __init__(self):
        self.reset()

    def reset(self):
        """Clear all accumulated statistics."""
        self.val = 0
        self.avg = 0
        self.sum = 0
        self.count = 0

    def update(self, val, n=1):
        """Record *val* observed *n* times and refresh the running average."""
        self.val = val
        self.sum += val * n
        self.count += n
        # Guard against division by zero when nothing has been recorded yet.
        self.avg = self.sum / self.count if self.count else 0
def test_wrap_bare_list():
    """An Index64 built from a bare Python list must expose the same values
    when viewed back as a numpy array."""
    values = [1, 2, 3, 4, 5]
    wrapped = ak.index.Index64(values)
    roundtrip = np.asarray(wrapped)
    assert roundtrip.tolist() == values
class CUBDataset(ConfounderDataset):
    """CUB-based dataset where each example carries a binary label *y* and a
    binary confounder, combined into one of four group labels.

    NOTE(review): the confounder column is 'place', which matches the
    Waterbirds variant of CUB — confirm against the dataset-generation script.
    """

    def __init__(self, root_dir, target_name, confounder_names, augment_data=False, model_type=None):
        self.root_dir = root_dir
        self.target_name = target_name
        self.confounder_names = confounder_names
        self.model_type = model_type
        self.augment_data = augment_data
        # Generated data live under <root>/data/<target>_<confounder1>_...
        self.data_dir = os.path.join(self.root_dir, 'data', '_'.join(([self.target_name] + self.confounder_names)))
        if (not os.path.exists(self.data_dir)):
            raise ValueError(f'{self.data_dir} does not exist yet. Please generate the dataset first.')
        self.metadata_df = pd.read_csv(os.path.join(self.data_dir, 'metadata.csv'))
        # Binary target labels from the metadata CSV.
        self.y_array = self.metadata_df['y'].values
        self.n_classes = 2
        # One binary confounder -> 2 * 2 = 4 groups.
        self.confounder_array = self.metadata_df['place'].values
        self.n_confounders = 1
        self.n_groups = pow(2, 2)
        # group id = y * 2 + place, i.e. values in {0, 1, 2, 3}.
        self.group_array = ((self.y_array * (self.n_groups / 2)) + self.confounder_array).astype('int')
        self.filename_array = self.metadata_df['img_filename'].values
        self.split_array = self.metadata_df['split'].values
        self.split_dict = {'train': 0, 'val': 1, 'test': 2}
        if (model_attributes[self.model_type]['feature_type'] == 'precomputed'):
            # Precomputed features: load the full matrix; no image transforms.
            self.features_mat = torch.from_numpy(np.load(os.path.join(root_dir, 'features', model_attributes[self.model_type]['feature_filename']))).float()
            self.train_transform = None
            self.eval_transform = None
        else:
            # Raw images: build train/eval transforms for this backbone.
            self.features_mat = None
            self.train_transform = get_transform_cub(self.model_type, train=True, augment_data=augment_data)
            self.eval_transform = get_transform_cub(self.model_type, train=False, augment_data=augment_data)
.flaky  # NOTE(review): garbled line — likely the remnant of a decorator such as `@pytest.mark.flaky`; confirm against the original source
def test_exponential_distribution():
    """Statistical test: the Schechter SMF sampler, with parameters chosen so
    it should reduce to a truncated exponential on [0, q_max], must pass a
    Kolmogorov-Smirnov goodness-of-fit test."""
    q_max = 100.0
    sample = stellar_mass.schechter_smf_mass(0, 0, 1, size=1000, m_min=1e-10, m_max=q_max, resolution=1000)
    (d, p_value) = scipy.stats.kstest(sample, 'truncexpon', args=(q_max,))
    # Loose threshold: only fail when the fit is clearly wrong (test is flaky).
    assert (p_value >= 0.01)
def get_op_args(declaration, argmap):
    """Resolve the ordered argument expressions for an op *declaration*.

    Uses the declaration's explicit 'call_args' override when present,
    otherwise the declared argument names. For tensor methods 'self' is
    removed. Each key is then mapped through *argmap*; override keys missing
    from the map fall back to the key string itself.
    """
    explicit = declaration.get('call_args')
    keys = explicit if explicit else [arg['name'] for arg in declaration['arguments']]
    if is_tensor_method(declaration):
        # 'self' is the receiver, not a call argument, for method-style ops.
        keys = [key for key in keys if key != 'self']
    if explicit:
        # Overridden call args may name literals absent from argmap.
        return [argmap.get(key, key) for key in keys]
    return [argmap[key] for key in keys]
class SingleDeconv3DBlock(nn.Module):
    """Doubles each spatial dimension with a single transposed 3-D convolution.

    kernel_size == stride == 2 with zero padding gives exact 2x upsampling.
    """

    def __init__(self, in_planes, out_planes):
        super().__init__()
        self.block = nn.ConvTranspose3d(
            in_planes,
            out_planes,
            kernel_size=2,
            stride=2,
            padding=0,
            output_padding=0,
        )

    def forward(self, x):
        """Apply the transposed convolution to a (N, C, D, H, W) tensor."""
        return self.block(x)
def move_to_cuda(sample):
    """Recursively move every tensor in *sample* onto the default CUDA device.

    Dicts, lists and tuples are traversed recursively; non-tensor leaves are
    returned unchanged. An empty *sample* yields an empty dict (matching the
    convention of the callers that treat "no batch" as an empty dict).
    """
    if len(sample) == 0:
        return {}

    def _move_to_cuda(maybe_tensor):
        if torch.is_tensor(maybe_tensor):
            return maybe_tensor.cuda()
        elif isinstance(maybe_tensor, dict):
            return {key: _move_to_cuda(value) for (key, value) in maybe_tensor.items()}
        elif isinstance(maybe_tensor, list):
            return [_move_to_cuda(x) for x in maybe_tensor]
        elif isinstance(maybe_tensor, tuple):
            # BUG FIX: the original returned a list here, silently changing the
            # container type for tuple inputs; preserve tuple-ness instead.
            return tuple(_move_to_cuda(x) for x in maybe_tensor)
        else:
            return maybe_tensor
    return _move_to_cuda(sample)
class BatchNormBatchStat(BatchNorm2d):
    """BatchNorm2d variant that normalizes with *current batch* statistics even
    in eval mode, instead of the stored running mean/var."""

    def forward(self, input):
        if not self.training:
            # Eval mode: pass training=True with no running buffers so
            # F.batch_norm computes statistics from this batch alone.
            return F.batch_norm(input, None, None, self.weight, self.bias, True, 1.0, self.eps)
        # Training mode: ordinary BatchNorm2d behaviour (updates running stats).
        return super().forward(input)
class MMBTForClassification():
    """Import-guard stub standing in for the real MMBT classification model.

    Instantiating it delegates to requires_pytorch — presumably raising an
    informative error when PyTorch is not installed (confirm with the helper's
    definition).
    """

    def __init__(self, *args, **kwargs):
        requires_pytorch(self)
_dispatch  # NOTE(review): garbled line — likely the remnant of a decorator (probably `@_dispatch` from SciPy's uarray backend machinery); confirm against the original source
def dct(x, type=2, n=None, axis=(- 1), norm=None, overwrite_x=False, workers=None, orthogonalize=None):
    """uarray multimethod stub for the Discrete Cosine Transform.

    The body computes nothing: it returns the tuple of Dispatchable arguments
    that the uarray dispatch machinery uses to select a backend implementation.
    """
    return (Dispatchable(x, np.ndarray),)
def get_hip_file_path(filepath, is_pytorch_extension=False):
    """Map a CUDA source path to its 'hipified' counterpart.

    Renames cuda/CUDA/THC tokens in the directory and file stem, converts the
    '.cu' extension to '.hip', and — when nothing else changed — disambiguates
    via a 'hip' subdirectory (non-extension builds) or a '_hip' stem suffix
    (PyTorch extension builds) so the output never collides with the input.
    """
    # Out-of-place files keep their original path for non-extension builds.
    if not is_pytorch_extension and not is_out_of_place(filepath):
        return filepath
    dir_part, base_name = os.path.split(filepath)
    stem, extension = os.path.splitext(base_name)
    if extension == '.cu':
        extension = '.hip'
    original_base = base_name
    original_dir = dir_part
    # Token renames; directory and stem use slightly different rules.
    dir_part = dir_part.replace('cuda', 'hip')
    dir_part = dir_part.replace('THC', 'THH')
    stem = stem.replace('cuda', 'hip')
    stem = stem.replace('CUDA', 'HIP')
    # caffe2/core THC names are left alone intentionally.
    if dir_part != 'caffe2/core':
        stem = stem.replace('THC', 'THH')
    if not is_pytorch_extension and dir_part == original_dir:
        dir_part = os.path.join(dir_part, 'hip')
    if is_pytorch_extension and dir_part == original_dir and (stem + extension) == original_base:
        stem = stem + '_hip'
    return os.path.join(dir_part, stem + extension)
def pointnet_fp_module(xyz1, xyz2, points1, points2, mlp, is_training, bn_decay, scope, bn=True):
    """PointNet++ feature propagation: interpolate features from the sparse set
    (xyz2/points2) back onto the dense set (xyz1), fuse with the dense set's own
    features (points1, skip connection), and refine with a pointwise MLP.
    """
    with tf.variable_scope(scope) as sc:
        # 3 nearest neighbours of each dense point among the sparse points.
        (dist, idx) = three_nn(xyz1, xyz2)
        dist = tf.maximum(dist, 1e-10)  # avoid division by zero for coincident points
        norm = tf.reduce_sum((1.0 / dist), axis=2, keep_dims=True)
        norm = tf.tile(norm, [1, 1, 3])
        # Inverse-distance weights, normalized over the 3 neighbours.
        weight = ((1.0 / dist) / norm)
        interpolated_points = three_interpolate(points2, idx, weight)
        if (points1 is not None):
            # Skip connection: concatenate interpolated and original features.
            new_points1 = tf.concat(axis=2, values=[interpolated_points, points1])
        else:
            new_points1 = interpolated_points
        # Add a singleton spatial dim so 1x1 conv2d acts as a per-point MLP.
        new_points1 = tf.expand_dims(new_points1, 2)
        for (i, num_out_channel) in enumerate(mlp):
            new_points1 = tf_util.conv2d(new_points1, num_out_channel, [1, 1], padding='VALID', stride=[1, 1], bn=bn, is_training=is_training, scope=('conv_%d' % i), bn_decay=bn_decay)
        new_points1 = tf.squeeze(new_points1, [2])
        return new_points1
def create_mounts(mode, base_log_dir, sync_interval=180, local_input_dir_to_mount_point_dict=None):
    """Build the list of doodad mounts (code, inputs, and one output mount)
    appropriate for the given launch *mode*.
    """
    if mode == 'sss':
        code_mounts, non_code_mounts = SSS_CODE_MOUNTS, SSS_NON_CODE_MOUNTS
    else:
        code_mounts, non_code_mounts = CODE_MOUNTS, NON_CODE_MOUNTS

    if local_input_dir_to_mount_point_dict is None:
        local_input_dir_to_mount_point_dict = {}
    else:
        raise NotImplementedError('TODO(vitchyr): Implement this')

    mounts = [m for m in code_mounts]
    for local_dir, target_point in local_input_dir_to_mount_point_dict.items():
        mounts.append(mount.MountLocal(local_dir=local_dir, mount_point=target_point, pythonpath=False))
    # Data/non-code mounts are skipped for fully-local runs.
    if mode != 'local':
        for m in non_code_mounts:
            mounts.append(m)

    # File patterns synced back from remote output mounts.
    sync_types = ('*.txt', '*.csv', '*.json', '*.gz', '*.tar', '*.log', '*.pkl', '*.mp4', '*.png', '*.jpg', '*.jpeg', '*.patch')
    if mode == 'ec2':
        output_mount = mount.MountS3(s3_path='', mount_point=conf.OUTPUT_DIR_FOR_DOODAD_TARGET, output=True, sync_interval=sync_interval, include_types=sync_types)
    elif mode == 'gcp':
        output_mount = mount.MountGCP(gcp_path='', mount_point=conf.OUTPUT_DIR_FOR_DOODAD_TARGET, output=True, gcp_bucket_name=conf.GCP_BUCKET_NAME, sync_interval=sync_interval, include_types=sync_types)
    elif mode in ('local', 'local_singularity', 'slurm_singularity', 'sss'):
        # Local-style modes write straight into the log directory.
        output_mount = mount.MountLocal(local_dir=base_log_dir, mount_point=None, output=True)
    elif mode in ('local_docker', 'ssh'):
        output_mount = mount.MountLocal(local_dir=base_log_dir, mount_point=conf.OUTPUT_DIR_FOR_DOODAD_TARGET, output=True)
    else:
        raise NotImplementedError('Mode not supported: {}'.format(mode))
    mounts.append(output_mount)
    return mounts
def normalize_obs(obs, mean, std):
    """Standardize *obs* with the given statistics; identity when mean is None.

    NOTE(review): assumes std contains no zeros whenever mean is provided —
    confirm the caller guarantees this.
    """
    if mean is None:
        return obs
    return np.divide(obs - mean, std)
class Explore_Decoder_Graph_Explorative():
    """Builds and scores a decoder graph of sentence-simplification operations.

    Starting from a Boxer (DRS) graph of a source sentence, it explores
    split / drop-relation / drop-modifier / drop-out-of-DRS operations,
    materializing them as alternating "major" (state) and "oper" (operation)
    nodes, then propagates probabilities bottom-up and extracts the best path.
    """

    def __init__(self, DISCOURSE_SENTENCE_MODEL, MAX_SPLIT_PAIR_SIZE, RESTRICTED_DROP_REL, ALLOWED_DROP_MOD, probability_tables, METHOD_FEATURE_EXTRACT):
        self.DISCOURSE_SENTENCE_MODEL = DISCOURSE_SENTENCE_MODEL
        self.MAX_SPLIT_PAIR_SIZE = MAX_SPLIT_PAIR_SIZE
        self.RESTRICTED_DROP_REL = RESTRICTED_DROP_REL
        self.ALLOWED_DROP_MOD = ALLOWED_DROP_MOD
        self.probability_tables = probability_tables
        self.METHOD_FEATURE_EXTRACT = METHOD_FEATURE_EXTRACT
        # Feature extractor selected by name (used when scoring operations).
        self.method_feature_extract = function_select_methods.select_feature_extract_method(self.METHOD_FEATURE_EXTRACT)

    def explore_decoder_graph(self, sentid, main_sentence, main_sent_dict, boxer_graph):
        """Breadth-first expansion of the full decoder graph for one sentence."""
        decoder_graph = Training_Graph()
        nodes_2_process = []
        if boxer_graph.isEmpty():
            # Degenerate DRS: the graph is just a terminal ('fin') node.
            nodeset = boxer_graph.get_nodeset()
            filtered_mod_pos = []
            simple_sentences = []
            majornode_data = ('fin', nodeset, simple_sentences, filtered_mod_pos)
            (majornode_name, isNew) = decoder_graph.create_majornode(majornode_data)
            nodes_2_process.append(majornode_name)
        else:
            # Root major node: start with the 'split' stage over the full nodeset.
            nodeset = boxer_graph.get_nodeset()
            (majornode_name, isNew) = self.addition_major_node(main_sent_dict, boxer_graph, decoder_graph, 'split', nodeset, [], [])
            nodes_2_process.append(majornode_name)
        while (len(nodes_2_process) != 0):
            nodes_2_process = self.expand_decoder_graph(nodes_2_process[:], main_sent_dict, boxer_graph, decoder_graph)
        return decoder_graph

    def expand_decoder_graph(self, nodes_2_process, main_sent_dict, boxer_graph, decoder_graph):
        """Expand the head of the worklist according to its operation type."""
        node_name = nodes_2_process[0]
        operreq = decoder_graph.get_majornode_type(node_name)
        nodeset = decoder_graph.get_majornode_nodeset(node_name)[:]
        oper_candidates = decoder_graph.get_majornode_oper_candidates(node_name)[:]
        processed_oper_candidates = decoder_graph.get_majornode_processed_oper_candidates(node_name)[:]
        filtered_postions = decoder_graph.get_majornode_filtered_postions(node_name)[:]
        if (operreq == 'split'):
            split_candidate_tuples = oper_candidates
            nodes_2_process = self.process_split_node_decoder_graph(node_name, nodeset, split_candidate_tuples, nodes_2_process, main_sent_dict, boxer_graph, decoder_graph)
        if (operreq == 'drop-rel'):
            relnode_candidates = oper_candidates
            processed_relnode_candidates = processed_oper_candidates
            filtered_mod_pos = filtered_postions
            nodes_2_process = self.process_droprel_node_decoder_graph(node_name, nodeset, relnode_candidates, processed_relnode_candidates, filtered_mod_pos, nodes_2_process, main_sent_dict, boxer_graph, decoder_graph)
        if (operreq == 'drop-mod'):
            mod_candidates = oper_candidates
            processed_mod_pos = processed_oper_candidates
            filtered_mod_pos = filtered_postions
            nodes_2_process = self.process_dropmod_node_decoder_graph(node_name, nodeset, mod_candidates, processed_mod_pos, filtered_mod_pos, nodes_2_process, main_sent_dict, boxer_graph, decoder_graph)
        if (operreq == 'drop-ood'):
            oodnode_candidates = oper_candidates
            processed_oodnode_candidates = processed_oper_candidates
            filtered_mod_pos = filtered_postions
            nodes_2_process = self.process_dropood_node_decoder_graph(node_name, nodeset, oodnode_candidates, processed_oodnode_candidates, filtered_mod_pos, nodes_2_process, main_sent_dict, boxer_graph, decoder_graph)
        # Pop the processed head; children were appended during expansion.
        return nodes_2_process[1:]

    def process_split_node_decoder_graph(self, node_name, nodeset, split_candidate_tuples, nodes_2_process, main_sent_dict, boxer_graph, decoder_graph):
        """Branch on every split candidate, plus a no-split branch."""
        parent_subgraph_nodeset_dict = boxer_graph.extract_parent_subgraph_nodeset_dict()
        # "No split applied" branch: all candidates remain unapplied.
        not_applied_cands = [item for item in split_candidate_tuples]
        opernode_data = ('split', None, not_applied_cands)
        opernode_name = decoder_graph.create_opernode(opernode_data)
        decoder_graph.create_edge((node_name, opernode_name, None))
        child_nodeset = nodeset[:]
        (child_majornode_name, isNew) = self.addition_major_node(main_sent_dict, boxer_graph, decoder_graph, 'drop-rel', child_nodeset, [], [])
        if isNew:
            nodes_2_process.append(child_majornode_name)
        decoder_graph.create_edge((opernode_name, child_majornode_name, None))
        # One branch per concrete split candidate.
        for split_candidate in split_candidate_tuples:
            (node_subgraph_nodeset_dict, node_span_dict) = boxer_graph.partition_drs_for_successful_candidate(split_candidate, parent_subgraph_nodeset_dict)
            split_results = []
            for tnodename in split_candidate:
                tspan = node_span_dict[tnodename]
                tnodeset = node_subgraph_nodeset_dict[tnodename][:]
                split_results.append((tspan, tnodeset, tnodename))
            # Order the split parts by their span in the source sentence.
            split_results.sort()
            not_applied_cands = [item for item in split_candidate_tuples if (item is not split_candidate)]
            opernode_data = ('split', split_candidate, not_applied_cands)
            opernode_name = decoder_graph.create_opernode(opernode_data)
            decoder_graph.create_edge((node_name, opernode_name, split_candidate))
            for item in split_results:
                child_nodeset = item[1][:]
                child_nodeset.sort()
                parent_child_nodeset = item[2]
                (child_majornode_name, isNew) = self.addition_major_node(main_sent_dict, boxer_graph, decoder_graph, 'drop-rel', child_nodeset, [], [])
                if isNew:
                    nodes_2_process.append(child_majornode_name)
                decoder_graph.create_edge((opernode_name, child_majornode_name, parent_child_nodeset))
        return nodes_2_process

    def process_droprel_node_decoder_graph(self, node_name, nodeset, relnode_candidates, processed_relnode_candidates, filtered_mod_pos, nodes_2_process, main_sent_dict, boxer_graph, decoder_graph):
        """Branch on keeping ('False') vs dropping ('True') the next relation."""
        relnode_to_process = relnode_candidates[0]
        processed_relnode_candidates.append(relnode_to_process)
        # Keep-the-relation branch.
        opernode_data = ('drop-rel', relnode_to_process, 'False')
        opernode_name = decoder_graph.create_opernode(opernode_data)
        decoder_graph.create_edge((node_name, opernode_name, relnode_to_process))
        child_nodeset = nodeset[:]
        child_filtered_mod_pos = filtered_mod_pos[:]
        (child_majornode_name, isNew) = self.addition_major_node(main_sent_dict, boxer_graph, decoder_graph, 'drop-rel', child_nodeset, processed_relnode_candidates, child_filtered_mod_pos)
        if isNew:
            nodes_2_process.append(child_majornode_name)
        decoder_graph.create_edge((opernode_name, child_majornode_name, 'False'))
        # Drop-the-relation branch: the boxer graph computes the reduced nodeset.
        opernode_data = ('drop-rel', relnode_to_process, 'True')
        opernode_name = decoder_graph.create_opernode(opernode_data)
        decoder_graph.create_edge((node_name, opernode_name, relnode_to_process))
        (child_nodeset, child_filtered_mod_pos) = boxer_graph.drop_relation(nodeset, relnode_to_process, filtered_mod_pos)
        (child_majornode_name, isNew) = self.addition_major_node(main_sent_dict, boxer_graph, decoder_graph, 'drop-rel', child_nodeset, processed_relnode_candidates, child_filtered_mod_pos)
        if isNew:
            nodes_2_process.append(child_majornode_name)
        decoder_graph.create_edge((opernode_name, child_majornode_name, 'True'))
        return nodes_2_process

    def process_dropmod_node_decoder_graph(self, node_name, nodeset, mod_candidates, processed_mod_pos, filtered_mod_pos, nodes_2_process, main_sent_dict, boxer_graph, decoder_graph):
        """Branch on keeping vs dropping the next modifier candidate."""
        modcand_to_process = mod_candidates[0]
        modcand_position_to_process = modcand_to_process[0]
        modcand_word = main_sent_dict[modcand_position_to_process][0]
        modcand_node = modcand_to_process[1]
        processed_mod_pos.append(modcand_position_to_process)
        # Keep-the-modifier branch.
        opernode_data = ('drop-mod', modcand_to_process, 'False')
        opernode_name = decoder_graph.create_opernode(opernode_data)
        decoder_graph.create_edge((node_name, opernode_name, modcand_to_process))
        # NOTE(review): no [:] copy here, unlike the sibling branches — the
        # child shares the parent's nodeset list; confirm this is intentional.
        child_nodeset = nodeset
        (child_majornode_name, isNew) = self.addition_major_node(main_sent_dict, boxer_graph, decoder_graph, 'drop-mod', child_nodeset, processed_mod_pos, filtered_mod_pos)
        if isNew:
            nodes_2_process.append(child_majornode_name)
        decoder_graph.create_edge((opernode_name, child_majornode_name, 'False'))
        # Drop-the-modifier branch: record the dropped word position.
        opernode_data = ('drop-mod', modcand_to_process, 'True')
        opernode_name = decoder_graph.create_opernode(opernode_data)
        decoder_graph.create_edge((node_name, opernode_name, modcand_to_process))
        child_nodeset = nodeset[:]
        tfiltered_mod_pos = filtered_mod_pos[:]
        tfiltered_mod_pos.append(modcand_position_to_process)
        (child_majornode_name, isNew) = self.addition_major_node(main_sent_dict, boxer_graph, decoder_graph, 'drop-mod', child_nodeset, processed_mod_pos, tfiltered_mod_pos)
        if isNew:
            nodes_2_process.append(child_majornode_name)
        decoder_graph.create_edge((opernode_name, child_majornode_name, 'True'))
        return nodes_2_process

    def process_dropood_node_decoder_graph(self, node_name, nodeset, oodnode_candidates, processed_oodnode_candidates, filtered_mod_pos, nodes_2_process, main_sent_dict, boxer_graph, decoder_graph):
        """Branch on keeping vs dropping the next out-of-DRS node."""
        oodnode_to_process = oodnode_candidates[0]
        processed_oodnode_candidates.append(oodnode_to_process)
        # Keep-the-node branch.
        opernode_data = ('drop-ood', oodnode_to_process, 'False')
        opernode_name = decoder_graph.create_opernode(opernode_data)
        decoder_graph.create_edge((node_name, opernode_name, oodnode_to_process))
        child_nodeset = nodeset[:]
        (child_majornode_name, isNew) = self.addition_major_node(main_sent_dict, boxer_graph, decoder_graph, 'drop-ood', child_nodeset, processed_oodnode_candidates, filtered_mod_pos)
        if isNew:
            nodes_2_process.append(child_majornode_name)
        decoder_graph.create_edge((opernode_name, child_majornode_name, 'False'))
        # Drop-the-node branch: remove it from the child nodeset.
        opernode_data = ('drop-ood', oodnode_to_process, 'True')
        opernode_name = decoder_graph.create_opernode(opernode_data)
        decoder_graph.create_edge((node_name, opernode_name, oodnode_to_process))
        child_nodeset = nodeset[:]
        child_nodeset.remove(oodnode_to_process)
        (child_majornode_name, isNew) = self.addition_major_node(main_sent_dict, boxer_graph, decoder_graph, 'drop-ood', child_nodeset, processed_oodnode_candidates, filtered_mod_pos)
        if isNew:
            nodes_2_process.append(child_majornode_name)
        decoder_graph.create_edge((opernode_name, child_majornode_name, 'True'))
        return nodes_2_process

    def addition_major_node(self, main_sent_dict, boxer_graph, decoder_graph, opertype, nodeset, processed_candidates, extra_data):
        """Create (or reuse) the next major node for *nodeset*.

        Stages are tried in the fixed order split -> drop-rel -> drop-mod ->
        drop-ood (each only if enabled in DISCOURSE_SENTENCE_MODEL and if it
        still has candidates); otherwise the node becomes terminal ('fin').
        """
        type_val = {'split': 1, 'drop-rel': 2, 'drop-mod': 3, 'drop-ood': 4}
        operval = type_val[opertype]
        simple_sentences = []
        if (operval <= type_val['split']):
            if (opertype in self.DISCOURSE_SENTENCE_MODEL):
                split_candidate_tuples = boxer_graph.extract_split_candidate_tuples(nodeset, self.MAX_SPLIT_PAIR_SIZE)
                if (len(split_candidate_tuples) != 0):
                    majornode_data = ('split', nodeset[:], simple_sentences, split_candidate_tuples)
                    (majornode_name, isNew) = decoder_graph.create_majornode(majornode_data)
                    return (majornode_name, isNew)
        if (operval <= type_val['drop-rel']):
            if (opertype in self.DISCOURSE_SENTENCE_MODEL):
                processed_relnode = (processed_candidates[:] if (opertype == 'drop-rel') else [])
                filtered_mod_pos = (extra_data if (opertype == 'drop-rel') else [])
                relnode_set = boxer_graph.extract_drop_rel_candidates(nodeset, self.RESTRICTED_DROP_REL, processed_relnode)
                if (len(relnode_set) != 0):
                    majornode_data = ('drop-rel', nodeset[:], simple_sentences, relnode_set, processed_relnode, filtered_mod_pos)
                    (majornode_name, isNew) = decoder_graph.create_majornode(majornode_data)
                    return (majornode_name, isNew)
        if (operval <= type_val['drop-mod']):
            if (opertype in self.DISCOURSE_SENTENCE_MODEL):
                processed_mod_pos = (processed_candidates[:] if (opertype == 'drop-mod') else [])
                filtered_mod_pos = extra_data
                modcand_set = boxer_graph.extract_drop_mod_candidates(nodeset, main_sent_dict, self.ALLOWED_DROP_MOD, processed_mod_pos)
                if (len(modcand_set) != 0):
                    majornode_data = ('drop-mod', nodeset[:], simple_sentences, modcand_set, processed_mod_pos, filtered_mod_pos)
                    (majornode_name, isNew) = decoder_graph.create_majornode(majornode_data)
                    return (majornode_name, isNew)
        if (operval <= type_val['drop-ood']):
            if (opertype in self.DISCOURSE_SENTENCE_MODEL):
                processed_oodnodes = (processed_candidates if (opertype == 'drop-ood') else [])
                filtered_mod_pos = extra_data
                oodnode_candidates = boxer_graph.extract_ood_candidates(nodeset, processed_oodnodes)
                if (len(oodnode_candidates) != 0):
                    majornode_data = ('drop-ood', nodeset[:], simple_sentences, oodnode_candidates, processed_oodnodes, filtered_mod_pos)
                    (majornode_name, isNew) = decoder_graph.create_majornode(majornode_data)
                    return (majornode_name, isNew)
        # No applicable operation left: terminal node.
        filtered_mod_pos = extra_data[:]
        majornode_data = ('fin', nodeset[:], simple_sentences, filtered_mod_pos)
        (majornode_name, isNew) = decoder_graph.create_majornode(majornode_data)
        return (majornode_name, isNew)

    def start_probability_update(self, main_sentence, main_sent_dict, boxer_graph, decoder_graph):
        """Propagate probabilities bottom-up from all terminal nodes."""
        node_probability_dict = {}
        potential_edges = []
        bottom_nodes = decoder_graph.find_all_fin_majornode()
        nodes_to_process = bottom_nodes[:]
        while (len(nodes_to_process) != 0):
            (nodes_to_process, node_probability_dict, potential_edges) = self.bottom_up_probability_update(nodes_to_process, node_probability_dict, potential_edges, main_sentence, main_sent_dict, boxer_graph, decoder_graph)
        return (node_probability_dict, potential_edges)

    def bottom_up_probability_update(self, nodes_to_process, node_probability_dict, potential_edges, main_sentence, main_sent_dict, boxer_graph, decoder_graph):
        """Process the head of the worklist: score it, then enqueue any parent
        whose children are all already scored or queued."""
        node_to_process = nodes_to_process[0]
        if node_to_process.startswith('MN'):
            # Major node: take the max over its operation children.
            if (decoder_graph.get_majornode_type(node_to_process) == 'fin'):
                node_probability_dict[node_to_process] = 1
            else:
                children_oper_nodes = decoder_graph.find_children_of_majornode(node_to_process)
                probability_children = [(node_probability_dict[child], child) for child in children_oper_nodes]
                # Descending sort: best (probability, name) pair first; ties
                # break on node name.
                probability_children.sort(reverse=True)
                node_probability_dict[node_to_process] = probability_children[0][0]
                potential_edges.append((node_to_process, probability_children[0][1]))
            parents_oper_nodes = decoder_graph.find_parents_of_majornode(node_to_process)
            for parent_oper_node in parents_oper_nodes:
                if ((parent_oper_node not in nodes_to_process) and (parent_oper_node not in node_probability_dict)):
                    children_major_nodes = decoder_graph.find_children_of_opernode(parent_oper_node)
                    flag = True
                    for child_major_node in children_major_nodes:
                        if ((child_major_node not in nodes_to_process) and (child_major_node not in node_probability_dict)):
                            flag = False
                            break
                    if (flag == True):
                        nodes_to_process.append(parent_oper_node)
        else:
            # Oper node: its own probability times the product of its children.
            prob_oper_node = self.fetch_probability(node_to_process, main_sentence, main_sent_dict, boxer_graph, decoder_graph)
            children_major_nodes = decoder_graph.find_children_of_opernode(node_to_process)
            for child in children_major_nodes:
                prob_oper_node = (prob_oper_node * node_probability_dict[child])
                potential_edges.append((node_to_process, child))
            node_probability_dict[node_to_process] = prob_oper_node
            parent_major_node = decoder_graph.find_parent_of_opernode(node_to_process)
            if ((parent_major_node not in nodes_to_process) and (parent_major_node not in node_probability_dict)):
                children_oper_nodes = decoder_graph.find_children_of_majornode(parent_major_node)
                flag = True
                for child_oper_node in children_oper_nodes:
                    if ((child_oper_node not in nodes_to_process) and (child_oper_node not in node_probability_dict)):
                        flag = False
                        break
                if (flag == True):
                    nodes_to_process.append(parent_major_node)
        return (nodes_to_process[1:], node_probability_dict, potential_edges)

    def fetch_probability(self, oper_node, main_sentence, main_sent_dict, boxer_graph, decoder_graph):
        """Look up the probability of an operation node from the learned tables.

        Unseen features default to 0.5 (uninformative).
        """
        oper_node_type = decoder_graph.get_opernode_type(oper_node)
        if (oper_node_type == 'split'):
            parent_major_node = decoder_graph.find_parent_of_opernode(oper_node)
            parent_nodeset = decoder_graph.get_majornode_nodeset(parent_major_node)
            parent_filtered_mod_pos = decoder_graph.get_majornode_filtered_postions(parent_major_node)
            parent_sentence = boxer_graph.extract_main_sentence(parent_nodeset, main_sent_dict, parent_filtered_mod_pos)
            children_major_nodes = decoder_graph.find_children_of_opernode(oper_node)
            children_sentences = []
            for child_major_node in children_major_nodes:
                child_nodeset = decoder_graph.get_majornode_nodeset(child_major_node)
                child_filtered_mod_pos = decoder_graph.get_majornode_filtered_postions(child_major_node)
                child_sentence = boxer_graph.extract_main_sentence(child_nodeset, main_sent_dict, child_filtered_mod_pos)
                children_sentences.append(child_sentence)
            total_probability = 1
            split_candidate = decoder_graph.get_opernode_oper_candidate(oper_node)
            if (split_candidate != None):
                # Applied split: probability of this candidate being 'true'.
                split_feature = self.method_feature_extract.get_split_feature(split_candidate, parent_sentence, children_sentences, boxer_graph)
                if (split_feature in self.probability_tables['split']):
                    total_probability = self.probability_tables['split'][split_feature]['true']
                else:
                    total_probability = 0.5
                return total_probability
            else:
                # No split applied: probability that each candidate is 'false'.
                not_applied_cands = decoder_graph.get_opernode_failed_oper_candidates(oper_node)
                for split_candidate_left in not_applied_cands:
                    split_feature_left = self.method_feature_extract.get_split_feature(split_candidate_left, parent_sentence, children_sentences, boxer_graph)
                    if (split_feature_left in self.probability_tables['split']):
                        total_probability = (total_probability * self.probability_tables['split'][split_feature_left]['false'])
                    else:
                        total_probability = (total_probability * 0.5)
                return total_probability
        elif (oper_node_type == 'drop-rel'):
            parent_major_node = decoder_graph.find_parent_of_opernode(oper_node)
            parent_nodeset = decoder_graph.get_majornode_nodeset(parent_major_node)
            rel_candidate = decoder_graph.get_opernode_oper_candidate(oper_node)
            drop_rel_feature = self.method_feature_extract.get_drop_rel_feature(rel_candidate, parent_nodeset, main_sent_dict, boxer_graph)
            isDropped = decoder_graph.get_opernode_drop_result(oper_node)
            dropVal = ('true' if (isDropped == 'True') else 'false')
            prob_value = 0
            if (drop_rel_feature in self.probability_tables['drop-rel']):
                prob_value = self.probability_tables['drop-rel'][drop_rel_feature][dropVal]
            else:
                prob_value = 0.5
            return prob_value
        elif (oper_node_type == 'drop-mod'):
            mod_candidate = decoder_graph.get_opernode_oper_candidate(oper_node)
            drop_mod_feature = self.method_feature_extract.get_drop_mod_feature(mod_candidate, main_sent_dict, boxer_graph)
            isDropped = decoder_graph.get_opernode_drop_result(oper_node)
            dropVal = ('true' if (isDropped == 'True') else 'false')
            prob_value = 0
            if (drop_mod_feature in self.probability_tables['drop-mod']):
                prob_value = self.probability_tables['drop-mod'][drop_mod_feature][dropVal]
            else:
                prob_value = 0.5
            return prob_value
        elif (oper_node_type == 'drop-ood'):
            parent_major_node = decoder_graph.find_parent_of_opernode(oper_node)
            parent_nodeset = decoder_graph.get_majornode_nodeset(parent_major_node)
            ood_candidate = decoder_graph.get_opernode_oper_candidate(oper_node)
            drop_ood_feature = self.method_feature_extract.get_drop_ood_feature(ood_candidate, parent_nodeset, main_sent_dict, boxer_graph)
            isDropped = decoder_graph.get_opernode_drop_result(oper_node)
            dropVal = ('true' if (isDropped == 'True') else 'false')
            prob_value = 0
            if (drop_ood_feature in self.probability_tables['drop-ood']):
                prob_value = self.probability_tables['drop-ood'][drop_ood_feature][dropVal]
            else:
                prob_value = 0.5
            return prob_value

    def create_filtered_decoder_graph(self, potential_edges, main_sentence, main_sent_dict, boxer_graph, decoder_graph):
        """Copy only the nodes/edges reachable from the root ('MN-1') along the
        selected *potential_edges*, yielding the best-path subgraph."""
        filtered_decoder_graph = Training_Graph()
        root_major_node = 'MN-1'
        filtered_decoder_graph.major_nodes['MN-1'] = copy.copy(decoder_graph.major_nodes['MN-1'])
        nodes_to_process = [root_major_node]
        while (len(nodes_to_process) != 0):
            node_to_process = nodes_to_process[0]
            if node_to_process.startswith('MN'):
                filtered_decoder_graph.major_nodes[node_to_process] = copy.copy(decoder_graph.major_nodes[node_to_process])
            else:
                filtered_decoder_graph.oper_nodes[node_to_process] = copy.copy(decoder_graph.oper_nodes[node_to_process])
            for edge in potential_edges:
                if (edge[0] == node_to_process):
                    dependent = edge[1]
                    nodes_to_process.append(dependent)
                    for tedge in decoder_graph.edges:
                        if ((tedge[0] == node_to_process) and (tedge[1] == dependent)):
                            filtered_decoder_graph.edges.append(copy.copy(tedge))
            nodes_to_process = nodes_to_process[1:]
        return filtered_decoder_graph
def create_r_action(action):
    """Build a rule-action factory for *action*.

    The returned ``r_action(tn, t)`` produces a one-element rule list
    ``[('action', -1, fn)]`` where ``fn(world, n)`` attempts
    ``world.state_transition(action)`` and reports ``(token_hit, n, ok)``.
    """
    def r_action(tn, t):
        token_hit = tn
        def fn(world, n):
            # Call-budget guard.  The original version built this tuple
            # without `return`, so the guard was a no-op -- fixed here.
            if (n > MAX_FUNC_CALL):
                return (token_hit, n, False)
            try:
                world.state_transition(action)
            except Exception:
                # Best-effort transition: any failure is reported as an
                # unsuccessful application rather than propagated.
                return (token_hit, n, False)
            else:
                return (token_hit, n, True)
        return [('action', (- 1), fn)]
    return r_action
class xDeepFM(BaseModel):
    """xDeepFM recommender model.

    Combines three branches whose logits are summed before the output
    activation: a linear model, a Compressed Interaction Network (CIN) for
    explicit vector-wise feature interactions, and a plain DNN for implicit
    interactions.
    """
    def __init__(self, linear_feature_columns, dnn_feature_columns, dnn_hidden_units=(256, 256), cin_layer_size=(256, 128), cin_split_half=True, cin_activation='relu', l2_reg_linear=1e-05, l2_reg_embedding=1e-05, l2_reg_dnn=0, l2_reg_cin=0, init_std=0.0001, seed=1024, dnn_dropout=0, dnn_activation='relu', dnn_use_bn=False, task='binary', device='cpu', gpus=None):
        super(xDeepFM, self).__init__(linear_feature_columns, dnn_feature_columns, l2_reg_linear=l2_reg_linear, l2_reg_embedding=l2_reg_embedding, init_std=init_std, seed=seed, task=task, device=device, gpus=gpus)
        self.dnn_hidden_units = dnn_hidden_units
        # DNN branch is built only if there are features AND hidden layers.
        self.use_dnn = ((len(dnn_feature_columns) > 0) and (len(dnn_hidden_units) > 0))
        if self.use_dnn:
            self.dnn = DNN(self.compute_input_dim(dnn_feature_columns), dnn_hidden_units, activation=dnn_activation, l2_reg=l2_reg_dnn, dropout_rate=dnn_dropout, use_bn=dnn_use_bn, init_std=init_std, device=device)
            self.dnn_linear = nn.Linear(dnn_hidden_units[(- 1)], 1, bias=False).to(device)
            # Regularize DNN weights (excluding batch-norm parameters).
            self.add_regularization_weight(filter((lambda x: (('weight' in x[0]) and ('bn' not in x[0]))), self.dnn.named_parameters()), l2=l2_reg_dnn)
            self.add_regularization_weight(self.dnn_linear.weight, l2=l2_reg_dnn)
        self.cin_layer_size = cin_layer_size
        # CIN branch is built only if layer sizes and features are present.
        self.use_cin = ((len(self.cin_layer_size) > 0) and (len(dnn_feature_columns) > 0))
        if self.use_cin:
            field_num = len(self.embedding_dict)
            if (cin_split_half == True):
                # With split_half, only half of each intermediate layer's
                # feature maps are routed to the output; the last layer
                # contributes in full.
                self.featuremap_num = ((sum(cin_layer_size[:(- 1)]) // 2) + cin_layer_size[(- 1)])
            else:
                self.featuremap_num = sum(cin_layer_size)
            self.cin = CIN(field_num, cin_layer_size, cin_activation, cin_split_half, l2_reg_cin, seed, device=device)
            self.cin_linear = nn.Linear(self.featuremap_num, 1, bias=False).to(device)
            self.add_regularization_weight(filter((lambda x: ('weight' in x[0])), self.cin.named_parameters()), l2=l2_reg_cin)
        self.to(device)
    def forward(self, X):
        """Compute predictions by summing the logits of the active branches."""
        (sparse_embedding_list, dense_value_list) = self.input_from_feature_columns(X, self.dnn_feature_columns, self.embedding_dict)
        linear_logit = self.linear_model(X)
        if self.use_cin:
            cin_input = torch.cat(sparse_embedding_list, dim=1)
            cin_output = self.cin(cin_input)
            cin_logit = self.cin_linear(cin_output)
        if self.use_dnn:
            dnn_input = combined_dnn_input(sparse_embedding_list, dense_value_list)
            dnn_output = self.dnn(dnn_input)
            dnn_logit = self.dnn_linear(dnn_output)
        # Combine whichever branches are enabled; the linear logit is
        # always included.
        if ((len(self.dnn_hidden_units) == 0) and (len(self.cin_layer_size) == 0)):
            final_logit = linear_logit
        elif ((len(self.dnn_hidden_units) == 0) and (len(self.cin_layer_size) > 0)):
            final_logit = (linear_logit + cin_logit)
        elif ((len(self.dnn_hidden_units) > 0) and (len(self.cin_layer_size) == 0)):
            final_logit = (linear_logit + dnn_logit)
        elif ((len(self.dnn_hidden_units) > 0) and (len(self.cin_layer_size) > 0)):
            final_logit = ((linear_logit + dnn_logit) + cin_logit)
        else:
            raise NotImplementedError
        y_pred = self.out(final_logit)
        return y_pred
def create_train_parser(base_parser: argparse.ArgumentParser) -> argparse.ArgumentParser:
    """Build the argument parser for TAPE training runs.

    Extends *base_parser* with task selection, optimization hyperparameters
    (LR, batch size, epochs, warmup, gradient accumulation, fp16/loss
    scaling, grad clipping), and bookkeeping options (logging, eval/save
    frequency, early-stopping patience, checkpoint resume).
    """
    parser = argparse.ArgumentParser(description='Run Training on the TAPE datasets', parents=[base_parser])
    parser.add_argument('task', choices=list(registry.task_name_mapping.keys()), help='TAPE Task to train/eval on')
    parser.add_argument('--learning_rate', default=0.0001, type=float, help='Learning rate')
    parser.add_argument('--batch_size', default=1024, type=int, help='Batch size')
    parser.add_argument('--data_dir', default='./data', type=utils.check_is_dir, help='Directory from which to load task data')
    parser.add_argument('--num_train_epochs', default=10, type=int, help='Number of training epochs')
    parser.add_argument('--num_log_iter', default=20, type=int, help='Number of training steps per log iteration')
    parser.add_argument('--fp16', action='store_true', help='Whether to use fp16 weights')
    parser.add_argument('--warmup_steps', default=10000, type=int, help='Number of learning rate warmup steps')
    parser.add_argument('--gradient_accumulation_steps', default=1, type=int, help='Number of forward passes to make for each backwards pass')
    parser.add_argument('--loss_scale', default=0, type=int, help='Loss scaling. Only used during fp16 training.')
    parser.add_argument('--max_grad_norm', default=1.0, type=float, help='Maximum gradient norm')
    parser.add_argument('--exp_name', default=None, type=str, help='Name to give to this experiment')
    parser.add_argument('--from_pretrained', default=None, type=str, help='Directory containing config and pretrained model weights')
    parser.add_argument('--log_dir', default='./logs', type=str)
    parser.add_argument('--eval_freq', type=int, default=1, help='Frequency of eval pass. A value <= 0 means the eval pass is not run')
    parser.add_argument('--save_freq', default=1, type=utils.int_or_str, help="How often to save the model during training. Either an integer frequency or the string 'improvement'")
    parser.add_argument('--patience', default=(- 1), type=int, help='How many epochs without improvement to wait before ending training')
    parser.add_argument('--resume_from_checkpoint', action='store_true', help='whether to resume training from the checkpoint')
    return parser
# NOTE(review): the decorator line was garbled in the original
# ('_numpy_output(check_dtype=True)' with no '@'), which is a syntax error.
# Restored to the DaCe test helper that compares the compiled result (and
# its dtype) against plain NumPy -- confirm the exact helper name upstream.
@compare_numpy_output(check_dtype=True)
def test_ufunc_negative_f(A: dace.float32[10]):
    # Elementwise negation of a float32 vector.
    return np.negative(A)
def return_emb2(k, j):
    """Return ``j`` consecutive embedding rows starting at global row ``k``.

    Embeddings are stored in an HDF5 file in chunks of 32 rows per dataset
    ('dataset_0', 'dataset_32', ...).  When the requested window crosses a
    chunk boundary, the tail of one chunk is concatenated with the head of
    the next.

    NOTE(review): assumes j <= 32 so at most two chunks are needed, and
    that the file contains a chunk at k+32 when the window spills over --
    confirm against the writer.
    """
    config = Config()
    outfile = ((config.emb + config.data) + '2')
    requested = k                      # original (unaligned) start row
    # Align k down to the 32-row chunk that contains it.
    if ((k % 32) != 0):
        k = (k - (k % 32))
    offset = (requested % 32)          # start position inside the chunk
    # Close the HDF5 handle deterministically (the original leaked it).
    with h5py.File((outfile + '.h5'), 'r') as hf:
        chunk = np.array(hf.get(('dataset_' + str(k))))
        if ((offset + j) > 32):
            # Window spills into the next chunk: stitch the two together.
            nxt = np.array(hf.get(('dataset_' + str((k + 32)))))
            return np.concatenate((chunk[offset:32], nxt[0:(j - (32 - offset))]), axis=0)
        return chunk[offset:(offset + j)]
def get_parallel_factor(k, lamada, sequence, phyche_value):
    """Compute the PseKNC parallel-correlation factors theta_1..theta_lamada.

    For each tier ``i`` the mean correlation is taken over every pair of
    k-mers in *sequence* that are ``i`` positions apart, using the
    physicochemical table *phyche_value*.
    """
    seq_len = len(sequence)
    factors = []
    for tier in range(1, lamada + 1):
        n_pairs = seq_len - k - tier + 1
        total = 0.0
        for start in range(n_pairs):
            first = sequence[start:start + k]
            second = sequence[start + tier:start + tier + k]
            total += parallel_cor_function(first, second, phyche_value)
        # Average over all pairs at this tier.
        factors.append(total / n_pairs)
    return factors
def remove_specific_requirements(reqs):
    """Filter out requirements that must be skipped in this environment.

    Currently 'fasttext' is dropped when building on Read the Docs
    (detected via the READTHEDOCS environment variable); everything else
    passes through unchanged.
    """
    on_rtd = ('READTHEDOCS' in os.environ)
    excluded = {'fasttext': on_rtd}
    # Keep a requirement unless its name (the part before '==') is
    # explicitly excluded for this environment.
    return [req for req in reqs if not excluded.get(req.split('==')[0], False)]
def _vggface2_nonmates():
    """Return one image per subject from VGGFace2 as the non-mated set.

    The NumPy RNG is seeded first so the per-subject draw is reproducible
    across runs.
    """
    np.random.seed(42)
    # NOTE(review): hard-coded cluster path -- assumes /proj/janus6 is mounted.
    return VGGFace2('/proj/janus6/vggface2').take_per_subject(1)
def F0_close(x, y):
    """Series expansion of a log-integral quantity for ``y`` close to ``x``.

    Expands in the small difference ``e = y - x`` and subtracts ``x**2``
    at the end.  NOTE(review): the expansion coefficients are taken as-is
    from the original implementation -- verify against the derivation.
    """
    e = (y - x)
    # Accumulate the expansion term by term (same evaluation order as the
    # original single expression, so floating-point results match exactly).
    series = (- x) * e
    series += ((1 / 6) * ((x ** 2) - 2)) * (e ** 2)
    series -= (1 / 180) * (((x ** 4) + (2 * (x ** 2))) - 8)
    series += np.log((2 * e) / np.sqrt(np.pi))
    return series - (x ** 2)
class Attention(nn.Module):
    """Multi-head self-attention with optional spatial reduction (PVT-style).

    When ``sr_ratio > 1`` the key/value sequence is spatially downsampled
    by a strided convolution before attention, reducing the quadratic cost
    of full attention over long token sequences.
    """
    def __init__(self, dim, num_heads=8, qkv_bias=False, qk_scale=None, attn_drop=0.0, proj_drop=0.0, sr_ratio=1):
        super().__init__()
        assert ((dim % num_heads) == 0), f'dim {dim} should be divided by num_heads {num_heads}.'
        self.dim = dim
        self.num_heads = num_heads
        head_dim = (dim // num_heads)
        # Default attention scale is 1/sqrt(head_dim) unless overridden.
        self.scale = (qk_scale or (head_dim ** (- 0.5)))
        self.q = nn.Linear(dim, dim, bias=qkv_bias)
        self.kv = nn.Linear(dim, (dim * 2), bias=qkv_bias)
        self.attn_drop = nn.Dropout(attn_drop)
        self.proj = nn.Linear(dim, dim)
        self.proj_drop = nn.Dropout(proj_drop)
        self.sr_ratio = sr_ratio
        if (sr_ratio > 1):
            # Spatial-reduction conv + norm, applied to keys/values only.
            self.sr = nn.Conv2d(dim, dim, kernel_size=sr_ratio, stride=sr_ratio)
            self.norm = nn.LayerNorm(dim)
    def forward(self, x, H, W):
        """x: (B, N, C) token sequence with N == H * W."""
        (B, N, C) = x.shape
        q = self.q(x).reshape(B, N, self.num_heads, (C // self.num_heads)).permute(0, 2, 1, 3).contiguous()
        if (self.sr_ratio > 1):
            # Fold tokens back to a feature map, downsample, and unfold.
            x_ = x.permute(0, 2, 1).contiguous().reshape(B, C, H, W)
            x_ = self.sr(x_).reshape(B, C, (- 1)).permute(0, 2, 1).contiguous()
            x_ = self.norm(x_)
            kv = self.kv(x_).reshape(B, (- 1), 2, self.num_heads, (C // self.num_heads)).permute(2, 0, 3, 1, 4).contiguous()
        else:
            kv = self.kv(x).reshape(B, (- 1), 2, self.num_heads, (C // self.num_heads)).permute(2, 0, 3, 1, 4).contiguous()
        (k, v) = (kv[0], kv[1])
        # Scaled dot-product attention.  The original dropped the '@'
        # matmul operator here (a syntax error) -- restored.
        attn = ((q @ k.transpose((- 2), (- 1)).contiguous()) * self.scale)
        attn = attn.softmax(dim=(- 1))
        attn = self.attn_drop(attn)
        # Weighted sum of values (second missing '@' restored), then merge
        # heads back into the channel dimension.
        x = (attn @ v).transpose(1, 2).contiguous().reshape(B, N, C)
        x = self.proj(x)
        x = self.proj_drop(x)
        return x
def test_combine_workspace_deepcopied(workspace_factory):
    """Renaming channels must deep-copy the workspace: mutating nested
    structures of the renamed workspace must not leak into the original."""
    ws = workspace_factory()
    new_ws = ws.rename(channels={channel: f'renamed_{channel}' for channel in ws.channels})
    # Mutate deeply-nested values of the copy only.
    new_ws.get_measurement(measurement_name='GaussExample')['config']['parameters'][0]['bounds'] = [[0.0, 1.0]]
    new_ws['observations'][0]['data'][0] = (- 10.0)
    # The original workspace must be unchanged by either mutation.
    assert (ws.get_measurement(measurement_name='GaussExample')['config']['parameters'][0]['bounds'] != new_ws.get_measurement(measurement_name='GaussExample')['config']['parameters'][0]['bounds'])
    assert (ws['observations'][0]['data'] != new_ws['observations'][0]['data'])
def test_unflatten_raises_for_invalid_shape() -> None:
    """The ``unflatten`` closure must reject a tensor whose shape does not
    match the flattened tensor it was created for (here, the original
    un-flattened 4-D tensor is passed back in)."""
    x_old = tf.random.uniform([2, 3, 4, 5])
    (flat_x_old, unflatten) = flatten_leading_dims(x_old)
    with pytest.raises(TF_DEBUGGING_ERROR_TYPES):
        unflatten(x_old)
def train_one_epoch(model: torch.nn.Module, criterion: torch.nn.Module, data_loader: Iterable, optimizer: torch.optim.Optimizer, device: torch.device, epoch: int, loss_scaler, args, max_norm: float=0, model_ema: Optional[ModelEma]=None, mixup_fn: Optional[Mixup]=None, saver=None, start_steps=None, lr_schedule_values=None, wd_schedule_values=None, num_training_steps_per_epoch=None, update_freq=None, use_amp=False):
    """Run one training epoch with gradient accumulation and optional AMP.

    Supports per-step LR/weight-decay schedules (indexed by global step),
    mixup, model EMA, distributed loss reduction for logging, and periodic
    recovery checkpoints.  Returns an OrderedDict with the average loss.
    """
    batch_time_m = AverageMeter()
    data_time_m = AverageMeter()
    losses_m = AverageMeter()
    model.train(True)
    optimizer.zero_grad()
    end = time.time()
    last_idx = (len(data_loader) - 1)
    for (data_iter_step, (samples, targets)) in enumerate(data_loader):
        last_batch = (data_iter_step == last_idx)
        data_time_m.update((time.time() - end))
        # One optimizer update per `update_freq` mini-batches.
        step = (data_iter_step // update_freq)
        if (step >= num_training_steps_per_epoch):
            continue
        # Global step index used to read the LR/WD schedules.
        it = (start_steps + step)
        if ((lr_schedule_values is not None) or ((wd_schedule_values is not None) and ((data_iter_step % update_freq) == 0))):
            for (i, param_group) in enumerate(optimizer.param_groups):
                if (lr_schedule_values is not None):
                    # Param groups tagged with 'flag' use a 10x smaller LR.
                    if ('flag' in param_group.keys()):
                        param_group['lr'] = ((0.1 * lr_schedule_values[it]) * param_group['lr_scale'])
                    else:
                        param_group['lr'] = (lr_schedule_values[it] * param_group['lr_scale'])
                if ((wd_schedule_values is not None) and (param_group['weight_decay'] > 0)):
                    param_group['weight_decay'] = wd_schedule_values[it]
        samples = samples.to(device, non_blocking=True)
        targets = targets.to(device, non_blocking=True)
        if (mixup_fn is not None):
            (samples, targets) = mixup_fn(samples, targets)
        if use_amp:
            with torch.cuda.amp.autocast():
                output = model(samples)
                loss = criterion(output, targets)
        else:
            output = model(samples)
            loss = criterion(output, targets)
        loss_value = loss.item()
        if (not args.distributed):
            losses_m.update(loss.item(), samples.size(0))
        # Abort on NaN/Inf loss rather than continuing with poisoned weights.
        if (not math.isfinite(loss_value)):
            print('Loss is {}, stopping training'.format(loss_value))
            assert math.isfinite(loss_value)
        if use_amp:
            is_second_order = (hasattr(optimizer, 'is_second_order') and optimizer.is_second_order)
            # Scale the loss down so accumulated gradients average correctly.
            loss /= update_freq
            grad_norm = loss_scaler(loss, optimizer, clip_grad=max_norm, parameters=model.parameters(), create_graph=is_second_order, update_grad=(((data_iter_step + 1) % update_freq) == 0))
            if (((data_iter_step + 1) % update_freq) == 0):
                optimizer.zero_grad()
                if (model_ema is not None):
                    model_ema.update(model)
        else:
            loss /= update_freq
            loss.backward()
            # Step only on accumulation boundaries.
            if (((data_iter_step + 1) % update_freq) == 0):
                optimizer.step()
                optimizer.zero_grad()
                if (model_ema is not None):
                    model_ema.update(model)
        if args.distributed:
            # Optional per-step weight clipping hook on the model.
            if hasattr(model.module, 'clipping'):
                model.module.clipping()
        elif hasattr(model, 'clipping'):
            model.clipping()
        torch.cuda.synchronize()
        batch_time_m.update((time.time() - end))
        if (last_batch or ((data_iter_step % args.log_interval) == 0)):
            # Log the mean LR across param groups plus throughput stats.
            lrl = [param_group['lr'] for param_group in optimizer.param_groups]
            lr = (sum(lrl) / len(lrl))
            if args.distributed:
                reduced_loss = reduce_tensor(loss.data, args.world_size)
                losses_m.update((reduced_loss.item() * update_freq), samples.size(0))
            print('Train: {} [{:>4d}/{} ({:>3.0f}%)] Loss: {loss.val:#.4g} ({loss.avg:#.3g}) Time: {batch_time.val:.3f}s, {rate:>7.2f}/s ({batch_time.avg:.3f}s, {rate_avg:>7.2f}/s) LR: {lr:.3e} Data: {data_time.val:.3f} ({data_time.avg:.3f})'.format(epoch, data_iter_step, len(data_loader), ((100.0 * data_iter_step) / last_idx), loss=losses_m, batch_time=batch_time_m, rate=((samples.size(0) * args.world_size) / batch_time_m.val), rate_avg=((samples.size(0) * args.world_size) / batch_time_m.avg), lr=lr, data_time=data_time_m))
        if ((saver is not None) and args.recovery_interval and (last_batch or (((data_iter_step + 1) % args.recovery_interval) == 0))):
            saver.save_recovery(epoch, batch_idx=data_iter_step)
        end = time.time()
    return OrderedDict([('loss', losses_m.avg)])
def extract_nums(s):
    """Extract all numeric literals from *s*, returned as int or float.

    Thousands separators (commas) are removed first; signs, decimals and
    scientific notation are supported.  Unlike the original eval()-based
    version this never evaluates arbitrary text, and it no longer drops a
    bare '0' (stripping leading zeros turned '0' into '' and the resulting
    error was silently swallowed).
    """
    s = s.replace(',', '')
    tokens = re.findall('[+-]? *(?:\\d+(?:\\.\\d*)?|\\.\\d+)(?:[eE][+-]?\\d+)?', s)
    values = []
    for token in tokens:
        # The pattern may capture spaces between the sign and the digits.
        token = token.replace(' ', '')
        try:
            # Prefer int so plain integers stay ints (matching eval('7')).
            values.append(int(token))
        except ValueError:
            try:
                values.append(float(token))
            except ValueError:
                # Unparseable match: skip it, as the original did.
                pass
    return values
def _find_nn(syn: pd.DataFrame, ori: pd.DataFrame, n_jobs: int, n_neighbors: int) -> np.ndarray:
    """Return, for each original record, the indices of its ``n_neighbors``
    nearest synthetic records under the mixed-type distance.
    """
    nn = MixedTypeKNeighbors(n_jobs=n_jobs, n_neighbors=n_neighbors)
    # Promote a Series to a single-column frame so fit/kneighbors always
    # receive 2-D input.
    if (syn.ndim == 1):
        syn = syn.to_frame()
    if (ori.ndim == 1):
        ori = ori.to_frame()
    nn.fit(syn)
    # Indices only (no distances), shape (len(ori), n_neighbors).
    return cast(np.ndarray, nn.kneighbors(ori, return_distance=False))
def get_pitch_classes(fifths: int) -> List[int]:
    """Map each of the 12 semitones to a diatonic step index (0-6).

    Keys on the sharp side of the circle of fifths (``fifths >= 0``) spell
    chromatic notes as raised steps; flat-side keys spell them as lowered
    steps, which shifts several semitones to the next step index.
    """
    sharp_side = [0, 0, 1, 1, 2, 3, 3, 4, 4, 5, 5, 6]
    flat_side = [0, 1, 1, 2, 2, 3, 4, 4, 5, 5, 6, 6]
    return sharp_side if fifths >= 0 else flat_side
def export_gt_depths_kitti():
    """Export ground-truth depth maps for a KITTI eval split to an .npz file.

    For the 'eigen' split, depths are projected from raw velodyne scans;
    for 'eigen_benchmark', the official annotated depth PNGs are loaded
    (stored as uint16 with a 256x scale factor).
    """
    parser = argparse.ArgumentParser(description='export_gt_depth')
    parser.add_argument('--data_path', type=str, help='path to the root of the KITTI data', required=True)
    parser.add_argument('--split', type=str, help='which split to export gt from', default='eigen', choices=['eigen', 'eigen_benchmark'])
    opt = parser.parse_args()
    split_folder = os.path.join(os.path.dirname(__file__), 'splits', opt.split)
    lines = readlines(os.path.join(split_folder, 'test_files.txt'))
    print('Exporting ground truth depths for {}'.format(opt.split))
    gt_depths = []
    for line in lines:
        (folder, frame_id, _) = line.split()
        frame_id = int(frame_id)
        if (opt.split == 'eigen'):
            calib_dir = os.path.join(opt.data_path, folder.split('/')[0])
            velo_filename = os.path.join(opt.data_path, folder, 'velodyne_points/data', '{:010d}.bin'.format(frame_id))
            gt_depth = generate_depth_map(calib_dir, velo_filename, 2, True)
        elif (opt.split == 'eigen_benchmark'):
            gt_depth_path = os.path.join(opt.data_path, folder, 'proj_depth', 'groundtruth', 'image_02', '{:010d}.png'.format(frame_id))
            # PNG stores depth * 256 as uint16; divide to get metres.
            gt_depth = (np.array(pil.open(gt_depth_path)).astype(np.float32) / 256)
        gt_depths.append(gt_depth.astype(np.float32))
    output_path = os.path.join(split_folder, 'gt_depths.npz')
    # Report the actual output file (the original printed the split name).
    print('Saving to {}'.format(output_path))
    np.savez_compressed(output_path, data=np.array(gt_depths))
class SchemeHomset_toric_variety(SchemeHomset_generic):
    """Hom-set of morphisms between toric varieties.

    Elements can be constructed from polynomial tuples, ring maps,
    integer matrices (interpreted as fan morphisms on lattices), or
    :class:`FanMorphism` objects.
    """
    def __init__(self, X, Y, category=None, check=True, base=ZZ):
        SchemeHomset_generic.__init__(self, X, Y, category=category, check=check, base=base)
        from sage.schemes.toric.variety import is_ToricVariety
        if (is_ToricVariety(X) and is_ToricVariety(Y)):
            # Allow integer matrices of the right shape to be converted
            # into morphisms via fan morphisms between the lattices.
            self.register_conversion(MatrixSpace(ZZ, X.fan().dim(), Y.fan().dim()))
    def _element_constructor_(self, x, check=True):
        """Construct a morphism from *x* (polynomials, ring map, matrix,
        or fan morphism); raise TypeError otherwise."""
        from sage.schemes.toric.morphism import SchemeMorphism_polynomial_toric_variety
        if isinstance(x, (list, tuple)):
            # A tuple of polynomials defining the morphism coordinatewise.
            return SchemeMorphism_polynomial_toric_variety(self, x, check=check)
        from sage.categories.map import Map
        from sage.categories.rings import Rings
        if (isinstance(x, Map) and x.category_for().is_subcategory(Rings())):
            # A ring homomorphism: must go between the coordinate rings in
            # the contravariant direction.
            assert (x.domain() is self.codomain().coordinate_ring())
            assert (x.codomain() is self.domain().coordinate_ring())
            return SchemeMorphism_polynomial_toric_variety(self, x.im_gens(), check=check)
        if is_Matrix(x):
            # Interpret an integer matrix as a lattice map between fans.
            x = FanMorphism(x, self.domain().fan(), self.codomain().fan())
        if isinstance(x, FanMorphism):
            # Dominant fan morphisms get the richer morphism class.
            if x.is_dominant():
                from sage.schemes.toric.morphism import SchemeMorphism_fan_toric_variety_dominant
                return SchemeMorphism_fan_toric_variety_dominant(self, x, check=check)
            else:
                from sage.schemes.toric.morphism import SchemeMorphism_fan_toric_variety
                return SchemeMorphism_fan_toric_variety(self, x, check=check)
        raise TypeError('x must be a fan morphism or a list/tuple of polynomials')
    def _an_element_(self):
        """Return a sample element: the morphism induced by the zero matrix."""
        from sage.matrix.constructor import zero_matrix
        zero = zero_matrix(self.domain().dimension_relative(), self.codomain().dimension_relative())
        return self(zero)
class GoogleNet(Network):
    """GoogLeNet (Inception v1) built with the chained Network layer DSL.

    Each inception block is assembled from four parallel branches (1x1,
    1x1->3x3, 1x1->5x5, pool->1x1) concatenated on the channel axis.
    """
    def setup(self):
        # Stem: 7x7 conv, pool, LRN, 1x1+3x3 convs, LRN, pool; then the
        # 1x1 branch of inception 3a.
        self.feed('data').conv(7, 7, 64, 2, 2, name='conv1_7x7_s2').max_pool(3, 3, 2, 2, name='pool1_3x3_s2').lrn(2, 2e-05, 0.75, name='pool1_norm1').conv(1, 1, 64, 1, 1, name='conv2_3x3_reduce').conv(3, 3, 192, 1, 1, name='conv2_3x3').lrn(2, 2e-05, 0.75, name='conv2_norm2').max_pool(3, 3, 2, 2, name='pool2_3x3_s2').conv(1, 1, 64, 1, 1, name='inception_3a_1x1')
        # Inception 3a: remaining branches + concat, then 3b's 1x1 branch.
        self.feed('pool2_3x3_s2').conv(1, 1, 96, 1, 1, name='inception_3a_3x3_reduce').conv(3, 3, 128, 1, 1, name='inception_3a_3x3')
        self.feed('pool2_3x3_s2').conv(1, 1, 16, 1, 1, name='inception_3a_5x5_reduce').conv(5, 5, 32, 1, 1, name='inception_3a_5x5')
        self.feed('pool2_3x3_s2').max_pool(3, 3, 1, 1, name='inception_3a_pool').conv(1, 1, 32, 1, 1, name='inception_3a_pool_proj')
        self.feed('inception_3a_1x1', 'inception_3a_3x3', 'inception_3a_5x5', 'inception_3a_pool_proj').concat(3, name='inception_3a_output').conv(1, 1, 128, 1, 1, name='inception_3b_1x1')
        # Inception 3b, followed by a stride-2 pool into stage 4.
        self.feed('inception_3a_output').conv(1, 1, 128, 1, 1, name='inception_3b_3x3_reduce').conv(3, 3, 192, 1, 1, name='inception_3b_3x3')
        self.feed('inception_3a_output').conv(1, 1, 32, 1, 1, name='inception_3b_5x5_reduce').conv(5, 5, 96, 1, 1, name='inception_3b_5x5')
        self.feed('inception_3a_output').max_pool(3, 3, 1, 1, name='inception_3b_pool').conv(1, 1, 64, 1, 1, name='inception_3b_pool_proj')
        self.feed('inception_3b_1x1', 'inception_3b_3x3', 'inception_3b_5x5', 'inception_3b_pool_proj').concat(3, name='inception_3b_output').max_pool(3, 3, 2, 2, name='pool3_3x3_s2').conv(1, 1, 192, 1, 1, name='inception_4a_1x1')
        # Inception 4a.
        self.feed('pool3_3x3_s2').conv(1, 1, 96, 1, 1, name='inception_4a_3x3_reduce').conv(3, 3, 208, 1, 1, name='inception_4a_3x3')
        self.feed('pool3_3x3_s2').conv(1, 1, 16, 1, 1, name='inception_4a_5x5_reduce').conv(5, 5, 48, 1, 1, name='inception_4a_5x5')
        self.feed('pool3_3x3_s2').max_pool(3, 3, 1, 1, name='inception_4a_pool').conv(1, 1, 64, 1, 1, name='inception_4a_pool_proj')
        self.feed('inception_4a_1x1', 'inception_4a_3x3', 'inception_4a_5x5', 'inception_4a_pool_proj').concat(3, name='inception_4a_output').conv(1, 1, 160, 1, 1, name='inception_4b_1x1')
        # Inception 4b.
        self.feed('inception_4a_output').conv(1, 1, 112, 1, 1, name='inception_4b_3x3_reduce').conv(3, 3, 224, 1, 1, name='inception_4b_3x3')
        self.feed('inception_4a_output').conv(1, 1, 24, 1, 1, name='inception_4b_5x5_reduce').conv(5, 5, 64, 1, 1, name='inception_4b_5x5')
        self.feed('inception_4a_output').max_pool(3, 3, 1, 1, name='inception_4b_pool').conv(1, 1, 64, 1, 1, name='inception_4b_pool_proj')
        self.feed('inception_4b_1x1', 'inception_4b_3x3', 'inception_4b_5x5', 'inception_4b_pool_proj').concat(3, name='inception_4b_output').conv(1, 1, 128, 1, 1, name='inception_4c_1x1')
        # Inception 4c.
        self.feed('inception_4b_output').conv(1, 1, 128, 1, 1, name='inception_4c_3x3_reduce').conv(3, 3, 256, 1, 1, name='inception_4c_3x3')
        self.feed('inception_4b_output').conv(1, 1, 24, 1, 1, name='inception_4c_5x5_reduce').conv(5, 5, 64, 1, 1, name='inception_4c_5x5')
        self.feed('inception_4b_output').max_pool(3, 3, 1, 1, name='inception_4c_pool').conv(1, 1, 64, 1, 1, name='inception_4c_pool_proj')
        self.feed('inception_4c_1x1', 'inception_4c_3x3', 'inception_4c_5x5', 'inception_4c_pool_proj').concat(3, name='inception_4c_output').conv(1, 1, 112, 1, 1, name='inception_4d_1x1')
        # Inception 4d.
        self.feed('inception_4c_output').conv(1, 1, 144, 1, 1, name='inception_4d_3x3_reduce').conv(3, 3, 288, 1, 1, name='inception_4d_3x3')
        self.feed('inception_4c_output').conv(1, 1, 32, 1, 1, name='inception_4d_5x5_reduce').conv(5, 5, 64, 1, 1, name='inception_4d_5x5')
        self.feed('inception_4c_output').max_pool(3, 3, 1, 1, name='inception_4d_pool').conv(1, 1, 64, 1, 1, name='inception_4d_pool_proj')
        self.feed('inception_4d_1x1', 'inception_4d_3x3', 'inception_4d_5x5', 'inception_4d_pool_proj').concat(3, name='inception_4d_output').conv(1, 1, 256, 1, 1, name='inception_4e_1x1')
        # Inception 4e, followed by a stride-2 pool into stage 5.
        self.feed('inception_4d_output').conv(1, 1, 160, 1, 1, name='inception_4e_3x3_reduce').conv(3, 3, 320, 1, 1, name='inception_4e_3x3')
        self.feed('inception_4d_output').conv(1, 1, 32, 1, 1, name='inception_4e_5x5_reduce').conv(5, 5, 128, 1, 1, name='inception_4e_5x5')
        self.feed('inception_4d_output').max_pool(3, 3, 1, 1, name='inception_4e_pool').conv(1, 1, 128, 1, 1, name='inception_4e_pool_proj')
        self.feed('inception_4e_1x1', 'inception_4e_3x3', 'inception_4e_5x5', 'inception_4e_pool_proj').concat(3, name='inception_4e_output').max_pool(3, 3, 2, 2, name='pool4_3x3_s2').conv(1, 1, 256, 1, 1, name='inception_5a_1x1')
        # Inception 5a.
        self.feed('pool4_3x3_s2').conv(1, 1, 160, 1, 1, name='inception_5a_3x3_reduce').conv(3, 3, 320, 1, 1, name='inception_5a_3x3')
        self.feed('pool4_3x3_s2').conv(1, 1, 32, 1, 1, name='inception_5a_5x5_reduce').conv(5, 5, 128, 1, 1, name='inception_5a_5x5')
        self.feed('pool4_3x3_s2').max_pool(3, 3, 1, 1, name='inception_5a_pool').conv(1, 1, 128, 1, 1, name='inception_5a_pool_proj')
        self.feed('inception_5a_1x1', 'inception_5a_3x3', 'inception_5a_5x5', 'inception_5a_pool_proj').concat(3, name='inception_5a_output').conv(1, 1, 384, 1, 1, name='inception_5b_1x1')
        # Inception 5b, global average pool, classifier and softmax.
        self.feed('inception_5a_output').conv(1, 1, 192, 1, 1, name='inception_5b_3x3_reduce').conv(3, 3, 384, 1, 1, name='inception_5b_3x3')
        self.feed('inception_5a_output').conv(1, 1, 48, 1, 1, name='inception_5b_5x5_reduce').conv(5, 5, 128, 1, 1, name='inception_5b_5x5')
        self.feed('inception_5a_output').max_pool(3, 3, 1, 1, name='inception_5b_pool').conv(1, 1, 128, 1, 1, name='inception_5b_pool_proj')
        self.feed('inception_5b_1x1', 'inception_5b_3x3', 'inception_5b_5x5', 'inception_5b_pool_proj').concat(3, name='inception_5b_output').avg_pool(7, 7, 1, 1, padding='VALID', name='pool5_7x7_s1').fc(1000, relu=False, name='loss3_classifier').softmax(name='prob')
def residual_model(input_shape):
    """Build a tiny residual Keras model.

    A single Conv2D feeds a BatchNorm+ReLU branch; the activated branch is
    then added back to the raw conv output via a skip connection.
    """
    inputs = Input(shape=input_shape)
    conv_out = Conv2D(7, 8)(inputs)
    activated = Activation('relu')(BatchNormalization()(conv_out))
    summed = Add()([activated, conv_out])
    return keras.Model(inputs=inputs, outputs=summed)
def init_random_state(seed=0):
    """Seed Python's, NumPy's and PyTorch's RNGs (all CUDA devices
    included) so runs are reproducible."""
    import random
    import torch
    random.seed(seed)
    np.random.seed(seed)
    torch.manual_seed(seed)
    torch.cuda.manual_seed_all(seed)
def train_one_epoch(epoch, model, loader, optimizer, loss_fn, args, lr_scheduler=None, saver=None, output_dir=None, amp_autocast=suppress, loss_scaler=None, model_ema=None, mixup_fn=None):
    """Run one timm-style training epoch.

    Handles optional mixup (disabled after args.mixup_off_epoch), AMP via
    amp_autocast/loss_scaler, gradient clipping, model EMA, per-update LR
    scheduling, distributed loss reduction for logging, and periodic
    recovery checkpoints.  Returns an OrderedDict with the average loss.
    """
    # Turn mixup off once the configured epoch is reached, whichever
    # component (prefetching loader or mixup_fn) currently applies it.
    if (args.mixup_off_epoch and (epoch >= args.mixup_off_epoch)):
        if (args.prefetcher and loader.mixup_enabled):
            loader.mixup_enabled = False
        elif (mixup_fn is not None):
            mixup_fn.mixup_enabled = False
    second_order = (hasattr(optimizer, 'is_second_order') and optimizer.is_second_order)
    batch_time_m = AverageMeter()
    data_time_m = AverageMeter()
    losses_m = AverageMeter()
    model.train()
    end = time.time()
    last_idx = (len(loader) - 1)
    num_updates = (epoch * len(loader))
    for (batch_idx, (input, target)) in enumerate(loader):
        last_batch = (batch_idx == last_idx)
        data_time_m.update((time.time() - end))
        # With a prefetcher, batches arrive on the GPU already.
        if (not args.prefetcher):
            (input, target) = (input.cuda(), target.cuda())
            if (mixup_fn is not None):
                (input, target) = mixup_fn(input, target)
        if args.channels_last:
            input = input.contiguous(memory_format=torch.channels_last)
        with amp_autocast():
            output = model(input)
            loss = loss_fn(output, target)
        if (not args.distributed):
            losses_m.update(loss.item(), input.size(0))
        optimizer.zero_grad()
        if (loss_scaler is not None):
            # AMP path: the scaler handles backward, clipping and stepping.
            loss_scaler(loss, optimizer, clip_grad=args.clip_grad, clip_mode=args.clip_mode, parameters=model_parameters(model, exclude_head=('agc' in args.clip_mode)), create_graph=second_order)
        else:
            loss.backward(create_graph=second_order)
            if (args.clip_grad is not None):
                dispatch_clip_grad(model_parameters(model, exclude_head=('agc' in args.clip_mode)), value=args.clip_grad, mode=args.clip_mode)
            optimizer.step()
        if (model_ema is not None):
            model_ema.update(model)
        torch.cuda.synchronize()
        num_updates += 1
        batch_time_m.update((time.time() - end))
        if (last_batch or ((batch_idx % args.log_interval) == 0)):
            # Log the mean LR across param groups and throughput stats.
            lrl = [param_group['lr'] for param_group in optimizer.param_groups]
            lr = (sum(lrl) / len(lrl))
            if args.distributed:
                reduced_loss = reduce_tensor(loss.data, args.world_size)
                losses_m.update(reduced_loss.item(), input.size(0))
            if (args.local_rank == 0):
                _logger.info('Train: {} [{:>4d}/{} ({:>3.0f}%)] Loss: {loss.val:#.4g} ({loss.avg:#.3g}) Time: {batch_time.val:.3f}s, {rate:>7.2f}/s ({batch_time.avg:.3f}s, {rate_avg:>7.2f}/s) LR: {lr:.3e} Data: {data_time.val:.3f} ({data_time.avg:.3f})'.format(epoch, batch_idx, len(loader), ((100.0 * batch_idx) / last_idx), loss=losses_m, batch_time=batch_time_m, rate=((input.size(0) * args.world_size) / batch_time_m.val), rate_avg=((input.size(0) * args.world_size) / batch_time_m.avg), lr=lr, data_time=data_time_m))
                if (args.save_images and output_dir):
                    torchvision.utils.save_image(input, os.path.join(output_dir, ('train-batch-%d.jpg' % batch_idx)), padding=0, normalize=True)
        if ((saver is not None) and args.recovery_interval and (last_batch or (((batch_idx + 1) % args.recovery_interval) == 0))):
            saver.save_recovery(epoch, batch_idx=batch_idx)
        if (lr_scheduler is not None):
            lr_scheduler.step_update(num_updates=num_updates, metric=losses_m.avg)
        end = time.time()
    # Lookahead-style optimizers need a final sync after the epoch.
    if hasattr(optimizer, 'sync_lookahead'):
        optimizer.sync_lookahead()
    return OrderedDict([('loss', losses_m.avg)])
class BasicBlock(nn.Module):
    """ResNet basic block using CELU activations plus a squeeze-and-
    excitation (SE) channel gate (fc1 -> CELU -> fc2 -> sigmoid)."""
    def __init__(self, in_planes, planes, stride=1):
        super(BasicBlock, self).__init__()
        self.conv1 = nn.Conv2d(in_planes, planes, kernel_size=3, stride=stride, padding=1, bias=False)
        self.bn1 = nn.BatchNorm2d(planes)
        self.conv2 = nn.Conv2d(planes, planes, kernel_size=3, stride=1, padding=1, bias=False)
        self.bn2 = nn.BatchNorm2d(planes)
        # Identity shortcut unless the shape changes; then a 1x1 projection.
        self.shortcut = nn.Sequential()
        if ((stride != 1) or (in_planes != planes)):
            self.shortcut = nn.Sequential(nn.Conv2d(in_planes, planes, kernel_size=1, stride=stride, bias=False), nn.BatchNorm2d(planes))
        # SE gate: squeeze channels to planes//16, then expand back.
        # NOTE(review): planes must be >= 16 or fc1 gets 0 output channels.
        self.fc1 = nn.Conv2d(planes, (planes // 16), kernel_size=1)
        self.fc2 = nn.Conv2d((planes // 16), planes, kernel_size=1)
    def forward(self, x):
        out = F.celu(self.bn1(self.conv1(x)))
        out = self.bn2(self.conv2(out))
        # Squeeze: global average pool to a 1x1 map per channel.
        w = F.avg_pool2d(out, out.size(2))
        w = F.celu(self.fc1(w))
        # torch.sigmoid replaces the deprecated F.sigmoid (same math).
        w = torch.sigmoid(self.fc2(w))
        # Excite: rescale channels, add the shortcut, final activation.
        out = (out * w)
        out += self.shortcut(x)
        out = F.celu(out)
        return out
class TestSumPricesTabular(unittest.TestCase):
    """Tests for the SumPricesRegression tabular dataset loader."""
    def setUp(self):
        return super().setUp()
    def test_dataset_corrupted(self):
        """Loading from a directory without the dataset must raise
        RuntimeError with the standard 'not found' message."""
        with self.assertRaises(RuntimeError, msg=M.DATASET_NOT_FOUND):
            SumPricesRegression(root='./dummy_dir')
    def tearDown(self):
        return super().tearDown()
def freeze_bn_stats(mod):
    """Freeze BatchNorm running statistics on fused QAT conv-bn modules.

    Matches on exact type (deliberately not isinstance), so subclasses are
    left untouched; all other modules are ignored.
    """
    frozen_types = (ConvBnReLU1d, ConvBnReLU2d, ConvBnReLU3d, ConvBn1d, ConvBn2d, ConvBn3d)
    if type(mod) in frozen_types:
        mod.freeze_bn_stats()
class RandomHorizontalFlipVideo():
    """Horizontally flip a whole video clip with probability ``p``."""
    def __init__(self, p=0.5):
        # Probability of applying the flip to a given clip.
        self.p = p
    def __call__(self, clip):
        # Draw once per clip: either flip the entire clip or pass it
        # through untouched.
        if random.random() < self.p:
            return F.hflip(clip)
        return clip
    def __repr__(self) -> str:
        return f'{self.__class__.__name__}(p={self.p})'
class resblock(nn.Module):
    """Residual block: two mfm conv layers with an additive skip connection."""
    def __init__(self, in_channels, out_channels):
        super(resblock, self).__init__()
        # NOTE(review): both convs take in_channels, so the residual add
        # assumes in_channels == out_channels -- confirm with callers.
        self.conv1 = mfm(in_channels, out_channels, kernel_size=3, stride=1, padding=1)
        self.conv2 = mfm(in_channels, out_channels, kernel_size=3, stride=1, padding=1)
        self.add = Add()
    def forward(self, x):
        identity = x
        out = self.conv2(self.conv1(x))
        # Element-wise residual sum via the Add module.
        return self.add(out, identity)
def interleave_datasets(datasets, probabilities=None, probabilities_handle=None, seed=None, stopping_strategy='all_exhausted'):
    """Interleave several datasets into one IterableDataset.

    Map-style datasets are first converted to iterable form; sources are
    then sampled according to *probabilities* (updatable at runtime via
    *probabilities_handle*) until *stopping_strategy* is satisfied.
    """
    streams = [ds if isinstance(ds, IterableDataset) else ds.to_iterable_dataset() for ds in datasets]
    ex_iterables = [stream._ex_iterable for stream in streams]
    rng = np.random.default_rng(seed)
    combined = UpdatableRandomlyCyclingMultiSourcesExamplesIterable(ex_iterables, generator=rng, probabilities=probabilities, probabilities_handle=probabilities_handle, stopping_strategy=stopping_strategy)
    return IterableDataset(ex_iterable=combined)
def test_explicit_broadcasting():
    """Awkward arrays must broadcast against explicitly-shaped NumPy arrays
    exactly as NumPy does, for both regular (rgarray) and list-of-lists
    (lsarray) layouts, and on both sides of the operator."""
    nparray = np.arange(((2 * 3) * 5)).reshape(2, 3, 5)
    lsarray = ak.highlevel.Array(nparray.tolist(), check_valid=True)
    rgarray = ak.highlevel.Array(nparray, check_valid=True)
    # Broadcast a (2, 1, 1) array across the trailing axes.
    assert (to_list((rgarray + np.array([[[100]], [[200]]]))) == to_list((nparray + np.array([[[100]], [[200]]]))))
    assert (to_list((lsarray + np.array([[[100]], [[200]]]))) == to_list((nparray + np.array([[[100]], [[200]]]))))
    assert (to_list((np.array([[[100]], [[200]]]) + rgarray)) == to_list((np.array([[[100]], [[200]]]) + nparray)))
    assert (to_list((np.array([[[100]], [[200]]]) + lsarray)) == to_list((np.array([[[100]], [[200]]]) + nparray)))
    # Broadcast a (1, 1, 5) array across the leading axes.
    assert (to_list((rgarray + np.array([[[100, 200, 300, 400, 500]]]))) == to_list((nparray + np.array([[[100, 200, 300, 400, 500]]]))))
    assert (to_list((lsarray + np.array([[[100, 200, 300, 400, 500]]]))) == to_list((nparray + np.array([[[100, 200, 300, 400, 500]]]))))
    assert (to_list((np.array([[[100, 200, 300, 400, 500]]]) + rgarray)) == to_list((np.array([[[100, 200, 300, 400, 500]]]) + nparray)))
    assert (to_list((np.array([[[100, 200, 300, 400, 500]]]) + lsarray)) == to_list((np.array([[[100, 200, 300, 400, 500]]]) + nparray)))
class PointPillar(Detector3DTemplate):
    """PointPillar 3D detector assembled from the template's module list."""
    def __init__(self, model_cfg, num_class, dataset):
        super().__init__(model_cfg=model_cfg, num_class=num_class, dataset=dataset)
        self.module_list = self.build_networks()
    def forward(self, batch_dict):
        """Run all sub-modules in sequence; return losses when training,
        post-processed predictions and recall stats otherwise."""
        for cur_module in self.module_list:
            # The auxiliary point head is a training-only branch.
            if ((cur_module.__class__.__name__ == 'PointHeadSimple') and (not self.training)):
                continue
            batch_dict = cur_module(batch_dict)
        if self.training:
            (loss, tb_dict, disp_dict) = self.get_training_loss()
            ret_dict = {'loss': loss}
            return (ret_dict, tb_dict, disp_dict)
        else:
            (pred_dicts, recall_dicts) = self.post_processing(batch_dict)
            return (pred_dicts, recall_dicts)
    def get_training_loss(self):
        """Sum the dense (RPN) head loss with the optional point-head loss."""
        disp_dict = {}
        (loss_rpn, tb_dict) = self.dense_head.get_loss()
        tb_dict = {'loss_rpn': loss_rpn.item(), **tb_dict}
        if (self.point_head is not None):
            (loss_point, tb_dict) = self.point_head.get_loss(tb_dict)
            loss = (loss_rpn + loss_point)
        else:
            loss = loss_rpn
        return (loss, tb_dict, disp_dict)
def deleteContext(broker, ctxObj):
    """Delete the NGSI-LD entity ``ctxObj['id']`` from *broker*.

    The trailing path segment of *broker* is stripped before the standard
    '/ngsi-ld/v1/entities/<id>' endpoint is appended.
    """
    broker = broker.rsplit('/', 1)[0]
    print(broker)
    headers = {'Accept': 'application/ld+json', 'Content-Type': 'application/json'}
    response = requests.delete(((broker + '/ngsi-ld/v1/entities/') + ctxObj['id']), headers=headers)
    # NGSI-LD brokers answer 204 No Content on a successful delete; the
    # original check for == 200 reported every successful delete as a
    # failure.  Accept both 200 and 204.
    if (response.status_code not in (200, 204)):
        print('failed to delete context')
        print(response.text)
# NOTE(review): the two decorator lines were garbled in the original
# ('.run_in_serial' / '_utils.test(arch=ti.cuda)' with the leading '@...'
# fragments lost), which is a syntax error.  Restored to the standard
# Taichi test markers -- confirm the exact names against the test suite.
@pytest.mark.run_in_serial
@test_utils.test(arch=ti.cuda)
def test_memory_allocate():
    """Allocate a very large (128M-element) i32 field on CUDA and touch a
    few entries; runs serially so it does not compete for GPU memory."""
    HUGE_SIZE = ((1024 ** 2) * 128)
    x = ti.field(ti.i32, shape=(HUGE_SIZE,))
    for i in range(10):
        x[i] = i
def test_pipeline_with_step_that_it_is_pipeline():
    """Nesting an imblearn Pipeline (which contains a sampler) as a step of
    another Pipeline must raise TypeError at construction time."""
    (X, y) = make_classification(n_classes=2, class_sep=2, weights=[0.1, 0.9], n_informative=3, n_redundant=1, flip_y=0, n_features=20, n_clusters_per_class=1, n_samples=5000, random_state=0)
    clf = LogisticRegression(solver='lbfgs')
    rus = RandomUnderSampler(random_state=0)
    filter1 = SelectKBest(f_classif, k=2)
    # Inner pipeline contains a sampler, so it is not a valid transformer
    # step for an outer pipeline.
    pipe1 = Pipeline([('rus', rus), ('anova', filter1)])
    with raises(TypeError):
        pipe2 = Pipeline([('pipe1', pipe1), ('logistic', clf)])
        pipe2.fit(X, y)
class SmoothBivariateSpline(BivariateSpline):
    """Smooth bivariate spline approximation to scattered data.

    Thin wrapper over FITPACK's ``surfit_smth``: fits a degree (kx, ky)
    spline to points (x, y, z) with optional weights *w*, smoothing factor
    *s*, and bounding box *bbox* = [xb, xe, yb, ye].
    """
    def __init__(self, x, y, z, w=None, bbox=([None] * 4), kx=3, ky=3, s=None, eps=None):
        (xb, xe, yb, ye) = bbox
        # First call with a minimal second workspace; when ier > 10,
        # FITPACK reports the required lwrk2 size in ier, so retry with
        # exactly that much workspace.
        (nx, tx, ny, ty, c, fp, wrk1, ier) = dfitpack.surfit_smth(x, y, z, w, xb, xe, yb, ye, kx, ky, s=s, eps=eps, lwrk2=1)
        if (ier > 10):
            (nx, tx, ny, ty, c, fp, wrk1, ier) = dfitpack.surfit_smth(x, y, z, w, xb, xe, yb, ye, kx, ky, s=s, eps=eps, lwrk2=ier)
        # ier in {0, -1, -2} are success codes; anything else is surfaced
        # as a warning (not an error) with FITPACK's message.
        if (ier in [0, (- 1), (- 2)]):
            pass
        else:
            message = _surfit_messages.get(ier, ('ier=%s' % ier))
            warnings.warn(message)
        # fp: weighted sum of squared residuals of the fit.
        self.fp = fp
        # Knots and coefficients trimmed to the sizes FITPACK reports.
        self.tck = (tx[:nx], ty[:ny], c[:(((nx - kx) - 1) * ((ny - ky) - 1))])
        self.degrees = (kx, ky)
def main(H, vis):
    """Sample discrete latents with the trained sampler, decode them with the
    generator, and push the resulting images to the 'large_samples' window."""
    # Load quantizer + generator weights; the codebook is pulled out separately.
    components_state_dict = retrieve_autoencoder_components_state_dicts(
        H, ['quantize', 'generator'], remove_component_from_key=True)
    embedding_weight = components_state_dict.pop('embedding.weight')
    embedding_weight = embedding_weight.cuda()

    generator = Generator(H)
    generator.load_state_dict(components_state_dict, strict=False)
    generator = generator.cuda()

    sampler = get_sampler(H, embedding_weight)
    sampler = load_model(sampler, (H.sampler + '_ema'), H.load_step, H.load_dir).cuda()
    sampler = sampler.eval()

    shape = (1, H.shape[0], H.shape[1])
    log(f'Generating latents of shape: {shape}')

    step = 1
    time_steps = shape[1] * shape[2]
    with torch.no_grad():
        latents = sampler.sample_shape(shape[1:], H.batch_size, time_steps=time_steps, step=step)

    # One-hot latent ids -> codebook embeddings, laid out as (N, emb, H, W).
    latents_one_hot = latent_ids_to_onehot(latents, shape, H.codebook_size).cuda()
    q = torch.matmul(latents_one_hot, embedding_weight)
    q = q.view(latents_one_hot.size(0), shape[1], shape[2], H.emb_dim)
    q = q.permute(0, 3, 1, 2).contiguous()

    # Free the sampler before decoding to reduce peak GPU memory.
    del sampler

    # Decode in chunks of 8 latent maps to bound memory use.
    decoded = [generator(chunk) for chunk in torch.split(q, 8)]
    gen_images = torch.cat(decoded, dim=0)
    vis.images(gen_images.clamp(0, 1), win='large_samples', opts=dict(title='large_samples'))
class SRWLOptT(SRWLOpt):
    """Transmission optical element defined on a (ne x nx x ny) mesh.

    self.arTr stores two interleaved values per mesh point (indices 2*i and
    2*i + 1); per SRW convention these are amplitude transmission and optical
    path difference — TODO confirm against the SRW documentation.
    """

    def __init__(self, _nx=1, _ny=1, _rx=0.001, _ry=0.001, _arTr=None, _extTr=0, _Fx=1e+23, _Fy=1e+23, _x=0, _y=0, _ne=1, _eStart=0, _eFin=0, _alloc_base=[0]):
        # NOTE(review): mutable default `_alloc_base=[0]` is shared between
        # calls; it appears to be only read here — confirm no caller mutates it.
        self.arTr = _arTr
        # (Re)allocate when no data given, or when its length does not match
        # two values per mesh point (and the mesh is non-empty).
        if ((_arTr is None) or ((len(_arTr) != (((_ne * _nx) * _ny) * 2)) and (((_ne * _nx) * _ny) > 0))):
            self.allocate(_ne, _nx, _ny, _alloc_base)
        # Transverse ranges are centered on (_x, _y).
        halfRangeX = (0.5 * _rx)
        halfRangeY = (0.5 * _ry)
        if (not hasattr(self, 'mesh')):
            self.mesh = SRWLRadMesh(_eStart, _eFin, _ne, (_x - halfRangeX), (_x + halfRangeX), _nx, (_y - halfRangeY), (_y + halfRangeY), _ny)
        else:
            # allocate() above already created self.mesh; just set its ranges.
            self.mesh.eStart = _eStart
            self.mesh.eFin = _eFin
            self.mesh.xStart = (_x - halfRangeX)
            self.mesh.xFin = (_x + halfRangeX)
            self.mesh.yStart = (_y - halfRangeY)
            self.mesh.yFin = (_y + halfRangeY)
        self.extTr = _extTr
        self.Fx = _Fx
        self.Fy = _Fy

    def allocate(self, _ne, _nx, _ny, _alloc_base=[0]):
        """Allocate (or re-shape) the transmission data array for the mesh."""
        if hasattr(self, 'mesh'):
            self.mesh.ne = _ne
            self.mesh.nx = _nx
            self.mesh.ny = _ny
        else:
            self.mesh = SRWLRadMesh(0, 0, _ne, 0, 0, _nx, 0, 0, _ny)
        # Two values stored per mesh point.
        nTot = (((2 * _ne) * _nx) * _ny)
        lenBase = len(_alloc_base)
        if (lenBase > 1):
            # _alloc_base is a repeated pattern; allocate in units of it.
            nTot = int(round((nTot / lenBase)))
        self.arTr = srwl_uti_array_alloc('d', nTot, _alloc_base)

    def get_data(self, _typ, _dep=3, _e=0, _x=0, _y=0):
        """Extract one component of the transmission data, optionally cut.

        _typ: 1 -> first stored value (amplitude transmission, presumably),
              2 -> its square (intensity transmission), anything else -> the
              second stored value (optical path, presumably).
        _dep: 0 -> vs energy at (_x, _y); 1 -> vs x at (_e, _y);
              2 -> vs y at (_e, _x); 3 -> vs (x, y) at _e.
        """
        nTot = ((self.mesh.ne * self.mesh.nx) * self.mesh.ny)
        arAux = array('d', ([0] * nTot))
        # Pick the requested component for every mesh point.
        for i in range(nTot):
            tr = 0
            if ((_typ == 1) or (_typ == 2)):
                tr = self.arTr[(i * 2)]
                if (_typ == 2):
                    tr *= tr
            else:
                tr = self.arTr[((i * 2) + 1)]
            arAux[i] = tr
        # Single-energy transverse cut: the extracted array is already it.
        if ((_dep == 3) and (self.mesh.ne == 1)):
            return arAux
        arOut = None
        xStep = 0
        if (self.mesh.nx > 1):
            xStep = ((self.mesh.xFin - self.mesh.xStart) / (self.mesh.nx - 1))
        yStep = 0
        if (self.mesh.ny > 1):
            yStep = ((self.mesh.yFin - self.mesh.yStart) / (self.mesh.ny - 1))
        # Interpolation order for uti_math.interp_2d (1 = linear, presumably).
        # NOTE(review): local name 'inperpOrd' looks like a typo of 'interpOrd'.
        inperpOrd = 1
        if (_dep == 0):
            # Cut vs photon energy at fixed transverse position (_x, _y).
            arOut = array('d', ([0] * self.mesh.ne))
            for ie in range(self.mesh.ne):
                arOut[ie] = uti_math.interp_2d(_x, _y, self.mesh.xStart, xStep, self.mesh.nx, self.mesh.yStart, yStep, self.mesh.ny, arAux, inperpOrd, self.mesh.ne, ie)
        else:
            # Locate the energy slice index nearest to _e (clamped to range).
            ie = 0
            if (self.mesh.ne > 1):
                if (_e >= self.mesh.eFin):
                    ie = (self.mesh.ne - 1)
                elif (_e > self.mesh.eStart):
                    eStep = ((self.mesh.eFin - self.mesh.eStart) / (self.mesh.ne - 1))
                    ie = int(round(((_e - self.mesh.eStart) / eStep)))
            if (_dep == 1):
                # Horizontal cut at fixed _y.
                arOut = array('d', ([0] * self.mesh.nx))
                xx = self.mesh.xStart
                for ix in range(self.mesh.nx):
                    arOut[ix] = uti_math.interp_2d(xx, _y, self.mesh.xStart, xStep, self.mesh.nx, self.mesh.yStart, yStep, self.mesh.ny, arAux, inperpOrd, self.mesh.ne, ie)
                    xx += xStep
            elif (_dep == 2):
                # Vertical cut at fixed _x.
                arOut = array('d', ([0] * self.mesh.ny))
                yy = self.mesh.yStart
                for iy in range(self.mesh.ny):
                    arOut[iy] = uti_math.interp_2d(_x, yy, self.mesh.xStart, xStep, self.mesh.nx, self.mesh.yStart, yStep, self.mesh.ny, arAux, inperpOrd, self.mesh.ne, ie)
                    yy += yStep
            elif (_dep == 3):
                # Full transverse map at the selected energy slice.
                nTot = (self.mesh.nx * self.mesh.ny)
                arOut = array('d', ([0] * nTot))
                yy = self.mesh.yStart
                i = 0
                for iy in range(self.mesh.ny):
                    xx = self.mesh.xStart
                    for ix in range(self.mesh.nx):
                        arOut[i] = uti_math.interp_2d(xx, yy, self.mesh.xStart, xStep, self.mesh.nx, self.mesh.yStart, yStep, self.mesh.ny, arAux, inperpOrd, self.mesh.ne, ie)
                        i += 1
                        xx += xStep
                    yy += yStep
        del arAux
        return arOut
class ImageEncoder(nn.Module):
    """Convolutional image encoder producing a flat feature vector.

    Four stride-2 Conv2d+ReLU stages halve the spatial size each time (so
    `size_image` should be divisible by 16), followed by a fully connected
    projection to `num_output_length` features, optionally squashed by tanh.

    forward() returns the tuple (features, conv1_out, conv2_out, conv3_out,
    conv4_out) so callers can tap intermediate activations.
    """

    def __init__(self, size_image, num_output_length, if_tanh=False):
        super(ImageEncoder, self).__init__()
        self.if_tanh = if_tanh
        self.conv1 = nn.Sequential(nn.Conv2d(3, 16, 5, stride=2, padding=2), nn.ReLU(inplace=True))
        self.conv2 = nn.Sequential(nn.Conv2d(16, 32, 5, stride=2, padding=2), nn.ReLU(inplace=True))
        self.conv3 = nn.Sequential(nn.Conv2d(32, 64, 5, stride=2, padding=2), nn.ReLU(inplace=True))
        self.conv4 = nn.Sequential(nn.Conv2d(64, 128, 5, stride=2, padding=2), nn.ReLU(inplace=True))
        size_mini_map = self.get_size(size_image, 4)
        self.fc = nn.Linear(((size_mini_map * size_mini_map) * 128), num_output_length)

    def get_size(self, size_image, num_layers):
        """Spatial side length remaining after `num_layers` stride-2 stages."""
        return int((size_image / (2 ** num_layers)))

    def forward(self, inputs):
        img_e_conv1 = self.conv1(inputs)
        img_e_conv2 = self.conv2(img_e_conv1)
        img_e_conv3 = self.conv3(img_e_conv2)
        img_e_conv4 = self.conv4(img_e_conv3)
        # Flatten to (batch, C*H*W) for the linear projection.
        img_e_fc_5 = img_e_conv4.contiguous().view(img_e_conv4.shape[0], (- 1))
        img_e_fc_5 = self.fc(img_e_fc_5)
        if self.if_tanh:
            # Fixed: F.tanh is deprecated; Tensor.tanh() is the supported form.
            img_e_fc_5 = img_e_fc_5.tanh()
        return (img_e_fc_5, img_e_conv1, img_e_conv2, img_e_conv3, img_e_conv4)
def slerp(z_A, z_B, t, eps=1e-20):
    """Spherical linear interpolation between batched vectors z_A and z_B.

    t is the interpolation fraction (0 -> z_A, 1 -> z_B); eps guards against
    division by zero when the vectors are (anti-)parallel. Operates row-wise
    on (batch, dim) tensors and returns a tensor of the same shape.
    """
    # Fixed: the original computed the two norms into temporaries it never
    # used and then recomputed them; compute each norm once and reuse it.
    norm_A = z_A.pow(2).sum(dim=1, keepdim=True).sqrt()
    norm_B = z_B.pow(2).sum(dim=1, keepdim=True).sqrt()
    cos_val = (z_A * z_B).sum(dim=1, keepdim=True)
    cos_val = (cos_val / norm_A) / norm_B
    # Clamp to the valid domain of acos against rounding error.
    cos_val = torch.clamp(cos_val, min=(- 1), max=1)
    theta = torch.acos(cos_val)
    s1 = (torch.sin(((1 - t) * (theta + eps))) / (torch.sin(theta) + eps))
    s2 = (torch.sin((t * (theta + eps))) / (torch.sin(theta) + eps))
    return ((s1 * z_A) + (s2 * z_B))
# NOTE(review): the decorator line was garbled in the source to a bare
# '_model' statement; restored per timm convention as @register_model —
# confirm against the original file.
@register_model
def regnetx_064(pretrained=False, **kwargs):
    """RegNetX-6.4GF model constructor (delegates to the shared builder)."""
    return _regnet('regnetx_064', pretrained, **kwargs)
def get_image_list(image_dir, count=0):
    """List paths of image files directly under `image_dir`.

    Returns at most `count` paths when count > 0, otherwise all of them,
    in os.listdir order.
    """
    paths = [
        os.path.join(image_dir, name)
        for name in os.listdir(image_dir)
        if is_image_file(name)
    ]
    if count > 0:
        return paths[:count]
    return paths
# Fixed: the '@' was missing, leaving a bare no-op call that never
# registered the class with the segmentation-head registry.
@_SEG_HEADS_REGISTRY.register()
class M2FPHead(nn.Module):
    """Segmentation head: a pixel decoder feeding a transformer predictor.

    NOTE(review): detectron2-style heads normally pair `from_config` with an
    @configurable __init__; confirm whether that decorator was stripped from
    the original source.
    """

    # Checkpoint format version; pre-2 checkpoints are converted on load.
    _version = 2

    def _load_from_state_dict(self, state_dict, prefix, local_metadata, strict, missing_keys, unexpected_keys, error_msgs):
        """Upgrade keys of old (version < 2) checkpoints in place."""
        version = local_metadata.get('version', None)
        if ((version is None) or (version < 2)):
            scratch = True  # stays True when no key needed conversion
            logger = logging.getLogger(__name__)
            for k in list(state_dict.keys()):
                # NOTE(review): the rule that computes the renamed key appears
                # to have been lost — `newk` always equals `k`, so this loop is
                # a no-op as written. Upstream Mask2Former remaps head keys
                # under 'pixel_decoder.'; restore and confirm.
                newk = k
                if (newk != k):
                    state_dict[newk] = state_dict[k]
                    del state_dict[k]
                    scratch = False
            if (not scratch):
                logger.warning(f'Weight format of {self.__class__.__name__} have changed! Please upgrade your models. Applying automatic conversion now ...')

    def __init__(self, input_shape: Dict[(str, ShapeSpec)], *, num_classes: int, pixel_decoder: nn.Module, loss_weight: float=1.0, ignore_value: int=(- 1), transformer_predictor: nn.Module, transformer_in_feature: str):
        """
        Args:
            input_shape: per-feature ShapeSpec (channels/stride) of the inputs
            num_classes: number of classes to predict
            pixel_decoder: decodes backbone features into per-pixel embeddings
            loss_weight: scale applied to this head's loss
            ignore_value: label value ignored by the loss
            transformer_predictor: transformer decoder producing predictions
            transformer_in_feature: which feature stream feeds the transformer
        """
        super().__init__()
        # Order features by stride so downstream lists are stride-sorted.
        input_shape = sorted(input_shape.items(), key=(lambda x: x[1].stride))
        self.in_features = [k for (k, v) in input_shape]
        # (Unused locals `feature_strides` / `feature_channels` removed.)
        self.ignore_value = ignore_value
        self.common_stride = 4
        self.loss_weight = loss_weight
        self.pixel_decoder = pixel_decoder
        self.predictor = transformer_predictor
        self.transformer_in_feature = transformer_in_feature
        self.num_classes = num_classes

    # Fixed: the first parameter is `cls`, so this was clearly intended to be
    # a classmethod but the decorator was missing.
    @classmethod
    def from_config(cls, cfg, input_shape: Dict[(str, ShapeSpec)]):
        """Translate a detectron2-style config into constructor kwargs."""
        # The transformer's input channel count depends on where it taps in.
        if (cfg.MODEL.M2FP.TRANSFORMER_IN_FEATURE == 'transformer_encoder'):
            transformer_predictor_in_channels = cfg.MODEL.SEM_SEG_HEAD.CONVS_DIM
        elif (cfg.MODEL.M2FP.TRANSFORMER_IN_FEATURE == 'pixel_embedding'):
            transformer_predictor_in_channels = cfg.MODEL.SEM_SEG_HEAD.MASK_DIM
        elif (cfg.MODEL.M2FP.TRANSFORMER_IN_FEATURE == 'multi_scale_pixel_decoder'):
            transformer_predictor_in_channels = cfg.MODEL.SEM_SEG_HEAD.CONVS_DIM
        else:
            transformer_predictor_in_channels = input_shape[cfg.MODEL.M2FP.TRANSFORMER_IN_FEATURE].channels
        return {'input_shape': {k: v for (k, v) in input_shape.items() if (k in cfg.MODEL.SEM_SEG_HEAD.IN_FEATURES)}, 'ignore_value': cfg.MODEL.SEM_SEG_HEAD.IGNORE_VALUE, 'num_classes': cfg.MODEL.SEM_SEG_HEAD.NUM_CLASSES, 'pixel_decoder': build_pixel_decoder(cfg, input_shape), 'loss_weight': cfg.MODEL.SEM_SEG_HEAD.LOSS_WEIGHT, 'transformer_in_feature': cfg.MODEL.M2FP.TRANSFORMER_IN_FEATURE, 'transformer_predictor': build_transformer_decoder(cfg, transformer_predictor_in_channels, mask_classification=True)}

    def forward(self, features, mask=None):
        return self.layers(features, mask)

    def layers(self, features, mask=None):
        """Run the pixel decoder, then the predictor on the selected stream."""
        (mask_features, transformer_encoder_features, multi_scale_features) = self.pixel_decoder.forward_features(features)
        if (self.transformer_in_feature == 'multi_scale_pixel_decoder'):
            predictions = self.predictor(multi_scale_features, mask_features, mask)
        elif (self.transformer_in_feature == 'transformer_encoder'):
            assert (transformer_encoder_features is not None), 'Please use the TransformerEncoderPixelDecoder.'
            predictions = self.predictor(transformer_encoder_features, mask_features, mask)
        elif (self.transformer_in_feature == 'pixel_embedding'):
            predictions = self.predictor(mask_features, mask_features, mask)
        else:
            predictions = self.predictor(features[self.transformer_in_feature], mask_features, mask)
        return predictions
class DeterministicGuard():
    """Context manager that temporarily forces torch's global
    deterministic-algorithms flag to a given value.

    On exit the flag is restored to whatever it was when the block was
    entered.
    """

    def __init__(self, deterministic):
        self.deterministic = deterministic

    def __enter__(self):
        # Remember the current global setting so __exit__ can restore it.
        self.deterministic_restore = torch.are_deterministic_algorithms_enabled()
        torch.use_deterministic_algorithms(self.deterministic)

    def __exit__(self, exception_type, exception_value, traceback):
        # Restore unconditionally, even when the block raised.
        torch.use_deterministic_algorithms(self.deterministic_restore)
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.