code stringlengths 101 5.91M |
|---|
def get_metric(metric: str):
    """Resolve a metric name to a ``(display_name, callable)`` pair.

    The returned callable wraps the underlying metric function through
    ``_metric_wrapper`` (signature ``(y_hat, y)``) or, for OOD metrics,
    ``_ood_metric_wrapper`` (signature ``(y_hat, y, y_hat_ood, y_ood)``).
    All metric functions are referenced lazily inside the lambdas, so this
    resolver works regardless of module import order.

    Raises:
        NotImplementedError: if the metric name is unknown.
    """
    metric = metric.lower()

    # --- simple in-distribution metrics -----------------------------------
    if metric == 'accuracy':
        return (metric, lambda y_hat, y: _metric_wrapper(X.metrics.accuracy, y_hat, y, key='hard'))
    if metric == 'f1_score':
        return (metric, lambda y_hat, y: _metric_wrapper(X.metrics.f1_score, y_hat, y, key='hard'))
    if metric == 'brier_score':
        return (metric, lambda y_hat, y: _metric_wrapper(brier_score, y_hat, y, key='soft'))
    if metric == 'ece':
        return ('ECE', lambda y_hat, y: _metric_wrapper(expected_calibration_error, y_hat, y, key=None))
    if metric == 'mce':
        return ('MCE', lambda y_hat, y: _metric_wrapper(maximum_calibration_error, y_hat, y, key=None))
    if metric == 'ce':
        return ('CE', lambda y_hat, y: _metric_wrapper(cross_entropy, y_hat, y, key='soft'))
    if metric == 'average_entropy':
        return ('average_entropy', lambda y_hat, y: _metric_wrapper(average_entropy, y_hat, y, key=None))

    # --- confidence_{uncertainty}_{score} ---------------------------------
    # Closing over the loop variables is safe: we return immediately on the
    # matching iteration, so they hold the matched values.
    for unc in ('aleatoric', 'epistemic', 'structure'):
        for score_type in ('AUROC', 'APR'):
            if metric == f'confidence_{unc}_{score_type.lower()}':
                return (metric, lambda y_hat, y: _metric_wrapper(
                    confidence, y_hat, y, key=None,
                    score_type=score_type, uncertainty_type=unc))

    # --- avg_{prediction|sample}_confidence_{uncertainty} -----------------
    for conf_type, unc_types in (('prediction', ('aleatoric', 'epistemic')),
                                 ('sample', ('aleatoric', 'epistemic', 'features', 'neighborhood'))):
        for unc in unc_types:
            if metric == f'avg_{conf_type}_confidence_{unc}':
                return (metric, lambda y_hat, y: _metric_wrapper(
                    average_confidence, y_hat, y, key=None,
                    confidence_type=conf_type, uncertainty_type=unc))

    # --- ood_detection_{uncertainty}_{score} (generic detector) -----------
    for unc in ('aleatoric', 'epistemic'):
        for score_type in ('AUROC', 'APR'):
            if metric == f'ood_detection_{unc}_{score_type.lower()}':
                return (metric, lambda y_hat, y, y_hat_ood, y_ood: _ood_metric_wrapper(
                    ood_detection, y_hat, y, y_hat_ood, y_ood, key=None,
                    score_type=score_type, uncertainty_type=unc))

    # --- ood_detection_{kind}_{score} (dedicated detectors) ---------------
    for kind in ('features', 'neighborhood', 'structure'):
        for score_type in ('AUROC', 'APR'):
            if metric == f'ood_detection_{kind}_{score_type.lower()}':
                # The detector lookup happens inside the lambda so the detector
                # function is resolved lazily, like every other metric here.
                return (metric, lambda y_hat, y, y_hat_ood, y_ood: _ood_metric_wrapper(
                    {'features': ood_detection_features,
                     'neighborhood': ood_detection_neighborhood,
                     'structure': ood_detection_structure}[kind],
                    y_hat, y, y_hat_ood, y_ood, key=None, score_type=score_type))

    # --- metrics evaluated on only the OOD or only the ID split -----------
    for setting in ('ood', 'id'):
        if metric == f'{setting}_accuracy':
            return (metric, lambda y_hat, y, y_hat_ood, y_ood: _ood_metric_wrapper(
                X.metrics.accuracy, y_hat, y, y_hat_ood, y_ood, key='hard', setting=setting))
        if metric == f'{setting}_average_entropy':
            return (metric, lambda y_hat, y, y_hat_ood, y_ood: _ood_metric_wrapper(
                average_entropy, y_hat, y, y_hat_ood, y_ood, key=None, setting=setting))
        for conf_type, unc_types in (('prediction', ('aleatoric', 'epistemic')),
                                     ('sample', ('aleatoric', 'epistemic', 'features', 'neighborhood'))):
            for unc in unc_types:
                if metric == f'{setting}_avg_{conf_type}_confidence_{unc}':
                    return (metric, lambda y_hat, y, y_hat_ood, y_ood: _ood_metric_wrapper(
                        average_confidence, y_hat, y, y_hat_ood, y_ood, key=None,
                        setting=setting, confidence_type=conf_type, uncertainty_type=unc))

    raise NotImplementedError(f'{metric} currently not implemented!')
def p_value_calc(TP, POP, NIR):
    """One-sided binomial-test p-value that observed accuracy beats the no-information rate.

    Args:
        TP: mapping of per-class true-positive counts; their sum is the number
            of successes ``x``.
        POP: population size (number of trials ``n``).
        NIR: no-information rate (null success probability ``p``).

    Returns:
        ``P(X >= x)`` for ``X ~ Binomial(POP, NIR)``, or the string ``'None'``
        on any failure (kept for backward compatibility with existing callers).
    """
    from math import comb  # exact n-choose-k from the stdlib (replaces the ad-hoc ncr)
    try:
        n = POP
        x = sum(TP.values())  # sum() takes any iterable; no list() copy needed
        p = NIR
        # Cumulative P(X < x), then complement -> upper-tail p-value.
        cdf_below = sum(comb(n, j) * (p ** j) * ((1 - p) ** (n - j)) for j in range(x))
        return 1 - cdf_below
    except Exception:
        # NOTE(review): returning the *string* 'None' mirrors the original
        # contract; callers appear to compare against it, so it is preserved.
        return 'None'
def get_candidates_mask(config: configure_pretraining.PretrainingConfig, inputs: pretrain_data.Inputs, disallow_from_mask=None):
    """Return a boolean mask over input positions that are eligible for masking.

    A position is a candidate unless it holds a special token ([SEP], [CLS],
    [MASK]), is padding (input_mask == 0), or is explicitly disallowed via
    ``disallow_from_mask``.
    """
    vocab = get_vocab(config)
    # Special tokens must never be selected as masking candidates.
    ignore_ids = [vocab['[SEP]'], vocab['[CLS]'], vocab['[MASK]']]
    candidates_mask = tf.ones_like(inputs.input_ids, tf.bool)
    for ignore_id in ignore_ids:
        candidates_mask &= tf.not_equal(inputs.input_ids, ignore_id)
    # Drop padding positions.
    candidates_mask &= tf.cast(inputs.input_mask, tf.bool)
    if (disallow_from_mask is not None):
        candidates_mask &= (~ disallow_from_mask)
    return candidates_mask
def ifft(x, n=None, axis=(- 1), norm=None, overwrite_x=False, workers=None, *, plan=None):
    """Compute the 1-D inverse discrete Fourier transform.

    Thin dispatcher: forwards all arguments to the pocketfft backend through
    ``_execute_1D``. ``plan`` is keyword-only and reserved for pre-computed
    transform plans.
    """
    return _execute_1D('ifft', _pocketfft.ifft, x, n=n, axis=axis, norm=norm, overwrite_x=overwrite_x, workers=workers, plan=plan)
class _rot_operation(_operation):
    """A layer of single-qubit rotation gates applied to every qubit.

    ``r_star`` (passed to ``apply_param_vectors``) is the rotation-gate
    constructor; its angle comes from one or more variable groups, optionally
    combined through a user-supplied ``map``.
    """

    def __init__(self, num_qubits: int, variablegroup_tuple: tuple, map=None):
        super().__init__(num_qubits, variablegroup_tuple, map)

    def apply_param_vectors(self, QC, r_star, var_param_assignment):
        """Append one rotation per qubit to circuit ``QC`` and return it.

        ``var_param_assignment`` maps ``hash(variablegroup)`` to a parameter
        vector; each group's running ``index`` selects the next parameter
        (wrapped modulo the group's ``size`` when the group is bounded).
        """
        if (self.default_map and (len(self.variablegroup_tuple) > 2)):
            raise ValueError('There are too many variable groups given without a map. There can only be one or two parameters without any given map.')
        elif (self.default_map and (len(self.variablegroup_tuple) == 1)):
            # Single group, no custom map: the parameter feeds r_star directly.
            if (self.variablegroup_tuple[0].size == None):
                # Unbounded group: each qubit consumes the next parameter.
                for qubit in range(self.num_qubits):
                    QC.append(r_star(var_param_assignment[hash(self.variablegroup_tuple[0])][self.variablegroup_tuple[0].index]), [qubit], [])
                    self.variablegroup_tuple[0].increase_index(1)
            else:
                # Bounded group: wrap the running index around the group size.
                for qubit in range(self.num_qubits):
                    QC.append(r_star(var_param_assignment[hash(self.variablegroup_tuple[0])][(self.variablegroup_tuple[0].index % self.variablegroup_tuple[0].size)]), [qubit], [])
                    self.variablegroup_tuple[0].increase_index(1)
        else:
            # Custom map (or two default-mapped groups): gather one parameter
            # per group and combine them through self.map before building the gate.
            for qubit in range(self.num_qubits):
                buffer_param_vectors_list = []
                for variablegroup in self.variablegroup_tuple:
                    if (variablegroup.size == None):
                        buffer_param_vectors_list.append(var_param_assignment[hash(variablegroup)][variablegroup.index])
                    else:
                        buffer_param_vectors_list.append(var_param_assignment[hash(variablegroup)][(variablegroup.index % variablegroup.size)])
                    variablegroup.increase_index(1)
                QC.append(r_star(self.map(*buffer_param_vectors_list)), [qubit], [])
        return QC
def plot_figure(data, x, y, title=None, xlabel=None, ylabel=None, legend=None, x_axis_type='linear', y_axis_type='linear', width=800, height=400, line_width=2, colors=('red', 'green', 'blue', 'orange', 'black', 'purple', 'brown'), tools='pan,box_zoom,wheel_zoom,box_select,hover,reset,save', append_figure=None):
    """Plot one or more y-columns of *data* against column *x* on a Bokeh figure.

    Bug fix: the ``colors`` default was a mutable list shared across calls;
    it is now an immutable tuple (behavior-equivalent, since it is only cycled).

    Args:
        data: mapping/frame indexable by column name.
        x: name of the x column.
        y: a single y column name or a list of them.
        legend: per-line legend labels; defaults to the y column names.
        append_figure: existing figure to draw onto instead of creating one.

    Returns:
        The Bokeh figure with the lines added and a click-to-hide legend.
    """
    if not isinstance(y, list):
        y = [y]
    xlabel = xlabel or x
    legend = legend or y
    assert len(legend) == len(y)
    if append_figure is not None:
        f = append_figure
    else:
        f = figure(title=title, tools=tools, width=width, height=height,
                   x_axis_label=xlabel, y_axis_label=(ylabel or ''),
                   x_axis_type=x_axis_type, y_axis_type=y_axis_type)
    color_cycle = cycle(colors)
    for i, yi in enumerate(y):
        f.line(data[x], data[yi], line_width=line_width, line_color=next(color_cycle), legend=legend[i])
    f.legend.click_policy = 'hide'
    return f
class MJVCAMERAPOSE(Structure):
    """ctypes layout mirroring MuJoCo's camera-pose struct (field names follow the C side)."""
    _fields_ = [
        ('head_pos', c_double * 3),
        ('head_right', c_double * 3),
        ('window_pos', c_double * 3),
        ('window_right', c_double * 3),
        ('window_up', c_double * 3),
        ('window_normal', c_double * 3),
        ('window_size', c_double * 2),
        ('scale', c_double),
        ('ipd', c_double),
    ]
def parse_args():
    """Parse the command-line options of the type classifier."""
    parser = argparse.ArgumentParser(description='Type classifier')
    parser.add_argument('--cfg', help='decide which cfg to use', required=False, default='/home/test.yaml', type=str)
    parser.add_argument('--gpu', type=int, default=0, help='use gpu device. default:0')
    parser.add_argument('--seed', type=int, default=5, help='random seed for gpu.default:5')
    return parser.parse_args()
def local_inline(A: dace.float64[W], B: dace.float64[W], C: dace.float64[W]):
    """Chain two calls of the inner program: A -> B, then B -> C.

    B is written by the first call and read by the second, so call order matters.
    """
    local_inline_inner(A, B)
    local_inline_inner(B, C)
def register_Ns3WifiTxCurrentModel_methods(root_module, cls):
    """Register constructors/methods of ns3::WifiTxCurrentModel on the binding class."""
    cls.add_constructor([param('ns3::WifiTxCurrentModel const &', 'arg0')])  # copy constructor
    cls.add_constructor([])  # default constructor
    cls.add_method('CalcTxCurrent', 'double', [param('double', 'txPowerDbm')], is_pure_virtual=True, is_const=True, is_virtual=True)
    cls.add_method('GetTypeId', 'ns3::TypeId', [], is_static=True)
    return
class PARegressor(BasePA, base.Regressor):
    """Passive-aggressive regressor that tracks two coordinates (x, y) jointly.

    Bug fix: the ``data`` parameter previously defaulted to a shared mutable
    list (``data=[]``); it now defaults to ``None`` and a fresh list is created
    per instance. The duplicated per-feature update loop of ``fit_one``/``fit_n``
    is factored into ``_apply_steps``.
    """

    def __init__(self, C=1.0, mode=1, eps=0.1, fit_intercept=True, data=None, learning_rate=0.1, rho=0.9):
        super().__init__(C=C, mode=mode, fit_intercept=fit_intercept,
                         data=([] if data is None else data),
                         learning_rate=learning_rate, rho=rho)
        self.loss = optim.losses.EpsilonInsensitiveHinge(eps=eps)
        self.x_act = None
        self.y_act = None

    def _apply_steps(self, X, step_x, step_y):
        # Shared weight/accumulator update for both coordinates.
        # NOTE(review): the momentum accumulators are maintained here but not
        # consumed by this update itself -- presumably used elsewhere; verify.
        for i, xi in X.items():
            self.momentum_x[i] = (self.rho * self.momentum_x[i]) + ((1 - self.rho) * (step_x ** 2))
            self.momentum_y[i] = (self.rho * self.momentum_y[i]) + ((1 - self.rho) * (step_y ** 2))
            self.weights_x[i] += step_x * xi
            self.weights_y[i] += step_y * xi
        if self.fit_intercept:
            self.intercept_x += step_x
            self.intercept_y += step_y

    def fit_one(self, X, x, y):
        """Single PA update from one sample; step size is tau (no learning rate)."""
        (x_pred, y_pred) = self.predict_one(X, False, (- 1), (- 1))
        tau_x = self.calc_tau(X, self.loss(x, x_pred))
        tau_y = self.calc_tau(X, self.loss(y, y_pred))
        self._apply_steps(X, tau_x * np.sign(x - x_pred), tau_y * np.sign(y - y_pred))
        return self

    def fit_n(self, frames):
        """PA update over the stored samples indexed by *frames*, scaled by learning_rate."""
        for k in frames:
            [X, x, y] = self.data[k]
            (x_pred, y_pred) = self.predict_one(X, False, (- 1), (- 1))
            tau_x = self.calc_tau(X, self.loss(x, x_pred))
            tau_y = self.calc_tau(X, self.loss(y, y_pred))
            self._apply_steps(X,
                              self.learning_rate * tau_x * np.sign(x - x_pred),
                              self.learning_rate * tau_y * np.sign(y - y_pred))
        return self

    def predict_one(self, x, use_momentum, x_act, y_act):
        """Linear prediction for both coordinates.

        NOTE(review): ``use_momentum``, ``x_act`` and ``y_act`` are currently
        unused; kept for interface compatibility with callers.
        """
        return ((utils.math.dot(x, self.weights_x) + self.intercept_x), (utils.math.dot(x, self.weights_y) + self.intercept_y))
def qa2hypo(question, answer):
    """Convert a question/answer pair into a declarative hypothesis string.

    Tries each task-specific regex in turn; the first match fills that task's
    output template.

    Raises:
        Exception: if no regex matches the question.
    """
    question = question.strip()  # strip() == lstrip().rstrip()
    answer = answer.strip()
    for task, regex in regexs.items():
        template = out_strings[task]
        # NOTE(review): `apply` must be a project-local helper -- the Python 2
        # builtin of that name no longer exists in Python 3. Verify.
        result = apply(regex, template, question, answer)
        if result:
            return result
    raise Exception('Unknown question format: {}'.format(question))
def get_values_from_args():
    """Parse CLI arguments and load the YAML config file they point at.

    Returns:
        (config_dict, parsed_args) tuple.
    """
    parser = argparse.ArgumentParser()
    parser.add_argument('--config', '-c', help='YAML configuration file', type=str, default='./models/att/att.yaml')
    parser.add_argument('--test-only', '-t', action='store_true', default=False)
    parser.add_argument('--local_rank', default=0)
    args = parser.parse_args()
    with open(args.config, 'r') as cfg_file:
        cfg = yaml.safe_load(cfg_file)
    return (cfg, args)
class ToTensor(object):
    """Paired transform: convert the image to a tensor, pass the target through unchanged."""

    def __call__(self, image, target):
        tensor = F.to_tensor(image)
        return (tensor, target)
def get_scale_factor(img, auto_scale, as_uint16):
    """Return the intensity scale factor for converting *img* to the target dtype.

    With auto_scale: uint8 -> 256 when widening to uint16, otherwise 1;
    uint16 input needs no scaling (1); any other dtype is assumed normalized
    and scales to the full 16-bit (65535) or 8-bit (255) range.
    Without auto_scale the factor is always 1.
    """
    if not auto_scale:
        return 1
    if img.dtype == np.uint8:
        return 256 if as_uint16 else 1
    if img.dtype == np.uint16:
        return 1
    return 65535 if as_uint16 else 255
class roi_pose_head_v1convX(nn.Module):
    """Keypoint (pose) head: RoI transform followed by a stack of conv+ReLU layers."""

    def __init__(self, dim_in, roi_xform_func, spatial_scale):
        super().__init__()
        self.dim_in = dim_in
        self.roi_xform = roi_xform_func
        self.spatial_scale = spatial_scale
        hidden_dim = cfg.KRCNN.CONV_HEAD_DIM
        kernel_size = cfg.KRCNN.CONV_HEAD_KERNEL
        pad_size = kernel_size // 2  # 'same' padding for odd kernels
        module_list = []
        for _ in range(cfg.KRCNN.NUM_STACKED_CONVS):
            module_list.append(nn.Conv2d(dim_in, hidden_dim, kernel_size, 1, pad_size))
            module_list.append(nn.ReLU(inplace=True))
            dim_in = hidden_dim
        self.conv_fcn = nn.Sequential(*module_list)
        self.dim_out = hidden_dim
        self.apply(self._init_weights)

    def _init_weights(self, m):
        """Initialize conv weights per cfg.KRCNN.CONV_INIT; biases to zero."""
        if isinstance(m, nn.Conv2d):
            if cfg.KRCNN.CONV_INIT == 'GaussianFill':
                init.normal_(m.weight, std=0.01)
            elif cfg.KRCNN.CONV_INIT == 'MSRAFill':
                mynn.init.MSRAFill(m.weight)
            else:
                # Bug fix: the ValueError was previously constructed but never
                # raised, silently leaving weights at their defaults.
                raise ValueError('Unexpected cfg.KRCNN.CONV_INIT: {}'.format(cfg.KRCNN.CONV_INIT))
            init.constant_(m.bias, 0)

    def detectron_weight_mapping(self):
        """Map this module's state-dict names to Detectron blob names."""
        detectron_weight_mapping = {}
        orphan_in_detectron = []
        for i in range(cfg.KRCNN.NUM_STACKED_CONVS):
            # Sequential interleaves Conv2d/ReLU, hence index 2*i for the convs.
            detectron_weight_mapping[('conv_fcn.%d.weight' % (2 * i))] = ('conv_fcn%d_w' % (i + 1))
            detectron_weight_mapping[('conv_fcn.%d.bias' % (2 * i))] = ('conv_fcn%d_b' % (i + 1))
        return (detectron_weight_mapping, orphan_in_detectron)

    def forward(self, x, rpn_ret):
        x = self.roi_xform(x, rpn_ret, blob_rois='keypoint_rois', method=cfg.KRCNN.ROI_XFORM_METHOD, resolution=cfg.KRCNN.ROI_XFORM_RESOLUTION, spatial_scale=self.spatial_scale, sampling_ratio=cfg.KRCNN.ROI_XFORM_SAMPLING_RATIO)
        x = self.conv_fcn(x)
        return x
# NOTE(review): a stray '.auto' token (residue of a stripped decorator, invalid
# syntax) was removed here.
class BaseConfig(ABC):
    """Base class for JSON-serializable configuration objects."""

    def to_str(self) -> str:
        # Placeholder for subclasses; intentionally returns None here.
        pass

    def to_json(self) -> str:
        """Serialize the instance attributes as a JSON string."""
        return json.dumps(self.__dict__)

    def get_config(self) -> dict:
        # NOTE(review): relies on dataclasses.asdict, so this only works for
        # dataclass subclasses -- verify.
        return asdict(self)

    def asdict(self) -> dict:
        # Inside the method body, `asdict` resolves to the module-level
        # dataclasses.asdict, not this method.
        return asdict(self)

    def to_dict(self) -> dict:
        return deepcopy(self.__dict__)

    def to_file(self, fpath: os.PathLike) -> None:
        """Write the config as JSON to *fpath*.

        Bug fix: previously this dumped ``self.to_json()`` (already a JSON
        string), producing a double-encoded file that ``from_file`` could not
        round-trip into keyword arguments. The dict itself is serialized now.
        """
        with open(fpath, 'w') as f:
            json.dump(self.to_dict(), f, indent=4)

    def from_file(self, fpath: os.PathLike) -> None:
        """Re-initialize this instance from a JSON config file.

        Bug fix: the file was previously opened in 'w' mode first, which
        truncated it before it could be read.
        """
        with open(fpath, 'r') as f:
            config = json.load(f)
        self.__init__(**config)

    def __getitem__(self, key):
        # Dict-style access to attributes.
        return super().__getattribute__(key)
def list_ctx_and_func_name(fnames):
    """Expand each function name into (fname, ctx[0], ctx[1]) triples from its contexts."""
    return [
        (fname, ctx[0], ctx[1])
        for fname in fnames
        for ctx in list_context(snake_to_camel(fname))
    ]
# NOTE(review): `_utils.test()` below looks like a stripped `@test_utils.test()`
# decorator for the function that follows -- confirm upstream.
_utils.test()
def test_oop_with_static_decorator():
    """OOP smoke test for static methods on a (taichi) data-oriented class.

    NOTE(review): decorators appear stripped by extraction -- `_oriented` is
    likely `@ti.data_oriented`, and the two methods were likely decorated
    (`@ti.kernel`/`@staticmethod`); as written the methods take no `self`,
    so plain-Python instance calls would fail without those decorators.
    """
    _oriented
    class TestStatic():
        def kernel_static() -> ti.i32:
            return 42
        def raw_static():
            return 3
    a = TestStatic()
    assert (a.kernel_static() == 42)
    assert (a.raw_static() == 3)
# NOTE(review): the bare `_module` below is likely a stripped registration
# decorator (e.g. `@DATASETS.register_module`); as written it is a no-op name
# lookup -- confirm upstream.
_module
class PointCloudDataset(Dataset):
    """Abstract base dataset for point clouds; subclasses implement loading and eval.

    NOTE(review): `pre_pipeline` and `_filter_imgs` read attributes
    (`img_prefix`, `img_infos`, ...) that are never set in this class -- they
    appear carried over from an image-dataset template; verify before use.
    """
    # Number of per-point features; -1 means "unset" until a subclass defines it.
    NumPointFeatures = (- 1)
    CLASSES = None

    def __init__(self, root_path, info_path, pipeline=None, test_mode=False, class_names=None, **kwrags):
        self._info_path = info_path
        self._root_path = Path(root_path)
        self._class_names = class_names
        self.test_mode = test_mode
        self._set_group_flag()
        if (pipeline is None):
            self.pipeline = None
        else:
            self.pipeline = Compose(pipeline)

    def __getitem__(self, index):
        raise NotImplementedError

    def __len__(self):
        raise NotImplementedError

    def get_sensor_data(self, query):
        raise NotImplementedError

    def evaluation(self, dt_annos, output_dir):
        raise NotImplementedError

    def ground_truth_annotations(self):
        raise NotImplementedError

    def pre_pipeline(self, results):
        # Seed the results dict with prefixes/fields expected by the pipeline.
        results['img_prefix'] = self.img_prefix
        results['seg_prefix'] = self.seg_prefix
        results['proposal_file'] = self.proposal_file
        results['bbox_fields'] = []
        results['mask_fields'] = []

    def _filter_imgs(self, min_size=32):
        """Indices of images whose smaller side is at least *min_size* pixels."""
        valid_inds = []
        for (i, img_info) in enumerate(self.img_infos):
            if (min(img_info['width'], img_info['height']) >= min_size):
                valid_inds.append(i)
        return valid_inds

    def _set_group_flag(self):
        # All samples share group 1 (no aspect-ratio grouping for point clouds).
        self.flag = np.ones(len(self), dtype=np.uint8)

    def prepare_train_input(self, idx):
        raise NotImplementedError

    def prepare_test_input(self, idx):
        raise NotImplementedError
# Bug fix: the two decorator lines below had their '@given'/'@settings' heads
# stripped by extraction, leaving `(expr=...)` -- which is not even valid
# Python. Restored as the hypothesis decorators they clearly were.
@given(expr=(st.text() | st.lists((st.sampled_from(['.', '}', '{', '$']) | st.text())).map(''.join)))
@settings(deadline=None)
def test_random_expression(expr):
    """Fuzz test: evaluate must either succeed or raise RuntimeExpressionError, never crash."""
    try:
        expressions.evaluate(expr, context)
    except RuntimeExpressionError:
        pass
def _cook_nd_args(a, s=None, axes=None, invreal=0):
if (s is None):
shapeless = 1
if (axes is None):
s = list(a.shape)
else:
s = take(a.shape, axes)
else:
shapeless = 0
s = list(s)
if (axes is None):
axes = list(range((- len(s)), 0))
if (len(s) != len(axes)):
raise ValueError('Shape and axes have different lengths.')
if (invreal and shapeless):
s[(- 1)] = ((a.shape[axes[(- 1)]] - 1) * 2)
return (s, axes) |
def gmul(W, x):
    """Batched graph multiplication: contract x with each of the J operators in W.

    Expects W of shape (b, N, N, J) -- a stack of J operators per batch -- and
    x of shape (b, N, d). Returns a contiguous (b, N, J*d) tensor with the
    per-operator results concatenated along the feature dimension.
    """
    num_nodes = W.size(-2)
    # Fold the operator dimension J into the row dimension: (b, J*N, N).
    stacked = torch.cat(W.split(1, 3), 1).squeeze(3)
    # One batched matmul handles all operators at once: (b, J*N, d).
    mixed = torch.bmm(stacked, x)
    # Unfold back into J chunks of (b, N, d) and concatenate their features.
    chunks = mixed.split(num_nodes, 1)
    return torch.cat(chunks, 2).contiguous()
def test_isinstance_check():
    """ObjectProxy passes isinstance under shim_isinstance and the check is recorded."""
    proxy = tt.ObjectProxy(42)
    with tt.shim_isinstance():
        assert isinstance(proxy, int)
    # The usage trace remembers which types were isinstance-checked on the proxy.
    assert (int in tt.UsageTraceNode.from_proxy(proxy).type_checks)
def test_numpytype_datetime64_parameter():
    """The string form of a parameterized NumpyType round-trips through the parser."""
    t = NumpyType('datetime64', {'__array__': 'Something'})
    assert (str(parser.parse(str(t))) == str(t))
def exponential_mechanism(q, eps, sensitivity, prng=np.random, monotonic=False):
    """Differentially private index selection via the exponential mechanism.

    Samples an index in ``range(q.size)`` with probability proportional to
    ``exp(coef * eps * q / sensitivity)``; ``coef`` is 1 for monotonic quality
    scores and 1/2 otherwise.
    """
    if monotonic:
        coef = 1.0
    else:
        coef = 0.5
    scores = ((coef * eps) / sensitivity) * q
    # Softmax computed through logsumexp for numerical stability.
    probabilities = np.exp(scores - logsumexp(scores))
    return prng.choice(q.size, p=probabilities)
def write(graph_file, num_vertices: int, edges: Iterable[Tuple[(int, int)]]):
    """Write a graph: a vertex-count line, then one 'u v' line per edge."""
    graph_file.write(f'{num_vertices}\n')
    for edge in edges:
        u, v = edge
        graph_file.write(f'{u} {v}\n')
def LF_doctime_complication(span):
    """Labeling function: POSITIVE when the span's 'tdelta' property is below 1, else ABSTAIN."""
    if 'tdelta' in span.props and span.props['tdelta'] < 1:
        return POSITIVE
    return ABSTAIN
def vgg11_bn(pretrained=False, progress=True, device='cpu', **kwargs):
    """VGG-11 with batch normalization (configuration 'A', batch_norm=True).

    Forwards to the generic _vgg builder; extra kwargs reach the model constructor.
    """
    return _vgg('vgg11_bn', 'A', True, pretrained, progress, device, **kwargs)
def register_Ns3SfVectorTlvValue_methods(root_module, cls):
    """Register constructors/methods of ns3::SfVectorTlvValue on the binding class."""
    cls.add_constructor([param('ns3::SfVectorTlvValue const &', 'arg0')])  # copy constructor
    cls.add_constructor([])  # default constructor
    cls.add_method('Copy', 'ns3::SfVectorTlvValue *', [], is_const=True, is_virtual=True)
    cls.add_method('Deserialize', 'uint32_t', [param('ns3::Buffer::Iterator', 'start'), param('uint64_t', 'valueLength')], is_virtual=True)
    return
def ensemble_average_binarize(img):
    """Majority-vote binarization: a pixel is True when at least 2 of the 3 methods agree."""
    votes = [niblack(img) * 1.0, sauvola(img) * 1.0, otsu(img) * 1.0]
    mean_vote = sum(votes) / 3
    return mean_vote > 0.5
def _test_block_gc():
    """Stress-test sparse-grid deactivation/GC across repeated rebuilds.

    NOTE(review): `init`, `build_grid` and `move` were almost certainly
    `@ti.kernel` functions before decorators were stripped by extraction.
    """
    N = 100000
    dx = (1 / 128)
    inv_dx = (1.0 / dx)
    x = ti.Vector.field(2, dtype=ti.f32)
    indices = ti.ij
    grid_m = ti.field(dtype=ti.i32)
    # Two-level sparse pointer hierarchy ending in dense 8x8 cell blocks.
    grid = ti.root.pointer(indices, 64)
    grid.pointer(indices, 32).dense(indices, 8).place(grid_m)
    ti.root.dense(ti.i, N).place(x)
    def init():
        # Scatter particles uniformly inside a 0.1-wide box around (0.5, 0.5).
        for i in x:
            x[i] = ti.Vector([((ti.random() * 0.1) + 0.5), ((ti.random() * 0.1) + 0.5)])
    init()
    def build_grid():
        # Bin particles into cells; touching a cell activates its sparse block.
        for p in x:
            base = int(ti.floor(((x[p] * inv_dx) - 0.5)))
            grid_m[base] += 1
    def move():
        # Shift all particles upward so different blocks get activated each pass.
        for p in x:
            x[p] += ti.Vector([0.0, 0.1])
    assert (grid._num_dynamically_allocated == 0)
    for _ in range(100):
        # deactivate_all frees cells logically; GC should keep the number of
        # physically allocated blocks bounded across iterations.
        grid.deactivate_all()
        build_grid()
        move()
    ti.sync()
    assert (1 <= grid._num_dynamically_allocated <= 2), grid._num_dynamically_allocated
class ElectraConfig(LMConfig):
    """Language-model config for ELECTRA discriminator variants.

    Class-level tables below describe the supported checkpoints, their batch
    size limits per GPU memory (GB), and per-dataset finetuning commands.
    """

    def __init__(self, args=None):
        super(ElectraConfig, self).__init__(args)
        self.model = 'Electra'
        self._post_init(args)

    # Inherit the parent's CLI-prefix mapping unchanged.
    para_prefix = {**LMConfig.para_prefix}
    args_to_parse = list(para_prefix.keys())
    # Per-variant metadata: HF checkpoint, hidden size, max batch sizes keyed
    # by GPU memory (GB) for train/inference, and pretrained-LM finetune
    # commands per dataset.
    # NOTE(review): the 'products' cmd strings pass --warmup_epochs twice
    # (0.2 then 0.6); argparse keeps the last value -- confirm intent.
    meta_data = {'Electra': SN(hf_model='google/electra-small-discriminator', father_model='Electra', hidden_dim=256, max_bsz=SN(train={12: 8, 16: 40, 24: 9, 32: 80, 40: 18, 70: 48}, inf={12: 150, 16: 350, 24: 150, 32: 700, 40: 300, 70: 560}), prt_lm={'arxiv': SN(model='FtV1', cmd='--att_dropout=0.1 --cla_dropout=0.4 --dropout=0.3 --epochs=4 --eq_batch_size=36 --eval_patience=50000 --label_smoothing_factor=0.3 --load_best_model_at_end=T --lr=2e-05 --warmup_epochs=0.6', max_n_gpus=4), 'products': SN(model='FtV1', cmd='--lr=2e-05 --eq_batch_size=144 --weight_decay=0.01 --dropout=0.1 --att_dropout=0.3 --cla_dropout=0.2 --cla_bias=T --warmup_epochs=0.2 --eval_patience=65308 --epochs=4 --label_smoothing_factor=0.1 --warmup_epochs=0.6', max_n_gpus=8), 'paper': SN(model='FtV1', cmd='--att_dropout=0.1 --cla_dropout=0.4 --dropout=0.3 --epochs=5 --eq_batch_size=288 --eval_patience=410000 --label_smoothing_factor=0.3 --load_best_model_at_end=T --lr=5e-05 --warmup_epochs=0.6', max_n_gpus=16)}), 'Electra-base': SN(hf_model='google/electra-base-discriminator', father_model='Electra', hidden_dim=768, max_bsz=SN(train={12: 8, 16: 18, 24: 9, 32: 40, 40: 18, 70: 48}, inf={12: 150, 16: 150, 24: 150, 32: 512, 40: 300, 70: 560}), prt_lm={'arxiv': SN(model='FtV1', cmd='--att_dropout=0.1 --cla_dropout=0.4 --dropout=0.3 --epochs=4 --eq_batch_size=36 --eval_patience=50000 --label_smoothing_factor=0.3 --load_best_model_at_end=T --lr=2e-05 --warmup_epochs=0.6', max_n_gpus=4), 'products': SN(model='FtV1', cmd='--lr=2e-05 --eq_batch_size=144 --weight_decay=0.01 --dropout=0.1 --att_dropout=0.3 --cla_dropout=0.2 --cla_bias=T --warmup_epochs=0.2 --eval_patience=65308 --epochs=4 --label_smoothing_factor=0.1 --warmup_epochs=0.6', max_n_gpus=8), 'paper': SN(model='FtV1', cmd='--att_dropout=0.1 --cla_dropout=0.4 --dropout=0.3 --epochs=5 --eq_batch_size=288 --eval_patience=410000 --label_smoothing_factor=0.3 --load_best_model_at_end=T --lr=5e-05 --warmup_epochs=0.6', max_n_gpus=16)}), 'Electra-large': 
                 SN(hf_model='google/electra-large-discriminator', father_model='Electra', hidden_dim=768, max_bsz=SN(train={12: 8, 16: 12, 24: 9, 32: 12, 40: 18, 70: 48}, inf={12: 150, 16: 200, 24: 150, 32: 200, 40: 300, 70: 560}), prt_lm={'arxiv': SN(model='FtV1', cmd='--att_dropout=0.1 --cla_dropout=0.4 --dropout=0.3 --epochs=4 --eq_batch_size=36 --eval_patience=50000 --label_smoothing_factor=0.3 --load_best_model_at_end=T --lr=2e-05 --warmup_epochs=0.6', max_n_gpus=4), 'products': SN(model='FtV1', cmd='--lr=2e-05 --eq_batch_size=144 --weight_decay=0.01 --dropout=0.1 --att_dropout=0.3 --cla_dropout=0.2 --cla_bias=T --warmup_epochs=0.2 --eval_patience=65308 --epochs=4 --label_smoothing_factor=0.1 --warmup_epochs=0.6', max_n_gpus=8), 'paper': SN(model='FtV1', cmd='--att_dropout=0.1 --cla_dropout=0.4 --dropout=0.3 --epochs=5 --eq_batch_size=288 --eval_patience=410000 --label_smoothing_factor=0.3 --load_best_model_at_end=T --lr=5e-05 --warmup_epochs=0.6', max_n_gpus=16)})}
def get_OT_dual_sol(feature_extractor, trainloader, testloader, training_size=10000, p=2, resize=32, device='cuda'):
    """Compute the dual solution of the OT dataset distance between two loaders.

    Args:
        feature_extractor: torch model whose final ``fc`` is replaced by
            Identity; its (frozen) features define the OT ground cost.
        trainloader/testloader: source/target data loaders.
        training_size: max number of samples used by the dual solve.
        p: order of the OT cost.
        resize: expected spatial input size, i.e. inputs are (3, resize, resize).
        device: compute device for the embedder and the OT solve.

    Returns:
        The dual-solution tensors, moved to CPU.
    """
    embedder = feature_extractor.to(device)
    embedder.fc = torch.nn.Identity()
    # Freeze the embedder. (Bug fix: the loop variable used to be named `p`,
    # shadowing the `p` parameter.)
    for param in embedder.parameters():
        param.requires_grad = False
    # Bug fix: `device` and `p` are now forwarded instead of the hard-coded
    # 'cuda' and 2 literals (defaults unchanged, so existing callers behave
    # identically).
    feature_cost = FeatureCost(src_embedding=embedder, src_dim=(3, resize, resize), tgt_embedding=embedder, tgt_dim=(3, resize, resize), p=p, device=device)
    # NOTE(review): `_x`/`_y` look like mangled `λ_x`/`λ_y` keyword names from
    # the otdd DatasetDistance API -- confirm against the installed version.
    dist = DatasetDistance(trainloader, testloader, inner_ot_method='exact', debiased_loss=True, feature_cost=feature_cost, _x=1.0, _y=1.0, sqrt_method='spectral', sqrt_niters=10, precision='single', p=p, entreg=0.1, device=device)
    tic = time.perf_counter()
    dual_sol = dist.dual_sol(maxsamples=training_size, return_coupling=True)
    toc = time.perf_counter()
    print(f'distance calculation takes {(toc - tic):0.4f} seconds')
    for i in range(len(dual_sol)):
        dual_sol[i] = dual_sol[i].to('cpu')
    return dual_sol
# Bug fix: the decorator head was stripped by extraction, leaving only the
# bare string '(...)'; restored as the mock patch it clearly was (the patched
# target matches the injected `load_signal_mock` parameter).
@patch('orion.benchmark.load_signal')
def test__load_signal_test_split_float(load_signal_mock):
    """A float test_split is forwarded as test_size and the (train, test) pair is returned."""
    train = Mock(autospec=pd.DataFrame)
    test = Mock(autospec=pd.DataFrame)
    load_signal_mock.return_value = (train, test)
    test_split = 0.2
    returned = benchmark._load_signal('signal-name', test_split)
    assert isinstance(returned, tuple)
    assert (len(returned) == 2)
    expected_calls = [call('signal-name', test_size=test_split)]
    assert (load_signal_mock.call_args_list == expected_calls)
# NOTE(review): `_interact(...)` below is likely a stripped Sage `@interact`
# decorator for the function that follows -- confirm upstream.
_interact(title=(lambda : text_control('<h2>Taylor polynomial</h2>')), f=(lambda : input_box((sin(x) * exp((- x))), label='$f(x)=$')), order=(lambda : slider(range(1, 13))))
def taylor_polynomial(title, f, order: int):
    """Sage interact body: plot f and its Taylor polynomial of given order at x0=0."""
    x0 = 0
    p = plot(f, (x, (- 1), 5), thickness=2)
    # Red dot marks the expansion point on the curve.
    dot = point((x0, f(x=x0)), pointsize=80, rgbcolor=(1, 0, 0))
    ft = f.taylor(x, x0, order)
    pt = plot(ft, ((- 1), 5), color='green', thickness=2)
    html(('$f(x)\\;=\\;%s$' % latex(f)))
    html(('$\\hat{f}(x;%s)\\;=\\;%s+\\mathcal{O}(x^{%s})$' % (x0, latex(ft), (order + 1))))
    show(((dot + p) + pt), ymin=(- 0.5), ymax=1)
def _memoize_get_funcs(func):
    """Decorator: memoize LAPACK/BLAS function lookups.

    The cache key combines (names, dtype, ilp64) with each array's dtype char
    and Fortran-contiguity, since those determine the routine variant returned.
    """
    memo = {}
    func.memo = memo  # exposed so callers can inspect/clear the cache
    # NOTE(review): the bare `(func)` below is a no-op expression statement;
    # it looks like a stripped `@functools.wraps(func)` decorator -- confirm
    # against the upstream source.
    (func)
    def getter(names, arrays=(), dtype=None, ilp64=False):
        key = (names, dtype, ilp64)
        for array in arrays:
            key += (array.dtype.char, array.flags.fortran)
        try:
            value = memo.get(key)
        except TypeError:
            # Unhashable key component: fall through without caching this call.
            key = None
            value = None
        if (value is not None):
            return value
        value = func(names, arrays, dtype, ilp64)
        if (key is not None):
            memo[key] = value
        return value
    return getter
def test():
    """Smoke test: push one random CIFAR-sized batch through LiSHT_GoogLeNet."""
    model = LiSHT_GoogLeNet()
    batch = torch.randn(1, 3, 32, 32)
    out = model(batch)
    print(out.size())
# NOTE(review): `_node_type()` below looks like a stripped registration
# decorator (e.g. `@optplan.register_node_type()`); as written it is a bare call.
_node_type()
class Product(optplan.Function):
    """Optplan graph node representing the product of several functions."""
    type = schema_utils.polymorphic_model_type('function.product')
    functions = types.ListType(optplan.ReferenceType(optplan.Function))

    def __mul__(self, obj):
        """Multiply by another Product/Function/number, flattening into one Product node."""
        if isinstance(obj, Product):
            return Product(functions=(self.functions + obj.functions))
        if isinstance(obj, optplan.Function):
            return Product(functions=(self.functions + [obj]))
        if isinstance(obj, (numbers.Number, optplan.ComplexNumber)):
            # Plain numbers are wrapped as constant nodes to keep the graph uniform.
            return Product(functions=(self.functions + [make_constant(obj)]))
        raise TypeError('Attempting to multiply a node with type {} to type `Product`.'.format(type(obj)))
def get_channel(channel_type, **kwargs):
    """Instantiate the channel class registered under *channel_type* with **kwargs."""
    return CHANNEL_CLASSES[channel_type](**kwargs)
class SparseEdgeConv(nn.Module):
    """Wrapper around SparseEdgeConvLayer operating on a batch object in place."""

    def __init__(self, dim_in, dim_out, bias=False, **kwargs):
        super(SparseEdgeConv, self).__init__()
        self.model = SparseEdgeConvLayer(dim_in, dim_out, bias=bias)

    def forward(self, batch):
        # Updates node features on the batch object and returns the same batch.
        batch.node_feature = self.model(batch.node_feature, batch.edge_index, batch.edge_feature)
        return batch
def _ankan(state: State, target):
    """Apply a concealed kan (ankan) of tile *target* for the current player.

    Builds the meld, records it, and swaps in the player's post-kan hand.
    Returns the updated (immutable) state.
    """
    curr_player = state.current_player
    # NOTE(review): `target + 34` appears to encode the ankan variant in
    # Meld.init's first field -- confirm against Meld's encoding.
    meld = Meld.init((target + 34), target, src=0)
    state = _append_meld(state, meld, curr_player)
    hand = state._hand.at[curr_player].set(Hand.ankan(state._hand[curr_player], target))
    return state.replace(_hand=hand)
def most_requent_value_compression(t, base_bit=8, compressed_bit=1):
    """Average bits per element when the most frequent value is stored compressed.

    Occurrences of the single most common value in *t* cost ``compressed_bit``
    bits each; every other element costs ``base_bit`` bits.
    """
    torch.cuda.empty_cache()
    counts = torch.unique(t.flatten(), return_counts=True)[1]
    most_frequent = counts.max()
    total = t.numel()
    bits = (most_frequent * compressed_bit) + ((total - most_frequent) * base_bit)
    return bits / total
class NVCNet(Module):
    """Voice-conversion network: content encoder + speaker embedder + decoder.

    NOTE(review): ``self.parameter_scope`` and ``self.training`` are assumed to
    be provided by the ``Module`` base class -- confirm.
    """

    def __init__(self, hp):
        # Bug fix: the base class was never initialized; initialize it first.
        super().__init__()
        self.hp = hp
        self.encoder = Encoder(hp)
        self.decoder = Decoder(hp)
        self.speaker = Speaker(hp)

    def call(self, x, y):
        """Convert utterance *x* into the voice of reference utterance *y*."""
        style = self.embed(y)[0]
        content = self.encode(x)
        out = self.decode(content, style)
        return out

    def encode(self, x):
        """Extract speaker-independent content features from *x*."""
        with nn.parameter_scope('', self.parameter_scope):
            with nn.parameter_scope('encoder'):
                return self.encoder(x)

    def decode(self, content, spk_emb):
        """Synthesize output from content features and a speaker embedding."""
        with nn.parameter_scope('', self.parameter_scope):
            with nn.parameter_scope('decoder'):
                x = self.decoder(content, spk_emb)
        return x

    def embed(self, x):
        """Return (sampled speaker embedding, mu, logvar) for utterance *x*."""
        with nn.parameter_scope('', self.parameter_scope):
            with nn.parameter_scope('embedding'):
                (mu, logvar) = self.speaker(x)
                spk_emb = self.sample(mu, logvar)
        return (spk_emb, mu, logvar)

    def kl_loss(self, mu, logvar):
        """KL divergence of N(mu, exp(logvar)) from N(0, 1), summed over the embedding axis."""
        return (0.5 * F.mean(F.sum((((F.exp(logvar) + (mu ** 2)) - 1.0) - logvar), axis=1)))

    def sample(self, mu, logvar):
        """Reparameterization trick during training; the mean at inference time."""
        if self.training:
            eps = F.randn(shape=mu.shape)
            return (mu + (F.exp((0.5 * logvar)) * eps))
        return mu
def test_Record_getitem():
    """Record field access works both via __getitem__ and via attribute syntax."""
    record = ak.highlevel.Array([{'x': 0.0, 'y': []}, {'x': 1.1, 'y': [1]}, {'x': 2.2, 'y': [2, 2]}, {'x': 3.3, 'y': [3, 3, 3]}, {'x': 4.4, 'y': [4, 4, 4, 4]}], check_valid=True)[3]
    def f1(x):
        return x['x']
    assert (f1(record) == 3.3)
    def f2(x):
        return x['y']
    assert (ak.operations.to_list(f2(record)) == [3, 3, 3])
    def f3(x):
        return x.x
    assert (f3(record) == 3.3)
    def f4(x):
        return x.y
    assert (ak.operations.to_list(f4(record)) == [3, 3, 3])
def make_temp_dataset(in_folder, out_folder, do_norm=True):
    """Convert per-object `pointcloud.ply` files into the npz dataset layout.

    For each sub-directory of `in_folder` this writes, under
    out_folder/large_scenes/<obj>/, a `pointcloud.npz` (normalised points +
    unit normals) and a `points_iou.npz` (occupancy query set), then a
    `test.lst` listing every object name.

    When `do_norm` is True each cloud is centred on its bbox midpoint and
    scaled by its largest extent; otherwise loc=0, scale=1.
    """
    os.makedirs(out_folder, exist_ok=True)
    # One pointcloud.ply path per sub-directory of in_folder.
    all_pc_list = []
    for f in sorted(os.listdir(in_folder)):
        filepath = os.path.join(in_folder, f)
        if os.path.isdir(filepath):
            all_pc_list.append(os.path.join(in_folder, f, 'pointcloud.ply'))
        else:
            continue
    print(all_pc_list)
    for pc_path in all_pc_list:
        # read_point_ply presumably returns (N,3) points and (N,3) normals — TODO confirm.
        (points, normals) = read_point_ply(pc_path)
        vert_max = points.max(axis=0)
        vert_min = points.min(axis=0)
        if do_norm:
            # Centre on the bbox midpoint; scale by the largest axis extent.
            loc = ((vert_min + vert_max) / 2)
            scale = (vert_max - vert_min).max()
        else:
            loc = np.array([0, 0, 0], dtype=np.float64)
            scale = np.array([1.0], dtype=np.float64)
        print('loc', loc, 'scale', scale)
        points = ((points - loc) / scale)
        # Re-normalise normals to unit length.
        normals = (normals / np.linalg.norm(normals, axis=1, keepdims=True))
        # Object name = containing directory (assumes '/'-separated paths).
        obj_name = pc_path.split('/')[(- 2)]
        os.makedirs(os.path.join(out_folder, 'large_scenes', obj_name), exist_ok=True)
        save_npz_path = os.path.join(out_folder, 'large_scenes', obj_name, 'pointcloud.npz')
        # Half precision for bulk arrays; full precision for loc/scale metadata.
        npz_dict = dict(points=points.astype(np.float16), normals=normals.astype(np.float16), loc=loc.astype(np.float64), scale=scale.astype(np.float64))
        np.savez(save_npz_path, **npz_dict)
        print(f'we save pointcloud npz to: {save_npz_path}')
        save_npz_path = os.path.join(out_folder, 'large_scenes', obj_name, 'points_iou.npz')
        # Query-point mix: 75% surface points jittered by STD, 25% exact
        # surface points; the uniform-box share (1 - RATIO_SUR - RATIO_STD)
        # is zero with these settings.
        NUM_SPATIAL_PTS = 1000000
        RATIO_SUR = 0.25
        RATIO_STD = 0.75
        if do_norm:
            STD = 0.1
        else:
            # Scale the jitter with the un-normalised object extent.
            STD = (0.1 * (vert_max - vert_min).max())
        spatial_points_xyz = np.concatenate([(points[np.random.choice(len(points), size=(int((NUM_SPATIAL_PTS * RATIO_STD)),))] + (np.random.randn(int((NUM_SPATIAL_PTS * RATIO_STD)), 3) * STD)), ((np.random.rand(int((NUM_SPATIAL_PTS * ((1 - RATIO_SUR) - RATIO_STD))), 3) * 1.1) - 0.55), points[np.random.choice(len(points), size=(int((NUM_SPATIAL_PTS * RATIO_SUR)),))]], axis=0)
        # Labels: the jittered/uniform block gets 1, the exact-surface block 0.
        # NOTE(review): this labelling convention is surprising for an
        # "occupancies" field — confirm how the consumer interprets it.
        spatial_points_occ = np.concatenate([np.ones([int((NUM_SPATIAL_PTS * (1 - RATIO_SUR)))]), np.zeros([int((NUM_SPATIAL_PTS * RATIO_SUR))])], axis=0)
        # Shuffle points and labels with the same permutation.
        shuffle_index = np.random.permutation(NUM_SPATIAL_PTS)
        spatial_points_xyz = spatial_points_xyz[shuffle_index]
        spatial_points_occ = spatial_points_occ[shuffle_index]
        # Occupancies are bit-packed; semantics are all-zero placeholders.
        npz_dict = dict(points=spatial_points_xyz.astype(dtype=np.float16), occupancies=np.packbits(spatial_points_occ.astype(dtype=bool)), z_scale=np.array(0).astype(np.float64), semantics=np.zeros([NUM_SPATIAL_PTS], dtype=np.int64))
        np.savez(save_npz_path, **npz_dict)
        print(f'we save points_iou npz to: {save_npz_path}')
    # Index file listing every object name, one per line.
    test_lst_path = os.path.join(out_folder, 'large_scenes', 'test.lst')
    with open(test_lst_path, 'w') as f:
        [f.write((s.split('/')[(- 2)] + '\n')) for s in all_pc_list]
    print(f'we save test list to: {test_lst_path}')
def fix_script(path):
    """Rewrite a '#!python' stub shebang to point at the running interpreter.

    Returns True when the file was rewritten, False when it exists but has a
    different first line, and None (falsy, preserved behaviour) when `path`
    is not a regular file.
    """
    if not os.path.isfile(path):
        return None
    with open(path, 'rb') as script:
        shebang = script.readline()
        if not shebang.startswith(b'#!python'):
            return False
        interpreter = sys.executable.encode(sys.getfilesystemencoding())
        shebang = b'#!' + interpreter + os.linesep.encode('ascii')
        body = script.read()
    with open(path, 'wb') as script:
        script.write(shebang)
        script.write(body)
    return True
# NOTE(review): the two lines below look like decorator remnants mangled by
# extraction — presumably '@pytest.mark.skipif(...)' and a taichi test
# decorator such as '@test_utils.test()'; confirm against the original file.
.skipif((version_info < (3, 8)), reason='This is a feature for python>=3.8')
_utils.test()
def test_exception_in_node_with_body():
    # Capture this frame so the expected error message can reference
    # absolute line numbers in this file. IMPORTANT: no lines may be added
    # between this call and the 'for' statement below — the expected message
    # hard-codes their relative offset (lineno + 3).
    frameinfo = getframeinfo(currentframe())
    def foo():
        for i in range(1, 2, 3):
            a = 1
            b = 1
            c = 1
            d = 1
    # Compiling foo must fail: taichi's range() accepts 1 or 2 arguments.
    with pytest.raises(ti.TaichiCompilationError) as e:
        foo()
    lineno = frameinfo.lineno
    file = frameinfo.filename
    msg = f'''
File "{file}", line {(lineno + 3)}, in foo:
for i in range(1, 2, 3):
Range should have 1 or 2 arguments, found 3'''
    print(e.value.args[0])
    # The full formatted compiler message must match exactly.
    assert (e.value.args[0] == msg)
class VGG(nn.Module):
    """VGG backbone plus classifier head that directly returns the loss.

    `cfg` is the usual VGG spec list: an int adds a 3x3 conv of that width,
    'M' adds a 2x2 max-pool. forward(x, target) returns the cross-entropy
    loss of the logits against `target`.
    """

    def __init__(self, cfg, batch_norm=False, num_classes=1000, init_weights=True):
        super(VGG, self).__init__()
        self.features = self.make_layers(cfg, batch_norm)
        self.avgpool = nn.AdaptiveAvgPool2d((7, 7))
        self.lin1 = nn.Linear(512 * 7 * 7, 4096)
        self.relu = nn.ReLU(True)
        self.dropout = nn.Dropout()
        self.lin2 = nn.Linear(4096, 4096)
        self.lin3 = nn.Linear(4096, num_classes)
        self.loss_fn = nn.CrossEntropyLoss()
        if init_weights:
            self._initialize_weights()

    def forward(self, x, target):
        """Return the cross-entropy loss of the prediction for `x`."""
        feats = self.avgpool(self.features(x))
        flat = torch.flatten(feats, 1)
        hidden = self.dropout(self.relu(self.lin1(flat)))
        hidden = self.dropout(self.relu(self.lin2(hidden)))
        logits = self.lin3(hidden)
        return self.loss_fn(logits, target)

    def _initialize_weights(self):
        """He init for convs, unit gamma for BN, small normal for linears."""
        for module in self.modules():
            if isinstance(module, nn.Conv2d):
                nn.init.kaiming_normal_(module.weight, mode='fan_out', nonlinearity='relu')
                if module.bias is not None:
                    nn.init.constant_(module.bias, 0)
            elif isinstance(module, nn.BatchNorm2d):
                nn.init.constant_(module.weight, 1)
                nn.init.constant_(module.bias, 0)
            elif isinstance(module, nn.Linear):
                nn.init.normal_(module.weight, 0, 0.01)
                nn.init.constant_(module.bias, 0)

    def make_layers(self, cfg, batch_norm=False):
        """Translate a VGG config list into an nn.Sequential feature stack."""
        modules = []
        channels = 3
        for spec in cfg:
            if spec == 'M':
                modules.append(nn.MaxPool2d(kernel_size=2, stride=2))
                continue
            conv = nn.Conv2d(channels, spec, kernel_size=3, padding=1)
            if batch_norm:
                modules.extend([conv, nn.BatchNorm2d(spec), nn.ReLU(inplace=True)])
            else:
                modules.extend([conv, nn.ReLU(inplace=True)])
            channels = spec
        return nn.Sequential(*modules)
class Decoder(nn.Module):
    """Upsampling block: transposed conv -> batch norm -> LeakyReLU.

    With complex=True the complex-valued layer variants from `complex_nn`
    are used instead of the standard real-valued ones.
    """

    def __init__(self, in_channels, out_channels, kernel_size, stride, padding=(0, 0), complex=False):
        super().__init__()
        # NOTE: `complex` shadows the builtin; kept for caller compatibility.
        if complex:
            conv_cls, norm_cls = complex_nn.ComplexConvTranspose2d, complex_nn.ComplexBatchNorm2d
        else:
            conv_cls, norm_cls = nn.ConvTranspose2d, nn.BatchNorm2d
        self.transconv = conv_cls(in_channels, out_channels, kernel_size=kernel_size, stride=stride, padding=padding)
        self.bn = norm_cls(out_channels)
        self.relu = nn.LeakyReLU(inplace=True)

    def forward(self, x):
        """Return the activated, normalised, upsampled input."""
        return self.relu(self.bn(self.transconv(x)))
def test_save_and_load_empty():
    """Round-trip an all-zero dense matrix through save/load."""
    _check_save_and_load(np.zeros((4, 6)))
def threshold_local(image, block_size=3, method='gaussian', offset=0, mode='reflect', param=None, cval=0):
    """Compute a per-pixel adaptive threshold image.

    The local neighbourhood statistic is chosen by `method` ('generic',
    'gaussian', 'mean' or 'median'); `offset` is subtracted from the result.
    block_size entries must all be odd; a scalar is broadcast to every axis.
    """
    if np.isscalar(block_size):
        block_size = (block_size,) * image.ndim
    elif len(block_size) != image.ndim:
        raise ValueError('len(block_size) must equal image.ndim.')
    block_size = tuple(block_size)
    if any(b % 2 == 0 for b in block_size):
        raise ValueError(f'block_size must be odd! Given block_size {block_size} contains even values.')
    float_dtype = _supported_float_type(image.dtype)
    image = image.astype(float_dtype, copy=False)
    thresholded = np.zeros(image.shape, dtype=float_dtype)
    if method == 'generic':
        # `param` is the user-supplied aggregation callable here.
        ndi.generic_filter(image, param, block_size, output=thresholded, mode=mode, cval=cval)
    elif method == 'gaussian':
        # Default sigma puts +/-3 sigma inside the block.
        sigma = tuple((b - 1) / 6.0 for b in block_size) if param is None else param
        gaussian(image, sigma, output=thresholded, mode=mode, cval=cval)
    elif method == 'mean':
        ndi.uniform_filter(image, block_size, output=thresholded, mode=mode, cval=cval)
    elif method == 'median':
        ndi.median_filter(image, block_size, output=thresholded, mode=mode, cval=cval)
    else:
        raise ValueError('Invalid method specified. Please use `generic`, `gaussian`, `mean`, or `median`.')
    return thresholded - offset
def test_RuleManager_load():
    """Loading rules keeps the manager sorted by priority and back-links
    each rule to its manager."""
    rule_manager = RuleManager()
    for _ in range(100):
        # BUG FIX: random.randint requires both bounds — the original
        # `random.randint(20)` raised TypeError on every call.
        priority = random.randint(0, 20)
        rule = Rule(priority, None, None, None, None)
        rule_manager.load(rule)
    for i in range(1, len(rule_manager)):
        # Non-decreasing priority order after every load.
        assert rule_manager[i].priority >= rule_manager[i - 1].priority
        # Each rule must reference the manager it was loaded into.
        assert id(rule_manager[i].rule_manager) == id(rule_manager)
def TranslateX(img, v):
    """Translate `img` horizontally by fraction `v` of its width.

    Requires |v| <= 0.45; when the module-level `random_mirror` flag is set
    the direction is flipped with probability 0.5.
    """
    assert -0.45 <= v <= 0.45
    if random_mirror and random.random() > 0.5:
        v = -v
    shift = v * img.size[0]
    return img.transform(img.size, PIL.Image.AFFINE, (1, 0, shift, 0, 1, 0))
class LinRegSD(ShardDescriptor):
    """Synthetic noisy-sine shard for linear-regression experiments.

    Each rank seeds NumPy with its own rank, so shards differ across ranks
    but are reproducible. x is drawn uniformly from [60, 300] degrees and
    converted to radians; y = sin(x) + Gaussian noise.
    """

    def __init__(self, rank: int, n_samples: int = 10, noise: float = 0.15) -> None:
        np.random.seed(rank)
        # Enforce a minimum of 5 samples so the train/val split is non-empty.
        self.n_samples = max(n_samples, 5)
        self.interval = 240
        self.x_start = 60
        # BUG FIX: generate exactly self.n_samples rows. The original used
        # the raw n_samples argument, so for n_samples < 5 the dataset size
        # disagreed with self.n_samples and the split indices were wrong.
        x = (np.random.rand(self.n_samples, 1) * self.interval) + self.x_start
        x *= np.pi / 180
        y = np.sin(x) + np.random.normal(0, noise, size=(self.n_samples, 1))
        self.data = np.concatenate((x, y), axis=1)

    def get_dataset(self, dataset_type: str) -> np.ndarray:
        """First half of the rows for 'train', second half for 'val'."""
        if dataset_type == 'train':
            return self.data[:self.n_samples // 2]
        elif dataset_type == 'val':
            return self.data[self.n_samples // 2:]
        else:
            # Unknown dataset types silently yield None (preserved behaviour).
            return None

    def sample_shape(self) -> List[str]:
        """Shape of one feature vector, as a list of strings."""
        (*x, _) = self.data[0]
        return [str(i) for i in np.array(x, ndmin=1).shape]

    def target_shape(self) -> List[str]:
        """Shape of one target value, as a list of strings."""
        (*_, y) = self.data[0]
        return [str(i) for i in np.array(y, ndmin=1).shape]

    def dataset_description(self) -> str:
        return 'Allowed dataset types are `train` and `val`'
def sample_parameter(parameter, samples, seed=None, parent_key=''):
    """Draw `samples` values for one (possibly nested) parameter definition.

    `parameter` is a dict with a 'type' key ('choice', 'uniform',
    'loguniform', 'randint', 'randint_unique' or 'parameter_collection')
    plus its type-specific keys. Returns a list of (key, sampled_values)
    tuples, where keys are dotted paths rooted at `parent_key`.

    Raises ConfigError for a missing/unknown type, unexpected keys, or a
    non-positive 'min' with 'loguniform'.
    """
    if 'type' not in parameter:
        raise ConfigError(f'No type found in parameter {parameter}')
    return_items = []
    allowed_keys = ['seed', 'type']
    if seed is not None:
        np.random.seed(seed)
        # BUG FIX: the 'choice' branch samples with Python's `random`, which
        # np.random.seed does not control — seed it too so runs with an
        # explicit seed are fully reproducible.
        random.seed(seed)
    elif 'seed' in parameter:
        np.random.seed(parameter['seed'])
        random.seed(parameter['seed'])
    param_type = parameter['type']
    if param_type == 'choice':
        choices = parameter['options']
        allowed_keys.append('options')
        sampled_values = [random.choice(choices) for _ in range(samples)]
        return_items.append((parent_key, sampled_values))
    elif param_type == 'uniform':
        min_val = parameter['min']
        max_val = parameter['max']
        allowed_keys.extend(['min', 'max'])
        sampled_values = np.random.uniform(min_val, max_val, samples)
        return_items.append((parent_key, sampled_values))
    elif param_type == 'loguniform':
        if parameter['min'] <= 0:
            raise ConfigError('Cannot take log of values <= 0')
        # Sample uniformly in log space, then map back.
        min_val = np.log(parameter['min'])
        max_val = np.log(parameter['max'])
        allowed_keys.extend(['min', 'max'])
        sampled_values = np.exp(np.random.uniform(min_val, max_val, samples))
        return_items.append((parent_key, sampled_values))
    elif param_type == 'randint':
        min_val = int(parameter['min'])
        max_val = int(parameter['max'])
        allowed_keys.extend(['min', 'max'])
        sampled_values = np.random.randint(min_val, max_val, samples)
        return_items.append((parent_key, sampled_values))
    elif param_type == 'randint_unique':
        min_val = int(parameter['min'])
        max_val = int(parameter['max'])
        allowed_keys.extend(['min', 'max'])
        # Sampling without replacement guarantees distinct integers.
        sampled_values = np.random.choice(np.arange(min_val, max_val), samples, replace=False)
        return_items.append((parent_key, sampled_values))
    elif param_type == 'parameter_collection':
        # Recurse into sub-parameters, extending the dotted key path.
        sub_items = [sample_parameter(v, parent_key=f'{parent_key}.{k}', seed=seed, samples=samples) for (k, v) in parameter['params'].items()]
        return_items.extend([sub_item for item in sub_items for sub_item in item])
    else:
        raise ConfigError(f'Parameter type {param_type} not implemented.')
    if param_type != 'parameter_collection':
        # Reject typos / unsupported keys in the definition.
        extra_keys = set(parameter.keys()).difference(set(allowed_keys))
        if len(extra_keys) > 0:
            raise ConfigError(f"Unexpected keys in parameter definition. Allowed keys for type '{param_type}' are {allowed_keys}. Unexpected keys: {extra_keys}")
    return return_items
def splicing_NxN(root, out_root, image_list, n_image=100, start_id=0, size=2):
    """Create `n_image` spliced size x size image collages with matching masks.

    For each output, size*size images (and their .png masks) are drawn at
    random from `image_list` under ./DataDiffusion/<root>/, tiled into a
    grid, and written as splicing_<id>.jpg / splicing_<id>.png under
    ./DataDiffusion/<out_root>/, with ids starting at `start_id`.
    """
    print(' start augmentation: {}x{} '.format(size, size))
    # (removed: dead local `ha = 0` that was never read)
    for idx in tqdm(range(n_image)):
        list_image = []
        list_mask = []
        for x in range(size):
            # Start a row with one random image/mask pair...
            image_1 = choice(image_list)
            mask_1 = image_1.replace('jpg', 'png')
            img1 = cv2.imread('./DataDiffusion/{}/Image/{}'.format(root, image_1))
            mas1 = cv2.imread('./DataDiffusion/{}/Mask/{}'.format(root, mask_1))
            # ...then extend the row horizontally with size-1 more pairs.
            for y in range((size - 1)):
                image_2 = choice(image_list)
                mask_2 = image_2.replace('jpg', 'png')
                img2 = cv2.imread('./DataDiffusion/{}/Image/{}'.format(root, image_2))
                mas2 = cv2.imread('./DataDiffusion/{}/Mask/{}'.format(root, mask_2))
                img1 = np.concatenate([img1, img2], axis=1)
                mas1 = np.concatenate([mas1, mas2], axis=1)
            list_image.append(img1)
            list_mask.append(mas1)
        # Stack the finished rows vertically into the final collage.
        list_image_ha = list_image[0]
        list_mask_ha = list_mask[0]
        for i in range(1, size):
            list_image_ha = np.concatenate((list_image_ha, list_image[i]))
            list_mask_ha = np.concatenate((list_mask_ha, list_mask[i]))
        cv2.imwrite('./DataDiffusion/{}/Image/splicing_{}.jpg'.format(out_root, start_id), list_image_ha)
        cv2.imwrite('./DataDiffusion/{}/Mask/splicing_{}.png'.format(out_root, start_id), list_mask_ha)
        start_id += 1
def neg_mean_inertia(X, labels, centers):
    """Negative mean per-sample squared distance to the assigned center."""
    diffs = np.asarray(X - centers[labels])
    return -(diffs ** 2).sum(axis=1).mean()
class AreaRelatedTest(tf.test.TestCase):
    """Tests for np_box_list_ops geometry helpers: area, intersection, IoU,
    IoA, scaling, clipping, pruning, concatenation, coordinate-frame change
    and score filtering. Boxes are [y_min, x_min, y_max, x_max]."""

    def setUp(self):
        # Two fixed box lists shared by the area/intersection/IoU tests.
        boxes1 = np.array([[4.0, 3.0, 7.0, 5.0], [5.0, 6.0, 10.0, 7.0]], dtype=float)
        boxes2 = np.array([[3.0, 4.0, 6.0, 8.0], [14.0, 14.0, 15.0, 15.0], [0.0, 0.0, 20.0, 20.0]], dtype=float)
        self.boxlist1 = np_box_list.BoxList(boxes1)
        self.boxlist2 = np_box_list.BoxList(boxes2)

    def test_area(self):
        # area = (y_max - y_min) * (x_max - x_min) per box.
        areas = np_box_list_ops.area(self.boxlist1)
        expected_areas = np.array([6.0, 5.0], dtype=float)
        self.assertAllClose(expected_areas, areas)

    def test_intersection(self):
        # Pairwise intersection areas: shape (len(boxlist1), len(boxlist2)).
        intersection = np_box_list_ops.intersection(self.boxlist1, self.boxlist2)
        expected_intersection = np.array([[2.0, 0.0, 6.0], [1.0, 0.0, 5.0]], dtype=float)
        self.assertAllClose(intersection, expected_intersection)

    def test_iou(self):
        # IoU = intersection / union for every pair.
        iou = np_box_list_ops.iou(self.boxlist1, self.boxlist2)
        expected_iou = np.array([[(2.0 / 16.0), 0.0, (6.0 / 400.0)], [(1.0 / 16.0), 0.0, (5.0 / 400.0)]], dtype=float)
        self.assertAllClose(iou, expected_iou)

    def test_ioa(self):
        # IoA normalises the intersection by the *second* argument's areas.
        boxlist1 = np_box_list.BoxList(np.array([[0.25, 0.25, 0.75, 0.75], [0.0, 0.0, 0.5, 0.75]], dtype=np.float32))
        boxlist2 = np_box_list.BoxList(np.array([[0.5, 0.25, 1.0, 1.0], [0.0, 0.0, 1.0, 1.0]], dtype=np.float32))
        ioa21 = np_box_list_ops.ioa(boxlist2, boxlist1)
        expected_ioa21 = np.array([[0.5, 0.0], [1.0, 1.0]], dtype=np.float32)
        self.assertAllClose(ioa21, expected_ioa21)

    def test_scale(self):
        # y coordinates scale by 2.0, x coordinates by 3.0.
        boxlist = np_box_list.BoxList(np.array([[0.25, 0.25, 0.75, 0.75], [0.0, 0.0, 0.5, 0.75]], dtype=np.float32))
        boxlist_scaled = np_box_list_ops.scale(boxlist, 2.0, 3.0)
        expected_boxlist_scaled = np_box_list.BoxList(np.array([[0.5, 0.75, 1.5, 2.25], [0.0, 0.0, 1.0, 2.25]], dtype=np.float32))
        self.assertAllClose(expected_boxlist_scaled.get(), boxlist_scaled.get())

    def test_clip_to_window(self):
        # Boxes extending past the window are clipped to its bounds.
        boxlist = np_box_list.BoxList(np.array([[0.25, 0.25, 0.75, 0.75], [0.0, 0.0, 0.5, 0.75], [(- 0.2), (- 0.3), 0.7, 1.5]], dtype=np.float32))
        boxlist_clipped = np_box_list_ops.clip_to_window(boxlist, [0.0, 0.0, 1.0, 1.0])
        expected_boxlist_clipped = np_box_list.BoxList(np.array([[0.25, 0.25, 0.75, 0.75], [0.0, 0.0, 0.5, 0.75], [0.0, 0.0, 0.7, 1.0]], dtype=np.float32))
        self.assertAllClose(expected_boxlist_clipped.get(), boxlist_clipped.get())

    def test_prune_outside_window(self):
        # Boxes not fully inside the window are removed entirely.
        boxlist = np_box_list.BoxList(np.array([[0.25, 0.25, 0.75, 0.75], [0.0, 0.0, 0.5, 0.75], [(- 0.2), (- 0.3), 0.7, 1.5]], dtype=np.float32))
        (boxlist_pruned, _) = np_box_list_ops.prune_outside_window(boxlist, [0.0, 0.0, 1.0, 1.0])
        expected_boxlist_pruned = np_box_list.BoxList(np.array([[0.25, 0.25, 0.75, 0.75], [0.0, 0.0, 0.5, 0.75]], dtype=np.float32))
        self.assertAllClose(expected_boxlist_pruned.get(), boxlist_pruned.get())

    def test_concatenate(self):
        # Concatenation preserves order: boxlist1's rows then boxlist2's.
        boxlist1 = np_box_list.BoxList(np.array([[0.25, 0.25, 0.75, 0.75], [0.0, 0.0, 0.5, 0.75]], dtype=np.float32))
        boxlist2 = np_box_list.BoxList(np.array([[0.5, 0.25, 1.0, 1.0], [0.0, 0.0, 1.0, 1.0]], dtype=np.float32))
        boxlists = [boxlist1, boxlist2]
        boxlist_concatenated = np_box_list_ops.concatenate(boxlists)
        boxlist_concatenated_expected = np_box_list.BoxList(np.array([[0.25, 0.25, 0.75, 0.75], [0.0, 0.0, 0.5, 0.75], [0.5, 0.25, 1.0, 1.0], [0.0, 0.0, 1.0, 1.0]], dtype=np.float32))
        self.assertAllClose(boxlist_concatenated_expected.get(), boxlist_concatenated.get())

    def test_change_coordinate_frame(self):
        # Coordinates re-expressed relative to the given window.
        boxlist = np_box_list.BoxList(np.array([[0.25, 0.25, 0.75, 0.75], [0.0, 0.0, 0.5, 0.75]], dtype=np.float32))
        boxlist_coord = np_box_list_ops.change_coordinate_frame(boxlist, np.array([0, 0, 0.5, 0.5], dtype=np.float32))
        expected_boxlist_coord = np_box_list.BoxList(np.array([[0.5, 0.5, 1.5, 1.5], [0, 0, 1.0, 1.5]], dtype=np.float32))
        self.assertAllClose(boxlist_coord.get(), expected_boxlist_coord.get())

    def test_filter_scores_greater_than(self):
        # Only the box with score > 0.5 survives the filter.
        boxlist = np_box_list.BoxList(np.array([[0.25, 0.25, 0.75, 0.75], [0.0, 0.0, 0.5, 0.75]], dtype=np.float32))
        boxlist.add_field('scores', np.array([0.8, 0.2], np.float32))
        boxlist_greater = np_box_list_ops.filter_scores_greater_than(boxlist, 0.5)
        expected_boxlist_greater = np_box_list.BoxList(np.array([[0.25, 0.25, 0.75, 0.75]], dtype=np.float32))
        self.assertAllClose(boxlist_greater.get(), expected_boxlist_greater.get())
def checkpoint(acc, epoch):
    """Persist model/optimizer state plus bookkeeping to
    <save_dir><network>/checkpoint.t7.

    Relies on the module-level `model`, `optimizer` and `args` objects.
    """
    print('Saving..')
    state = {
        'model': model.state_dict(),
        'optimizer': optimizer.state_dict(),
        'acc': acc,
        'epoch': epoch,
        'seed': args.manualSeed,
    }
    torch.save(state, args.save_dir + args.network + '/' + 'checkpoint.t7')
def CalculateAllMaxPCharge(mol):
    """Maximum Gasteiger partial charge over all atoms (hydrogens added).

    Returns 0 when the molecule has no atoms.
    """
    mol_h = Chem.AddHs(mol)
    GMCharge.ComputeGasteigerCharges(mol_h, iter_step)
    charges = [float(atom.GetProp('_GasteigerCharge')) for atom in mol_h.GetAtoms()]
    if not charges:
        return 0
    return max(charges)
class CelebAFID(FID):
    """FID computation bound to the local CelebA image folder."""

    def __init__(self, batch_size=256, data_name='celeba', workers=0, verbose=True):
        # Attributes are assigned before super().__init__ because the base
        # class may call complete_data() during its own initialisation.
        self.batch_size = batch_size
        self.workers = workers
        super().__init__(data_name, verbose)

    def complete_data(self):
        """Loader over all CelebA images (center-crop 108, bicubic resize 64)."""
        transform = transforms.Compose([
            transforms.CenterCrop(108),
            transforms.Resize(size=64, interpolation=Image.BICUBIC),
            transforms.ToTensor(),
        ])
        data = datasets.ImageFolder('celeba', transform)
        images = len(data)
        loader = DataLoader(data, batch_size=self.batch_size, num_workers=self.workers)
        return (loader, images)
def add_sos_eos(tokens):
    """Lazily wrap each token list with the SOS and EOS sentinel ids.

    Returns a map object, matching the original's lazy semantics.
    """
    def bracket(token_list):
        return [nlc_data.SOS_ID] + token_list + [nlc_data.EOS_ID]
    return map(bracket, tokens)
class NumpyLookup(ContentLookup):
    """ContentLookup implementation for ak.contents.NumpyArray buffers."""
    # Offset (relative to `pos`) of the flat data buffer in the positions list.
    ARRAY = 0

    # NOTE(review): takes `cls` but carries no decorator — presumably a
    # stripped `@classmethod`; confirm against the original file.
    def tolookup(cls, layout, positions):
        # Append the contiguous data buffer and return its index.
        pos = len(positions)
        positions.append(layout.to_contiguous().data)
        return pos

    def tolayout(self, lookup, pos, fields):
        # NumpyArray is a leaf node: no record fields may remain to resolve.
        assert (fields == ())
        return ak.contents.NumpyArray(lookup.positions[(pos + self.ARRAY)], parameters=self.parameters)
def GenerateSM80_TensorOp_1688_rank_k(manifest, cuda_version):
    """Register SM80 TensorOp 16x8x8 symmetric rank-k update kernels
    (TF32 and fast-F32 math) with the CUTLASS generator manifest.

    No-op unless the toolkit is CUDA 11.0 or newer.
    """
    if (not CudaToolkitVersionSatisfies(cuda_version, 11, 0)):
        return
    # (A-layout, C-layout) combinations to instantiate.
    layouts = [(LayoutType.ColumnMajor, LayoutType.ColumnMajor), (LayoutType.RowMajor, LayoutType.ColumnMajor)]
    # Both triangular fill modes of the symmetric output.
    fill_modes = [FillMode.Lower, FillMode.Upper]
    # TF32 multiply-add and the 3xTF32 fast-F32 emulation variant.
    math_instructions = [MathInstruction([16, 8, 8], DataType.tf32, DataType.tf32, DataType.f32, OpcodeClass.TensorOp, MathOperation.multiply_add), MathInstruction([16, 8, 8], DataType.f32, DataType.f32, DataType.f32, OpcodeClass.TensorOp, MathOperation.multiply_add_fast_f32)]
    # Target SM80 and anything newer.
    min_cc = 80
    max_cc = 1024
    alignment_constraints = [1, 2, 4]
    for math_inst in math_instructions:
        # Threadblock shapes / stage counts / warp arrangements per math op.
        tile_descriptions = [TileDescription([256, 128, 16], 3, [4, 2, 1], math_inst, min_cc, max_cc), TileDescription([128, 256, 16], 3, [2, 4, 1], math_inst, min_cc, max_cc), TileDescription([128, 128, 16], 5, [2, 2, 1], math_inst, min_cc, max_cc), TileDescription([256, 128, 32], 3, [4, 2, 1], math_inst, min_cc, max_cc), TileDescription([128, 256, 32], 3, [2, 4, 1], math_inst, min_cc, max_cc), TileDescription([128, 128, 32], 4, [2, 2, 1], math_inst, min_cc, max_cc)]
        # (element A, element C, element accumulator) — all F32 at the API level.
        data_type = [DataType.f32, DataType.f32, DataType.f32]
        CreateRankKOperator(manifest, layouts, fill_modes, tile_descriptions, data_type, alignment_constraints, BlasMode.symmetric)
class Simulator(object):
    """Base class for a simulator component within an experiment.

    Subclasses override resource requirements, command generation and
    lifecycle hooks; the defaults describe an inert, dependency-free
    simulator.
    """

    def __init__(self) -> None:
        # Extra simulators this one depends on, beyond dependencies().
        self.extra_deps: tp.List[Simulator] = []
        self.name = ''

    def resreq_cores(self) -> int:
        """Number of CPU cores to reserve for this simulator."""
        return 1

    def resreq_mem(self) -> int:
        """Amount of memory to reserve (in MB, presumably — TODO confirm unit)."""
        return 64

    def full_name(self) -> str:
        """Human-readable identifier; empty by default."""
        return ''

    def prep_cmds(self, env: ExpEnv) -> tp.List[str]:
        """Shell commands to run once before the simulator starts."""
        return []

    def run_cmd(self, env: ExpEnv) -> tp.Optional[str]:
        """The command that launches the simulator, or None if there is none."""
        return None

    def dependencies(self) -> tp.List[Simulator]:
        """Simulators that must be started before this one."""
        return []

    def sockets_cleanup(self, env: ExpEnv) -> tp.List[str]:
        """Socket paths to remove after the simulator terminates."""
        return []

    def sockets_wait(self, env: ExpEnv) -> tp.List[str]:
        """Socket paths whose existence signals that the simulator is ready."""
        return []

    def start_delay(self) -> int:
        """Seconds to wait after launching before dependents may start."""
        return 5

    def wait_terminate(self) -> bool:
        """Whether the experiment should wait for this simulator to exit."""
        return False
def S1():
    """Benchmark: time expanding e*(e+1) where e = (x+y+z+1)**7.

    Returns the elapsed seconds for the expand() call only.
    """
    var('x y z')
    base = ((x + y + z) + 1) ** 7
    product = base * (base + 1)
    start = clock()
    product = product.expand()
    stop = clock()
    return stop - start
# NOTE(review): the line below looks like a stripped decorator — presumably
# torch.onnx's "@parse_args('v', 'is')" (tensor value + int list); confirm
# against the original file.
_args('v', 'is')
def permute(g, self, dims):
    # ONNX export of Tensor.permute: an identity permutation needs no node.
    if (dims == list(range(0, len(dims)))):
        return self
    # Otherwise emit an ONNX Transpose with the requested axis order.
    return g.op('Transpose', self, perm_i=dims)
def is_sagemaker_model_parallel_available():
    """Whether SageMaker model parallelism is configured and usable.

    Requires SM_HP_MP_PARAMETERS to be valid JSON containing 'partitions',
    SM_FRAMEWORK_PARAMS to be valid JSON with sagemaker_mpi_enabled set,
    and the `smdistributed` package to be importable.
    """
    try:
        smp_options = json.loads(os.getenv('SM_HP_MP_PARAMETERS', '{}'))
    except json.JSONDecodeError:
        return False
    if 'partitions' not in smp_options:
        # Model parallelism is only active when a partition count is given.
        return False
    try:
        mpi_options = json.loads(os.getenv('SM_FRAMEWORK_PARAMS', '{}'))
    except json.JSONDecodeError:
        return False
    if not mpi_options.get('sagemaker_mpi_enabled', False):
        return False
    return importlib.util.find_spec('smdistributed') is not None
def aad_active_learn(x, y, ensembles, aad_opts, glad_opts):
    """Run AAD active learning once per ensemble and aggregate the results.

    Mutates aad_opts.randseed / aad_opts.runidx per run (the final values
    persist, matching the original) and temporarily switches
    glad_opts.ensemble_type to 'aad' while writing the CSV summary.
    """
    logger.debug(('dataset: %s, shape: %s' % (aad_opts.dataset, str(x.shape))))
    all_aad_results = SequentialResults()
    base_seed = aad_opts.randseed
    for run_idx, ensemble in enumerate(ensembles):
        timer = Timer()
        aad_opts.randseed = base_seed + run_idx
        aad_opts.runidx = run_idx + 1
        set_random_seeds(aad_opts.randseed, aad_opts.randseed + 1, aad_opts.randseed + 2)
        logger.debug(('# ensemble members: %d' % ensemble.m))
        run_results = aad_active_learn_ensemble(x, y, ensemble, aad_opts)
        all_aad_results.merge(run_results)
        logger.debug(timer.message(('completed AAD run %d/%d:' % ((run_idx + 1), aad_opts.reruns))))
    saved_type = glad_opts.ensemble_type
    glad_opts.ensemble_type = 'aad'
    all_aad_results.write_to_csv(glad_opts)
    glad_opts.ensemble_type = saved_type
    return (x, y, all_aad_results, ensembles)
def _at_least_x_are_equal(a, b, x):
    """TF boolean scalar: do `a` and `b` agree in at least `x` positions?"""
    matches = tf.cast(tf.equal(a, b), tf.int32)
    return tf.greater_equal(tf.reduce_sum(matches), x)
# NOTE(review): the line below looks like a stripped route decorator —
# presumably something like "@app.route('/api/account/api_key')"; confirm
# against the original file.
('/api/account/api_key')
def handle_update_api_key():
    """Rotate the caller's API key.

    Expects JSON-encoded 'auth' and 'account' request arguments; responds
    with the updated account serialised as a dict, via safe_call's error
    handling.
    """
    def perform(args):
        auth = Authentication(**json.loads(args['auth']))
        account = from_dict(Account, json.loads(args['account']))
        return dataclasses.asdict(service.rotate_api_key(auth, account))
    return safe_call(perform)
def dblquad(func, a, b, gfun, hfun, args=(), epsabs=1.49e-08, epsrel=1.49e-08):
    """Double integral of func(y, x) over x in [a, b], y in [gfun(x), hfun(x)].

    gfun/hfun may be callables of x or plain constants; delegates to nquad.
    """
    def inner_range(*range_args):
        x = range_args[0]
        low = gfun(x) if callable(gfun) else gfun
        high = hfun(x) if callable(hfun) else hfun
        return [low, high]

    return nquad(func, [inner_range, [a, b]], args=args, opts={'epsabs': epsabs, 'epsrel': epsrel})
def get_index(x, cands):
    """Index of the first element of `cands` equal to `x`, or the sentinel
    999 when no element matches."""
    return next((i for i, cand in enumerate(cands) if cand == x), 999)
class Model(object):
    """A trained model handle: an estimator-type tag plus a metadata dict,
    with helpers to persist/restore the model directory via a database
    table or OSS object storage.

    NOTE(review): several methods below (`_from_dict`, `estimator_type`,
    `_unzip`, `load_from_db`, `load_metadata_from_db`,
    `_load_metadata_from_db_impl`, `load_from_oss`) take no `self`/`cls`
    yet are used like statics — presumably `@staticmethod` decorators were
    lost in extraction; confirm against the original file.
    """

    def __init__(self, typ, meta):
        # typ: model/estimator type tag; meta: free-form metadata dict.
        self._typ = typ
        self._meta = meta

    def get_type(self):
        """Return the stored model type tag."""
        return self._typ

    def get_meta(self, name, default=None):
        """Look up one metadata entry, with a default."""
        return self._meta.get(name, default)

    def _to_dict(self):
        # Serialise metadata plus the type under the reserved key.
        meta = dict(self._meta)
        meta['model_type'] = self._typ
        return meta

    def _from_dict(d):
        # Inverse of _to_dict; note this pops 'model_type' out of `d`.
        typ = d.pop('model_type')
        return Model(typ, d)

    def estimator_type(estimator):
        """Classify an estimator name into PAIML / XGBoost / TensorFlow."""
        estimator = estimator.lower()
        if (estimator in ['kmeans', 'randomforests']):
            return EstimatorType.PAIML
        elif estimator.startswith('xgboost'):
            return EstimatorType.XGBOOST
        else:
            return EstimatorType.TENSORFLOW

    def _zip(self, local_dir, tarball):
        """Tar `local_dir` into `tarball`, excluding runtime archive files."""
        from runtime.pai.prepare_archive import ALL_TAR_FILES

        def filter(tarinfo):
            # Drop entries that belong to the runtime archive itself.
            name = tarinfo.name
            if name.startswith('./'):
                name = name[2:]
            if (name in ALL_TAR_FILES):
                return None
            return tarinfo
        zip_dir(local_dir, tarball, arcname='./', filter=filter)

    def _unzip(local_dir, tarball):
        # Thin wrapper kept for symmetry with _zip.
        unzip_dir(tarball, local_dir)

    def save_to_db(self, datasource, table, local_dir=None, oss_model_dir=None):
        """Tar `local_dir` (default: cwd) and stream it into `table`,
        optionally materialising the model files from OSS first.

        Returns the fully-qualified table name.
        """
        if (local_dir is None):
            local_dir = os.getcwd()
        conn = connect_with_data_source(datasource)
        if oss_model_dir:
            # Pull the OSS model directory into local_dir before archiving.
            cur_dir = os.getcwd()
            os.chdir(local_dir)
            oss.load_dir(oss_model_dir)
            os.chdir(cur_dir)
        if ('.' not in table):
            # Qualify the bare table name with the connection's database.
            project_name = conn.param('database')
            table = ((project_name + '.') + table)
        with temp_file.TemporaryDirectory() as tmp_dir:
            tarball = os.path.join(tmp_dir, TARBALL_NAME)
            self._zip(local_dir, tarball)

            def _bytes_reader(filename, buf_size=(8 * 32)):
                # Generator factory streaming the tarball in small chunks.
                def _gen():
                    with open(filename, 'rb') as f:
                        while True:
                            data = f.read(buf_size)
                            if data:
                                (yield data)
                            else:
                                break
                return _gen
            write_with_generator_and_metadata(datasource, table, _bytes_reader(tarball), self._to_dict())
        conn.persist_table(table)
        conn.close()
        return table

    def load_from_db(datasource, table, local_dir=None):
        """Restore a model tarball from `table` (or from a model zoo when
        the name decomposes to one) into `local_dir` (default: cwd);
        returns the reconstructed Model."""
        if (local_dir is None):
            local_dir = os.getcwd()
        (model_zoo_addr, table, tag) = _decompose_model_name(table)
        if model_zoo_addr:
            (gen, metadata) = load_model_from_model_zoo(model_zoo_addr, table, tag)
        else:
            (gen, metadata) = read_with_generator_and_metadata(datasource, table)
        with temp_file.TemporaryDirectory() as tmp_dir:
            tarball = os.path.join(tmp_dir, TARBALL_NAME)
            with open(tarball, 'wb') as f:
                for data in gen():
                    f.write(bytes(data))
            Model._unzip(local_dir, tarball)
        return Model._from_dict(metadata)

    def load_metadata_from_db(datasource, table):
        """Read only the metadata; falls back to the legacy PAI table suffix."""
        try:
            return Model._load_metadata_from_db_impl(datasource, table)
        except:
            # NOTE(review): bare except also hides programming errors;
            # narrowing it would change behaviour, so it is kept as-is.
            return Model._load_metadata_from_db_impl(datasource, (table + '_sqlflow_pai_model'))

    def _load_metadata_from_db_impl(datasource, table):
        # Same resolution logic as load_from_db, but metadata only.
        (model_zoo_addr, table, tag) = _decompose_model_name(table)
        if model_zoo_addr:
            (_, metadata) = load_model_from_model_zoo(model_zoo_addr, table, tag, meta_only=True)
        else:
            (_, metadata) = read_with_generator_and_metadata(datasource, table, meta_only=True)
        return Model._from_dict(metadata)

    def save_to_oss(self, oss_model_dir, local_dir=None):
        """Tar `local_dir` (default: cwd) and upload it, plus a JSON
        metadata object, to the OSS model directory."""
        if (local_dir is None):
            local_dir = os.getcwd()
        with temp_file.TemporaryDirectory() as tmp_dir:
            tarball = os.path.join(tmp_dir, TARBALL_NAME)
            self._zip(local_dir, tarball)
            oss.save_file(oss_model_dir, tarball, TARBALL_NAME)
        with temp_file.TemporaryDirectory() as tmp_dir:
            model_obj_file = os.path.join(tmp_dir, MODEL_OBJ_FILE_NAME)
            with open(model_obj_file, 'w') as f:
                f.write(json.dumps(self._to_dict(), cls=JSONEncoderWithFeatureColumn))
            oss.save_file(oss_model_dir, model_obj_file, MODEL_OBJ_FILE_NAME)

    def load_from_oss(oss_model_dir, local_dir=None):
        """Download and unpack a model from OSS into `local_dir`
        (default: cwd); returns the reconstructed Model."""
        if (local_dir is None):
            local_dir = os.getcwd()
        with temp_file.TemporaryDirectory() as tmp_dir:
            tarball = os.path.join(tmp_dir, TARBALL_NAME)
            oss.load_file(oss_model_dir, tarball, TARBALL_NAME)
            Model._unzip(local_dir, tarball)
            model_obj_file = os.path.join(tmp_dir, MODEL_OBJ_FILE_NAME)
            oss.load_file(oss_model_dir, model_obj_file, MODEL_OBJ_FILE_NAME)
            with open(model_obj_file, 'r') as f:
                d = json.loads(f.read(), cls=JSONDecoderWithFeatureColumn)
            model = Model._from_dict(d)
        return model
def beta1(rce, T1, T2):
    """Given a cell (r, c) of T1 holding entry e, find a row x where T2 has
    the same entry e in column c; returns (x, c, e).

    Raises ValueError when no such row exists.
    """
    r, c, e = rce[0], rce[1], rce[2]
    assert T1[(r, c)] == e
    assert e >= 0
    for row in range(T1.nrows()):
        if T2[(row, c)] == e:
            return (row, c, e)
    raise ValueError
# NOTE(review): the nine lines below look like stripped click decorators —
# presumably '@click.command()', '@click.pass_context' and a series of
# '@click.option(...)' declarations; confirm against the original file.
()
_context
('network_pkl', '--network', help='Network pickle filename or URL', metavar='PATH', required=True, default='')
('network_pkl_trans', '--network_trans', help='Network pickle filename for adaptor of step2', metavar='PATH', required=True, default='')
('--metrics', help='Comma-separated list or "none"', type=CommaSeparatedList(), default='fid50k_full', show_default=True)
('--data', help='Dataset to evaluate metrics against (directory or zip) [default: same as training data]', metavar='PATH')
('--mirror', help='Whether the dataset was augmented with x-flips during training [default: look up]', type=bool, metavar='BOOL')
('--gpus', help='Number of GPUs to use', type=int, default=1, metavar='INT', show_default=True)
('--verbose', help='Print optional information', type=bool, default=True, metavar='BOOL', show_default=True)
def calc_metrics(ctx, network_pkl, network_pkl_trans, metrics, data, mirror, gpus, verbose):
    """CLI entry point: compute the requested GAN metrics for a network
    pickle, optionally with a step-2 adaptor network, on one or more GPUs."""
    dnnlib.util.Logger(should_flush=True)
    args = dnnlib.EasyDict(metrics=metrics, num_gpus=gpus, network_pkl=network_pkl, verbose=verbose)
    # Validate the CLI arguments before any heavy loading.
    if (not all((metric_main.is_valid_metric(metric) for metric in args.metrics))):
        ctx.fail('\n'.join((['--metrics can only contain the following values:'] + metric_main.list_valid_metrics())))
    if (not (args.num_gpus >= 1)):
        ctx.fail('--gpus must be at least 1')
    # A directory argument means "use the newest .pkl inside it".
    if os.path.isdir(network_pkl):
        import glob
        network_pkl = sorted(glob.glob((network_pkl + '/*.pkl')))[(- 1)]
    if args.verbose:
        print(f'Loading network from "{network_pkl}"...')
    with dnnlib.util.open_url(network_pkl, verbose=args.verbose) as f:
        network_dict = legacy.load_network_pkl(f)
        args.G = network_dict['G_ema']
        args.D = network_dict['D']
    # Optional second pickle holding the step-2 adaptor network.
    if (network_pkl_trans != ''):
        with dnnlib.util.open_url(network_pkl_trans, verbose=args.verbose) as f:
            network_dict = legacy.load_network_pkl(f)
            args.Adapted_net = network_dict['Adapted_net']
    # Dataset options: explicit --data wins, else reuse the training set.
    if (data is not None):
        args.dataset_kwargs = dnnlib.EasyDict(class_name='training.dataset.ImageFolderDataset', path=data)
    elif (network_dict['training_set_kwargs'] is not None):
        args.dataset_kwargs = dnnlib.EasyDict(network_dict['training_set_kwargs'])
    else:
        ctx.fail('Could not look up dataset options; please specify --data')
    args.dataset_kwargs.resolution = args.G.img_resolution
    args.dataset_kwargs.use_labels = (args.G.c_dim != 0)
    if (mirror is not None):
        args.dataset_kwargs.xflip = mirror
    if args.verbose:
        print('Dataset options:')
        print(json.dumps(args.dataset_kwargs, indent=2))
    # Write results next to the pickle when its training options are present.
    args.run_dir = './'
    if os.path.isfile(network_pkl):
        pkl_dir = os.path.dirname(network_pkl)
        if os.path.isfile(os.path.join(pkl_dir, 'training_options.json')):
            args.run_dir = pkl_dir
    if args.verbose:
        print('Launching processes...')
    torch.multiprocessing.set_start_method('spawn')
    with tempfile.TemporaryDirectory() as temp_dir:
        if (args.num_gpus == 1):
            # Single GPU: run in-process rather than spawning workers.
            subprocess_fn(rank=0, args=args, temp_dir=temp_dir)
        else:
            torch.multiprocessing.spawn(fn=subprocess_fn, args=(args, temp_dir), nprocs=args.num_gpus)
# NOTE(review): the line below looks like a stripped decorator — presumably
# awkward's '@high_level_function()'; confirm against the original file.
_level_function()
def moment(x, n, weight=None, axis=None, *, keepdims=False, mask_identity=False, highlevel=True, behavior=None, attrs=None):
    """n-th (optionally weighted) moment of `x` along `axis`.

    The bare `yield` hands the array arguments to the dispatch machinery
    before delegating to the private implementation.
    """
    (yield (x, weight))
    return _impl(x, n, weight, axis, keepdims, mask_identity, highlevel, behavior, attrs)
def assign_params_from_flat(x, params):
    """TF op assigning consecutive slices of flat vector `x` into `params`.

    Each tensor receives a chunk of `x` matching its element count, reshaped
    to its own shape; returns a grouped assignment op.
    """
    def size_of(p):
        return int(np.prod(p.shape.as_list()))

    chunks = tf.split(x, [size_of(p) for p in params])
    reshaped = [tf.reshape(chunk, p.shape) for p, chunk in zip(params, chunks)]
    return tf.group([tf.assign(p, new_val) for p, new_val in zip(params, reshaped)])
def _get_values_target_representation(val: Union[(str, Any)], target_representation: str, conversion_type: str, conversion_rate: float, n_round: int, split: bool, input_symbol: str, target_symbol: str) -> Any:
val_new = 0.0
val = float(val)
if (conversion_type in ('fiat_to_fiat', 'crypto_to_fiat')):
val_new = (val * conversion_rate)
else:
val_new = (val / conversion_rate)
if (target_representation == 'abbr'):
val = '{:,.{a}f}'.format(val, a=n_round)
target_val = '{:,.{a}f}'.format(val_new, a=n_round)
if split:
return (val, target_val)
else:
return ((input_symbol.upper() + str(val)), (target_symbol.upper() + str(target_val)))
else:
return (np.round(val, n_round), np.round(val_new, n_round)) |
class Up(nn.Module):
    """2x upsampling block: ConvTranspose2d (k3, s2) -> norm layer -> Tanh."""

    def __init__(self, in_ch, out_ch, norm_layer=nn.BatchNorm2d, use_bias=False):
        super(Up, self).__init__()
        stages = [
            nn.ConvTranspose2d(in_ch, out_ch, kernel_size=3, stride=2, padding=1, output_padding=1, bias=use_bias),
            norm_layer(out_ch),
            nn.Tanh(),
        ]
        self.up = nn.Sequential(*stages)

    def forward(self, x):
        """Return the upsampled activation of `x` (values in (-1, 1))."""
        return self.up(x)
def route_through_array(array, start, end, fully_connected=True, geometric=True):
    """Minimum-cost path through `array` from `start` to `end`.

    Returns (path, total_cost). MCP_Geometric weighs diagonal moves by
    their geometric length when `geometric` is True; plain MCP otherwise.
    """
    start, end = tuple(start), tuple(end)
    mcp_class = MCP_Geometric if geometric else MCP
    mcp = mcp_class(array, fully_connected=fully_connected)
    costs, _ = mcp.find_costs([start], [end])
    return (mcp.traceback(end), costs[end])
def validate_parameter_constraints(parameter_constraints, params, caller_name):
    """Validate each parameter value against its declared constraints.

    Parameters without a declared constraint, or declared as
    'no_validation', are skipped. Raises InvalidParameterError naming the
    visible (non-hidden) constraints when no constraint is satisfied.
    """
    for param_name, param_val in params.items():
        if param_name not in parameter_constraints:
            continue
        declared = parameter_constraints[param_name]
        if declared == 'no_validation':
            continue
        constraints = [make_constraint(item) for item in declared]
        # Stop at the first satisfied constraint (same order as declared).
        if any(c.is_satisfied_by(param_val) for c in constraints):
            continue
        # Only non-hidden constraints appear in the error message.
        visible = [c for c in constraints if not c.hidden]
        if len(visible) == 1:
            constraints_str = f'{visible[0]}'
        else:
            constraints_str = f"{', '.join([str(c) for c in visible[:(- 1)]])} or {visible[(- 1)]}"
        raise InvalidParameterError(f'The {param_name!r} parameter of {caller_name} must be {constraints_str}. Got {param_val!r} instead.')
def test_regular_unknown_1_parm():
    """A RegularType string with a parameters payload must round-trip through deduce_type."""
    text = '[0 * unknown, parameters={"foo": "bar"}]'
    parsed = deduce_type(text)
    assert isinstance(parsed, ak.types.RegularType)
    assert str(parsed) == text
def _append_empty_result(annos, saveto, img_idx):
    """Record an empty annotation for an image with no kept detections.

    Also touches an empty per-image result file (when saving is enabled) so
    downstream evaluation still finds one file per image.
    """
    if saveto is not None:
        of_path = os.path.join(saveto, '%06d.txt' % img_idx)
        with open(of_path, 'w'):
            pass
    annos.append(kitti.empty_result_anno())


def single_test(model, data_loader, saveto=None, class_names=('Car',), show=False):
    """Run single-process inference and collect KITTI-format annotations.

    Parameters
    ----------
    model : detector; per image yields a dict with keys 'image_idx', 'bbox',
        'box3d_camera', 'label_preds', 'scores', 'alphas'.
    data_loader : iterable of input batches.
    saveto : optional output directory; one '%06d.txt' result file is written
        per image (empty when the image has no kept detections).
    class_names : maps predicted label index to class name.  (Default changed
        from a mutable list to an equivalent tuple — indexing is unchanged.)
    show : when True, visualize each batch via ``model.module.show_result``.

    Returns
    -------
    list of per-image dicts of stacked numpy arrays, each carrying an
    'image_idx' array sized to its number of kept detections.
    """
    # one '{}' (class name) followed by 15 fixed-precision float fields per line
    template = '{} ' + ' '.join(['{:.4f}'] * 15) + '\n'
    if saveto is not None:
        mmcv.mkdir_or_exist(saveto)
    model.eval()
    annos = []
    prog_bar = mmcv.ProgressBar(len(data_loader.dataset))
    # hard-coded KITTI image size (height, width) used to filter/clip 2D boxes
    image_shape = (375, 1242)
    for data in data_loader:
        with torch.no_grad():
            results = model(return_loss=False, **data)
        for re in results:
            img_idx = re['image_idx']
            num_example = 0
            if re['bbox'] is not None:
                box2d = re['bbox']
                box3d = re['box3d_camera']
                labels = re['label_preds']
                scores = re['scores']
                alphas = re['alphas']
                anno = kitti.get_start_result_anno()
                for bbox2d, bbox3d, label, score, alpha in zip(box2d, box3d, labels, scores, alphas):
                    # discard boxes starting beyond the image or ending before it
                    if bbox2d[0] > image_shape[1] or bbox2d[1] > image_shape[0]:
                        continue
                    if bbox2d[2] < 0 or bbox2d[3] < 0:
                        continue
                    # clip the box to the image; image_shape[::-1] is (W, H)
                    bbox2d[2:] = np.minimum(bbox2d[2:], image_shape[::-1])
                    bbox2d[:2] = np.maximum(bbox2d[:2], [0, 0])
                    anno['name'].append(class_names[int(label)])
                    anno['truncated'].append(0.0)
                    anno['occluded'].append(0)
                    anno['alpha'].append(alpha)
                    anno['bbox'].append(bbox2d)
                    # camera-box layout: dims at [3:6], location at [:3], yaw at [6]
                    anno['dimensions'].append(bbox3d[[3, 4, 5]])
                    anno['location'].append(bbox3d[:3])
                    anno['rotation_y'].append(bbox3d[6])
                    anno['score'].append(score)
                    num_example += 1
            if num_example != 0:
                if saveto is not None:
                    of_path = os.path.join(saveto, '%06d.txt' % img_idx)
                    with open(of_path, 'w') as f:
                        for name, bbox, dim, loc, ry, score, alpha in zip(anno['name'], anno['bbox'], anno['dimensions'], anno['location'], anno['rotation_y'], anno['score'], anno['alpha']):
                            # dims written as dim[[1, 2, 0]] — presumably KITTI's on-disk h,w,l order; confirm against kitti helpers
                            f.write(template.format(name, 0, 0, alpha, *bbox, *dim[[1, 2, 0]], *loc, ry, score))
                annos.append({n: np.stack(v) for n, v in anno.items()})
            else:
                # covers both "no raw boxes" and "all boxes filtered out"
                # (previously two byte-identical duplicated branches)
                _append_empty_result(annos, saveto, img_idx)
            if show:
                model.module.show_result(data, results, data_loader.dataset.img_norm_cfg)
            num_example = annos[-1]['name'].shape[0]
            annos[-1]['image_idx'] = np.array([img_idx] * num_example, dtype=np.int64)
        for _ in range(len(results)):
            prog_bar.update()
    return annos
def save_parsed_sqls(args, parsed_sqls):
    """Dump parsed SQL queries to '<data_dir>/<dataset>.<tag>parsed.json'.

    An already-existing output file is first copied to /tmp so the previous
    parse survives a bad overwrite.
    """
    file_name = '{}.{}parsed.json'.format(args.dataset_name, get_norm_tag(args))
    out_json = os.path.join(args.data_dir, file_name)
    if os.path.exists(out_json):
        # back up the previous output before clobbering it
        shutil.copyfile(out_json, os.path.join('/tmp', file_name))
    with open(out_json, 'w') as o_f:
        json.dump(parsed_sqls, o_f, indent=4)
    print('parsed SQL queries dumped to {}'.format(out_json))
def logimage(key, image_tensor):
    """Write an image to every TensorBoard output format of the current logger."""
    for fmt in get_current().output_formats:
        # only TensorBoard backends know how to write images
        if isinstance(fmt, TensorBoardOutputFormat):
            fmt.writeimage(key, image_tensor)
class DetectionResultFields(object):
    """Canonical field names used when indexing detection-result dicts."""
    # NOTE(review): values look like COCO-style result keys — confirm against consumers
    key = 'image_id'
    detection_boxes = 'bbox'
    detection_scores = 'score'
    detection_classes = 'cls'
    detection_masks = 'masks'
class xDeepFM(BaseModel):
    """xDeepFM: linear part + CIN (Compressed Interaction Network) + plain DNN.

    The three branch logits are summed and passed through the task output
    layer (``self.out`` from BaseModel).  CIN/DNN branches are built only
    when both feature columns and layer sizes are non-empty.
    """

    def __init__(self, linear_feature_columns, dnn_feature_columns, dnn_hidden_units=(256, 256), cin_layer_size=(256, 128), cin_split_half=True, cin_activation='relu', l2_reg_linear=1e-05, l2_reg_embedding=1e-05, l2_reg_dnn=0, l2_reg_cin=0, init_std=0.0001, seed=1024, dnn_dropout=0, dnn_activation='relu', dnn_use_bn=False, task='binary', device='cpu'):
        """Build linear/embedding parts via BaseModel, then optional DNN and CIN branches."""
        super(xDeepFM, self).__init__(linear_feature_columns, dnn_feature_columns, l2_reg_linear=l2_reg_linear, l2_reg_embedding=l2_reg_embedding, init_std=init_std, seed=seed, task=task, device=device)
        self.dnn_hidden_units = dnn_hidden_units
        # DNN branch is active only with features AND at least one hidden layer
        self.use_dnn = (len(dnn_feature_columns) > 0) and (len(dnn_hidden_units) > 0)
        if self.use_dnn:
            self.dnn = DNN(self.compute_input_dim(dnn_feature_columns), dnn_hidden_units, activation=dnn_activation, l2_reg=l2_reg_dnn, dropout_rate=dnn_dropout, use_bn=dnn_use_bn, init_std=init_std, device=device)
            self.dnn_linear = nn.Linear(dnn_hidden_units[-1], 1, bias=False).to(device)
            # L2-regularize DNN weight matrices, skipping batch-norm parameters
            self.add_regularization_weight(filter((lambda x: (('weight' in x[0]) and ('bn' not in x[0]))), self.dnn.named_parameters()), l2_reg_dnn)
            self.add_regularization_weight(self.dnn_linear.weight, l2_reg_dnn)
        self.cin_layer_size = cin_layer_size
        self.use_cin = (len(self.cin_layer_size) > 0) and (len(dnn_feature_columns) > 0)
        if self.use_cin:
            field_num = len(self.embedding_dict)
            if cin_split_half:  # fixed anti-idiom `== True`
                # with split-half, each intermediate layer forwards half its feature maps
                self.featuremap_num = (sum(cin_layer_size[:-1]) // 2) + cin_layer_size[-1]
            else:
                self.featuremap_num = sum(cin_layer_size)
            self.cin = CIN(field_num, cin_layer_size, cin_activation, cin_split_half, l2_reg_cin, seed, device=device)
            self.cin_linear = nn.Linear(self.featuremap_num, 1, bias=False).to(device)
            self.add_regularization_weight(filter((lambda x: ('weight' in x[0])), self.cin.named_parameters()), l2_reg_cin)
        self.to(device)

    def forward(self, X):
        """Sum the logits of the configured branches and apply the output layer."""
        sparse_embedding_list, dense_value_list = self.input_from_feature_columns(X, self.dnn_feature_columns, self.embedding_dict)
        linear_logit = self.linear_model(X)
        if self.use_cin:
            cin_input = torch.cat(sparse_embedding_list, dim=1)
            cin_output = self.cin(cin_input)
            cin_logit = self.cin_linear(cin_output)
        if self.use_dnn:
            dnn_input = combined_dnn_input(sparse_embedding_list, dense_value_list)
            dnn_output = self.dnn(dnn_input)
            dnn_logit = self.dnn_linear(dnn_output)
        # NOTE: these guards test only layer sizes, not use_dnn/use_cin; with
        # non-empty sizes but empty dnn_feature_columns the branch logit is
        # undefined and this raises NameError — preserved as-is (TODO confirm intended)
        if (len(self.dnn_hidden_units) == 0) and (len(self.cin_layer_size) == 0):
            final_logit = linear_logit
        elif (len(self.dnn_hidden_units) == 0) and (len(self.cin_layer_size) > 0):
            final_logit = linear_logit + cin_logit
        elif (len(self.dnn_hidden_units) > 0) and (len(self.cin_layer_size) == 0):
            final_logit = linear_logit + dnn_logit
        elif (len(self.dnn_hidden_units) > 0) and (len(self.cin_layer_size) > 0):
            final_logit = linear_logit + dnn_logit + cin_logit
        else:
            raise NotImplementedError
        y_pred = self.out(final_logit)
        return y_pred
def test_transfer_fields_unknown_batch():
    """transfer_fields must reject target data containing an unseen batch category."""
    adata1 = synthetic_iid()
    protein_adata1 = synthetic_iid()
    mdata1 = mudata.MuData({'rna': adata1, 'protein': protein_adata1})
    adata2 = synthetic_iid()
    adata2.X = adata1.X
    protein_adata2 = synthetic_iid()
    mdata2 = mudata.MuData({'rna': adata2, 'protein': protein_adata2})
    # batch label 2 never appears in the setup data below
    adata2.obs['batch'] = [2] * adata2.n_obs
    manager = generic_setup_mudata_manager(mdata1, layer_mod='rna', batch_mod='rna', batch_key='batch')
    with pytest.raises(ValueError):
        manager.transfer_fields(mdata2)
def make_test_data_loader(datasets, start_ind, end_ind, is_distributed=True):
    """Build the test DataLoader, using a range sampler when explicit bounds are given.

    A start or end index of -1 means "no explicit range": fall back to a
    DistributedSampler (or sequential iteration when not distributed).
    """
    if start_ind == -1 or end_ind == -1:
        sampler = torch.utils.data.distributed.DistributedSampler(datasets) if is_distributed else None
    else:
        sampler = samplers.RangeSampler(start_ind, end_ind)
    return torch.utils.data.DataLoader(
        datasets,
        batch_size=cfg.TEST.IMS_PER_GPU,
        shuffle=False,
        sampler=sampler,
        num_workers=cfg.TEST.LOADER_THREADS,
        collate_fn=BatchCollator(cfg.TEST.SIZE_DIVISIBILITY),
    )
def CoxeterGraph():
    """Build and return the Coxeter graph (28 vertices).

    Construction: an outer 24-cycle, three inner hub vertices (24-26) and a
    central vertex (27), each hub attached to three cycle vertices, plus six
    extra chords on the cycle.
    """
    graph = Graph({27: [6, 22, 14], 24: [0, 7, 18], 25: [8, 15, 2], 26: [10, 16, 23]}, pos={})
    outer = list(range(24))
    graph.add_cycle(outer)
    graph.add_edges([(5, 11), (9, 20), (12, 1), (13, 19), (17, 4), (3, 21)])
    # layout: outer ring at radius 1, hubs on an inner circle, 27 at the center
    graph._circle_embedding(outer)
    graph._circle_embedding([24, 25, 26], radius=0.5)
    graph._pos[27] = (0, 0)
    graph.name('Coxeter Graph')
    return graph
def scheduler_tester(scheduler, ref_scheduler, max_iter, scheduler_args=None, atol=1e-06):
    """Assert that ``scheduler`` matches ``ref_scheduler`` over ``max_iter`` iterations.

    Parameters
    ----------
    scheduler, ref_scheduler : classes/callables constructed as cls(*scheduler_args),
        each exposing ``get_learning_rate(iteration)``.
    max_iter : number of iterations (0..max_iter-1) to compare.
    scheduler_args : optional positional constructor args shared by both
        schedulers.  Default is no args (was a mutable-default ``[]``; fixed
        to a None sentinel — call-site behavior is unchanged).
    atol : absolute tolerance passed to assert_allclose.
    """
    if scheduler_args is None:
        scheduler_args = []
    s = scheduler(*scheduler_args)
    ref_s = ref_scheduler(*scheduler_args)
    # `step` instead of the original `iter`, which shadowed the builtin
    lr = [s.get_learning_rate(step) for step in range(max_iter)]
    ref_lr = [ref_s.get_learning_rate(step) for step in range(max_iter)]
    assert_allclose(lr, ref_lr, atol=atol)
((not have_sympy), 'SymPy not installed')  # NOTE(review): looks like a stripped skip-decorator argument (e.g. @unittest.skipIf) — restore from VCS
def test_dirichlet_eta():
    """dirichlet_eta must round-trip through SymPy conversion in both directions."""
    x = Symbol('x')
    sympy_expr = sympy.dirichlet_eta(sympy.Symbol('x'))
    native_expr = dirichlet_eta(x)
    assert sympify(sympy_expr) == native_expr
    assert native_expr._sympy_() == sympy_expr
def main():
    """Train the audio model (AudioNet) on LRS2 with CTC loss.

    Sets up deterministic training, builds train/val loaders, optionally
    restores a pre-trained model, then trains for NUM_STEPS steps while
    checkpointing weights and saving loss/WER curves every SAVE_FREQUENCY
    steps.  All settings come from the module-level `args` dict.
    """
    # 'Agg' backend: figures are only written to disk, no display required
    matplotlib.use('Agg')
    # seed all RNGs for reproducibility
    np.random.seed(args['SEED'])
    torch.manual_seed(args['SEED'])
    gpuAvailable = torch.cuda.is_available()
    device = torch.device(('cuda' if gpuAvailable else 'cpu'))
    kwargs = ({'num_workers': args['NUM_WORKERS'], 'pin_memory': True} if gpuAvailable else {})
    # deterministic cuDNN kernels, no autotuning (reproducible but slower)
    torch.backends.cudnn.deterministic = True
    torch.backends.cudnn.benchmark = False
    # STFT front-end settings shared by train and val datasets
    audioParams = {'stftWindow': args['STFT_WINDOW'], 'stftWinLen': args['STFT_WIN_LENGTH'], 'stftOverlap': args['STFT_OVERLAP']}
    # training data gets additive-noise augmentation with probability NOISE_PROBABILITY
    noiseParams = {'noiseFile': (args['DATA_DIRECTORY'] + '/noise.wav'), 'noiseProb': args['NOISE_PROBABILITY'], 'noiseSNR': args['NOISE_SNR_DB']}
    trainData = LRS2Main('train', args['DATA_DIRECTORY'], args['MAIN_REQ_INPUT_LENGTH'], args['CHAR_TO_INDEX'], args['STEP_SIZE'], audioParams, noiseParams)
    trainLoader = DataLoader(trainData, batch_size=args['BATCH_SIZE'], collate_fn=collate_fn, shuffle=True, **kwargs)
    # validation audio is kept clean (noiseProb=0)
    noiseParams = {'noiseFile': (args['DATA_DIRECTORY'] + '/noise.wav'), 'noiseProb': 0, 'noiseSNR': args['NOISE_SNR_DB']}
    valData = LRS2Main('val', args['DATA_DIRECTORY'], args['MAIN_REQ_INPUT_LENGTH'], args['CHAR_TO_INDEX'], args['STEP_SIZE'], audioParams, noiseParams)
    valLoader = DataLoader(valData, batch_size=args['BATCH_SIZE'], collate_fn=collate_fn, shuffle=True, **kwargs)
    model = AudioNet(args['TX_NUM_FEATURES'], args['TX_ATTENTION_HEADS'], args['TX_NUM_LAYERS'], args['PE_MAX_LENGTH'], args['AUDIO_FEATURE_SIZE'], args['TX_FEEDFORWARD_DIM'], args['TX_DROPOUT'], args['NUM_CLASSES'])
    model.to(device)
    optimizer = optim.Adam(model.parameters(), lr=args['INIT_LR'], betas=(args['MOMENTUM1'], args['MOMENTUM2']))
    # LR is reduced when validation WER plateaus (see scheduler.step(validationWER) below)
    scheduler = optim.lr_scheduler.ReduceLROnPlateau(optimizer, mode='min', factor=args['LR_SCHEDULER_FACTOR'], patience=args['LR_SCHEDULER_WAIT'], threshold=args['LR_SCHEDULER_THRESH'], threshold_mode='abs', min_lr=args['FINAL_LR'], verbose=True)
    # blank index 0 — presumably matches the dataset's character indexing; confirm against CHAR_TO_INDEX
    loss_function = nn.CTCLoss(blank=0, zero_infinity=False)
    # refuse to silently clobber an existing checkpoints directory
    if os.path.exists((args['CODE_DIRECTORY'] + '/checkpoints')):
        while True:
            ch = input("Continue and remove the 'checkpoints' directory? y/n: ")
            if (ch == 'y'):
                break
            elif (ch == 'n'):
                exit()
            else:
                print('Invalid input')
        shutil.rmtree((args['CODE_DIRECTORY'] + '/checkpoints'))
    os.mkdir((args['CODE_DIRECTORY'] + '/checkpoints'))
    os.mkdir((args['CODE_DIRECTORY'] + '/checkpoints/models'))
    os.mkdir((args['CODE_DIRECTORY'] + '/checkpoints/plots'))
    # optionally warm-start from a pre-trained checkpoint
    if (args['PRETRAINED_MODEL_FILE'] is not None):
        print(('\n\nPre-trained Model File: %s' % args['PRETRAINED_MODEL_FILE']))
        print('\nLoading the pre-trained model .... \n')
        model.load_state_dict(torch.load((args['CODE_DIRECTORY'] + args['PRETRAINED_MODEL_FILE']), map_location=device))
        model.to(device)
        print('Loading Done.\n')
    trainingLossCurve = list()
    validationLossCurve = list()
    trainingWERCurve = list()
    validationWERCurve = list()
    (numTotalParams, numTrainableParams) = num_params(model)
    print(('\nNumber of total parameters in the model = %d' % numTotalParams))
    print(('Number of trainable parameters in the model = %d\n' % numTrainableParams))
    print('\nTraining the model .... \n')
    trainParams = {'spaceIx': args['CHAR_TO_INDEX'][' '], 'eosIx': args['CHAR_TO_INDEX']['<EOS>']}
    valParams = {'decodeScheme': 'greedy', 'spaceIx': args['CHAR_TO_INDEX'][' '], 'eosIx': args['CHAR_TO_INDEX']['<EOS>']}
    for step in range(args['NUM_STEPS']):
        # one "step" is a full train pass followed by a full validation pass
        (trainingLoss, trainingCER, trainingWER) = train(model, trainLoader, optimizer, loss_function, device, trainParams)
        trainingLossCurve.append(trainingLoss)
        trainingWERCurve.append(trainingWER)
        (validationLoss, validationCER, validationWER) = evaluate(model, valLoader, loss_function, device, valParams)
        validationLossCurve.append(validationLoss)
        validationWERCurve.append(validationWER)
        print(('Step: %03d || Tr.Loss: %.6f Val.Loss: %.6f || Tr.CER: %.3f Val.CER: %.3f || Tr.WER: %.3f Val.WER: %.3f' % (step, trainingLoss, validationLoss, trainingCER, validationCER, trainingWER, validationWER)))
        scheduler.step(validationWER)
        # checkpoint + plots every SAVE_FREQUENCY steps and at the final step (never at step 0)
        if ((((step % args['SAVE_FREQUENCY']) == 0) or (step == (args['NUM_STEPS'] - 1))) and (step != 0)):
            savePath = (args['CODE_DIRECTORY'] + '/checkpoints/models/train-step_{:04d}-wer_{:.3f}.pt'.format(step, validationWER))
            torch.save(model.state_dict(), savePath)
            plt.figure()
            plt.title('Loss Curves')
            plt.xlabel('Step No.')
            plt.ylabel('Loss value')
            plt.plot(list(range(1, (len(trainingLossCurve) + 1))), trainingLossCurve, 'blue', label='Train')
            plt.plot(list(range(1, (len(validationLossCurve) + 1))), validationLossCurve, 'red', label='Validation')
            plt.legend()
            plt.savefig((args['CODE_DIRECTORY'] + '/checkpoints/plots/train-step_{:04d}-loss.png'.format(step)))
            plt.close()
            plt.figure()
            plt.title('WER Curves')
            plt.xlabel('Step No.')
            plt.ylabel('WER')
            plt.plot(list(range(1, (len(trainingWERCurve) + 1))), trainingWERCurve, 'blue', label='Train')
            plt.plot(list(range(1, (len(validationWERCurve) + 1))), validationWERCurve, 'red', label='Validation')
            plt.legend()
            plt.savefig((args['CODE_DIRECTORY'] + '/checkpoints/plots/train-step_{:04d}-wer.png'.format(step)))
            plt.close()
    print('\nTraining Done.\n')
    return
('/get_balance/', methods=('GET',))
def get_balance():
web3 = connect_to_geth(app.web3_url, app.consensus)
balance = {}
for addr in app.eth_accounts:
caddr = Web3.toChecksumAddress(addr)
balance[addr] = web3.fromWei(web3.eth.get_balance(caddr), 'ether')
for addr in app.local_accounts:
caddr = Web3.toChecksumAddress(addr)
balance[addr] = web3.fromWei(web3.eth.get_balance(caddr), 'ether')
return balance |
class Conv2d(nn.Module):
    """Conv2d followed by optional BatchNorm and optional activation.

    `same_padding` pads so the spatial size is preserved for stride 1;
    `NL` selects 'relu', 'prelu', or anything else for no activation.
    """

    def __init__(self, in_channels, out_channels, kernel_size, stride=1, NL='relu', same_padding=False, bn=True, bias=True):
        super(Conv2d, self).__init__()
        if same_padding:
            padding = (kernel_size - 1) // 2
        else:
            padding = 0
        self.conv = nn.Conv2d(in_channels, out_channels, kernel_size, stride, padding=padding, bias=bias)
        if bn:
            self.bn = nn.BatchNorm2d(out_channels)
        else:
            self.bn = None
        if NL == 'relu':
            self.relu = nn.ReLU(inplace=True)
        elif NL == 'prelu':
            self.relu = nn.PReLU()
        else:
            self.relu = None

    def forward(self, x):
        out = self.conv(x)
        if self.bn is not None:
            out = self.bn(out)
        if self.relu is not None:
            out = self.relu(out)
        return out
def mag_pha_stft(y, n_fft, hop_size, win_size, compress_factor=1.0, center=True):
    """STFT of ``y`` returning compressed magnitude, phase, and a stacked real/imag view.

    The magnitude is power-law compressed by ``compress_factor``; ``com`` is
    the compressed spectrogram re-expressed as (real, imag) in a trailing
    dimension of size 2.
    """
    window = torch.hann_window(win_size).to(y.device)
    spec = torch.stft(y, n_fft, hop_length=hop_size, win_length=win_size, window=window, center=center, pad_mode='reflect', normalized=False, return_complex=True)
    pha = torch.angle(spec)
    # power-law compression of the magnitude spectrogram
    mag = torch.abs(spec) ** compress_factor
    com = torch.stack((mag * torch.cos(pha), mag * torch.sin(pha)), dim=-1)
    return (mag, pha, com)
def attribute_names(o):
    """Return the sorted names of the operator's non-tensor arguments."""
    names = [arg['name'] for arg in o['arguments'] if not value_has_tensors(arg)]
    names.sort()
    return names
def test_importing_submodules():
    """Every public scipy submodule must import cleanly in a fresh interpreter."""
    for name in PUBLIC_SUBMODULES:
        # a subprocess gives each import an unpolluted module cache
        cmd = [sys.executable, '-c', 'import scipy.{0}'.format(name)]
        try:
            subprocess.check_output(cmd)
        except subprocess.CalledProcessError:
            raise AssertionError('Importing scipy.{0} failed'.format(name))
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.