code stringlengths 101 5.91M |
|---|
class Grassberger(EntropyEstimator):
    """Grassberger entropy estimator.

    Estimates H ~ log(n) - (1/n) * sum_k n_k * G(n_k), where G is the
    Grassberger correction series produced by ``g_series``.
    """

    def g_series():
        """Return a memoized function ``gterm(n)`` evaluating the G series.

        G(n<1) = 0; G(1) = -gamma - log 2; G(2) = 2 + G(1); for even n > 2,
        G(n) = gamma0((n+1)/2) + log 2; odd n > 2 reuse G(n-1).
        """
        GG = {}  # memo: n -> G(n)
        gamma0 = ndd.fnsb.gamma0
        log_two = numpy.log(2.0)
        def gterm(n):
            if (n in GG):
                return GG[n]
            if (n <= 2):
                if (n < 1):
                    value = 0.0
                elif (n == 1):
                    value = ((- euler_gamma) - log_two)
                elif (n == 2):
                    value = (2.0 + gterm(1))
            elif ((n % 2) == 0):
                value = (gamma0(((n + 1) / 2)) + log_two)
            else:
                # Odd n > 2 share the value of the preceding even term.
                value = gterm((n - 1))
            GG[n] = value
            return value
        return gterm
    _function  # NOTE(review): looks like a truncated decorator (likely "@fit_function" for the method below, and "@staticmethod" for g_series above) lost in extraction — confirm against upstream ndd source.
    def fit(self, nk, k=None, zk=None):
        """Fit from counts ``nk``; ``zk`` optionally gives multiplicities
        (zk[j] = number of bins observed exactly nk[j] times).

        Sets ``self.estimate_`` and returns ``self``.
        """
        gg = self.g_series()
        estimate = 0
        if (zk is not None):
            n = numpy.sum((nk * zk))
            for (j, x) in enumerate(nk):
                if x:
                    estimate -= ((zk[j] * x) * gg(x))
        else:
            n = numpy.sum(nk)
            for x in nk:
                if x:
                    estimate -= (x * gg(x))
        estimate = (numpy.log(n) - (estimate / n))
        self.estimate_ = estimate
        return self
class ActuatedDampedPendulum(ActuatedSimplePendulum):
    """Actuated pendulum augmented with a viscous joint-damping force.

    The first three entries of ``params`` configure the base pendulum; the
    last entry is the viscous damping coefficient.
    """

    def __init__(self, params=None):
        # Sample positive parameters when none are supplied.
        params = torch.abs(torch.randn(4)) if params is None else params
        super().__init__(params=params[:3])
        damping_coeff = params[-1].reshape(1, self._qdim)
        self._visc_force = ViscousJointDampingForce(damping_coeff)
        self._generalized_force = GeneralizedForces([self._lin_force, self._visc_force])

    def generalized_force(self, q, v, u):
        """Total generalized force (linear actuation + viscous damping)."""
        return self._generalized_force(q, v, u)
class Vocab(object):
    """Word-frequency accumulator that builds word<->index mappings."""

    def __init__(self):
        # Raw frequency counts plus special tokens that always come first.
        self._count_dict = dict()
        self._predefined_list = [PAD, UNK, ASPECT]

    def add(self, word):
        """Count one occurrence of ``word``."""
        self._count_dict[word] = self._count_dict.get(word, 0) + 1

    def add_list(self, words):
        """Count every word in ``words``."""
        for token in words:
            self.add(token)

    def get_vocab(self, max_size=None, min_freq=0):
        """Build ``(word2index, index2word)`` sorted by descending frequency.

        Words past ``max_size`` or below ``min_freq`` collapse onto the UNK
        index; ``index2word`` maps the UNK index back to UNK itself.
        """
        by_freq = sorted(self._count_dict.items(), key=operator.itemgetter(1), reverse=True)
        word2index = {}
        for token in self._predefined_list:
            word2index[token] = len(word2index)
        for (token, freq) in by_freq:
            if token in word2index:
                continue
            overflow = (max_size is not None) and (len(word2index) >= max_size)
            if overflow or (freq < min_freq):
                word2index[token] = word2index[UNK]
            else:
                word2index[token] = len(word2index)
        index2word = {word2index[UNK]: UNK}
        for (token, pos) in word2index.items():
            if pos != word2index[UNK]:
                index2word[pos] = token
        return (word2index, index2word)
def test_single_objective_max_loss_negative():
    """A negative max_empirical_loss must be rejected with ValueError."""
    with pytest.raises(ValueError):
        SingleObjectiveCDV(max_empirical_loss=max_empirical_loss_neg)
class Model(torch.nn.Module):
    """Feature extractor: a backbone minus its final child module, with the
    output flattened to (batch, features)."""

    def __init__(self, backbone):
        super(Model, self).__init__()
        # Drop the backbone's last child (typically the classification head).
        children = list(backbone.children())
        self.conv_net = torch.nn.Sequential(*children[:-1])

    def forward(self, x):
        features = self.conv_net(x)
        return torch.flatten(features, 1)
def expand_scalar(x, shp):
    """Broadcast a scalar to an array of shape ``shp``.

    Non-scalar inputs are returned unchanged after a shape check.
    """
    if not np.isscalar(x):
        assert x.shape == shp
        return x
    return x * np.ones(shp)
def remove_explain_phase(settings: hypothesis.settings) -> hypothesis.settings:
    """Return ``settings`` without the explain phase (unchanged if absent)."""
    if Phase.explain not in settings.phases:
        return settings
    kept = tuple(p for p in settings.phases if p != Phase.explain)
    return hypothesis.settings(settings, phases=kept)
class ContextualAttentionModule(nn.Module):
    """Contextual attention for image inpainting.

    Correlates patches of ``x`` with patches of ``context`` (on a downscaled
    copy), converts the correlation into an attention map, and reconstructs
    the output by pasting the best-matching full-resolution context patches
    back together via transposed convolution.
    """

    def __init__(self, unfold_raw_kernel_size=4, unfold_raw_stride=2, unfold_raw_padding=1, unfold_corr_kernel_size=3, unfold_corr_stride=1, unfold_corr_dilation=1, unfold_corr_padding=1, scale=0.5, fuse_kernel_size=3, softmax_scale=10, return_attention_score=True):
        super().__init__()
        self.unfold_raw_kernel_size = unfold_raw_kernel_size
        self.unfold_raw_stride = unfold_raw_stride
        self.unfold_raw_padding = unfold_raw_padding
        self.unfold_corr_kernel_size = unfold_corr_kernel_size
        self.unfold_corr_stride = unfold_corr_stride
        self.unfold_corr_dilation = unfold_corr_dilation
        self.unfold_corr_padding = unfold_corr_padding
        # Correlation runs on inputs downscaled by this factor; raw context
        # patches keep full resolution for reconstruction.
        self.scale = scale
        self.fuse_kernel_size = fuse_kernel_size
        self.with_fuse_correlation = (fuse_kernel_size > 1)
        self.softmax_scale = softmax_scale
        self.return_attention_score = return_attention_score
        if self.with_fuse_correlation:
            assert ((fuse_kernel_size % 2) == 1)
            # Identity (eye) kernel propagates attention along diagonals.
            fuse_kernel = torch.eye(fuse_kernel_size).view(1, 1, fuse_kernel_size, fuse_kernel_size)
            self.register_buffer('fuse_kernel', fuse_kernel)
            padding = int(((fuse_kernel_size - 1) // 2))
            self.fuse_conv = partial(F.conv2d, padding=padding, stride=1)
        self.softmax = nn.Softmax(dim=1)

    def forward(self, x, context, mask=None):
        """Attend ``x`` to ``context``; ``mask`` marks invalid context regions.

        Returns the reconstructed map, plus the attention score tensor when
        ``return_attention_score`` is set.
        """
        raw_context = context
        raw_context_cols = self.im2col(raw_context, kernel_size=self.unfold_raw_kernel_size, stride=self.unfold_raw_stride, padding=self.unfold_raw_padding, normalize=False, return_cols=True)
        x = F.interpolate(x, scale_factor=self.scale)
        context = F.interpolate(context, scale_factor=self.scale)
        context_cols = self.im2col(context, kernel_size=self.unfold_corr_kernel_size, stride=self.unfold_corr_stride, padding=self.unfold_corr_padding, dilation=self.unfold_corr_dilation, normalize=True, return_cols=True)
        (h_unfold, w_unfold) = self.calculate_unfold_hw(context.size()[(- 2):], kernel_size=self.unfold_corr_kernel_size, stride=self.unfold_corr_stride, padding=self.unfold_corr_padding, dilation=self.unfold_corr_dilation)
        context_cols = context_cols.reshape((- 1), *context_cols.shape[2:])
        correlation_map = self.patch_correlation(x, context_cols)
        if self.with_fuse_correlation:
            correlation_map = self.fuse_correlation_map(correlation_map, h_unfold, w_unfold)
        correlation_map = self.mask_correlation_map(correlation_map, mask=mask)
        # Temperature-scaled softmax over context patches.
        attention_score = self.softmax((correlation_map * self.softmax_scale))
        raw_context_filter = raw_context_cols.reshape((- 1), *raw_context_cols.shape[2:])
        output = self.patch_copy_deconv(attention_score, raw_context_filter)
        # Normalize overlapping transposed-conv contributions.
        overlap_factor = self.calculate_overlap_factor(attention_score)
        output /= overlap_factor
        if self.return_attention_score:
            (n, _, h_s, w_s) = attention_score.size()
            attention_score = attention_score.view(n, h_unfold, w_unfold, h_s, w_s)
            return (output, attention_score)
        return output

    def patch_correlation(self, x, kernel):
        """Per-sample correlation of ``x`` with its patch bank via grouped conv."""
        (n, _, h_in, w_in) = x.size()
        patch_corr = F.conv2d(x.view(1, (- 1), h_in, w_in), kernel, stride=self.unfold_corr_stride, padding=self.unfold_corr_padding, dilation=self.unfold_corr_dilation, groups=n)
        (h_out, w_out) = patch_corr.size()[(- 2):]
        return patch_corr.view(n, (- 1), h_out, w_out)

    def patch_copy_deconv(self, attention_score, context_filter):
        """Reconstruct output via transposed conv with raw patches as filters."""
        (n, _, h, w) = attention_score.size()
        attention_score = attention_score.view(1, (- 1), h, w)
        output = F.conv_transpose2d(attention_score, context_filter, stride=self.unfold_raw_stride, padding=self.unfold_raw_padding, groups=n)
        (h_out, w_out) = output.size()[(- 2):]
        return output.view(n, (- 1), h_out, w_out)

    def fuse_correlation_map(self, correlation_map, h_unfold, w_unfold):
        """Smooth the correlation map with the identity kernel along both
        unfold axes to encourage coherent attention."""
        (n, _, h_map, w_map) = correlation_map.size()
        map_ = correlation_map.permute(0, 2, 3, 1)
        map_ = map_.reshape(n, (h_map * w_map), (h_unfold * w_unfold), 1)
        map_ = map_.permute(0, 3, 1, 2).contiguous()
        map_ = self.fuse_conv(map_, self.fuse_kernel)
        correlation_map = map_.view(n, h_unfold, w_unfold, h_map, w_map)
        # Second pass fuses along the transposed axis ordering.
        map_ = correlation_map.permute(0, 2, 1, 4, 3).reshape(n, 1, (h_unfold * w_unfold), (h_map * w_map))
        map_ = self.fuse_conv(map_, self.fuse_kernel)
        correlation_map = map_.view(n, w_unfold, h_unfold, w_map, h_map).permute(0, 4, 3, 2, 1)
        correlation_map = correlation_map.reshape(n, (- 1), h_unfold, w_unfold)
        return correlation_map

    def calculate_unfold_hw(self, input_size, kernel_size=3, stride=1, dilation=1, padding=0):
        """Output (h, w) of ``F.unfold`` for the given geometry."""
        (h_in, w_in) = input_size
        h_unfold = int((((((h_in + (2 * padding)) - (dilation * (kernel_size - 1))) - 1) / stride) + 1))
        w_unfold = int((((((w_in + (2 * padding)) - (dilation * (kernel_size - 1))) - 1) / stride) + 1))
        return (h_unfold, w_unfold)

    def calculate_overlap_factor(self, attention_score):
        """Per-pixel count of overlapping patch contributions."""
        (h, w) = attention_score.shape[(- 2):]
        kernel_size = self.unfold_raw_kernel_size
        ones_input = torch.ones(1, 1, h, w).to(attention_score)
        ones_filter = torch.ones(1, 1, kernel_size, kernel_size).to(attention_score)
        overlap = F.conv_transpose2d(ones_input, ones_filter, stride=self.unfold_raw_stride, padding=self.unfold_raw_padding)
        # Avoid division by zero where no patch contributes.
        overlap[(overlap == 0)] = 1.0
        return overlap

    def mask_correlation_map(self, correlation_map, mask):
        """Set correlations with masked context patches to -inf so they get
        zero weight after the softmax. No-op when ``mask`` is None."""
        if (mask is not None):
            mask = F.interpolate(mask, scale_factor=self.scale)
            mask_cols = self.im2col(mask, kernel_size=self.unfold_corr_kernel_size, stride=self.unfold_corr_stride, padding=self.unfold_corr_padding, dilation=self.unfold_corr_dilation)
            # A patch is invalid if any of its pixels is masked.
            mask_cols = (mask_cols.sum(dim=1, keepdim=True) > 0).float()
            mask_cols = mask_cols.permute(0, 2, 1).reshape(mask.size(0), (- 1), 1, 1)
            mask_cols[(mask_cols == 1)] = (- float('inf'))
            correlation_map += mask_cols
        return correlation_map

    def im2col(self, img, kernel_size, stride=1, padding=0, dilation=1, normalize=False, return_cols=False):
        """Unfold ``img`` into patch columns; optionally L2-normalize each
        patch and/or reshape to (n, num_cols, c, k, k)."""
        img_unfold = F.unfold(img, kernel_size, stride=stride, padding=padding, dilation=dilation)
        if normalize:
            norm = torch.sqrt((img_unfold ** 2).sum(dim=1, keepdim=True))
            eps = torch.tensor([0.0001]).to(img)
            img_unfold = (img_unfold / torch.max(norm, eps))
        if return_cols:
            img_unfold_ = img_unfold.permute(0, 2, 1)
            (n, num_cols) = img_unfold_.size()[:2]
            img_cols = img_unfold_.view(n, num_cols, img.size(1), kernel_size, kernel_size)
            return img_cols
        return img_unfold
class FlaxBigBirdForSequenceClassification(metaclass=DummyObject):
    """Placeholder that raises an informative error when Flax is unavailable."""
    _backends = ['flax']
    def __init__(self, *args, **kwargs):
        requires_backends(self, ['flax'])
def AG(n, q, x=None):
    """Affine geometry matroid AG(n, q) over GF(q).

    Realized via projective space: the points of PG(n, q) whose first
    coordinate is nonzero form the columns of the representation matrix.
    ``x`` names the generator of the field extension.
    """
    if x is None:
        x = 'x'
    field = GF(q, x)
    space = ProjectiveSpace(n, field)
    affine_points = [list(p) for p in space if not (list(p)[0] == 0)]
    A = Matrix(field, affine_points).transpose()
    M = Matroid(A)
    M.rename('AG(' + str(n) + ', ' + str(q) + '): ' + repr(M))
    return M
def find_representative(m):
    """Return the class a module exposes as its representative.

    Prefers an explicit ``REPRESENTATIVE`` attribute; otherwise falls back
    to the first class among the module's members (``inspect.getmembers``
    order). Returns None when no class is found.
    """
    if hasattr(m, 'REPRESENTATIVE'):
        return getattr(m, 'REPRESENTATIVE')
    for name, obj in inspect.getmembers(m):
        if inspect.isclass(obj):
            return getattr(m, name)
    return None
def write_rd_page(f, is_64, is_replay):
    """Write the rd-page code stub to ``f``.

    Layout: three unconditional syscall thunks, then two pairs of slots
    whose thunks swap between a real syscall and a no-op depending on
    ``is_replay`` (with one more unconditional syscall thunk between the
    pairs), followed by 8 bytes of 0xff padding.

    f: binary writable file object.
    is_64: use the 64-bit "syscall; ret" encoding instead of "int 0x80; ret".
    is_replay: replay pages take the real syscall in the first slot of each
        pair and the no-op in the second; recording pages do the opposite.
    """
    # Renamed from `bytes`, which shadowed the builtin.
    if is_64:
        syscall_bytes = bytearray([15, 5, 195])      # syscall; ret
    else:
        syscall_bytes = bytearray([205, 128, 195])   # int 0x80; ret
    nocall_bytes = bytearray([49, 192, 195])         # xor eax,eax; ret
    f.write(syscall_bytes)
    f.write(syscall_bytes)
    f.write(syscall_bytes)
    # First replay/record pair.
    f.write(syscall_bytes if is_replay else nocall_bytes)
    f.write(nocall_bytes if is_replay else syscall_bytes)
    f.write(syscall_bytes)
    # Second replay/record pair.
    f.write(syscall_bytes if is_replay else nocall_bytes)
    f.write(nocall_bytes if is_replay else syscall_bytes)
    ff_bytes = bytearray([255, 255, 255, 255, 255, 255, 255, 255])
    f.write(ff_bytes)
def video_list_from_file(video_list_fpath: str, base_path: Optional[str]=None):
    """Read video paths from a file (one per line), each optionally
    prefixed with ``base_path``."""
    with PathManager.open(video_list_fpath, 'r') as fh:
        return [maybe_prepend_base_path(base_path, line.strip()) for line in fh]
def interaction_information(ar, ks=None, estimator='nsb', axis=0, r=None):
    """Interaction information among the variables of ``ar``.

    Computed by inclusion-exclusion over entropies of all variable subsets:
    I = -sum over subset sizes T of (-1)^(S-T) * sum of H over size-T subsets.

    ar: data (converted to DataArray if needed); ks: alphabet sizes;
    estimator: entropy estimator name or object; axis: sample axis;
    r: if given, return one estimate per size-r variable combination.
    """
    def iinfo(X, ks, estimator):
        # Inclusion-exclusion sum over subset sizes 1..S.
        info = 0.0
        S = len(X)
        for T in range(1, (S + 1)):
            sgn = ((- 1) ** (S - T))
            info += (sgn * sum(from_data(X, ks=ks, estimator=estimator, r=T)))
        return (- info)
    estimator = as_estimator(estimator)
    if (not isinstance(ar, DataArray)):
        ar = DataArray(ar, ks=ks, axis=axis)
    if (r is not None):
        # One interaction-information estimate per r-combination of variables.
        return estimates_from_combinations(ar, r, q=iinfo, estimator=estimator)
    (data, k) = ar.iter_data()
    return iinfo(data, k, estimator)
def test_hash_same(default_test_case, variable_reference_mock, field_mock):
    """Structurally identical field statements must have equal structural hashes."""
    statement = stmt.FieldStatement(default_test_case, field_mock, variable_reference_mock)
    statement2 = stmt.FieldStatement(default_test_case, field_mock, variable_reference_mock)
    # Each memo maps that statement's own variables to the same positions.
    memo = {variable_reference_mock: 0, statement.ret_val: 1}
    memo2 = {variable_reference_mock: 0, statement2.ret_val: 1}
    assert (statement.structural_hash(memo) == statement2.structural_hash(memo2))
class ScalarNoiseModel(NoiseModel):
    """Noise model defined elementwise by a scalar whitening function."""

    def whiten_scalar(self, x: sf.Scalar, bounded_away_from_zero: bool=False) -> sf.Scalar:
        """Whiten a single scalar; subclasses are expected to implement this.

        bounded_away_from_zero: hint that ``x`` is known nonzero (e.g. a
        norm), letting implementations skip singularity handling.
        """
        pass

    def whiten(self, unwhitened_residual: sf.Matrix.MatrixT) -> sf.Matrix.MatrixT:
        """Apply the scalar whitening function to each element."""
        return unwhitened_residual.applyfunc(self.whiten_scalar)

    def whiten_norm(self, residual: sf.Matrix.MatrixT, epsilon: sf.Scalar=sf.epsilon()) -> sf.Matrix.MatrixT:
        """Whiten the norm of ``residual`` and rescale the vector so its
        norm equals the whitened norm."""
        norm = residual.norm(epsilon)
        whitened_norm = self.whiten_scalar(norm, bounded_away_from_zero=True)
        scale_factor = (whitened_norm / norm)
        return (scale_factor * residual)
class M_PHATE(phate.PHATE):
    """Multislice PHATE embedding for longitudinal data.

    Expects input of shape (n_timesteps, n_points, n_dim); builds a
    multislice kernel combining intraslice and interslice neighborhoods,
    then runs standard PHATE on the resulting graph.
    """

    def __init__(self, n_components=2, intraslice_knn=2, interslice_knn=25, decay=5, t='auto', gamma=0, n_landmark=4000, normalize=True, mds_solver='smacof', n_pca=100, n_svd=100, n_jobs=(- 2), random_state=None, verbose=1, knn=None, **phate_kwargs):
        if (knn is not None):
            warnings.warn('Argument `knn` is ambiguous and ignored. Use `intraslice_knn` or `interslice_knn`.', UserWarning)
        self.interslice_knn = interslice_knn
        self.n_svd = n_svd
        self.normalize = normalize
        # PHATE stores intraslice_knn as self.knn (exposed by the property below).
        return super().__init__(n_components=n_components, knn=intraslice_knn, mds_solver=mds_solver, decay=decay, t=t, n_pca=n_pca, gamma=gamma, n_landmark=n_landmark, n_jobs=n_jobs, random_state=random_state, verbose=verbose, **phate_kwargs)

    @property
    def intraslice_knn(self):
        """Number of intraslice neighbors (alias for PHATE's ``knn``).

        Fix: restored the ``@property`` decorator — without it, ``fit``
        passed a bound method instead of an int to the kernel builder.
        """
        return self.knn

    def fit(self, X):
        """Build the multislice kernel/graph and fit PHATE on it.

        Raises ValueError unless ``X`` has exactly three dimensions.
        """
        if (not (len(X.shape) == 3)):
            raise ValueError('Expected X to be a tensor with three dimensions. Got shape {}'.format(X.shape))
        if self.normalize:
            X = utils.normalize(X)
        with _logger.task('multislice kernel'):
            K = kernel.multislice_kernel(X, intraslice_knn=self.intraslice_knn, interslice_knn=self.interslice_knn, decay=self.decay, n_pca=self.n_pca, distance=self.knn_dist, n_jobs=self.n_jobs)
        with _logger.task('graph and diffusion operator'):
            # Landmarking only pays off when there are more points than landmarks.
            n_landmark = (self.n_landmark if ((self.n_landmark is not None) and (self.n_landmark < K.shape[0])) else None)
            self.graph = graphtools.Graph(K, precomputed='affinity', n_landmark=n_landmark, n_svd=self.n_svd, n_jobs=self.n_jobs, verbose=self.verbose, random_state=self.random_state, **self.kwargs)
            # Force computation of the diffusion operator inside the log task.
            self.diff_op
        result = super().fit(self.graph)
        return result

    def fit_transform(self, X, **kwargs):
        """Fit on ``X`` and return the embedding."""
        with _logger.task('M-PHATE'):
            self.fit(X)
            embedding = self.transform(**kwargs)
        return embedding

    def _check_params(self):
        phate.utils.check_int(interslice_knn=self.interslice_knn, n_svd=self.n_svd)
        phate.utils.check_positive(interslice_knn=self.interslice_knn, n_svd=self.n_svd)
        return super()._check_params()

    def set_params(self, **params):
        """Update parameters, resetting the kernel/graph where needed."""
        reset_kernel = False
        if (('interslice_knn' in params) and (params['interslice_knn'] != self.interslice_knn)):
            self.interslice_knn = params['interslice_knn']
            reset_kernel = True
            del params['interslice_knn']
        if ('intraslice_knn' in params):
            # Translate to PHATE's parameter name.
            params['knn'] = params['intraslice_knn']
            del params['intraslice_knn']
        if (('n_svd' in params) and (params['n_svd'] != self.n_svd)):
            self._set_graph_params(n_svd=params['n_svd'])
            self.n_svd = params['n_svd']
            del params['n_svd']
        if reset_kernel:
            self._reset_graph()
        return super().set_params(**params)
class docSect1TypeSub(supermod.docSect1Type):
    """Generated subclass of docSect1Type (generateDS-style binding).

    NOTE(review): id/title/para/sect2/internal are accepted for signature
    compatibility but not forwarded to the superclass — confirm this
    matches the generator's intent.
    """
    def __init__(self, id=None, title='', para=None, sect2=None, internal=None, mixedclass_=None, content_=None):
        supermod.docSect1Type.__init__(self, mixedclass_, content_)
def test_IndexedOptionArray_NumpyArray():
    """Read an IndexedOptionArray inside a CUDA kernel; index -1 marks None,
    which the kernel replaces with the sentinel 999.0."""
    v2a = ak.contents.indexedoptionarray.IndexedOptionArray(ak.index.Index(np.array([2, 2, (- 1), 1, (- 1), 5, 4], np.int64)), ak.contents.numpyarray.NumpyArray(np.array([0.0, 1.1, 2.2, 3.3, 4.4, 5.5])))

    # Fix: restored the '@' of the decorator, which was lost in extraction;
    # without it, f is a plain function and f[(grid, block)] fails.
    @_cuda.jit(extensions=[ak.numba.cuda])
    def f(out, obj):
        out[0] = len(obj)
        out[1] = (obj[0] if (obj[0] is not None) else 999.0)
        out[2] = (obj[1] if (obj[1] is not None) else 999.0)
        out[3] = (obj[2] if (obj[2] is not None) else 999.0)
        out[4] = (obj[3] if (obj[3] is not None) else 999.0)
        out[5] = (obj[4] if (obj[4] is not None) else 999.0)
        out[6] = (obj[5] if (obj[5] is not None) else 999.0)
        out[7] = (obj[6] if (obj[6] is not None) else 999.0)

    out = np.zeros(8, dtype=np.float64)
    f[(blockspergrid, threadsperblock)](out, ak.highlevel.Array(v2a, backend='cuda'))
    assert (out.tolist() == [7.0, 2.2, 2.2, 999.0, 1.1, 999.0, 5.5, 4.4])
class ModelLib(BaseModelLib):
    """Ultralytics YOLOv8 wrapper for the quantization flow: swaps in
    replacement modules while preserving the original trained weights."""

    def __init__(self, args):
        self.ultralytics_model = YOLOReplacer(args[MODEL_NAME])
        self.dataset_name = COCO_DATASET
        self.preprocess = yolov8_preprocess_chw_transpose
        # Snapshot weights, rewrite the module graph, then restore the
        # weights into the rewritten model.
        model_weights = self.ultralytics_model.model.state_dict()
        self.model = self.ultralytics_model.model
        self.model = DetectionModelModuleReplacer().replace(self.model)
        self.model = C2fModuleReplacer().replace(self.model)
        self.model = TaskModuleReplacer[self.ultralytics_model.task].replace(self.model)
        initialize_weights(self.model)
        self.model.load_state_dict(model_weights)
        super().__init__(args)

    def get_model(self):
        """The module-replaced torch model."""
        return self.model

    def get_representative_dataset(self, representative_dataset_folder, n_iter, batch_size):
        """Return a generator factory yielding ``n_iter`` preprocessed batches."""
        image_data_loader = FolderImageLoader(representative_dataset_folder, preprocessing=[self.preprocess], batch_size=batch_size)
        def representative_data_gen() -> list:
            for _ in range(n_iter):
                (yield [image_data_loader.sample()])
        return representative_data_gen

    def evaluate(self, model):
        """Run Ultralytics COCO validation on ``model``; returns (mAP, DatasetInfo)."""
        device = torch.device(('cuda' if torch.cuda.is_available() else 'cpu'))
        model.to(device)
        self.ultralytics_model = prepare_model_for_ultralytics_val(self.ultralytics_model, model)
        if (self.args[VALIDATION_DATASET_FOLDER] is not None):
            logging.warning('The provided value for "validation_dataset_folder" is ignored. Ultralytics utilizes the dataset path specified in the coco.yaml file. By default, the dataset path is taken from "/home/user/.config/Ultralytics/settings.yaml", depends on your operating system.')
        results = self.ultralytics_model.val(batch=int(self.args[BATCH_SIZE]))
        # Last entry of mean_results() is the mAP metric reported here.
        map_res = results.mean_results()[(- 1)]
        dataset_info = DatasetInfo(self.dataset_name, 5000)
        return (map_res, dataset_info)
class LabelSmoothing(nn.Module):
    """Label-smoothed negative log-likelihood loss that ignores padding targets.

    loss = sum over non-padding positions of
        confidence * NLL(target) + smoothing * mean-NLL over classes.
    """

    def __init__(self, padding_idx, smoothing=0.0):
        super(LabelSmoothing, self).__init__()
        self.padding_idx = padding_idx
        self.confidence = 1.0 - smoothing
        self.smoothing = smoothing

    def forward(self, x, target):
        # Log-probabilities in float32 for numerical stability.
        log_probs = torch.nn.functional.log_softmax(x, dim=-1, dtype=torch.float32)
        keep = target != self.padding_idx
        picked = log_probs.gather(dim=-1, index=target.unsqueeze(1)).squeeze(1)
        nll = (-picked)[keep]
        uniform = (-log_probs.mean(dim=-1))[keep]
        return (self.confidence * nll + self.smoothing * uniform).sum()
class PuzzlePiece():
    """Triangular puzzle piece identified by its tuple of edge labels."""

    def __eq__(self, other) -> bool:
        # Pieces are equal iff their borders (edge-label tuples) match.
        if isinstance(other, PuzzlePiece):
            return (self.border() == other.border())
        else:
            return False

    def __hash__(self):
        # Consistent with __eq__: hash on (type, border).
        return hash((type(self), self.border()))

    def border(self) -> tuple:
        """Edge labels in canonical edge order."""
        return tuple((self.edge_label(edge) for edge in self.edges()))

    def color(self) -> str:
        """Fill color used when plotting the whole piece."""
        colors = {('0', '0', '0'): 'red', ('1', '1', '1'): 'blue', ('2', '2', '2'): 'green'}
        border = self.border()
        if (border in colors):
            color = colors[border]
        elif ('K' in border):
            color = 'orange'
        elif ('10' in border):
            color = 'white'
        elif any((label.startswith('T') for label in border)):
            color = 'yellow'
        else:
            color = 'white'
        return color

    def _plot_label(self, label, coords, fontcolor=(0.3, 0.3, 0.3), fontsize=15, rotation=0):
        """Draw the text for labels '0'/'1'/'2'; other labels are not drawn."""
        if (label in ('0', '1', '2')):
            return text(label, coords, color=fontcolor, fontsize=fontsize, rotation=rotation)
        else:
            return Graphics()

    def _plot_piece(self, coords, border_color=(0.5, 0.5, 0.5), border_thickness=1, style='fill'):
        """Plot the triangle at ``coords``.

        style='fill': solid fill in self.color() with a border.
        style='edges': each edge drawn in its own edge_color().
        Any other style returns NotImplemented.
        """
        if (style == 'fill'):
            P = polygon(coords, color=self.color())
            P += polygon(coords, fill=False, color=border_color, thickness=border_thickness)
            return P
        elif (style == 'edges'):
            # Winding order of the coordinates differs for up/down triangles.
            if isinstance(self, DeltaPiece):
                edges = ('north_west', 'south', 'north_east')
            elif isinstance(self, NablaPiece):
                edges = ('south_west', 'north', 'south_east')
            else:
                edges = self.edges()
            P = Graphics()
            for (i, edge) in enumerate(edges):
                P += line([coords[i], coords[((i + 1) % 3)]], color=self.edge_color(edge), thickness=border_thickness)
            return P
        else:
            return NotImplemented

    def edge_color(self, edge) -> str:
        """Plot color of a single edge, derived from its label."""
        edge_label = self.edge_label(edge)
        colors = {'1': 'blue', '0': 'red'}
        if (edge_label in colors):
            color = colors[edge_label]
        elif ('K' in edge_label):
            color = 'orange'
        elif edge_label.startswith('T'):
            color = 'yellow'
        else:
            color = 'white'
        return color

    def edge_label(self, edge) -> str:
        """Label attached to ``edge`` (lookup in the piece's label table)."""
        return self._edge_labels[edge]
    # Allow piece[edge] as a shorthand for piece.edge_label(edge).
    __getitem__ = edge_label
def validate_args(args):
    """Validate synthesis CLI arguments.

    Requires text or a file, and non-negative temperature and speaking
    rate. Raises AssertionError on invalid input; returns ``args``.
    """
    # Fixed the garbled message text ("provided Matcha... sometext") and
    # aligned the speaking-rate message with the actual >= 0 check.
    assert (args.text or args.file), 'Either text or file must be provided. Matcha-T(ea)TTS needs some text to whisk the waveforms.'
    assert (args.temperature >= 0), 'Sampling temperature cannot be negative'
    assert (args.speaking_rate >= 0), 'Speaking rate cannot be negative'
    return args
class OSNet(nn.Module):
    """Omni-Scale Network (OSNet) backbone.

    blocks/layers/channels describe three intermediate stages; ``channels``
    carries one extra entry for the stem. ``IN`` enables instance
    normalization in the early layers.
    """

    def __init__(self, blocks, layers, channels, bn_norm, IN=False, **kwargs):
        super(OSNet, self).__init__()
        num_blocks = len(blocks)
        assert (num_blocks == len(layers))
        assert (num_blocks == (len(channels) - 1))
        # Stem: 7x7 conv (stride 2) followed by 3x3 max pooling.
        self.conv1 = ConvLayer(3, channels[0], 7, bn_norm, stride=2, padding=3, IN=IN)
        self.maxpool = nn.MaxPool2d(3, stride=2, padding=1)
        self.conv2 = self._make_layer(blocks[0], layers[0], channels[0], channels[1], bn_norm, reduce_spatial_size=True, IN=IN)
        self.conv3 = self._make_layer(blocks[1], layers[1], channels[1], channels[2], bn_norm, reduce_spatial_size=True)
        self.conv4 = self._make_layer(blocks[2], layers[2], channels[2], channels[3], bn_norm, reduce_spatial_size=False)
        self.conv5 = Conv1x1(channels[3], channels[3], bn_norm)
        self._init_params()

    def _make_layer(self, block, layer, in_channels, out_channels, bn_norm, reduce_spatial_size, IN=False):
        """Stack ``layer`` blocks; optionally end with 1x1 conv + 2x2 average
        pooling to halve the spatial resolution."""
        layers = []
        layers.append(block(in_channels, out_channels, bn_norm, IN=IN))
        for i in range(1, layer):
            layers.append(block(out_channels, out_channels, bn_norm, IN=IN))
        if reduce_spatial_size:
            layers.append(nn.Sequential(Conv1x1(out_channels, out_channels, bn_norm), nn.AvgPool2d(2, stride=2)))
        return nn.Sequential(*layers)

    def _init_params(self):
        """Kaiming init for convs, unit/zero init for norm layers, small
        normal init for linear layers."""
        for m in self.modules():
            if isinstance(m, nn.Conv2d):
                nn.init.kaiming_normal_(m.weight, mode='fan_out', nonlinearity='relu')
                if (m.bias is not None):
                    nn.init.constant_(m.bias, 0)
            elif isinstance(m, nn.BatchNorm2d):
                nn.init.constant_(m.weight, 1)
                nn.init.constant_(m.bias, 0)
            elif isinstance(m, nn.BatchNorm1d):
                nn.init.constant_(m.weight, 1)
                nn.init.constant_(m.bias, 0)
            elif isinstance(m, nn.Linear):
                nn.init.normal_(m.weight, 0, 0.01)
                if (m.bias is not None):
                    nn.init.constant_(m.bias, 0)

    def forward(self, x):
        """Feature map after the full stem + stage stack (no pooling head here)."""
        x = self.conv1(x)
        x = self.maxpool(x)
        x = self.conv2(x)
        x = self.conv3(x)
        x = self.conv4(x)
        x = self.conv5(x)
        return x
class CKConv(torch.nn.Module):
    """Continuous kernel convolution: the convolution kernel is produced by
    a small network (SMPKernel) evaluated on a grid of relative positions."""

    def __init__(self, in_channels: int, out_channels: int, horizon: int, kernel_dim_linear=2, kernel_n_points=36, kernel_radius=0.002, kernel_coord_std=0.1, conv_use_fft=False, conv_bias=True, conv_padding='same', conv_stride=1):
        super().__init__()
        self.Kernel = ckconv.nn.ck.SMPKernel(dim_linear=kernel_dim_linear, in_channels=in_channels, out_channels=out_channels, n_points=kernel_n_points, radius=kernel_radius, coord_std=kernel_coord_std)
        if conv_bias:
            self.bias = torch.nn.Parameter(torch.Tensor(out_channels))
            self.bias.data.fill_(value=0.0)
        else:
            self.bias = None
        self.padding = conv_padding
        self.stride = conv_stride
        self.rel_positions = None
        self.kernel_dim_linear = kernel_dim_linear
        # horizon: 'full', 'same', or an odd number; fixes the kernel support size.
        self.horizon = horizon
        self.use_fftconv = conv_use_fft
        # Persisted so inference reuses the support length seen in training.
        self.register_buffer('train_length', torch.zeros(1).int(), persistent=True)
        # Cache of the most recently sampled kernel (not checkpointed).
        self.register_buffer('conv_kernel', torch.zeros(in_channels), persistent=False)
        # Select implementation by name: (causal_)?(fft)?conv.
        conv_type = 'conv'
        if conv_use_fft:
            conv_type = ('fft' + conv_type)
        if (kernel_dim_linear == 1):
            conv_type = ('causal_' + conv_type)
        self.conv = getattr(ckconv_F, conv_type)

    def forward(self, x):
        """Sample the continuous kernel and convolve it with ``x``."""
        x_shape = x.shape
        rel_pos = self.handle_rel_positions(x)
        conv_kernel = self.Kernel(rel_pos).view((- 1), x_shape[1], *rel_pos.shape[2:])
        # Stash for inspection/debugging.
        self.conv_kernel = conv_kernel
        return self.conv(x, conv_kernel, self.bias)

    def handle_rel_positions(self, x):
        """Lazily build (and cache) the relative-position grid the kernel
        network is evaluated on; the grid size comes from ``horizon``."""
        if (self.rel_positions is None):
            if (self.train_length[0] == 0):
                if (self.horizon == 'full'):
                    self.train_length[0] = ((2 * x.shape[(- 1)]) - 1)
                elif (self.horizon == 'same'):
                    self.train_length[0] = x.shape[(- 1)]
                elif ((int(self.horizon) % 2) == 1):
                    self.train_length[0] = int(self.horizon)
                else:
                    raise ValueError(f"The horizon argument of the operation must be either 'full', 'same' or an odd number in string format. Current value: {self.horizon}")
            rel_positions = rel_positions_grid(grid_sizes=self.train_length.repeat(self.kernel_dim_linear)).unsqueeze(0)
            self.rel_positions = rel_positions.to(x.device)
        return self.rel_positions
def test_wrap_index_numpy():
    """Index64 must wrap the numpy buffer without copying it."""
    data = np.arange(10, dtype=np.int64)
    index = ak.index.Index64(data)
    other_data = np.asarray(index)
    assert np.shares_memory(data, other_data)
# Fix: restored the '@' of the decorator, lost in extraction; without it
# the jit call is a no-op expression and the kernel is never compiled.
@_cuda.jit(extensions=[ak.numba.cuda])
def pass_record_through(array, out):
    """CUDA kernel: copy field ``x`` of each record into ``out`` by thread id."""
    tid = nb_cuda.grid(1)
    out[tid] = array.x[tid]
def get_full_version_string(major, minor, build, revision):
    """Build the quoted Z3 full-version string, with git info when available."""
    global GIT_HASH, GIT_DESCRIBE
    res = ('Z3 %s.%s.%s.%s' % (major, minor, build, revision))
    if GIT_HASH:
        res += (' ' + GIT_HASH)
    if GIT_DESCRIBE:
        # NOTE(review): assumes check_output returns str; the stdlib
        # subprocess.check_output returns bytes on Python 3 — confirm a
        # decoding wrapper named check_output is in scope.
        branch = check_output(['git', 'rev-parse', '--abbrev-ref', 'HEAD'])
        res += (((' ' + branch) + ' ') + check_output(['git', 'describe']))
    # Returned with surrounding double quotes for embedding in generated code.
    return (('"' + res) + '"')
def test_mwt_ner_conversion():
    """CoNLL round-trip of an MWT document preserves NER and leaves misc empty."""
    doc = CoNLL.conll2doc(input_str=MWT_NER)
    assert (len(doc.sentences) == 1)
    sentence = doc.sentences[0]
    assert (len(sentence.tokens) == 5)
    assert (not sentence.has_enhanced_dependencies())
    EXPECTED_NER = ['O', 'O', 'S-PERSON', 'O', 'O']
    # Syntactic words per token (the third token is a multi-word token).
    EXPECTED_WORDS = [1, 1, 2, 1, 1]
    for (token, ner, expected_words) in zip(sentence.tokens, EXPECTED_NER, EXPECTED_WORDS):
        assert (token.ner == ner)
        assert (not token.misc)
        assert (len(token.words) == expected_words)
        assert (not token.words[0].misc)
    # Serializing back to CoNLL must reproduce the input exactly.
    conll = '{:C}'.format(doc)
    assert (conll == MWT_NER)
def spherical_bessel_formulas(n):
    """Symbolic spherical Bessel functions j_0 .. j_{n-1} of the first kind,
    built iteratively via Rayleigh's formula:
    j_i = (-x)^i * (d/dx / x) applied i times to sin(x)/x.
    """
    x = sym.symbols('x')
    current = sym.sin(x) / x
    formulas = [current]
    for order in range(1, n):
        derived = sym.diff(current, x) / x
        formulas.append(sym.simplify(derived * (-x) ** order))
        current = sym.simplify(derived)
    return formulas
class CleanData():
    """Cleans train/test dataframes.

    Steps: drop the leftover 'index' column, remove rows with two or more
    sentinel (-9999.9) missing values, and mean-impute columns for the rows
    that have exactly one sentinel value. When ``run_train`` is False the
    training frame is left untouched.

    Fixes over the original: uses frame-level ``.loc`` assignment instead of
    pandas chained assignment, removes dead statements and duplicate local
    imports, and factors the duplicated train/test logic into helpers.
    """

    _SENTINEL = -9999.9  # value marking a missing measurement

    def __init__(self, df_train, df_test, run_train):
        self.df_train = df_train
        self.df_test = df_test
        self.run_train = run_train

    def _drop_index_col(self, df):
        """Remove the 'index' column and renumber rows, in place."""
        df.drop(['index'], axis=1, inplace=True)
        df.reset_index(drop=True, inplace=True)

    def _drop_rows_with_multi_missing(self, df):
        """Drop rows with >= 2 sentinel values and renumber rows, in place."""
        bad_rows = np.where((df <= self._SENTINEL).sum(axis=1) >= 2)[0]
        df.drop(bad_rows, axis=0, inplace=True)
        df.reset_index(drop=True, inplace=True)

    def _impute_single_missing(self, df):
        """Replace sentinel cells with their column mean (over non-sentinel
        entries), in place. Returns per-column replacement counts."""
        from collections import defaultdict
        stats = defaultdict(int)
        for name in df.columns:
            col_mean = df.loc[~(df[name] == self._SENTINEL), name].mean()
            missing_idx = np.where(df[name] == self._SENTINEL)[0]
            stats[name] = len(missing_idx)
            # Frame-level .loc assignment: guaranteed to write through,
            # unlike the chained df[name].loc[...] form.
            df.loc[df.index[missing_idx], name] = col_mean
        return stats

    def clean(self):
        """Run the full cleaning pipeline; returns (df_train, df_test)."""
        if self.run_train:
            self._drop_index_col(self.df_train)
        self._drop_index_col(self.df_test)
        if self.run_train:
            self._drop_rows_with_multi_missing(self.df_train)
            print('self.df_train.shape:', self.df_train.shape)
        self._drop_rows_with_multi_missing(self.df_test)
        print('self.df_test.shape:', self.df_test.shape)
        if self.run_train:
            one_missing = np.where((self.df_train <= self._SENTINEL).sum(axis=1) == 1)[0]
            print('(len(one_miss_train_idx)', len(one_missing))
            self._impute_single_missing(self.df_train)
            remaining = np.where((self.df_train <= self._SENTINEL).sum(axis=1) == 1)[0]
            print('len(train):', len(remaining))
        self._impute_single_missing(self.df_test)
        remaining_test = np.where((self.df_test <= self._SENTINEL).sum(axis=1) == 1)[0]
        print('len(test):', len(remaining_test))
        print('self.df_test.shape:', self.df_test.shape)
        return (self.df_train, self.df_test)
# Fix: the decorator was truncated to "_level_function(...)" in extraction;
# restored the awkward-array dispatch decorator.
@high_level_function(module='ak.str')
def replace_substring(array, pattern, replacement, *, max_replacements=None, highlevel=True, behavior=None, attrs=None):
    """Replace occurrences of ``pattern`` with ``replacement`` in each string
    of ``array``, at most ``max_replacements`` times per string.

    The initial yield hands the arguments to the dispatch decorator before
    the implementation runs.
    """
    (yield (array,))
    return _impl(array, pattern, replacement, max_replacements, highlevel, behavior, attrs)
class StyleEncoder(torch.nn.Module):
    """Encodes a spectrogram into a style embedding: spectral 1x1 convs,
    temporal GLU convs, self-attention, then masked temporal average pooling."""

    def __init__(self, in_dim=513, hidden_dim=128, out_dim=256):
        super().__init__()
        self.in_dim = in_dim
        self.hidden_dim = hidden_dim
        self.out_dim = out_dim
        self.kernel_size = 5
        self.n_head = 2
        self.dropout = 0.1
        self.spectral = nn.Sequential(nn.Conv1d(self.in_dim, self.hidden_dim, 1), Mish(), nn.Dropout(self.dropout), nn.Conv1d(self.hidden_dim, self.hidden_dim, 1), Mish(), nn.Dropout(self.dropout))
        self.temporal = nn.Sequential(Conv1dGLU(self.hidden_dim, self.hidden_dim, self.kernel_size, self.dropout), Conv1dGLU(self.hidden_dim, self.hidden_dim, self.kernel_size, self.dropout))
        self.slf_attn = attentions.MultiHeadAttention(self.hidden_dim, self.hidden_dim, self.n_head, p_dropout=self.dropout, proximal_bias=False, proximal_init=True)
        self.atten_drop = nn.Dropout(self.dropout)
        self.fc = nn.Conv1d(self.hidden_dim, self.out_dim, 1)

    def forward(self, x, mask=None):
        """x: (batch, in_dim, time); mask: (batch, 1, time) validity mask.

        NOTE(review): ``mask`` defaults to None but is multiplied in
        unconditionally below — callers apparently always pass one; confirm.
        """
        x = (self.spectral(x) * mask)
        x = (self.temporal(x) * mask)
        # Pairwise attention mask from the per-frame validity mask.
        attn_mask = (mask.unsqueeze(2) * mask.unsqueeze((- 1)))
        y = self.slf_attn(x, x, attn_mask=attn_mask)
        # Residual connection around the attention block.
        x = (x + self.atten_drop(y))
        x = self.fc(x)
        w = self.temporal_avg_pool(x, mask=mask)
        return w

    def temporal_avg_pool(self, x, mask=None):
        """Average over time; with a mask, divide by the number of valid frames."""
        if (mask is None):
            out = torch.mean(x, dim=2)
        else:
            len_ = mask.sum(dim=2)
            x = x.sum(dim=2)
            out = torch.div(x, len_)
        return out
class ROITagHead(torch.nn.Module):
    """ROI head predicting multi-label tags for proposals.

    Training: builds per-proposal tag targets from matched ground truth and
    returns weighted-CE + OHEM losses. Inference: attaches per-proposal
    sigmoid tag scores instead.
    """

    def __init__(self):
        super(ROITagHead, self).__init__()
        self.feature_extractor = make_roi_tag_feature_extractor()
        self.predictor = make_roi_tag_predictor()
        self.loss_evaluator = WeightedCeLoss(cfg.runtime_info.cls_pos_wts, cfg.runtime_info.cls_neg_wts)
        self.loss_evaluator2 = WeightedCeLossBatchOhem(cfg.runtime_info.cls_pos_wts, cfg.runtime_info.cls_neg_wts)
        self.num_tag = cfg.runtime_info.num_tags

    def forward(self, features, proposals, targets=None):
        """Returns (tag_logits, proposals, losses). ``losses`` is empty at
        inference time; ``targets`` is required only for training."""
        x = self.feature_extractor(features, proposals)
        tag_logits = self.predictor(x)
        tag_prob = torch.sigmoid(tag_logits)
        if (not self.training):
            # Inference: split scores per image and attach to each proposal.
            box_num_per_image = [prop.bbox.shape[0] for prop in proposals]
            tag_prob = tag_prob.split(box_num_per_image, dim=0)
            for i in range(len(proposals)):
                proposals[i].add_field('tag_scores', tag_prob[i])
            return (tag_logits, proposals, {})
        labels = [proposal.get_field('labels') for proposal in proposals]
        matched_idxs = [proposal.get_field('matched_idxs') for proposal in proposals]
        tags_target = [target.get_field('tags') for target in targets]
        reliable_neg_tags = [target.get_field('reliable_neg_tags') for target in targets]
        tags = []
        rhem_wts = []
        for img_idx in range(len(labels)):
            # Default target -1 (ignored); positive proposals copy the tags
            # of their matched ground-truth box.
            tags_per_image = (- torch.ones(labels[img_idx].shape[0], self.num_tag).to(torch.int).cuda())
            TP_lesion_idxs = (labels[img_idx] > 0)
            tags_per_image[TP_lesion_idxs] = tags_target[img_idx][matched_idxs[img_idx][TP_lesion_idxs]]
            # OHEM weights: reliable negative tags plus positive tags.
            wts_per_image = torch.zeros(labels[img_idx].shape[0], self.num_tag).to(torch.int).cuda()
            wts_per_image[TP_lesion_idxs] = torch.clamp(reliable_neg_tags[img_idx][matched_idxs[img_idx][TP_lesion_idxs]], min=0)
            wts_per_image[TP_lesion_idxs] += torch.clamp(tags_per_image[TP_lesion_idxs], min=0)
            tags.append(tags_per_image)
            rhem_wts.append(wts_per_image)
        tag_target = cat(tags, dim=0)
        rhem_wts = cat(rhem_wts, dim=0)
        loss_tag = self.loss_evaluator(tag_prob, tag_target)
        loss_tag_ohem = self.loss_evaluator2(tag_prob, tag_target, rhem_wts)
        losses = dict(loss_tag=loss_tag, loss_tag_ohem=loss_tag_ohem)
        return (tag_logits, proposals, losses)
def register_Ns3DlDciListElement_s_methods(root_module, cls):
    """Register constructors and instance attributes for ns3::DlDciListElement_s."""
    cls.add_constructor([])
    cls.add_constructor([param('ns3::DlDciListElement_s const &', 'arg0')])
    # (attribute name, C++ type) pairs, all exposed as mutable attributes.
    attribute_specs = (
        ('m_aggrLevel', 'uint8_t'),
        ('m_cceIndex', 'uint8_t'),
        ('m_dai', 'uint8_t'),
        ('m_dlPowerOffset', 'uint8_t'),
        ('m_format', 'ns3::DlDciListElement_s::Format_e'),
        ('m_harqProcess', 'uint8_t'),
        ('m_mcs', 'std::vector< unsigned char >'),
        ('m_nGap', 'ns3::DlDciListElement_s::Ngap_e'),
        ('m_ndi', 'std::vector< unsigned char >'),
        ('m_pdcchOrder', 'bool'),
        ('m_pdcchPowerOffset', 'uint8_t'),
        ('m_prachMaskIndex', 'uint8_t'),
        ('m_preambleIndex', 'uint8_t'),
        ('m_precodingInfo', 'uint8_t'),
        ('m_rbBitmap', 'uint32_t'),
        ('m_rbShift', 'uint8_t'),
        ('m_resAlloc', 'uint8_t'),
        ('m_rnti', 'uint16_t'),
        ('m_rv', 'std::vector< unsigned char >'),
        ('m_spsRelease', 'bool'),
        ('m_tbSwap', 'bool'),
        ('m_tbsIdx', 'uint8_t'),
        ('m_tbsSize', 'std::vector< unsigned short >'),
        ('m_tpc', 'uint8_t'),
        ('m_vrbFormat', 'ns3::DlDciListElement_s::VrbFormat_e'),
    )
    for attr_name, cpp_type in attribute_specs:
        cls.add_instance_attribute(attr_name, cpp_type, is_const=False)
    return
class LaneSprite():
    """Renders a single map lane: the road surface plus its lane markings."""

    def __init__(self, street_map, lane_id, batch, group_marking, group_road):
        self.street_map = street_map
        self._lane_id = lane_id
        self._lane_node = self.street_map.graph.lanes[lane_id]
        self._batch = batch
        self._group_road = group_road
        self._group_marking = group_marking
        # All pyglet shapes owned by this sprite, so they can be freed later.
        self._shapes: List[Union[pyglet.shapes.Polygon, pyglet.shapes.Line]] = []
        self._init_shapes()

    def _init_shapes(self):
        """Create the road-surface line and the lane border markings."""
        self._shapes += draw_multi_line(self._lane_node.shape.shape, ROAD_COLOR, self._batch, self._group_marking, self._lane_node.width)
        # Outer marking: only when there is no neighbouring lane on the left.
        if (self._lane_node.left_neigh is None):
            self._shapes += draw_multi_line(self._lane_node.shape.left_border, BETWEEN_ROAD_MARKING_COLOR, self._batch, self._group_marking)
        # Inner marking between adjacent lanes on the right.
        if (self._lane_node.right_neigh is not None):
            self._shapes += draw_multi_line(self._lane_node.shape.right_border, INNER_LANE_MARKING_COLOR, self._batch, self._group_marking)

    def delete(self):
        """Release every owned shape from the pyglet batch."""
        # Plain loop instead of the previous list comprehension, which ran
        # only for its side effects and built a throwaway list of None.
        for shape in self._shapes:
            shape.delete()
def embedding_computation_loop(split, set_loader, stat_file):
    """Extract speaker embeddings for one split and cache them as a StatObject_SB.

    If ``stat_file`` already exists, the cached object is unpickled and
    returned instead of recomputing.

    Arguments
    ---------
    split : str
        Name of the data split (used by callers/logging).
    set_loader : iterable
        Batches exposing ``.id`` and ``.sig`` (waveforms, relative lengths).
    stat_file : str
        Path of the pickled StatObject_SB cache.
    """
    if (not os.path.isfile(stat_file)):
        logger.debug('Extracting deep embeddings and diarizing')
        # Collect per-batch embeddings in a list and concatenate ONCE at the
        # end: the previous per-batch np.concatenate copied the whole
        # accumulated array every iteration (O(n^2) total copying).  The
        # leading empty float64 array preserves the original result dtype.
        embedding_chunks = [np.empty(shape=[0, params['emb_dim']], dtype=np.float64)]
        modelset = []
        segset = []
        params['mean_var_norm_emb'].count = 0
        for batch in set_loader:
            ids = batch.id
            (wavs, lens) = batch.sig
            # For diarization each segment acts as its own "model".
            modelset.extend(ids)
            segset.extend(ids)
            emb = compute_embeddings(wavs, lens).contiguous().squeeze(1).cpu().numpy()
            embedding_chunks.append(emb)
        embeddings = np.concatenate(embedding_chunks, axis=0)
        modelset = np.array(modelset, dtype='|O')
        segset = np.array(segset, dtype='|O')
        # start/stop are unknown (None); stat0 is a column of ones.
        s = np.array(([None] * embeddings.shape[0]))
        b = np.array(([[1.0]] * embeddings.shape[0]))
        stat_obj = StatObject_SB(modelset=modelset, segset=segset, start=s, stop=s, stat0=b, stat1=embeddings)
        logger.debug('Saving Embeddings...')
        stat_obj.save_stat_object(stat_file)
    else:
        logger.debug('Skipping embedding extraction (as already present).')
        logger.debug('Loading previously saved embeddings.')
        with open(stat_file, 'rb') as in_file:
            stat_obj = pickle.load(in_file)
    return stat_obj
def test_constant_schedule():
    """A ConstantSchedule must return its constant at every step, negative or positive."""
    schedule = ConstantSchedule(5)
    assert all(np.isclose(schedule.value(step), 5) for step in range(-100, 100))
class Zettl(Benchmark):
    """Zettl optimization benchmark function (2-dimensional by default)."""

    def __init__(self, dimensions=2):
        Benchmark.__init__(self, dimensions)
        # Search domain: [-5, 10] in every coordinate.
        self._bounds = list(zip([-5.0] * self.N, [10.0] * self.N))
        # NOTE(review): literature reports f* ~= -0.003791 at x ~= (-0.0299, 0);
        # confirm whether these stored reference values are intentional.
        self.global_optimum = [[-0.0, 0.0]]
        self.fglob = -0.0

    def fun(self, x, *args):
        """Evaluate f(x) = (x0^2 + x1^2 - 2*x0)^2 + 0.25*x0 and count the call."""
        self.nfev += 1
        quadratic = x[0] * x[0] + x[1] * x[1] - 2 * x[0]
        return quadratic ** 2 + 0.25 * x[0]
def build_transform(is_train, args):
    """Build the train or eval torchvision transform pipeline (DeiT recipe)."""
    mean, std = IMAGENET_DEFAULT_MEAN, IMAGENET_DEFAULT_STD
    if is_train:
        # Full timm augmentation pipeline for training.
        return create_transform(input_size=args.input_size, is_training=True, color_jitter=args.color_jitter, auto_augment=args.aa, interpolation='bicubic', re_prob=args.reprob, re_mode=args.remode, re_count=args.recount, mean=mean, std=std)
    # Evaluation: resize -> center crop -> tensor -> normalize.
    crop_pct = (224 / 256) if args.input_size <= 224 else 1.0
    resize_size = int(args.input_size / crop_pct)
    eval_steps = [
        transforms.Resize(resize_size, interpolation=PIL.Image.BICUBIC),
        transforms.CenterCrop(args.input_size),
        transforms.ToTensor(),
        transforms.Normalize(mean, std),
    ]
    return transforms.Compose(eval_steps)
class Pipeline():
    """A shell pipeline: a sequence of commands joined by pipes.

    Attributes:
        commands: ordered list of command objects (each with a toShell method).
        negate: whether the pipeline is prefixed with '!'.
        pipe_err: whether stderr is piped as well (pipefail semantics).
    """

    def __init__(self, commands, negate=False, pipe_err=False):
        self.commands = commands
        self.negate = negate
        self.pipe_err = pipe_err

    def __repr__(self):
        return ('Pipeline(%r, %r, %r)' % (self.commands, self.negate, self.pipe_err))

    def __eq__(self, other):
        if (not isinstance(other, Pipeline)):
            return False
        # BUG FIX: previously compared self.pipe_err on BOTH sides, so two
        # pipelines differing only in pipe_err compared equal.
        return ((self.commands, self.negate, self.pipe_err) ==
                (other.commands, other.negate, other.pipe_err))

    def toShell(self, file, pipefail=False):
        """Write the pipeline as shell syntax to `file`.

        Raises:
            ValueError: if the caller's pipefail flag disagrees with pipe_err.
        """
        if (pipefail != self.pipe_err):
            raise ValueError('Inconsistent "pipefail" attribute!')
        if self.negate:
            file.write('! ')
        for cmd in self.commands:
            cmd.toShell(file)
            # Join all but the last command with a pipe.
            if (cmd is not self.commands[(- 1)]):
                file.write('|\n  ')
class TGCN(torch.nn.Module):
    """Temporal GCN cell: a GRU whose gates are computed with graph convolutions.

    Hidden state is stored per-cell in ``batch.node_states[self.id]``.
    """

    def __init__(self, in_channels: int, out_channels: int, improved: bool=False, cached: bool=False, id: int=(- 1)):
        super(TGCN, self).__init__()
        assert (id >= 0), 'kwarg id is required.'
        self.in_channels = in_channels
        self.out_channels = out_channels
        self.improved = improved
        self.cached = cached
        self.id = id
        # First convolution produces both reset and update gates (2 * out_channels).
        self.graph_conv1 = GCNConv(in_channels=(self.in_channels + self.out_channels), out_channels=(self.out_channels * 2), improved=self.improved, cached=self.cached, normalize=True, bias=True, add_self_loops=True)
        # Bias initialized to ones so the gates start mostly open.
        self.graph_conv1.bias.data = torch.ones_like(self.graph_conv1.bias.data)
        # Second convolution produces the candidate hidden state.
        self.graph_conv2 = GCNConv(in_channels=(self.in_channels + self.out_channels), out_channels=self.out_channels, improved=self.improved, cached=self.cached, normalize=True, bias=True, add_self_loops=True)

    def _forward(self, X: torch.FloatTensor, edge_index: torch.LongTensor, edge_weight: torch.FloatTensor=None, H: torch.FloatTensor=None) -> torch.FloatTensor:
        """One GRU step over the graph; returns the new hidden state."""
        if (not isinstance(H, torch.Tensor)):
            # No previous state: start from zeros on the input's device.
            H = torch.zeros(X.shape[0], self.out_channels).to(X.device)
        gates = torch.sigmoid(self.graph_conv1(torch.cat([X, H], dim=1), edge_index, edge_weight))
        reset_gate, update_gate = torch.chunk(gates, chunks=2, dim=1)
        candidate = torch.tanh(self.graph_conv2(torch.cat([X, H * reset_gate], dim=1), edge_index, edge_weight))
        return update_gate * H + (1.0 - update_gate) * candidate

    def forward(self, batch):
        """Update batch.node_states[self.id] and batch.node_feature in place."""
        edge_weight = getattr(batch, 'edge_weight', None)
        new_state = self._forward(X=batch.node_feature, edge_index=batch.edge_index, edge_weight=edge_weight, H=batch.node_states[self.id])
        batch.node_states[self.id] = new_state
        batch.node_feature = new_state
        return batch
class MemoryOptimizedGroupedGLU(torch.autograd.Function):
    """GroupedGLU with a memory-optimized (optionally quantized) backward pass.

    Forward computes, per expert group, ``gmm(activation_fn(x@w1^T) * (x@v1^T), w2)``.
    Inputs and hidden activations may be saved quantized (``num_input_bits`` /
    ``num_remat_bits``; -1 disables quantization), then dequantized or
    recomputed in backward — trading compute for activation memory.
    """

    # NOTE(review): decorator below appears truncated by extraction —
    # presumably ``@torch.cuda.amp.custom_fwd``; restore before use.
    .amp.custom_fwd
    def forward(ctx, x, w1, v1, w2, batch_sizes, num_input_bits, num_remat_bits, activation_fn):
        if ((not x.is_contiguous()) or (not w1.is_contiguous()) or (not v1.is_contiguous()) or (not w2.is_contiguous())):
            raise ValueError("Expected contiguous 'x', 'w1', 'v1' and 'w2'.")
        # Gate branch (through w1) and value branch (through v1).
        sdd_out = gg.backend.gmm(x, w1, batch_sizes, trans_b=True)
        v1_out = gg.backend.gmm(x, v1, batch_sizes, trans_b=True)
        input_save_args = (x,)
        if (num_input_bits != (- 1)):
            # Save the input quantized instead of in full precision.
            (x_q, x_scales) = turbo.quantize_signed(x, num_bits=num_input_bits)
            input_save_args = (x_q, x_scales)
        if (num_remat_bits == (- 1)):
            activation_fn_out = (activation_fn(sdd_out) * v1_out)
            input_save_args += (sdd_out, v1_out)
        else:
            # Quantized rematerialization only supports the default (GELU)
            # activation, which is fused into the quantization kernels.
            if (activation_fn is not DEFAULT_ACTIVATION_FN):
                raise NotImplementedError(f'`num_remat_bits` != -1 not implemented for custom activation_fn={activation_fn!r} (num_remat_bits={num_remat_bits!r}).')
            (hidden_q_sdd, hidden_scales_sdd, _) = turbo.quantize_signed(sdd_out, num_bits=num_remat_bits, op=turbo.ElemwiseOps.GELU_FORWARD, x_forward=sdd_out)
            # NOTE(review): the fused op's third return value is discarded and
            # raw ``sdd_out`` is multiplied below — presumably ``x_forward``
            # applies GELU to ``sdd_out`` in place; confirm with the turbo API.
            activation_fn_out = (sdd_out * v1_out)
            (hidden_q_v1, hidden_scales_v1) = turbo.quantize_signed(v1_out, num_bits=num_remat_bits)
            input_save_args += (hidden_q_sdd, hidden_scales_sdd, hidden_q_v1, hidden_scales_v1)
        dsd_out = gg.backend.gmm(activation_fn_out, w2, batch_sizes)
        # Metadata needed to rebuild tensors in backward.
        ctx.num_input_bits = num_input_bits
        ctx.num_remat_bits = num_remat_bits
        ctx.x_shape = x.shape
        ctx.sdd_out_shape = sdd_out.shape
        ctx.dtype = x.dtype
        ctx.activation_fn = activation_fn
        ctx.save_for_backward(w1, v1, w2, batch_sizes, *input_save_args)
        return dsd_out

    # NOTE(review): decorator truncated — presumably ``@torch.cuda.amp.custom_bwd``.
    .amp.custom_bwd
    def backward(ctx, ddsd_out):
        if ((not ctx.needs_input_grad[0]) or (not ctx.needs_input_grad[1]) or (not ctx.needs_input_grad[2])):
            raise ValueError('Expected all MLP inputs to need grad.')
        dtype = ctx.dtype
        saved_tensors = ctx.saved_tensors
        (w1, v1, w2) = saved_tensors[:3]
        batch_sizes = saved_tensors[3]
        if (ctx.num_input_bits == (- 1)):
            x = saved_tensors[4]
        else:
            (x_q, x_scales) = saved_tensors[4:6]
        if (ctx.num_remat_bits == (- 1)):
            (sdd_out, v1_out) = saved_tensors[(- 2):]
        else:
            (hidden_q_sdd, hidden_scales_sdd, hidden_q_v1, hidden_scales_v1) = saved_tensors[(- 4):]
        activation_fn = ctx.activation_fn
        activation_grad_fn = None
        if (ctx.num_remat_bits == (- 1)):
            # Re-run the activation under autograd to obtain its grad function.
            with torch.set_grad_enabled(True):
                sdd_out.requires_grad = True
                v1_out.requires_grad = True
                activation_fn_out = (activation_fn(sdd_out) * v1_out)
                activation_grad_fn = activation_fn_out.backward
        else:
            if (activation_fn is not DEFAULT_ACTIVATION_FN):
                raise NotImplementedError(f'`num_remat_bits` != -1 not implemented for custom activation_fn={activation_fn!r} (ctx.num_remat_bits={ctx.num_remat_bits!r}).')
            # Rematerialize hidden activations from their quantized copies.
            sdd_out = turbo.dequantize_signed(hidden_q_sdd, hidden_scales_sdd, num_bits=ctx.num_remat_bits, op=turbo.ElemwiseOps.GELU_FORWARD, out_shape=ctx.sdd_out_shape, out_dtype=dtype)
            v1_out = turbo.dequantize_signed(hidden_q_v1, hidden_scales_v1, num_bits=ctx.num_remat_bits, out_shape=ctx.sdd_out_shape, out_dtype=dtype)
            activation_fn_out = (sdd_out * v1_out)
        dw2 = gg.backend.gmm(activation_fn_out, ddsd_out, batch_sizes, trans_a=True)
        # Reuse activation_fn_out's storage for its gradient (in-place gmm).
        dactivation_fn_out = activation_fn_out
        gg.backend.gmm(ddsd_out, w2, batch_sizes, trans_b=True, c=dactivation_fn_out)
        if (ctx.num_remat_bits == (- 1)):
            assert (activation_grad_fn is not None)
            activation_grad_fn(dactivation_fn_out)
            dsdd_out = sdd_out.grad
            dv1_out = v1_out.grad
        else:
            # Fused GELU-backward while dequantizing the gate branch.
            dsdd_out = turbo.dequantize_signed(hidden_q_sdd, hidden_scales_sdd, num_bits=ctx.num_remat_bits, op=turbo.ElemwiseOps.GELU_BACKWARD, x_out=(dactivation_fn_out * v1_out).data)
            dv1_out = (dactivation_fn_out * sdd_out).data
        if (ctx.num_input_bits != (- 1)):
            x = turbo.dequantize_signed(x_q, x_scales, num_bits=ctx.num_input_bits, out_dtype=dtype, out_shape=ctx.x_shape)
        dw1 = gg.backend.gmm(dsdd_out, x, batch_sizes, trans_a=True)
        dv1 = gg.backend.gmm(dv1_out, x, batch_sizes, trans_a=True)
        # Reuse ddsd_out's storage for dx, then accumulate the v1 path.
        dx = ddsd_out
        gg.backend.gmm(dsdd_out, w1, batch_sizes, c=dx)
        dx += gg.backend.gmm(dv1_out, v1, batch_sizes)
        return (dx, dw1, dv1, dw2, None, None, None, None)
class DummyEncoder(Encoder):
    """Minimal encoder: flattens the observation and adds one learned row vector."""

    def __init__(self, input_shape: Shape):
        super().__init__()
        self.input_shape = input_shape
        # Single learnable (1, feature_size) parameter, broadcast over the batch.
        self.dummy_parameter = torch.nn.Parameter(torch.rand(1, self.get_feature_size()))

    def forward(self, x: TorchObservation) -> torch.Tensor:
        if isinstance(x, torch.Tensor):
            flattened = x.view(x.shape[0], -1)
        else:
            # Tuple/list of tensors: flatten each part and concatenate.
            batch_size = x[0].shape[0]
            flattened = torch.cat([part.view(batch_size, -1) for part in x], dim=-1)
        return flattened + self.dummy_parameter

    def get_feature_size(self) -> int:
        """Total number of features after flattening the observation."""
        if isinstance(self.input_shape[0], int):
            # Single shape: product of all dims (last cumprod entry).
            return int(np.cumprod(self.input_shape)[-1])
        return sum(np.cumprod(shape)[-1] for shape in self.input_shape)
def addDBPointer(turn):
    """Build the DB pointer vector (6 one-hot bins per queryable domain)."""
    domains = ['restaurant', 'hotel', 'attraction', 'train']
    pointer_vector = np.zeros(6 * len(domains))
    for domain in domains:
        match_count = dbPointer.queryResult(domain, turn)
        pointer_vector = dbPointer.oneHotVector(match_count, domain, pointer_vector)
    return pointer_vector
def dataclass_with_default_init(_cls=None, *args, **kwargs):
    """Like @dataclass, but keeps a user-written ``__init__``.

    The dataclass-generated initializer is preserved as ``__default_init__``
    so the custom ``__init__`` can delegate to it.  Works both bare
    (``@dataclass_with_default_init``) and parameterized
    (``@dataclass_with_default_init(...)``).
    """
    def wrap(cls):
        user_init = cls.__init__
        # Temporarily remove the custom __init__ so @dataclass generates one.
        delattr(cls, '__init__')
        decorated = dataclass(cls, *args, **kwargs)
        decorated.__default_init__ = decorated.__init__
        decorated.__init__ = user_init
        # If dataclass returned a new class object, patch the original too.
        if decorated is not cls:
            cls.__init__ = user_init
        return decorated
    return wrap if _cls is None else wrap(_cls)
def load_mixed_5b(state_dict, name_pth, name_tf):
    """Copy the TF Mixed_5b branch conv weights into the PyTorch state dict."""
    # (PyTorch module suffix, TensorFlow scope suffix) pairs.
    branch_map = (
        ('.branch0', '/Branch_0/Conv2d_1x1'),
        ('.branch1.0', '/Branch_1/Conv2d_0a_1x1'),
        ('.branch1.1', '/Branch_1/Conv2d_0b_5x5'),
        ('.branch2.0', '/Branch_2/Conv2d_0a_1x1'),
        ('.branch2.1', '/Branch_2/Conv2d_0b_3x3'),
        ('.branch2.2', '/Branch_2/Conv2d_0c_3x3'),
        ('.branch3.1', '/Branch_3/Conv2d_0b_1x1'),
    )
    for pth_suffix, tf_suffix in branch_map:
        load_conv2d(state_dict, name_pth + pth_suffix, name_tf + tf_suffix)
class FunnelForMultipleChoice(metaclass=DummyObject):
    """Placeholder for FunnelForMultipleChoice when the torch backend is missing.

    Any instantiation is routed through ``requires_backends``, which reports
    the missing 'torch' dependency instead of exposing the real model class.
    """
    # Backends the real implementation requires.
    _backends = ['torch']
    def __init__(self, *args, **kwargs):
        requires_backends(self, ['torch'])
def get_relevant_deps_and_context(line, args):
    """Parse one sentence with CoreNLP; return (tokens, pos_tags, filtered_deps)."""
    dep_type = args.dependency_type
    annotation = nlp.annotate(line, properties={'annotators': 'tokenize,ssplit,pos,depparse', 'outputFormat': 'json', 'ssplit.isOneSentence': True})
    sentence = annotation['sentences'][0]
    token_dicts = sentence['tokens']
    pos = [tok['pos'] for tok in token_dicts]
    tokens = [tok['word'] for tok in token_dicts]
    # Keep only dependencies whose relation is not in the ignore list.
    deps = [
        {'dep': dep_dict['dep'],
         'child': dep_dict['dependentGloss'], 'child_idx': dep_dict['dependent'],
         'head': dep_dict['governorGloss'], 'head_idx': dep_dict['governor']}
        for dep_dict in sentence[dep_type]
        if dep_dict['dep'] not in ignore_dep
    ]
    return (tokens, pos, deps)
class TestRowWiseCounter(hu.HypothesisTestCase):
    """Checks the RowWiseCounter caffe2 operator against its python reference."""

    def test_rowwise_counter(self):
        num_rows = 8 * 20
        num_updates = 5
        curr_iter = np.array([100], dtype=np.int64)
        update_counter = np.random.randint(99, size=num_rows).astype(np.float64)
        # NOTE(review): rand() yields floats in [0, 1), so astype(int64) makes
        # this all zeros — confirm that is the intended initial state.
        prev_iter = np.random.rand(num_rows, 1).astype(np.int64)
        indices = np.unique(np.random.randint(0, num_rows, size=num_updates))
        indices.sort(axis=0)
        counter_halflife = 1
        net = core.Net('test_net')
        net.Proto().type = 'dag'
        for blob_name, blob in (('indices', indices), ('curr_iter', curr_iter),
                                ('update_counter', update_counter), ('prev_iter', prev_iter)):
            workspace.FeedBlob(blob_name, blob)
        net.RowWiseCounter(['prev_iter', 'update_counter', 'indices', 'curr_iter'], ['prev_iter', 'update_counter'], counter_halflife=counter_halflife)
        workspace.RunNetOnce(net)
        prev_iter_out = workspace.FetchBlob('prev_iter')
        update_counter_out = workspace.FetchBlob('update_counter')
        prev_iter_out_ref, update_counter_out_ref = update_counter_ref(prev_iter, update_counter, indices, curr_iter, counter_halflife=counter_halflife)
        assert np.allclose(prev_iter_out, prev_iter_out_ref, rtol=0.001)
        assert np.allclose(update_counter_out, update_counter_out_ref, rtol=0.001)
def GDPPLoss(phiFake, phiReal, backward=True):
    """Generative Determinantal Point Process (GDPP) loss between feature sets.

    Compares the eigen-spectra (magnitude term) and eigenvectors (structure
    term) of the cosine-similarity matrices of fake and real embeddings.

    Args:
        phiFake: (N, D) fake-sample embeddings.
        phiReal: (N, D) real-sample embeddings.
        backward: if True, call backward() on the loss (retaining the graph)
            before returning.

    Returns:
        float: scalar GDPP loss value.
    """
    def compute_diversity(phi):
        phi = F.normalize(phi, p=2, dim=1)
        SB = torch.mm(phi, phi.t())
        # torch.symeig was deprecated and removed in PyTorch 2.0;
        # torch.linalg.eigh is the drop-in replacement for symmetric matrices
        # (eigenvalues in ascending order, eigenvectors as columns).
        (eigVals, eigVecs) = torch.linalg.eigh(SB)
        return (eigVals, eigVecs)

    def normalize_min_max(eigVals):
        (minV, maxV) = (torch.min(eigVals), torch.max(eigVals))
        if (abs((minV - maxV)) < 1e-10):
            # Degenerate (flat) spectrum: avoid dividing by ~zero.
            return eigVals
        return ((eigVals - minV) / (maxV - minV))

    (fakeEigVals, fakeEigVecs) = compute_diversity(phiFake)
    (realEigVals, realEigVecs) = compute_diversity(phiReal)
    magnitudeLoss = (0.0001 * F.mse_loss(target=realEigVals, input=fakeEigVals))
    # Column-wise alignment of fake/real eigenvectors (negated dot products).
    structureLoss = (- torch.sum(torch.mul(fakeEigVecs, realEigVecs), 0))
    normalizedRealEigVals = normalize_min_max(realEigVals)
    weightedStructureLoss = torch.sum(torch.mul(normalizedRealEigVals, structureLoss))
    gdppLoss = (magnitudeLoss + weightedStructureLoss)
    if backward:
        gdppLoss.backward(retain_graph=True)
    return gdppLoss.item()
class TestMMIOSparseCSR(TestMMIOArray):
    """Matrix Market round-trip tests for scipy.sparse CSR matrices.

    NOTE(review): the bare ``.parametrize(...)`` lines below appear to be
    ``@pytest.mark.parametrize`` decorators truncated by extraction; restore
    before running.
    """
    def setup_method(self):
        # Fresh temp directory per test; .mtx files are written here.
        self.tmpdir = mkdtemp()
        self.fn = os.path.join(self.tmpdir, 'testfile.mtx')
    def teardown_method(self):
        shutil.rmtree(self.tmpdir)
    def check(self, a, info):
        # Write -> mminfo -> read -> approximate equality (float/complex data).
        mmwrite(self.fn, a)
        assert_equal(mminfo(self.fn), info)
        b = mmread(self.fn)
        assert_array_almost_equal(a.todense(), b.todense())
    def check_exact(self, a, info):
        # Same as check() but with exact equality (integer data).
        mmwrite(self.fn, a)
        assert_equal(mminfo(self.fn), info)
        b = mmread(self.fn)
        assert_equal(a.todense(), b.todense())
    .parametrize('typeval, dtype', parametrize_args)
    def test_simple_integer(self, typeval, dtype):
        self.check_exact(scipy.sparse.csr_matrix([[1, 2], [3, 4]], dtype=dtype), (2, 2, 4, 'coordinate', typeval, 'general'))
    def test_32bit_integer(self):
        a = scipy.sparse.csr_matrix(array([[((2 ** 31) - 1), ((- (2 ** 31)) + 2)], [((2 ** 31) - 3), ((2 ** 31) - 4)]], dtype=np.int32))
        self.check_exact(a, (2, 2, 4, 'coordinate', 'integer', 'general'))
    def test_64bit_integer(self):
        a = scipy.sparse.csr_matrix(array([[((2 ** 32) + 1), ((2 ** 32) + 1)], [((- (2 ** 63)) + 2), ((2 ** 63) - 2)]], dtype=np.int64))
        # On 32-bit platforms 64-bit values cannot be written safely.
        if (np.intp(0).itemsize < 8):
            assert_raises(OverflowError, mmwrite, self.fn, a)
        else:
            self.check_exact(a, (2, 2, 4, 'coordinate', 'integer', 'general'))
    def test_32bit_unsigned_integer(self):
        a = scipy.sparse.csr_matrix(array([[((2 ** 31) - 1), ((2 ** 31) - 2)], [((2 ** 31) - 3), ((2 ** 31) - 4)]], dtype=np.uint32))
        self.check_exact(a, (2, 2, 4, 'coordinate', 'unsigned-integer', 'general'))
    def test_64bit_unsigned_integer(self):
        a = scipy.sparse.csr_matrix(array([[((2 ** 32) + 1), ((2 ** 32) + 1)], [((2 ** 64) - 2), ((2 ** 64) - 1)]], dtype=np.uint64))
        self.check_exact(a, (2, 2, 4, 'coordinate', 'unsigned-integer', 'general'))
    .parametrize('typeval, dtype', parametrize_args)
    def test_simple_upper_triangle_integer(self, typeval, dtype):
        self.check_exact(scipy.sparse.csr_matrix([[0, 1], [0, 0]], dtype=dtype), (2, 2, 1, 'coordinate', typeval, 'general'))
    .parametrize('typeval, dtype', parametrize_args)
    def test_simple_lower_triangle_integer(self, typeval, dtype):
        self.check_exact(scipy.sparse.csr_matrix([[0, 0], [1, 0]], dtype=dtype), (2, 2, 1, 'coordinate', typeval, 'general'))
    .parametrize('typeval, dtype', parametrize_args)
    def test_simple_rectangular_integer(self, typeval, dtype):
        self.check_exact(scipy.sparse.csr_matrix([[1, 2, 3], [4, 5, 6]], dtype=dtype), (2, 3, 6, 'coordinate', typeval, 'general'))
    def test_simple_rectangular_float(self):
        self.check(scipy.sparse.csr_matrix([[1, 2], [3.5, 4], [5, 6]]), (3, 2, 6, 'coordinate', 'real', 'general'))
    def test_simple_float(self):
        self.check(scipy.sparse.csr_matrix([[1, 2], [3, 4.0]]), (2, 2, 4, 'coordinate', 'real', 'general'))
    def test_simple_complex(self):
        self.check(scipy.sparse.csr_matrix([[1, 2], [3, 4j]]), (2, 2, 4, 'coordinate', 'complex', 'general'))
    .parametrize('typeval, dtype', parametrize_args)
    def test_simple_symmetric_integer(self, typeval, dtype):
        self.check_exact(scipy.sparse.csr_matrix([[1, 2], [2, 4]], dtype=dtype), (2, 2, 3, 'coordinate', typeval, 'symmetric'))
    def test_simple_skew_symmetric_integer(self):
        self.check_exact(scipy.sparse.csr_matrix([[1, 2], [(- 2), 4]]), (2, 2, 3, 'coordinate', 'integer', 'skew-symmetric'))
    def test_simple_skew_symmetric_float(self):
        self.check(scipy.sparse.csr_matrix(array([[1, 2], [(- 2.0), 4]], 'f')), (2, 2, 3, 'coordinate', 'real', 'skew-symmetric'))
    def test_simple_hermitian_complex(self):
        self.check(scipy.sparse.csr_matrix([[1, (2 + 3j)], [(2 - 3j), 4]]), (2, 2, 3, 'coordinate', 'complex', 'hermitian'))
    def test_random_symmetric_float(self):
        sz = (20, 20)
        a = np.random.random(sz)
        # Symmetrize so the writer detects the 'symmetric' storage format.
        a = (a + transpose(a))
        a = scipy.sparse.csr_matrix(a)
        self.check(a, (20, 20, 210, 'coordinate', 'real', 'symmetric'))
    def test_random_rectangular_float(self):
        sz = (20, 15)
        a = np.random.random(sz)
        a = scipy.sparse.csr_matrix(a)
        self.check(a, (20, 15, 300, 'coordinate', 'real', 'general'))
    def test_simple_pattern(self):
        # field='pattern' stores only the sparsity structure; reading back
        # yields ones at the nonzero positions.
        a = scipy.sparse.csr_matrix([[0, 1.5], [3.0, 2.5]])
        p = np.zeros_like(a.todense())
        p[(a.todense() > 0)] = 1
        info = (2, 2, 3, 'coordinate', 'pattern', 'general')
        mmwrite(self.fn, a, field='pattern')
        assert_equal(mminfo(self.fn), info)
        b = mmread(self.fn)
        assert_array_almost_equal(p, b.todense())
class AttrDict(dict):
    """Dictionary whose entries are also reachable as attributes.

    Reading ``d.key`` falls back to ``d['key']``; assigning ``d.key = v``
    stores into the mapping unless a real instance attribute already exists.
    """

    def __getattr__(self, name):
        # Only invoked when normal attribute lookup has already failed.
        instance_vars = self.__dict__
        if name in instance_vars:
            return instance_vars[name]
        if name in self:
            return self[name]
        raise AttributeError(name)

    def __setattr__(self, name, value):
        # Pre-existing real attributes stay in __dict__; everything else
        # becomes a dictionary entry.
        target = self.__dict__ if name in self.__dict__ else self
        target[name] = value
_module()
class FPN_UNet(BaseModule):
    """FPN-style UNet decoder fusing four backbone levels into one feature map.

    NOTE(review): the bare ``_module()`` call above looks like a registry
    decorator truncated by extraction; confirm against the original file.
    """

    def __init__(self, in_channels, out_channels, init_cfg=dict(type='Xavier', layer=['Conv2d', 'ConvTranspose2d'], distribution='uniform')):
        # NOTE: the dict default is kept for interface compatibility; it is
        # only read here, never mutated.
        super().__init__(init_cfg=init_cfg)
        assert (len(in_channels) == 4)
        assert isinstance(out_channels, int)
        # Channel plan: widths double going deeper, capped at 256.
        out_chs = [out_channels] + [min(out_channels * (2 ** i), 256) for i in range(4)]
        in_chs = ([out_chs[1]]
                  + [in_channels[i] + out_chs[i + 2] for i in range(3)]
                  + [in_channels[3]])
        self.up4 = nn.ConvTranspose2d(in_chs[4], out_chs[4], kernel_size=4, stride=2, padding=1)
        self.up_block3 = UpBlock(in_chs[3], out_chs[3])
        self.up_block2 = UpBlock(in_chs[2], out_chs[2])
        self.up_block1 = UpBlock(in_chs[1], out_chs[1])
        self.up_block0 = UpBlock(in_chs[0], out_chs[0])

    def forward(self, x):
        c2, c3, c4, c5 = x
        # Top-down pathway with skip concatenations at every level.
        feat = F.relu(self.up4(c5))
        feat = F.relu(self.up_block3(torch.cat([feat, c4], dim=1)))
        feat = F.relu(self.up_block2(torch.cat([feat, c3], dim=1)))
        feat = F.relu(self.up_block1(torch.cat([feat, c2], dim=1)))
        return self.up_block0(feat)
def test_UnmaskedArray_RecordArray_NumpyArray():
    """Round-tripping UnmaskedArray<RecordArray<NumpyArray>> through buffers keeps content."""
    values = ak.contents.numpyarray.NumpyArray(np.array([0.0, 1.1, 2.2, 3.3]))
    record = ak.contents.recordarray.RecordArray([values], ['nest'])
    layout = ak.contents.unmaskedarray.UnmaskedArray(record)
    assert (to_list(ak_from_buffers(*ak_to_buffers(layout))) == to_list(layout))
def num_errors(all_possible_countries: List[str], state: Dict) -> float:
    """Count absolute frequency errors between the current answer and the truth.

    When a usable ``sub_text`` is present, ground-truth frequencies are the
    substring counts of each candidate country in it; otherwise they come from
    the parsed ``ground_truth`` string.  Errors are summed over countries
    missing from either side plus per-country count mismatches.

    Returns:
        The total error count, or 100 as a fixed penalty when the state is
        malformed (missing keys, invalid JSON, ...).
    """
    try:
        use_sub_text = (('sub_text' in state)
                        and ((state['sub_text'] != '') or (state['current'] == '{}'))
                        and (len(state['sub_text']) < (len(state['original']) * 0.75)))
        if use_sub_text:
            text = state['sub_text']
            correct_freq_dict = {country: text.count(country) for country in all_possible_countries}
        else:
            correct_freq_dict = list_to_freq_dict(string_to_list(state['ground_truth']))
        current_freq_dict = json.loads(state['current'])
        countries_not_in_current = (set(correct_freq_dict.keys()) - set(current_freq_dict.keys()))
        countries_not_in_correct = (set(current_freq_dict.keys()) - set(correct_freq_dict.keys()))
        num_errors = 0
        for country in countries_not_in_current:
            num_errors += abs(correct_freq_dict[country])
        for country in countries_not_in_correct:
            num_errors += abs(current_freq_dict[country])
        for country in (set(correct_freq_dict.keys()) & set(current_freq_dict.keys())):
            num_errors += abs((correct_freq_dict[country] - current_freq_dict[country]))
        return num_errors
    except Exception:
        # Narrowed from a bare `except:` which also swallowed
        # KeyboardInterrupt/SystemExit; malformed state still gets the
        # fixed penalty instead of crashing the scorer.
        return 100
class Encoder(nn.Module):
    """Convolutional VAE encoder producing (mu, logsigma) of the latent Gaussian."""

    def __init__(self, img_channels, latent_size, m):
        super(Encoder, self).__init__()
        self.latent_size = latent_size
        self.img_channels = img_channels
        # Four stride-2 convolutions progressively halve spatial resolution.
        self.conv1 = nn.Conv2d(img_channels, 32, 4, stride=2)
        self.conv2 = nn.Conv2d(32, 64, 4, stride=2)
        self.conv3 = nn.Conv2d(64, 128, 4, stride=2)
        self.conv4 = nn.Conv2d(128, 256, 4, stride=2)
        # m is the flattened conv-output size feeding the latent heads.
        self.fc_mu = nn.Linear(m, latent_size)
        self.fc_logsigma = nn.Linear(m, latent_size)

    def forward(self, x):
        for conv in (self.conv1, self.conv2, self.conv3, self.conv4):
            x = F.relu(conv(x))
        flat = x.view(x.size(0), -1)
        return (self.fc_mu(flat), self.fc_logsigma(flat))
class Bottleneck(nn.Module):
    """ResNet bottleneck: 1x1 reduce -> 3x3 -> 1x1 expand (x4) with residual add."""

    expansion = 4

    def __init__(self, inplanes, planes, stride=1, downsample=None):
        super(Bottleneck, self).__init__()
        self.conv1 = nn.Conv2d(inplanes, planes, kernel_size=1, bias=False)
        self.bn1 = nn.BatchNorm2d(planes, momentum=BN_MOMENTUM)
        self.conv2 = nn.Conv2d(planes, planes, kernel_size=3, stride=stride, padding=1, bias=False)
        self.bn2 = nn.BatchNorm2d(planes, momentum=BN_MOMENTUM)
        self.conv3 = nn.Conv2d(planes, planes * self.expansion, kernel_size=1, bias=False)
        self.bn3 = nn.BatchNorm2d(planes * self.expansion, momentum=BN_MOMENTUM)
        self.relu = nn.ReLU(inplace=True)
        # Optional projection for the shortcut when shapes differ.
        self.downsample = downsample
        self.stride = stride

    def forward(self, x):
        # Shortcut branch (projected if a downsample module was given).
        identity = self.downsample(x) if self.downsample is not None else x
        # Main branch.
        out = self.relu(self.bn1(self.conv1(x)))
        out = self.relu(self.bn2(self.conv2(out)))
        out = self.bn3(self.conv3(out))
        out += identity
        return self.relu(out)
class DiscreteGaussianDistributionPolynomialSampler(SageObject):
    """Sample polynomials of degree < n with discrete-Gaussian integer coefficients.

    INPUT:
    - ``P`` -- polynomial ring the samples live in
    - ``n`` -- number of coefficients (degree bound)
    - ``sigma`` -- standard deviation, or an already-constructed
      DiscreteGaussianDistributionIntegerSampler to reuse
    """

    def __init__(self, P, n, sigma):
        if isinstance(sigma, DiscreteGaussianDistributionIntegerSampler):
            self.D = sigma
        else:
            self.D = DiscreteGaussianDistributionIntegerSampler(RR(sigma))
        self.n = ZZ(n)
        self.P = P

    def __call__(self):
        """Return one polynomial with n independently sampled coefficients."""
        coeffs = [self.D() for _ in range(self.n)]
        return self.P(coeffs)

    def _repr_(self):
        # Fixed garbled format string: the sigma label had been lost
        # ("with =%f"), presumably a mangled unicode sigma character.
        return ('Discrete Gaussian sampler for polynomials of degree < %d with sigma=%f in each component' % (self.n, self.D.sigma))
class GeneralizedRCNN(nn.Module):
    """Generalized R-CNN: transform -> backbone -> RPN -> ROI heads.

    Also carries a ``track_heads`` module (not invoked in this forward) and
    re-keys/truncates backbone feature maps based on ``n_channel_backbone``.
    """

    def __init__(self, backbone, rpn, roi_heads, track_heads, transform, n_channel_backbone):
        super(GeneralizedRCNN, self).__init__()
        self.transform = transform
        self.backbone = backbone
        self.rpn = rpn
        self.roi_heads = roi_heads
        self.track_heads = track_heads
        self.n_channel_backbone = n_channel_backbone

    def forward(self, images, targets=None):
        """Return losses dict in training mode, else (detections, features).

        Raises:
            ValueError: in training mode when ``targets`` is None.
        """
        if (self.training and (targets is None)):
            raise ValueError('In training mode, targets should be passed')
        # Remember pre-transform image sizes so boxes can be mapped back.
        original_image_sizes = [img.shape[(- 2):] for img in images]
        (images, targets) = self.transform(images, targets)
        features = self.backbone(images.tensors)
        # Keep only the first n_channel_backbone (at most 5) feature levels,
        # re-keyed into the OrderedDict form expected by RPN/ROI heads.
        if (self.n_channel_backbone < 5):
            in_channels = [(i, features[i]) for i in range(self.n_channel_backbone)]
            features = OrderedDict(in_channels)
        if (self.n_channel_backbone > 5):
            in_channels = [(i, features[i]) for i in range(5)]
            features = OrderedDict(in_channels)
        if isinstance(features, torch.Tensor):
            # Single-feature-map backbone.
            features = OrderedDict([(0, features)])
        # rpn also returns objectness scores; they are unused here.
        (proposals, scores, proposal_losses) = self.rpn(images, features, targets)
        (detections, detector_losses) = self.roi_heads(features, proposals, images.image_sizes, targets)
        detections = self.transform.postprocess(detections, images.image_sizes, original_image_sizes)
        losses = {}
        losses.update(detector_losses)
        losses.update(proposal_losses)
        # Map RPN proposals back to original image coordinates (in place).
        for (i, (pred, im_s, o_im_s)) in enumerate(zip(proposals, images.image_sizes, original_image_sizes)):
            boxes = resize_boxes(pred, im_s, o_im_s)
            proposals[i] = boxes
        if self.training:
            # NOTE(review): detections are post-processed above even though
            # only losses are returned in training — possibly wasted work.
            return losses
        return (detections, features)
_node_type()
class WaveguideModeOverlap(optplan.EmOverlap):
    """Overlap with a waveguide eigenmode over a plane of the simulation region.

    NOTE(review): the bare ``_node_type()`` call above looks like a decorator
    truncated by extraction; confirm against the original file.
    """
    type = schema_utils.polymorphic_model_type('overlap.waveguide_mode')
    # 3-vector center of the overlap plane.
    center = optplan.vec3d()
    # 3-vector extents of the overlap region.
    extents = optplan.vec3d()
    # 3-vector normal of the overlap plane.
    normal = optplan.vec3d()
    # Which waveguide mode to overlap with (presumably 0-indexed — confirm).
    mode_num = types.IntType()
    # Power normalization for the overlap.
    power = types.FloatType()
def make_false_data(N, F_bin, T):
    """Create a random two-channel tensor of shape (N, 2, F_bin, T).

    Channel 0 is uniform noise; channel 1 is the same noise perturbed by an
    additional uniform draw shifted to be roughly zero-centered.
    """
    base = torch.rand([N, F_bin, T], dtype=torch.float32)
    perturbed = base + torch.rand([N, F_bin, T], dtype=torch.float32) - 0.5
    return torch.stack([base, perturbed], dim=1)
def _compute_statistics_of_path(path, model, batch_size, dims, cuda):
    """Return (mu, sigma) activation statistics for a path.

    Accepts either a precomputed ``.npz`` cache (containing ``mu``/``sigma``)
    or a directory of ``.jpg``/``.png`` images to compute them from.
    """
    if path.endswith('.npz'):
        archive = np.load(path)
        m, s = archive['mu'][:], archive['sigma'][:]
        archive.close()
    else:
        image_dir = pathlib.Path(path)
        files = list(image_dir.glob('*.jpg')) + list(image_dir.glob('*.png'))
        imgs = np.array([imread(str(fn)).astype(np.float32) for fn in files])
        # NHWC -> NCHW, scaled to [0, 1].
        imgs = imgs.transpose((0, 3, 1, 2))
        imgs /= 255
        m, s = calculate_activation_statistics(imgs, model, batch_size, dims, cuda)
    return (m, s)
def add_evaluation_config(cfg: CN):
    """Attach the DENSEPOSE_EVALUATION config node with its defaults to cfg."""
    evaluation = CN()
    evaluation.TYPE = 'iou'
    evaluation.STORAGE = 'none'
    evaluation.MIN_IOU_THRESHOLD = 0.5
    evaluation.DISTRIBUTED_INFERENCE = True
    evaluation.EVALUATE_MESH_ALIGNMENT = False
    evaluation.MESH_ALIGNMENT_MESH_NAMES = []
    cfg.DENSEPOSE_EVALUATION = evaluation
_module()
class DeformableDETR(DETR):
    """Deformable DETR detector.

    NOTE(review): the bare ``_module()`` call above looks like a registry
    decorator (e.g. ``@MODELS.register_module()``) truncated by extraction.
    """

    def __init__(self, *args, **kwargs):
        # Intentionally calls the grandparent initializer: ``super(DETR, self)``
        # skips DETR.__init__, matching upstream MMDetection's DeformableDETR.
        super(DETR, self).__init__(*args, **kwargs)
class BidirectionalLSTM(AbstractTapeModel):
    """Bidirectional LSTM encoder over protein sequences (TAPE).

    Runs independent forward and reverse LSTM stacks and concatenates their
    outputs into ``inputs['encoder_output']``.
    """

    # NOTE(review): decorator line below appears truncated by extraction —
    # presumably ``@_hparams.capture`` or similar; restore before use.
    _hparams.capture
    def __init__(self, n_symbols: int, n_units: int=1024, n_layers: int=3, dropout: Optional[float]=0.1) -> None:
        super().__init__(n_symbols)
        if (dropout is None):
            dropout = 0
        self.embedding = Embedding(n_symbols, 128)
        self.forward_lstm = Stack([LSTM(n_units, return_sequences=True) for _ in range(n_layers)], name='forward_lstm')
        self.reverse_lstm = Stack([LSTM(n_units, return_sequences=True) for _ in range(n_layers)], name='reverse_lstm')
        self.dropout = Dropout(dropout)

    def call(self, inputs):
        sequence = inputs['primary']
        protein_length = inputs['protein_length']
        sequence = self.embedding(sequence)
        # The 'checkpoints' collection enables gradient checkpointing downstream.
        tf.add_to_collection('checkpoints', sequence)
        forward_output = self.forward_lstm(sequence)
        tf.add_to_collection('checkpoints', forward_output)
        # Reverse each sequence (respecting true lengths), encode, reverse back.
        reversed_sequence = tf.reverse_sequence(sequence, protein_length, seq_axis=1)
        reverse_output = self.reverse_lstm(reversed_sequence)
        reverse_output = tf.reverse_sequence(reverse_output, protein_length, seq_axis=1)
        tf.add_to_collection('checkpoints', reverse_output)
        encoder_output = tf.concat((forward_output, reverse_output), (- 1))
        encoder_output = self.dropout(encoder_output)
        inputs['encoder_output'] = encoder_output
        return inputs

    def get_optimal_batch_sizes(self) -> Tuple[(List[int], List[int])]:
        """Per-length-bucket batch sizes, scaled by available GPU memory."""
        bucket_sizes = np.array([100, 200, 300, 400, 600, 900, 1000, 1200, 1300, 2000, 3000])
        # NOTE(review): 12 batch sizes for 11 bucket boundaries — presumably
        # the extra entry covers sequences beyond the last bucket; confirm.
        batch_sizes = np.array([5, 5, 4, 3, 2, 1, 0.75, 0.5, 0.5, 0.25, 0, 0])
        batch_sizes = np.asarray((batch_sizes * self._get_gpu_memory()), np.int32)
        # Never let a bucket's batch size round down to zero.
        batch_sizes[(batch_sizes <= 0)] = 1
        return (bucket_sizes, batch_sizes)
class Cylinder(Element):
    """A cylinder element defined by its two axis endpoints and a radius."""

    def __init__(self, P1, P2, Radius=50, Priority=10):
        Element.__init__(self, 'Cylinder')
        # Stored in the underlying Element mapping, preserving key order.
        for key, value in (('Priority', Priority), ('P1', Point(P1)),
                           ('P2', Point(P2)), ('Radius', Radius)):
            self[key] = value
def test_none_correct():
    """All-wrong predictions must score 0 for precision, AUROC and recall."""
    # Dropped the previously-imported but unused AUPRMetric.
    from pysad.evaluation import PrecisionMetric, AUROCMetric, RecallMetric
    import numpy as np
    from pysad.utils import fix_seed
    fix_seed(61)
    # Expected score per metric class when every prediction is wrong.
    metric_classes = {PrecisionMetric: 0.0, AUROCMetric: 0.0, RecallMetric: 0.0}
    y_true = np.random.randint(0, 2, size=(25,), dtype=np.int32)
    # Force both classes to be present so the metrics are well-defined.
    y_true[0] = 1
    y_true[1] = 0
    # Invert the labels: predictions are maximally wrong.
    y_pred = (1 - y_true.copy())
    helper_test_all_metrics(metric_classes, y_true, y_pred)
class DoWhileScope(ControlFlow):
    """Control-flow element emitting a C++ ``do { ... } while (test);`` loop."""

    # Parent SDFG (used to unparse the loop condition expression).
    sdfg: SDFG
    # Loop condition; None is emitted as ``while (true)``.
    test: CodeBlock
    # Loop body region.
    body: GeneralBlock

    def as_cpp(self, codegen, symbols) -> str:
        """Generate the C++ source for this do-while loop."""
        if (self.test is not None):
            test = unparse_interstate_edge(self.test.code[0], self.sdfg, codegen=codegen)
        else:
            test = 'true'
        expr = 'do {\n'
        expr += _clean_loop_body(self.body.as_cpp(codegen, symbols))
        expr += f'''
}} while ({test});
'''
        return expr

    # NOTE(review): these accessors are usually @property upstream; the
    # decorators may have been lost in extraction — confirm call sites.
    def first_state(self) -> SDFGState:
        return self.body[0].first_state

    def children(self) -> List[ControlFlow]:
        return [self.body]
def test_isotonic_regression_with_ties_in_differently_sized_groups():
    """Ties spanning groups of different sizes must be pooled correctly."""
    x = np.array([0, 1, 1, 2, 3, 4])
    y = np.array([0, 0, 1, 0, 0, 1])
    expected = np.array([0.0, 0.25, 0.25, 0.25, 0.25, 1.0])
    model = IsotonicRegression()
    model.fit(x, y)
    # Both transform-after-fit and fit_transform must agree with the expectation.
    assert_array_almost_equal(model.transform(x), expected)
    assert_array_almost_equal(model.fit_transform(x, y), expected)
class codelineType(GeneratedsSuper):
    """DOM binding for a Doxygen ``codeline`` XML element.

    Holds the line's attributes (external, lineno, refkind, refid) and a list
    of child ``highlight`` elements.

    NOTE(review): this class follows the generateDS.py generated-code
    conventions (factory hook, get_/set_ accessors, export/build methods);
    prefer regenerating from the schema over hand-editing.
    """
    # Hooks for the generateDS subclassing mechanism (see factory()).
    subclass = None
    superclass = None
    def __init__(self, external=None, lineno=None, refkind=None, refid=None, highlight=None):
        self.external = external
        self.lineno = lineno
        self.refkind = refkind
        self.refid = refid
        # Default to a fresh list so instances never share one mutable default.
        if (highlight is None):
            self.highlight = []
        else:
            self.highlight = highlight
    def factory(*args_, **kwargs_):
        # Instantiate a registered subclass when one has been installed.
        if codelineType.subclass:
            return codelineType.subclass(*args_, **kwargs_)
        else:
            return codelineType(*args_, **kwargs_)
    factory = staticmethod(factory)
    # --- accessors for the highlight child list ---
    def get_highlight(self):
        return self.highlight
    def set_highlight(self, highlight):
        self.highlight = highlight
    def add_highlight(self, value):
        self.highlight.append(value)
    def insert_highlight(self, index, value):
        # NOTE(review): despite the name this REPLACES the element at `index`
        # rather than inserting before it.
        self.highlight[index] = value
    # --- accessors for the simple attributes ---
    def get_external(self):
        return self.external
    def set_external(self, external):
        self.external = external
    def get_lineno(self):
        return self.lineno
    def set_lineno(self, lineno):
        self.lineno = lineno
    def get_refkind(self):
        return self.refkind
    def set_refkind(self, refkind):
        self.refkind = refkind
    def get_refid(self):
        return self.refid
    def set_refid(self, refid):
        self.refid = refid
    def export(self, outfile, level, namespace_='', name_='codelineType', namespacedef_=''):
        """Write this element (attributes and children) as XML to *outfile*."""
        showIndent(outfile, level)
        outfile.write(('<%s%s %s' % (namespace_, name_, namespacedef_)))
        self.exportAttributes(outfile, level, namespace_, name_='codelineType')
        # Use the self-closing form when there are no children.
        if self.hasContent_():
            outfile.write('>\n')
            self.exportChildren(outfile, (level + 1), namespace_, name_)
            showIndent(outfile, level)
            outfile.write(('</%s%s>\n' % (namespace_, name_)))
        else:
            outfile.write(' />\n')
    def exportAttributes(self, outfile, level, namespace_='', name_='codelineType'):
        # Only attributes that are set are emitted.
        # NOTE(review): external/refkind are written without surrounding
        # quotes (relying on quote_attrib) while lineno is explicitly quoted;
        # confirm quote_attrib adds the quotes itself.
        if (self.external is not None):
            outfile.write((' external=%s' % (quote_attrib(self.external),)))
        if (self.lineno is not None):
            outfile.write((' lineno="%s"' % self.format_integer(self.lineno, input_name='lineno')))
        if (self.refkind is not None):
            outfile.write((' refkind=%s' % (quote_attrib(self.refkind),)))
        if (self.refid is not None):
            outfile.write((' refid=%s' % (self.format_string(quote_attrib(self.refid).encode(ExternalEncoding), input_name='refid'),)))
    def exportChildren(self, outfile, level, namespace_='', name_='codelineType'):
        for highlight_ in self.highlight:
            highlight_.export(outfile, level, namespace_, name_='highlight')
    def hasContent_(self):
        # NOTE(review): an EMPTY highlight list still counts as content here
        # (the check is `is not None`, not truthiness) -- confirm intended.
        if (self.highlight is not None):
            return True
        else:
            return False
    def exportLiteral(self, outfile, level, name_='codelineType'):
        """Write this element as Python-literal construction code."""
        level += 1
        self.exportLiteralAttributes(outfile, level, name_)
        if self.hasContent_():
            self.exportLiteralChildren(outfile, level, name_)
    def exportLiteralAttributes(self, outfile, level, name_):
        if (self.external is not None):
            showIndent(outfile, level)
            outfile.write(('external = "%s",\n' % (self.external,)))
        if (self.lineno is not None):
            showIndent(outfile, level)
            outfile.write(('lineno = %s,\n' % (self.lineno,)))
        if (self.refkind is not None):
            showIndent(outfile, level)
            outfile.write(('refkind = "%s",\n' % (self.refkind,)))
        if (self.refid is not None):
            showIndent(outfile, level)
            outfile.write(('refid = %s,\n' % (self.refid,)))
    def exportLiteralChildren(self, outfile, level, name_):
        showIndent(outfile, level)
        outfile.write('highlight=[\n')
        level += 1
        for highlight in self.highlight:
            showIndent(outfile, level)
            outfile.write('model_.highlight(\n')
            highlight.exportLiteral(outfile, level, name_='highlight')
            showIndent(outfile, level)
            outfile.write('),\n')
        level -= 1
        showIndent(outfile, level)
        outfile.write('],\n')
    def build(self, node_):
        """Populate this object from a minidom element node."""
        attrs = node_.attributes
        self.buildAttributes(attrs)
        for child_ in node_.childNodes:
            # Strip any namespace prefix from the tag name.
            nodeName_ = child_.nodeName.split(':')[(- 1)]
            self.buildChildren(child_, nodeName_)
    def buildAttributes(self, attrs):
        if attrs.get('external'):
            self.external = attrs.get('external').value
        if attrs.get('lineno'):
            try:
                self.lineno = int(attrs.get('lineno').value)
            except ValueError as exp:
                raise ValueError(('Bad integer attribute (lineno): %s' % exp))
        if attrs.get('refkind'):
            self.refkind = attrs.get('refkind').value
        if attrs.get('refid'):
            self.refid = attrs.get('refid').value
    def buildChildren(self, child_, nodeName_):
        # Only `highlight` child elements are recognized; others are ignored.
        if ((child_.nodeType == Node.ELEMENT_NODE) and (nodeName_ == 'highlight')):
            obj_ = highlightType.factory()
            obj_.build(child_)
            self.highlight.append(obj_)
def register_Ns3VsaInfo_methods(root_module, cls):
    """Register constructors and public attributes of ns3::VsaInfo on *cls*."""
    # Copy constructor.
    cls.add_constructor([param('ns3::VsaInfo const &', 'arg0')])
    # Full constructor taking every VSA field.
    cls.add_constructor([param('ns3::Mac48Address', 'peer'), param('ns3::OrganizationIdentifier', 'identifier'), param('uint8_t', 'manageId'), param('ns3::Ptr< ns3::Packet >', 'vscPacket'), param('uint32_t', 'channel'), param('uint8_t', 'repeat'), param('ns3::VsaTransmitInterval', 'interval')])
    # Mutable instance attributes, registered in declaration order.
    attributes = (
        ('channelNumber', 'uint32_t'),
        ('managementId', 'uint8_t'),
        ('oi', 'ns3::OrganizationIdentifier'),
        ('peer', 'ns3::Mac48Address'),
        ('repeatRate', 'uint8_t'),
        ('sendInterval', 'ns3::VsaTransmitInterval'),
        ('vsc', 'ns3::Ptr< ns3::Packet >'),
    )
    for attr_name, attr_type in attributes:
        cls.add_instance_attribute(attr_name, attr_type, is_const=False)
    return
def etl_simple_femr_program() -> None:
    """Entry point: convert a "simple femr" source into a femr patient database.

    Pipeline (each stage is skipped when its output already exists):
      1. Parse the input file(s) into an event collection, building an OMOP
         concept table (optionally seeded from an Athena vocabulary download).
      2. Group the events into patients.
      3. Convert the patient collection into the final extract.
    """
    parser = argparse.ArgumentParser(description='An extraction tool for generic OMOP sources')
    parser.add_argument('simple_source', type=str, help='Path of the folder to the simple femr input source')
    parser.add_argument('target_location', type=str, help='The place to store the extract')
    # NOTE(review): temp_location is positional, so `default=None` is never
    # used and abspath(None) below would fail -- consider nargs='?' upstream.
    parser.add_argument('temp_location', type=str, help='The place to store temporary files', default=None)
    parser.add_argument('--num_threads', type=int, help='The number of threads to use', default=1)
    parser.add_argument('--athena_download', type=str, help='An optional athena download to use for ontologies', default=None)
    args = parser.parse_args()
    # Raise the open-file limit to its hard maximum: the pipeline keeps many
    # shard files open simultaneously.
    (soft, hard) = resource.getrlimit(resource.RLIMIT_NOFILE)
    resource.setrlimit(resource.RLIMIT_NOFILE, (hard, hard))
    args.target_location = os.path.abspath(args.target_location)
    args.temp_location = os.path.abspath(args.temp_location)
    if not os.path.exists(args.target_location):
        os.mkdir(args.target_location)
    if not os.path.exists(args.temp_location):
        os.mkdir(args.temp_location)
    # Log to a file inside the extract directory and to the console.
    logFormatter = logging.Formatter('%(asctime)s [%(threadName)-12.12s] [%(levelname)-5.5s] %(message)s')
    rootLogger = logging.getLogger()
    fileHandler = logging.FileHandler(os.path.join(args.target_location, 'log'))
    fileHandler.setFormatter(logFormatter)
    rootLogger.addHandler(fileHandler)
    consoleHandler = logging.StreamHandler()
    consoleHandler.setFormatter(logFormatter)
    rootLogger.addHandler(consoleHandler)
    rootLogger.setLevel(logging.INFO)
    rootLogger.info(f'Extracting from OMOP with arguments {args}')
    try:
        event_dir = os.path.join(args.temp_location, 'events')
        patients_dir = os.path.join(args.temp_location, 'patients')
        omop_dir = os.path.join(args.temp_location, 'omop_dir')
        if not os.path.exists(event_dir):
            rootLogger.info('Converting to events')
            assert os.path.exists(args.simple_source), 'Input file / directory is missing?'
            input_files: List[str]
            if os.path.isdir(args.simple_source):
                input_files = [os.path.join(args.simple_source, fname) for fname in os.listdir(args.simple_source)]
            else:
                input_files = [args.simple_source]
            # Collect every concept id referenced by the input, in parallel.
            concept_ids = set()
            with multiprocessing.Pool(args.num_threads) as pool:
                for f_concepts in pool.imap_unordered(get_concept_ids_from_file, input_files):
                    concept_ids |= f_concepts
            os.mkdir(omop_dir)
            with open(os.path.join(omop_dir, 'concept.csv'), 'w') as f:
                concept_id_map: Dict[str, int] = {}
                writer = csv.DictWriter(f, ['concept_id', 'concept_name', 'vocabulary_id', 'standard_concept', 'concept_code'])
                writer.writeheader()
                if args.athena_download:
                    # Seed the concept table from the Athena vocabulary dump.
                    with open(os.path.join(args.athena_download, 'CONCEPT.csv'), 'r') as f:
                        reader = csv.DictReader(f, delimiter='\t')
                        for row in reader:
                            # Drop columns not present in our slimmed schema.
                            del row['invalid_reason']
                            del row['domain_id']
                            del row['valid_end_date']
                            del row['concept_class_id']
                            del row['valid_start_date']
                            writer.writerow(row)
                            concept_id_map[f"{row['vocabulary_id']}/{row['concept_code']}"] = int(row['concept_id'])
                # Assign fresh ids to concepts Athena did not cover.
                # BUG FIX: this line was truncated to `index = (i + )` (syntax
                # error). Start numbering above the largest already-assigned id
                # so custom concepts can never collide with Athena ids.
                next_concept_id = max(concept_id_map.values(), default=0) + 1
                for (i, concept_id) in enumerate(concept_ids):
                    if concept_id in concept_id_map:
                        continue
                    index = next_concept_id + i
                    prefix_index = concept_id.index('/')
                    vocab = concept_id[:prefix_index]
                    code = concept_id[(prefix_index + 1):]
                    writer.writerow({'concept_id': index, 'concept_name': concept_id, 'vocabulary_id': vocab, 'standard_concept': '', 'concept_code': code})
                    concept_id_map[concept_id] = index
            if args.athena_download:
                # Copy concept relationships verbatim (converting TSV -> CSV).
                with open(os.path.join(args.athena_download, 'CONCEPT_RELATIONSHIP.csv'), 'r') as f:
                    with open(os.path.join(omop_dir, 'concept_relationship.csv'), 'w') as wf:
                        reader = csv.DictReader(f, delimiter='\t')
                        assert (reader.fieldnames is not None)
                        writer = csv.DictWriter(wf, fieldnames=reader.fieldnames)
                        writer.writeheader()
                        for row in reader:
                            writer.writerow(row)
            else:
                os.mkdir(os.path.join(omop_dir, 'concept_relationship'))
            event_collection = EventCollection(event_dir)
            with multiprocessing.Pool(args.num_threads) as pool:
                tasks = [(fname, concept_id_map, event_collection) for fname in input_files]
                for _ in pool.imap_unordered(convert_file_to_event_file, tasks):
                    pass
        else:
            rootLogger.info('Already converted to events, skipping')
            event_collection = EventCollection(event_dir)
        if not os.path.exists(patients_dir):
            rootLogger.info('Converting to patients')
            patient_collection = event_collection.to_patient_collection(patients_dir, num_threads=args.num_threads)
        else:
            rootLogger.info('Already converted to patients, skipping')
            patient_collection = PatientCollection(patients_dir)
        if not os.path.exists(os.path.join(args.target_location, 'meta')):
            rootLogger.info('Converting to extract')
            print('Converting to extract', datetime.datetime.now())
            patient_collection.to_patient_database(args.target_location, omop_dir, num_threads=args.num_threads).close()
        else:
            rootLogger.info('Already converted to extract, skipping')
    except Exception as e:
        # Log the full traceback into the extract's log file before propagating.
        rootLogger.critical(e, exc_info=True)
        raise e
def _impl(array, axis, highlevel, behavior, attrs):
    """Count elements of *array* along *axis*.

    At axis 0 the result is the array's overall length (1 for a Record); at
    deeper axes every list's length is computed from its start/stop offsets.
    """
    axis = regularize_axis(axis)
    with HighLevelContext(behavior=behavior, attrs=attrs) as ctx:
        layout = ctx.unwrap(array, allow_record=False, primitive_policy='error')
    if (not is_integer(axis)):
        raise TypeError(f"'axis' must be an integer, not {axis!r}")
    # axis 0 is a scalar answer: the outermost length.
    if (maybe_posaxis(layout, axis, 1) == 0):
        index_nplike = layout.backend.index_nplike
        # NOTE(review): unwrap above uses allow_record=False, so this Record
        # branch looks unreachable -- confirm against callers.
        if isinstance(layout, ak.record.Record):
            return index_nplike.asarray(index_nplike.shape_item_as_index(1))
        else:
            return index_nplike.asarray(index_nplike.shape_item_as_index(layout.length))
    def action(layout, depth, **kwargs):
        # At the requested depth, each list's length is stop - start.
        posaxis = maybe_posaxis(layout, axis, depth)
        if ((posaxis == depth) and layout.is_list):
            return ak.contents.NumpyArray((layout.stops.data - layout.starts.data))
        elif layout.is_leaf:
            # Reached a leaf before the requested axis: the axis is too deep.
            raise AxisError(f'axis={axis} exceeds the depth of this array ({depth})')
    out = ak._do.recursively_apply(layout, action, numpy_to_regular=True)
    return ctx.wrap(out, highlevel=highlevel)
_utils.test()
def test_struct_arg_with_matrix():
    """A nested taichi struct containing a matrix is passed through correctly."""
    mat = ti.types.matrix(3, 2, ti.f32)
    inner = ti.types.struct(a=mat, b=ti.f32)
    outer = ti.types.struct(a=ti.i32, b=inner)

    def foo(a: outer) -> ti.i32:
        total = a.a + a.b.b
        for i in range(3):
            for j in range(2):
                total += a.b.a[i, j] * (i + 1) * (j + 2)
        return total

    arg = outer(a=1, b=inner(a=mat(1, 2, 3, 4, 5, 6), b=123))
    # Reference value computed with plain Python arithmetic.
    expected = 1 + 123
    for i in range(3):
        for j in range(2):
            expected += (i + 1) * (j + 2) * (i * 2 + j + 1)
    assert foo(arg) == expected
class LazyMappingExample(LazyMapping):
    """Test double for LazyMapping that records how often keys are computed."""

    def __init__(self, cache):
        super(LazyMappingExample, self).__init__(cache)
        # Per-key count of compute_batch invocations.
        self.computes_called = Counter()

    def compute_batch(self, keys):
        for key in keys:
            self.computes_called[key] += 1
        # Each value is simply twice its key.
        return [key * 2 for key in keys]
def split_filtered_relations(relations):
    """Split (src, num, rel, label) tuples into team and player relations.

    Boolean labels mark team relations and string labels mark player
    relations; a None label is dropped. Duplicates are removed through sets,
    so the order of the returned lists is unspecified.
    """
    teams = set()
    players = set()
    for _, num, rel, label in relations:
        # bool must be tested before str: isinstance(True, int) is also True.
        if isinstance(label, bool):
            teams.add((num[3], rel, label))
        elif isinstance(label, str):
            players.add((num[3], rel, label))
        else:
            assert label is None
    return (list(teams), list(players))
def evaluate(dataloader, cnn_model, rnn_model, batch_size, labels):
    """Run one evaluation pass and return averaged (sentence, word, text) losses.

    NOTE(review): the models are switched to eval() mode but gradients are not
    disabled (no torch.no_grad()) -- confirm callers handle that.
    """
    cnn_model.eval()
    rnn_model.eval()
    s_total_loss = 0  # sentence-level loss accumulator
    w_total_loss = 0  # word-level loss accumulator
    t_total_loss = 0  # image-to-text loss accumulator
    for (step, data) in enumerate(dataloader, 0):
        (imgs, captions, cap_lens, class_ids, keys) = prepare_data(data)
        # Only the last (highest-resolution) image scale is evaluated.
        (words_features, sent_code, word_logits) = cnn_model(imgs[(- 1)], captions)
        hidden = rnn_model.init_hidden(batch_size)
        (words_emb, sent_emb) = rnn_model(captions, cap_lens, hidden)
        (w_loss0, w_loss1, attn) = words_loss(words_features, words_emb, labels, cap_lens, class_ids, batch_size)
        w_total_loss += (w_loss0 + w_loss1).data
        (s_loss0, s_loss1) = sent_loss(sent_code, sent_emb, labels, class_ids, batch_size)
        s_total_loss += (s_loss0 + s_loss1).data
        t_loss = image_to_text_loss(word_logits, captions)
        t_total_loss += t_loss.data
        # Cap evaluation at 51 batches (steps 0..50 inclusive).
        if (step == 50):
            break
    # NOTE(review): dividing by `step` (the last index) instead of the number
    # of processed batches slightly inflates the averages, and raises
    # ZeroDivisionError for a single-batch loader -- confirm intended.
    s_cur_loss = (s_total_loss.item() / step)
    w_cur_loss = (w_total_loss.item() / step)
    t_cur_loss = (t_total_loss.item() / step)
    return (s_cur_loss, w_cur_loss, t_cur_loss)
def check_model_works_with_seqlen(model_type, config, input_len):
    """Instantiate *model_type* and verify it accepts a sequence of *input_len*."""
    key = PRNGKey(0)
    vocab_axis = hax.Axis('vocab', 128)
    model = model_type.init(vocab_axis, config, key=key)
    # Token ids 0..input_len-1 over a position axis resized to the target length.
    input_ids = hax.arange(config.Pos.resize(input_len), dtype=jax.numpy.int32)
    mask = AttentionMask.causal()
    out = model(input_ids, key=key, attn_mask=mask)
    assert out.axis_size('position') == input_len
def test_set_from_mat():
    """set_KRT accepts 3x3 K/R with a 3-vector T and rejects 4x4 matrices."""
    param = CameraParameter(name='test_set')
    eye3 = np.eye(3)
    eye4 = np.eye(4)
    t_vec = np.zeros(shape=[3])
    # Valid calls, with and without inverting the extrinsics.
    param.set_KRT(K_mat=eye3, R_mat=eye3, T_vec=t_vec)
    param.set_KRT(K_mat=eye3, R_mat=eye3, T_vec=t_vec, inverse_extrinsic=True)
    # A wrongly shaped intrinsic or rotation matrix must be rejected.
    with pytest.raises(AssertionError):
        param.set_KRT(K_mat=eye4, R_mat=eye3, T_vec=t_vec)
    with pytest.raises(AssertionError):
        param.set_KRT(K_mat=eye3, R_mat=eye4, T_vec=t_vec)
    assert len(param.get_value('translation')) == 3
def slice_signal_index(path, window_size, stride):
    """Return (begin, end) index pairs tiling the signal stored at *path*.

    The audio is loaded at 16 kHz; *stride* is the hop expressed as a
    fraction in (0, 1] of *window_size*. Trailing samples that do not fill a
    complete window are dropped.
    """
    # COMPAT FIX: pass the sample rate by keyword -- positional `sr` was
    # removed in librosa 0.10, and the keyword form behaves identically on
    # all earlier versions.
    (signal, rate) = librosa.load(path, sr=16000)
    assert (stride <= 1), stride
    assert (stride > 0), stride
    assert (signal.ndim == 1), signal.ndim
    n_samples = signal.shape[0]
    offset = int(window_size * stride)
    # One slice per full window, hopping `offset` samples each time.
    slices = [(beg_i, beg_i + window_size)
              for beg_i in range(0, (n_samples - window_size) + 1, offset)]
    return slices
def degseq_to_data(degree_sequence):
    """Encode a degree sequence as an integer: base-10 digit *i* holds the
    *i*-th smallest degree (so only meaningful for degrees 0-9).

    BUG FIX: uses sorted() instead of list.sort(), so the caller's list is no
    longer mutated as a side effect; the returned value is unchanged.
    """
    return sum(d * (10 ** i) for i, d in enumerate(sorted(degree_sequence)))
def get_openvino_throughput(model_path: Path, test_dataset: Dataset) -> float:
    """Measure OpenVINO inference throughput (images per second) on *test_dataset*."""
    openvino_dir = model_path / 'openvino'
    inferencer = OpenVINOInferencer(openvino_dir / 'model.xml', openvino_dir / 'metadata.json')
    start = time.time()
    # Run a prediction for every image in the test split.
    for image_path in test_dataset.samples.image_path:
        inferencer.predict(image_path)
    elapsed = time.time() - start
    return len(test_dataset) / elapsed
class CNN(nn.Module):
    """Four-block convolutional feature extractor.

    Produces a ``nhiddens[-1]``-dimensional feature vector; intermediate
    activations can be requested via the ``state`` / ``ret_hid`` arguments of
    forward().
    """

    def __init__(self, bn=True, dataset='mnist', init='ortho'):
        super(CNN, self).__init__()
        # Channel widths of the four conv blocks.
        nhiddens = [200, 500, 700, 1000]
        # Input geometry (channels, side length) per dataset.
        if (dataset == 'mnist'):
            self.channel = 1
            self.sz = 28
        elif ('cifar' in dataset):
            self.channel = 3
            self.sz = 32
        elif (dataset == 'stl10'):
            self.channel = 3
            # NOTE(review): STL-10 images are natively 96x96; 32 here implies
            # the data pipeline resizes them -- confirm.
            self.sz = 32
        # NOTE(review): an unrecognized dataset leaves self.channel / self.sz
        # unset, so forward() would fail with AttributeError.
        self.conv1 = nn.Conv2d(self.channel, nhiddens[0], 3, 1)
        # Each conv is optionally followed by BatchNorm (identity when bn=False).
        if bn:
            self.bn1 = nn.BatchNorm2d(nhiddens[0])
        else:
            self.bn1 = nn.Sequential()
        self.conv2 = nn.Conv2d(nhiddens[0], nhiddens[1], 3, 1)
        if bn:
            self.bn2 = nn.BatchNorm2d(nhiddens[1])
        else:
            self.bn2 = nn.Sequential()
        self.conv3 = nn.Conv2d(nhiddens[1], nhiddens[2], 3, 1)
        if bn:
            self.bn3 = nn.BatchNorm2d(nhiddens[2])
        else:
            self.bn3 = nn.Sequential()
        self.conv4 = nn.Conv2d(nhiddens[2], nhiddens[3], 3, 1)
        if bn:
            self.bn4 = nn.BatchNorm2d(nhiddens[3])
        else:
            self.bn4 = nn.Sequential()
        # Project-specific weight initialization ('ortho' by default).
        param_init(self, init=init)
        self.feat_dim = nhiddens[(- 1)]
        self.nhiddens = nhiddens

    def forward(self, x, ret_hid=False, state=(- 1)):
        """Forward pass.

        ``state`` in {0, 1, 2} returns that block's activation early;
        ``ret_hid`` returns the dict of intermediate activations instead of
        the pooled feature vector.
        """
        hid = {}
        x = x.view((- 1), self.channel, self.sz, self.sz)
        x = self.conv1(x)
        x = F.relu(self.bn1(x))
        hid['0'] = x
        if (state == 0):
            return x
        x = F.max_pool2d(x, 2, 2)
        x = self.conv2(x)
        x = F.relu(self.bn2(x))
        hid['1'] = x
        if (state == 1):
            return x
        x = self.conv3(x)
        x = F.relu(self.bn3(x))
        hid['2'] = x
        if (state == 2):
            return x
        x = F.max_pool2d(x, 2, 2)
        x = self.conv4(x)
        x = F.relu(self.bn4(x))
        hid['3'] = x
        # NOTE(review): the pooling kernel is set to TWICE the feature map's
        # height; AvgPool2d rejects kernels larger than their input, so this
        # looks like it was meant to be `x.size()[2]` -- confirm with a run.
        x = nn.AvgPool2d(*[(x.size()[2] * 2)])(x)
        out = x.view(x.size()[0], (- 1))
        if ret_hid:
            return hid
        return out
def to_attribute(name, attr_string):
    """Parse *attr_string* into the first ARFF attribute type that accepts it.

    Attribute classes are tried in a fixed priority order; raises
    ParseArffError when none of them can parse the string.
    """
    candidates = (NominalAttribute, NumericAttribute, DateAttribute, StringAttribute, RelationalAttribute)
    # Lazily evaluate parse_attribute per class and take the first success.
    parsed = next(
        (attr
         for attr in (c.parse_attribute(name, attr_string) for c in candidates)
         if attr is not None),
        None,
    )
    if parsed is None:
        raise ParseArffError('unknown attribute %s' % attr_string)
    return parsed
def xmlread(filename):
    """Read an XML file stored inside a zip archive, caching open archives.

    *filename* has the form ``<zip-path>@<xml-path-inside-zip>``. Archives are
    kept open in the module-level ``_xml_path_zip`` / ``_xml_zfile`` caches so
    repeated reads from the same zip do not reopen it.

    NOTE(review): the separator character was garbled in this file (the code
    searched for '' and sliced past it) -- '@' is assumed here; confirm
    against the callers.
    """
    global _xml_path_zip
    global _xml_zfile
    sep = '@'
    path = filename
    # BUG FIX: str.index() raises ValueError instead of returning -1, and the
    # old code searched for the empty string (always found at 0); use find().
    pos_at = path.find(sep)
    if pos_at == -1:
        print("character '%s' is not found from the given path '%s'" % (sep, path))
        assert 0
    path_zip = path[0:pos_at]
    path_xml = path[pos_at + len(sep):]
    if not os.path.isfile(path_zip):
        print("zip file '%s' is not found" % path_zip)
        assert 0
    # Serve from the cache when this archive was opened before.
    for i in range(len(_xml_path_zip)):
        if _xml_path_zip[i] == path_zip:
            data = _xml_zfile[i].open(path_xml)
            return ET.fromstring(data.read())
    _xml_path_zip.append(path_zip)
    print("read new xml file '%s'" % path_zip)
    _xml_zfile.append(zipfile.ZipFile(path_zip, 'r'))
    data = _xml_zfile[-1].open(path_xml)
    return ET.fromstring(data.read())
def register_Ns3ErpInformation_methods(root_module, cls):
    """Register ns3::ErpInformation bindings on *cls* (pybindgen style).

    NOTE(review): this mirrors the other generated ``register_*_methods``
    helpers in this file; prefer regenerating over hand-editing.
    """
    cls.add_output_stream_operator()
    # Constructors: copy constructor and default constructor.
    cls.add_constructor([param('ns3::ErpInformation const &', 'arg0')])
    cls.add_constructor([])
    # Wire-format (de)serialization hooks of the information element.
    cls.add_method('DeserializeInformationField', 'uint8_t', [param('ns3::Buffer::Iterator', 'start'), param('uint8_t', 'length')], is_virtual=True)
    cls.add_method('ElementId', 'ns3::WifiInformationElementId', [], is_const=True, is_virtual=True)
    # Field getters.
    cls.add_method('GetBarkerPreambleMode', 'uint8_t', [], is_const=True)
    cls.add_method('GetInformationFieldSize', 'uint8_t', [], is_const=True, is_virtual=True)
    cls.add_method('GetNonErpPresent', 'uint8_t', [], is_const=True)
    cls.add_method('GetSerializedSize', 'uint16_t', [], is_const=True)
    cls.add_method('GetUseProtection', 'uint8_t', [], is_const=True)
    cls.add_method('Serialize', 'ns3::Buffer::Iterator', [param('ns3::Buffer::Iterator', 'start')], is_const=True)
    cls.add_method('SerializeInformationField', 'void', [param('ns3::Buffer::Iterator', 'start')], is_const=True, is_virtual=True)
    # Field setters.
    cls.add_method('SetBarkerPreambleMode', 'void', [param('uint8_t', 'barkerPreambleMode')])
    cls.add_method('SetErpSupported', 'void', [param('uint8_t', 'erpSupported')])
    cls.add_method('SetNonErpPresent', 'void', [param('uint8_t', 'nonErpPresent')])
    cls.add_method('SetUseProtection', 'void', [param('uint8_t', 'useProtection')])
    return
def get_from_cache(url: str, cache_dir=None, force_download=False, proxies=None, etag_timeout=10, resume_download=False, user_agent: Union[(Dict, str, None)]=None, use_auth_token: Union[(bool, str, None)]=None, local_files_only=False) -> Optional[str]:
    """Download *url* (if needed) into the cache and return the local path.

    Returns the cached file path, or raises when the file cannot be fetched
    and no usable cached copy exists. With ``local_files_only=True`` no
    network access is attempted.
    """
    from contextlib import contextmanager  # local: used by the resume manager

    if cache_dir is None:
        cache_dir = TRANSFORMERS_CACHE
    if isinstance(cache_dir, Path):
        cache_dir = str(cache_dir)
    os.makedirs(cache_dir, exist_ok=True)
    # BUG FIX: this line was truncated to `headers = {'user-agent':` (a syntax
    # error). Rebuilt using the module's user-agent helper.
    # NOTE(review): confirm `http_user_agent` is the helper this module defines.
    headers = {'user-agent': http_user_agent(user_agent)}
    if isinstance(use_auth_token, str):
        # Caller supplied an explicit token.
        headers['authorization'] = f'Bearer {use_auth_token}'
    elif use_auth_token:
        # use_auth_token=True: read the token stored by `huggingface-cli login`.
        token = HfFolder.get_token()
        if token is None:
            raise EnvironmentError('You specified use_auth_token=True, but a huggingface token was not found.')
        headers['authorization'] = f'Bearer {token}'
    url_to_download = url
    etag = None
    if not local_files_only:
        try:
            r = requests.head(url, headers=headers, allow_redirects=False, proxies=proxies, timeout=etag_timeout)
            _raise_for_status(r)
            etag = r.headers.get('X-Linked-Etag') or r.headers.get('ETag')
            if etag is None:
                raise OSError("Distant resource does not have an ETag, we won't be able to reliably ensure reproducibility.")
            # Follow one redirect manually so the cache key is derived from
            # the ORIGINAL url's ETag.
            if 300 <= r.status_code <= 399:
                url_to_download = r.headers['Location']
        except (requests.exceptions.SSLError, requests.exceptions.ProxyError):
            raise
        except (requests.exceptions.ConnectionError, requests.exceptions.Timeout):
            # Offline: fall through and try to serve from the cache.
            pass
    filename = url_to_filename(url, etag)
    cache_path = os.path.join(cache_dir, filename)
    if etag is None:
        # The remote could not be reached: serve any matching cached file.
        if os.path.exists(cache_path):
            return cache_path
        else:
            matching_files = [file for file in fnmatch.filter(os.listdir(cache_dir), (filename.split('.')[0] + '.*')) if ((not file.endswith('.json')) and (not file.endswith('.lock')))]
            if len(matching_files) > 0:
                return os.path.join(cache_dir, matching_files[-1])
            elif local_files_only:
                raise FileNotFoundError("Cannot find the requested files in the cached path and outgoing traffic has been disabled. To enable model look-ups and downloads online, set 'local_files_only' to False.")
            else:
                raise ValueError('Connection error, and we cannot find the requested files in the cached path. Please try again or make sure your Internet connection is on.')
    if os.path.exists(cache_path) and (not force_download):
        return cache_path
    # Serialize concurrent downloads of the same file across processes.
    lock_path = cache_path + '.lock'
    with FileLock(lock_path):
        # Another process may have finished the download while we waited.
        if os.path.exists(cache_path) and (not force_download):
            return cache_path
        if resume_download:
            incomplete_path = cache_path + '.incomplete'
            # BUG FIX: the decorator was missing -- a bare generator function
            # cannot be used in a `with` statement.
            @contextmanager
            def _resumable_file_manager() -> 'io.BufferedWriter':
                with open(incomplete_path, 'ab') as f:
                    yield f
            temp_file_manager = _resumable_file_manager
            if os.path.exists(incomplete_path):
                resume_size = os.stat(incomplete_path).st_size
            else:
                resume_size = 0
        else:
            temp_file_manager = partial(tempfile.NamedTemporaryFile, mode='wb', dir=cache_dir, delete=False)
            resume_size = 0
        with temp_file_manager() as temp_file:
            logger.info(f'{url} not found in cache or force_download set to True, downloading to {temp_file.name}')
            # BUG FIX: this call was truncated; restore the module's streaming
            # download helper.
            # NOTE(review): confirm `http_get` is the helper this module defines.
            http_get(url_to_download, temp_file, proxies=proxies, resume_size=resume_size, headers=headers)
        logger.info(f'storing {url} in cache at {cache_path}')
        os.replace(temp_file.name, cache_path)
        # Give the cached file 0666-minus-umask permissions.
        umask = os.umask(438)
        os.umask(umask)
        os.chmod(cache_path, (438 & (~ umask)))
        logger.info(f'creating metadata file for {cache_path}')
        meta = {'url': url, 'etag': etag}
        meta_path = cache_path + '.json'
        with open(meta_path, 'w') as meta_file:
            json.dump(meta, meta_file)
    return cache_path
def bcd_beamforming_given_csi(file_path_channel, file_path_beamforming='bcd_perfect_csi.mat'):
    """Evaluate BCD beamforming solutions (from MATLAB .mat files) on the true channel."""
    channel = sio.loadmat(file_path_channel)
    channel_true = (channel['channel_bs_user'], channel['channel_irs_user'], channel['channel_bs_irs'])
    beamforming = sio.loadmat(file_path_beamforming)
    w_bcd = beamforming['w_all']
    theta_bcd = beamforming['theta_all']
    if w_bcd.ndim == 2:
        # Add a trailing singleton axis so a single solution matches the batch layout.
        w_bcd = np.reshape(w_bcd, (w_bcd.shape[0], w_bcd.shape[1], 1))
    # Conjugate to match the convention expected by compute_rate_batch
    # (MATLAB-exported data) -- TODO confirm.
    w_bcd = w_bcd.conjugate()
    rate_sum, rate_all = compute_rate_batch(w_bcd, theta_bcd, channel_true, Pt=5)
    rate_sum_matlab = np.squeeze(beamforming['rate_perfect'])
    print('===Beamforming via BCD given csi:')
    print('Sum rate:', rate_sum, rate_sum_matlab)
    return (rate_sum, rate_all)
def compute_aspect_term_metrics(result_dict, labels, predictions):
    """Compute aspect-term accuracy/PRF metrics and merge them into *result_dict*."""
    gold = []
    hyp = []
    exact_matches = 0
    for true, pred in zip(labels, predictions):
        # Extract the span between the <|term|> and <|endofterm|> markers.
        l = true.split('<|term|>')[(- 1)].split('<|endofterm|>')[0].strip()
        p = pred.split('<|term|>')[(- 1)].split('<|endofterm|>')[0].strip()
        l_norm, p_norm = sort_output(l, p)
        gold.append(l_norm)
        hyp.append(p_norm)
        if l_norm == p_norm:
            exact_matches += 1
    all_acc = exact_matches / len(gold)
    acc = accuracy_score(gold, hyp)
    # sklearn's accuracy must agree with the manual exact-match count.
    assert acc == all_acc
    prec, recall, fscore, _ = precision_recall_fscore_support(gold, hyp, average='macro')
    prec_extract, recall_extract, f1_extract, _, _, _ = aspect_extraction(gold, hyp)
    acc_polarity, acc_polarity_true, acc_polarity_correct, _, _ = aspect_polarity_estimation(gold, hyp)
    result_dict.update({'term_acc': all_acc, 'term_prec': prec, 'term_recall': recall, 'term_fscorce': fscore, 'term_extract_prec': prec_extract, 'term_extract_recall': recall_extract, 'term_extract_fscorce': f1_extract, 'term_polarity_acc': acc_polarity, 'term_polarity_acc_true': acc_polarity_true, 'term_polarity_acc_correct': acc_polarity_correct})
    return result_dict
def _get_fig_filenames(ebase, images_dir):
    """Yield the figure filename(s) generated for example *ebase*.

    Custom per-example view options (module-level ``custom``) may either name
    explicit images or map suffixes to view settings; otherwise a single
    default ``<fig_base>.png`` is yielded.
    """
    fig_base = ebase2fbase(ebase)
    # BUG FIX: initialize up front so examples WITHOUT a `custom` entry fall
    # through to the default single-figure branch instead of raising NameError.
    custom_view_options = None
    if ebase in custom:
        custom_options = custom.get(ebase)
        if 'sfepy-view-options' in custom_options:
            custom_view_options = custom_options['sfepy-view-options']
            for fig_filename in _get_image_names(custom_options):
                yield os.path.join(images_dir, _make_fig_name(fig_base, fig_filename))
        else:
            custom_view_options = custom_options
    if custom_view_options:
        # One figure per configured suffix, in sorted order.
        for suffix in sorted(custom_view_options.keys()):
            yield os.path.join(images_dir, fig_base + suffix + '.png')
    else:
        yield os.path.join(images_dir, fig_base + '.png')
def extra_index_url():
    """Build the appendable ``--extra-index-url`` command-line option."""
    return Option('--extra-index-url', dest='extra_index_urls', metavar='URL', action='append', default=[], help='Extra URLs of package indexes to use in addition to --index-url. Should follow the same rules as --index-url.')
def test_gammaincc_neg_x_array():
    """gammaincc must reject an array argument containing a negative x."""
    xs = [3.0, 2.0, 1.0, 0.0, -1.0]
    with pytest.raises(ValueError):
        gammaincc(0.5, xs)
class PipelineDataset(Dataset):
    """Dataset wrapper that applies a processing function to every item.

    ``process`` is invoked as ``process(item, **params)`` on each access, so
    the transformation happens lazily, per item.
    """

    def __init__(self, dataset, process, params):
        self.dataset = dataset  # underlying indexable dataset
        self.process = process  # callable applied to each raw item
        self.params = params    # keyword arguments forwarded to `process`

    def __len__(self):
        return len(self.dataset)

    def __getitem__(self, index):
        raw_item = self.dataset[index]
        return self.process(raw_item, **self.params)
class CBLoss(Loss):
    """Loss wrapper for CatBoost.

    Resolves either a string loss name (through the module-level
    ``_cb_loss_mapping`` / ``_cb_loss_params_mapping`` tables) or a custom
    callable objective, plus the matching evaluation metric.
    """

    def __init__(self, loss: Union[(str, Callable)], loss_params: Optional[Dict]=None, fw_func: Optional[Callable]=None, bw_func: Optional[Callable]=None):
        # Raw loss parameters as passed by the caller (may be remapped below).
        self.loss_params = {}
        if (loss_params is not None):
            self.loss_params = loss_params
        if (type(loss) is str):
            # Known string loss: look up the CatBoost name and the default
            # forward/backward target transforms.
            if (loss in _cb_loss_mapping):
                (loss_name, fw_func, bw_func) = _cb_loss_mapping[loss]
                if (loss in _cb_loss_params_mapping):
                    # Rename generic parameter keys to CatBoost's spelling.
                    mapped_params = {_cb_loss_params_mapping[loss][k]: v for (k, v) in self.loss_params.items()}
                    self.fobj = None
                    self.fobj_name = cb_str_loss_wrapper(loss_name, **mapped_params)
                else:
                    self.fobj = None
                    self.fobj_name = loss_name
            else:
                raise ValueError('Unexpected loss for catboost')
        else:
            # Custom callable objective is passed straight through.
            self.fobj = loss
            self.fobj_name = None
        # Optional caller-supplied target transforms override the defaults.
        if (fw_func is not None):
            self._fw_func = fw_func
        if (bw_func is not None):
            self._bw_func = bw_func
        self.fobj_params = {}
        if (loss_params is not None):
            self.fobj_params = loss_params
        # Metric is configured later via set_callback_metric().
        self.metric = None
        self.metric_name = None

    def set_callback_metric(self, metric: Union[(str, Callable)], greater_is_better: Optional[bool]=None, metric_params: Optional[Dict]=None, task_name: str=None):
        """Resolve the evaluation metric for *task_name*.

        NOTE(review): for 'multi:reg'/'multilabel' tasks the objective set in
        __init__ is overridden here, since CatBoost supports only
        MultiRMSE / MultiCrossEntropy for those tasks.
        """
        assert (task_name in ['binary', 'reg', 'multiclass', 'multi:reg', 'multilabel']), 'Unknown task name: {}'.format(task_name)
        self.metric_params = {}
        if (metric_params is not None):
            self.metric_params = metric_params
        if (type(metric) is str):
            self.metric = None
            _metric_dict = _cb_metrics_dict[task_name]
            if (task_name == 'multi:reg'):
                logger.info2('CatBoost supports only MultiRMSE metric and loss for multi:reg task.')
                self.fobj = None
                self.fobj_name = 'MultiRMSE'
            if (task_name == 'multilabel'):
                logger.info2('CatBoost uses as obj. MultiCrossEntropy.')
                self.fobj = None
                self.fobj_name = 'MultiCrossEntropy'
            if (metric in _cb_metric_params_mapping):
                # Rename generic metric-parameter keys to CatBoost's spelling.
                metric_params = {_cb_metric_params_mapping[metric][k]: v for (k, v) in self.metric_params.items()}
                self.metric_name = cb_str_loss_wrapper(_metric_dict[metric], **metric_params)
            else:
                self.metric_name = _metric_dict[metric]
        else:
            # Non-string metric: mirror the objective's configuration.
            self.metric_name = self.fobj_name
            self.metric_params = self.fobj_params
            self.metric = None
            if (task_name == 'multi:reg'):
                logger.info2('CatBoost supports only MultiRMSE metric and loss for multi:reg task.')
                self.fobj = None
                self.fobj_name = 'MultiRMSE'
                self.metric_name = 'MultiRMSE'
            if (task_name == 'multilabel'):
                logger.info2('CatBoost uses as obj. MultiCrossEntropy.')
                self.fobj = None
                self.fobj_name = 'MultiCrossEntropy'
                self.metric_name = 'MultiCrossEntropy'
class PatchEmbed(nn.Module):
    """Overlapping patch embedding: strided conv projection + LayerNorm."""

    def __init__(self, c1=3, c2=32, patch_size=7, stride=4):
        super().__init__()
        # Half-kernel padding keeps the spatial grid aligned with the stride.
        self.proj = nn.Conv2d(c1, c2, patch_size, stride, patch_size // 2)
        self.norm = nn.LayerNorm(c2)

    def forward(self, x: Tensor) -> Tensor:
        projected = self.proj(x)
        _, _, H, W = projected.shape
        # (B, C, H, W) -> (B, H*W, C): the token layout normalized below.
        tokens = projected.flatten(2).transpose(1, 2)
        return (self.norm(tokens), H, W)
class RNN(nn.Module):
    """Thin wrapper around nn.RNN operating on batch-first input.

    When ``bidirectional`` is set, the per-direction hidden size is halved so
    the concatenated output keeps the requested ``hidden_size``.
    """

    def __init__(self, input_size=50, hidden_size=256, dropout=0, bidirectional=False, num_layers=1, activation_function='tanh'):
        super().__init__()
        if bidirectional:
            # BUG FIX: use integer division -- `/=` produced a float, which
            # nn.RNN rejects as a hidden size.
            hidden_size //= 2
        self.rnn = nn.RNN(input_size, hidden_size, num_layers, nonlinearity=activation_function, dropout=dropout, bidirectional=bidirectional)

    def forward(self, x):
        # nn.RNN expects (seq, batch, feat); convert from batch-first and back.
        x = x.transpose(0, 1)
        (x, h) = self.rnn(x)
        x = x.transpose(0, 1)
        return x
class BaseConfig:
    """Config base class that replaces nested class attributes with instances.

    Any attribute that is itself a class is instantiated (recursively) at
    construction time, so configurations can be written as nested class
    definitions but used as nested objects.
    """

    def __init__(self) -> None:
        self.init_member_classes(self)

    # BUG FIX: this helper takes its target object explicitly, so it must be
    # a staticmethod -- as a plain method, `self.init_member_classes(self)`
    # passed `self` twice and raised TypeError.
    @staticmethod
    def init_member_classes(obj):
        for key in dir(obj):
            if (key == '__class__'):  # never instantiate the object's own class
                continue
            var = getattr(obj, key)
            if inspect.isclass(var):
                i_var = var()                       # replace the class with an instance
                setattr(obj, key, i_var)
                BaseConfig.init_member_classes(i_var)  # recurse into the new instance
@timed('Solving conda environment')
def conda_solve(name=None, prefix=None, channels=('pytorch-nightly',), override_channels=False):
    """Perform a dry-run conda solve and extract the nightly package URLs.

    Returns ``(deps, pytorch, platform, existing_env, env_opts)`` where
    *deps* holds every non-pytorch package URL, *pytorch* the pytorch package
    URL and *platform* its conda platform tag.

    NOTE(review): the line preceding this function was the bare expression
    ``('Solving conda environment')``; it is restored above as a ``timed``
    decorator -- confirm that matches the helper defined in this module.
    """
    # Solve against an existing environment when a prefix or name is given;
    # otherwise solve into a scratch environment.
    if prefix is not None:
        existing_env = True
        env_opts = ['--prefix', prefix]
    elif name is not None:
        existing_env = True
        env_opts = ['--name', name]
    else:
        existing_env = False
        env_opts = ['--name', 'pytorch-deps']
    if existing_env:
        cmd = ['conda', 'install', '--yes', '--dry-run', '--json']
        cmd.extend(env_opts)
    else:
        cmd = ['conda', 'create', '--yes', '--dry-run', '--json', '--name', '__pytorch__']
    channel_args = _make_channel_args(channels=channels, override_channels=override_channels)
    cmd.extend(channel_args)
    cmd.extend(SPECS_TO_INSTALL)
    p = subprocess.run(cmd, capture_output=True, check=True)
    solve = json.loads(p.stdout)
    link = solve['actions']['LINK']
    deps = []
    pytorch = None
    platform = None
    for pkg in link:
        url = URL_FORMAT.format(**pkg)
        if pkg['name'] == 'pytorch':
            pytorch = url
            platform = pkg['platform']
        else:
            deps.append(url)
    # BUG FIX: previously `pytorch`/`platform` were unbound (NameError) when
    # the solve produced no pytorch package; fail with a clear message instead.
    if pytorch is None:
        raise RuntimeError('conda solve did not produce a pytorch package')
    return (deps, pytorch, platform, existing_env, env_opts)
def load_extension_if_needed():
    """Compile/load the `cdf_ops` C++ extension once per process (idempotent)."""
    global _extension_loaded
    if not _extension_loaded:
        if _warn_first_load:
            # First compilation can be slow; give users a suppressible heads-up.
            warnings.warn('Loading `cdf_ops` extension. If this is the first compilation on this machine, up to 10 minutes is needed. Subsequent loading will use cached results. Use `pqe.cdf_ops.disable_load_extension_warning()` to suppress this warning.')
        load(name='cdf_ops', sources=get_source_files(), extra_cflags=get_extra_cflags(), extra_cuda_cflags=get_extra_cuda_cflags(), is_python_module=False)
        _extension_loaded = True
# Subsets and Splits
# No community queries yet
# The top public SQL queries from the community will appear here once available.