code stringlengths 101 5.91M |
|---|
class _infix_wrapper():
function = None
def __init__(self, left=None, right=None):
self.left = left
self.right = right
def __call__(self, *args, **kwds):
return self.function(*args, **kwds)
def _left(self, right):
if (self.left is None):
if (self.right is None):
new = copy(self)
new.right = right
return new
else:
raise SyntaxError('Infix operator already has its right argument')
else:
return self.function(self.left, right)
def _right(self, left):
if (self.right is None):
if (self.left is None):
new = copy(self)
new.left = left
return new
else:
raise SyntaxError('Infix operator already has its left argument')
else:
return self.function(left, self.right) |
def test_select(with_global_metadata):
    """Selecting one row group yields its rows; bad selections raise."""
    meta = ak.metadata_from_parquet(with_global_metadata, row_groups=[1])
    assert meta['col_counts'] == [2]
    # Duplicate, negative, and out-of-range row-group indices are rejected.
    for bad_groups in ([1, 1], [-1], [4]):
        with pytest.raises(ValueError):
            ak.metadata_from_parquet(with_global_metadata, row_groups=bad_groups)
def test_feedback_block_heatmap_attention():
    """Two consecutive passes through the block must preserve the feature shape."""
    features = torch.rand(2, 16, 32, 32)
    heatmap = torch.rand(2, 5, 32, 32)
    block = FeedbackBlockHeatmapAttention(16, 2, 8, 5, 2)
    out_first = block(features, heatmap)
    assert out_first.shape == features.shape
    # Feed the output back in (feedback pass): shape is still preserved.
    out_second = block(out_first, heatmap)
    assert out_second.shape == out_first.shape
def _make_legal_action_mask(state: State, hand, c_p, new_tile):
    """Build the boolean legal-action mask for player ``c_p`` right after
    drawing ``new_tile`` (mahjong-style action space of size NUM_ACTION)."""
    mask = jnp.zeros(NUM_ACTION, dtype=jnp.bool_)
    # Any held tile kind (indices 0..33) may be discarded...
    mask = mask.at[:34].set(hand[c_p] > 0)
    # ...except the freshly drawn tile, which is discarded via TSUMOGIRI.
    mask = mask.at[new_tile].set(FALSE)
    mask = mask.at[Action.TSUMOGIRI].set(TRUE)
    # Kan on the drawn tile: closed kan, or added kan onto an existing pon.
    kakan_ok = (Hand.can_kakan(hand[c_p], new_tile) & state._pon[(c_p, new_tile)]) > 0
    mask = mask.at[new_tile + 34].set(Hand.can_ankan(hand[c_p], new_tile) | kakan_ok)
    # Riichi is unavailable when any of these flags hold — note the
    # `_is_menzen` term blocks it here; presumably intentional, verify.
    riichi_blocked = (state._riichi[c_p] | state._is_menzen[c_p]) | (state._next_deck_ix < (13 + 4))
    mask = mask.at[Action.RIICHI].set(
        jax.lax.cond(riichi_blocked, (lambda: FALSE), (lambda: Hand.can_riichi(hand[c_p])))
    )
    # Tsumo requires a complete hand AND at least one yaku.
    has_yaku = Yaku.judge(
        state._hand[c_p], state._melds[c_p], state._n_meld[c_p],
        state._last_draw, state._riichi[c_p], FALSE,
        _dora_array(state, state._riichi[c_p]),
    )[0].any()
    mask = mask.at[Action.TSUMO].set(Hand.can_tsumo(hand[c_p]) & has_yaku)
    return mask
class Unexpectedness(RecOnlyMetric):
    """Unexpectedness@k: the share of recommended items that do NOT come
    from the baseline recommender ``pred``.

    Per user this is ``1 - |top-k recs ∩ top-k baseline recs| / k``.
    """

    _scala_udf_name = 'getUnexpectednessMetricValue'

    def __init__(self, pred: DataFrameLike, use_scala_udf: bool=False):
        """
        :param pred: baseline recommendations to compare against
            (converted to a Spark dataframe)
        :param use_scala_udf: compute the metric via the registered Scala
            UDF instead of the Python implementation
        """
        self._use_scala_udf = use_scala_udf
        self.pred = convert2spark(pred)

    @staticmethod
    def _get_metric_value_by_user(k, *args) -> float:
        """Unexpectedness for one user at cutoff ``k``.

        ``args`` is ``(pred, base_pred)``: the user's recommended items and
        the baseline's items, each ordered by relevance.

        FIX: marked ``@staticmethod`` — the helper takes no ``self``, so an
        instance call previously bound the instance object to ``k``.
        """
        pred = args[0]
        base_pred = args[1]
        if (len(pred) == 0):
            # No recommendations for this user: nothing unexpected.
            return 0
        return (1.0 - (len((set(pred[:k]) & set(base_pred[:k]))) / k))

    def _get_enriched_recommendations(self, recommendations: SparkDataFrame, ground_truth: SparkDataFrame, max_k: int, ground_truth_users: Optional[DataFrameLike]=None) -> SparkDataFrame:
        """Attach the baseline item lists (column ``base_pred``) to the
        top-``max_k`` recommendations.

        Right joins keep every baseline user (and, if given, every user in
        ``ground_truth_users``); users missing from ``recommendations`` get
        an empty ``pred`` array so the metric evaluates to 0 overlap.
        """
        recommendations = convert2spark(recommendations)
        ground_truth_users = convert2spark(ground_truth_users)
        base_pred = self.pred
        base_recs = filter_sort(base_pred).withColumnRenamed('pred', 'base_pred')
        recommendations = get_top_k_recs(recommendations, k=max_k)
        recommendations = filter_sort(recommendations)
        recommendations = recommendations.join(base_recs, how='right', on=['user_idx'])
        if (ground_truth_users is not None):
            recommendations = recommendations.join(ground_truth_users, on='user_idx', how='right')
        return fill_na_with_empty_array(recommendations, 'pred', base_pred.schema['item_idx'].dataType)
def _length_hint(obj):
try:
return len(obj)
except (AttributeError, TypeError):
try:
get_hint = type(obj).__length_hint__
except AttributeError:
return None
try:
hint = get_hint(obj)
except TypeError:
return None
if ((hint is NotImplemented) or (not isinstance(hint, int_types)) or (hint < 0)):
return None
return hint |
def validate_json_string(json_string: str, schema_name: str) -> (dict | None):
try:
json_loaded = json.loads(json_string)
if (not validate_json(json_loaded, schema_name)):
return None
return json_loaded
except:
return None |
class GTResDataset(Dataset):
    """Dataset pairing each image in ``root_path`` with a same-named
    ground-truth image in ``gt_dir`` (``.png`` names mapped to ``.jpg``)."""

    def __init__(self, root_path, gt_dir=None, transform=None, transform_train=None):
        self.pairs = []
        for name in os.listdir(root_path):
            # Only image files participate; everything else is skipped.
            if not (name.endswith('.jpg') or name.endswith('.png')):
                continue
            src_path = os.path.join(root_path, name)
            gt_path = os.path.join(gt_dir, name).replace('.png', '.jpg')
            self.pairs.append([src_path, gt_path, None])
        self.transform = transform
        self.transform_train = transform_train

    def __len__(self):
        return len(self.pairs)

    def __getitem__(self, index):
        src_path, gt_path, _ = self.pairs[index]
        src_im = Image.open(src_path).convert('RGB')
        gt_im = Image.open(gt_path).convert('RGB')
        if self.transform:
            gt_im = self.transform(gt_im)
            src_im = self.transform(src_im)
        return (src_im, gt_im)
def load_checkpoint(filename, gpu=True):
    """Load a torch checkpoint from *filename*, mapping storages to CPU.

    :param filename: path of the checkpoint file
    :param gpu: accepted for backward compatibility; the current
        implementation always maps tensors to CPU storage
    :return: the loaded checkpoint, or ``None`` if the file is missing.
        (FIX: the original fell through to ``return checkpoint`` and raised
        UnboundLocalError when the file did not exist.)
    """
    if not os.path.exists(filename):
        print('No model found at {}'.format(filename))
        return None
    # map_location identity keeps storages where they deserialize (CPU).
    return torch.load(filename, map_location=(lambda storage, loc: storage))
class dlaplace_gen(rv_discrete):
    """Discrete Laplace (two-sided geometric) distribution, shape ``a > 0``.

    pmf(k) = tanh(a/2) * exp(-a*|k|) for integer k.
    """

    def _pmf(self, k, a):
        # tanh(a/2) normalizes the two geometric tails to sum to 1.
        return tanh(a / 2.0) * exp(-a * abs(k))

    def _cdf(self, x, a):
        k = floor(x)
        # Closed forms for the non-negative and negative branches.
        cdf_right = lambda k, a: 1.0 - exp(-a * k) / (exp(a) + 1)
        cdf_left = lambda k, a: exp(a * (k + 1)) / (exp(a) + 1)
        return _lazywhere(k >= 0, (k, a), f=cdf_right, f2=cdf_left)

    def _ppf(self, q, a):
        const = 1 + exp(a)
        # Invert whichever CDF branch applies, element-wise.
        vals = ceil(np.where(q < 1.0 / (1 + exp(-a)),
                             log(q * const) / a - 1,
                             -log((1 - q) * const) / a))
        vals1 = vals - 1
        # Step back one integer wherever the smaller value already reaches q.
        return np.where(self._cdf(vals1, a) >= q, vals1, vals)

    def _stats(self, a):
        ea = exp(a)
        mu2 = 2.0 * ea / (ea - 1.0) ** 2
        mu4 = 2.0 * ea * (ea ** 2 + 10.0 * ea + 1.0) / (ea - 1.0) ** 4
        # (mean, variance, skewness, excess kurtosis); symmetric about 0.
        return (0.0, mu2, 0.0, mu4 / mu2 ** 2 - 3.0)

    def _entropy(self, a):
        return a / sinh(a) - log(tanh(a / 2.0))
def is_tensor_method_or_property(func: Callable) -> bool:
    """True when *func* is a registered Tensor method, or a property getter
    (identified by its ``__get__`` name)."""
    is_method = func in _get_tensor_methods()
    # Keep the original evaluation order: __name__ is only consulted when
    # the membership test fails.
    return is_method or func.__name__ == '__get__'
def load_model(model_path='', mode='all', **kwds):
    """Construct and return the two-level model.

    ``model_path`` is accepted for interface compatibility but unused here —
    this function builds a fresh model and loads no weights.
    """
    return get_2lvl_model(mode=mode, **kwds)
def evaluate_one_shot(model, xloader, api, cal_mode, seed=111):
    """Correlate one-shot supernet scores with NAS-Bench ground truth.

    For every architecture in the search space this computes (a) the summed
    log-softmax probability of its edges under ``model.arch_parameters`` and
    (b) its one-shot accuracy on batches drawn from ``xloader``, then prints
    the correlation of both against the benchmark accuracies from ``api``.

    :param model: supernet exposing ``arch_parameters``, ``op_names``,
        ``max_nodes``, ``edge2index`` and ``set_cal_mode``
    :param xloader: dataloader yielding ``(inputs, targets)`` batches
    :param api: NAS-Bench API used for ground-truth accuracies
    :param cal_mode: passed to ``model.train`` (truthy -> train mode)
    :param seed: shuffle seed so the architecture order is reproducible
    :return: ``(archs, probs, accuracies)``
    """
    print('This is an old version of codes to use NAS-Bench-API, and should be modified to align with the new version. Please contact me for more details if you use this function.')
    # Snapshot weights: evaluation mutates calibration mode per architecture.
    weights = deepcopy(model.state_dict())
    model.train(cal_mode)
    with torch.no_grad():
        logits = nn.functional.log_softmax(model.arch_parameters, dim=(- 1))
    archs = CellStructure.gen_all(model.op_names, model.max_nodes, False)
    (probs, accuracies, gt_accs_10_valid, gt_accs_10_test) = ([], [], [], [])
    loader_iter = iter(xloader)
    random.seed(seed)
    random.shuffle(archs)
    # Pass 1: architecture log-probabilities vs. benchmark accuracies.
    for (idx, arch) in enumerate(archs):
        arch_index = api.query_index_by_arch(arch)
        metrics = api.get_more_info(arch_index, 'cifar10-valid', None, False, False)
        gt_accs_10_valid.append(metrics['valid-accuracy'])
        metrics = api.get_more_info(arch_index, 'cifar10', None, False, False)
        gt_accs_10_test.append(metrics['test-accuracy'])
        select_logits = []
        for (i, node_info) in enumerate(arch.nodes):
            for (op, xin) in node_info:
                node_str = '{:}<-{:}'.format((i + 1), xin)
                op_index = model.op_names.index(op)
                select_logits.append(logits[(model.edge2index[node_str], op_index)])
        cur_prob = sum(select_logits).item()
        probs.append(cur_prob)
    cor_prob_valid = np.corrcoef(probs, gt_accs_10_valid)[(0, 1)]
    cor_prob_test = np.corrcoef(probs, gt_accs_10_test)[(0, 1)]
    print('{:} correlation for probabilities : {:.6f} on CIFAR-10 validation and {:.6f} on CIFAR-10 test'.format(time_string(), cor_prob_valid, cor_prob_test))
    # Pass 2: one-shot accuracies vs. benchmark accuracies.
    for (idx, arch) in enumerate(archs):
        model.set_cal_mode('dynamic', arch)
        try:
            (inputs, targets) = next(loader_iter)
        except StopIteration:
            # FIX: was a bare ``except:`` — only a drained dataloader should
            # trigger the restart, not e.g. KeyboardInterrupt or real errors.
            loader_iter = iter(xloader)
            (inputs, targets) = next(loader_iter)
        (_, logits) = model(inputs.cuda())
        (_, preds) = torch.max(logits, dim=(- 1))
        correct = (preds == targets.cuda()).float()
        accuracies.append(correct.mean().item())
        # Report running correlations every 500 archs and on the last one.
        if ((idx != 0) and (((idx % 500) == 0) or ((idx + 1) == len(archs)))):
            cor_accs_valid = np.corrcoef(accuracies, gt_accs_10_valid[:(idx + 1)])[(0, 1)]
            cor_accs_test = np.corrcoef(accuracies, gt_accs_10_test[:(idx + 1)])[(0, 1)]
            print('{:} {:05d}/{:05d} mode={:5s}, correlation : accs={:.5f} for CIFAR-10 valid, {:.5f} for CIFAR-10 test.'.format(time_string(), idx, len(archs), ('Train' if cal_mode else 'Eval'), cor_accs_valid, cor_accs_test))
    model.load_state_dict(weights)
    return (archs, probs, accuracies)
class DenseNet121(nn.Module):
    """DenseNet-121 classifier whose stem convolution is widened to accept
    ``n_inputs`` channels instead of the stock 3."""

    def __init__(self, n_inputs=12, numCls=17):
        super().__init__()
        backbone = models.densenet121(pretrained=False)
        # Swap the 3-channel stem for an n_inputs-channel conv and keep the
        # remainder of the DenseNet feature extractor unchanged.
        stem = nn.Conv2d(n_inputs, 64, kernel_size=(7, 7), stride=(2, 2),
                         padding=(3, 3), bias=False)
        self.encoder = nn.Sequential(stem, *backbone.features[1:])
        self.classifier = nn.Linear(65536, numCls, bias=True)
        self.apply(weights_init_kaiming)
        self.apply(fc_init_weights)

    def forward(self, x):
        feats = self.encoder(x)
        flat = feats.view(feats.size(0), -1)
        return self.classifier(flat)
class TestImbalance(unittest.TestCase):
    """Smoke test: run correlation/imbalance/mutual-info/chi2 analyzers on
    the adult census data and render the explanations in a dashboard."""

    def test(self):
        columns = ['Age', 'Workclass', 'fnlwgt', 'Education', 'Education-Num', 'Marital Status', 'Occupation', 'Relationship', 'Race', 'Sex', 'Capital Gain', 'Capital Loss', 'Hours per week', 'Country', 'label']
        dataset_dir = os.path.join(os.path.dirname(os.path.abspath(__file__)), '../datasets')
        raw = np.genfromtxt(os.path.join(dataset_dir, 'adult.data'), delimiter=', ', dtype=str)
        categorical = [columns[i] for i in (1, 3, 5, 6, 7, 8, 9, 13)]
        table = Tabular(raw, feature_columns=columns, categorical_columns=categorical, target_column='label')
        analyzer = DataAnalyzer(explainers=['correlation', 'imbalance#0', 'imbalance#1', 'imbalance#2', 'imbalance#3', 'mutual', 'chi2'], mode='classification', data=table)
        explanations = analyzer.explain_global(params={'imbalance#0': {'features': ['Sex']}, 'imbalance#1': {'features': ['Race']}, 'imbalance#2': {'features': ['Sex', 'Race']}, 'imbalance#3': {'features': ['Marital Status', 'Age']}})
        Dashboard(data_explanations=explanations).show()
def _key_complex_for_display(a):
    """Sort key for displaying numbers: values with zero imaginary part sort
    first (tag 0) by real part; genuinely complex values (tag 1) compare by a
    truncated real part, then by imaginary part."""
    re_part = a.real()
    im_part = a.imag()
    if not im_part:
        return (0, re_part)
    # Near-zero real parts are collapsed to exactly 0 so tiny numerical
    # noise does not affect the ordering.
    threshold = re_part.parent()(1e-10)
    if re_part.abs() < threshold:
        re_key = 0
    elif re_part.prec() < 34:
        # Too little precision to truncate meaningfully; use as-is.
        re_key = re_part
    else:
        re_key = re_part.n(digits=9)
    return (1, re_key, im_part)
def convLayer(batchNorm, in_planes, out_planes, kernel_size=3, stride=1, dilation=1, bias=False):
    """Conv2d + optional BatchNorm + ELU block.

    NOTE(review): when ``batchNorm`` is False the conv always uses
    ``bias=True`` and the ``bias`` argument is ignored — presumably
    intentional (a bias is redundant directly before BatchNorm); confirm.
    """
    pad = (kernel_size - 1) // 2 + dilation - 1
    conv_kwargs = dict(kernel_size=kernel_size, stride=stride, padding=pad, dilation=dilation)
    if batchNorm:
        layers = [
            nn.Conv2d(in_planes, out_planes, bias=bias, **conv_kwargs),
            nn.BatchNorm2d(out_planes),
            nn.ELU(inplace=True),
        ]
    else:
        layers = [
            nn.Conv2d(in_planes, out_planes, bias=True, **conv_kwargs),
            nn.ELU(inplace=True),
        ]
    return nn.Sequential(*layers)
_module()
class Collect(object):
    """Pipeline step that gathers the requested result keys and bundles the
    available meta keys into an ``img_metas`` DataContainer (cpu_only)."""

    def __init__(self, keys, meta_keys=('filename', 'ori_filename', 'ori_shape', 'img_shape', 'flip', 'flip_direction', 'img_norm_cfg')):
        self.keys = keys
        self.meta_keys = meta_keys

    def __call__(self, results):
        # Only meta keys actually present in the results are collected.
        meta = {k: results[k] for k in self.meta_keys if k in results}
        data = {'img_metas': DC(meta, cpu_only=True)}
        for k in self.keys:
            data[k] = results[k]
        return data

    def __repr__(self):
        return self.__class__.__name__ + f'(keys={self.keys}, meta_keys={self.meta_keys})'
def get_metrics():
    """Return TensorFlow's metrics module, preferring the TF2 compat
    location and falling back to the TF1 path."""
    try:
        metrics_module = tf.compat.v1.metrics
    except AttributeError:
        metrics_module = tf.metrics
    return metrics_module
class TargetFilter(Wrapper, Dataset):
    """Dataset wrapper exposing only the samples whose class — taken from
    the aux dict's 'cls' entry, or from the unique values of the target —
    intersects the ``keep`` set."""

    def __init__(self, dataset, keep):
        super().__init__(dataset)
        self.ds = dataset
        self.keep = set(keep)
        self.slugs = self.load_slugs()

    def load_slugs(self):
        """Scan the wrapped dataset once and record indices of kept samples."""
        kept = []
        for idx, sample in enumerate(self.ds):
            target, aux = sample[-2], sample[-1]
            if 'cls' in aux:
                wanted = aux['cls'] in self.keep
            else:
                # No explicit class: keep when any label in target matches.
                wanted = bool(self.keep & set(np.unique(target)))
            if wanted:
                kept.append(idx)
        return kept

    def __getitem__(self, idx):
        return self.ds[self.slugs[idx]]

    def __len__(self):
        return len(self.slugs)
def obj_from_dict(info, parent=None, default_args=None):
    """Instantiate an object from a config dict.

    ``info`` must contain a 'type' key naming a class (a string resolved on
    *parent*, or in ``sys.modules``) or holding an actual type; the rest of
    the dict becomes constructor kwargs, with ``default_args`` filling in
    any keys not already present.
    """
    assert isinstance(info, dict) and ('type' in info)
    assert default_args is None or isinstance(default_args, dict)
    kwargs = copy.deepcopy(info)
    obj_type = kwargs.pop('type')
    if torchie.is_str(obj_type):
        if parent is not None:
            obj_type = getattr(parent, obj_type)
        else:
            obj_type = sys.modules[obj_type]
    elif not isinstance(obj_type, type):
        raise TypeError('type must be a str or valid type, but got {}'.format(type(obj_type)))
    for name, value in (default_args or {}).items():
        kwargs.setdefault(name, value)
    return obj_type(**kwargs)
_torch
_torchaudio
class ClapFeatureExtractionTest(SequenceFeatureExtractionTestMixin, unittest.TestCase):
feature_extraction_class = ClapFeatureExtractor
    def setUp(self):
        # Shared tester object that supplies feature-extractor configs and
        # test inputs for every test in this case.
        self.feat_extract_tester = ClapFeatureExtractionTester(self)
def test_call(self):
feature_extractor = self.feature_extraction_class(**self.feat_extract_tester.prepare_feat_extract_dict())
speech_inputs = [floats_list((1, x))[0] for x in range(800, 1400, 200)]
np_speech_inputs = [np.asarray(speech_input) for speech_input in speech_inputs]
input_features = feature_extractor(np_speech_inputs, padding='max_length', return_tensors='np').input_features
self.assertTrue((input_features.ndim == 4))
encoded_sequences_1 = feature_extractor(speech_inputs[0], return_tensors='np').input_features
encoded_sequences_2 = feature_extractor(np_speech_inputs[0], return_tensors='np').input_features
self.assertTrue(np.allclose(encoded_sequences_1, encoded_sequences_2, atol=0.001))
encoded_sequences_1 = feature_extractor(speech_inputs, return_tensors='np').input_features
encoded_sequences_2 = feature_extractor(np_speech_inputs, return_tensors='np').input_features
for (enc_seq_1, enc_seq_2) in zip(encoded_sequences_1, encoded_sequences_2):
self.assertTrue(np.allclose(enc_seq_1, enc_seq_2, atol=0.001))
def test_double_precision_pad(self):
import torch
feature_extractor = self.feature_extraction_class(**self.feat_extract_tester.prepare_feat_extract_dict())
np_speech_inputs = np.random.rand(100, 32).astype(np.float64)
py_speech_inputs = np_speech_inputs.tolist()
for inputs in [py_speech_inputs, np_speech_inputs]:
np_processed = feature_extractor.pad([{'input_features': inputs}], return_tensors='np')
self.assertTrue((np_processed.input_features.dtype == np.float32))
pt_processed = feature_extractor.pad([{'input_features': inputs}], return_tensors='pt')
self.assertTrue((pt_processed.input_features.dtype == torch.float32))
def _load_datasamples(self, num_samples):
from datasets import load_dataset
ds = load_dataset('hf-internal-testing/librispeech_asr_dummy', 'clean', split='validation')
speech_samples = ds.sort('id').select(range(num_samples))[:num_samples]['audio']
return [x['array'] for x in speech_samples]
def test_integration_fusion_short_input(self):
EXPECTED_INPUT_FEATURES = torch.tensor([[[(- 20.1049), (- 19.9764), (- 20.0731), (- 19.5055), (- 27.5018), (- 22.5761), (- 26.6071), (- 29.0091), (- 26.4659), (- 26.4236), (- 28.8808), (- 31.919), (- 32.4848), (- 34.1186), (- 34.034), (- 32.8803), (- 30.9895), (- 37.6238), (- 38.0347), (- 40.6263), (- 36.3496), (- 42.2533), (- 32.9132), (- 27.7068), (- 29.3704), (- 30.3208), (- 22.5972), (- 27.1494), (- 30.1975), (- 31.1005), (- 29.9372), (- 27.1917), (- 25.9806), (- 30.3489), (- 33.238), (- 31.9062), (- 36.5498), (- 32.8721), (- 30.5629), (- 27.4674), (- 22.2232), (- 22.5653), (- 16.3868), (- 17.2713), (- 25.9738), (- 30.6256), (- 34.3766), (- 31.1292), (- 27.895), (- 27.0588), (- 25.6206), (- 23.0712), (- 26.605), (- 28.0112), (- 32.6847), (- 34.3396), (- 34.9738), (- 35.8463), (- 39.2324), (- 37.1188), (- 33.3705), (- 28.923), (- 28.9112), (- 28.6578)], [(- 36.7233), (- 30.0587), (- 24.8431), (- 18.4611), (- 16.8149), (- 23.9319), (- 32.858), (- 34.2264), (- 27.4332), (- 26.8027), (- 29.2721), (- 33.9033), (- 39.3403), (- 35.3232), (- 26.8076), (- 28.646), (- 35.278), (- 36.0738), (- 35.4996), (- 37.7631), (- 39.5056), (- 34.7112), (- 36.8741), (- 34.1066), (- 32.9474), (- 33.6604), (- 27.9937), (- 30.9594), (- 26.2928), (- 32.0485), (- 29.2151), (- 29.2917), (- 32.7308), (- 29.6542), (- 31.1454), (- 37.0088), (- 32.3388), (- 37.3086), (- 31.1024), (- 27.2889), (- 19.6788), (- 21.1488), (- 19.5144), (- 14.8889), (- 21.2006), (- 24.7488), (- 27.794), (- 31.1058), (- 27.5068), (- 21.5737), (- 22.378), (- 21.5151), (- 26.3086), (- 30.9223), (- 33.5043), (- 32.0307), (- 37.3806), (- 41.6188), (- 45.665), (- 40.5131), (- 32.5023), (- 26.7385), (- 26.3709), (- 26.7761)]], [[(- 25.7496), (- 24.9339), (- 24.1357), (- 23.1271), (- 23.7853), (- 26.1264), (- 29.1456), (- 33.206), (- 37.8179), (- 42.4833), (- 41.9386), (- 41.2164), (- 42.3566), (- 44.2575), (- 40.0217), (- 36.6794), (- 36.6974), (- 38.7819), (- 42.088), (- 45.556), (- 39.9368), (- 36.3219), (- 35.5981), (- 
36.6434), (- 35.1851), (- 33.0684), (- 30.0437), (- 30.201), (- 34.3476), (- 42.1373), (- 38.8039), (- 37.3355), (- 40.4576), (- 41.0485), (- 40.6377), (- 38.2275), (- 42.7481), (- 34.6084), (- 34.7048), (- 29.5149), (- 26.3935), (- 26.8952), (- 34.1336), (- 26.2904), (- 28.2571), (- 32.5642), (- 36.724), (- 35.5334), (- 38.2451), (- 34.8177), (- 28.9754), (- 25.1096), (- 27.9768), (- 32.3184), (- 37.0269), (- 40.5136), (- 40.8061), (- 36.4948), (- 40.3767), (- 38.9671), (- 38.3552), (- 34.125), (- 30.9035), (- 31.6112)], [(- 100.0), (- 100.0), (- 100.0), (- 100.0), (- 100.0), (- 100.0), (- 100.0), (- 100.0), (- 100.0), (- 100.0), (- 100.0), (- 100.0), (- 100.0), (- 100.0), (- 100.0), (- 100.0), (- 100.0), (- 100.0), (- 100.0), (- 100.0), (- 100.0), (- 100.0), (- 100.0), (- 100.0), (- 100.0), (- 100.0), (- 100.0), (- 100.0), (- 100.0), (- 100.0), (- 100.0), (- 100.0), (- 100.0), (- 100.0), (- 100.0), (- 100.0), (- 100.0), (- 100.0), (- 100.0), (- 100.0), (- 100.0), (- 100.0), (- 100.0), (- 100.0), (- 100.0), (- 100.0), (- 100.0), (- 100.0), (- 100.0), (- 100.0), (- 100.0), (- 100.0), (- 100.0), (- 100.0), (- 100.0), (- 100.0), (- 100.0), (- 100.0), (- 100.0), (- 100.0), (- 100.0), (- 100.0), (- 100.0), (- 100.0)]], [[(- 25.7496), (- 24.9339), (- 24.1357), (- 23.1271), (- 23.7853), (- 26.1264), (- 29.1456), (- 33.206), (- 37.8179), (- 42.4833), (- 41.9386), (- 41.2164), (- 42.3566), (- 44.2575), (- 40.0217), (- 36.6794), (- 36.6974), (- 38.7819), (- 42.088), (- 45.556), (- 39.9368), (- 36.3219), (- 35.5981), (- 36.6434), (- 35.1851), (- 33.0684), (- 30.0437), (- 30.201), (- 34.3476), (- 42.1373), (- 38.8039), (- 37.3355), (- 40.4576), (- 41.0485), (- 40.6377), (- 38.2275), (- 42.7481), (- 34.6084), (- 34.7048), (- 29.5149), (- 26.3935), (- 26.8952), (- 34.1336), (- 26.2904), (- 28.2571), (- 32.5642), (- 36.724), (- 35.5334), (- 38.2451), (- 34.8177), (- 28.9754), (- 25.1096), (- 27.9768), (- 32.3184), (- 37.0269), (- 40.5136), (- 40.8061), (- 36.4948), (- 40.3767), 
(- 38.9671), (- 38.3552), (- 34.125), (- 30.9035), (- 31.6112)], [(- 100.0), (- 100.0), (- 100.0), (- 100.0), (- 100.0), (- 100.0), (- 100.0), (- 100.0), (- 100.0), (- 100.0), (- 100.0), (- 100.0), (- 100.0), (- 100.0), (- 100.0), (- 100.0), (- 100.0), (- 100.0), (- 100.0), (- 100.0), (- 100.0), (- 100.0), (- 100.0), (- 100.0), (- 100.0), (- 100.0), (- 100.0), (- 100.0), (- 100.0), (- 100.0), (- 100.0), (- 100.0), (- 100.0), (- 100.0), (- 100.0), (- 100.0), (- 100.0), (- 100.0), (- 100.0), (- 100.0), (- 100.0), (- 100.0), (- 100.0), (- 100.0), (- 100.0), (- 100.0), (- 100.0), (- 100.0), (- 100.0), (- 100.0), (- 100.0), (- 100.0), (- 100.0), (- 100.0), (- 100.0), (- 100.0), (- 100.0), (- 100.0), (- 100.0), (- 100.0), (- 100.0), (- 100.0), (- 100.0), (- 100.0)]], [[(- 58.526), (- 58.1155), (- 57.8623), (- 57.5059), (- 57.9178), (- 58.7171), (- 59.2343), (- 59.9833), (- 60.9764), (- 62.0722), (- 63.5723), (- 65.7111), (- 67.5153), (- 68.7088), (- 69.8325), (- 70.2987), (- 70.1548), (- 70.6233), (- 71.5702), (- 72.5159), (- 72.3821), (- 70.1817), (- 67.0315), (- 64.1387), (- 62.2202), (- 61.0717), (- 60.4951), (- 61.6005), (- 63.7358), (- 67.14), (- 67.6185), (- 65.5635), (- 64.3593), (- 63.7138), (- 63.6209), (- 66.495), (- 72.6284), (- 63.3961), (- 56.8334), (- 52.7319), (- 50.631), (- 51.3728), (- 53.5619), (- 51.919), (- 50.9708), (- 52.8684), (- 55.8073), (- 58.8227), (- 60.6991), (- 57.0547), (- 52.7611), (- 51.4388), (- 54.4892), (- 60.895), (- 66.1024), (- 72.4352), (- 67.8538), (- 65.1463), (- 68.7588), (- 72.308), (- 68.4864), (- 60.4688), (- 57.1516), (- 60.946)], [(- 100.0), (- 100.0), (- 100.0), (- 100.0), (- 100.0), (- 100.0), (- 100.0), (- 100.0), (- 100.0), (- 100.0), (- 100.0), (- 100.0), (- 100.0), (- 100.0), (- 100.0), (- 100.0), (- 100.0), (- 100.0), (- 100.0), (- 100.0), (- 100.0), (- 100.0), (- 100.0), (- 100.0), (- 100.0), (- 100.0), (- 100.0), (- 100.0), (- 100.0), (- 100.0), (- 100.0), (- 100.0), (- 100.0), (- 100.0), (- 100.0), (- 100.0), (- 
100.0), (- 100.0), (- 100.0), (- 100.0), (- 100.0), (- 100.0), (- 100.0), (- 100.0), (- 100.0), (- 100.0), (- 100.0), (- 100.0), (- 100.0), (- 100.0), (- 100.0), (- 100.0), (- 100.0), (- 100.0), (- 100.0), (- 100.0), (- 100.0), (- 100.0), (- 100.0), (- 100.0), (- 100.0), (- 100.0), (- 100.0), (- 100.0)]]])
MEL_BIN = [[976, 977], [976, 977], [976, 977], [196, 197]]
input_speech = self._load_datasamples(1)
feature_extractor = ClapFeatureExtractor()
for (padding, EXPECTED_VALUES, idx_in_mel) in zip(['repeat', 'repeatpad', None, 'pad'], EXPECTED_INPUT_FEATURES, MEL_BIN):
input_features = feature_extractor(input_speech, return_tensors='pt', padding=padding).input_features
self.assertEqual(input_features.shape, (1, 4, 1001, 64))
self.assertTrue(torch.allclose(input_features[(0, 0, idx_in_mel[0])], EXPECTED_VALUES[0], atol=0.0001))
self.assertTrue(torch.allclose(input_features[(0, 0, idx_in_mel[1])], EXPECTED_VALUES[1], atol=0.0001))
self.assertTrue(torch.all((input_features[(0, 0)] == input_features[(0, 1)])))
self.assertTrue(torch.all((input_features[(0, 0)] == input_features[(0, 2)])))
self.assertTrue(torch.all((input_features[(0, 0)] == input_features[(0, 3)])))
def test_integration_rand_trunc_short_input(self):
EXPECTED_INPUT_FEATURES = torch.tensor([[[(- 35.0483), (- 35.7865), (- 38.2884), (- 40.022), (- 42.5349), (- 44.9489), (- 43.2228), (- 44.6499), (- 47.6253), (- 49.6983), (- 50.2127), (- 52.5483), (- 52.2223), (- 51.9157), (- 49.4082), (- 51.2024), (- 57.0476), (- 56.2803), (- 58.1618), (- 60.7474), (- 55.0389), (- 60.9514), (- 59.308), (- 50.4419), (- 47.8172), (- 48.757), (- 55.2552), (- 44.5036), (- 44.1148), (- 50.8218), (- 51.0968), (- 52.9408), (- 51.1037), (- 48.9789), (- 47.5897), (- 52.0915), (- 55.4216), (- 54.1529), (- 58.0149), (- 58.0866), (- 52.7798), (- 52.6154), (- 45.9144), (- 46.2008), (- 40.7603), (- 41.1703), (- 50.225), (- 55.4112), (- 59.4818), (- 54.5795), (- 53.5552), (- 51.3668), (- 49.8358), (- 50.3186), (- 54.0452), (- 57.603), (- 61.1589), (- 61.6415), (- 63.2756), (- 66.589), (- 62.8543), (- 58.0665), (- 56.7203), (- 56.7632)], [(- 47.132), (- 37.9961), (- 34.0076), (- 36.7109), (- 47.9057), (- 48.4924), (- 43.8371), (- 44.9728), (- 48.1689), (- 52.9141), (- 57.6077), (- 52.852), (- 44.8502), (- 45.6764), (- 51.8389), (- 56.4284), (- 54.6972), (- 53.4889), (- 55.6077), (- 58.7149), (- 60.376), (- 54.0136), (- 56.073), (- 55.987), (- 54.4017), (- 53.1094), (- 53.564), (- 50.3064), (- 49.952), (- 49.3239), (- 48.1668), (- 53.4852), (- 50.4561), (- 50.8688), (- 55.197), (- 51.5538), (- 53.026), (- 59.6933), (- 54.8183), (- 59.5895), (- 55.9589), (- 50.3761), (- 44.1282), (- 44.1463), (- 43.854), (- 39.1168), (- 45.3893), (- 49.5542), (- 53.1505), (- 55.287), (- 50.3921), (- 46.8511), (- 47.4444), (- 49.5633), (- 56.0034), (- 59.0815), (- 59.0018), (- 63.7589), (- 69.5745), (- 71.5789), (- 64.0498), (- 56.0558), (- 54.3475), (- 54.7004)]], [[(- 40.3184), (- 39.7186), (- 39.8807), (- 41.6508), (- 45.3613), (- 50.4785), (- 57.0297), (- 60.4944), (- 59.1642), (- 58.9495), (- 60.4661), (- 62.53), (- 58.4759), (- 55.2865), (- 54.8973), (- 56.078), (- 57.5482), (- 59.6557), (- 64.3309), (- 65.033), (- 59.4941), (- 56.8552), (- 55.0519), (- 
55.9817), (- 56.9739), (- 55.2827), (- 54.5312), (- 51.4141), (- 50.4289), (- 51.9131), (- 57.5821), (- 63.9979), (- 59.918), (- 58.9489), (- 62.3247), (- 62.6975), (- 63.7948), (- 60.525), (- 64.6107), (- 58.7905), (- 57.0229), (- 54.3084), (- 49.8445), (- 50.4459), (- 57.0172), (- 50.6425), (- 52.5992), (- 57.4207), (- 61.6358), (- 60.654), (- 63.1968), (- 57.436), (- 52.3263), (- 51.7695), (- 57.1946), (- 62.961), (- 66.7359), (- 67.0335), (- 63.744), (- 68.1775), (- 66.3798), (- 62.865), (- 59.8972), (- 59.3139)], [(- 100.0), (- 100.0), (- 100.0), (- 100.0), (- 100.0), (- 100.0), (- 100.0), (- 100.0), (- 100.0), (- 100.0), (- 100.0), (- 100.0), (- 100.0), (- 100.0), (- 100.0), (- 100.0), (- 100.0), (- 100.0), (- 100.0), (- 100.0), (- 100.0), (- 100.0), (- 100.0), (- 100.0), (- 100.0), (- 100.0), (- 100.0), (- 100.0), (- 100.0), (- 100.0), (- 100.0), (- 100.0), (- 100.0), (- 100.0), (- 100.0), (- 100.0), (- 100.0), (- 100.0), (- 100.0), (- 100.0), (- 100.0), (- 100.0), (- 100.0), (- 100.0), (- 100.0), (- 100.0), (- 100.0), (- 100.0), (- 100.0), (- 100.0), (- 100.0), (- 100.0), (- 100.0), (- 100.0), (- 100.0), (- 100.0), (- 100.0), (- 100.0), (- 100.0), (- 100.0), (- 100.0), (- 100.0), (- 100.0), (- 100.0)]], [[(- 40.3184), (- 39.7186), (- 39.8807), (- 41.6508), (- 45.3613), (- 50.4785), (- 57.0297), (- 60.4944), (- 59.1642), (- 58.9495), (- 60.4661), (- 62.53), (- 58.4759), (- 55.2865), (- 54.8973), (- 56.078), (- 57.5482), (- 59.6557), (- 64.3309), (- 65.033), (- 59.4941), (- 56.8552), (- 55.0519), (- 55.9817), (- 56.9739), (- 55.2827), (- 54.5312), (- 51.4141), (- 50.4289), (- 51.9131), (- 57.5821), (- 63.9979), (- 59.918), (- 58.9489), (- 62.3247), (- 62.6975), (- 63.7948), (- 60.525), (- 64.6107), (- 58.7905), (- 57.0229), (- 54.3084), (- 49.8445), (- 50.4459), (- 57.0172), (- 50.6425), (- 52.5992), (- 57.4207), (- 61.6358), (- 60.654), (- 63.1968), (- 57.436), (- 52.3263), (- 51.7695), (- 57.1946), (- 62.961), (- 66.7359), (- 67.0335), (- 63.744), (- 
68.1775), (- 66.3798), (- 62.865), (- 59.8972), (- 59.3139)], [(- 100.0), (- 100.0), (- 100.0), (- 100.0), (- 100.0), (- 100.0), (- 100.0), (- 100.0), (- 100.0), (- 100.0), (- 100.0), (- 100.0), (- 100.0), (- 100.0), (- 100.0), (- 100.0), (- 100.0), (- 100.0), (- 100.0), (- 100.0), (- 100.0), (- 100.0), (- 100.0), (- 100.0), (- 100.0), (- 100.0), (- 100.0), (- 100.0), (- 100.0), (- 100.0), (- 100.0), (- 100.0), (- 100.0), (- 100.0), (- 100.0), (- 100.0), (- 100.0), (- 100.0), (- 100.0), (- 100.0), (- 100.0), (- 100.0), (- 100.0), (- 100.0), (- 100.0), (- 100.0), (- 100.0), (- 100.0), (- 100.0), (- 100.0), (- 100.0), (- 100.0), (- 100.0), (- 100.0), (- 100.0), (- 100.0), (- 100.0), (- 100.0), (- 100.0), (- 100.0), (- 100.0), (- 100.0), (- 100.0), (- 100.0)]], [[(- 73.319), (- 73.6349), (- 74.1451), (- 74.8539), (- 75.7476), (- 76.5438), (- 78.554), (- 80.1339), (- 81.8911), (- 83.756), (- 85.5387), (- 86.7466), (- 88.2072), (- 88.609), (- 88.8243), (- 89.0784), (- 89.4364), (- 89.8179), (- 91.3146), (- 92.2833), (- 91.7221), (- 90.944), (- 88.1315), (- 86.2425), (- 84.2281), (- 82.4893), (- 81.5993), (- 81.1328), (- 81.5759), (- 83.1068), (- 85.6525), (- 88.952), (- 88.9187), (- 87.2703), (- 86.3052), (- 85.7188), (- 85.8802), (- 87.9996), (- 95.0464), (- 88.0133), (- 80.8561), (- 76.5597), (- 74.2816), (- 74.8109), (- 77.3615), (- 76.0719), (- 75.3426), (- 77.6428), (- 80.9663), (- 84.5275), (- 84.9907), (- 80.5205), (- 77.2851), (- 78.6259), (- 84.774), (- 91.4535), (- 98.1894), (- 94.3872), (- 92.3735), (- 97.6807), (- 98.1501), (- 91.4344), (- 85.2842), (- 88.4338)], [(- 100.0), (- 100.0), (- 100.0), (- 100.0), (- 100.0), (- 100.0), (- 100.0), (- 100.0), (- 100.0), (- 100.0), (- 100.0), (- 100.0), (- 100.0), (- 100.0), (- 100.0), (- 100.0), (- 100.0), (- 100.0), (- 100.0), (- 100.0), (- 100.0), (- 100.0), (- 100.0), (- 100.0), (- 100.0), (- 100.0), (- 100.0), (- 100.0), (- 100.0), (- 100.0), (- 100.0), (- 100.0), (- 100.0), (- 100.0), (- 100.0), (- 100.0), (- 
100.0), (- 100.0), (- 100.0), (- 100.0), (- 100.0), (- 100.0), (- 100.0), (- 100.0), (- 100.0), (- 100.0), (- 100.0), (- 100.0), (- 100.0), (- 100.0), (- 100.0), (- 100.0), (- 100.0), (- 100.0), (- 100.0), (- 100.0), (- 100.0), (- 100.0), (- 100.0), (- 100.0), (- 100.0), (- 100.0), (- 100.0), (- 100.0)]]])
MEL_BIN = [[976, 977], [976, 977], [976, 977], [196, 197]]
input_speech = self._load_datasamples(1)
feature_extractor = ClapFeatureExtractor()
for (padding, EXPECTED_VALUES, idx_in_mel) in zip(['repeat', 'repeatpad', None, 'pad'], EXPECTED_INPUT_FEATURES, MEL_BIN):
input_features = feature_extractor(input_speech, return_tensors='pt', truncation='rand_trunc', padding=padding).input_features
self.assertEqual(input_features.shape, (1, 1, 1001, 64))
self.assertTrue(torch.allclose(input_features[(0, 0, idx_in_mel[0])], EXPECTED_VALUES[0], atol=0.0001))
self.assertTrue(torch.allclose(input_features[(0, 0, idx_in_mel[1])], EXPECTED_VALUES[1], atol=0.0001))
def test_integration_fusion_long_input(self):
    """Fused mel features for a long (5-sample concatenated) input.

    For each padding strategy the extractor must produce a fused
    (1, 4, 1001, 64) feature tensor; one mel bin of the strategy-specific
    block is compared against reference values.
    """
    EXPECTED_INPUT_FEATURES = torch.tensor([[(- 11.183), (- 10.1894), (- 8.6051), (- 4.8578), (- 1.3268), (- 8.4606), (- 14.5453), (- 9.2017), 0.5781, 16.2129, 14.8289, 3.6326, (- 3.8794), (- 6.5544), (- 2.4408), 1.9531, 6.0967, 1.759, (- 7.673), (- 6.1571), 2.0052, 16.6694, 20.6447, 21.2145, 13.4972, 15.9043, 16.8987, 4.1766, 11.9428, 21.2372, 12.3016, 4.8604, 6.7241, 1.8543, 4.9235, 5.3188, (- 0.9897), (- 1.2416), (- 6.5864), 2.9529, 2.9274, 6.4753, 10.23, 11.2127, 3.4042, (- 1.0055), (- 6.0475), (- 6.7524), (- 3.9801), (- 1.4434), 0.474, (- 0.1584), (- 4.5457), (- 8.5746), (- 8.8428), (- 13.1475), (- 9.6079), (- 8.5798), (- 4.1143), (- 3.7966), (- 7.1651), (- 6.1517), (- 8.0258), (- 12.1486)], [(- 10.2017), (- 7.9924), (- 5.9517), (- 3.9372), (- 1.9735), (- 4.313), 16.1647, 25.0592, 23.5532, 14.4974, (- 7.0778), (- 10.2262), 6.4782, 20.3454, 19.4269, 1.7976, (- 16.507), 4.938, 12.339, 6.9285, (- 13.6325), (- 8.5298), 1.0839, (- 5.9629), (- 8.4812), 3.1331, (- 2.0963), (- 16.6046), (- 14.007), (- 17.5707), (- 13.208), (- 17.2168), (- 17.777), (- 12.1111), (- 18.6184), (- 17.1897), (- 13.9801), (- 12.0426), (- 23.54), (- 25.6823), (- 23.5813), (- 18.7847), (- 20.5473), (- 25.6458), (- 19.7585), (- 27.6007), (- 28.9276), (- 24.8948), (- 25.4458), (- 22.2807), (- 19.6613), (- 19.2669), (- 15.7813), (- 19.6821), (- 24.3439), (- 22.2598), (- 28.2631), (- 30.1017), (- 32.7646), (- 33.6525), (- 27.5639), (- 22.0548), (- 27.8054), (- 29.6947)], [(- 9.2078), (- 7.2963), (- 6.2095), (- 7.9959), (- 2.928), (- 11.1843), (- 6.149), 5.0733, 19.2957, 21.4578, 14.6803, (- 3.3153), (- 6.3334), (- 2.3542), 6.9509, 15.2965, 14.662, 5.2075, (- 0.0873), 1.1919, 18.1986, 20.847, 10.8035, 2.2516, 7.6905, 7.7427, (- 1.2543), (- 5.0018), 0.9809, (- 2.1584), (- 5.458), (- 5.476), (- 11.8888), (- 9.0605), (- 8.4638), (- 9.9897), (- 0.054), (- 5.1629), 0.0483, (- 4.1504), (- 4.814), (- 7.8236), (- 9.0622), (- 10.1742), (- 8.9597), (- 11.538), (- 16.5603), (- 17.1858), (- 17.5032), (- 20.9326), (- 23.9543), (- 25.2602), (- 25.3429), (- 27.4536), (- 26.8859), (- 22.7852), (- 25.8288), (- 24.8399), (- 23.8893), (- 24.2096), (- 26.5415), (- 23.7281), (- 25.6851), (- 22.3629)], [1.3448, 2.9883, 4.0366, (- 0.8019), (- 10.4191), (- 10.0883), (- 4.3812), 0.8136, 2.1579, 0.0832, 1.0949, (- 0.9759), (- 5.5319), (- 4.6009), (- 6.5452), (- 14.9155), (- 20.1584), (- 9.3611), (- 2.4271), 1.4031, 4.991, 8.6916, 8.6785, 10.1973, 9.9029, 5.384, 7.5336, 5.2803, 2.8144, (- 0.3138), 2.2216, 5.7328, 7.5574, 7.7402, 1.0681, 3.1049, 7.0742, 6.5588, 7.3712, 5.7881, 8.6874, 8.7725, 2.8133, (- 4.5809), (- 6.1317), (- 5.1719), (- 5.0192), (- 9.0977), (- 10.9391), (- 6.0769), 1.6016, (- 0.8965), (- 7.2252), (- 7.8632), (- 11.4468), (- 11.7446), (- 10.7447), (- 7.0601), (- 2.7748), (- 4.1798), (- 2.8433), (- 3.1352), 0.8097, 6.4212]])
    MEL_BIN = 963
    input_speech = torch.cat([torch.tensor(x) for x in self._load_datasamples(5)])
    feature_extractor = ClapFeatureExtractor()
    for (padding, EXPECTED_VALUES, block_idx) in zip(['repeat', 'repeatpad', None, 'pad'], EXPECTED_INPUT_FEATURES, [1, 2, 0, 3]):
        # Bug fix: `set_seed()` was called with no argument, which raises a
        # TypeError (the seed parameter is required).  Restored a fixed seed
        # so the random fusion crop is reproducible.
        # TODO(review): confirm 987654321 against the upstream test.
        set_seed(987654321)
        input_features = feature_extractor(input_speech, return_tensors='pt', padding=padding).input_features
        self.assertEqual(input_features.shape, (1, 4, 1001, 64))
        self.assertTrue(torch.allclose(input_features[(0, block_idx, MEL_BIN)], EXPECTED_VALUES, atol=0.001))
def test_integration_rand_trunc_long_input(self):
    """Random-truncation mel features for a long (5-sample concatenated) input.

    Each padding strategy is run with its own fixed seed so the random crop is
    deterministic; the resulting (1, 1, 1001, 64) features are compared at one
    mel bin against reference values.
    """
    EXPECTED_INPUT_FEATURES = torch.tensor([[(- 35.4022), (- 32.7555), (- 31.2004), (- 32.7764), (- 42.577), (- 41.6339), (- 43.163), (- 44.508), (- 44.3029), (- 48.9628), (- 39.5022), (- 39.2105), (- 43.135), (- 43.2195), (- 48.4894), (- 52.2344), (- 57.6891), (- 52.2228), (- 45.5155), (- 44.2893), (- 43.4697), (- 46.6702), (- 43.749), (- 40.4819), (- 42.7275), (- 46.3434), (- 46.8412), (- 41.2003), (- 43.1681), (- 46.2948), (- 46.1925), (- 47.8333), (- 45.6812), (- 44.9182), (- 41.7786), (- 43.3809), (- 44.3199), (- 42.8814), (- 45.4771), (- 46.7114), (- 46.9746), (- 42.709), (- 41.6057), (- 38.3965), (- 40.198), (- 41.0263), (- 34.1256), (- 28.3289), (- 29.0201), (- 30.4453), (- 29.5561), (- 30.1734), (- 25.9406), (- 19.0897), (- 15.8452), (- 20.1351), (- 23.6515), (- 23.1194), (- 17.1845), (- 19.4399), (- 23.6527), (- 22.8768), (- 20.7279), (- 22.7864)], [(- 35.7719), (- 27.2566), (- 23.6964), (- 27.5521), 0.251, 7.4391, 1.3917, (- 13.3417), (- 28.1758), (- 17.0856), (- 5.7723), (- 0.8), (- 7.8832), (- 15.5548), (- 30.5935), (- 24.7571), (- 13.7009), (- 10.3432), (- 21.2464), (- 24.8118), (- 19.408), (- 14.9779), (- 11.7991), (- 18.4485), (- 20.1982), (- 17.3652), (- 20.6328), (- 28.2967), (- 25.7819), (- 21.8962), (- 28.5083), (- 29.5719), (- 30.212), (- 35.7033), (- 31.8218), (- 34.0408), (- 37.7744), (- 33.9653), (- 31.3009), (- 30.9063), (- 28.6153), (- 32.2202), (- 28.5456), (- 28.8579), (- 32.517), (- 37.9152), (- 43.0052), (- 46.4849), (- 44.0786), (- 39.1933), (- 33.2757), (- 31.6313), (- 42.6386), (- 52.3679), (- 53.5785), (- 55.6444), (- 47.005), (- 47.6459), (- 56.6361), (- 60.6781), (- 61.5244), (- 55.8272), (- 60.4832), (- 58.1897)], [(- 38.2686), (- 36.6285), (- 32.5835), (- 35.1693), (- 37.7938), (- 37.4035), (- 35.3132), (- 35.6083), (- 36.3609), (- 40.9472), (- 36.7846), (- 36.1544), (- 38.9076), (- 39.3618), (- 35.4953), (- 34.2809), (- 39.9466), (- 39.7433), (- 34.8347), (- 37.5674), (- 41.5689), (- 38.9161), (- 34.3947), (- 30.2924), (- 30.4841), (- 34.5831), (- 28.9261), (- 24.8849), (- 31.2324), (- 27.1622), (- 27.2107), (- 25.9385), (- 30.1691), (- 30.9223), (- 23.9495), (- 25.6047), (- 26.7119), (- 28.5523), (- 27.7481), (- 32.8427), (- 35.465), (- 31.0399), (- 31.2073), (- 30.5163), (- 22.9819), (- 20.8892), (- 19.251), (- 24.7905), (- 28.9426), (- 28.1998), (- 26.7386), (- 25.014), (- 27.9223), (- 32.9913), (- 33.1864), (- 34.9742), (- 38.5995), (- 39.699), (- 29.3203), (- 22.4697), (- 25.6415), (- 33.5608), (- 33.0945), (- 27.1716)], [(- 33.2015), (- 28.7741), (- 21.9457), (- 23.4888), (- 32.1072), (- 8.6307), 3.2724, 5.9157, (- 0.9221), (- 30.1814), (- 31.0015), (- 27.4508), (- 27.0477), (- 9.5342), 0.3221, 0.6511, (- 7.1596), (- 25.9707), (- 32.8924), (- 32.23), (- 13.8974), (- 0.4895), 0.9168, (- 10.7663), (- 27.1176), (- 35.0829), (- 11.6859), (- 4.8855), (- 11.8898), (- 26.6167), (- 5.6192), (- 3.8443), (- 19.7947), (- 14.4101), (- 8.6236), (- 21.2458), (- 21.0801), (- 17.9136), (- 24.4663), (- 18.6333), (- 24.8085), (- 15.5854), (- 15.4344), (- 11.5046), (- 22.3625), (- 27.3387), (- 32.4353), (- 30.967), (- 31.3789), (- 35.4044), (- 34.4591), (- 25.2433), (- 28.0773), (- 33.8736), (- 33.0224), (- 33.3155), (- 38.5302), (- 39.2741), (- 36.6395), (- 34.7729), (- 32.4483), (- 42.4001), (- 49.2857), (- 39.1682)]])
    MEL_BIN = 963
    # Bug fix: the list literal was `[, 1234, 666, 5555]` (SyntaxError — the
    # first seed was lost).  Restored the first seed.
    # TODO(review): confirm 987654321 against the upstream test.
    SEEDS = [987654321, 1234, 666, 5555]
    input_speech = torch.cat([torch.tensor(x) for x in self._load_datasamples(5)])
    feature_extractor = ClapFeatureExtractor()
    for (padding, EXPECTED_VALUES, seed) in zip(['repeat', 'repeatpad', None, 'pad'], EXPECTED_INPUT_FEATURES, SEEDS):
        set_seed(seed)
        input_features = feature_extractor(input_speech, return_tensors='pt', truncation='rand_trunc', padding=padding).input_features
        self.assertEqual(input_features.shape, (1, 1, 1001, 64))
        self.assertTrue(torch.allclose(input_features[(0, 0, MEL_BIN)], EXPECTED_VALUES, atol=0.0001))
def _reverse_seq(input_seq, lengths):
    """Reverse a list of per-time-step tensors along the time axis.

    When `lengths` is None the Python list itself is reversed; otherwise each
    batch row is reversed only up to its own length via `reverse_sequence`.
    """
    # Without per-example lengths a plain Python-level reversal suffices.
    if lengths is None:
        return [step for step in reversed(input_seq)]
    # Merge the static shapes of all steps so they can be re-applied later.
    merged_shape = tensor_shape.matrix(None, None)
    for step in input_seq:
        merged_shape.merge_with(step.get_shape())
        step.set_shape(merged_shape)
    # Stack to a single (time, batch, ...) tensor and flip each row in-length.
    stacked = array_ops.pack(input_seq)
    lengths = math_ops.to_int64(lengths)
    flipped = array_ops.reverse_sequence(stacked, lengths, 0, 1)
    outputs = array_ops.unpack(flipped)
    # Restore the merged static shape on every unpacked step.
    for out in outputs:
        out.set_shape(merged_shape)
    return outputs
def test_nlc2nchw2nlc():
    """Round-trip [N, L, C] -> [N, C, H, W] -> [N, L, C] through a wrapped callable."""
    shape_nchw = (4, 2, 5, 5)
    shape_nlc = (4, 25, 2)

    def check_nchw(x):
        # The wrapped function must observe the NCHW view.
        assert x.shape == torch.Size(shape_nchw)
        return x

    output = nlc2nchw2nlc(check_nchw, torch.rand(*shape_nlc), shape_nchw[2:])
    assert output.shape == torch.Size(shape_nlc)

    def check_nchw_with_arg(x, arg):
        # Extra keyword arguments must be forwarded untouched.
        assert x.shape == torch.Size(shape_nchw)
        assert arg == 100
        return x

    output = nlc2nchw2nlc(check_nchw_with_arg, torch.rand(*shape_nlc), shape_nchw[2:], arg=100)
    assert output.shape == torch.Size(shape_nlc)

    def check_contiguous(x):
        # With contiguous=True the converted tensor must be contiguous.
        assert x.is_contiguous()
        assert x.shape == torch.Size(shape_nchw)
        return x

    output = nlc2nchw2nlc(check_contiguous, torch.rand(*shape_nlc), shape_nchw[2:], contiguous=True)
    assert output.shape == torch.Size(shape_nlc)
    assert output.is_contiguous()
def get_recall(capsule1_path, region1_path, capsule2_path, region2_path):
    """Compute recall coefficients for class ids 1..4 from two capsule/region image pairs."""
    capsules = [cv2.imread(capsule1_path), cv2.imread(capsule2_path)]
    regions = [cv2.imread(region1_path), cv2.imread(region2_path)]
    class_coefs = []
    for class_id in range(1, 5):
        # generate_masks presumably returns (prediction mask, ground-truth mask)
        # for the given class — TODO(review): confirm the ordering.
        masks = generate_masks(capsules, regions, class_id, 2)
        class_coefs.append(compute_recall(masks[0], masks[1]))
    return class_coefs
def test_statement_coverage_hash(statement_coverage_goal):
    """The statement-coverage goal must provide a non-zero hash value."""
    hash_value = statement_coverage_goal.__hash__()
    assert hash_value != 0
class MaxNorm(Constraint):
    """Constrain weights to have at most `max_value` norm along `axis`."""

    def __init__(self, max_value=2, axis=0):
        self.max_value = max_value
        self.axis = axis

    def __call__(self, w):
        # Norm of each slice along the constrained axis (kept for broadcasting).
        squared_sum = K.sum(K.square(w), axis=self.axis, keepdims=True)
        norms = K.sqrt(squared_sum)
        desired = K.clip(norms, 0, self.max_value)
        # Rescale so over-norm slices shrink to max_value; epsilon avoids /0.
        scale = desired / (K.epsilon() + norms)
        w *= scale
        return w

    def get_config(self):
        """Serializable constructor arguments."""
        return {'max_value': self.max_value, 'axis': self.axis}
class Partition5(nn.Module):
    """Pipeline-parallel partition #5 of a T5ForConditionalGeneration model.

    Owns the last four encoder blocks, the encoder's final LayerNorm and
    Dropout, and the decoder's embedding, dropout, and first block, all pinned
    to one device.  NOTE(review): this looks like machine-generated partitioner
    output — prefer regenerating over hand-editing.
    """
    # Fully-qualified scopes of the layers owned by this partition, in order.
    LAYER_SCOPES = ['T5ForConditionalGeneration/T5Stack[encoder]/T5Block[20]', 'T5ForConditionalGeneration/T5Stack[encoder]/T5Block[21]', 'T5ForConditionalGeneration/T5Stack[encoder]/T5Block[22]', 'T5ForConditionalGeneration/T5Stack[encoder]/T5Block[23]', 'T5ForConditionalGeneration/T5Stack[encoder]/T5LayerNorm[final_layer_norm]', 'T5ForConditionalGeneration/T5Stack[encoder]/Dropout[dropout]', 'T5ForConditionalGeneration/T5Stack[decoder]/StatelessEmbedding[embed_tokens]', 'T5ForConditionalGeneration/T5Stack[decoder]/Dropout[dropout]', 'T5ForConditionalGeneration/T5Stack[decoder]/T5Block[0]']
    # Scopes of free-floating tensors owned by this partition (none here).
    TENSORS = []
    def __init__(self, layers, tensors, device='cuda:5'):
        super().__init__()
        # Register owned layers as submodules l_0..l_8 in LAYER_SCOPES order.
        for (idx, layer_scope) in enumerate(self.LAYER_SCOPES):
            self.add_module(f'l_{idx}', layers[layer_scope])
        # Register free tensors as parameters (p_i) or buffers (b_i).
        b = p = 0
        for tensor_scope in self.TENSORS:
            tensor = tensors[tensor_scope]
            if isinstance(tensor, nn.Parameter):
                self.register_parameter(f'p_{p}', tensor)
                p += 1
            else:
                self.register_buffer(f'b_{b}', tensor)
                b += 1
        self.device = torch.device(device)
        # Structure descriptor consumed by unflatten() in forward (8 inputs).
        self.input_structure = [1, 1, 1, 1, 1, 1, 1, 1]
        # Maps local submodule names back to original model attribute paths.
        self.lookup = {'l_0': 'encoder.20', 'l_1': 'encoder.21', 'l_2': 'encoder.22', 'l_3': 'encoder.23', 'l_4': 'encoder.final_layer_norm', 'l_5': 'encoder.dropout', 'l_6': 'decoder.embed_tokens', 'l_7': 'decoder.dropout', 'l_8': 'decoder.0'}
        self.to(self.device)
    def forward(self, *args):
        (attention_mask, decoder_attention_mask, decoder_input_ids, inverted_encoder_attention_mask, x0, x1, x2, x3) = unflatten(args, self.input_structure)
        # Encoder tail: blocks 20-23 chained, sharing position bias x2.
        t_0 = self.l_0(x3, attention_mask=attention_mask, position_bias=x2, encoder_hidden_states=None, encoder_attention_mask=None, encoder_decoder_position_bias=None)
        t_0 = self.l_1(t_0, attention_mask=attention_mask, position_bias=x2, encoder_hidden_states=None, encoder_attention_mask=None, encoder_decoder_position_bias=None)
        t_0 = self.l_2(t_0, attention_mask=attention_mask, position_bias=x2, encoder_hidden_states=None, encoder_attention_mask=None, encoder_decoder_position_bias=None)
        t_0 = self.l_3(t_0, attention_mask=attention_mask, position_bias=x2, encoder_hidden_states=None, encoder_attention_mask=None, encoder_decoder_position_bias=None)
        # Final LayerNorm + Dropout close the encoder.
        t_0 = self.l_4(t_0)
        t_0 = self.l_5(t_0)
        # Decoder head: flatten ids to (-1, last dim of x0), embed, dropout.
        t_1 = x0[(- 1)]
        t_1 = decoder_input_ids.view((- 1), t_1)
        t_1 = self.l_6(x1, t_1)
        t_1 = self.l_7(t_1)
        # First decoder block, cross-attending over the encoder output t_0.
        t_1 = self.l_8(t_1, attention_mask=decoder_attention_mask, position_bias=None, encoder_hidden_states=t_0, encoder_attention_mask=inverted_encoder_attention_mask, encoder_decoder_position_bias=None)
        # Split the block's tuple output; NOTE(review): the meaning of the
        # three elements is inferred from T5 block outputs — confirm.
        t_2 = t_1[0]
        t_3 = t_1[1]
        t_1 = t_1[2]
        return list(flatten((t_0, t_2, t_3, t_1)))
    # The methods below delegate standard nn.Module APIs to partition-aware
    # free functions provided by the partitioning framework.
    def state_dict(self, *args, **kwargs):
        return state_dict(self, *args, **kwargs)
    def load_state_dict(self, *args, **kwargs):
        return load_state_dict(self, *args, **kwargs)
    def named_parameters(self, *args, **kwargs):
        return named_parameters(self, *args, **kwargs)
    def named_buffers(self, *args, **kwargs):
        return named_buffers(self, *args, **kwargs)
    def cpu(self):
        return cpu(self)
    def cuda(self, device=None):
        return cuda(self, device=device)
    def to(self, *args, **kwargs):
        return to(self, *args, **kwargs)
class Form(Meta):
    """Abstract base class for Awkward Array "forms": data-less descriptions
    of an array's layout tree.

    Concrete subclasses implement the `_`-prefixed hooks that raise
    NotImplementedError here.
    """

    def _init(self, *, parameters: JSONMapping | None, form_key: str | None):
        """Validate and store the state shared by all Form subclasses."""
        if parameters is not None and not isinstance(parameters, dict):
            raise TypeError("{} 'parameters' must be of type dict or None, not {}".format(type(self).__name__, repr(parameters)))
        if form_key is not None and not isinstance(form_key, str):
            raise TypeError("{} 'form_key' must be of type string or None, not {}".format(type(self).__name__, repr(form_key)))
        self._parameters = parameters
        self._form_key = form_key

    # Bug fix: the getter was missing its @property decorator and the setter
    # was written as a bare `_key.setter` statement, which raised NameError at
    # class-definition time.  Restored the standard property/setter pair.
    @property
    def form_key(self):
        """Optional string identifying this node within buffer containers."""
        return self._form_key

    @form_key.setter
    def form_key(self, value):
        if value is not None and not isinstance(value, str):
            raise TypeError('form_key must be None or a string')
        self._form_key = value

    def __str__(self):
        """Non-verbose, indented JSON rendering of this form."""
        return json.dumps(self.to_dict(verbose=False), indent=4)

    def to_dict(self, verbose=True):
        """Convert the form (recursively) to a JSON-serializable dict."""
        return self._to_dict_part(verbose, toplevel=True)

    def _to_dict_extra(self, out, verbose):
        # Attach 'parameters'/'form_key' only when non-trivial (or verbose).
        if verbose or (self._parameters is not None and len(self._parameters) > 0):
            out['parameters'] = self.parameters
        if verbose or self._form_key is not None:
            out['form_key'] = self._form_key
        return out

    def to_json(self):
        """Render this form as a JSON string."""
        return json.dumps(self.to_dict(verbose=True))

    def _repr_args(self):
        # Constructor-argument fragments shared by subclass __repr__ methods.
        out = []
        if self._parameters is not None and len(self._parameters) > 0:
            out.append('parameters=' + repr(self._parameters))
        if self._form_key is not None:
            out.append('form_key=' + repr(self._form_key))
        return out

    def type(self):
        # NOTE(review): upstream Awkward exposes `type` as a property; left as
        # a plain method here to avoid changing the caller-visible interface.
        raise NotImplementedError

    def columns(self, list_indicator=None, column_prefix=()):
        """Return the list of column names reachable from this form."""
        output = []
        self._columns(column_prefix, output, list_indicator)
        return output

    def select_columns(self, specifier, expand_braces=True, *, prune_unions_and_records: bool = True):
        """Project the form onto the columns matched by `specifier`.

        `specifier` is a string or iterable of non-empty dotted path strings;
        brace expansion ("a.{x,y}") is applied unless `expand_braces` is False.
        """
        if isinstance(specifier, str):
            specifier = {specifier}
        for item in specifier:
            if not isinstance(item, str):
                raise TypeError('a column-selection specifier must be a list of non-empty strings')
            if not item:
                raise ValueError('a column-selection specifier must be a list of non-empty strings')
        if expand_braces:
            next_specifier = []
            for item in specifier:
                for result in _expand_braces(item):
                    next_specifier.append(result)
            specifier = next_specifier
        # Deduplicate, then split each dotted path into its components.
        specifier = [[] if item == '' else item.split('.') for item in set(specifier)]
        match_specifier = _SpecifierMatcher(specifier, match_if_empty=False)
        selection = self._select_columns(match_specifier)
        assert selection is not None, 'top-level selections always return a Form'
        if prune_unions_and_records:
            return selection._prune_columns(False)
        else:
            return selection

    def column_types(self):
        """Return the types of each leaf column."""
        return self._column_types()

    def _columns(self, path, output, list_indicator):
        raise NotImplementedError

    def _prune_columns(self, is_inside_record_or_union: bool) -> Form | None:
        raise NotImplementedError

    def _select_columns(self, match_specifier: _SpecifierMatcher) -> Form | None:
        raise NotImplementedError

    def _column_types(self):
        raise NotImplementedError

    def _to_dict_part(self, verbose, toplevel):
        raise NotImplementedError

    def length_zero_array(self, *, backend=numpy_backend, highlevel=True, behavior=None):
        """Build a length-0 array with this form (all buffers empty)."""
        if highlevel:
            deprecate('The `highlevel=True` variant of `Form.length_zero_array` is now deprecated. Please use `ak.Array(form.length_zero_array(...), behavior=...)` if an `ak.Array` is required.', version='2.3.0')
        return ak.operations.ak_from_buffers._impl(form=self, length=0, container={'': b'\x00\x00\x00\x00\x00\x00\x00\x00'}, buffer_key='', backend=backend, byteorder=ak._util.native_byteorder, highlevel=highlevel, behavior=behavior, attrs=None, simplify=False)

    def length_one_array(self, *, backend=numpy_backend, highlevel=True, behavior=None):
        """Build a length-1 array with this form (buffers zero-filled)."""
        if highlevel:
            # Bug fix: the message previously referred to `length_zero_array`
            # (copy/paste from the method above).
            deprecate('The `highlevel=True` variant of `Form.length_one_array` is now deprecated. Please use `ak.Array(form.length_one_array(...), behavior=...)` if an `ak.Array` is required.', version='2.3.0')

        def max_prefer_unknown(this: ShapeItem, that: ShapeItem) -> ShapeItem:
            # Propagate unknown_length if either operand is unknown.
            if this is unknown_length:
                return this
            if that is unknown_length:
                return that
            return max(this, that)

        container = {}

        def prepare(form, multiplier):
            # Fill `container` with zeroed buffers sized for one logical entry
            # and return a copy of `form` with fresh form_keys pointing at them.
            form_key = f'node-{len(container)}'
            if isinstance(form, (ak.forms.BitMaskedForm, ak.forms.ByteMaskedForm)):
                if form.valid_when:
                    container[form_key] = b'\x00' * multiplier
                else:
                    container[form_key] = b'\xff' * multiplier
                return form.copy(form_key=form_key)
            elif isinstance(form, ak.forms.IndexedOptionForm):
                container[form_key] = b'\xff\xff\xff\xff\xff\xff\xff\xff'
                return form.copy(form_key=form_key)
            elif isinstance(form, ak.forms.EmptyForm):
                raise TypeError('cannot generate a length_one_array from a Form with an unknowntype that cannot be hidden (EmptyForm not within BitMaskedForm, ByteMaskedForm, or IndexedOptionForm)')
            elif isinstance(form, ak.forms.UnmaskedForm):
                return form.copy(content=prepare(form.content, multiplier))
            elif isinstance(form, (ak.forms.IndexedForm, ak.forms.ListForm)):
                container[form_key] = b'\x00' * (8 * multiplier)
                return form.copy(content=prepare(form.content, multiplier), form_key=form_key)
            elif isinstance(form, ak.forms.ListOffsetForm):
                # Offsets buffers need one extra entry.
                container[form_key] = b'\x00' * (8 * (multiplier + 1))
                return form.copy(content=prepare(form.content, multiplier), form_key=form_key)
            elif isinstance(form, ak.forms.RegularForm):
                size = form.size
                if size is unknown_length:
                    size = 1
                return form.copy(content=prepare(form.content, multiplier * size))
            elif isinstance(form, ak.forms.NumpyForm):
                dtype = ak.types.numpytype.primitive_to_dtype(form._primitive)
                size = multiplier * dtype.itemsize
                for x in form.inner_shape:
                    if x is not unknown_length:
                        size *= x
                container[form_key] = b'\x00' * size
                return form.copy(form_key=form_key)
            elif isinstance(form, ak.forms.RecordForm):
                return form.copy(contents=[prepare(x, multiplier) for x in form.contents])
            elif isinstance(form, ak.forms.UnionForm):
                container[form_key] = b'\x00' * (8 * multiplier)
                return form.copy(contents=[prepare(form.contents[0], multiplier)] + form.contents[1:], form_key=form_key)
            else:
                raise AssertionError(f'not a Form: {form!r}')

        return ak.operations.ak_from_buffers._impl(form=prepare(self, 1), length=1, container=container, buffer_key='{form_key}', backend=backend, byteorder=ak._util.native_byteorder, highlevel=highlevel, behavior=behavior, attrs=None, simplify=False)

    def _expected_from_buffers(self, getkey: Callable[[Form, str], str], recursive: bool) -> Iterator[tuple[str, DType]]:
        raise NotImplementedError

    def expected_from_buffers(self, buffer_key='{form_key}-{attribute}', recursive=True):
        """Map buffer keys to the dtypes `from_buffers` would expect."""
        getkey = regularize_buffer_key(buffer_key)
        return dict(self._expected_from_buffers(getkey, recursive))

    def is_equal_to(self, other: Any, *, all_parameters: bool = False, form_key: bool = False) -> bool:
        """Compare two forms, optionally including all parameters and form_keys."""
        return self._is_equal_to(other, all_parameters, form_key)

    __eq__ = is_equal_to

    def _is_equal_to(self, other: Any, all_parameters: bool, form_key: bool) -> bool:
        raise NotImplementedError

    def _is_equal_to_generic(self, other: Any, all_parameters: bool, form_key: bool) -> bool:
        # Shared equality helper: same concrete type, optionally same form_key,
        # and parameters equal under the requested comparison strictness.
        compare_parameters = parameters_are_equal if all_parameters else type_parameters_equal
        return (
            isinstance(other, type(self))
            and not (form_key and self._form_key != other._form_key)
            and compare_parameters(self._parameters, other._parameters)
        )
def vgg_detectron_weight_mapping(model):
    """Map VGG state-dict keys to Detectron-style names (.weight -> _w, .bias -> _b).

    Returns (mapping_to_detectron, orphan_in_detectron); there are no orphan
    Detectron weights for this architecture.
    """
    mapping_to_detectron = {}
    for key in model.state_dict():
        if '.weight' in key:
            mapping_to_detectron[key] = key.replace('.weight', '_w')
        if '.bias' in key:
            mapping_to_detectron[key] = key.replace('.bias', '_b')
    orphan_in_detectron = []
    return mapping_to_detectron, orphan_in_detectron
def main():
    """Drive loop: repeatedly issue straight/left/right controls, printing the
    currently pressed keys after each command.

    Runs forever; intended to be stopped externally (Ctrl-C).
    """
    # NOTE(review): `paused` is never toggled anywhere in this loop, so the
    # guard below is always taken — pause handling appears unfinished.
    paused = False
    while True:
        if (not paused):
            straight()
            print(key_check())
            time.sleep(1)
            left()
            print(key_check())
            time.sleep(1)
            right()
            print(key_check())
            time.sleep(1)
def get_dist_from_SVDD(data_set, model, center):
    """Return mean squared distances of each embedded batch row from the SVDD center.

    Runs `model` in eval mode without gradients over every batch in
    `data_set`, stacks the embeddings, and returns a numpy array of
    per-sample mean squared deviations from `center`.
    """
    embeddings = []
    model.eval()
    with torch.no_grad():
        for _, batch in enumerate(data_set):
            embeddings.append(model(batch))
    stacked = torch.vstack(embeddings)
    deviation = stacked - center.unsqueeze(0)
    sq_dist = deviation.square().mean(1)
    return sq_dist.cpu().detach().numpy()
def search_span(span):
    """Query the Wikidata entity-search API for `span` and return up to 5 entity ids."""
    params = {
        'action': 'wbsearchentities',
        'search': span,
        'language': 'en',
        'limit': 5,
        'format': 'json',
        'props': 'description',
    }
    # `url` is the module-level API endpoint.
    data = requests.get(url, params=params).json()
    return [result['id'] for result in data['search']]
class BernoulliDistribution(Distribution):
    """Bernoulli distribution over independent binary action dimensions,
    parameterized by per-dimension logits."""

    def __init__(self, action_dims: int):
        super().__init__()
        self.action_dims = action_dims

    def proba_distribution_net(self, latent_dim: int) -> nn.Module:
        """Linear head mapping latent features to one logit per action dimension."""
        return nn.Linear(latent_dim, self.action_dims)

    def proba_distribution(self, action_logits: torch.Tensor) -> 'BernoulliDistribution':
        """Bind a torch Bernoulli distribution to the given logits."""
        self.distribution = Bernoulli(logits=action_logits)
        return self

    def log_prob(self, actions: torch.Tensor) -> torch.Tensor:
        # Independent dimensions: the joint log-prob is the per-dim sum.
        return self.distribution.log_prob(actions).sum(dim=1)

    def entropy(self) -> torch.Tensor:
        return self.distribution.entropy().sum(dim=1)

    def sample(self) -> torch.Tensor:
        return self.distribution.sample()

    def mode(self) -> torch.Tensor:
        # Most likely action: round each per-dimension probability.
        return torch.round(self.distribution.probs)

    def actions_from_params(self, action_logits: torch.Tensor, deterministic: bool = False) -> torch.Tensor:
        """Set up the distribution from logits, then draw actions."""
        self.proba_distribution(action_logits)
        return self.get_actions(deterministic=deterministic)

    def log_prob_from_params(self, action_logits: torch.Tensor) -> Tuple[torch.Tensor, torch.Tensor]:
        """Return (actions, log_prob) for the given logits."""
        actions = self.actions_from_params(action_logits)
        return actions, self.log_prob(actions)
def eye(n, M=None, k=0, dtype=float, order='C'):
    """Matrix variant of numpy.eye: ones on diagonal `k` of an n-by-M matrix."""
    identity = np.eye(n, M=M, k=k, dtype=dtype, order=order)
    return asmatrix(identity)
def add_chinese_references(dataset, ref_file):
    """Attach per-example Chinese whole-word references as a 'chinese_ref' column.

    `ref_file` holds one JSON document per non-blank line, in dataset order.
    """
    with open(ref_file, 'r', encoding='utf-8') as f:
        lines = f.read().splitlines()
    refs = [json.loads(line) for line in lines if len(line) > 0 and not line.isspace()]
    # One reference entry is required for every dataset example.
    assert len(dataset) == len(refs)
    columns = {name: dataset[name] for name in dataset.column_names}
    columns['chinese_ref'] = refs
    return Dataset.from_dict(columns)
def unlink_backward(grad_inputs, inputs, input_shapes, outputs, output_shapes):
    """Backward pass placeholder for the `unlink` op; not yet implemented."""
    # Pull out the operands the eventual implementation will need.
    dy, x0 = grad_inputs[0], inputs[0]
    raise NotImplementedError('unlink_backward is not implemented.')
def run_hp_search_optuna(trainer, n_trials: int, direction: str, **kwargs) -> BestRun:
    """Run an Optuna hyper-parameter search driving `trainer` and return the best run."""

    def _objective(trial, checkpoint_dir=None):
        # Resume from the last checkpoint sub-directory when one is supplied.
        model_path = None
        if checkpoint_dir:
            candidates = [d for d in os.listdir(checkpoint_dir) if d.startswith(PREFIX_CHECKPOINT_DIR)]
            if candidates:
                model_path = os.path.join(checkpoint_dir, candidates[-1])
        trainer.objective = None
        trainer.train(model_path=model_path, trial=trial)
        # Training may not end with an evaluation; compute the objective if missing.
        if getattr(trainer, 'objective', None) is None:
            metrics = trainer.evaluate()
            trainer.objective = trainer.compute_objective(metrics)
        return trainer.objective

    timeout = kwargs.pop('timeout', None)
    n_jobs = kwargs.pop('n_jobs', 1)
    study = optuna.create_study(direction=direction, **kwargs)
    study.optimize(_objective, n_trials=n_trials, timeout=timeout, n_jobs=n_jobs)
    best = study.best_trial
    return BestRun(str(best.number), best.value, best.params)
def check_integrity(fpath: str, md5: Optional[str] = None) -> bool:
    """Return True if `fpath` is an existing file and, when `md5` is given,
    its checksum matches."""
    if not os.path.isfile(fpath):
        return False
    # No checksum requested: existence is enough.
    return True if md5 is None else check_md5(fpath, md5)
def enable_explicit_format() -> None:
    """Install a detailed "[LEVEL|file:line] time >> message" formatter on
    every handler of the library's root logger."""
    for handler in _get_library_root_logger().handlers:
        handler.setFormatter(logging.Formatter('[%(levelname)s|%(filename)s:%(lineno)s] %(asctime)s >> %(message)s'))
def generate_multimethod(argument_extractor: ArgumentExtractorType, argument_replacer: ArgumentReplacerType, domain: str, default: Optional[Callable]=None):
    """Build a uarray multimethod around `argument_extractor`, preserving its
    metadata via functools.update_wrapper."""
    kw_defaults, arg_defaults, opts = get_defaults(argument_extractor)
    multimethod = _Function(argument_extractor, argument_replacer, domain, arg_defaults, kw_defaults, default)
    return functools.update_wrapper(multimethod, argument_extractor)
def write_sequence(frames, path):
    """Write tracked objects per frame to `path`, one whitespace-separated
    line per object: t, track id, class id, mask size0, mask size1, RLE counts."""
    with open(path, 'w') as out:
        for timestamp, objects in frames.items():
            for obj in objects:
                size0 = obj.mask['size'][0]
                size1 = obj.mask['size'][1]
                rle = obj.mask['counts'].decode(encoding='UTF-8')
                print(timestamp, obj.track_id, obj.class_id, size0, size1, rle, file=out)
class DeVilliersGlasser02(Benchmark):
    """deVilliers-Glasser 02 benchmark: a 5-parameter curve-fitting objective
    whose global minimum is 0 at the generating parameters."""

    def __init__(self, dimensions=5):
        Benchmark.__init__(self, dimensions)
        # All five parameters are bounded to [1, 60].
        self._bounds = list(zip([1.0] * self.N, [60.0] * self.N))
        self.global_optimum = [[53.81, 1.27, 3.012, 2.13, 0.507]]
        self.fglob = 0.0

    def fun(self, x, *args):
        self.nfev += 1
        # 16 sample points and the target curve generated by the optimum.
        t = 0.1 * arange(16)
        y = 53.81 * 1.27 ** t * tanh(3.012 * t + sin(2.13 * t)) * cos(exp(0.507) * t)
        # Sum of squared residuals between the candidate curve and the target.
        model = x[0] * x[1] ** t * tanh(x[2] * t + sin(x[3] * t)) * cos(t * exp(x[4]))
        return sum((model - y) ** 2.0)
def fit_predict_selected(model, train_log, inf_log, user_features, users):
    """Fit `model` on `train_log` and predict the top-1 item for `users` from `inf_log`.

    User features are passed only to recommender types that accept them.
    """
    needs_features = isinstance(model, (HybridRecommender, UserRecommender))
    extra = {'user_features': user_features} if needs_features else {}
    model.fit(train_log, **extra)
    return model.predict(log=inf_log, users=users, k=1, **extra)
# NOTE(review): each `register_module(...)` call returns a decorator that is
# discarded here, so as written these three statements register nothing.
# They look like class decorators for the ConvTranspose2d below that lost
# their `@` (possibly applied to distinct registries originally) — confirm
# against the upstream source before relying on registry lookups.
_LAYERS.register_module()
_LAYERS.register_module('deconv')
_LAYERS.register_module('deconv', force=True)
class ConvTranspose2d(nn.ConvTranspose2d):
    """ConvTranspose2d wrapper that tolerates empty (numel() == 0) inputs on
    old PyTorch versions, where the native op cannot handle empty batches."""

    def forward(self, x: torch.Tensor) -> torch.Tensor:
        # Only the empty-input workaround differs from the stock layer, and
        # only when running an obsolete torch (< 1.4).
        if ((x.numel() == 0) and obsolete_torch_version(TORCH_VERSION, (1, 4))):
            out_shape = [x.shape[0], self.out_channels]
            # Compute each spatial output size with the standard transposed
            # convolution formula: (i-1)*s - 2p + d*(k-1) + 1 + output_padding.
            for (i, k, p, s, d, op) in zip(x.shape[(- 2):], self.kernel_size, self.padding, self.stride, self.dilation, self.output_padding):
                out_shape.append((((((i - 1) * s) - (2 * p)) + ((d * (k - 1)) + 1)) + op))
            empty = NewEmptyTensorOp.apply(x, out_shape)
            if self.training:
                # Keep the autograd graph connected to the parameters by adding
                # a zero-valued term built from them.
                dummy = (sum((x.view((- 1))[0] for x in self.parameters())) * 0.0)
                return (empty + dummy)
            else:
                return empty
        return super().forward(x)
class custom_build_ext(build_ext):
    """build_ext variant that patches the compiler for NVCC before building."""

    def build_extensions(self):
        # Teach the already-configured compiler how to handle .cu sources.
        customize_compiler_for_nvcc(self.compiler)
        # Explicit base-class call (kept for compatibility with old distutils).
        build_ext.build_extensions(self)
class FPEM(BaseModule):
    """Feature Pyramid Enhancement Module: one top-down pass followed by one
    bottom-up pass over four pyramid levels."""

    def __init__(self, in_channels=128, init_cfg=None):
        super().__init__(init_cfg=init_cfg)
        # Top-down refinement stages (third arg presumably stride 1 —
        # TODO(review): confirm SeparableConv2d's positional signature).
        self.up_add1 = SeparableConv2d(in_channels, in_channels, 1)
        self.up_add2 = SeparableConv2d(in_channels, in_channels, 1)
        self.up_add3 = SeparableConv2d(in_channels, in_channels, 1)
        # Bottom-up refinement stages (third arg 2).
        self.down_add1 = SeparableConv2d(in_channels, in_channels, 2)
        self.down_add2 = SeparableConv2d(in_channels, in_channels, 2)
        self.down_add3 = SeparableConv2d(in_channels, in_channels, 2)

    def forward(self, c2, c3, c4, c5):
        """Enhance the four pyramid levels; shapes are preserved."""
        # Top-down: resize the coarser map onto the finer one, add, refine.
        c4 = self.up_add1(self._upsample_add(c5, c4))
        c3 = self.up_add2(self._upsample_add(c4, c3))
        c2 = self.up_add3(self._upsample_add(c3, c2))
        # Bottom-up pass over the refreshed maps.
        c3 = self.down_add1(self._upsample_add(c3, c2))
        c4 = self.down_add2(self._upsample_add(c4, c3))
        c5 = self.down_add3(self._upsample_add(c5, c4))
        return c2, c3, c4, c5

    def _upsample_add(self, x, y):
        # Resize x to y's spatial size (F.interpolate default mode) and add.
        return F.interpolate(x, size=y.size()[2:]) + y
def read_metadata_from_db(datasource, table):
    """Connect to `datasource`, read the metadata stored in `table`, and return it."""
    with connect_with_data_source(datasource) as conn, SQLFSReader(conn, table) as reader:
        return _read_metadata(reader)
class Conv2D2BNInfoCollectionTest(BasePytorchTest):
    """Verify that BatchNorm statistics are correctly reflected in node prior
    info when a Conv2d is followed by two BatchNorm layers."""

    def __init__(self, unit_test):
        super().__init__(unit_test)
        self.val_batch_size = 1

    def create_inputs_shape(self):
        """Single batch of 3x32x32 inputs."""
        return [[self.val_batch_size, 3, 32, 32]]

    def generate_inputs(self, input_shapes):
        """Random torch tensors matching `input_shapes`.

        Bug fix: `self` was missing from the signature even though the method
        is invoked as `self.generate_inputs(input_shapes)` below, which made
        that call raise a TypeError.
        """
        return to_torch_tensor([torch.randn(*in_shape) for in_shape in input_shapes])

    def prepare_graph(self, in_model):
        """Read `in_model` into a graph, attach per-node prior info, and apply
        the pre-statistics-collection substitutions."""
        fw_info = DEFAULT_PYTORCH_INFO
        pytorch_impl = PytorchImplementation()
        input_shapes = self.create_inputs_shape()
        x = self.generate_inputs(input_shapes)

        def representative_data_gen():
            yield x

        graph = pytorch_impl.model_reader(in_model, representative_data_gen)
        graph = substitute(graph, pytorch_impl.get_substitutions_prepare_graph())
        for node in graph.nodes:
            node.prior_info = pytorch_impl.get_node_prior_info(node=node, fw_info=fw_info, graph=graph)
        transformed_graph = substitute(graph, pytorch_impl.get_substitutions_pre_statistics_collection(DEFAULTCONFIG))
        return transformed_graph

    def run_test(self):
        """Check prior mean/std of the conv+bn, bn, and bn2 nodes against the
        float model's BatchNorm parameters."""
        model_float = create_model_2()
        transformed_graph = self.prepare_graph(model_float)
        self.unit_test.assertTrue(len(transformed_graph.find_node_by_name('conv1')) == 1)
        conv_bn_node = transformed_graph.find_node_by_name('conv1')[0]
        self.unit_test.assertTrue(len(transformed_graph.find_node_by_name('bn')) == 1)
        bn_node = transformed_graph.find_node_by_name('bn')[0]
        self.unit_test.assertTrue(len(transformed_graph.find_node_by_name('bn2')) == 1)
        bn2_node = transformed_graph.find_node_by_name('bn2')[0]
        conv_std = conv_bn_node.prior_info.std_output
        conv_mean = conv_bn_node.prior_info.mean_output
        bn_std = bn_node.prior_info.std_output
        bn_mean = bn_node.prior_info.mean_output
        bn2_std = bn2_node.prior_info.std_output
        bn2_mean = bn2_node.prior_info.mean_output
        bn_layer = model_float.bn
        mm = bn_layer.running_mean
        mv = bn_layer.running_var
        m_std = np.sqrt(mv.cpu().data.numpy())
        # The folded conv+bn output priors should equal BN's running stats.
        self.unit_test.assertTrue((mm.cpu().data.numpy() == conv_mean).all())
        self.unit_test.assertTrue((m_std == conv_std).all())
        gamma = bn_layer.weight
        beta = bn_layer.bias
        # A BN node's own output prior is (beta, |gamma|).
        self.unit_test.assertTrue((beta.cpu().data.numpy() == bn_mean).all())
        self.unit_test.assertTrue((abs(gamma.cpu().data.numpy()) == bn_std).all())
        bn2_layer = model_float.bn2
        gamma2 = bn2_layer.weight
        beta2 = bn2_layer.bias
        self.unit_test.assertTrue((beta2.cpu().data.numpy() == bn2_mean).all())
        self.unit_test.assertTrue((abs(gamma2.cpu().data.numpy()) == bn2_std).all())
class ConfigurationCommand(Command):
name = 'config'
usage = '\n %prog [<file-option>] list\n %prog [<file-option>] [--editor <editor-path>] edit\n\n %prog [<file-option>] get name\n %prog [<file-option>] set name value\n %prog [<file-option>] unset name\n '
summary = 'Manage local and global configuration.'
def __init__(self, *args, **kwargs):
    """Register the command's file-selection and editor options."""
    super(ConfigurationCommand, self).__init__(*args, **kwargs)
    # Loaded lazily in run(); holds the active pip Configuration object.
    self.configuration = None
    self.cmd_opts.add_option('--editor', dest='editor', action='store', default=None, help='Editor to use to edit the file. Uses VISUAL or EDITOR environment variables if not provided.')
    # The three file-scope flags below are mutually exclusive (enforced in
    # _determine_file, not by optparse).
    self.cmd_opts.add_option('--global', dest='global_file', action='store_true', default=False, help='Use the system-wide configuration file only')
    self.cmd_opts.add_option('--user', dest='user_file', action='store_true', default=False, help='Use the user configuration file only')
    self.cmd_opts.add_option('--venv', dest='venv_file', action='store_true', default=False, help='Use the virtualenv configuration file only')
    self.parser.insert_option_group(0, self.cmd_opts)
def run(self, options, args):
    """Dispatch to the sub-command handler named by args[0].

    Returns SUCCESS, or ERROR after logging when the action is unknown, the
    target file cannot be determined, or the handler raises PipError.
    """
    handlers = {'list': self.list_values, 'edit': self.open_in_editor, 'get': self.get_name, 'set': self.set_name_value, 'unset': self.unset_name}
    # Reject a missing or unknown action up front.
    if ((not args) or (args[0] not in handlers)):
        logger.error('Need an action ({}) to perform.'.format(', '.join(sorted(handlers))))
        return ERROR
    action = args[0]
    # Determine which configuration file the action targets; value-oriented
    # actions require exactly one target file.
    try:
        load_only = self._determine_file(options, need_value=(action in ['get', 'set', 'unset', 'edit']))
    except PipError as e:
        logger.error(e.args[0])
        return ERROR
    self.configuration = Configuration(isolated=options.isolated_mode, load_only=load_only)
    self.configuration.load()
    try:
        handlers[action](options, args[1:])
    except PipError as e:
        logger.error(e.args[0])
        return ERROR
    return SUCCESS
def _determine_file(self, options, need_value):
file_options = {kinds.USER: options.user_file, kinds.GLOBAL: options.global_file, kinds.VENV: options.venv_file}
if (sum(file_options.values()) == 0):
if (not need_value):
return None
elif os.path.exists(venv_config_file):
return kinds.VENV
else:
return kinds.USER
elif (sum(file_options.values()) == 1):
return [key for key in file_options if file_options[key]][0]
raise PipError('Need exactly one file to operate upon (--user, --venv, --global) to perform.')
def list_values(self, options, args):
self._get_n_args(args, 'list', n=0)
for (key, value) in sorted(self.configuration.items()):
logger.info('%s=%r', key, value)
def get_name(self, options, args):
key = self._get_n_args(args, 'get [name]', n=1)
value = self.configuration.get_value(key)
logger.info('%s', value)
def set_name_value(self, options, args):
(key, value) = self._get_n_args(args, 'set [name] [value]', n=2)
self.configuration.set_value(key, value)
self._save_configuration()
def unset_name(self, options, args):
key = self._get_n_args(args, 'unset [name]', n=1)
self.configuration.unset_value(key)
self._save_configuration()
def open_in_editor(self, options, args):
editor = self._determine_editor(options)
fname = self.configuration.get_file_to_edit()
if (fname is None):
raise PipError('Could not determine appropriate file.')
try:
subprocess.check_call([editor, fname])
except subprocess.CalledProcessError as e:
raise PipError('Editor Subprocess exited with exit code {}'.format(e.returncode))
def _get_n_args(self, args, example, n):
if (len(args) != n):
msg = 'Got unexpected number of arguments, expected {}. (example: "{} config {}")'.format(n, get_prog(), example)
raise PipError(msg)
if (n == 1):
return args[0]
else:
return args
def _save_configuration(self):
try:
self.configuration.save()
except Exception:
logger.error('Unable to save configuration. Please report this as a bug.', exc_info=1)
raise PipError('Internal Error.')
def _determine_editor(self, options):
if (options.editor is not None):
return options.editor
elif ('VISUAL' in os.environ):
return os.environ['VISUAL']
elif ('EDITOR' in os.environ):
return os.environ['EDITOR']
else:
raise PipError('Could not determine editor to use.') |
class SoftSign(Module):
    """Legacy-nn SoftSign activation: f(x) = x / (1 + |x|).

    Reuses the `temp`/`tempgrad` scratch buffers across calls to avoid
    reallocations.
    """

    def __init__(self):
        super(SoftSign, self).__init__()
        # Scratch tensors; lazily allocated with the input's tensor type.
        self.temp = None
        self.tempgrad = None

    def updateOutput(self, input):
        """output = input / (1 + |input|)."""
        if (self.temp is None):
            self.temp = input.new()
        # temp := 1 + |input|
        self.temp.resize_as_(input).copy_(input).abs_().add_(1)
        self.output.resize_as_(input).copy_(input).div_(self.temp)
        return self.output

    def updateGradInput(self, input, gradOutput):
        """gradInput = gradOutput / (1 + |input|)^2 (the softsign derivative)."""
        if (self.tempgrad is None):
            self.tempgrad = input.new()
        # After add_(1), tempgrad holds (1 + |input|); the chained in-place
        # mul_(self.tempgrad) multiplies it by itself, i.e. squares it.
        self.tempgrad.resize_as_(self.output).copy_(input).abs_().add_(1).mul_(self.tempgrad)
        self.gradInput.resize_as_(input).copy_(gradOutput).div_(self.tempgrad)
        return self.gradInput

    def clearState(self):
        """Release the scratch buffers."""
        clear(self, 'temp', 'tempgrad')
        return super(SoftSign, self).clearState()
class AddPosEmb(nn.Module):
    """Add a learned positional embedding to every group of `n` consecutive vectors."""

    def __init__(self, n, c):
        super(AddPosEmb, self).__init__()
        # One (n, c) embedding, shared across the batch and across groups.
        self.pos_emb = nn.Parameter(torch.zeros(1, 1, n, c).float().normal_(mean=0, std=0.02), requires_grad=True)
        self.num_vecs = n

    def forward(self, x):
        """x: (batch, length, channels) with length a multiple of num_vecs."""
        batch, length, channels = x.size()
        # Fold the sequence into groups of num_vecs so the embedding broadcasts.
        grouped = x.view(batch, -1, self.num_vecs, channels) + self.pos_emb
        return grouped.view(batch, length, channels)
def build_candidate_set(documents: List[Document], target: str) -> List[Union[(Span, Relation)]]:
    """Collect every `target` annotation across all documents into one flat list."""
    collected = []
    for document in documents:
        for annotation in document.annotations.values():
            if target in annotation:
                collected.extend(annotation[target])
    return collected
def get_random_port():
    """Pick a pseudo-random port in [10000, 20000] without disturbing the global RNG state."""
    saved_state = random.getstate()
    random.seed()  # re-seed from system entropy so the pick is independent of prior seeding
    port = random.randint(10000, 20000)
    random.setstate(saved_state)
    return port
# NOTE(review): the decorator had lost its leading '@' (it was a bare call
# statement, so the function was never registered); restored here, along with
# the '@ti.kernel' these taichi tests conventionally put on the inner func.
# The '.0 / .0' literals look garbled by extraction — the asserted value 1000
# implies something like 1e13 / 1e10 + 1e-3; confirm against the upstream test.
@_utils.test(require=ti.extension.data64)
def test_cast_f64():
    """Casting through f64 must keep precision that i32/f32 arithmetic would lose."""
    z = ti.field(ti.i32, shape=())

    @ti.kernel
    def func():
        z[None] = ((ti.cast(.0, ti.f64) / ti.cast(.0, ti.f64)) + 0.001)
    func()
    assert (z[None] == 1000)
def test_pickling_vectorizer():
    """Every vectorizer variant must survive a pickle round-trip with identical params."""
    instances = [HashingVectorizer(), HashingVectorizer(norm='l1'), HashingVectorizer(binary=True), HashingVectorizer(ngram_range=(1, 2)), CountVectorizer(), CountVectorizer(preprocessor=strip_tags), CountVectorizer(analyzer=lazy_analyze), CountVectorizer(preprocessor=strip_tags).fit(JUNK_FOOD_DOCS), CountVectorizer(strip_accents=strip_eacute).fit(JUNK_FOOD_DOCS), TfidfVectorizer(), TfidfVectorizer(analyzer=lazy_analyze), TfidfVectorizer().fit(JUNK_FOOD_DOCS)]
    for orig in instances:
        restored = pickle.loads(pickle.dumps(orig))
        assert (type(restored) == orig.__class__)
        assert (restored.get_params() == orig.get_params())
        # The transform comparison is skipped for HashingVectorizer on PyPy
        # (mirrors the original behavior).
        if not (IS_PYPY and isinstance(orig, HashingVectorizer)):
            assert_allclose_dense_sparse(restored.fit_transform(JUNK_FOOD_DOCS), orig.fit_transform(JUNK_FOOD_DOCS))
def run_python_forward_backward(unit_test_class, test_params):
    """Run forward + backward on the Python module and collect its gradients.

    Returns (script_module, python_output, python_grad_dict); the grad dict
    maps '<name>_grad' to dense grads, or '<name>_grad_indices' /
    '<name>_grad_values' for sparse grads.
    """
    device = test_params.device
    module = test_params.test_instance.constructor(*test_params.test_instance.constructor_args).to(device)
    # Input ordering: differentiable inputs, then targets, then extra args.
    inputs = set_python_tensors_requires_grad(move_python_tensors_to_device([arg_value for (_, arg_value) in test_params.arg_dict['input']], device))
    inputs += move_python_tensors_to_device([arg_value for (_, arg_value) in test_params.arg_dict['target']], device)
    inputs += move_python_tensors_to_device([arg_value for (_, arg_value) in test_params.arg_dict['extra_args']], device)
    torch.manual_seed(0)
    python_output = module(*inputs)
    # NOTE(review): forward is replaced with the identity *after* the real
    # call, so the traced script_module is only a parameter container —
    # presumably used downstream for serialization; confirm with callers.
    module.forward = types.MethodType((lambda self, input: input), module)
    script_module = torch.jit.trace(module, torch.tensor(0))
    python_output.sum().backward()
    python_grad_dict = {}
    for (name, param) in module.named_parameters():
        grad = param.grad
        if grad.is_sparse:
            python_grad_dict[(name + '_grad_indices')] = grad.coalesce().indices()
            python_grad_dict[(name + '_grad_values')] = grad.coalesce().values()
        else:
            python_grad_dict[(name + '_grad')] = grad
    return (script_module, python_output, python_grad_dict)
def GenerateSM61_Simt(manifest, args):
    """Register int8 SIMT GEMM operations (s8 x s8 -> s32 multiply-add) for SM61.

    For every layout/tile combination, both the s32-accumulator output variant
    and the mixed variant (s8 output) are added to the manifest.
    """
    # All four row/column-major A/B combinations, always column-major C.
    layouts = [(LayoutType.ColumnMajor, LayoutType.ColumnMajor, LayoutType.ColumnMajor), (LayoutType.ColumnMajor, LayoutType.RowMajor, LayoutType.ColumnMajor), (LayoutType.RowMajor, LayoutType.ColumnMajor, LayoutType.ColumnMajor), (LayoutType.RowMajor, LayoutType.RowMajor, LayoutType.ColumnMajor)]
    math_instructions = [MathInstruction([1, 1, 4], DataType.s8, DataType.s8, DataType.s32, OpcodeClass.Simt, MathOperation.multiply_add)]
    min_cc = 61
    max_cc = 1024
    alignment_constraints = [1]
    for math_inst in math_instructions:
        tile_descriptions = [TileDescription([128, 128, 32], 2, [4, 2, 1], math_inst, min_cc, max_cc), TileDescription([128, 64, 32], 2, [2, 2, 1], math_inst, min_cc, max_cc), TileDescription([64, 128, 32], 2, [2, 2, 1], math_inst, min_cc, max_cc), TileDescription([64, 64, 32], 2, [2, 1, 1], math_inst, min_cc, max_cc), TileDescription([128, 32, 32], 2, [2, 1, 1], math_inst, min_cc, max_cc), TileDescription([32, 128, 32], 2, [1, 2, 1], math_inst, min_cc, max_cc)]
        # A/B/C/accumulator element types: full s32 output vs. s8 (same-as-A) output.
        data_type = [math_inst.element_a, math_inst.element_b, math_inst.element_accumulator, math_inst.element_accumulator]
        data_type_mixed = [math_inst.element_a, math_inst.element_b, math_inst.element_a, math_inst.element_accumulator]
        CreateGemmOperator(manifest, layouts, tile_descriptions, data_type, alignment_constraints)
        CreateGemmOperator(manifest, layouts, tile_descriptions, data_type_mixed, alignment_constraints)
def print_model_param_nums(model=None):
    """Print the number of trainable parameters of `model` in millions.

    Falls back to a freshly constructed torchvision AlexNet when no model is
    given.
    """
    if model is None:  # identity check; `== None` invokes __eq__ unnecessarily
        model = torchvision.models.alexnet()
    # Only parameters that participate in training are counted.
    total = sum(param.nelement() for param in model.parameters() if param.requires_grad)
    print(' + Number of params: %.4fM' % (total / 1000000.0))
def register_coco_panoptic(name, metadata, image_root, panoptic_root, panoptic_json, instances_json=None):
    """Register a COCO panoptic dataset and its metadata in the global catalogs."""
    def _load():
        # Defer the expensive json loading until the dataset is actually used.
        return load_coco_panoptic_json(panoptic_json, image_root, panoptic_root, metadata)
    DatasetCatalog.register(name, _load)
    MetadataCatalog.get(name).set(panoptic_root=panoptic_root, image_root=image_root, panoptic_json=panoptic_json, json_file=instances_json, evaluator_type='coco_panoptic_seg', ignore_label=255, label_divisor=1000, **metadata)
def compute_rhs(up_hat, bh_hat):
    """Accumulate the quadratic term u_i u_j into the spectral right-hand side `bh_hat`.

    NOTE(review): relies on the module-level work arrays `uiuj`/`uiuj_hat`;
    the padded backward transform (padding_factor=1.5) looks like 3/2-rule
    dealiasing — confirm. Assumes BS.matvec writes through `bi_hat`, a view
    into `bh_hat[0]` — verify against the transform library's API.
    """
    global uiuj, uiuj_hat
    bh_hat.fill(0)
    bi_hat = bh_hat[0]
    ui_hat = up_hat[0]
    # Velocity in physical space on the padded grid.
    uip = ui_hat.backward(padding_factor=1.5)
    uiuj = outer(uip, uip, uiuj)
    uiuj_hat = uiuj.forward(uiuj_hat)
    bi_hat = BS.matvec(uiuj_hat, bi_hat)
    return bh_hat
class EpicFHIRDownloadFiles(VirtualFunctionTool):
    """Declarative spec for a virtual tool that downloads files by their identifiers."""
    name = 'EpicFHIRDownloadFiles'
    summary = 'Download files by their unique identifiers.'
    # Accepted ids are document/record/report identifiers produced by sibling tools.
    parameters: List[ArgParameter] = [{'name': 'file_ids', 'type': 'array', 'description': "The unique identifiers of the files to download. Each should be a valid 'document_id', 'record_id' or 'report_id'.", 'required': True}]
    returns: List[ArgReturn] = [{'name': 'success', 'type': 'boolean', 'description': 'Whether the operation was successful.'}, {'name': 'file_paths', 'type': 'array', 'description': 'The list of local paths of the downloaded files. Returned if the file was successfully downloaded, otherwise empty.'}]
    exceptions: List[ArgException] = [{'name': 'NotFoundException', 'description': "At least one of the 'file_ids' does not exist."}]
def to_tf_space(space):
    """Convert a Theano-backed space into its TF counterpart (recursing into products)."""
    if isinstance(space, TheanoBox):
        return Box(low=space.low, high=space.high)
    if isinstance(space, TheanoDiscrete):
        return Discrete(space.n)
    if isinstance(space, TheanoProduct):
        # Convert each component space recursively.
        return Product([to_tf_space(component) for component in space.components])
    raise NotImplementedError
class PointTarget():
    """Builds per-point classification and regression targets for a point-based tracking head.

    cls values: -1 = ignore, 0 = negative, 1 = positive. delta holds the
    (l, t, r, b) offsets from every point to the target box. Positives and
    negatives are chosen by elliptical inside/outside tests around the
    target center.
    """

    def __init__(self):
        self.points = Point(cfg.POINT.STRIDE, cfg.TRAIN.OUTPUT_SIZE, (cfg.TRAIN.SEARCH_SIZE // 2))

    def __call__(self, target, size, neg=False):
        """Return (cls, delta) of shapes (size, size) and (4, size, size) for `target` (corner box)."""
        cls = ((- 1) * np.ones((size, size), dtype=np.int64))
        delta = np.zeros((4, size, size), dtype=np.float32)

        def select(position, keep_num=16):
            # Randomly subsample an np.where index tuple down to keep_num entries.
            num = position[0].shape[0]
            if (num <= keep_num):
                return (position, num)
            slt = np.arange(num)
            np.random.shuffle(slt)
            slt = slt[:keep_num]
            return (tuple((p[slt] for p in position)), keep_num)
        (tcx, tcy, tw, th) = corner2center(target)
        points = self.points.points
        if neg:
            # Negative sample: only mark points inside the tight ellipse as negatives.
            neg = np.where((((np.square((tcx - points[0])) / np.square((tw / 4))) + (np.square((tcy - points[1])) / np.square((th / 4)))) < 1))
            cls[neg] = 0
            return (cls, delta)
        # Offsets from each point to the box corners: left, top, right, bottom.
        delta[0] = (points[0] - target[0])
        delta[1] = (points[1] - target[1])
        delta[2] = (target[2] - points[0])
        delta[3] = (target[3] - points[1])
        # Positives inside the small ellipse; negatives outside the larger one.
        pos = np.where((((np.square((tcx - points[0])) / np.square((tw / 4))) + (np.square((tcy - points[1])) / np.square((th / 4)))) < 1))
        neg = np.where((((np.square((tcx - points[0])) / np.square((tw / 2))) + (np.square((tcy - points[1])) / np.square((th / 2)))) > 1))
        (pos, pos_num) = select(pos, cfg.TRAIN.POS_NUM)
        (neg, neg_num) = select(neg, (cfg.TRAIN.TOTAL_NUM - cfg.TRAIN.POS_NUM))
        cls[pos] = 1
        cls[neg] = 0
        return (cls, delta)
def stringify_keys(d):
    """Recursively convert non-string keys of dict `d` to strings, in place.

    Keys are converted with str(), falling back to repr(); a key whose
    conversions both fail is dropped. Returns the same (mutated) dict.
    """
    # Snapshot the keys first: deleting entries while iterating d.keys()
    # raises "dictionary changed size during iteration" in Python 3.
    for key in list(d.keys()):
        if isinstance(d[key], dict):
            value = stringify_keys(d[key])
        else:
            value = d[key]
        if (not isinstance(key, str)):
            try:
                d[str(key)] = value
            except Exception:
                try:
                    d[repr(key)] = value
                except Exception:
                    # Unconvertible key: fall through and drop it below.
                    pass
            del d[key]
    return d
def render_pep440_post_branch(pieces):
    """Render TAG[.postN[.dev0]][+/.]gHEX[.dirty] (PEP 440 post-release with branch marker).

    Non-master branches are marked with ".dev0"; the short commit hash and an
    optional ".dirty" flag complete the local version label.
    """
    tag = pieces['closest-tag']
    if not tag:
        # No tag anywhere in history: fall back to the 0.postN exception form.
        parts = ['0.post%d' % pieces['distance']]
        if pieces['branch'] != 'master':
            parts.append('.dev0')
        parts.append('+g%s' % pieces['short'])
        if pieces['dirty']:
            parts.append('.dirty')
        return ''.join(parts)
    parts = [tag]
    if pieces['distance'] or pieces['dirty']:
        parts.append('.post%d' % pieces['distance'])
        if pieces['branch'] != 'master':
            parts.append('.dev0')
        parts.append(plus_or_dot(pieces))
        parts.append('g%s' % pieces['short'])
        if pieces['dirty']:
            parts.append('.dirty')
    return ''.join(parts)
def convolution(_x, k, out_dim, name, stride=1):
    """k x k conv + BN + ReLU with 'same'-style zero padding; layers named `name`.*"""
    pad = (k - 1) // 2
    out = ZeroPadding2D(padding=pad, name=(name + '.pad'))(_x)
    out = Conv2D(out_dim, k, strides=stride, use_bias=False, name=(name + '.conv'))(out)
    out = BatchNormalization(epsilon=1e-05, name=(name + '.bn'))(out)
    return Activation('relu', name=(name + '.relu'))(out)
def read_teacher_score(score_files):
    """Load qid -> did -> score mappings from a comma-separated list of score files.

    Each line is "qid did score" (the score may be wrapped in brackets).
    Returns the nested dict, or None if any listed file is missing.
    """
    teacher_score = collections.defaultdict(dict)
    for file in score_files.split(','):
        if (not os.path.exists(file)):
            logging.info(f'There is no score file:{file}, skip reading the score')
            return None
        # `with` guarantees the handle is closed (the original leaked it).
        with open(file) as f:
            for line in f:
                (qid, did, score) = line.strip().split()
                teacher_score[qid][did] = float(score.strip('[]'))
    return teacher_score
def get_all_examples():
    """Scrape doctest examples from torch and torch.Tensor docstrings into one module source.

    Returns the generated module text: a fixed import preamble followed by one
    wrapper function per attribute whose docstring contains examples.
    """
    blocklist = {'_np'}
    example_file_lines = ['import torch', 'import torch.nn.functional as F', 'import math # type: ignore', 'import numpy # type: ignore', 'import io # type: ignore', 'import itertools # type: ignore', '', 'def preprocess(inp):', ' # type: (torch.Tensor) -> torch.Tensor', ' return inp']
    # The two scans only differ in the scanned object and function-name prefix.
    _append_examples(example_file_lines, torch, 'example_torch_', blocklist)
    _append_examples(example_file_lines, torch.Tensor, 'example_torch_tensor_', blocklist)
    return '\n'.join(example_file_lines)

def _append_examples(lines, obj, prefix, blocklist):
    """Append a `def {prefix}{attr}():` wrapper per attribute of `obj` whose docstring has examples."""
    for fname in dir(obj):
        fn = getattr(obj, fname)
        docstr = inspect.getdoc(fn)
        if (docstr and (fname not in blocklist)):
            e = get_examples_from_docstring(docstr)
            if e:
                lines.append(f'\ndef {prefix}{fname}():')
                lines += e
def get_valid_stats(trainer):
    """Assemble an ordered dict of validation metrics from the trainer's meters."""
    stats = collections.OrderedDict()
    valid_loss = trainer.get_meter('valid_loss').avg
    stats['valid_loss'] = valid_loss
    nll_meter = trainer.get_meter('valid_nll_loss')
    if nll_meter.count > 0:
        # A dedicated NLL meter exists: report it and use it for perplexity.
        nll_loss = nll_meter.avg
        stats['valid_nll_loss'] = nll_loss
    else:
        nll_loss = valid_loss
    stats['valid_ppl'] = get_perplexity(nll_loss)
    stats['num_updates'] = trainer.get_num_updates()
    task_meters = trainer.get_meter('task')
    if task_meters is not None:
        # Flatten any per-task metric values into the stats dict.
        for meter in task_meters.values():
            for (stat_name, stat_val) in meter.vals():
                stats[stat_name] = stat_val
    if hasattr(save_checkpoint, 'best'):
        stats['best'] = min(save_checkpoint.best, stats['valid_loss'])
    return stats
class RealUser(UserSim):
    """Interactive "user simulator" that forwards questions to a real human via stdin."""

    def __init__(self, error_evaluator, bool_undo=True):
        UserSim.__init__(self, error_evaluator)
        self.user_type = 'real'
        # When True, 'undo' is accepted as an answer to yes/no questions.
        self.bool_undo = bool_undo
        self.undo_semantic_units = []

    def get_answer(self, pointer, *args):
        """Prompt until a valid yes/no(/undo)/exit answer; 'y'/'n' are normalized."""
        self.questioned_pointers.append(pointer)
        if self.bool_undo:
            answer = input('Please enter yes(y)/no(n)/undo/exit: ').lower().strip()
            while (answer not in {'yes', 'no', 'exit', 'y', 'n', 'undo'}):
                answer = input('Please enter yes(y)/no(n)/undo/exit: ').lower().strip()
        else:
            answer = input('Please enter yes(y)/no(n)/exit: ').lower().strip()
            while (answer not in {'yes', 'no', 'exit', 'y', 'n'}):
                answer = input('Please enter yes(y)/no(n)/exit: ').lower().strip()
        # Normalize shorthand answers to their long forms.
        if (answer == 'y'):
            answer = 'yes'
        elif (answer == 'n'):
            answer = 'no'
        return answer

    def get_selection(self, pointer, answer_sheet, sel_none_of_above):
        """Prompt for comma-separated option ids until they parse as ints.

        `sel_none_of_above` must be chosen alone if chosen at all (asserted).
        """
        def answer_parsing(answer_str):
            # Returns the int selections, or None when the input is not parseable.
            selections = answer_str.split(', ')
            try:
                selections = [int(sel) for sel in selections]
            except:
                return None
            else:
                assert len(selections)
                if (sel_none_of_above in selections):
                    assert (len(selections) == 1)
                return selections
        answer = input("Please enter the option id(s) delimited by comma ', ': ")
        selections = answer_parsing(answer)
        while (selections is None):
            answer = input("Please enter the option id(s) delimited by comma ', ': ")
            selections = answer_parsing(answer)
        return selections
class TFCLIPPreTrainedModel(metaclass=DummyObject):
    """Import-time placeholder used when the TensorFlow backend is unavailable."""
    _backends = ['tf']

    def __init__(self, *args, **kwargs):
        # Delegates the availability check (and its error) to requires_backends.
        requires_backends(self, ['tf'])
class PerTensorWeightQuantizationTest(BaseKerasFeatureNetworkTest):
    """Checks that per-tensor weight quantization yields exactly one threshold for a conv kernel."""

    def __init__(self, unit_test):
        super().__init__(unit_test, experimental_exporter=True)

    def get_tpc(self):
        """Target platform capabilities with per-channel weight thresholds disabled."""
        tp = generate_test_tp_model({'weights_per_channel_threshold': False})
        return generate_keras_tpc(name='per_tensor_weight_quantization', tp_model=tp)

    def create_networks(self):
        """Minimal network under test: a single Conv2D."""
        inputs = layers.Input(shape=self.get_input_shapes()[0][1:])
        x = layers.Conv2D(6, 7)(inputs)
        return keras.Model(inputs=inputs, outputs=x)

    def compare(self, quantized_model, float_model, input_x=None, quantization_info=None):
        """Per-tensor quantization must produce a single kernel threshold."""
        conv_layer = get_layers_from_model_by_type(quantized_model, layers.Conv2D)[0]
        self.unit_test.assertTrue((len(conv_layer.weights_quantizers[KERNEL].get_config()[THRESHOLD]) == 1), f'Expected in per-tensor quantization to have a single threshold but found {len(conv_layer.weights_quantizers[KERNEL].get_config()[THRESHOLD])}')
def weights_init(m):
    """Per-layer weight init: Xavier-normal convs, N(1, 0.02) batch-norm scales with zero bias.

    Intended for use with ``model.apply(weights_init)``. Exact type checks
    (not isinstance) are kept deliberately so subclasses are untouched.
    """
    if ((type(m) == nn.Conv2d) or (type(m) == nn.ConvTranspose2d)):
        # nn.init.xavier_normal (no underscore) is deprecated; use the in-place variant.
        nn.init.xavier_normal_(m.weight.data)
    elif (type(m) == nn.BatchNorm2d):
        m.weight.data.normal_(1.0, 0.02)
        m.bias.data.fill_(0)
def main():
    """Preprocess QMSum: join every query with its full meeting transcript.

    Reads {split}-meetings.jsonl and {split}.jsonl from --input_dir and writes
    {split}.jsonl with {source, query, target} records to --output_dir.
    """
    parser = argparse.ArgumentParser('preprocess')
    parser.add_argument('--input_dir', type=str, help='inp directory', default='../data/')
    parser.add_argument('--output_dir', type=str, help='out directory', default='data/qmsum/preprocessed')
    args = parser.parse_args()
    Path(args.output_dir).mkdir(parents=True, exist_ok=True)
    for split in ['test', 'val', 'train']:
        print(f'''
Processing {split}''')
        input_path_meetings = Path(args.input_dir, f'{split}-meetings.jsonl')
        # meeting_id -> full transcript text (turns joined with spaces).
        meeting_lookup = {}
        print('Loading meetings')
        with open(input_path_meetings) as f:
            for line in tqdm.tqdm(f):
                data = json.loads(line)
                meeting_id = data['meeting_id']
                source = ' '.join(data['meeting_transcripts'])
                meeting_lookup[meeting_id] = source
        input_path = Path(args.input_dir, f'{split}.jsonl')
        output_path = Path(args.output_dir, f'{split}.jsonl')
        print('Loading queries')
        with open(input_path) as inp, open(output_path, 'w') as out:
            for line in tqdm.tqdm(inp):
                data = json.loads(line)
                meeting_id = data['meeting_id']
                source = meeting_lookup[meeting_id]
                query = data['query']
                target = data['answer']
                out.write((json.dumps({'source': source, 'query': query, 'target': target}) + '\n'))
def test_pegasus_newline():
    """rougeLsum must score higher with newline separation (the default) than without it."""
    pred = ['" "a person who has such a video needs to immediately give it to the investigators," prosecutor says .<n> "it is a very disturbing scene," editor-in-chief of bild online tells "erin burnett: outfront" ']
    tgt = [' Marseille prosecutor says "so far no videos were used in the crash investigation" despite media reports . Journalists at Bild and Paris Match are "very confident" the video clip is real, an editor says . Andreas Lubitz had informed his Lufthansa training school of an episode of severe depression, airline says .']
    # Same inputs scored without vs. with sentence-newline splitting.
    prev_score = calculate_rouge(pred, tgt, rouge_keys=['rougeLsum'], newline_sep=False)['rougeLsum']
    new_score = calculate_rouge(pred, tgt, rouge_keys=['rougeLsum'])['rougeLsum']
    assert (new_score > prev_score)
class HRModule(BaseModule):
    """One HRNet stage: parallel branches at different resolutions plus a fuse step.

    Each branch stacks `num_blocks[i]` residual blocks; `_make_fuse_layers`
    builds the upsample/downsample paths that exchange information between
    the resolutions in forward().
    """

    def __init__(self, num_branches, blocks, num_blocks, in_channels, num_channels, multiscale_output=True, with_cp=False, conv_cfg=None, norm_cfg=dict(type='BN', requires_grad=True), block_init_cfg=None, init_cfg=None):
        super(HRModule, self).__init__(init_cfg)
        self.block_init_cfg = block_init_cfg
        self._check_branches(num_branches, num_blocks, in_channels, num_channels)
        self.in_channels = in_channels
        self.num_branches = num_branches
        self.multiscale_output = multiscale_output
        self.norm_cfg = norm_cfg
        self.conv_cfg = conv_cfg
        self.with_cp = with_cp
        self.branches = self._make_branches(num_branches, blocks, num_blocks, num_channels)
        self.fuse_layers = self._make_fuse_layers()
        self.relu = nn.ReLU(inplace=False)

    def _check_branches(self, num_branches, num_blocks, in_channels, num_channels):
        """Validate that every per-branch config list has `num_branches` entries."""
        if (num_branches != len(num_blocks)):
            error_msg = f'NUM_BRANCHES({num_branches}) <> NUM_BLOCKS({len(num_blocks)})'
            raise ValueError(error_msg)
        if (num_branches != len(num_channels)):
            error_msg = f'NUM_BRANCHES({num_branches}) <> NUM_CHANNELS({len(num_channels)})'
            raise ValueError(error_msg)
        if (num_branches != len(in_channels)):
            error_msg = f'NUM_BRANCHES({num_branches}) <> NUM_INCHANNELS({len(in_channels)})'
            raise ValueError(error_msg)

    def _make_one_branch(self, branch_index, block, num_blocks, num_channels, stride=1):
        """Stack the blocks of one branch; adds a 1x1 conv downsample when shapes change."""
        downsample = None
        if ((stride != 1) or (self.in_channels[branch_index] != (num_channels[branch_index] * block.expansion))):
            downsample = nn.Sequential(build_conv_layer(self.conv_cfg, self.in_channels[branch_index], (num_channels[branch_index] * block.expansion), kernel_size=1, stride=stride, bias=False), build_norm_layer(self.norm_cfg, (num_channels[branch_index] * block.expansion))[1])
        layers = []
        layers.append(block(self.in_channels[branch_index], num_channels[branch_index], stride, downsample=downsample, with_cp=self.with_cp, norm_cfg=self.norm_cfg, conv_cfg=self.conv_cfg, init_cfg=self.block_init_cfg))
        # Track the branch's channel count for subsequent blocks and the fuse layers.
        self.in_channels[branch_index] = (num_channels[branch_index] * block.expansion)
        for i in range(1, num_blocks[branch_index]):
            layers.append(block(self.in_channels[branch_index], num_channels[branch_index], with_cp=self.with_cp, norm_cfg=self.norm_cfg, conv_cfg=self.conv_cfg, init_cfg=self.block_init_cfg))
        return Sequential(*layers)

    def _make_branches(self, num_branches, block, num_blocks, num_channels):
        """Build one residual-block branch per resolution."""
        branches = []
        for i in range(num_branches):
            branches.append(self._make_one_branch(i, block, num_blocks, num_channels))
        return ModuleList(branches)

    def _make_fuse_layers(self):
        """Build cross-resolution fusion: 1x1 conv + upsample for j > i, strided 3x3 convs for j < i."""
        if (self.num_branches == 1):
            return None
        num_branches = self.num_branches
        in_channels = self.in_channels
        fuse_layers = []
        # With multiscale_output disabled only the highest resolution is produced.
        num_out_branches = (num_branches if self.multiscale_output else 1)
        for i in range(num_out_branches):
            fuse_layer = []
            for j in range(num_branches):
                if (j > i):
                    fuse_layer.append(nn.Sequential(build_conv_layer(self.conv_cfg, in_channels[j], in_channels[i], kernel_size=1, stride=1, padding=0, bias=False), build_norm_layer(self.norm_cfg, in_channels[i])[1], Upsample(scale_factor=(2 ** (j - i)), mode='bilinear', align_corners=False)))
                elif (j == i):
                    # Identity path: same resolution needs no transform.
                    fuse_layer.append(None)
                else:
                    conv_downsamples = []
                    for k in range((i - j)):
                        if (k == ((i - j) - 1)):
                            # Last downsample step maps to the target channel count, no ReLU.
                            conv_downsamples.append(nn.Sequential(build_conv_layer(self.conv_cfg, in_channels[j], in_channels[i], kernel_size=3, stride=2, padding=1, bias=False), build_norm_layer(self.norm_cfg, in_channels[i])[1]))
                        else:
                            conv_downsamples.append(nn.Sequential(build_conv_layer(self.conv_cfg, in_channels[j], in_channels[j], kernel_size=3, stride=2, padding=1, bias=False), build_norm_layer(self.norm_cfg, in_channels[j])[1], nn.ReLU(inplace=False)))
                    fuse_layer.append(nn.Sequential(*conv_downsamples))
            fuse_layers.append(nn.ModuleList(fuse_layer))
        return nn.ModuleList(fuse_layers)

    def forward(self, x):
        """x: list of per-branch tensors; returns the fused list (length 1 if multiscale_output is False)."""
        if (self.num_branches == 1):
            return [self.branches[0](x[0])]
        for i in range(self.num_branches):
            x[i] = self.branches[i](x[i])
        x_fuse = []
        for i in range(len(self.fuse_layers)):
            y = 0
            for j in range(self.num_branches):
                if (i == j):
                    y += x[j]
                elif (j > i):
                    # Lower-resolution input: transform then resize up to branch i.
                    y = (y + resize(self.fuse_layers[i][j](x[j]), size=x[i].shape[2:], mode='bilinear', align_corners=False))
                else:
                    y += self.fuse_layers[i][j](x[j])
            x_fuse.append(self.relu(y))
        return x_fuse
def _validate_loaded_sparse_tensors():
    """Run torch's COO invariant checks on every queued tensor; always clear the queue."""
    try:
        for tensor in _sparse_tensors_to_validate:
            torch._validate_sparse_coo_tensor_args(tensor._indices(), tensor._values(), tensor.size())
    finally:
        # Clear even when validation raises so stale entries never linger.
        _sparse_tensors_to_validate.clear()
def save_json(data, json_path, mode='w', encoding='utf-8'):
    """Serialize `data` as JSON (non-ASCII preserved) to `json_path`, creating parent dirs.

    The former exists()-then-makedirs() pair was racy and the renamed local
    shadowed the builtin `dir`; a stray debug print of the directory is dropped.
    """
    parent_dir = os.path.dirname(os.path.abspath(json_path))
    os.makedirs(parent_dir, exist_ok=True)
    with open(json_path, mode=mode, encoding=encoding) as f:
        f.write(json.dumps(data, ensure_ascii=False))
class ClientNode2(Node):
    """Mininet node that launches an OpenVPN client during configuration."""

    def config(self, **params):
        super(ClientNode2, self).config(**params)
        # Start the client in the background with its dedicated config file.
        self.cmd('openvpn openvpn-client2.conf &')

    def terminate(self):
        # No extra cleanup beyond the base class; the backgrounded openvpn
        # process is not explicitly killed here.
        super(ClientNode2, self).terminate()
def get_tree_node_with_kinds(tree, kinds):
    """Yield every node of `tree` (pre-order) whose `type` is in `kinds`.

    Iterative tree-sitter cursor walk: try the first child, else the next
    sibling, else retrace upward until a parent has an unvisited sibling or
    the root is reached.
    """
    cursor = tree.walk()
    reached_root = False
    while (reached_root == False):
        if (cursor.node.type in kinds):
            (yield cursor.node)
        if cursor.goto_first_child():
            continue
        if cursor.goto_next_sibling():
            continue
        # No child or sibling: climb back up looking for the next sibling.
        retracing = True
        while retracing:
            if (not cursor.goto_parent()):
                # Cannot go up any further: traversal is complete.
                retracing = False
                reached_root = True
            if cursor.goto_next_sibling():
                retracing = False
class ColumnReductionOp():
    """Emits (and parameterizes) a CUTLASS epilogue-visitor column-reduction op.

    `emit` renders the C++ type alias from `Template`; the nested ctypes
    `_Arguments` struct mirrors the kernel-side argument layout.
    """
    Template = '\n${visitor}\n\nusing ${instance_name} = cutlass::epilogue::threadblock::VisitorOpColumnReduction<\n    cutlass::gemm::GemmShape<${threadblock_shape_m}, ${threadblock_shape_n}, ${threadblock_shape_k}>,\n    ${element_accumulator}, ${element_reduction}, ${element_reduction_accumulator},\n    ${output_tile_iterator}, ${visitor_name}>;\n'
    # Monotonic counter so every instance gets a unique C++ alias name.
    counter = 0

    def __init__(self, element_accumulator, element_reduction, element_reduction_accumulator, visitor) -> None:
        self.element_accumulator = element_accumulator
        self.element_reduction = element_reduction
        self.element_reduction_accumulator = element_reduction_accumulator
        self.visitor = visitor
        self.instance_name = ('ColumnReductionOp%d' % ColumnReductionOp.counter)
        ColumnReductionOp.counter += 1

        # Defined inside __init__ because the field layout depends on the
        # wrapped visitor's own argument type.
        class _Arguments(ctypes.Structure):
            _fields_ = [('reduction_ptr', ctypes.c_void_p), ('batch_stride', ctypes.c_longlong), ('visitor_arg', self.visitor.argument_type)]

            def __init__(self, reduction_ptr, visitor_arg, batch_stride=0) -> None:
                self.reduction_ptr = reduction_ptr
                self.batch_stride = batch_stride
                self.visitor_arg = visitor_arg
        self.argument_type = _Arguments

    def emit(self, operation):
        """Render the C++ instantiation for `operation` (includes the nested visitor's code)."""
        values = {'instance_name': self.instance_name, 'threadblock_shape_m': str(operation.tile_description.threadblock_shape[0]), 'threadblock_shape_n': str(operation.tile_description.threadblock_shape[1]), 'threadblock_shape_k': str(operation.tile_description.threadblock_shape[2]), 'element_accumulator': DataTypeTag[self.element_accumulator], 'element_reduction': DataTypeTag[self.element_reduction], 'element_reduction_accumulator': DataTypeTag[self.element_reduction_accumulator], 'output_tile_iterator': (operation.procedural_name() + '_default::Epilogue::OutputTileIterator'), 'visitor_name': self.visitor.instance_name, 'visitor': self.visitor.emit(operation)}
        return SubstituteTemplate(self.Template, values)
def read_chunks(file, size=io.DEFAULT_BUFFER_SIZE):
    """Yield successive chunks of at most `size` from `file` until EOF."""
    chunk = file.read(size)
    while chunk:
        yield chunk
        chunk = file.read(size)
def unpack_kwargs(kwarg_keys: List[str], flat_args: List[Any]) -> Tuple[(List[Any], Dict[(str, Any)])]:
    """Split `flat_args` back into positional args and the kwargs packed onto its tail."""
    n_kwargs = len(kwarg_keys)
    if not n_kwargs:
        return (flat_args, {})
    # The last n_kwargs entries pair up positionally with kwarg_keys.
    kwargs = dict(zip(kwarg_keys, flat_args[-n_kwargs:]))
    return (flat_args[:-n_kwargs], kwargs)
def is_dominating(G, dom, focus=None):
    """Return whether `dom` dominates `focus` (all of G when focus is None) in graph G."""
    remaining = set(G) if focus is None else set(focus)
    for vertex in dom:
        if not remaining:
            # Everything is already dominated; no need to scan further.
            return True
        # Closed neighborhoods: a vertex dominates itself too.
        remaining.difference_update(G.neighbor_iterator(vertex, closed=True))
    return not remaining
class AlgebraicGeneratorRelation(SageObject):
    """Records a polynomial relation between two algebraic generators over a common parent."""

    def __init__(self, child1, child1_poly, child2, child2_poly, parent):
        # child*_poly express each child generator inside the combined parent.
        self.child1 = child1
        self.child1_poly = child1_poly
        self.child2 = child2
        self.child2_poly = child2_poly
        self.parent = parent
# The decorators had lost their '@pytest.mark' prefix (bare '.parametrize'
# lines are a syntax error); restored here.
@pytest.mark.parametrize('p', [1, 2, np.inf])
@pytest.mark.parametrize('size', [50, 100, None])
def test_ensure_spacing_batch_processing(p, size):
    """Batched ensure_spacing (min_split_size) must match the unbatched reference."""
    coord = np.random.randn(100, 2)
    spacing = np.median(pdist(coord, metric=minkowski, p=p))
    expected = ensure_spacing(coord, spacing=spacing, p_norm=p)
    assert np.array_equal(ensure_spacing(coord, spacing=spacing, p_norm=p, min_split_size=size), expected)
def print_uniques(csv, cols=['alg', 'bs_train', 'model', 'dataset', 'seed', 'step_every']):
    """Print the unique values (and how many there are) of `cols` in the csv file."""
    frame = pd.read_csv(csv)
    uniques = {column: pd.unique(frame[column]) for column in cols}
    n_uniques = {column: len(values) for (column, values) in uniques.items()}
    print(f'-I- Describing csv: {csv}')
    print(f'-I- Analyzed cols: {cols}')
    print('-I- length_uniques:')
    print(n_uniques)
    print('-I- uniques:')
    print(uniques)
def _lfc(content, equality=False):
    """Seed and delegate fixed-content list generation to `_list_fixed_content`.

    NOTE(review): appears to be a combinatorial-generation helper (necklace /
    word enumeration style); the setup builds the working array `a`, consumes
    one occurrence of the first value, and tracks still-available values in a
    reversed-index DoublyLinkedList — confirm semantics against the callee.
    """
    content = list(content)
    a = ([0] * sum(content))
    # a[0] is implicitly the first value, so one occurrence is consumed up front.
    content[0] -= 1
    k = len(content)
    rng_k = list(range(k))
    rng_k.reverse()
    dll = DoublyLinkedList(rng_k)
    if (not content[0]):
        # First value exhausted: hide it from the candidate list.
        dll.hide(0)
    (yield from _list_fixed_content(a, content, 2, 1, k, dll, equality=equality))
# NOTE(review): the decorator line had lost its leading '@' (a bare call is a
# no-op and leaves the line below syntactically orphaned); restored as a
# decorator application. The name '_numpy_output' may itself be truncated
# (e.g. from 'compare_numpy_output') — confirm against the original file.
@_numpy_output(check_dtype=True)
def test_ufunc_heaviside_cc(A: dace.complex64[10], B: dace.complex64[10]):
    return np.heaviside(A, B)
def run_once(func):
    """Decorator: run `func` on the first call only; later calls are no-ops returning None.

    If the first call raises, has_run stays False so the call is retried.
    """
    import functools  # local import keeps this edit self-contained

    # The original had a bare `(func)` statement here — clearly a lost
    # `@functools.wraps(func)` decorator; restored so the wrapper keeps
    # func's name and docstring.
    @functools.wraps(func)
    def wrapper(*args, **kwargs):
        if (not wrapper.has_run):
            result = func(*args, **kwargs)
            wrapper.has_run = True
            return result
    wrapper.has_run = False
    return wrapper
# The marks had lost their '@pytest.mark' prefix (bare '.fast'/'.parametrize'
# lines are a syntax error); restored here.
@pytest.mark.fast
@pytest.mark.parametrize('length,max_seq_length,eos_token_id,expected_token_ids,expected_token_type_ids', [(2, 6, (- 1), [0, 1, (- 1), (- 1), (- 1), (- 1)], [0, (- 1), 2, 2, 2, 2]), (0, 6, (- 1), [(- 1), (- 1), (- 1), (- 1), (- 1), (- 1)], ([TokenTypeIds.PADDING] * 6))])
def test_pad(tokenized_line: TokenizedLine, expected_token_ids: List[int], expected_token_type_ids: List[int]):
    """pad() must extend token ids and type ids to the configured max length.

    NOTE(review): the extra parametrize fields are presumably consumed
    indirectly by the `tokenized_line` fixture — confirm the conftest wiring.
    """
    assert isinstance(tokenized_line, TokenizedSequence)
    tokenized_line.pad()
    assert (tokenized_line.dump_token_ids() == expected_token_ids)
    assert (tokenized_line.dump_token_type_ids() == expected_token_type_ids)
class AbstractAdapter(GaugeAdapter):
    """Shared parser for benchmarks that print 'RESULT-<criterion>: <ms>' lines.

    A DataPoint is completed (and a new one started) whenever a measurement
    reports is_total().
    """
    __test__ = False
    re_time = re_compile('RESULT-(\\w+):\\s*(\\d+\\.\\d+)')

    def __init__(self, include_faulty, executor):
        super(AbstractAdapter, self).__init__(include_faulty, executor)
        self._other_error_definitions = [re_compile('FAILED')]

    def _make_measurement(self, run_id, invocation, iteration, value, criterion):
        """Wrap one parsed value in a Measurement (unit: milliseconds)."""
        return Measurement(invocation, iteration, value, 'ms', run_id, criterion)

    def parse_data(self, data, run_id, invocation):
        """Parse benchmark output into DataPoints; raise on errors or empty output."""
        iteration = 1
        data_points = []
        current = DataPoint(run_id)
        for line in data.split('\n'):
            if self.check_for_error(line):
                raise ResultsIndicatedAsInvalid('Output of bench program indicated error.')
            # BUG FIX: this referenced `MyTestAdapter.re_time` — an unrelated
            # class — instead of the polymorphic class attribute.
            match = self.re_time.match(line)
            if match:
                measure = self._make_measurement(run_id, invocation, iteration, float(match.group(2)), match.group(1))
                current.add_measurement(measure)
                if measure.is_total():
                    data_points.append(current)
                    current = DataPoint(run_id)
                    iteration += 1
        if (not data_points):
            raise OutputNotParseable(data)
        return data_points
def test_classify_instance_weighting(create_pool_classifiers):
    """classify_with_ds in 'weighting' mode should label all three samples as class 1."""
    n_samples = 3
    query = np.ones((n_samples, 2))
    des_test = BaseDES(create_pool_classifiers + create_pool_classifiers, mode='weighting')
    des_test.classes_ = np.array([0, 1])
    des_test.n_classes_ = 2
    # Same fixed competence vector for every sample.
    des_test.estimate_competence = MagicMock(return_value=np.tile([0.55, 1.0, 0.2, 0.6, 0.75, 0.3], (3, 1)))
    predictions = [clf.predict(query)[0] for clf in des_test.pool_classifiers]
    predicted_label = des_test.classify_with_ds(np.tile(predictions, (3, 1)))
    assert (np.allclose(predicted_label, 1) and (predicted_label.size == 3))
def aggregate_passage_embeddings_in_run(run: dict, p_emb_dict: dict, aggregation_mode: str):
    """Aggregate per-passage embeddings for every query in `run` with the given mode."""
    if aggregation_mode in ('vrrf', 'vranks', 'vscores'):
        # These modes need the retrieval scores alongside the embeddings.
        run_p_embs = aggregate_run_in_p_with_scores(run, p_emb_dict)
    else:
        run_p_embs = aggregate_p_in_run(run, p_emb_dict)
    return {q_id: aggregate_ids_with_embeddings(retrieved_lists, aggregation_mode) for (q_id, retrieved_lists) in run_p_embs.items()}
def _clean_args(*args):
newargs = []
for chk in args:
if (chk is None):
break
newargs.append(chk)
return newargs |
class ProtobufModel(torch.nn.Module):
    """nn.Module wrapper around a caffe2 (predict_net, init_net) pair.

    Each instance runs in its own uniquely named caffe2 Workspace; forward()
    feeds the uninitialized external inputs, runs the net, and returns the
    fetched outputs as torch tensors on inferred devices.
    """
    # Monotonic counter used to give every instance a unique workspace name.
    _ids = count(0)

    def __init__(self, predict_net, init_net):
        logger.info(f'Initializing ProtobufModel for: {predict_net.name} ...')
        super().__init__()
        assert isinstance(predict_net, caffe2_pb2.NetDef)
        assert isinstance(init_net, caffe2_pb2.NetDef)
        self.ws_name = '__tmp_ProtobufModel_{}__'.format(next(self._ids))
        self.net = core.Net(predict_net)
        logger.info('Running init_net once to fill the parameters ...')
        with ScopedWS(self.ws_name, is_reset=True, is_cleanup=False) as ws:
            ws.RunNetOnce(init_net)
            # External inputs not produced by init_net must be fed at forward time.
            uninitialized_external_input = []
            for blob in self.net.Proto().external_input:
                if (blob not in ws.Blobs()):
                    uninitialized_external_input.append(blob)
                    ws.CreateBlob(blob)
            ws.CreateNet(self.net)
        # Deduplicates RuntimeError messages logged from forward().
        self._error_msgs = set()
        self._input_blobs = uninitialized_external_input

    def _infer_output_devices(self, inputs):
        """Infer a device string per external output from the input tensors' devices."""
        def _get_device_type(torch_tensor):
            assert (torch_tensor.device.type in ['cpu', 'cuda'])
            assert (torch_tensor.device.index == 0)
            return torch_tensor.device.type
        predict_net = self.net.Proto()
        input_device_types = {(name, 0): _get_device_type(tensor) for (name, tensor) in zip(self._input_blobs, inputs)}
        device_type_map = infer_device_type(predict_net, known_status=input_device_types, device_name_style='pytorch')
        (ssa, versions) = core.get_ssa(predict_net)
        versioned_outputs = [(name, versions[name]) for name in predict_net.external_output]
        output_devices = [device_type_map[outp] for outp in versioned_outputs]
        return output_devices

    def forward(self, inputs):
        """Feed `inputs` (ordered like self._input_blobs), run the net, return output tensors."""
        assert (len(inputs) == len(self._input_blobs)), f"Length of inputs ({len(inputs)}) doesn't match the required input blobs: {self._input_blobs}"
        with ScopedWS(self.ws_name, is_reset=False, is_cleanup=False) as ws:
            for (b, tensor) in zip(self._input_blobs, inputs):
                ws.FeedBlob(b, tensor)
            try:
                ws.RunNet(self.net.Proto().name)
            except RuntimeError as e:
                # Log each distinct failure once, then continue with partial results.
                if (not (str(e) in self._error_msgs)):
                    self._error_msgs.add(str(e))
                    logger.warning('Encountered new RuntimeError: \n{}'.format(str(e)))
                logger.warning('Catch the error and use partial results.')
            c2_outputs = [ws.FetchBlob(b) for b in self.net.Proto().external_output]
            # Overwrite output blobs with placeholder strings so stale data
            # cannot leak into the next run.
            for b in self.net.Proto().external_output:
                ws.FeedBlob(b, f'{b}, a C++ native class of type nullptr (uninitialized).')
        # Device inference is only needed when some input is not on CPU.
        output_devices = (self._infer_output_devices(inputs) if any(((t.device.type != 'cpu') for t in inputs)) else ['cpu' for _ in self.net.Proto().external_output])
        outputs = []
        for (name, c2_output, device) in zip(self.net.Proto().external_output, c2_outputs, output_devices):
            if (not isinstance(c2_output, np.ndarray)):
                raise RuntimeError('Invalid output for blob {}, received: {}'.format(name, c2_output))
            outputs.append(torch.tensor(c2_output).to(device=device))
        return tuple(outputs)
def fst_transition(fst_handle, states, inputs):
    """Advance FST ``states`` on ``inputs`` via the custom TensorFlow op."""
    tf_mod = get_tf_mod()
    return tf_mod.open_fst_transition(handle=fst_handle, states=states, inputs=inputs)
class Mapping():
    """Links a physical property to the map object that produces it.

    NOTE(review): ``prop`` is defined twice (the second ``def`` shadows the
    first), and ``reciprocal``/``reciprocal_prop``/``clear_props`` access
    ``self.prop`` without calling it — this block appears to have lost its
    ``@property`` / ``@prop.setter`` decorators. Confirm against the upstream
    source before relying on it; left byte-identical here.
    """
    def __init__(self, short_details=None):
        # Free-form description, interpolated into the generated property's
        # docstring by get_property().
        self.short_details = short_details
    def prop(self):
        # Getter: the PhysicalProperty bound to this mapping, or None if unset.
        # NOTE(review): presumably decorated with @property originally.
        return getattr(self, '_prop', None)
    def prop(self, value):
        # Setter: validates the type and back-links the property to this
        # mapping. NOTE(review): presumably @prop.setter originally; as written
        # it shadows the getter above.
        value = validate_type('prop', value, PhysicalProperty, cast=False)
        value._mapping = self
        self._prop = value
    def reciprocal(self):
        # The mapping of the reciprocal property, if one exists
        # (implicitly returns None otherwise).
        if (self.prop and self.prop.reciprocal):
            return self.prop.reciprocal.mapping
    def reciprocal_prop(self):
        # The reciprocal PhysicalProperty itself, if one exists.
        if (self.prop and self.prop.reciprocal):
            return self.prop.reciprocal
    def clear_props(self, instance):
        # Delete this mapping's property, its reciprocal property, and the
        # reciprocal's mapping from `instance`, so stale values don't linger.
        for prop in (self.prop, self.reciprocal_prop, self.reciprocal):
            if (prop is not None):
                delattr(instance, prop.name)
    def get_property(scope):
        # Build the `property` descriptor to be attached to a model class.
        # Note: `scope` plays the role of `self` here (unconventional name).
        doc = f'''{scope.short_details}
        Returns
        -------
        SimPEG.maps.IdentityMap
        '''
        def fget(self):
            # Return the stored map if set; otherwise derive it from the
            # reciprocal's map (wrapped in ReciprocalMap), or None.
            value = getattr(self, f'_{scope.name}', None)
            if (value is not None):
                return value
            if (scope.reciprocal is None):
                return None
            reciprocal = getattr(self, f'_{scope.reciprocal.name}', None)
            if (reciprocal is None):
                return None
            return (ReciprocalMap() * reciprocal)
        def fset(self, value):
            # Validate, then clear any cached derived properties before
            # storing the new map.
            if (value is not None):
                value = validate_type(scope.name, value, IdentityMap, cast=False)
            scope.clear_props(self)
            setattr(self, f'_{scope.name}', value)
        def fdel(self):
            # Deleting resets the stored map to None rather than removing
            # the attribute.
            setattr(self, f'_{scope.name}', None)
        return property(fget=fget, fset=fset, fdel=fdel, doc=doc)
def score_dependencies(args):
    """Parse with a constituency model, convert to dependencies, and score.

    Reads the gold file named by ``args['eval_file']``, reparses it with a
    stanza pipeline whose depparse step is the constituency-to-dependency
    converter, then prints named-dependency and overall parser scores.
    Only English is supported by the conversion model.
    """
    if args['lang'] != 'en':
        raise ValueError('Converting and scoring dependencies is currently only supported for English')
    constituency_package = 'wsj_bert'
    packages = {'pos': args['retag_package'], 'depparse': 'converter', 'constituency': constituency_package}
    pipeline = stanza.Pipeline(lang=args['lang'], tokenize_pretokenized=True, package=packages, processors='tokenize, pos, constituency, depparse')
    gold_doc = CoNLL.conll2doc(args['eval_file'])
    pred_doc = pipeline(gold_doc)
    print(('Processed %d sentences' % len(pred_doc.sentences)))
    # Reload the gold doc from disk: the pipeline may have annotated the
    # copy that was passed in, so a fresh read keeps the comparison honest.
    gold_doc = CoNLL.conll2doc(args['eval_file'])
    scorer.score_named_dependencies(pred_doc, gold_doc)
    with tempfile.TemporaryDirectory() as tempdir:
        pred_path = os.path.join(tempdir, 'converted.conll')
        CoNLL.write_doc2conll(pred_doc, pred_path)
        (_, _, score) = scorer.score(pred_path, args['eval_file'])
    print('Parser score:')
    print('{} {:.2f}'.format(constituency_package, (score * 100)))
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.