code
stringlengths
101
5.91M
def _generate_results_unreliable(input_stream, input_queue, worker_output_queue, output_queue, num_workers, max_outstanding_unused):
    """Pump (index, value) items from input_stream to workers via input_queue and
    forward worker results to output_queue, multiplexing the three queue fds with
    epoll.

    Results may come back in any order ("unreliable"); each input is tagged with
    a packed little-endian 64-bit index cookie so consumers can re-associate
    them.  Terminates after every worker has sent an EndSentinel, then drains
    stragglers via _do_cleanup and emits a final EndSentinel.
    max_outstanding_unused is accepted but ignored in this variant.
    """
    next_in_item = next(input_stream, EndSentinel)
    inputs_remain = (next_in_item is not EndSentinel)
    received_messages = deque()
    pack_cookie = struct.pack  # hoisted attribute lookup for the hot loop
    input_fd = input_queue.put_fd()
    worker_output_fd = worker_output_queue.get_fd()
    output_fd = output_queue.put_fd()
    poller = select.epoll()
    poller.register(input_fd, select.EPOLLOUT)
    poller.register(worker_output_fd, select.EPOLLIN)
    poller.register(output_fd, 0)  # enabled only once there is something to emit
    num_outstanding = 0
    sentinels_received = 0
    while ((sentinels_received < num_workers) and (inputs_remain or received_messages)):
        # Cheap non-blocking readiness checks first; fall back to a blocking
        # epoll wait only when none of the three directions is ready.
        evts = ((input_fd if (inputs_remain and (not input_queue.full())) else 0), (output_fd if ((not output_queue.full()) and len(received_messages)) else 0), (worker_output_fd if (not worker_output_queue.empty()) else 0))
        if all(((evt == 0) for evt in evts)):
            evts = dict(poller.poll())
        if (output_fd in evts):
            output_queue.put(received_messages.pop())
            if (len(received_messages) == 0):
                # Nothing left to emit: stop watching for output writability.
                poller.modify(output_fd, 0)
        if (worker_output_fd in evts):
            for (receive_time, maybe_sentinel) in worker_output_queue.get_multiple():
                if (maybe_sentinel is EndSentinel):
                    sentinels_received += 1
                    continue
                received_messages.appendleft(maybe_sentinel[1])
                num_outstanding -= 1
                # A real result arrived: re-arm output writability.
                poller.modify(output_fd, select.EPOLLOUT)
        if (input_fd in evts):
            (send_idx, send_value) = next_in_item
            input_queue.put((pack_cookie('<Q', send_idx), send_value))
            next_in_item = next(input_stream, EndSentinel)
            inputs_remain = (next_in_item is not EndSentinel)
            num_outstanding += 1
            if (not inputs_remain):
                poller.modify(input_fd, 0)
    # Drain any remaining in-flight results and worker sentinels.
    for value in _do_cleanup(input_queue, worker_output_queue, num_workers, sentinels_received, num_outstanding):
        output_queue.put(value)
    output_queue.put(EndSentinel)
class _BrokenModel(_PseudoTrainableQuadratic):
    """Test double whose optimize step always fails with _Whoops."""

    def optimize(self, dataset: Dataset) -> NoReturn:
        raise _Whoops
class SumoVehSignal(object):
    """Bit flags describing SUMO vehicle signal states (one bit per signal)."""

    BLINKER_RIGHT = 0x0001
    BLINKER_LEFT = 0x0002
    BLINKER_EMERGENCY = 0x0004
    BRAKELIGHT = 0x0008
    FRONTLIGHT = 0x0010
    FOGLIGHT = 0x0020
    HIGHBEAM = 0x0040
    BACKDRIVE = 0x0080
    WIPER = 0x0100
    DOOR_OPEN_LEFT = 0x0200
    DOOR_OPEN_RIGHT = 0x0400
    EMERGENCY_BLUE = 0x0800
    EMERGENCY_RED = 0x1000
    EMERGENCY_YELLOW = 0x2000
def find_input_arraynode(graph, edge):
    """Return the source AccessNode at the head of edge's memlet path.

    Raises RuntimeError when the path does not originate at an AccessNode.
    """
    head = graph.memlet_path(edge)[0]
    source_node = head.src
    if isinstance(source_node, nd.AccessNode):
        return source_node
    raise RuntimeError('Input array node not found for memlet ' + str(edge.data))
class YosoConfig(PretrainedConfig):
    """Configuration for a YOSO model (LSH-sampled attention).

    Stores the model hyperparameters as attributes and forwards the special
    token ids plus any extra kwargs to PretrainedConfig.
    """
    model_type = 'yoso'

    def __init__(self, vocab_size=50265, hidden_size=768, num_hidden_layers=12, num_attention_heads=12, intermediate_size=3072, hidden_act='gelu', hidden_dropout_prob=0.1, attention_probs_dropout_prob=0.1, max_position_embeddings=4096, type_vocab_size=1, initializer_range=0.02, layer_norm_eps=1e-12, position_embedding_type='absolute', use_expectation=True, hash_code_len=9, num_hash=64, conv_window=None, use_fast_hash=True, lsh_backward=True, pad_token_id=1, bos_token_id=0, eos_token_id=2, **kwargs):
        super().__init__(pad_token_id=pad_token_id, bos_token_id=bos_token_id, eos_token_id=eos_token_id, **kwargs)
        # Transformer backbone sizes.
        self.vocab_size = vocab_size
        self.max_position_embeddings = max_position_embeddings
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.initializer_range = initializer_range
        self.type_vocab_size = type_vocab_size
        self.layer_norm_eps = layer_norm_eps
        self.position_embedding_type = position_embedding_type
        # YOSO-specific LSH attention knobs.
        self.use_expectation = use_expectation
        self.hash_code_len = hash_code_len
        self.num_hash = num_hash
        self.conv_window = conv_window
        self.use_fast_hash = use_fast_hash
        self.lsh_backward = lsh_backward
# NOTE(review): the decorator's '@array' prefix was lost in extraction; the
# visible suffix matches NumPy's overrides helper, restored below — confirm
# against the original numpy source.
@array_function_from_c_func_and_dispatcher(_multiarray_umath.dot)
def dot(a, b, out=None):
    """Dispatcher for np.dot: returns the arguments considered for
    __array_function__ dispatch (a, b, and the optional out array)."""
    return (a, b, out)
class SwagProcessor(DataProcessor):
    """Processor for the SWAG multiple-choice dataset (train/val/test CSVs)."""

    def get_train_examples(self, data_dir):
        """Read train.csv and build training examples."""
        logger.info('LOOKING AT {} train'.format(data_dir))
        return self._create_examples(self._read_csv(os.path.join(data_dir, 'train.csv')), 'train')

    def get_dev_examples(self, data_dir):
        """Read val.csv and build dev examples."""
        logger.info('LOOKING AT {} dev'.format(data_dir))
        return self._create_examples(self._read_csv(os.path.join(data_dir, 'val.csv')), 'dev')

    def get_test_examples(self, data_dir):
        """Deliberately unsupported: SWAG's test split ships without labels, so
        the raise makes the following return unreachable by design."""
        logger.info('LOOKING AT {} dev'.format(data_dir))
        raise ValueError('For swag testing, the input file does not contain a label column. It can not be tested in current codesetting!')
        return self._create_examples(self._read_csv(os.path.join(data_dir, 'test.csv')), 'test')

    def get_labels(self):
        """The four answer-choice labels."""
        return ['0', '1', '2', '3']

    def _read_csv(self, input_file):
        with open(input_file, 'r', encoding='utf-8') as f:
            return list(csv.reader(f))

    def _create_examples(self, lines: List[List[str]], type: str):
        """Turn raw CSV rows (header + data) into InputExample objects.

        For training the header's last column must literally be 'label'.
        Column layout: 2=id, 4=context, 5=question, 7-10=endings, 11=label.
        """
        if ((type == 'train') and (lines[0][(- 1)] != 'label')):
            raise ValueError('For training, the input file must contain a label column.')
        # The same context is repeated once per ending (4 choices).
        examples = [InputExample(example_id=line[2], question=line[5], contexts=[line[4], line[4], line[4], line[4]], endings=[line[7], line[8], line[9], line[10]], label=line[11]) for line in lines[1:]]
        return examples
class CityscapesData(Dataset):
    """Cityscapes image dataset: recursively loads PNGs from folder_path,
    resizes each to 640x192, and scales pixel values into [0, ~1) float32."""

    def __init__(self, folder_path):
        self.folder_path = folder_path
        # Sorted for a deterministic sample order across filesystems.
        self.all_imgs = sorted(list(Path(folder_path).glob('**/*.png')))

    def __len__(self):
        return len(self.all_imgs)

    def __getitem__(self, index):
        image_path = self.all_imgs[index]
        image = cv2.imread(str(image_path), cv2.IMREAD_UNCHANGED)
        # BUG FIX: cv2.resize's third positional parameter is `dst`, not the
        # interpolation mode — INTER_AREA must be passed by keyword to take effect.
        image = cv2.resize(image, (640, 192), interpolation=cv2.INTER_AREA)
        image = ((image * 1.0) / 256)
        image = image.astype(np.float32)
        return image
class MemoizedClass(object):
    """Fixture class whose memoized method counts real (non-cached) invocations."""

    def __init__(self):
        self.calls = 0

    # NOTE(review): the decorator's '@' marker (and possibly a name prefix) was
    # lost in extraction; restored with the visible suffix — confirm the exact
    # decorator name against the original source.  The key function memoizes on
    # the second argument only.
    @_with_key_fxn(lambda self, a, b: b)
    def fxn_to_memoize(self, a, b):
        self.calls += 1
        return (a + b)
def _preserve_environment(names):
    """Snapshot the named environment variables.

    Returns a dict mapping each name to its current value, with None for
    variables that are unset.
    """
    log.debug('_preserve_environment(%r)' % names)
    return {name: os.environ.get(name) for name in names}
def get_label_from_logits(logits, label_ids, input_ids, subword, input_mask, tokenizer, label_map, k=1, mode='IO', print_topk=0):
    """Decode predicted label tokens from per-position logits for each batch item.

    Returns (pred_labels, pred_tokens, gold_tokens), plus a fourth list of the
    top-print_topk candidate (tokens, scores) pairs when print_topk > 0.
    """
    pred_ids_topk = torch.topk(logits, k=k, dim=2).indices
    if (print_topk > 0):
        (pred_value_top5, pred_ids_top5) = torch.topk(logits, k=print_topk, dim=2)
    pred_labels = []
    pred_tokens = []
    gold_tokens = []
    pred_tokens_top5 = []
    batch_size = label_ids.shape[0]
    for i in range(batch_size):
        gold_token = tokenizer.convert_ids_to_tokens(input_ids[i])
        pred_token = get_label_token_from_topk(pred_ids_topk[i], tokenizer, label_map)
        # Drop subword continuations and padding positions from both sequences.
        gold_token = filter_item(gold_token, subword_mask=subword[i], input_mask=input_mask[i])
        pred_token = filter_item(pred_token, subword_mask=subword[i], input_mask=input_mask[i])
        if (print_topk > 0):
            pred_tokens_top5_ = [(tokenizer.convert_ids_to_tokens(word_ids), values) for (word_ids, values) in zip(pred_ids_top5[i], pred_value_top5[i])]
            pred_tokens_top5.append(filter_item(pred_tokens_top5_, subword_mask=subword[i], input_mask=input_mask[i]))
        # key[2:] strips a 2-char scheme prefix from label_map keys — presumably
        # 'B-'/'I-' style tags; TODO confirm against label_map construction.
        reverse_label_map = {value: key[2:] for (key, value) in label_map.items()}
        pred_label = get_label_from_label_token(pred_token, reverse_label_map, mode)
        assert (len(gold_token) == len(pred_token))
        assert (len(gold_token) == len(pred_label))
        gold_tokens.append(gold_token)
        pred_tokens.append(pred_token)
        pred_labels.append(pred_label)
    if (print_topk > 0):
        return (pred_labels, pred_tokens, gold_tokens, pred_tokens_top5)
    else:
        return (pred_labels, pred_tokens, gold_tokens)
# NOTE(review): the decorator's '@register' prefix was lost in extraction;
# restored as timm-style @register_model — confirm against the original file.
@register_model
def identityformer_s12(pretrained=False, **kwargs):
    """IdentityFormer-S12: a MetaFormer with identity token mixers.

    Args:
        pretrained: when True, download and load the checkpoint from the
            default_cfg URL.
        **kwargs: forwarded to the MetaFormer constructor.
    """
    model = MetaFormer(depths=[2, 2, 6, 2], dims=[64, 128, 320, 512], token_mixers=nn.Identity, norm_layers=partial(LayerNormGeneral, normalized_dim=(1, 2, 3), eps=1e-06, bias=False), **kwargs)
    model.default_cfg = default_cfgs['identityformer_s12']
    if pretrained:
        state_dict = torch.hub.load_state_dict_from_url(url=model.default_cfg['url'], map_location='cpu', check_hash=True)
        model.load_state_dict(state_dict)
    return model
def write_sensor_data_as_document(cas, localpath='/Users/moy/work/git_public/word2vec-data/sensor'):
    """Write the CAS sensor token sequence to <localpath>/sensor_data.txt as a
    single space-separated line.

    Args:
        cas: object exposing a ``sensor_seq`` iterable of string tokens.
        localpath: output directory; defaults to the previously hard-coded
            path so existing callers are unaffected (generalized to a
            parameter so the function is usable outside that machine).
    """
    with open(localpath + '/sensor_data.txt', 'w') as f:
        f.write(' '.join(cas.sensor_seq))
def generate_sample(embeddings, this_spk, other_spks, label):
    """Pair every embedding of this_spk against every embedding of the other
    speakers, returning a list of (cosine_similarity, label) tuples."""
    anchor_embs = embeddings[this_spk]
    contrast_embs = list(chain.from_iterable(embeddings[spk] for spk in other_spks))
    return [(get_cosine_similarity(anchor, contrast), label)
            for anchor in anchor_embs
            for contrast in contrast_embs]
class ResultSet(six.Iterator):
    """Lazy iterator over query results; subclasses implement _fetch.

    Rows are pulled in fixed-size batches; a short batch signals exhaustion.
    """

    def __init__(self):
        self._generator = None

    def __iter__(self):
        return self

    def _gen(self):
        fetch_size = 128
        while True:
            # `or []` tolerates _fetch returning None when there is no data.
            rows = (self._fetch(fetch_size) or [])
            for r in rows:
                (yield r)
            if (len(rows) < fetch_size):
                break

    def __next__(self):
        # Create the batch generator lazily on first iteration.
        if (self._generator is None):
            self._generator = self._gen()
        return next(self._generator)

    def _fetch(self, fetch_size):
        # Subclass hook: return up to fetch_size rows, or a falsy value when done.
        pass

    def raw_column_info(self):
        return self.column_info()

    def column_info(self):
        # Subclass hook.
        pass

    def success(self):
        return False

    def close(self):
        pass

    def error(self):
        return ''
def expand_args(params):
    """Expand every list-valued entry of params into a cartesian sweep.

    Returns one dict per combination; non-list entries are carried over
    unchanged into every expanded dict.  With no list values, a single copy
    of params is returned.
    """
    sweep_keys = [key for key, value in params.items() if isinstance(value, list)]
    sweep_values = [params[key] for key in sweep_keys]
    return [
        {**params, **dict(zip(sweep_keys, combo))}
        for combo in itertools.product(*sweep_values)
    ]
def add_assert_range_checked(ctx: LeanGenContext, lhs: Expression, rhs: Expression, assert_rw: str):
    """Append a range-checked assert step to ctx's final output, but only when
    range-check steps are being tracked (ctx.rc_steps is not None); otherwise
    this is a no-op."""
    if (ctx.rc_steps is not None):
        ctx.concat_final(ctx.rc_steps.add_assert_range_checked(lhs, rhs, assert_rw))
def test_indexed():
    """An IndexedArray nested in a ListOffsetArray must compare almost-equal to
    the same list content with the indexing pre-applied to a plain NumpyArray."""
    assert ak.almost_equal(ak.contents.ListOffsetArray(ak.index.Index64([0, 2, 4, 8]), ak.contents.IndexedArray(ak.index.Index64([0, 1, 2, 3, 2, 1, 0, 5]), ak.contents.NumpyArray(np.arange(6, dtype=np.int64)))), ak.contents.ListOffsetArray(ak.index.Index64([0, 2, 4, 8]), ak.contents.NumpyArray(np.array([0, 1, 2, 3, 2, 1, 0, 5], dtype=np.int64))))
class SolveMaxMatching():
    """Min-cost-flow assignment of nworkers workers to k tasks each, maximizing
    total affinity while charging a growing pairwise cost for piling multiple
    workers onto the same task (OR-tools SimpleMinCostFlow)."""

    def __init__(self, nworkers, ntasks, k, value=10000, pairwise_lamb=0.1):
        self.nworkers = nworkers
        self.ntasks = ntasks
        self.value = value  # scale factor turning float affinities into integer costs
        self.k = k  # tasks assigned to each worker
        # Node numbering: 0 = source, 1..nworkers = workers,
        # nworkers+1..nworkers+ntasks = tasks, last = sink.
        self.source = 0
        self.sink = ((self.nworkers + self.ntasks) + 1)
        self.pairwise_cost = int((pairwise_lamb * value))
        # Source emits nworkers*k units, the sink absorbs them, middle nodes balance.
        self.supplies = (([(self.nworkers * self.k)] + ((self.ntasks + self.nworkers) * [0])) + [((- self.nworkers) * self.k)])
        self.start_nodes = list()
        self.end_nodes = list()
        self.capacities = list()
        self.common_costs = list()
        # source -> worker arcs: capacity k, zero cost.
        for work_idx in range(self.nworkers):
            self.start_nodes.append(self.source)
            self.end_nodes.append((work_idx + 1))
            self.capacities.append(self.k)
            self.common_costs.append(0)
        # task -> sink arcs: nworkers parallel unit arcs per task with linearly
        # increasing cost, so each extra worker on the same task pays
        # pairwise_cost more than the previous one.
        for work_idx in range(self.nworkers):
            for task_idx in range(self.ntasks):
                self.start_nodes.append(((self.nworkers + 1) + task_idx))
                self.end_nodes.append(self.sink)
                self.capacities.append(1)
                self.common_costs.append((work_idx * self.pairwise_cost))
        # worker -> task arcs: unit capacity; their costs come per-solve from
        # the affinity array (appended in solve()).
        for work_idx in range(self.nworkers):
            for task_idx in range(self.ntasks):
                self.start_nodes.append((work_idx + 1))
                self.end_nodes.append(((self.nworkers + 1) + task_idx))
                self.capacities.append(1)
        # NOTE(review): despite the name, nnodes counts arcs, not nodes.
        self.nnodes = len(self.start_nodes)

    def solve(self, array):
        """Run min-cost flow on the (nworkers, ntasks) affinity array.

        Returns (list of [worker, task] pairs, one-hot assignment matrix).
        """
        assert (array.shape == (self.nworkers, self.ntasks)), 'Wrong array shape, it should be ({}, {})'.format(self.nworkers, self.ntasks)
        # Scale and negate so that maximizing affinity becomes minimizing cost.
        self.array = (self.value * array)
        self.array = (- self.array)
        self.array = self.array.astype(np.int32)
        costs = copy.copy(self.common_costs)
        for work_idx in range(self.nworkers):
            for task_idx in range(self.ntasks):
                costs.append(self.array[work_idx][task_idx])
        costs = np.array(costs)
        costs = costs.tolist()
        assert (len(costs) == self.nnodes), 'Length of costs should be {} but {}'.format(self.nnodes, len(costs))
        min_cost_flow = pywrapgraph.SimpleMinCostFlow()
        for idx in range(self.nnodes):
            min_cost_flow.AddArcWithCapacityAndUnitCost(self.start_nodes[idx], self.end_nodes[idx], self.capacities[idx], costs[idx])
        for idx in range(((self.ntasks + self.nworkers) + 2)):
            min_cost_flow.SetNodeSupply(idx, self.supplies[idx])
        min_cost_flow.Solve()
        results = list()
        # Keep only worker->task arcs carrying flow; translate node ids back to
        # 0-based worker/task indices.
        for arc in range(min_cost_flow.NumArcs()):
            if ((min_cost_flow.Tail(arc) != self.source) and (min_cost_flow.Head(arc) != self.sink)):
                if (min_cost_flow.Flow(arc) > 0):
                    results.append([(min_cost_flow.Tail(arc) - 1), ((min_cost_flow.Head(arc) - self.nworkers) - 1)])
        results_np = np.zeros_like(array)
        for (i, j) in results:
            results_np[i][j] = 1
        return (results, results_np)
def get_impute_knn_score(X_missing, y_missing):
    """Cross-validated score of KNN imputation on data with missing values.

    Returns (mean, std) of the scores produced by get_scores_for_imputer.
    """
    imputer = KNNImputer(missing_values=np.nan, add_indicator=True)
    knn_impute_scores = get_scores_for_imputer(imputer, X_missing, y_missing)
    return (knn_impute_scores.mean(), knn_impute_scores.std())
class ParameterList(rf.Module):
    """List-like container of rf.Parameter objects, stored as module attributes
    keyed by their (string) index so they are discovered by rf.Module."""

    def __init__(self, *parameters: Union[(rf.Parameter, Iterable[rf.Parameter], Dict[(str, rf.Parameter)], ParameterList)]):
        """Accepts a single dict, a single ParameterList, a single iterable of
        parameters, or parameters as varargs."""
        super().__init__()
        if ((len(parameters) == 1) and isinstance(parameters[0], dict)):
            for (i, (key, parameter)) in enumerate(parameters[0].items()):
                # Integer-like keys are renumbered to the enumeration index.
                if _is_int_str(key):
                    key = str(i)
                setattr(self, key, parameter)
        elif ((len(parameters) == 1) and isinstance(parameters[0], ParameterList)):
            for (key, parameter) in parameters[0]._get_parameters().items():
                setattr(self, key, parameter)
        elif ((len(parameters) == 1) and _is_iterable(parameters[0])):
            for (idx, parameter) in enumerate(parameters[0]):
                setattr(self, str(idx), parameter)
        else:
            for (idx, parameter) in enumerate(parameters):
                setattr(self, str(idx), parameter)

    def _get_parameters(self) -> Dict[(str, rf.Parameter)]:
        # Parameters live as instance attributes; collect them in insertion order.
        return {key: value for (key, value) in vars(self).items() if isinstance(value, rf.Parameter)}

    def append(self, parameter: rf.Parameter) -> ParameterList:
        """Append a parameter under the next free integer key; returns self."""
        setattr(self, str(len(self)), parameter)
        return self

    def extend(self, parameters: Iterable[rf.Parameter]) -> ParameterList:
        """Append each parameter in order; returns self."""
        for parameter in parameters:
            self.append(parameter)
        return self

    def __len__(self) -> int:
        return len(self._get_parameters())

    def __iter__(self) -> Iterator[rf.Parameter]:
        return iter(self._get_parameters().values())

    def __getitem__(self, idx) -> Union[(ParameterList, rf.Parameter)]:
        # Slices return a new ParameterList preserving the original keys.
        if isinstance(idx, slice):
            return self.__class__(dict(list(self._get_parameters().items())[idx]))
        else:
            return list(self._get_parameters().values())[idx]

    def __setitem__(self, idx: int, parameter: rf.Parameter) -> None:
        key = list(self._get_parameters().keys())[idx]
        # BUG FIX: previously assigned the rf.Parameter *class* instead of the
        # supplied parameter instance, silently corrupting the list entry.
        return setattr(self, key, parameter)
    __call__ = rf.Module.__call__
class BatchSampler(BaseSampler):
    """Collects rollout batches through the global parallel_sampler worker pool."""

    def start_worker(self):
        # TF worker init must run on each process before tasks are populated.
        if (singleton_pool.n_parallel > 1):
            singleton_pool.run_each(worker_init_tf)
        parallel_sampler.populate_task(self.algo.env, self.algo.policy)
        if (singleton_pool.n_parallel > 1):
            singleton_pool.run_each(worker_init_tf_vars)

    def shutdown_worker(self):
        parallel_sampler.terminate_task(scope=self.algo.scope)

    def obtain_samples(self, itr):
        """Sample up to batch_size environment steps with the current policy.

        Returns whole paths, or paths truncated to exactly batch_size steps
        when algo.whole_paths is False.
        """
        cur_policy_params = self.algo.policy.get_param_values()
        paths = parallel_sampler.sample_paths(policy_params=cur_policy_params, env_params=None, max_samples=self.algo.batch_size, max_path_length=self.algo.max_path_length, scope=self.algo.scope)
        if self.algo.whole_paths:
            return paths
        else:
            paths_truncated = parallel_sampler.truncate_paths(paths, self.algo.batch_size)
            return paths_truncated
class adjust_light():
    """Augmentation: with probability 0.5, applies a random gamma adjustment to
    the image via a uint8 lookup table."""

    def __call__(self, image):
        seed = random.random()
        if (seed > 0.5):
            # gamma drawn uniformly from [0.5, 3.5); the LUT maps each intensity
            # through the inverse-gamma curve.
            gamma = ((random.random() * 3) + 0.5)
            invGamma = (1.0 / gamma)
            table = np.array([(((i / 255.0) ** invGamma) * 255) for i in np.arange(0, 256)]).astype(np.uint8)
            image = cv2.LUT(np.array(image).astype(np.uint8), table).astype(np.uint8)
        return image
def resnet18(pretrained=False, progress=True, modal='vision', **kwargs):
    """ResNet-18 factory: BasicBlock with [2, 2, 2, 2] layers for the given
    modality; remaining options are forwarded to _resnet."""
    return _resnet('resnet18', BasicBlock, [2, 2, 2, 2], pretrained, progress, modal, **kwargs)
def pil_loader(data_path, label_path):
    """Open an (image, label) pair with PIL and return both lazily-loaded images."""
    return (Image.open(data_path), Image.open(label_path))
class LinearOperator(object):
    """Abstract linear operator defined by its action (matvec/matmat) rather
    than by explicit matrix entries (scipy.sparse.linalg-style interface).

    Subclasses implement _matvec and/or _matmat; each has a default in terms
    of the other. Adjoint behavior comes from _rmatvec/_rmatmat or _adjoint.
    """

    def __new__(cls, *args, **kwargs):
        if (cls is LinearOperator):
            # Direct instantiation builds a _CustomLinearOperator from callables.
            return super(LinearOperator, cls).__new__(_CustomLinearOperator)
        else:
            obj = super(LinearOperator, cls).__new__(cls)
            # Warn subclasses that override neither action method.
            if ((type(obj)._matvec == LinearOperator._matvec) and (type(obj)._matmat == LinearOperator._matmat)):
                warnings.warn('LinearOperator subclass should implement at least one of _matvec and _matmat.', category=RuntimeWarning, stacklevel=2)
            return obj

    def __init__(self, dtype, shape):
        if (dtype is not None):
            dtype = np.dtype(dtype)
        shape = tuple(shape)
        if (not isshape(shape)):
            raise ValueError(('invalid shape %r (must be 2-d)' % (shape,)))
        self.dtype = dtype
        self.shape = shape

    def _init_dtype(self):
        # Infer dtype lazily by probing matvec with a zero vector.
        if (self.dtype is None):
            v = np.zeros(self.shape[(- 1)])
            self.dtype = np.asarray(self.matvec(v)).dtype

    def _matmat(self, X):
        # Default matmat: column-by-column matvec.
        return np.hstack([self.matvec(col.reshape((- 1), 1)) for col in X.T])

    def _matvec(self, x):
        # Default matvec: delegate to matmat on an (N, 1) column.
        return self.matmat(x.reshape((- 1), 1))

    def matvec(self, x):
        """Matrix-vector product with input validation and output shaping."""
        x = np.asanyarray(x)
        (M, N) = self.shape
        if ((x.shape != (N,)) and (x.shape != (N, 1))):
            raise ValueError('dimension mismatch')
        y = self._matvec(x)
        # Preserve np.matrix-ness of the input in the output.
        if isinstance(x, np.matrix):
            y = asmatrix(y)
        else:
            y = np.asarray(y)
        if (x.ndim == 1):
            y = y.reshape(M)
        elif (x.ndim == 2):
            y = y.reshape(M, 1)
        else:
            raise ValueError('invalid shape returned by user-defined matvec()')
        return y

    def rmatvec(self, x):
        """Adjoint matrix-vector product (H @ x) with validation and shaping."""
        x = np.asanyarray(x)
        (M, N) = self.shape
        if ((x.shape != (M,)) and (x.shape != (M, 1))):
            raise ValueError('dimension mismatch')
        y = self._rmatvec(x)
        if isinstance(x, np.matrix):
            y = asmatrix(y)
        else:
            y = np.asarray(y)
        if (x.ndim == 1):
            y = y.reshape(N)
        elif (x.ndim == 2):
            y = y.reshape(N, 1)
        else:
            raise ValueError('invalid shape returned by user-defined rmatvec()')
        return y

    def _rmatvec(self, x):
        # Default: use the adjoint operator if the subclass provides one.
        if (type(self)._adjoint == LinearOperator._adjoint):
            raise NotImplementedError
        else:
            return self.H.matvec(x)

    def matmat(self, X):
        """Matrix-matrix product with dimension checks."""
        X = np.asanyarray(X)
        if (X.ndim != 2):
            raise ValueError(('expected 2-d ndarray or matrix, not %d-d' % X.ndim))
        if (X.shape[0] != self.shape[1]):
            raise ValueError(('dimension mismatch: %r, %r' % (self.shape, X.shape)))
        Y = self._matmat(X)
        if isinstance(Y, np.matrix):
            Y = asmatrix(Y)
        return Y

    def rmatmat(self, X):
        """Adjoint matrix-matrix product with dimension checks."""
        X = np.asanyarray(X)
        if (X.ndim != 2):
            raise ValueError(('expected 2-d ndarray or matrix, not %d-d' % X.ndim))
        if (X.shape[0] != self.shape[0]):
            raise ValueError(('dimension mismatch: %r, %r' % (self.shape, X.shape)))
        Y = self._rmatmat(X)
        if isinstance(Y, np.matrix):
            Y = asmatrix(Y)
        return Y

    def _rmatmat(self, X):
        # Default: column-by-column rmatvec, unless an adjoint is available.
        if (type(self)._adjoint == LinearOperator._adjoint):
            return np.hstack([self.rmatvec(col.reshape((- 1), 1)) for col in X.T])
        else:
            return self.H.matmat(X)

    def __call__(self, x):
        return (self * x)

    def __mul__(self, x):
        return self.dot(x)

    def dot(self, x):
        """Composition (operator), scaling (scalar), or application (array)."""
        if isinstance(x, LinearOperator):
            return _ProductLinearOperator(self, x)
        elif np.isscalar(x):
            return _ScaledLinearOperator(self, x)
        else:
            x = np.asarray(x)
            if ((x.ndim == 1) or ((x.ndim == 2) and (x.shape[1] == 1))):
                return self.matvec(x)
            elif (x.ndim == 2):
                return self.matmat(x)
            else:
                raise ValueError(('expected 1-d or 2-d array or matrix, got %r' % x))

    def __matmul__(self, other):
        # '@' deliberately rejects scalars, matching PEP 465 semantics.
        if np.isscalar(other):
            raise ValueError("Scalar operands are not allowed, use '*' instead")
        return self.__mul__(other)

    def __rmatmul__(self, other):
        if np.isscalar(other):
            raise ValueError("Scalar operands are not allowed, use '*' instead")
        return self.__rmul__(other)

    def __rmul__(self, x):
        if np.isscalar(x):
            return _ScaledLinearOperator(self, x)
        else:
            return NotImplemented

    def __pow__(self, p):
        if np.isscalar(p):
            return _PowerLinearOperator(self, p)
        else:
            return NotImplemented

    def __add__(self, x):
        if isinstance(x, LinearOperator):
            return _SumLinearOperator(self, x)
        else:
            return NotImplemented

    def __neg__(self):
        return _ScaledLinearOperator(self, (- 1))

    def __sub__(self, x):
        return self.__add__((- x))

    def __repr__(self):
        (M, N) = self.shape
        if (self.dtype is None):
            dt = 'unspecified dtype'
        else:
            dt = ('dtype=' + str(self.dtype))
        return ('<%dx%d %s with %s>' % (M, N, self.__class__.__name__, dt))

    def adjoint(self):
        """Hermitian adjoint (also exposed as the H property)."""
        return self._adjoint()
    H = property(adjoint)

    def transpose(self):
        """Transpose (also exposed as the T property)."""
        return self._transpose()
    T = property(transpose)

    def _adjoint(self):
        return _AdjointLinearOperator(self)

    def _transpose(self):
        return _TransposedLinearOperator(self)
def recover_formula(prefix_tree):
    """Reconstruct a formula string from a prefix parse tree.

    Args:
        prefix_tree: the parse tree as a (nested) list.
    Raises:
        TypeError: when the input is not a list.
    Returns:
        The formula string; the redundant outermost parentheses are stripped
        except for negations and single-node trees, which carry none.
    """
    # Validate before any work (the original also initialized a dead
    # `formula = ''` that was immediately overwritten — removed).
    if not isinstance(prefix_tree, list):
        raise TypeError('the input must be a parse tree as a list')
    formula = apply_func(prefix_tree, recover_formula_internal)
    if (prefix_tree[0] == '~') or (len(prefix_tree) == 1):
        return formula
    return formula[1:(- 1)]
def map_starred_assignment(lhs_targets, starred_assignments, lhs_args, rhs_args):
    """Distribute RHS args across LHS target slots around a single starred
    target, recording the starred remainder as a ListNode assignment.

    Mutates lhs_targets (per-slot target lists) and starred_assignments
    ([target, ListNode] pairs) in place.
    """
    # Map everything before the starred argument one-to-one.
    for (i, (targets, expr)) in enumerate(zip(lhs_targets, lhs_args)):
        if expr.is_starred:
            starred = i
            lhs_remaining = ((len(lhs_args) - i) - 1)
            break
        targets.append(expr)
    else:
        # for/else: loop completed without break, i.e. no starred arg.
        raise InternalError('no starred arg found when splitting starred assignment')
    # Map everything after the starred argument onto the trailing slots.
    for (i, (targets, expr)) in enumerate(zip(lhs_targets[(- lhs_remaining):], lhs_args[(starred + 1):])):
        targets.append(expr)
    target = lhs_args[starred].target
    # The starred target absorbs the middle of the RHS.
    starred_rhs = rhs_args[starred:]
    if lhs_remaining:
        starred_rhs = starred_rhs[:(- lhs_remaining)]
    if starred_rhs:
        pos = starred_rhs[0].pos
    else:
        pos = target.pos
    starred_assignments.append([target, ExprNodes.ListNode(pos=pos, args=starred_rhs)])
class Critic(nn.Module):
    """Twin Q-networks over concatenated (obs features, tiled actions).

    Each of the two action inputs is tiled 50x in forward, which is why the
    first Linear expects feature_dim + action_shape[0] * 100 inputs.
    """

    def __init__(self, repr_dim, action_shape, feature_dim, hidden_dim):
        super().__init__()
        self.Q1 = nn.Sequential(nn.Linear((feature_dim + (action_shape[0] * 100)), hidden_dim), nn.ReLU(inplace=True), nn.Linear(hidden_dim, hidden_dim), nn.ReLU(inplace=True), nn.Linear(hidden_dim, 1))
        self.Q2 = nn.Sequential(nn.Linear((feature_dim + (action_shape[0] * 100)), hidden_dim), nn.ReLU(inplace=True), nn.Linear(hidden_dim, hidden_dim), nn.ReLU(inplace=True), nn.Linear(hidden_dim, 1))
        self.apply(utils.weight_init)

    def forward(self, obs, bc_action, offset_action):
        """Return (q1, q2) estimates for the observation/action pair."""
        # Tile both actions 50x along the feature axis before concatenation.
        bc_action = bc_action.repeat(1, 50)
        offset_action = offset_action.repeat(1, 50)
        h_action = torch.cat([obs, bc_action, offset_action], dim=(- 1))
        q1 = self.Q1(h_action)
        q2 = self.Q2(h_action)
        return (q1, q2)
# NOTE(review): the '@' decorator markers were lost in extraction; the typed
# signature tuple and the memlet (<< / >>) bodies match DaCe's legacy
# decorator-based frontend, so the dace decorators are restored below —
# confirm against the original repository.
@dace.program(datatype[N, N], datatype[N], datatype[N])
def trisolv(L, x, b):
    """Triangular solve x = L^-1 b by forward substitution (Polybench trisolv)."""
    for i in range(0, N, 1):

        @dace.tasklet
        def init_x():
            # Seed x[i] with b[i].
            in_b << b[i]
            out >> x[i]
            out = in_b

        @dace.map
        def set_x(j: _[0:i]):
            # Accumulate -L[i, j] * x[j] into x[i] (write-conflict resolution: sum).
            in_L << L[i, j]
            in_x << x[j]
            out >> x(1, lambda x, y: (x + y))[i]
            out = ((- in_L) * in_x)

        @dace.tasklet
        def div():
            # Divide by the diagonal entry.
            in_x << x[i]
            in_L << L[i, i]
            out >> x[i]
            out = (in_x / in_L)
def register_Ns3Dot11sPeerLinkConfirmStartPlinkConfirmStartFields_methods(root_module, cls):
    """PyBindGen registration (auto-generated style) for
    ns3::dot11s::PeerLinkConfirmStart::PlinkConfirmStartFields: default and
    copy constructors plus the public mutable field attributes."""
    cls.add_constructor([])
    cls.add_constructor([param('ns3::dot11s::PeerLinkConfirmStart::PlinkConfirmStartFields const &', 'arg0')])
    cls.add_instance_attribute('aid', 'uint16_t', is_const=False)
    cls.add_instance_attribute('capability', 'uint16_t', is_const=False)
    cls.add_instance_attribute('config', 'ns3::dot11s::IeConfiguration', is_const=False)
    cls.add_instance_attribute('protocol', 'ns3::dot11s::IePeeringProtocol', is_const=False)
    cls.add_instance_attribute('rates', 'ns3::SupportedRates', is_const=False)
    return
def install_mpi_excepthook():
    """Wrap sys.excepthook so an uncaught exception on any MPI rank flushes
    stdio and aborts the whole MPI job, instead of leaving peers deadlocked."""
    import sys
    from mpi4py import MPI
    old_hook = sys.excepthook

    def new_hook(a, b, c):
        # Delegate to the previous hook first so the traceback still prints.
        old_hook(a, b, c)
        sys.stdout.flush()
        sys.stderr.flush()
        MPI.COMM_WORLD.Abort()
    sys.excepthook = new_hook
class TFMobileViTMobileNetLayer(tf.keras.layers.Layer):
    """Stack of num_stages inverted-residual blocks; only the first stage
    applies the requested stride, the rest use stride 1."""

    def __init__(self, config: MobileViTConfig, in_channels: int, out_channels: int, stride: int=1, num_stages: int=1, **kwargs) -> None:
        super().__init__(**kwargs)
        self.layers = []
        for i in range(num_stages):
            layer = TFMobileViTInvertedResidual(config, in_channels=in_channels, out_channels=out_channels, stride=(stride if (i == 0) else 1), name=f'layer.{i}')
            self.layers.append(layer)
            # Subsequent stages keep the widened channel count.
            in_channels = out_channels

    def call(self, features: tf.Tensor, training: bool=False) -> tf.Tensor:
        """Run the features sequentially through every stage."""
        for layer_module in self.layers:
            features = layer_module(features, training=training)
        return features
def test_replace_ImageToTensor():
    """replace_ImageToTensor should swap ImageToTensor for DefaultFormatBundle
    both inside MultiScaleFlipAug transforms and at the pipeline top level,
    emitting a UserWarning each time."""
    # Case 1: ImageToTensor nested inside MultiScaleFlipAug.
    pipelines = [dict(type='LoadImageFromFile'), dict(type='MultiScaleFlipAug', img_scale=(1333, 800), flip=False, transforms=[dict(type='Resize', keep_ratio=True), dict(type='RandomFlip'), dict(type='Normalize'), dict(type='Pad', size_divisor=32), dict(type='ImageToTensor', keys=['img']), dict(type='Collect', keys=['img'])])]
    expected_pipelines = [dict(type='LoadImageFromFile'), dict(type='MultiScaleFlipAug', img_scale=(1333, 800), flip=False, transforms=[dict(type='Resize', keep_ratio=True), dict(type='RandomFlip'), dict(type='Normalize'), dict(type='Pad', size_divisor=32), dict(type='DefaultFormatBundle'), dict(type='Collect', keys=['img'])])]
    with pytest.warns(UserWarning):
        assert (expected_pipelines == replace_ImageToTensor(pipelines))
    # Case 2: ImageToTensor directly in a flat pipeline.
    pipelines = [dict(type='LoadImageFromFile'), dict(type='Resize', keep_ratio=True), dict(type='RandomFlip'), dict(type='Normalize'), dict(type='Pad', size_divisor=32), dict(type='ImageToTensor', keys=['img']), dict(type='Collect', keys=['img'])]
    expected_pipelines = [dict(type='LoadImageFromFile'), dict(type='Resize', keep_ratio=True), dict(type='RandomFlip'), dict(type='Normalize'), dict(type='Pad', size_divisor=32), dict(type='DefaultFormatBundle'), dict(type='Collect', keys=['img'])]
    with pytest.warns(UserWarning):
        assert (expected_pipelines == replace_ImageToTensor(pipelines))
def test_readxml_public_api():
    """Guard the pyhf.readxml public surface against accidental additions or
    removals.  NOTE(review): comparing dir() to an exact list presumes the
    module defines __dir__ (e.g. returning a sorted __all__) — confirm."""
    assert (dir(pyhf.readxml) == ['clear_filecache', 'dedupe_parameters', 'extract_error', 'import_root_histogram', 'parse', 'process_channel', 'process_data', 'process_measurements', 'process_sample'])
class Bucket(object):
    """One cached compiled template: a checksum-validated bytecode container
    (jinja2-style bytecode cache bucket)."""

    def __init__(self, environment, key, checksum):
        self.environment = environment
        self.key = key
        self.checksum = checksum
        self.reset()

    def reset(self):
        # Forget any loaded code object.
        self.code = None

    def load_bytecode(self, f):
        """Load code from an open stream; silently resets (cache miss) on any
        magic/checksum mismatch or corrupted payload.

        SECURITY NOTE(review): uses pickle.load and marshal on the stream —
        only safe when the cache location is trusted.
        """
        magic = f.read(len(bc_magic))
        if (magic != bc_magic):
            self.reset()
            return
        checksum = pickle.load(f)
        if (self.checksum != checksum):
            self.reset()
            return
        try:
            self.code = marshal_load(f)
        except (EOFError, ValueError, TypeError):
            self.reset()
            return

    def write_bytecode(self, f):
        """Serialize magic header, checksum, and marshalled code to a stream."""
        if (self.code is None):
            raise TypeError("can't write empty bucket")
        f.write(bc_magic)
        # Protocol 2 keeps the pickle readable across Python 2/3.
        pickle.dump(self.checksum, f, 2)
        marshal_dump(self.code, f)

    def bytecode_from_string(self, string):
        self.load_bytecode(BytesIO(string))

    def bytecode_to_string(self):
        out = BytesIO()
        self.write_bytecode(out)
        return out.getvalue()
def format_stat(stat):
    """Render a logging statistic as a display string.

    Numbers use general format; meters are condensed to their average or sum.
    Anything unrecognized is returned unchanged.
    """
    if isinstance(stat, Number):
        return '{:g}'.format(stat)
    if isinstance(stat, AverageMeter):
        return '{:.3f}'.format(stat.avg)
    if isinstance(stat, TimeMeter):
        return '{:g}'.format(round(stat.avg))
    if isinstance(stat, StopwatchMeter):
        return '{:g}'.format(round(stat.sum))
    return stat
def read_sentences_from_conllu(filename):
    """Split a CoNLL-U file into sentences.

    Sentences are lists of stripped non-blank lines, separated by blank
    lines; a trailing sentence with no final blank line is also returned.
    """
    sentences = []
    current = []
    with open(filename, encoding='utf-8') as infile:
        for raw_line in infile:
            stripped = raw_line.strip()
            if stripped:
                current.append(stripped)
            elif current:
                sentences.append(current)
                current = []
    if current:
        sentences.append(current)
    return sentences
def _match_hostname(cert, asserted_hostname):
    """Verify cert matches the expected hostname; on mismatch, log a warning,
    attach the offending peer cert to the exception for upstream handlers,
    and re-raise the CertificateError."""
    try:
        match_hostname(cert, asserted_hostname)
    except CertificateError as e:
        log.warning('Certificate did not match expected hostname: %s. Certificate: %s', asserted_hostname, cert)
        # Expose the peer cert so callers can include it in error reporting.
        e._peer_cert = cert
        raise
def get_console(**kwargs) -> Console:
    """Build the shared rich Console: Jupyter-aware, themed with STYLES,
    soft-wrapping, and without log paths."""
    interactive = is_interactive()
    from rich.theme import Theme

    return Console(
        force_jupyter=interactive,
        log_path=False,
        theme=Theme(STYLES),
        soft_wrap=True,
        **kwargs,
    )
class BaseLearner(Layer):
    """Wraps a module and defines the meta-learning contract: subclasses
    implement adapt (inner-loop update from a loss) and clone."""

    def __init__(self, module: Layer) -> None:
        super().__init__()
        self.module = module

    def adapt(self, loss: Tensor) -> None:
        """Update internal state from a loss; must be overridden."""
        raise NotImplementedError

    def clone(self: Type[Learner]) -> Learner:
        """Return a copy suitable for independent adaptation; must be overridden."""
        raise NotImplementedError

    def forward(self, *args, **kwargs):
        # Plain delegation to the wrapped module.
        return self.module(*args, **kwargs)
# NOTE(review): this bare tuple is almost certainly the argument list of a
# registration decorator (e.g. @registry.register('grammar', 'spider')) whose
# '@'-prefix was lost in extraction — confirm against the original source.
('grammar', 'spider')
class SpiderLanguage():
    """ASDL-grammar-backed parser/unparser for Spider SQL trees.

    The constructor mutates the loaded ASDL grammar in place to reflect the
    configuration (pointers, literals, clause ordering, sketch factorization).
    """
    root_type = 'sql'

    def __init__(self, output_from=False, use_table_pointer=False, include_literals=True, include_columns=True, end_with_from=False, clause_order=None, infer_from_conditions=False, factorize_sketch=0):
        custom_primitive_type_checkers = {}
        self.pointers = set()
        if use_table_pointer:
            custom_primitive_type_checkers['table'] = (lambda x: isinstance(x, int))
            self.pointers.add('table')
        self.include_columns = include_columns
        if include_columns:
            custom_primitive_type_checkers['column'] = (lambda x: isinstance(x, int))
            self.pointers.add('column')
        self.factorize_sketch = factorize_sketch
        # Pick the ASDL grammar variant matching the sketch factorization level.
        if (self.factorize_sketch == 0):
            asdl_file = 'Spider.asdl'
        elif (self.factorize_sketch == 1):
            asdl_file = 'Spider_f1.asdl'
        elif (self.factorize_sketch == 2):
            asdl_file = 'Spider_f2.asdl'
        else:
            raise NotImplementedError
        self.ast_wrapper = ast_util.ASTWrapper(asdl.parse(os.path.join(os.path.dirname(os.path.abspath(__file__)), asdl_file)), custom_primitive_type_checkers=custom_primitive_type_checkers)
        if (not use_table_pointer):
            self.ast_wrapper.singular_types['Table'].fields[0].type = 'int'
        if (not include_columns):
            col_unit_fields = self.ast_wrapper.singular_types['col_unit'].fields
            assert (col_unit_fields[1].name == 'col_id')
            del col_unit_fields[1]
        self.include_literals = include_literals
        if (not self.include_literals):
            # Without literals, 'limit' degrades to a mandatory boolean flag.
            if (self.factorize_sketch == 0):
                limit_field = self.ast_wrapper.singular_types['sql'].fields[6]
            else:
                limit_field = self.ast_wrapper.singular_types['sql_orderby'].fields[1]
            assert (limit_field.name == 'limit')
            limit_field.opt = False
            limit_field.type = 'singleton'
        self.output_from = output_from
        self.end_with_from = end_with_from
        self.clause_order = clause_order
        self.infer_from_conditions = infer_from_conditions
        if self.clause_order:
            # Reorder the sql product fields per the SFWGOI letter permutation.
            assert (factorize_sketch == 2)
            sql_fields = self.ast_wrapper.product_types['sql'].fields
            letter2field = {k: v for (k, v) in zip('SFWGOI', sql_fields)}
            new_sql_fields = [letter2field[k] for k in self.clause_order]
            self.ast_wrapper.product_types['sql'].fields = new_sql_fields
        elif (not self.output_from):
            sql_fields = self.ast_wrapper.product_types['sql'].fields
            assert (sql_fields[1].name == 'from')
            del sql_fields[1]
        else:
            sql_fields = self.ast_wrapper.product_types['sql'].fields
            assert (sql_fields[1].name == 'from')
            if self.end_with_from:
                # Move 'from' to the end of the field order.
                sql_fields.append(sql_fields[1])
                del sql_fields[1]

    def parse(self, code, section):
        return self.parse_sql(code)

    def unparse(self, tree, item):
        unparser = SpiderUnparser(self.ast_wrapper, item.schema, self.factorize_sketch)
        return unparser.unparse_sql(tree)

    def tokenize_field_value(cls, field_value):
        """Normalize a field value to a single-token list, stripping symmetric
        double quotes."""
        # NOTE(review): bytes objects have no .encode() — this branch looks
        # like it should read field_value.decode('latin1'); confirm upstream.
        if isinstance(field_value, bytes):
            field_value_str = field_value.encode('latin1')
        elif isinstance(field_value, str):
            field_value_str = field_value
        else:
            field_value_str = str(field_value)
        if ((field_value_str[0] == '"') and (field_value_str[(- 1)] == '"')):
            field_value_str = field_value_str[1:(- 1)]
        return [field_value_str]

    def parse_val(self, val):
        """Parse a Spider value (string, col_unit list, number, or nested sql)."""
        if isinstance(val, str):
            if (not self.include_literals):
                return {'_type': 'Terminal'}
            return {'_type': 'String', 's': val}
        elif isinstance(val, list):
            return {'_type': 'ColUnit', 'c': self.parse_col_unit(val)}
        elif isinstance(val, float):
            if (not self.include_literals):
                return {'_type': 'Terminal'}
            return {'_type': 'Number', 'f': val}
        elif isinstance(val, dict):
            return {'_type': 'ValSql', 's': self.parse_sql(val)}
        else:
            raise ValueError(val)

    def parse_col_unit(self, col_unit):
        (agg_id, col_id, is_distinct) = col_unit
        result = {'_type': 'col_unit', 'agg_id': {'_type': self.AGG_TYPES_F[agg_id]}, 'is_distinct': is_distinct}
        if self.include_columns:
            result['col_id'] = col_id
        return result

    def parse_val_unit(self, val_unit):
        (unit_op, col_unit1, col_unit2) = val_unit
        result = {'_type': self.UNIT_TYPES_F[unit_op], 'col_unit1': self.parse_col_unit(col_unit1)}
        # Only binary ops (unit_op != 0, 'Column') carry a second operand.
        if (unit_op != 0):
            result['col_unit2'] = self.parse_col_unit(col_unit2)
        return result

    def parse_table_unit(self, table_unit):
        (table_type, value) = table_unit
        if (table_type == 'sql'):
            return {'_type': 'TableUnitSql', 's': self.parse_sql(value)}
        elif (table_type == 'table_unit'):
            return {'_type': 'Table', 'table_id': value}
        else:
            raise ValueError(table_type)

    def parse_cond(self, cond, optional=False):
        """Parse a Spider condition list into a binary And/Or tree."""
        if (optional and (not cond)):
            return None
        if (len(cond) > 1):
            # cond alternates [clause, 'and'/'or', clause, ...]; split at the
            # first connective.
            return {'_type': self.LOGIC_OPERATORS_F[cond[1]], 'left': self.parse_cond(cond[:1]), 'right': self.parse_cond(cond[2:])}
        ((not_op, op_id, val_unit, val1, val2),) = cond
        result = {'_type': self.COND_TYPES_F[op_id], 'val_unit': self.parse_val_unit(val_unit), 'val1': self.parse_val(val1)}
        # Only Between (op_id == 1) carries a second value.
        if (op_id == 1):
            result['val2'] = self.parse_val(val2)
        if not_op:
            result = {'_type': 'Not', 'c': result}
        return result

    def parse_sql(self, sql, optional=False):
        """Parse a full Spider sql dict into the tree shape dictated by
        factorize_sketch (0 = flat, 1 = nested chain, 2 = sibling groups)."""
        if (optional and (sql is None)):
            return None
        if (self.factorize_sketch == 0):
            return filter_nones({'_type': 'sql', 'select': self.parse_select(sql['select']), 'where': self.parse_cond(sql['where'], optional=True), 'group_by': [self.parse_col_unit(u) for u in sql['groupBy']], 'order_by': self.parse_order_by(sql['orderBy']), 'having': self.parse_cond(sql['having'], optional=True), 'limit': (sql['limit'] if self.include_literals else (sql['limit'] is not None)), 'intersect': self.parse_sql(sql['intersect'], optional=True), 'except': self.parse_sql(sql['except'], optional=True), 'union': self.parse_sql(sql['union'], optional=True), **({'from': self.parse_from(sql['from'], self.infer_from_conditions)} if self.output_from else {})})
        elif (self.factorize_sketch == 1):
            return filter_nones({'_type': 'sql', 'select': self.parse_select(sql['select']), **({'from': self.parse_from(sql['from'], self.infer_from_conditions)} if self.output_from else {}), 'sql_where': filter_nones({'_type': 'sql_where', 'where': self.parse_cond(sql['where'], optional=True), 'sql_groupby': filter_nones({'_type': 'sql_groupby', 'group_by': [self.parse_col_unit(u) for u in sql['groupBy']], 'having': filter_nones({'_type': 'having', 'having': self.parse_cond(sql['having'], optional=True)}), 'sql_orderby': filter_nones({'_type': 'sql_orderby', 'order_by': self.parse_order_by(sql['orderBy']), 'limit': filter_nones({'_type': 'limit', 'limit': (sql['limit'] if self.include_literals else (sql['limit'] is not None))}), 'sql_ieu': filter_nones({'_type': 'sql_ieu', 'intersect': self.parse_sql(sql['intersect'], optional=True), 'except': self.parse_sql(sql['except'], optional=True), 'union': self.parse_sql(sql['union'], optional=True)})})})})})
        elif (self.factorize_sketch == 2):
            return filter_nones({'_type': 'sql', 'select': self.parse_select(sql['select']), **({'from': self.parse_from(sql['from'], self.infer_from_conditions)} if self.output_from else {}), 'sql_where': filter_nones({'_type': 'sql_where', 'where': self.parse_cond(sql['where'], optional=True)}), 'sql_groupby': filter_nones({'_type': 'sql_groupby', 'group_by': [self.parse_col_unit(u) for u in sql['groupBy']], 'having': self.parse_cond(sql['having'], optional=True)}), 'sql_orderby': filter_nones({'_type': 'sql_orderby', 'order_by': self.parse_order_by(sql['orderBy']), 'limit': (sql['limit'] if self.include_literals else (sql['limit'] is not None))}), 'sql_ieu': filter_nones({'_type': 'sql_ieu', 'intersect': self.parse_sql(sql['intersect'], optional=True), 'except': self.parse_sql(sql['except'], optional=True), 'union': self.parse_sql(sql['union'], optional=True)})})

    def parse_select(self, select):
        (is_distinct, aggs) = select
        return {'_type': 'select', 'is_distinct': is_distinct, 'aggs': [self.parse_agg(agg) for agg in aggs]}

    def parse_agg(self, agg):
        (agg_id, val_unit) = agg
        return {'_type': 'agg', 'agg_id': {'_type': self.AGG_TYPES_F[agg_id]}, 'val_unit': self.parse_val_unit(val_unit)}

    def parse_from(self, from_, infer_from_conditions=False):
        # Conditions are dropped when they are to be re-inferred downstream.
        return filter_nones({'_type': 'from', 'table_units': [self.parse_table_unit(u) for u in from_['table_units']], 'conds': (self.parse_cond(from_['conds'], optional=True) if (not infer_from_conditions) else None)})

    def parse_order_by(self, order_by):
        if (not order_by):
            return None
        (order, val_units) = order_by
        return {'_type': 'order_by', 'order': {'_type': self.ORDERS_F[order]}, 'val_units': [self.parse_val_unit(v) for v in val_units]}

    # Bidirectional maps between Spider's integer/string codes and node type names.
    (COND_TYPES_F, COND_TYPES_B) = bimap(range(1, 10), ('Between', 'Eq', 'Gt', 'Lt', 'Ge', 'Le', 'Ne', 'In', 'Like'))
    (UNIT_TYPES_F, UNIT_TYPES_B) = bimap(range(5), ('Column', 'Minus', 'Plus', 'Times', 'Divide'))
    (AGG_TYPES_F, AGG_TYPES_B) = bimap(range(6), ('NoneAggOp', 'Max', 'Min', 'Count', 'Sum', 'Avg'))
    (ORDERS_F, ORDERS_B) = bimap(('asc', 'desc'), ('Asc', 'Desc'))
    (LOGIC_OPERATORS_F, LOGIC_OPERATORS_B) = bimap(('and', 'or'), ('And', 'Or'))
def assert_approx_equal(actual, desired, significant=7, err_msg='', verbose=True): __tracebackhide__ = True import numpy as np (actual, desired) = map(float, (actual, desired)) if (desired == actual): return with np.errstate(invalid='ignore'): scale = (0.5 * (np.abs(desired) + np.abs(actual))) scale = np.power(10, np.floor(np.log10(scale))) try: sc_desired = (desired / scale) except ZeroDivisionError: sc_desired = 0.0 try: sc_actual = (actual / scale) except ZeroDivisionError: sc_actual = 0.0 msg = build_err_msg([actual, desired], err_msg, header=('Items are not equal to %d significant digits:' % significant), verbose=verbose) try: if (not (gisfinite(desired) and gisfinite(actual))): if (gisnan(desired) or gisnan(actual)): if (not (gisnan(desired) and gisnan(actual))): raise AssertionError(msg) elif (not (desired == actual)): raise AssertionError(msg) return except (TypeError, NotImplementedError): pass if (np.abs((sc_desired - sc_actual)) >= np.power(10.0, (- (significant - 1)))): raise AssertionError(msg)
class MagmaFunction(ExpectFunction): def __call__(self, *args, **kwds): nvals = 1 if (len(kwds) > 0): if ('nvals' in kwds): nvals = kwds['nvals'] del kwds['nvals'] M = self._parent return M.function_call(self._name, list(args), params=kwds, nvals=nvals) def _instancedoc_(self): M = self._parent s = M.eval(self._name) s = sage.misc.misc.word_wrap(s, 80) return s
def inside(): return (lambda bbox1, bbox2: ((bbox2['x1'] >= bbox1['x1']) and (bbox2['x2'] <= bbox1['x2']) and (bbox2['y1'] >= bbox1['y1']) and (bbox2['y2'] <= bbox1['y2'])))
def test_dtype(target, mix, dtype): output = wiener(target.to(dtype=dtype), mix.to(dtype=dtype), iterations=1) assert (output.dtype == dtype)
def _where_connected_to_curr_pose(start, traversible, seed, visited): non_traversible = (1 - (traversible * 1)) if (traversible[((start[0] + 1), (start[1] + 1))] == 0): count = 0 while ((traversible[((start[0] + 1), (start[1] + 1))] == 0) and (count < 100)): np.random.seed((seed + count)) start_idx = np.random.choice(len(np.where((visited == 1))[0])) start = (np.where((visited == 1))[0][start_idx], np.where((visited == 1))[1][start_idx]) count += 1 connected_regions = skimage.morphology.label(traversible, connectivity=2) where_start_connected = np.where((connected_regions == connected_regions[((start[0] + 1), (start[1] + 1))])) wc_wrong = ((len(where_start_connected[0]) < len(np.where(visited)[0])) or (np.sum(traversible[where_start_connected]) < np.sum(non_traversible[where_start_connected]))) if wc_wrong: count = 0 while (wc_wrong and (count < min(len(np.where((visited == 1))[0]), 100))): start = (np.where((visited == 1))[0][count], np.where((visited == 1))[1][count]) where_start_connected = np.where((connected_regions == connected_regions[((start[0] + 1), (start[1] + 1))])) wc_wrong = ((len(where_start_connected[0]) < len(np.where(visited)[0])) or (np.sum(traversible[where_start_connected]) < np.sum(non_traversible[where_start_connected]))) count += 1 return where_start_connected
def res2net50_v1b_26w_4s(pretrained=False, **kwargs): model = Res2Net(Bottle2neck, [3, 4, 6, 3], baseWidth=26, scale=4, **kwargs) if pretrained: model.load_state_dict(model_zoo.load_url(model_urls['res2net50_v1b_26w_4s'])) return model
def add_reranking_args(parser):
    """Register all n-best reranking/rescoring CLI options on ``parser``
    and return the created argument group."""
    group = parser.add_argument_group('Reranking')
    # Scoring models.
    group.add_argument('--score-model1', '-s1', type=str, metavar='FILE', required=True, help='path to first model or ensemble of models for rescoring')
    group.add_argument('--score-model2', '-s2', type=str, metavar='FILE', required=False, help='path to second model or ensemble of models for rescoring')
    group.add_argument('--num-rescore', '-n', type=int, metavar='N', default=10, help='the number of candidate hypothesis to rescore')
    group.add_argument('-bz', '--batch-size', type=int, metavar='N', default=128, help='batch size for generating the nbest list')
    group.add_argument('--gen-subset', default='test', metavar='SET', choices=['test', 'train', 'valid'], help='data subset to generate (train, valid, test)')
    group.add_argument('--gen-model', default=None, metavar='FILE', help='the model to generate translations')
    # Direction flags and interpolation weights.
    group.add_argument('-b1', '--backwards1', action='store_true', help='whether or not the first model group is backwards')
    group.add_argument('-b2', '--backwards2', action='store_true', help='whether or not the second model group is backwards')
    group.add_argument('-a', '--weight1', default=1, nargs='+', type=float, help='the weight(s) of the first model')
    group.add_argument('-b', '--weight2', default=1, nargs='+', type=float, help='the weight(s) of the second model, or the gen model if using nbest from interactive.py')
    group.add_argument('-c', '--weight3', default=1, nargs='+', type=float, help='the weight(s) of the third model')
    # Target-side language model options.
    group.add_argument('-lm', '--language-model', default=None, metavar='FILE', help='language model for target language to rescore translations')
    group.add_argument('--lm-dict', default=None, metavar='FILE', help='the dict of the language model for the target language')
    group.add_argument('--lm-name', default=None, help='the name of the language model for the target language')
    group.add_argument('--lm-bpe-code', default=None, metavar='FILE', help='the bpe code for the language model for the target language')
    group.add_argument('--data-dir-name', default=None, help='name of data directory')
    group.add_argument('--lenpen', default=1, nargs='+', type=float, help='length penalty: <1.0 favors shorter, >1.0 favors longer sentences')
    group.add_argument('--score-dict-dir', default=None, help='the directory with dictionaries for the scoring models')
    group.add_argument('--right-to-left1', action='store_true', help='whether the first model group is a right to left model')
    group.add_argument('--right-to-left2', action='store_true', help='whether the second model group is a right to left model')
    # NOTE(review): the default is a single space, i.e. post-processing is on
    # by default -- confirm this matches fairseq's remove-bpe convention.
    group.add_argument('--post-process', '--remove-bpe', default=' ', help='the bpe symbol, used for the bitext and LM')
    group.add_argument('--prefix-len', default=None, type=int, help='the length of the target prefix to use in rescoring (in terms of words wo bpe)')
    group.add_argument('--sampling', action='store_true', help='use sampling instead of beam search for generating n best list')
    group.add_argument('--diff-bpe', action='store_true', help='bpe for rescoring and nbest list not the same')
    group.add_argument('--rescore-bpe-code', default=None, help='bpe code for rescoring models')
    # Precomputed n-best list handling and output.
    group.add_argument('--nbest-list', default=None, help='use predefined nbest list in interactive.py format')
    group.add_argument('--write-hypos', default=None, help='filename prefix to write hypos to')
    group.add_argument('--ref-translation', default=None, help='reference translation to use with nbest list from interactive.py')
    group.add_argument('--backwards-score-dict-dir', default=None, help='the directory with dictionaries for the backwards model,if None then it is assumed the fw and backwards models share dictionaries')
    # Naming for cached artifacts.
    group.add_argument('--gen-model-name', default=None, help='the name of the models that generated the nbest list')
    group.add_argument('--model1-name', default=None, help='the name of the set for model1 group ')
    group.add_argument('--model2-name', default=None, help='the name of the set for model2 group')
    # Sharding of the generation workload.
    group.add_argument('--shard-id', default=0, type=int, help='the id of the shard to generate')
    group.add_argument('--num-shards', default=1, type=int, help='the number of shards to generate across')
    group.add_argument('--all-shards', action='store_true', help='use all shards')
    group.add_argument('--target-prefix-frac', default=None, type=float, help='the fraction of the target prefix to use in rescoring (in terms of words wo bpe)')
    group.add_argument('--source-prefix-frac', default=None, type=float, help='the fraction of the source prefix to use in rescoring (in terms of words wo bpe)')
    group.add_argument('--normalize', action='store_true', help='whether to normalize by src and target len')
    return group
def use_setuptools(version=DEFAULT_VERSION, download_base=DEFAULT_URL, to_dir=os.curdir, download_delay=15, no_fake=True): to_dir = os.path.abspath(to_dir) was_imported = (('pkg_resources' in sys.modules) or ('setuptools' in sys.modules)) try: try: import pkg_resources try: pkg_resources.require('setuptools>=0.7b') return except (pkg_resources.DistributionNotFound, pkg_resources.VersionConflict): pass if (not hasattr(pkg_resources, '_distribute')): if (not no_fake): _fake_setuptools() raise ImportError except ImportError: return _do_download(version, download_base, to_dir, download_delay) try: pkg_resources.require(('distribute>=' + version)) return except pkg_resources.VersionConflict: e = sys.exc_info()[1] if was_imported: sys.stderr.write(("The required version of distribute (>=%s) is notavailable,\n and can't be installed while this script is running. Please\n install a more recent version first, using\n'easy_install -U distribute'.\n\n(Currently using %r)\n" % (version, e.args[0]))) sys.exit(2) else: del pkg_resources, sys.modules['pkg_resources'] return _do_download(version, download_base, to_dir, download_delay) except pkg_resources.DistributionNotFound: return _do_download(version, download_base, to_dir, download_delay) finally: if (not no_fake): _create_fake_setuptools_pkg_info(to_dir)
def get_fixtures(func: Callable, request: FixtureRequest, given_kwargs: dict[(str, Any)]) -> dict[(str, Any)]: sig = signature(func) return {name: request.getfixturevalue(name) for name in sig.parameters if ((name != 'case') and (name not in given_kwargs))}
def zeropadding2d_args_preprocessor(args, kwargs): converted = [] if (('padding' in kwargs) and isinstance(kwargs['padding'], dict)): if (set(kwargs['padding'].keys()) <= {'top_pad', 'bottom_pad', 'left_pad', 'right_pad'}): top_pad = kwargs['padding'].get('top_pad', 0) bottom_pad = kwargs['padding'].get('bottom_pad', 0) left_pad = kwargs['padding'].get('left_pad', 0) right_pad = kwargs['padding'].get('right_pad', 0) kwargs['padding'] = ((top_pad, bottom_pad), (left_pad, right_pad)) warnings.warn('The `padding` argument in the Keras 2 API no longeraccepts dict types. You can now input argument as: `padding=(top_pad, bottom_pad, left_pad, right_pad)`.', stacklevel=3) elif ((len(args) == 2) and isinstance(args[1], dict)): if (set(args[1].keys()) <= {'top_pad', 'bottom_pad', 'left_pad', 'right_pad'}): top_pad = args[1].get('top_pad', 0) bottom_pad = args[1].get('bottom_pad', 0) left_pad = args[1].get('left_pad', 0) right_pad = args[1].get('right_pad', 0) args = (args[0], ((top_pad, bottom_pad), (left_pad, right_pad))) warnings.warn('The `padding` argument in the Keras 2 API no longeraccepts dict types. You can now input argument as: `padding=((top_pad, bottom_pad), (left_pad, right_pad))`', stacklevel=3) return (args, kwargs, converted)
def commodity_gen(mat, with_val=True, skip_zero=True): for x in range(mat.shape[0]): for y in range(mat.shape[(- 1)]): if (x == y): continue if (skip_zero and (mat[(x, y)] == 0)): continue if with_val: (yield (x, y, mat[(x, y)])) else: (yield (x, y))
def test_zero_der_nz_dp(): dx = (np.finfo(float).eps ** 0.33) p0 = ((200.0 - dx) / (2.0 + dx)) with suppress_warnings() as sup: sup.filter(RuntimeWarning, 'RMS of') x = zeros.newton((lambda y: ((y - 100.0) ** 2)), x0=([p0] * 10)) assert_allclose(x, ([100] * 10)) p0 = ((2.0 - 0.0001) / (2.0 + 0.0001)) with suppress_warnings() as sup: sup.filter(RuntimeWarning, 'Tolerance of') x = zeros.newton((lambda y: ((y - 1.0) ** 2)), x0=p0, disp=False) assert_allclose(x, 1) with pytest.raises(RuntimeError, match='Tolerance of'): x = zeros.newton((lambda y: ((y - 1.0) ** 2)), x0=p0, disp=True) p0 = (((- 2.0) + 0.0001) / (2.0 + 0.0001)) with suppress_warnings() as sup: sup.filter(RuntimeWarning, 'Tolerance of') x = zeros.newton((lambda y: ((y + 1.0) ** 2)), x0=p0, disp=False) assert_allclose(x, (- 1)) with pytest.raises(RuntimeError, match='Tolerance of'): x = zeros.newton((lambda y: ((y + 1.0) ** 2)), x0=p0, disp=True)
class SpikeSlab(base.Prior): def __init__(self, prob, mean, var): self.prob = prob self.mean = mean self.var = var self.rho = (prob * (var + (mean ** 2))) def __var_x(self, a, b): m_g = (((b * self.var) + self.mean) / (1.0 + (a * self.var))) v_g = (self.var / (1 + (a * self.var))) p_s = (self.prob / (self.prob + (((1 - self.prob) * np.sqrt((1 + (a * self.var)))) * np.exp(((((- 0.5) * (m_g ** 2)) / v_g) + ((0.5 * (self.mean ** 2)) / self.var)))))) return ((p_s * v_g) + ((p_s * (1 - p_s)) * (m_g ** 2))) def iter_v(self, a): a_s = (1 + (a * self.var)) f = (lambda s: (lambda z: self.__var_x(a, (((s * a) * self.mean) + (np.sqrt(((a_s ** s) * a)) * z))))) return (((1 - self.prob) * H(f(0))) + (self.prob * H(f(1)))) def eval_i(self, a): b_s = ((self.mean ** 2) / self.var) a_s = (1 + (a * self.var)) f = (lambda s: (lambda z: np.log((((1 - self.prob) * np.exp((((- 0.5) * (((np.sqrt(((a_s ** s) * a)) * z) + (((a_s ** s) * self.mean) / self.var)) ** 2)) / (a + (1 / self.var))))) + ((self.prob * np.exp(((- 0.5) * b_s))) / np.sqrt(a_s)))))) return (((1 - self.prob) * (H(f(0)) + ((0.5 * ((a * self.var) + b_s)) / a_s))) + (self.prob * (H(f(1)) + (0.5 * ((a * self.var) + (b_s * a_s)))))) def eval_rho(self, rho_prev): return self.rho
def tf_efficientnet_b1_ns(pretrained=False, **kwargs): kwargs['bn_eps'] = BN_EPS_TF_DEFAULT kwargs['pad_type'] = 'same' model = _gen_efficientnet('tf_efficientnet_b1_ns', channel_multiplier=1.0, depth_multiplier=1.1, pretrained=pretrained, **kwargs) return model
(version_base='1.3', config_path='../configs', config_name='enjoy') def main(cfg: EnjoyConfig): assert (cfg.infer == True) train_main(cfg)
.parametrize('axis', (0, 1, 2)) .parametrize('family', ('chebyshev',)) def test_biharmonic3D(family, axis): la = cla N = (16, 16, 16) SD = FunctionSpace(N[allaxes3D[axis][0]], family=family, bc=(0, 0, 0, 0)) K1 = FunctionSpace(N[allaxes3D[axis][1]], family='F', dtype='D') K2 = FunctionSpace(N[allaxes3D[axis][2]], family='F', dtype='d') subcomms = mpi4py_fft.pencil.Subcomm(MPI.COMM_WORLD, [0, 1, 1]) bases = ([0] * 3) bases[allaxes3D[axis][0]] = SD bases[allaxes3D[axis][1]] = K1 bases[allaxes3D[axis][2]] = K2 T = TensorProductSpace(subcomms, bases, axes=allaxes3D[axis]) u = shenfun.TrialFunction(T) v = shenfun.TestFunction(T) mat = inner(v, div(grad(div(grad(u))))) H = la.Biharmonic(*mat) u = Function(T) u[:] = (np.random.random(u.shape) + (1j * np.random.random(u.shape))) f = Function(T) f = H.matvec(u, f) g0 = Function(T) g1 = Function(T) g2 = Function(T) mat = H.tpmats M = {d.get_key(): d for d in mat} g0 = M['SSBSBmat'].matvec(u, g0) g1 = M['ASBSBmat'].matvec(u, g1) g2 = M['BSBSBmat'].matvec(u, g2) T.destroy() assert (np.linalg.norm((f - ((g0 + g1) + g2))) < 1e-08), np.linalg.norm((f - ((g0 + g1) + g2)))
def disassemble(pdf, pars): return {k: pars[pdf.config.par_slice(k)] for k in pdf.config.par_map}
def current_actor_handle() -> ray.actor.ActorHandle: return ray.runtime_context.get_runtime_context().current_actor
_dispatch def idct(x, type=2, n=None, axis=(- 1), norm=None, overwrite_x=False, workers=None): return (Dispatchable(x, np.ndarray),)
def sa_tti(u, v, model): (A, B, C) = thomsen_mat(model) R = R_mat(model) PI = (R.T * (((A * R) * grads(u, so_fact=2)) + ((B * R) * grads(v, so_fact=2)))) MI = (R.T * (((B * R) * grads(u, so_fact=2)) + ((C * R) * grads(v, so_fact=2)))) return (divs(PI, so_fact=2), divs(MI, so_fact=2))
def adjust_learning_rate(optimizer, epoch, args): if (args.lradj == 'type1'): lr_adjust = {epoch: (args.learning_rate * (0.5 ** ((epoch - 1) // 1)))} elif (args.lradj == 'type2'): lr_adjust = {2: 5e-05, 4: 1e-05, 6: 5e-06, 8: 1e-06, 10: 5e-07, 15: 1e-07, 20: 5e-08} if (epoch in lr_adjust.keys()): lr = lr_adjust[epoch] for param_group in optimizer.param_groups: param_group['lr'] = lr print('Updating learning rate to {}'.format(lr))
class TransformerDecoderLayerImproved(Module): def __init__(self, d_model, nhead, dim_feedforward=2048, dropout=0.1, activation='relu'): super(TransformerDecoderLayerImproved, self).__init__() self.self_attn = MultiheadAttention(d_model, nhead, dropout=dropout, use_alibi=False) self.multihead_attn = MultiheadAttention(d_model, nhead, dropout=dropout, use_alibi=False) self.linear1 = Linear(d_model, dim_feedforward) self.dropout = Dropout(dropout) self.linear2 = Linear(dim_feedforward, d_model) self.norm1 = LayerNorm(d_model) self.norm2 = LayerNorm(d_model) self.norm3 = LayerNorm(d_model) self.dropout1 = Dropout(dropout) self.dropout2 = Dropout(dropout) self.dropout3 = Dropout(dropout) self.activation = _get_activation_fn(activation) def __setstate__(self, state): if ('activation' not in state): state['activation'] = F.relu super(TransformerDecoderLayerImproved, self).__setstate__(state) def forward(self, tgt, memory, tgt_mask=None, memory_mask=None, tgt_key_padding_mask=None, memory_key_padding_mask=None): tgt1 = self.norm1(tgt) tgt2 = self.self_attn(tgt1, tgt1, tgt1, attn_mask=tgt_mask, key_padding_mask=tgt_key_padding_mask)[0] tgt = (tgt + self.dropout1(tgt2)) tgt1 = self.norm2(tgt) tgt2 = self.multihead_attn(tgt1, memory, memory, attn_mask=memory_mask, key_padding_mask=memory_key_padding_mask)[0] tgt = (tgt + self.dropout2(tgt2)) tgt1 = self.norm3(tgt) tgt2 = self.linear2(self.dropout(self.activation(self.linear1(tgt1)))) tgt = (tgt + self.dropout3(tgt2)) return tgt
class DataProcessor(object): def get_train_examples(self, data_dir): raise NotImplementedError() def get_dev_examples(self, data_dir): raise NotImplementedError() def get_labels(self): raise NotImplementedError() def _read_tsv(cls, input_file, quotechar=None): with open(input_file, 'r') as f: reader = csv.reader(f, delimiter='\t', quotechar=quotechar) lines = [] for line in reader: if (sys.version_info[0] == 2): line = list((unicode(cell, 'utf-8') for cell in line)) lines.append(line) return lines
_utils.test() def test_argument_redefinition(): def foo(a: ti.i32): a = 1 with pytest.raises(ti.TaichiSyntaxError, match='Kernel argument "a" is immutable in the kernel') as e: foo(5)
def register_Ns3EpcS11SapSgwCreateSessionRequestMessage_methods(root_module, cls): cls.add_constructor([]) cls.add_constructor([param('ns3::EpcS11SapSgw::CreateSessionRequestMessage const &', 'arg0')]) cls.add_instance_attribute('bearerContextsToBeCreated', 'std::list< ns3::EpcS11SapSgw::BearerContextToBeCreated >', is_const=False) cls.add_instance_attribute('imsi', 'uint64_t', is_const=False) cls.add_instance_attribute('uli', 'ns3::EpcS11Sap::Uli', is_const=False) return
class _GlfwRenderer(imgui.integrations.glfw.GlfwRenderer): def __init__(self, *args, **kwargs): super().__init__(*args, **kwargs) self.mouse_wheel_multiplier = 1 def scroll_callback(self, window, x_offset, y_offset): self.io.mouse_wheel += (y_offset * self.mouse_wheel_multiplier)
class InputRequired(object): field_flags = ('required',) def __init__(self, message=None): self.message = message def __call__(self, form, field): if ((not field.raw_data) or (not field.raw_data[0])): if (self.message is None): message = field.gettext('This field is required.') else: message = self.message field.errors[:] = [] raise StopValidation(message)
class JoinedMonitorDescription(schema_utils.Model): joiner_id = types.StringType() monitor_names = types.ListType(optplan.ReferenceType(optplan.Monitor)) monitor_type = types.StringType(choices=('scalar', 'planar', 'volume')) scalar_operation = types.StringType(choices=('magnitude_squared', 'magnitude', 'phase', 'real', 'imag')) vector_operation = types.StringType(choices=('magnitude', 'x', 'y', 'z')) def __init__(self, *args, **kwargs) -> None: if ('monitor_name' in kwargs): kwargs['monitor_names'] = [kwargs['monitor_name']] del kwargs['monitor_name'] super().__init__(*args, **kwargs) if (not self.joiner_id): self.joiner_id = self.monitor_names[0]
class GPipeLastPartition(GPipePartition): RECOMP_PARTITION_CLS = Partition NO_RECOMP_PARTITION_CLS = LastPartition _CLONE_INPUTS = True def __init__(self, *args, **kw): super().__init__(*args, **kw) def forward(self, x: TensorOrTensors, micro_batch_idx): x = super().forward(x, micro_batch_idx) if (not isinstance(x, Tensor)): assert (len(x) == 1) return x[0] return x
(name='learners_data') def fixture_learners_data(breast_cancer_data, california_housing_data, california_housing_survival_data): models_data = [] (X_class_train, _, Y_class_train, _) = breast_cancer_data ngb = NGBClassifier(verbose=False, n_estimators=10) ngb.fit(X_class_train, Y_class_train) models_data.append((ngb, X_class_train, ngb.predict(X_class_train))) (X_reg_train, _, Y_reg_train, _) = california_housing_data ngb = NGBRegressor(verbose=False, n_estimators=10) ngb.fit(X_reg_train, Y_reg_train) models_data.append((ngb, X_reg_train, ngb.predict(X_reg_train))) (X_surv_train, _, T_surv_train, E_surv_train, _) = california_housing_survival_data ngb = NGBSurvival(verbose=False, n_estimators=10) ngb.fit(X_surv_train, T_surv_train, E_surv_train) models_data.append((ngb, X_surv_train, ngb.predict(X_surv_train))) ngb = NGBRegressor(Dist=MultivariateNormal(2), n_estimators=10) ngb.fit(X_surv_train, np.vstack((T_surv_train, E_surv_train)).T) models_data.append((ngb, X_surv_train, ngb.predict(X_surv_train))) return models_data
class NodeNotExpandedError(InvalidSDFGNodeError): def __init__(self, sdfg: 'SDFG', state_id: int, node_id: int): super().__init__('Library node not expanded', sdfg, state_id, node_id)
def hardtanh(input, min_val=(- 1.0), max_val=1.0, inplace=False): if inplace: return torch._C._nn.hardtanh_(input, min_val, max_val) return torch._C._nn.hardtanh(input, min_val, max_val)
def generate_pt_gradient_test(configs, pt_bench_op): _register_test(configs, pt_bench_op, create_pytorch_op_test_case, True)
def arg_parse(): parser = argparse.ArgumentParser(description='Script for testing RoutedFusion') parser.add_argument('--config', required=True) args = parser.parse_args() return vars(args)
class StereoDataset(data.Dataset): def __init__(self, root='./datasets', data_file='test.list', phase='test', img_transform=None, joint_transform=None, depth_transform=None): self.root = root self.data_file = data_file self.files = [] self.phase = phase self.img_transform = img_transform self.joint_transform = joint_transform with open(osp.join(self.root, self.data_file), 'r') as f: data_list = f.read().split('\n') for data in data_list: if (len(data) == 0): continue data_info = data.split(' ') self.files.append({'rgb': data_info[0]}) def __len__(self): return len(self.files) def read_data(self, datafiles): print(osp.join(self.root, datafiles['rgb'])) assert osp.exists(osp.join(self.root, datafiles['rgb'])), 'Image does not exist' rgb = Image.open(osp.join(self.root, datafiles['rgb'])).convert('RGB') disp = cv2.imread(osp.join(self.root, datafiles['rgb'].replace('image_2', 'disp_noc_0').replace('jpg', 'png')), (- 1)) disp = (disp.astype(np.float32) / 256.0) return (rgb, disp) def __getitem__(self, index): index = (index % len(self)) datafiles = self.files[index] (img, disp) = self.read_data(datafiles) if (self.joint_transform is not None): (img, _, _, _, _) = self.joint_transform((img, None, None, 'test', None, None)) if (self.img_transform is not None): img = self.img_transform(img) data = {} data['left_img'] = img data['disp'] = disp return data
class PretrainedVocab(BaseVocab): def __init__(self, embedding_name, *args, **kwargs): self.type = 'pretrained' if (embedding_name not in vocab.pretrained_aliases): raise RuntimeError(f'Unknown embedding type: {embedding_name}') vector_cache = get_mmf_cache_dir() if is_main(): vocab.pretrained_aliases[embedding_name](cache=vector_cache) synchronize() embedding = vocab.pretrained_aliases[embedding_name](cache=vector_cache) self.UNK_INDEX = 3 self.stoi = defaultdict((lambda : self.UNK_INDEX)) self.itos = {} self.itos[self.PAD_INDEX] = self.PAD_TOKEN self.itos[self.SOS_INDEX] = self.SOS_TOKEN self.itos[self.EOS_INDEX] = self.EOS_TOKEN self.itos[self.UNK_INDEX] = self.UNK_TOKEN self.stoi[self.SOS_TOKEN] = self.SOS_INDEX self.stoi[self.EOS_TOKEN] = self.EOS_INDEX self.stoi[self.PAD_TOKEN] = self.PAD_INDEX self.stoi[self.UNK_TOKEN] = self.UNK_INDEX self.vectors = torch.FloatTensor((len(self.itos.keys()) + len(embedding.itos)), len(embedding.vectors[0])) for i in range(4): self.vectors[i] = ((torch.ones_like(self.vectors[i]) * 0.1) * i) index = 4 for word in embedding.stoi: self.itos[index] = word self.stoi[word] = index actual_index = embedding.stoi[word] self.vectors[index] = embedding.vectors[actual_index] index += 1
class ProbingTest(absltest.TestCase):
    """Unit tests for the CLRS probing helpers (pointer, graph and
    string-matching probes)."""

    def test_array(self):
        # array() converts position arrays into predecessor pointers.
        A_pos = np.array([1, 2, 0, 4, 3])
        expected = np.array([2, 1, 1, 4, 0])
        out = probing.array(A_pos)
        np.testing.assert_array_equal(expected, out)

    def test_array_cat(self):
        # array_cat() one-hot encodes categorical values into 3 classes.
        A = np.array([2, 1, 0, 1, 1])
        expected = np.array([[0, 0, 1], [0, 1, 0], [1, 0, 0], [0, 1, 0], [0, 1, 0]])
        out = probing.array_cat(A, 3)
        np.testing.assert_array_equal(expected, out)

    def test_heap(self):
        # heap() produces parent pointers for the first heap_size elements.
        A_pos = np.array([1, 3, 5, 0, 7, 4, 2, 6])
        expected = np.array([3, 1, 2, 1, 5, 1, 6, 3])
        out = probing.heap(A_pos, heap_size=6)
        np.testing.assert_array_equal(expected, out)

    def test_graph(self):
        # graph() binarizes an adjacency/weight matrix; note the diagonal is
        # forced to 1 in the expected output.
        G = np.array([[0.0, 7.0, (- 1.0), (- 3.9), 7.452], [0.0, 0.0, 133.0, 0.0, 9.3], [0.5, 0.1, 0.22, 0.55, 0.666], [7.0, 6.1, 0.2, 0.0, 0.0], [0.0, 3.0, 0.0, 1.0, 0.5]])
        expected = np.array([[1.0, 1.0, 1.0, 1.0, 1.0], [0.0, 1.0, 1.0, 0.0, 1.0], [1.0, 1.0, 1.0, 1.0, 1.0], [1.0, 1.0, 1.0, 1.0, 0.0], [0.0, 1.0, 0.0, 1.0, 1.0]])
        out = probing.graph(G)
        np.testing.assert_array_equal(expected, out)

    def test_mask_one(self):
        # mask_one() builds a one-hot mask of a single index.
        expected = np.array([0, 0, 0, 1, 0])
        out = probing.mask_one(3, 5)
        np.testing.assert_array_equal(expected, out)

    def test_strings_id(self):
        # strings_id() tags text positions with 0 and pattern positions with 1.
        T_pos = np.array([0, 1, 2, 3, 4])
        P_pos = np.array([0, 1, 2])
        expected = np.array([0, 0, 0, 0, 0, 1, 1, 1])
        out = probing.strings_id(T_pos, P_pos)
        np.testing.assert_array_equal(expected, out)

    def test_strings_pair(self):
        # strings_pair() embeds a (text x pattern) probe in the top-right
        # block of a (text+pattern)^2 matrix.
        pair_probe = np.array([[0.5, 3.1, 9.1, 7.3], [1.0, 0.0, 8.0, 9.3], [0.1, 5.0, 0.0, 1.2]])
        expected = np.array([[0.0, 0.0, 0.0, 0.5, 3.1, 9.1, 7.3], [0.0, 0.0, 0.0, 1.0, 0.0, 8.0, 9.3], [0.0, 0.0, 0.0, 0.1, 5.0, 0.0, 1.2], [0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0], [0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0], [0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0], [0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0]])
        out = probing.strings_pair(pair_probe)
        np.testing.assert_equal(expected, out)

    def test_strings_pair_cat(self):
        # Categorical variant: off-block entries get the extra (-1) class.
        pair_probe = np.array([[0, 2, 1], [2, 2, 0]])
        expected = np.array([[[0, 0, 0, (- 1)], [0, 0, 0, (- 1)], [1, 0, 0, 0], [0, 0, 1, 0], [0, 1, 0, 0]], [[0, 0, 0, (- 1)], [0, 0, 0, (- 1)], [0, 0, 1, 0], [0, 0, 1, 0], [1, 0, 0, 0]], [[0, 0, 0, (- 1)], [0, 0, 0, (- 1)], [0, 0, 0, (- 1)], [0, 0, 0, (- 1)], [0, 0, 0, (- 1)]], [[0, 0, 0, (- 1)], [0, 0, 0, (- 1)], [0, 0, 0, (- 1)], [0, 0, 0, (- 1)], [0, 0, 0, (- 1)]], [[0, 0, 0, (- 1)], [0, 0, 0, (- 1)], [0, 0, 0, (- 1)], [0, 0, 0, (- 1)], [0, 0, 0, (- 1)]]])
        out = probing.strings_pair_cat(pair_probe, 3)
        np.testing.assert_equal(expected, out)

    def test_strings_pi(self):
        # strings_pi() shifts pattern-side pointers past the text segment.
        T_pos = np.array([0, 1, 2, 3, 4, 5])
        P_pos = np.array([0, 1, 2, 3])
        pi = np.array([3, 1, 0, 2])
        expected = np.array([0, 1, 2, 3, 4, 5, 9, 7, 6, 8])
        out = probing.strings_pi(T_pos, P_pos, pi)
        np.testing.assert_array_equal(expected, out)

    def test_strings_pos(self):
        # strings_pos() normalizes positions within each segment to [0, 1).
        T_pos = np.array([0, 1, 2, 3, 4])
        P_pos = np.array([0, 1, 2, 3])
        expected = np.array([0.0, 0.2, 0.4, 0.6, 0.8, 0.0, 0.25, 0.5, 0.75])
        out = probing.strings_pos(T_pos, P_pos)
        np.testing.assert_array_equal(expected, out)

    def test_strings_pred(self):
        # strings_pred() builds predecessor pointers per segment.
        T_pos = np.array([0, 1, 2, 3, 4])
        P_pos = np.array([0, 1, 2])
        expected = np.array([0, 0, 1, 2, 3, 5, 5, 6])
        out = probing.strings_pred(T_pos, P_pos)
        np.testing.assert_array_equal(expected, out)
def get_model_modules(): _ignore_modules = ['modeling_auto', 'modeling_encoder_decoder', 'modeling_marian', 'modeling_mmbt', 'modeling_outputs', 'modeling_retribert', 'modeling_utils', 'modeling_flax_auto', 'modeling_flax_encoder_decoder', 'modeling_flax_utils', 'modeling_speech_encoder_decoder', 'modeling_flax_speech_encoder_decoder', 'modeling_flax_vision_encoder_decoder', 'modeling_transfo_xl_utilities', 'modeling_tf_auto', 'modeling_tf_encoder_decoder', 'modeling_tf_outputs', 'modeling_tf_pytorch_utils', 'modeling_tf_utils', 'modeling_tf_transfo_xl_utilities', 'modeling_tf_vision_encoder_decoder', 'modeling_vision_encoder_decoder'] modules = [] for model in dir(transformers.models): if (not model.startswith('__')): model_module = getattr(transformers.models, model) for submodule in dir(model_module): if (submodule.startswith('modeling') and (submodule not in _ignore_modules)): modeling_module = getattr(model_module, submodule) if inspect.ismodule(modeling_module): modules.append(modeling_module) return modules
def log1p(g, self): return log(g, add(g, sym_help._if_scalar_type_as(g, torch.ones(1), self), self))
def set_checkpoint(config): if (config.checkpoint.filepath is not ''): config.checkpoint.monitor = os.path.join('{}-{}'.format(prepare_dataset_prefix(config.datasets.validation, config.checkpoint.monitor_index), config.checkpoint.monitor)) config.checkpoint.filepath = os.path.join(config.checkpoint.filepath, config.name, ('{epoch:02d}_{%s:.3f}' % config.checkpoint.monitor)) if (config.checkpoint.s3_path is not ''): config.checkpoint.s3_url = s3_url(config) else: config.checkpoint.s3_path = '' return config.checkpoint
def test_fit_predict():
    """Fitting on a tiny two-level hierarchy and predicting the same points
    should reproduce the training labels exactly."""
    classifier = LocalClassifierPerNode(local_classifier=LogisticRegression())
    features = np.array([[1, 2], [3, 4]])
    labels = np.array([['a', 'b'], ['b', 'c']])
    classifier.fit(features, labels)
    predicted = classifier.predict(features)
    assert_array_equal(labels, predicted)
class UniversalCondition(QuantifiedCondition):
    """A 'forall' condition over typed parameters.

    Negating a universal yields an existential over the negated parts
    (De Morgan for quantifiers).
    """

    def _untyped(self, parts):
        # Compile away parameter typing: guard each part with the negated
        # type atoms, i.e. forall x. P(x) becomes forall x. (not T(x) or P(x)).
        negated_types = [param.get_atom().negate() for param in self.parameters]
        return UniversalCondition(self.parameters, [Disjunction(negated_types + parts)])

    def negate(self):
        negated_parts = [part.negate() for part in self.parts]
        return ExistentialCondition(self.parameters, negated_parts)

    def has_universal_part(self):
        return True
class BSMNode(Node):
    """Network node hosting a single-atom Bell-state-measurement device.

    The node wires a SingleAtomBSM component to an EntanglementGenerationB
    protocol instance that observes its measurement outcomes.
    """

    def __init__(self, name: str, timeline: 'Timeline', other_nodes: List[str],
                 seed=None, component_templates=None) -> None:
        """Build the BSM hardware and attach the EG-B protocol.

        component_templates may carry constructor kwargs under the
        'SingleAtomBSM' key; defaults to no overrides.
        """
        super().__init__(name, timeline, seed)
        templates = component_templates or {}
        bsm_name = name + '.BSM'
        bsm = SingleAtomBSM(bsm_name, timeline, **templates.get('SingleAtomBSM', {}))
        self.add_component(bsm)
        self.set_first_component(bsm_name)
        self.eg = EntanglementGenerationB(self, '{}_eg'.format(name), other_nodes)
        # The protocol observes BSM measurement results.
        bsm.attach(self.eg)

    def receive_message(self, src: str, msg: 'Message') -> None:
        """Dispatch *msg* to the first matching protocol that accepts it.

        Raises if no protocol of the message's exact type handles it.
        """
        # Exact-type match (not isinstance) mirrors the protocol registry's
        # dispatch convention.
        candidates = (p for p in self.protocols if type(p) == msg.protocol_type)
        for protocol in candidates:
            if protocol.received_message(src, msg):
                return
        print(src, msg)
        raise Exception('Unknown protocol')

    def eg_add_others(self, other):
        """Register another node's name with the local EG protocol."""
        self.protocols[0].others.append(other.name)
# Precision/recall metric over 50k real vs 50k generated samples with a
# 3-nearest-neighbour manifold estimate (StyleGAN-style "pr50k3").
# NOTE(review): the decorator on this definition appears truncated by the
# extraction ("_metric ..." — presumably a registration decorator such as
# "@register_metric"); confirm against the original file before relying on
# this text.
_metric def pr50k3(opts): opts.dataset_kwargs.update(max_size=None) (precision, recall) = precision_recall.compute_pr(opts, max_real=50000, num_gen=50000, nhood_size=3, row_batch_size=10000, col_batch_size=10000) return dict(pr50k3_precision=precision, pr50k3_recall=recall)
def adaptive_clip_grad(parameters, gradients, clip_factor=0.01, eps=0.001):
    """Adaptive Gradient Clipping (AGC).

    Each gradient is rescaled so that its unit-wise norm does not exceed
    clip_factor * max(unitwise_norm(param), eps); gradients already below
    the threshold pass through unchanged.

    Returns a new list of (possibly clipped) gradient tensors.
    """
    clipped_grads = []
    for param, grad in zip(parameters, gradients):
        # Per-unit clipping threshold derived from the parameter's norm.
        max_norm = tf.math.maximum(unitwise_norm(param), eps) * clip_factor
        grad_norm = unitwise_norm(grad)
        # Rescaled gradient; the small epsilon guards a zero grad norm.
        rescaled = grad * (max_norm / tf.math.maximum(grad_norm, 1e-06))
        clipped_grads.append(tf.where(grad_norm < max_norm, grad, rescaled))
    return clipped_grads
def proxyless_base(net_config=None, n_classes=None, bn_param=None, dropout_rate=None, local_path='~/.torch/proxylessnas/'):
    """Build a ProxylessNAS network from a JSON config (URL or local path).

    Args:
        net_config: URL or filesystem path of the network-config JSON (required).
        n_classes: optional override for the classifier's output features.
        bn_param: optional (momentum, eps) pair for batch-norm layers.
        dropout_rate: optional override for the classifier dropout rate.
        local_path: cache directory for downloaded configs.

    Fixes vs. original:
    - the URL test was a garbled string literal (`if (' in net_config):`);
      restored to the upstream check ``'http' in net_config``;
    - the config file handle was leaked via ``json.load(open(...))``; now
      closed with a context manager.
    """
    assert net_config is not None, 'Please input a network config'
    if 'http' in net_config:
        # Remote config: download (and cache) it locally first.
        net_config_path = download_url(net_config, local_path)
    else:
        net_config_path = net_config
    with open(net_config_path, 'r') as config_file:
        net_config_json = json.load(config_file)
    if n_classes is not None:
        net_config_json['classifier']['out_features'] = n_classes
    if dropout_rate is not None:
        net_config_json['classifier']['dropout_rate'] = dropout_rate
    net = ProxylessNASNets.build_from_config(net_config_json)
    if bn_param is not None:
        net.set_bn_param(momentum=bn_param[0], eps=bn_param[1])
    return net
def splantider(tck, n=1):
    """Compute the antiderivative of a spline, n times.

    Accepts either a BSpline object (delegates to its method) or a legacy
    FITPACK (t, c, k) tuple (delegates to the procedural implementation).
    """
    if not isinstance(tck, BSpline):
        return _impl.splantider(tck, n)
    return tck.antiderivative(n)
# DaCe frontend replacement for mpi4py's Intracomm.Recv: only COMM_WORLD is
# supported; the call is lowered to the generic _recv helper.
# NOTE(review): the decorator on this definition appears truncated by the
# extraction ("_method('Intracomm', 'Recv')" — presumably a registration
# decorator such as "@oprepo.replaces_method('Intracomm', 'Recv')"); confirm
# against the original file before relying on this text.
_method('Intracomm', 'Recv') def _intracomm_Recv(pv: 'ProgramVisitor', sdfg: SDFG, state: SDFGState, icomm: 'Intracomm', buffer: str, src: Union[(str, sp.Expr, Number)], tag: Union[(str, sp.Expr, Number)]): from mpi4py import MPI (icomm_name, icomm_obj) = icomm if (icomm_obj != MPI.COMM_WORLD): raise ValueError('Only the mpi4py.MPI.COMM_WORLD Intracomm is supported in DaCe Python programs.') return _recv(pv, sdfg, state, buffer, src, tag)
def AddEdges(depG, deps, vtoi):
    """Insert dependency triples into the graph.

    Each element of *deps* is (source_name, target_name, label); names are
    mapped to vertex indices through *vtoi* before insertion.
    """
    for source_name, target_name, edge_label in deps:
        depG.add_edge(vtoi[source_name], vtoi[target_name], label=edge_label)
def _validate_int(n, bound, name): msg = f'{name} must be an integer not less than {bound}, but got {n!r}' try: n = operator.index(n) except TypeError: raise TypeError(msg) from None if (n < bound): raise ValueError(msg) return n
def get_dataloaders(args):
    """Build EmmentalDataLoaders for the train and test splits of the
    torchvision classification task named by args.task.

    Returns a list of two dataloaders (train, test). Training data is
    wrapped with probabilistic labels and k-fold augmentation; the test
    split is wrapped plain.
    """
    # Download (if missing) and load the raw torchvision datasets.
    train_dataset = torchvision.datasets.__dict__[args.task.upper()](root=args.data, train=True, download=True)
    test_dataset = torchvision.datasets.__dict__[args.task.upper()](root=args.data, train=False, download=True)
    dataloaders = []
    datasets = {}
    for split in ['train', 'test']:
        if (split == 'train'):
            # Training split: probabilistic labels plus k augmented views.
            datasets[split] = ALL_DATASETS[args.task](args.task, train_dataset, split, index=None, prob_label=True, k=args.augment_k)
        elif (split == 'test'):
            datasets[split] = ALL_DATASETS[args.task](args.task, test_dataset, split)
    for (split, dataset) in datasets.items():
        # Shuffle only the training split; evaluation splits may use a larger
        # batch size (args.valid_batch_size) when one is configured.
        dataloaders.append(EmmentalDataLoader(task_to_label_dict={args.task: 'labels'}, dataset=dataset, split=split, shuffle=(True if (split in ['train']) else False), batch_size=(args.batch_size if ((split in args.train_split) or (args.valid_batch_size is None)) else args.valid_batch_size), num_workers=4))
        # Log the loader just appended (dataloaders[-1]).
        logger.info(f'Built dataloader for {args.task} {split} set with {len(dataset)} samples (Shuffle={(split in args.train_split)}, Batch size={dataloaders[(- 1)].batch_size}).')
    return dataloaders
class WeightedEuclidean(Module):
    """Legacy (lua-torch port) layer: per output unit j the output is the
    diagonally-weighted Euclidean distance between the input and the
    template column weight[:, j]:

        output[j] = || diagCov[:, j] * (x - weight[:, j]) ||_2

    All intermediate tensors are cached on the instance (self._repeat,
    self._expand, ...) and reused across forward/backward calls; backward
    passes therefore depend on the buffers left behind by updateOutput.
    """

    def __init__(self, inputSize, outputSize):
        super(WeightedEuclidean, self).__init__()
        # Templates (one column per output unit) and their gradients.
        self.weight = torch.Tensor(inputSize, outputSize)
        self.gradWeight = torch.Tensor(inputSize, outputSize)
        # Per-dimension scaling ("diagonal covariance") and its gradient.
        self.diagCov = torch.Tensor(inputSize, outputSize)
        self.gradDiagCov = torch.Tensor(inputSize, outputSize)
        self.reset()
        # Scratch buffers, lazily (re)allocated; cleared by type().
        self._diagCov = self.output.new()
        self.fastBackward = False
        self._input = None
        self._weight = None
        self._expand = None
        self._expand2 = None
        self._expand3 = None
        self._repeat = None
        self._repeat2 = None
        self._repeat3 = None
        self._div = None
        self._output = None
        self._expand4 = None
        self._gradOutput = None
        self._sum = None

    def reset(self, stdv=None):
        """Re-initialize weight uniformly in [-stdv, stdv] and diagCov to 1."""
        if (stdv is not None):
            stdv = (stdv * math.sqrt(3))
        else:
            stdv = (1.0 / math.sqrt(self.weight.size(1)))
        self.weight.uniform_((- stdv), stdv)
        self.diagCov.fill_(1)

    def _view(self, res, src, *args):
        # Alias res onto a reshaped view of src (copying only if src is
        # non-contiguous).
        if src.is_contiguous():
            res.set_(src.view(*args))
        else:
            res.set_(src.contiguous().view(*args))

    def updateOutput(self, input):
        """Forward pass; accepts 1D (single sample) or 2D (batch) input."""
        # Lazy buffer allocation (buffers may have been nulled by type()).
        if (self._diagCov is None):
            self._diagCov = self.output.new()
        if (self._input is None):
            self._input = input.new()
        if (self._weight is None):
            self._weight = self.weight.new()
        if (self._expand is None):
            self._expand = self.output.new()
        if (self._expand2 is None):
            self._expand2 = self.output.new()
        if (self._expand3 is None):
            self._expand3 = self.output.new()
        if (self._repeat is None):
            self._repeat = self.output.new()
        if (self._repeat2 is None):
            self._repeat2 = self.output.new()
        if (self._repeat3 is None):
            self._repeat3 = self.output.new()
        (inputSize, outputSize) = (self.weight.size(0), self.weight.size(1))
        if (input.dim() == 1):
            # Single sample: view as a column and broadcast against weight.
            self._view(self._input, input, inputSize, 1)
            self._expand.expand_as(self._input, self.weight)
            self._repeat.resize_as_(self._expand).copy_(self._expand)
            # _repeat <- diagCov * (x - weight), column-wise.
            self._repeat.add_((- 1), self.weight)
            self._repeat.mul_(self.diagCov)
            torch.norm(self._repeat, 2, 0, True, out=self.output)
            self.output.resize_(outputSize)
        elif (input.dim() == 2):
            batchSize = input.size(0)
            self._view(self._input, input, batchSize, inputSize, 1)
            self._expand = self._input.expand(batchSize, inputSize, outputSize)
            self._repeat.resize_as_(self._expand).copy_(self._expand)
            self._weight = self.weight.view(1, inputSize, outputSize)
            self._expand2 = self._weight.expand_as(self._repeat)
            self._diagCov = self.diagCov.view(1, inputSize, outputSize)
            self._expand3 = self._diagCov.expand_as(self._repeat)
            if (input.type() == 'torch.cuda.FloatTensor'):
                # CUDA path materializes the expanded views before the
                # in-place ops (expanded CUDA tensors can't be used in-place).
                self._repeat2.resize_as_(self._expand2).copy_(self._expand2)
                self._repeat.add_((- 1), self._repeat2)
                self._repeat3.resize_as_(self._expand3).copy_(self._expand3)
                self._repeat.mul_(self._repeat3)
            else:
                self._repeat.add_((- 1), self._expand2)
                self._repeat.mul_(self._expand3)
            torch.norm(self._repeat, 2, 1, True, out=self.output)
            self.output.resize_(batchSize, outputSize)
        else:
            raise RuntimeError('1D or 2D input expected')
        return self.output

    def updateGradInput(self, input, gradOutput):
        """Backward pass w.r.t. the input; reuses buffers from updateOutput."""
        if (self.gradInput is None):
            return
        if (self._div is None):
            self._div = input.new()
        if (self._output is None):
            self._output = self.output.new()
        if (self._expand4 is None):
            self._expand4 = input.new()
        if (self._gradOutput is None):
            self._gradOutput = input.new()
        # Without fastBackward, recompute the forward pass so the scratch
        # buffers (_repeat, _expand3, ...) reflect this input.
        if (not self.fastBackward):
            self.updateOutput(input)
        (inputSize, outputSize) = (self.weight.size(0), self.weight.size(1))
        # d||v||/dv = v/||v||; epsilon avoids division by a zero norm.
        self._output.resize_as_(self.output).copy_(self.output).add_(1e-07)
        self._view(self._gradOutput, gradOutput, gradOutput.size())
        torch.div(gradOutput, self._output, out=self._div)
        if (input.dim() == 1):
            self._div.resize_(1, outputSize)
            self._expand4 = self._div.expand_as(self.weight)
            # NOTE(review): `torch.type(input)` looks wrong — torch has no
            # top-level `type` taking a tensor; presumably `input.type()`
            # was intended (as used in the 2D branch). Confirm upstream.
            if (torch.type(input) == 'torch.cuda.FloatTensor'):
                self._repeat2.resize_as_(self._expand4).copy_(self._expand4)
                self._repeat2.mul_(self._repeat)
            else:
                self._repeat2.mul_(self._repeat, self._expand4)
            self._repeat2.mul_(self.diagCov)
            torch.sum(self._repeat2, 1, True, out=self.gradInput)
            self.gradInput.resize_as_(input)
        elif (input.dim() == 2):
            batchSize = input.size(0)
            self._div.resize_(batchSize, 1, outputSize)
            self._expand4 = self._div.expand(batchSize, inputSize, outputSize)
            if (input.type() == 'torch.cuda.FloatTensor'):
                self._repeat2.resize_as_(self._expand4).copy_(self._expand4)
                self._repeat2.mul_(self._repeat)
                self._repeat2.mul_(self._repeat3)
            else:
                torch.mul(self._repeat, self._expand4, out=self._repeat2)
                self._repeat2.mul_(self._expand3)
            torch.sum(self._repeat2, 2, True, out=self.gradInput)
            self.gradInput.resize_as_(input)
        else:
            raise RuntimeError('1D or 2D input expected')
        return self.gradInput

    def accGradParameters(self, input, gradOutput, scale=1):
        """Accumulate gradients w.r.t. weight and diagCov.

        Relies on _repeat/_repeat2/_expand3/_expand4 left behind by
        updateGradInput, so it must be called after it.
        """
        (inputSize, outputSize) = (self.weight.size(0), self.weight.size(1))
        if (input.dim() == 1):
            self.gradWeight.add_((- scale), self._repeat2)
            # Transform _repeat (= diagCov*(x-w)) into diagCov*(x-w)^2
            # for the diagCov gradient.
            self._repeat.div_(self.diagCov)
            self._repeat.mul_(self._repeat)
            self._repeat.mul_(self.diagCov)
            # NOTE(review): same suspicious `torch.type(input)` as in
            # updateGradInput — presumably `input.type()`.
            if (torch.type(input) == 'torch.cuda.FloatTensor'):
                self._repeat2.resize_as_(self._expand4).copy_(self._expand4)
                self._repeat2.mul_(self._repeat)
            else:
                torch.mul(self._repeat, self._expand4, out=self._repeat2)
            self.gradDiagCov.add_(self._repeat2)
        elif (input.dim() == 2):
            if (self._sum is None):
                self._sum = input.new()
            torch.sum(self._repeat2, 0, True, out=self._sum)
            self._sum.resize_(inputSize, outputSize)
            self.gradWeight.add_((- scale), self._sum)
            if (input.type() == 'torch.cuda.FloatTensor'):
                self._repeat.div_(self._repeat3)
                self._repeat.mul_(self._repeat)
                self._repeat.mul_(self._repeat3)
                self._repeat2.resize_as_(self._expand4).copy_(self._expand4)
                self._repeat.mul_(self._repeat2)
            else:
                self._repeat.div_(self._expand3)
                self._repeat.mul_(self._repeat)
                self._repeat.mul_(self._expand3)
                self._repeat.mul_(self._expand4)
            torch.sum(self._repeat, 0, True, out=self._sum)
            self._sum.resize_(inputSize, outputSize)
            self.gradDiagCov.add_(scale, self._sum)
        else:
            raise RuntimeError('1D or 2D input expected')

    def type(self, type=None, tensorCache=None):
        """Cast the module; scratch buffers are dropped so they get
        re-allocated in the new tensor type on the next forward."""
        if type:
            self._input = None
            self._output = None
            self._gradOutput = None
            self._weight = None
            self._div = None
            self._sum = None
            self._expand = None
            self._expand2 = None
            self._expand3 = None
            self._expand4 = None
            self._repeat = None
            self._repeat2 = None
            self._repeat3 = None
        return super(WeightedEuclidean, self).type(type, tensorCache)

    def parameters(self):
        """Return (params, grads) pairs for the optimizer."""
        return ([self.weight, self.diagCov], [self.gradWeight, self.gradDiagCov])

    def accUpdateGradParameters(self, input, gradOutput, lr):
        # In-place SGD trick from lua-torch: temporarily alias the grad
        # buffers onto the parameters and accumulate with -lr.
        gradWeight = self.gradWeight
        gradDiagCov = self.gradDiagCov
        self.gradWeight = self.weight
        self.gradDiagCov = self.diagCov
        self.accGradParameters(input, gradOutput, (- lr))
        self.gradWeight = gradWeight
        self.gradDiagCov = gradDiagCov
def gettypeval(typename):
    """Return a representative dummy value for a C-style type name.

    Matching is by substring, checked in order: 'int' (123), 'bool' (True),
    'double'/'float' (123.0). Anything else raises ValueError.
    """
    # Note: 'int' is tested first, so e.g. 'uint32' also maps to 123.
    if 'int' in typename:
        return 123
    if 'bool' in typename:
        return True
    if 'double' in typename or 'float' in typename:
        return 123.0
    raise ValueError('Unknown type encountered')
class LinearDecayLR(_LRScheduler):
    """Warmup followed by linear decay, precomputed per epoch.

    For fine-tuning the LR is held constant at args.lr. Otherwise the
    schedule ramps linearly from args.warmup_min_lr to args.lr over
    args.warmup_epochs, then decays linearly down to args.min_lr; the final
    entry is duplicated so indexing one epoch past the end is safe.
    get_lr() returns two values: the base LR and a backbone LR scaled by
    args.lr_backbone_ratio.
    """

    def __init__(self, optimizer, args, last_epoch=(- 1), verbose=False):
        if args.finetune:
            # Constant schedule (epochs + 1 entries, like the decay path).
            self.lrs = [args.lr] * (args.epochs + 1)
        else:
            warmup = [args.warmup_min_lr + ((args.lr - args.warmup_min_lr) * i) / args.warmup_epochs
                      for i in range(args.warmup_epochs)]
            decay = [max((i * args.lr) / args.epochs, args.min_lr)
                     for i in range(args.epochs - args.warmup_epochs)]
            decay.reverse()
            # Repeat the last decayed value once as a sentinel tail.
            self.lrs = warmup + decay + decay[-1:]
        self.lr_backbone_ratio = args.lr_backbone_ratio
        super(LinearDecayLR, self).__init__(optimizer, last_epoch, verbose)

    def get_lr(self):
        if not self._get_lr_called_within_step:
            warnings.warn('To get the last learning rate computed by the scheduler, please use `get_last_lr()`.', UserWarning)
        base_lr = self.lrs[self.last_epoch]
        return [base_lr, base_lr * self.lr_backbone_ratio]
class NTMLayer(Layer):
    """Neural Turing Machine recurrent layer (Lasagne/Theano).

    Combines an external memory, a controller, and a set of read/write
    heads; the recurrence is unrolled with theano.scan. The scan state is
    ordered [memory, controller hidden(s), write weights, read weights] and
    the `step` signature must match that ordering exactly.
    """

    def __init__(self, incoming, memory, controller, heads, only_return_final=False, **kwargs):
        super(NTMLayer, self).__init__(incoming, **kwargs)
        self.memory = memory
        self.controller = controller
        self.heads = heads
        # Split the heads by kind; each collection manages its own weights.
        # NOTE(review): filter(...) is a lazy iterator on Python 3 —
        # presumably consumed exactly once inside the collection
        # constructors; confirm if this file targets Python 3.
        self.write_heads = WriteHeadCollection(heads=filter((lambda head: isinstance(head, WriteHead)), heads))
        self.read_heads = ReadHeadCollection(heads=filter((lambda head: isinstance(head, ReadHead)), heads))
        self.only_return_final = only_return_final

    def get_output_shape_for(self, input_shapes):
        # (batch, units) when only the final state is returned, otherwise
        # (batch, time, units).
        if self.only_return_final:
            return (input_shapes[0], self.controller.num_units)
        else:
            return (input_shapes[0], input_shapes[1], self.controller.num_units)

    def get_params(self, **tags):
        # Aggregate trainable parameters from this layer, the controller,
        # the memory, and every head.
        params = super(NTMLayer, self).get_params(**tags)
        params += self.controller.get_params(**tags)
        params += self.memory.get_params(**tags)
        for head in self.heads:
            params += head.get_params(**tags)
        return params

    def get_output_for(self, input, get_details=False, **kwargs):
        """Run the NTM over the input sequence.

        With get_details=True returns [memory, hidden, state, write weights,
        read weights] trajectories; otherwise the hidden trajectory (or just
        its final step when only_return_final is set).
        """
        # (batch, time, feat) -> (time, batch, feat) for theano.scan.
        input = input.dimshuffle(1, 0, 2)

        def step(x_t, M_tm1, h_tm1, state_tm1, ww_tm1, wr_tm1, *params):
            # One NTM tick: write to memory, read it back, advance the
            # controller, then recompute both heads' address weights.
            M_t = self.write_heads.write(h_tm1, ww_tm1, M_tm1)
            r_t = self.read_heads.read(wr_tm1, M_t)
            (h_t, state_t) = self.controller.step(x_t, r_t, h_tm1, state_tm1)
            ww_t = self.write_heads.get_weights(h_t, ww_tm1, M_t)
            wr_t = self.read_heads.get_weights(h_t, wr_tm1, M_t)
            return [M_t, h_t, state_t, ww_t, wr_t]

        # Tile the learned initial states across the batch; unbroadcast so
        # scan treats the batch axis as a real (non-broadcastable) dimension.
        memory_init = T.tile(self.memory.memory_init, (input.shape[1], 1, 1))
        memory_init = T.unbroadcast(memory_init, 0)
        write_weights_init = T.tile(self.write_heads.weights_init, (input.shape[1], 1, 1))
        write_weights_init = T.unbroadcast(write_weights_init, 0)
        read_weights_init = T.tile(self.read_heads.weights_init, (input.shape[1], 1, 1))
        read_weights_init = T.unbroadcast(read_weights_init, 0)
        non_seqs = (((self.controller.get_params() + self.memory.get_params()) + self.write_heads.get_params()) + self.read_heads.get_params())
        (hids, _) = theano.scan(fn=step, sequences=input, outputs_info=(([memory_init] + self.controller.outputs_info(input.shape[1])) + [write_weights_init, read_weights_init]), non_sequences=non_seqs, strict=True)
        if get_details:
            # Shuffle every trajectory back to batch-major layout.
            hid_out = [hids[0].dimshuffle(1, 0, 2, 3), hids[1].dimshuffle(1, 0, 2), hids[2].dimshuffle(1, 0, 2), hids[3].dimshuffle(1, 0, 2, 3), hids[4].dimshuffle(1, 0, 2, 3)]
        elif self.only_return_final:
            # hids[1] is the controller hidden trajectory; take its last step.
            hid_out = hids[1][(- 1)]
        else:
            hid_out = hids[1].dimshuffle(1, 0, 2)
        return hid_out
def test__get_pipeline_hyperparameter_dataset():
    """Looking up a known dataset should return its per-pipeline mapping."""
    hyperparameters = {'dataset1': {'pipeline1': 'pipeline1.json', 'pipeline2': 'pipeline2.json'}}
    result = benchmark._get_pipeline_hyperparameter(hyperparameters, 'dataset1', None)
    expected = {'pipeline1': 'pipeline1.json', 'pipeline2': 'pipeline2.json'}
    assert result == expected
def test_numpytype_datetime64():
    """A datetime64 NumpyType should round-trip through the datashape parser."""
    original = NumpyType('datetime64')
    parsed = ak.types.from_datashape(str(original), highlevel=False)
    assert str(parsed) == str(original)
def range_deserialize(iodata: 'IOData') -> range:
    """Rebuild a range object from serialized start/stop/step kwargs."""
    kwargs = iodata.as_kwargs()
    return range(kwargs['start'], kwargs['stop'], kwargs['step'])