code
stringlengths
101
5.91M
@pytest.mark.parametrize('arg', ('headers', 'query_string'))
def test_call_wsgi_overrides(mocker, arg, openapi_30):
    """Explicit ``call_wsgi`` kwargs must override values stored on the case."""
    # Patch the WSGI client so no real request happens; the ValueError aborts
    # the call right after the arguments we want to inspect are recorded.
    spy = mocker.patch('werkzeug.Client.open', side_effect=ValueError)
    original = {'A': 'X', 'B': 'X'}
    case = Case(openapi_30['/users']['GET'], headers=original, query=original)
    overridden = {'B': 'Y'}
    try:
        # NOTE(review): the source was truncated here ("base_url=' app=42)");
        # reconstructed as a plain call — confirm against upstream history.
        case.call_wsgi(**{arg: overridden}, app=42)
    except ValueError:
        pass
    _assert_override(spy, arg, original, overridden)
def _render_question_crowd_html(question_template: CritiqueQuestionTemplate) -> str:
    """Render the crowd-html markup (question text + input widget) for one question.

    Raises:
        ValueError: if the question type is not FREE_RESPONSE, MULTIPLE_CHOICE
            or CHECKBOX.  Previously an unknown type crashed with an opaque
            UnboundLocalError instead.
    """
    question_input_crowd_html: str
    if (question_template.question_type == QuestionType.FREE_RESPONSE):
        question_input_crowd_html = textwrap.dedent(f' <crowd-text-area name="{question_template.name}" required></crowd-text-area>')
    elif (question_template.question_type == QuestionType.MULTIPLE_CHOICE):
        question_input_crowd_html = _render_multiple_choice_options_crowd_html(question_template.name, question_template.options)
    elif (question_template.question_type == QuestionType.CHECKBOX):
        question_input_crowd_html = _render_checkbox_options_crowd_html(question_template.name, question_template.options)
    else:
        raise ValueError(f'Unsupported question type: {question_template.question_type}')
    return textwrap.dedent(f''' <p style="white-space: pre-wrap;"> {_format_template_tags(question_template.text)} </p> {_indent_to_level(question_input_crowd_html, 2)}''')
def make_data_loader(cfg, is_train=True):
    """Build a DataLoader and its dataset from the config.

    Training uses ``SOLVER.IMS_PER_BATCH`` with shuffling; evaluation uses
    ``TEST.IMS_PER_BATCH`` without shuffling.  Returns ``(data_loader, datasets)``.
    """
    # The original assigned SOLVER.IMS_PER_BATCH unconditionally before this
    # if/else, which was redundant (both branches reassign batch_size).
    if is_train:
        batch_size = cfg.SOLVER.IMS_PER_BATCH
        shuffle = True
    else:
        batch_size = cfg.TEST.IMS_PER_BATCH
        shuffle = False
    transforms = build_transforms(cfg, is_train)
    # NOTE(review): DATASETS.TRAIN is used even when is_train is False — confirm
    # whether evaluation should read a separate DATASETS.TEST list instead.
    datasets = build_dataset(cfg.DATASETS.TRAIN, transforms, bunch=cfg.SOLVER.BUNCH, use_mask=cfg.DATASETS.USE_MASK, num_frame=cfg.DATASETS.NUM_FRAME)
    num_workers = cfg.DATALOADER.NUM_WORKERS
    data_loader = data.DataLoader(datasets, batch_size=batch_size, shuffle=shuffle, num_workers=num_workers)
    return (data_loader, datasets)
def get_cudnn_mode(mode):
    """Map an RNN mode string to the corresponding cuDNN enum value.

    Args:
        mode: one of 'RNN_RELU', 'RNN_TANH', 'LSTM', 'GRU'.

    Raises:
        ValueError: for any other string (ValueError is a subclass of the
            plain Exception previously raised, so existing handlers still work).
    """
    modes = {
        'RNN_RELU': cudnn.CUDNN_RNN_RELU,
        'RNN_TANH': cudnn.CUDNN_RNN_TANH,
        'LSTM': cudnn.CUDNN_LSTM,
        'GRU': cudnn.CUDNN_GRU,
    }
    try:
        return modes[mode]
    except KeyError:
        raise ValueError('Unknown mode: {}'.format(mode)) from None
class HistGradientBoostingClassifierBenchmark(Predictor, Estimator, Benchmark):
    """Benchmark HistGradientBoostingClassifier on a synthetic 5-class task."""

    param_names = []
    params = ()

    def setup_cache(self):
        super().setup_cache()

    def make_data(self, params):
        # 10k samples, 100 features, 5 classes — fixed synthetic workload.
        return _synth_classification_dataset(n_samples=10000, n_features=100, n_classes=5)

    def make_estimator(self, params):
        return HistGradientBoostingClassifier(
            max_iter=100,
            max_leaf_nodes=15,
            early_stopping=False,
            random_state=0,
        )

    def make_scorers(self):
        make_gen_classif_scorers(self)
def register_Ns3PointToPointChannel_methods(root_module, cls):
    """Register constructors and methods of ns3::PointToPointChannel.

    Pybindgen-generated binding code: each call declares one C++ member for the
    Python wrapper (return type, parameters, const/virtual/static/visibility).
    """
    # Copy constructor and default constructor.
    cls.add_constructor([param('ns3::PointToPointChannel const &', 'arg0')])
    cls.add_constructor([])
    # Public API.
    cls.add_method('Attach', 'void', [param('ns3::Ptr< ns3::PointToPointNetDevice >', 'device')])
    cls.add_method('GetDevice', 'ns3::Ptr< ns3::NetDevice >', [param('uint32_t', 'i')], is_const=True, is_virtual=True)
    cls.add_method('GetNDevices', 'uint32_t', [], is_const=True, is_virtual=True)
    cls.add_method('GetPointToPointDevice', 'ns3::Ptr< ns3::PointToPointNetDevice >', [param('uint32_t', 'i')], is_const=True)
    cls.add_method('GetTypeId', 'ns3::TypeId', [], is_static=True)
    cls.add_method('TransmitStart', 'bool', [param('ns3::Ptr< ns3::Packet const >', 'p'), param('ns3::Ptr< ns3::PointToPointNetDevice >', 'src'), param('ns3::Time', 'txTime')], is_virtual=True)
    # Protected helpers.
    cls.add_method('GetDelay', 'ns3::Time', [], is_const=True, visibility='protected')
    cls.add_method('GetDestination', 'ns3::Ptr< ns3::PointToPointNetDevice >', [param('uint32_t', 'i')], is_const=True, visibility='protected')
    cls.add_method('GetSource', 'ns3::Ptr< ns3::PointToPointNetDevice >', [param('uint32_t', 'i')], is_const=True, visibility='protected')
    cls.add_method('IsInitialized', 'bool', [], is_const=True, visibility='protected')
    return
def FilesBelongToSameModule(filename_cc, filename_h):
    """Check whether a .cc file and a .h file belong to the same module.

    Both names are normalised: extensions are stripped, a trailing
    ``_unittest``/``_test`` (for the .cc) or ``-inl`` (for the .h) suffix is
    removed, and ``/public/`` / ``/internal/`` path components are collapsed.
    The files match when the normalised .cc path ends with the normalised .h
    path.

    Returns:
        (belongs, common_path) — ``common_path`` is the prefix of the .cc path
        preceding the shared suffix (empty when the paths are equal or the
        files do not match).
    """
    if not filename_cc.endswith('.cc'):
        return (False, '')
    cc = filename_cc[:-len('.cc')]
    for suffix in ('_unittest', '_test'):
        if cc.endswith(suffix):
            cc = cc[:-len(suffix)]
            break
    cc = cc.replace('/public/', '/').replace('/internal/', '/')
    if not filename_h.endswith('.h'):
        return (False, '')
    h = filename_h[:-len('.h')]
    if h.endswith('-inl'):
        h = h[:-len('-inl')]
    h = h.replace('/public/', '/').replace('/internal/', '/')
    if not cc.endswith(h):
        return (False, '')
    return (True, cc[:-len(h)])
def test_check_response_method_unknown_method():
    """_check_response_method must raise AttributeError for a missing method."""
    expected = 'RandomForestRegressor has none of the following attributes: unknown_method.'
    with pytest.raises(AttributeError, match=expected):
        _check_response_method(RandomForestRegressor(), 'unknown_method')
def dist_init():
    """Initialise the distributed environment, falling back to a single process.

    Sets the module-level ``rank``, ``world_size`` and ``inited`` globals and
    returns ``(rank, world_size)``.  A RuntimeError whose first argument
    mentions 'public' (no usable network interface) triggers the single-process
    fallback; any other RuntimeError is re-raised.
    """
    global rank, world_size, inited
    try:
        (rank, world_size) = _dist_init()
    except RuntimeError as e:
        if ('public' in e.args[0]):
            logger.info(e)
            logger.info('Warning: use single process')
            (rank, world_size) = (0, 1)
        else:
            # Bare re-raise keeps the original traceback; the previous
            # `raise RuntimeError(*e.args)` discarded it.
            raise
    inited = True
    return (rank, world_size)
def rotate_shift(x, shift, angle):
    """Rotate the 2-D points in ``x`` by ``angle`` and then translate by ``shift``.

    ``x`` and ``shift`` have a trailing dimension of 2; ``angle`` is a scalar
    float (python float or numpy float16/float32).
    """
    assert isinstance(angle, (np.float32, np.float16, float))
    assert (shift.shape[(- 1)] == 2)
    assert (x.shape[(- 1)] == 2)
    # Fixed: the source dropped the operator between `x` and the rotation
    # matrix; matrix multiplication (@) against rot_matrix(angle).T applies the
    # rotation to row vectors.
    return (x @ rot_matrix(angle).T) + shift
class Identity(nn.Module):
    """Pass-through module that returns its input feature wrapped in a list."""

    def __init__(self, config):
        # `config` is accepted for interface parity with sibling modules; unused.
        super(Identity, self).__init__()

    def forward(self, feature, att_mask, head_mask):
        # Both masks are ignored; only the feature tensor is forwarded.
        return [feature]
def _get_listing_win(source_dir): listing = glob.glob(os.path.join(source_dir, '*.pyd')) listing.extend(glob.glob(os.path.join(source_dir, 'lib', '*.lib'))) listing.extend(glob.glob(os.path.join(source_dir, 'lib', '*.dll'))) return listing
def get_policy(env):
    """Build a Gaussian MLP policy whose mean network matches the environment."""
    mean_net = get_policy_network(env)
    return GaussianMLPPolicy(name='policy', env_spec=env.spec, mean_network=mean_net)
def test_non_unique_vocab():
    """CountVectorizer must reject a user-supplied vocabulary with duplicates."""
    duplicated_vocab = ['a', 'b', 'c', 'a', 'a']
    vectorizer = CountVectorizer(vocabulary=duplicated_vocab)
    with pytest.raises(ValueError):
        vectorizer.fit([])
def clean_bg_vat(df: Union[(pd.DataFrame, dd.DataFrame)], column: str, output_format: str='standard', inplace: bool=False, errors: str='coerce', progress: bool=True) -> pd.DataFrame:
    """Clean and standardise Bulgarian VAT codes in ``df[column]``.

    Parameters
    ----------
    df
        Input pandas or dask DataFrame.
    column
        Name of the column holding VAT codes.
    output_format
        Either 'compact' or 'standard'.
    inplace
        If True, the cleaned values replace the original column (the result
        column is still named ``<column>_clean`` — see note below).
    errors
        Error-handling mode forwarded to ``_format`` (e.g. 'coerce').
    progress
        Show a dask progress bar while computing.
    """
    if (output_format not in {'compact', 'standard'}):
        raise ValueError(f'output_format {output_format} is invalid. It needs to be "compact" or "standard".')
    # Convert to dask so each partition can be cleaned independently.
    df = to_dask(df)
    # `_format` returns a tuple per value; stash them in a temporary column.
    df['clean_code_tup'] = df[column].map_partitions((lambda srs: [_format(x, output_format, errors) for x in srs]), meta=object)
    df = df.assign(_temp_=df['clean_code_tup'].map(itemgetter(0)))
    df = df.rename(columns={'_temp_': f'{column}_clean'})
    df = df.drop(columns=['clean_code_tup'])
    if inplace:
        # Overwrite the source column, then rename it to '<column>_clean' so the
        # output schema matches the non-inplace call.
        df[column] = df[f'{column}_clean']
        df = df.drop(columns=f'{column}_clean')
        df = df.rename(columns={column: f'{column}_clean'})
    with ProgressBar(minimum=1, disable=(not progress)):
        df = df.compute()
    return df
class PublishProvidedPatternsExperiment(TaskConfiguration):
    """Task configuration that runs the provided-patterns experiment and publishes its findings."""

    @staticmethod
    def mode() -> str:
        # Fixed: `mode` takes no `self` but was not declared @staticmethod, so
        # calling it on an instance raised TypeError.  Class-level calls keep
        # working unchanged.
        return 'publish {}'.format(RunProvidedPatternsExperiment.ID)

    def tasks(self, config) -> List:
        """Return the experiment's tasks followed by filtering and publishing steps."""
        filter_ = PotentialHitsFilterTask()
        publish = PublishFindingsTask(RunProvidedPatternsExperiment.ID, config.compiles_path, config.review_site_url, config.review_site_user, config.review_site_password)
        return (RunProvidedPatternsExperiment().tasks(config) + [filter_, publish])
class objVars():
    """Container for an object pose: 3-vector rotation and 3-vector translation."""

    def __init__(self, rot, trans):
        assert (rot.shape == (3,)), 'rot should of (3,) shape'
        # Fixed: the message for the trans check previously said "rot".
        assert (trans.shape == (3,)), 'trans should of (3,) shape'
        self.rot = rot
        self.trans = trans
class ResNet(nn.Module):
    """ResNet backbone assembled from a stage spec, with frozen-BatchNorm blocks.

    Returns the feature maps of every stage whose spec sets return_features.
    """

    def __init__(self, cfg):
        super(ResNet, self).__init__()
        # Stage layout (block counts, which stages to return) keyed by backbone type.
        stage_specs = _STAGE_SPECS[cfg.MODEL.BACKBONE.TYPE]
        self.stem = StemWithFixedBatchNorm(cfg)
        num_groups = cfg.MODEL.RESNETS.NUM_GROUPS
        width_per_group = cfg.MODEL.RESNETS.WIDTH_PER_GROUP
        in_channels = cfg.MODEL.RESNETS.STEM_OUT_CHANNELS
        stage2_bottleneck_channels = (num_groups * width_per_group)
        stage2_out_channels = cfg.MODEL.RESNETS.RES2_OUT_CHANNELS
        self.stages = []
        self.return_features = {}
        for stage_spec in stage_specs:
            name = ('layer' + str(stage_spec.index))
            # Channel counts double at every stage relative to stage 2.
            stage2_relative_factor = (2 ** (stage_spec.index - 1))
            bottleneck_channels = (stage2_bottleneck_channels * stage2_relative_factor)
            out_channels = (stage2_out_channels * stage2_relative_factor)
            # first_stride: 1 for the first stage, 2 for all later (downsampling) stages.
            module = _make_stage(BottleneckWithFixedBatchNorm, in_channels, bottleneck_channels, out_channels, stage_spec.block_count, num_groups, cfg.MODEL.RESNETS.STRIDE_IN_1X1, first_stride=(int((stage_spec.index > 1)) + 1))
            in_channels = out_channels
            self.add_module(name, module)
            self.stages.append(name)
            self.return_features[name] = stage_spec.return_features
        self._freeze_backbone(cfg.MODEL.BACKBONE.FREEZE_AT_STAGE)

    def _freeze_backbone(self, freeze_at):
        # Disable gradients for the stem (index 0) and the first freeze_at - 1 stages.
        if (freeze_at < 0):
            return
        for stage_index in range(freeze_at):
            if (stage_index == 0):
                m = self.stem
            else:
                m = getattr(self, ('layer' + str(stage_index)))
            for p in m.parameters():
                p.requires_grad = False

    def forward(self, x):
        # Collect the output of every stage flagged with return_features.
        outputs = []
        x = self.stem(x)
        for stage_name in self.stages:
            x = getattr(self, stage_name)(x)
            if self.return_features[stage_name]:
                outputs.append(x)
        return outputs
class LowRank2d(nn.Module):
    """Low-rank integral operator: kernel factored as phi(x) · psi(y).

    phi and psi map 2-D coordinates to (in_channels * out_channels) kernel
    factors; the forward pass contracts the input over the source grid.
    """

    def __init__(self, in_channels, out_channels):
        super(LowRank2d, self).__init__()
        self.in_channels = in_channels
        self.out_channels = out_channels
        self.phi = DenseNet([2, 64, 128, (in_channels * out_channels)], torch.nn.ReLU)
        self.psi = DenseNet([2, 64, 128, (in_channels * out_channels)], torch.nn.ReLU)

    def get_grid(self, S1, S2, batchsize, device):
        # Uniform grid over [0, 1) x [0, 1) (endpoints excluded), shape (batch, S1, S2, 2).
        gridx = torch.tensor(np.linspace(0, 1, (S1 + 1))[:(- 1)], dtype=torch.float)
        gridx = gridx.reshape(1, S1, 1).repeat([batchsize, 1, S2])
        gridy = torch.tensor(np.linspace(0, 1, (S2 + 1))[:(- 1)], dtype=torch.float)
        gridy = gridy.reshape(1, 1, S2).repeat([batchsize, S1, 1])
        return torch.stack((gridx, gridy), dim=(- 1)).to(device)

    def forward(self, x, gridy=None):
        # x: (batch, in_channels, S1, S2); gridy optionally supplies output query
        # points — when omitted, the same uniform grid as the input is used.
        (batchsize, size1, size2) = (x.shape[0], x.shape[2], x.shape[3])
        gridx = self.get_grid(S1=size1, S2=size2, batchsize=1, device=x.device).reshape((size1 * size2), 2)
        if (gridy is None):
            gridy = self.get_grid(S1=size1, S2=size2, batchsize=batchsize, device=x.device).reshape(batchsize, (size1 * size2), 2)
        Nx = (size1 * size2)
        Ny = gridy.shape[1]
        phi_eval = self.phi(gridx).reshape(Nx, self.out_channels, self.in_channels)
        psi_eval = self.psi(gridy).reshape(batchsize, Ny, self.out_channels, self.in_channels)
        x = x.reshape(batchsize, self.in_channels, Nx)
        # Contract over input channels and the Nx source points; dividing by Nx
        # weights the sum as a quadrature over the uniform grid.
        x = (torch.einsum('noi,bin,bmoi->bom', phi_eval, x, psi_eval) / Nx)
        return x
def numpy_or_pandas_and_seq_concat(datasets: Sequence[Union[(NumpyDataset, PandasDataset, SeqNumpyPandasDataset)]]) -> Union[(NumpyDataset, PandasDataset)]:
    """Combine one sequential and one plain dataset.

    When both have the same length the sequential dataset is concatenated with
    the plain one; otherwise the sequential dataset is attached to the plain
    dataset's ``seq_data`` mapping under its name and the plain dataset is
    returned.

    Raises:
        ValueError: if the pair is not exactly one sequential and one plain
            dataset (previously this surfaced as an obscure UnboundLocalError).
    """
    assert (len(datasets) == 2), 'should be 1 sequential and 1 plain dataset'
    seq_dataset = None
    plain_dataset = None
    # NOTE(review): keeps the original exact-type check; isinstance would also
    # classify SeqNumpyPandasDataset subclasses as sequential.
    for dataset in datasets:
        if (type(dataset) == SeqNumpyPandasDataset):
            seq_dataset = dataset
        else:
            plain_dataset = dataset
    if (seq_dataset is None) or (plain_dataset is None):
        raise ValueError('expected exactly one SeqNumpyPandasDataset and one plain dataset')
    if (len(seq_dataset.data) == len(plain_dataset)):
        return SeqNumpyPandasDataset.concat([seq_dataset, plain_dataset.to_pandas()])
    if hasattr(plain_dataset, 'seq_data'):
        plain_dataset.seq_data[seq_dataset.name] = seq_dataset
    else:
        plain_dataset.seq_data = {seq_dataset.name: seq_dataset}
    return plain_dataset
class RefAdaBound(RefSolver):
    """Reference (NumPy) AdaBound solver used to validate the real implementation."""

    def __init__(self, alpha, beta1, beta2, eps, final_lr, gamma):
        super().__init__()
        self.alpha = alpha
        self.init_alpha = alpha
        self.beta1 = beta1
        self.beta2 = beta2
        self.eps = eps
        self.final_lr = final_lr
        self.gamma = gamma
        # Per-parameter first moment, second moment, and step counter.
        self.m = {}
        self.v = {}
        self.t = {}

    def _set_state_impl(self, key, param):
        # Zeroed optimizer state for a newly registered parameter.
        self.m[key] = np.zeros_like(param)
        self.v[key] = np.zeros_like(param)
        self.t[key] = 0

    def _update_impl(self, key, p, g):
        # Advance the step counter, saturating so it never exceeds int32.
        step = min(self.t[key] + 1, np.iinfo(np.int32).max)
        self.t[key] = step
        _update_adabound(p, g, self.m[key], self.v[key], step, self.alpha, self.init_alpha, self.beta1, self.beta2, self.eps, self.final_lr, self.gamma)
class ResNetV2(nn.Module):
    """Pre-activation ResNet-V2 trunk: root conv + three bottleneck block groups.

    block_units gives the number of PreActBottleneck units per block group;
    width_factor scales the base width of 64 channels.
    """

    def __init__(self, block_units, width_factor):
        super().__init__()
        width = int((64 * width_factor))
        self.width = width
        # Root: weight-standardised 7x7 stride-2 conv, GroupNorm, ReLU, 3x3 stride-2 max-pool.
        self.root = nn.Sequential(OrderedDict([('conv', StdConv2d(3, width, kernel_size=7, stride=2, bias=False, padding=3)), ('gn', nn.GroupNorm(32, width, eps=1e-06)), ('relu', nn.ReLU(inplace=True)), ('pool', nn.MaxPool2d(kernel_size=3, stride=2, padding=0))]))
        # Three block groups; channel widths x4 / x8 / x16, groups 2 and 3 downsample with stride 2.
        self.body = nn.Sequential(OrderedDict([('block1', nn.Sequential(OrderedDict(([('unit1', PreActBottleneck(cin=width, cout=(width * 4), cmid=width))] + [(f'unit{i:d}', PreActBottleneck(cin=(width * 4), cout=(width * 4), cmid=width)) for i in range(2, (block_units[0] + 1))])))), ('block2', nn.Sequential(OrderedDict(([('unit1', PreActBottleneck(cin=(width * 4), cout=(width * 8), cmid=(width * 2), stride=2))] + [(f'unit{i:d}', PreActBottleneck(cin=(width * 8), cout=(width * 8), cmid=(width * 2))) for i in range(2, (block_units[1] + 1))])))), ('block3', nn.Sequential(OrderedDict(([('unit1', PreActBottleneck(cin=(width * 8), cout=(width * 16), cmid=(width * 4), stride=2))] + [(f'unit{i:d}', PreActBottleneck(cin=(width * 16), cout=(width * 16), cmid=(width * 4))) for i in range(2, (block_units[2] + 1))]))))]))

    def forward(self, x):
        x = self.root(x)
        x = self.body(x)
        return x
@pytest.mark.skipif((not has_pytorch()), reason='Pytorch not installed.')
@test_utils.test(arch=archs_support_ndarray_ad, default_fp=ti.f64)
def test_ad_reduce():
    """Gradcheck a sum-of-squares reduction implemented as a taichi torch op."""
    # NOTE(review): the decorators above and below were mangled in the source
    # (".skipif(...)", "_utils.test(...)", "_utils.torch_op(...)"); reconstructed —
    # confirm the exact names against the taichi test suite.
    @test_utils.torch_op(output_shapes=[(1,)])
    def test(x: ti.types.ndarray(), y: ti.types.ndarray()):
        for i in x:
            y[0] += (x[i] ** 2)

    device = ('cuda' if (ti.lang.impl.current_cfg().arch == ti.cuda) else 'cpu')
    input = torch.rand(4, dtype=torch.double, device=device, requires_grad=True)
    torch.autograd.gradcheck(test, input)
def get_detr(device: torch.device) -> GetterReturnType:
    """Build a DETR model plus SetCriterion loss and return (forward_fn, params).

    The closure loads candidate weights into the model, runs a fixed batch of
    N random images against random box/label targets, and returns the weighted
    sum of the criterion's losses.
    """
    N = 2
    num_classes = 91
    hidden_dim = 256
    nheads = 8
    num_encoder_layers = 6
    num_decoder_layers = 6
    model = models.DETR(num_classes=num_classes, hidden_dim=hidden_dim, nheads=nheads, num_encoder_layers=num_encoder_layers, num_decoder_layers=num_decoder_layers)
    losses = ['labels', 'boxes', 'cardinality']
    eos_coef = 0.1
    bbox_loss_coef = 5
    giou_loss_coef = 2
    weight_dict = {'loss_ce': 1, 'loss_bbox': bbox_loss_coef, 'loss_giou': giou_loss_coef}
    matcher = models.HungarianMatcher(1, 5, 2)
    criterion = models.SetCriterion(num_classes=num_classes, matcher=matcher, weight_dict=weight_dict, eos_coef=eos_coef, losses=losses)
    model = model.to(device)
    criterion = criterion.to(device)
    (params, names) = extract_weights(model)
    inputs = torch.rand(N, 3, 800, 1200, device=device)
    labels = []
    for idx in range(N):
        # Each image gets 5-9 random targets with random labels and boxes.
        targets = {}
        n_targets: int = int(torch.randint(5, 10, size=tuple()).item())
        label = torch.randint(5, 10, size=(n_targets,))
        targets['labels'] = label
        boxes = torch.randint(100, 800, size=(n_targets, 4))
        # Make boxes well-formed: ensure x0 <= x1 and y0 <= y1 by swapping.
        for t in range(n_targets):
            if (boxes[(t, 0)] > boxes[(t, 2)]):
                (boxes[(t, 0)], boxes[(t, 2)]) = (boxes[(t, 2)], boxes[(t, 0)])
            if (boxes[(t, 1)] > boxes[(t, 3)]):
                (boxes[(t, 1)], boxes[(t, 3)]) = (boxes[(t, 3)], boxes[(t, 1)])
        targets['boxes'] = boxes.float()
        labels.append(targets)

    def forward(*new_params: Tensor) -> Tensor:
        # Splice the candidate weights into the model before the forward pass.
        load_weights(model, names, new_params)
        out = model(inputs)
        loss = criterion(out, labels)
        weight_dict = criterion.weight_dict
        # Only losses present in weight_dict contribute to the total.
        final_loss = cast(Tensor, sum(((loss[k] * weight_dict[k]) for k in loss.keys() if (k in weight_dict))))
        return final_loss
    return (forward, params)
@add_end_docstrings(PIPELINE_INIT_ARGS, '\n return_all_scores (:obj:`bool`, `optional`, defaults to :obj:`False`):\n Whether to return all prediction scores or just the one of the predicted class.\n ')
class TextClassificationPipeline(Pipeline):
    """Text classification pipeline.

    Applies sigmoid when the model has a single label (regression-style head)
    and softmax otherwise; returns either the best label or all label scores.

    Fixed: the decorator above was truncated to ``_end_docstrings(...)`` in the
    source, which made the line a call expression instead of a decoration —
    restored ``@add_end_docstrings``.
    """

    def __init__(self, return_all_scores: bool=False, **kwargs):
        super().__init__(**kwargs)
        self.check_model_type((TF_MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING if (self.framework == 'tf') else MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING))
        self.return_all_scores = return_all_scores

    def __call__(self, *args, **kwargs):
        outputs = super().__call__(*args, **kwargs)
        if (self.model.config.num_labels == 1):
            # Single logit: sigmoid.
            scores = (1.0 / (1.0 + np.exp((- outputs))))
        else:
            # Multi-label logits: softmax over the last axis.
            scores = (np.exp(outputs) / np.exp(outputs).sum((- 1), keepdims=True))
        if self.return_all_scores:
            return [[{'label': self.model.config.id2label[i], 'score': score.item()} for (i, score) in enumerate(item)] for item in scores]
        else:
            return [{'label': self.model.config.id2label[item.argmax()], 'score': item.max().item()} for item in scores]
class StaticCamera(Camera):
    """Fixed-pose pybullet camera defined by look-from / look-at / up vectors."""

    def __init__(self, fov, aspect, nearval, farval, width, height, look_at, look_from, up_vector, cid, name, robot_id=None, objects=None):
        # robot_id / objects are accepted for interface parity but unused here.
        self.nearval = nearval
        self.farval = farval
        self.fov = fov
        self.aspect = aspect
        self.look_from = look_from
        self.look_at = look_at
        self.up_vector = up_vector
        self.width = width
        self.height = height
        self.viewMatrix = p.computeViewMatrix(cameraEyePosition=look_from, cameraTargetPosition=look_at, cameraUpVector=self.up_vector)
        self.projectionMatrix = p.computeProjectionMatrixFOV(fov=fov, aspect=aspect, nearVal=self.nearval, farVal=self.farval)
        self.cid = cid
        self.name = name

    def set_position_from_gui(self):
        """Adopt the debug-visualizer camera pose; returns (look_from, look_at)."""
        info = p.getDebugVisualizerCamera(physicsClientId=self.cid)
        # Last entries of the info tuple: target position and distance; index 5
        # is the forward vector — the eye sits `dist` behind the target.
        look_at = np.array(info[(- 1)])
        dist = info[(- 2)]
        forward = np.array(info[5])
        look_from = (look_at - (dist * forward))
        self.viewMatrix = p.computeViewMatrix(cameraEyePosition=look_from, cameraTargetPosition=look_at, cameraUpVector=self.up_vector)
        look_from = [float(x) for x in look_from]
        look_at = [float(x) for x in look_at]
        return (look_from, look_at)

    def render(self, wh=200):
        """Render a wh x wh image; returns (rgb_img, depth_img)."""
        image = p.getCameraImage(width=wh, height=wh, viewMatrix=self.viewMatrix, projectionMatrix=self.projectionMatrix, physicsClientId=self.cid)
        (rgb_img, depth_img) = self.process_rgbd(image, self.nearval, self.farval)
        return (rgb_img, depth_img)
def PDO(filepath, df_splits=None, n_jobs=1):
    """Compute the PDO time series and spatial pattern per train-test split.

    Parameters
    ----------
    filepath : path to the SST dataset readable by core_pp.import_ds_lazy.
    df_splits : optional MultiIndex (split, time) DataFrame with a boolean
        'TrainIsTrue' column; when None a single all-True split is built.
    n_jobs : when > 1, splits are computed in a process pool.

    Returns
    -------
    (df_PDO, PDO_patterns)
    """
    t0 = time()
    kwrgs_pp = {'selbox': (110, 260, 20, 70), 'format_lon': 'only_east'}
    ds = core_pp.import_ds_lazy(filepath, **kwrgs_pp)
    # Fixed: this previously aliased kwrgs_pp (`kwrgs_pp_eof_ds = kwrgs_pp`),
    # so the update() below silently mutated kwrgs_pp too; copy instead.
    kwrgs_pp_eof_ds = dict(kwrgs_pp)
    kwrgs_pp_eof_ds.update({'seldates': ('11-01', '03-31'), 'dailytomonths': True})
    ds_monthly = core_pp.import_ds_lazy(filepath, **kwrgs_pp_eof_ds)
    if (df_splits is None):
        print('No train-test split')
        iterables = [np.array([0]), pd.to_datetime(ds.time.values)]
        df_splits = pd.DataFrame(data=np.ones(ds.time.size), index=pd.MultiIndex.from_product(iterables), columns=['TrainIsTrue'], dtype=bool)
    splits = df_splits.index.levels[0]
    data = np.zeros((splits.size, ds.latitude.size, ds.longitude.size))
    PDO_patterns = xr.DataArray(data, coords=[splits, ds.latitude.values, ds.longitude.values], dims=['split', 'latitude', 'longitude'])
    if (n_jobs > 1):
        # Each split is independent — farm them out to worker processes.
        with ProcessPoolExecutor(max_workers=os.cpu_count()) as pool:
            futures = [pool.submit(PDO_single_split, s, ds_monthly, ds, df_splits) for s in range(splits.size)]
            results = [future.result() for future in futures]
    else:
        results = [PDO_single_split(s, ds_monthly, ds, df_splits) for s in range(splits.size)]
    list_PDO_ts = [r[0] for r in results]
    time_ = (time() - t0)
    print('\n{:.1f} minutes'.format((time_ / 60)))
    for s in splits:
        PDO_patterns[s] = results[s][1]
    df_PDO = pd.concat(list_PDO_ts, axis=0, keys=splits)
    df_PDO = df_PDO.merge(df_splits, left_index=True, right_index=True)
    if (splits.size > 1):
        # Sanity check: the per-split PDO series must actually differ.
        assert (float((df_PDO.loc[1]['PDO'] - df_PDO.loc[0]['PDO']).mean()) != 0), 'something went wrong with train test splits'
    return (df_PDO, PDO_patterns)
class SasRec(L.LightningModule):
    """Lightning wrapper around SasRecModel: losses, prediction, and optimizers.

    Supports BCE or CE loss, each either over the full item vocabulary or with
    sampled negatives ('global_uniform' or 'inbatch' sampling), and optional
    sharing of one negative set across all batch positions.
    """

    def __init__(self, tensor_schema: TensorSchema, block_count: int=2, head_count: int=1, hidden_size: int=50, max_seq_len: int=200, dropout_rate: float=0.2, ti_modification: bool=False, time_span: int=256, loss_type: str='CE', loss_sample_count: Optional[int]=None, negative_sampling_strategy: str='global_uniform', negatives_sharing: bool=False, optimizer_factory: Optional[OptimizerFactory]=None, lr_scheduler_factory: Optional[LRSchedulerFactory]=None):
        super().__init__()
        self.save_hyperparameters()
        self._model = SasRecModel(schema=tensor_schema, num_blocks=block_count, num_heads=head_count, hidden_size=hidden_size, max_len=max_seq_len, dropout=dropout_rate, ti_modification=ti_modification, time_span=time_span)
        self._loss_type = loss_type
        self._loss_sample_count = loss_sample_count
        self._negative_sampling_strategy = negative_sampling_strategy
        self._negatives_sharing = negatives_sharing
        self._optimizer_factory = optimizer_factory
        self._lr_scheduler_factory = lr_scheduler_factory
        self._loss = self._create_loss()
        assert (negative_sampling_strategy in {'global_uniform', 'inbatch'})
        # Vocabulary size is the cardinality of the item-id feature.
        item_count = tensor_schema.item_id_features.item().cardinality
        assert item_count
        self._vocab_size = item_count

    def training_step(self, batch: SasRecTrainingBatch, batch_idx: int) -> torch.Tensor:
        # Periodically release cached CUDA memory.
        if (((batch_idx % 100) == 0) and torch.cuda.is_available()):
            torch.cuda.empty_cache()
        loss = self._compute_loss(batch)
        self.log('train_loss', loss, on_step=True, on_epoch=True, prog_bar=True, sync_dist=True)
        return loss

    def forward(self, feature_tensors: TensorMap, padding_mask: torch.BoolTensor) -> torch.Tensor:
        return self._model_predict(feature_tensors, padding_mask)

    def predict_step(self, batch: SasRecPredictionBatch, batch_idx: int, dataloader_idx: int=0) -> torch.Tensor:
        return self._model_predict(batch.features, batch.padding_mask)

    def validation_step(self, batch: SasRecValidationBatch, batch_idx: int) -> torch.Tensor:
        return self._model_predict(batch.features, batch.padding_mask)

    def configure_optimizers(self) -> Any:
        optimizer_factory = (self._optimizer_factory or FatOptimizerFactory())
        optimizer = optimizer_factory.create(self._model.parameters())
        if (self._lr_scheduler_factory is None):
            return optimizer
        lr_scheduler = self._lr_scheduler_factory.create(optimizer)
        return ([optimizer], [lr_scheduler])

    def _model_predict(self, feature_tensors: TensorMap, padding_mask: torch.BoolTensor) -> torch.Tensor:
        # Unwrap DataParallel so .predict is called on the underlying module.
        model: SasRecModel
        if isinstance(self._model, torch.nn.DataParallel):
            model = cast(SasRecModel, self._model.module)
        else:
            model = self._model
        scores = model.predict(feature_tensors, padding_mask)
        return scores

    def _compute_loss(self, batch: SasRecTrainingBatch) -> torch.Tensor:
        # Dispatch on (loss type, full-vocabulary vs sampled negatives).
        if (self._loss_type == 'BCE'):
            if (self._loss_sample_count is None):
                loss_func = self._compute_loss_bce
            else:
                loss_func = self._compute_loss_bce_sampled
        elif (self._loss_type == 'CE'):
            if (self._loss_sample_count is None):
                loss_func = self._compute_loss_ce
            else:
                loss_func = self._compute_loss_ce_sampled
        else:
            raise ValueError(f'Not supported loss type: {self._loss_type}')
        loss = loss_func(batch.features, batch.labels, batch.padding_mask, batch.labels_padding_mask)
        return loss

    def _compute_loss_bce(self, feature_tensors: TensorMap, positive_labels: torch.LongTensor, padding_mask: torch.BoolTensor, target_padding_mask: torch.BoolTensor) -> torch.Tensor:
        # Full-vocabulary BCE: one-hot targets at non-padded positions only.
        logits = self._model.forward(feature_tensors, padding_mask)
        logits = logits[target_padding_mask]
        labels = positive_labels[target_padding_mask]
        bce_labels = torch.zeros((logits.size(0), logits.size((- 1))), device=logits.device)
        bce_labels.scatter_(dim=(- 1), index=labels.unsqueeze((- 1)), value=1)
        loss = (self._loss(logits, bce_labels) / logits.size(0))
        return loss

    def _compute_loss_bce_sampled(self, feature_tensors: TensorMap, positive_labels: torch.LongTensor, padding_mask: torch.BoolTensor, target_padding_mask: torch.BoolTensor) -> torch.Tensor:
        # Sampled BCE; log-probabilities are clamped for numerical stability.
        (positive_logits, negative_logits, *_) = self._get_sampled_logits(feature_tensors, positive_labels, padding_mask, target_padding_mask)
        positive_prob = torch.sigmoid(positive_logits)
        negative_prob = torch.sigmoid(negative_logits)
        clamp_border: float = 100.0
        eps = 1e-06
        positive_loss = torch.clamp(torch.log((positive_prob + eps)), (- clamp_border), clamp_border).sum()
        negative_loss = torch.clamp(torch.log(((1 - negative_prob) + eps)), (- clamp_border), clamp_border).sum()
        loss = (- (positive_loss + negative_loss))
        loss /= positive_logits.size(0)
        return loss

    def _compute_loss_ce(self, feature_tensors: TensorMap, positive_labels: torch.LongTensor, padding_mask: torch.BoolTensor, target_padding_mask: torch.BoolTensor) -> torch.Tensor:
        # Full-vocabulary CE; padded positions are given ignore_index -100.
        logits = self._model.forward(feature_tensors, padding_mask)
        labels = positive_labels.masked_fill(mask=(~ target_padding_mask), value=(- 100))
        logits_flat = logits.view((- 1), logits.size((- 1)))
        labels_flat = labels.view((- 1))
        loss = self._loss(logits_flat, labels_flat)
        return loss

    def _compute_loss_ce_sampled(self, feature_tensors: TensorMap, positive_labels: torch.LongTensor, padding_mask: torch.BoolTensor, target_padding_mask: torch.BoolTensor) -> torch.Tensor:
        assert (self._loss_sample_count is not None)
        (positive_logits, negative_logits, positive_labels, negative_labels, vocab_size) = self._get_sampled_logits(feature_tensors, positive_labels, padding_mask, target_padding_mask)
        n_negative_samples = min(self._loss_sample_count, vocab_size)
        # Mask out negatives that collide with the positive label and adjust the
        # negative logits for the sampling distribution (appears to be a
        # sampled-softmax correction — confirm against the model's derivation).
        reject_labels = (positive_labels == negative_labels)
        negative_logits += math.log((vocab_size - 1))
        negative_logits -= (1000000.0 * reject_labels)
        negative_logits -= torch.log((n_negative_samples - reject_labels.sum(dim=(- 1), keepdim=True)).float())
        logits = torch.cat([positive_logits, negative_logits], dim=1).float()
        # The positive logit occupies column 0 of the concatenated logits.
        labels_flat = torch.zeros(positive_logits.size(0), dtype=torch.long, device=padding_mask.device)
        loss = self._loss(logits, labels_flat)
        return loss

    def _get_sampled_logits(self, feature_tensors: TensorMap, positive_labels: torch.LongTensor, padding_mask: torch.BoolTensor, target_padding_mask: torch.BoolTensor) -> Tuple[(torch.Tensor, torch.Tensor, torch.LongTensor, torch.LongTensor, int)]:
        """Sample negatives; return (pos_logits, neg_logits, pos_labels, neg_labels, vocab_size)."""
        assert (self._loss_sample_count is not None)
        n_negative_samples = self._loss_sample_count
        positive_labels = cast(torch.LongTensor, torch.masked_select(positive_labels, target_padding_mask))
        masked_batch_seq_size = positive_labels.size(0)
        device = padding_mask.device
        output_emb = self._model.forward_step(feature_tensors, padding_mask)[target_padding_mask]
        positive_labels = cast(torch.LongTensor, positive_labels.view((- 1), 1))
        ids = torch.arange(masked_batch_seq_size, dtype=torch.long, device=device)
        (unique_positive_labels, positive_labels_indices) = positive_labels.unique(return_inverse=True)
        if (self._negative_sampling_strategy == 'global_uniform'):
            # Negatives drawn uniformly from the whole vocabulary.
            vocab_size = self._vocab_size
            multinomial_sample_distribution = torch.ones(vocab_size, device=device)
            positive_logits = self._model.get_logits(output_emb, positive_labels)
        elif (self._negative_sampling_strategy == 'inbatch'):
            # Negatives drawn from the positives present in the batch.
            positive_labels_indices = positive_labels_indices.view(masked_batch_seq_size, 1)
            positive_logits = self._model.get_logits(output_emb, unique_positive_labels)
            vocab_size = unique_positive_labels.size(0)
            if self._negatives_sharing:
                multinomial_sample_distribution = torch.ones(vocab_size, device=device)
            else:
                multinomial_sample_distribution = torch.softmax(positive_logits, dim=(- 1))
        else:
            raise NotImplementedError(f'Unknown negative sampling strategy: {self._negative_sampling_strategy}')
        n_negative_samples = min(n_negative_samples, vocab_size)
        if self._negatives_sharing:
            # One negative set shared by every position in the batch.
            negative_labels = torch.multinomial(multinomial_sample_distribution, num_samples=n_negative_samples, replacement=False)
            negative_labels = negative_labels.unsqueeze(0).repeat(masked_batch_seq_size, 1)
        elif (self._negative_sampling_strategy == 'global_uniform'):
            negative_labels = torch.randint(low=0, high=vocab_size, size=(masked_batch_seq_size, n_negative_samples), dtype=torch.long, device=device)
        else:
            negative_labels = torch.multinomial(multinomial_sample_distribution, num_samples=n_negative_samples, replacement=False)
        negative_labels = cast(torch.LongTensor, negative_labels)
        if (self._negative_sampling_strategy == 'global_uniform'):
            if self._negatives_sharing:
                (unique_negative_labels, negative_labels_indices) = negative_labels.unique(return_inverse=True)
                negative_labels_indices = negative_labels_indices.view(masked_batch_seq_size, n_negative_samples)
                negative_logits = self._model.get_logits(output_emb, unique_negative_labels)
                negative_logits = negative_logits[(ids, negative_labels_indices.T)].T
            else:
                negative_logits = self._model.get_logits(output_emb, negative_labels)
        else:
            # In-batch: gather both positive and negative logits from the shared
            # logits over the unique in-batch labels.
            negative_labels_indices = negative_labels
            negative_logits = positive_logits
            negative_logits = negative_logits[(ids, negative_labels_indices.T)].T
            positive_logits = positive_logits[(ids, positive_labels_indices.T)].T
        return (positive_logits, negative_logits, positive_labels, negative_labels, vocab_size)

    def _create_loss(self) -> Union[(torch.nn.BCEWithLogitsLoss, torch.nn.CrossEntropyLoss)]:
        if (self._loss_type == 'BCE'):
            return torch.nn.BCEWithLogitsLoss(reduction='sum')
        if (self._loss_type == 'CE'):
            return torch.nn.CrossEntropyLoss()
        raise NotImplementedError('Not supported loss_type')
class ResNet34(nn.Module):
    """ResNet-34 classifier adapted to an arbitrary number of input channels."""

    def __init__(self, n_inputs=12, numCls=17):
        super().__init__()
        backbone = models.resnet34(pretrained=False)
        # Replace the stem conv so the network accepts n_inputs channels.
        self.conv1 = nn.Conv2d(n_inputs, 64, kernel_size=(7, 7), stride=(2, 2), padding=(3, 3), bias=False)
        self.encoder = nn.Sequential(
            self.conv1,
            backbone.bn1,
            backbone.relu,
            backbone.maxpool,
            backbone.layer1,
            backbone.layer2,
            backbone.layer3,
            backbone.layer4,
            backbone.avgpool,
        )
        self.FC = nn.Linear(512, numCls)
        self.apply(weights_init_kaiming)
        self.apply(fc_init_weights)

    def forward(self, x):
        features = self.encoder(x)
        features = features.view(features.size(0), -1)
        return self.FC(features)
def collect_boostrap_contrastiveness(feat, num_sample, dummy_inputs):
    """Select up to num_sample - 1 highest-scoring negative candidates and build contrastive inputs."""
    budget = num_sample - 1
    negative_ids = [idx for idx, cand in enumerate(feat.ex.candidates) if not cand.ex]
    if len(negative_ids) <= budget:
        # Fewer negatives than the budget: take them all.
        selected_negative = negative_ids
    else:
        # Hardest-first: keep the top-scoring negatives.
        ranked = sorted(negative_ids, key=lambda idx: feat.ex.candidates[idx].score, reverse=True)
        selected_negative = ranked[:budget]
    return _collect_contrastive_inputs(feat, num_sample, dummy_inputs, selected_negative)
@RunIf(sh=True)
@pytest.mark.slow
def test_hydra_sweep_ddp_sim(tmp_path):
    """Run a hydra multirun LR sweep with the simulated-DDP trainer."""
    # NOTE(review): decorators reconstructed from corrupted source
    # ("(sh=True) .slow"); confirm names against the project's test helpers.
    command = [
        startfile,
        '-m',
        ('hydra.sweep.dir=' + str(tmp_path)),
        'trainer=ddp_sim',
        'trainer.max_epochs=3',
        '+trainer.limit_train_batches=0.01',
        '+trainer.limit_val_batches=0.1',
        '+trainer.limit_test_batches=0.1',
        'model.optimizer.lr=0.005,0.01,0.02',
    ] + overrides
    run_sh_command(command)
@test_utils.test(require=ti.extension.sparse)
def test_struct_for_branching():
    """Struct-for over a sparse field must handle bitwise and boolean branch conditions."""
    # NOTE(review): the @test_utils.test and @ti.kernel decorators were mangled
    # in the source; reconstructed — confirm against the taichi test suite.
    x = ti.field(dtype=ti.i32)
    y = ti.field(dtype=ti.i32)
    ti.root.pointer(ti.ij, (128 // 4)).dense(ti.ij, 4).place(x, y)

    @ti.kernel
    def func1():
        for (i, j) in x:
            if ((x[(i, j)] & 2) == 2):
                y[(i, j)] = 1

    @ti.kernel
    def func2():
        for (i, j) in x:
            if ((x[(i, j)] == 2) or (x[(i, j)] == 4)):
                y[(i, j)] = 1

    @ti.kernel
    def func3():
        for (i, j) in x:
            if (((x[(i, j)] & 2) == 2) or ((x[(i, j)] & 4) == 4)):
                y[(i, j)] = 1

    func1()
    func2()
    func3()
# Auto-generated placeholder used when the 'torch' backend is unavailable;
# instantiation delegates to requires_backends with the missing backend list.
class DonutSwinPreTrainedModel(metaclass=DummyObject):
    _backends = ['torch']

    def __init__(self, *args, **kwargs):
        requires_backends(self, ['torch'])
def load_examples_rotten_tomatoes(path, args):
    """Load rotten-tomatoes sentiment examples as scored-option dicts.

    Each example holds one option per candidate label (premise + hypothesis,
    plus unconditional counterparts), the gold label index, an optional
    similarity score, and label synonym metadata.  With args.k_shot > 0, an
    in-context-learning prefix built from the train split is prepended to every
    evaluation premise ('knn_premise' keeps the un-prefixed premise).
    """
    hypotheses = [' negative', ' positive']
    label_list = [' terrible', ' great']
    # NOTE(review): label synonyms are read from the sst2 task directory even
    # though this loader targets rotten tomatoes — confirm the sharing is intended.
    label_path = './task_data/sst2/label_names_sentidict.txt'
    label2synonym = load_label(label_path)
    prompt = ' It was'
    icl_str = ''
    # The train split lives next to the dev/test file `path` points at.
    train_path = path.replace('dev', 'train')
    train_path = train_path.replace('test', 'train')
    if (args.k_shot > 0):
        # Build the in-context examples from the train split.
        train_examples = []
        with open(train_path, 'r') as json_file:
            json_list = list(json_file)
            for row in json_list:
                row = json.loads(row)
                label_str = (' ' + row['output'])
                label = hypotheses.index(label_str)
                summary = row['input']
                premise = f'{summary}{prompt}'
                options = []
                for h in label_list:
                    o = {}
                    o['premise'] = premise
                    o['hypothesis'] = h.lower()
                    o['uncond_premise'] = prompt
                    o['uncond_hypothesis'] = h.lower()
                    options.append(o)
                similarity = (float(row['similarity']) if ('similarity' in row) else None)
                train_examples.append({'options': options, 'label': label, 'sim': similarity, 'label2synonym': label2synonym, 'label_list': label_list})
        icl_str = construct_icl_examples(train_examples, k=args.k_shot)
    examples = []
    with open(path, 'r') as json_file:
        json_list = list(json_file)
        for row in json_list:
            row = json.loads(row)
            label_str = (' ' + row['output'])
            label = hypotheses.index(label_str)
            summary = row['input']
            premise = f'{summary}{prompt}'
            options = []
            for h in label_list:
                o = {}
                # Evaluation premises carry the ICL prefix; 'knn_premise' keeps
                # the raw premise without it.
                o['premise'] = (icl_str + premise)
                o['knn_premise'] = premise
                o['hypothesis'] = h.lower()
                o['uncond_premise'] = prompt
                o['uncond_hypothesis'] = h.lower()
                options.append(o)
            similarity = (float(row['similarity']) if ('similarity' in row) else None)
            examples.append({'options': options, 'label': label, 'sim': similarity, 'label2synonym': label2synonym, 'label_list': label_list})
    print('examples: ', examples[0]['options'][0]['premise'])
    return examples
def dist_init(port, backend):
    """Initialize the (possibly single-process) distributed environment.

    Binds the current process to a GPU chosen round-robin by rank; for
    world_size > 1, initializes torch.distributed via environment variables.
    Returns (rank, world_size).
    """
    os.environ['DISTRIBUTED_BACKEND'] = backend
    rank = get_rank()
    world_size = get_world_size()
    # NOTE(review): addr is never assigned a real host, so the log line below
    # prints "None" for the address — confirm whether a MASTER_ADDR lookup was lost.
    addr = None
    num_gpus = torch.cuda.device_count()
    print('num_gpus', num_gpus)
    # Round-robin GPU assignment by global rank.
    gpu_id = (rank % num_gpus)
    torch.cuda.set_device(gpu_id)
    if (world_size == 1):
        (rank, world_size) = (0, 1)
        alpha_print('using single card, no distributed environment init', flush=True)
    else:
        os.environ['MASTER_PORT'] = str(port)
        os.environ['WORLD_SIZE'] = str(world_size)
        os.environ['RANK'] = str(rank)
        dist.init_process_group(backend=backend)
        # Width-pad the printed rank to the number of digits in world_size,
        # so multi-process logs line up.
        form = ('%%%dd' % len(str(world_size)))
        alpha_print(('world_size %d, distributed init rank %s, gpu %d, at %s:%d' % (world_size, (form % rank), gpu_id, addr, port)), flush=True)
    return (rank, world_size)
_memoize_get_funcs  # NOTE(review): bare name is a no-op — almost certainly a '@_memoize_get_funcs' decorator that lost its '@'; confirm
def get_blas_funcs(names, arrays=(), dtype=None):
    """Return BLAS functions for the given names.

    Dispatches between Fortran (fblas) and C (cblas) implementations based on
    the dtypes of `arrays`, resolving alternate names through _blas_alias.
    """
    return _get_funcs(names, arrays, dtype, 'BLAS', _fblas, _cblas, 'fblas', 'cblas', _blas_alias)
class CmdGroup(FBSOptional):
    """(De)serializes one bmodel command group: TIU/DMA command counts plus their binary blobs.

    In the flatbuffers schema "Bdc" is the legacy name for TIU and "Gdma" for DMA.
    """

    def init(self, fbs: bmodel_fbs.CmdGroup, buffer: memoryview):
        # Deserialize counts and command binaries from a flatbuffers CmdGroup.
        self.tiu_num = fbs.BdcNum()
        self.dma_num = fbs.GdmaNum()
        self.tiu_cmd = Binary(fbs.BinaryBdc(), buffer)
        self.dma_cmd = Binary(fbs.BinaryGdma(), buffer)

    def _serialize(self, builder, save_binary_fun):
        # Mirror of init(): write counts, the (saved) binaries, and their byte sizes.
        module = bmodel_fbs.CmdGroup
        module.Start(builder)
        module.AddBdcNum(builder, self.tiu_num)
        module.AddGdmaNum(builder, self.dma_num)
        module.AddBinaryBdc(builder, self.tiu_cmd.serialize(builder, save_binary_fun))
        module.AddBinaryGdma(builder, self.dma_cmd.serialize(builder, save_binary_fun))
        module.AddBdcCmdByte(builder, len(self.tiu_cmd))
        module.AddGdmaCmdByte(builder, len(self.dma_cmd))
        return module.End(builder)

    def __repr__(self):
        # FBSOptional presumably defines truthiness as "group is present" — confirm.
        if self:
            return f'tiu_cmd[{self.tiu_num}], dma_cmd[{self.dma_num}]'
        return ''
def tf_efficientnet_b1_ap(pretrained=False, **kwargs):
    """EfficientNet-B1 (AdvProp weights), with TF-compatible defaults (SAME padding, TF BN eps)."""
    kwargs.update(bn_eps=BN_EPS_TF_DEFAULT, pad_type='same')
    return _gen_efficientnet('tf_efficientnet_b1_ap', channel_multiplier=1.0,
                             depth_multiplier=1.1, pretrained=pretrained, **kwargs)
def from_asgi(schema_path: str, app: Any, *, base_url: (str | None)=None, method: (Filter | None)=None, endpoint: (Filter | None)=None, tag: (Filter | None)=None, operation_id: (Filter | None)=None, skip_deprecated_operations: bool=False, validate_schema: bool=False, force_schema_version: (str | None)=None, data_generation_methods: DataGenerationMethodInput=DEFAULT_DATA_GENERATION_METHODS, generation_config: (GenerationConfig | None)=None, code_sample_style: str=CodeSampleStyle.default().name, rate_limit: (str | None)=None, sanitize_output: bool=True, **kwargs: Any) -> BaseOpenAPISchema:
    """Load an Open API schema from an ASGI application.

    `schema_path` must be a relative URL served by `app` itself; the schema is
    fetched in-process through an ASGI test client and parsed via `from_file`,
    forwarding all filter/generation options. Extra `kwargs` are passed to the
    client's GET request.
    """
    from starlette_testclient import TestClient as ASGIClient
    # The schema must come from the app under test, hence relative-only.
    require_relative_url(schema_path)
    setup_default_headers(kwargs)
    client = ASGIClient(app)
    # Wrapped in a callable so load_schema_from_url handles transport errors uniformly.
    response = load_schema_from_url((lambda : client.get(schema_path, **kwargs)))
    return from_file(response.text, app=app, base_url=base_url, method=method, endpoint=endpoint, tag=tag, operation_id=operation_id, skip_deprecated_operations=skip_deprecated_operations, validate_schema=validate_schema, force_schema_version=force_schema_version, data_generation_methods=data_generation_methods, generation_config=generation_config, code_sample_style=code_sample_style, location=schema_path, rate_limit=rate_limit, sanitize_output=sanitize_output, __expects_json=_is_json_response(response))
def record_config_file(params=None, config_filename=None, net_save_filename=None, timestamp=None, train=True):
    """Copy the experiment config file next to the network checkpoint.

    The copy is named '<save_prefix>_<mode>_<timestamp>.cfg' so each run keeps
    a record of the exact configuration used. Either `params` (a dict holding
    'fconfig' and the net filename) or the explicit filename arguments must be
    supplied; explicit arguments override values from `params`.
    """
    import shutil
    utils.assert_arglist(params, [config_filename, net_save_filename])
    if params is not None:
        _config_filename = params['fconfig']
        if train:
            _net_save_filename = params['train_net']
        else:
            # NOTE(review): empty-string key looks wrong — presumably the
            # forward/inference net key; confirm against the params schema.
            _net_save_filename = params['']
    if config_filename is not None:
        _config_filename = config_filename
    if net_save_filename is not None:
        _net_save_filename = net_save_filename
    save_prefix = os.path.splitext(_net_save_filename)[0]
    assert os.path.isfile(_config_filename), 'config file does not exist'
    assert len(save_prefix) > 0, 'empty save prefix'
    if timestamp is None:
        timestamp = utils.timestamp()
    mode = 'train' if train else 'forward'
    directory_name = os.path.dirname(save_prefix)
    # makedirs(exist_ok=True) handles nested paths and concurrent creation;
    # the previous single-level os.mkdir failed for nested directories and
    # crashed outright when save_prefix had no directory component ('').
    if directory_name:
        os.makedirs(directory_name, exist_ok=True)
    save_filename = '{}_{}_{}.cfg'.format(save_prefix, mode, timestamp)
    shutil.copy(_config_filename, save_filename)
def cross_entropy_with_label_smoothing(pred, target, label_smoothing=0.1):
    """Cross-entropy between logits `pred` (N, C) and integer targets (N,) with label smoothing.

    The one-hot target is blended with the uniform distribution:
    (1 - eps) * onehot + eps / C, then the mean negative log-likelihood is returned.
    """
    # dim=1 normalizes over the class dimension; the previous bare
    # nn.LogSoftmax() relied on the deprecated implicit-dim heuristic, which
    # warns and can pick the wrong axis.
    logsoftmax = nn.LogSoftmax(dim=1)
    n_classes = pred.size(1)
    # (N,) -> (N, 1) so scatter_ can place the one-hot entries.
    target = torch.unsqueeze(target, 1)
    soft_target = torch.zeros_like(pred)
    soft_target.scatter_(1, target, 1)
    soft_target = soft_target * (1 - label_smoothing) + label_smoothing / n_classes
    return torch.mean(torch.sum(-soft_target * logsoftmax(pred), 1))
def dump_hls_lut_node2(f, name, lut, node):
    """Emit an HLS C++ helper function evaluating one LUT node.

    The generated function takes the node's input bits, packs them into an
    index, and returns the corresponding bit from a ROM-bound truth table.
    """
    f.write(('\ninline ap_uint<1> %s(\n' % make_lut_func_name(name, node)))
    n = lut.get_node_connection_size(node)  # number of input bits
    s = lut.get_lut_table_size(node)  # number of truth-table entries (presumably 2**n — confirm)
    # One ap_uint<1> parameter per input connection.
    for i in range(n):
        f.write((' ap_uint<1> in_data%d' % i))
        if (i < (n - 1)):
            f.write(',\n')
        else:
            f.write(')\n')
    f.write('{\n')
    f.write('#pragma HLS inline\n\n')
    # Pack the inputs into an n-bit index.
    f.write((' ap_uint<%d> index;\n' % n))
    for i in range(n):
        f.write((' index[%d] = in_data%d;\n' % (i, i)))
    f.write(' \n')
    # Truth-table literal, bound to a LUTRAM ROM.
    f.write((' ap_uint<1> table[%d] = {' % s))
    for i in range(s):
        f.write(('%d,' % lut.get_lut_table(node, i)))
    f.write('};\n')
    f.write(' #pragma HLS bind_storage variable=table type=ROM_1P impl=LUTRAM\n')
    f.write(' return table[index];\n')
    f.write('}\n\n')
def log_string(element, base=None):
    """Render a log() call as a string, e.g. 'log(x)' or 'log(x, base=2)'.

    The base is included whenever it is provided; previously a falsy base
    (such as 0) was silently dropped by the truthiness test.
    """
    basestr = ', base=' + str(base) if base is not None else ''
    return 'log(%s%s)' % (element, basestr)
def test_connections():
    """Check MCP_Connect bookkeeping: which seed pairs met, uniqueness of stored meeting positions, and best-connection tracebacks."""
    mcp = MCP(a)
    (costs, traceback) = mcp.find_costs([(1, 1), (7, 7), (1, 7)])
    # All three seed pairs must have connected somewhere.
    connections = set(mcp._conn.keys())
    assert ((0, 1) in connections)
    assert ((1, 2) in connections)
    assert ((0, 2) in connections)
    # No duplicate meeting positions recorded for any pair.
    for position_tuples in mcp._conn.values():
        n1 = len(position_tuples)
        n2 = len(set(position_tuples))
        assert (n1 == n2)
    # Seeds 0 and 1 meet mid-diagonal; stitched path runs the main diagonal.
    (cost, pos1, pos2) = mcp._bestconn[(0, 1)]
    assert ((pos1, pos2) == ((3, 3), (4, 4)))
    path = (mcp.traceback(pos1) + list(reversed(mcp.traceback(pos2))))
    assert_array_equal(path, [(1, 1), (2, 2), (3, 3), (4, 4), (5, 5), (6, 6), (7, 7)])
    # Seeds 1 and 2 connect straight down column 7.
    (cost, pos1, pos2) = mcp._bestconn[(1, 2)]
    assert ((pos1, pos2) == ((3, 7), (4, 7)))
    path = (mcp.traceback(pos1) + list(reversed(mcp.traceback(pos2))))
    assert_array_equal(path, [(1, 7), (2, 7), (3, 7), (4, 7), (5, 7), (6, 7), (7, 7)])
    # Seeds 0 and 2 connect straight across row 1.
    (cost, pos1, pos2) = mcp._bestconn[(0, 2)]
    assert ((pos1, pos2) == ((1, 3), (1, 4)))
    path = (mcp.traceback(pos1) + list(reversed(mcp.traceback(pos2))))
    assert_array_equal(path, [(1, 1), (1, 2), (1, 3), (1, 4), (1, 5), (1, 6), (1, 7)])
def compose_data_files() -> list:
    """Collect (target-dir, files) pairs for packaging: license/version, tests, meshes and examples."""
    entries = [('sfepy', ['LICENSE', 'VERSION']),
               ('sfepy/tests', glob.glob('sfepy/tests/*.py'))]
    entries += data_dir_walk('meshes', 'sfepy')
    entries += data_dir_walk('examples', 'sfepy')
    return entries
def _make_win_cache(): idx = [] for i in range(3): for j in range(7): a = ((i * 7) + j) idx.append([a, (a + 7), (a + 14), (a + 21)]) for i in range(6): for j in range(4): a = ((i * 7) + j) idx.append([a, (a + 1), (a + 2), (a + 3)]) for i in range(3): for j in range(4): a = ((i * 7) + j) idx.append([a, (a + 8), (a + 16), (a + 24)]) for i in range(3): for j in range(3, 7): a = ((i * 7) + j) idx.append([a, (a + 6), (a + 12), (a + 18)]) return jnp.int32(idx)
class OlympicRingSampler(RingSampler):
    """RingSampler preset with the five Olympic-ring centers (coordinates scaled by 1/50)."""

    def __init__(self, radii: np.ndarray = None, width: float = 0.5):
        # None means "five unit radii". A literal np.ones(5) default would be
        # evaluated once at import time and shared (mutable-default pitfall);
        # the annotation is also fixed: np.array is a function, not a type.
        if radii is None:
            radii = np.ones(5)
        num_objects = radii.shape[0]
        # Ring centers laid out like the Olympic logo; first `num_objects` are used.
        centers = (np.array([((- 140), 0), (0, 0), (140, 0), ((- 55), (- 50)), (55, (- 50))], np.float32) / float(50))
        centers = centers[:num_objects]
        super(OlympicRingSampler, self).__init__(radii, centers, width)
def filter2D(img, kernel):
    """Convolve a batched image (b, c, h, w) with an odd-sized 2D kernel under reflect padding.

    A kernel with leading dimension 1 is shared across the whole batch;
    otherwise one kernel per batch element is applied via grouped convolution.
    Raises ValueError for even kernel sizes.
    """
    k = kernel.size(-1)
    b, c, h, w = img.size()
    if k % 2 != 1:
        raise ValueError('Wrong kernel size')
    pad = k // 2
    padded = F.pad(img, (pad, pad, pad, pad), mode='reflect')
    ph, pw = padded.size()[-2:]
    if kernel.size(0) == 1:
        # One shared kernel: fold batch and channels into the batch dimension.
        flat = padded.view(b * c, 1, ph, pw)
        ker = kernel.view(1, 1, k, k)
        out = F.conv2d(flat, ker, padding=0)
    else:
        # Per-sample kernels: replicate across channels and use grouped conv.
        flat = padded.view(1, b * c, ph, pw)
        ker = kernel.view(b, 1, k, k).repeat(1, c, 1, 1).view(b * c, 1, k, k)
        out = F.conv2d(flat, ker, groups=b * c)
    return out.view(b, c, h, w)
def pretty_print_default(enable=True):
    """Toggle typeset (LaTeX) display of outputs via the display manager preferences."""
    from sage.repl.rich_output import get_display_manager
    prefs = get_display_manager().preferences
    prefs.text = 'latex' if enable else None
def test_string():
    """The 'string' datashape parses to a ListType whose repr round-trips."""
    text = 'string'
    parsed = ak.types.from_datashape(text, highlevel=False)
    assert isinstance(parsed, ak.types.ListType)
    assert str(parsed) == text
def vectorize_input(batch, training=True, device=None, mode='train'):
    """Convert a code/doc Batch into a dict of tensors, optionally moved to `device`.

    Returns None for an empty batch. Target fields are included only when the
    batch carries summaries (sent2).

    NOTE(review): `mode` is never used in this function — confirm whether it
    can be dropped from the signature.
    """
    if (not batch):
        return None
    srcs = torch.LongTensor(batch.sent1_word)
    src_lens = torch.LongTensor(batch.sent1_length)
    if (batch.sent2_word is not None):
        targets = torch.LongTensor(batch.sent2_word)
        target_lens = torch.LongTensor(batch.sent2_length)
    # Gradients tracked only while training.
    with torch.set_grad_enabled(training):
        example = {'batch_size': batch.batch_size, 'code_graphs': batch.code_graph, 'doc_graphs': batch.doc_graph, 'sequences': (srcs.to(device) if device else srcs), 'sequence_lens': (src_lens.to(device) if device else src_lens), 'code_token_indexes': batch.code_token_indexes, 'code_func': batch.funcs, 'file_names': batch.filenames, 'urls': batch.urls, 'max_code_lens': batch.max_sent1_length}
        if (batch.sent2_word is not None):
            example['targets'] = (targets.to(device) if device else targets)
            example['target_lens'] = (target_lens.to(device) if device else target_lens)
            example['target_src'] = batch.sent2_src
        return example
def is_valid_url(url: str) -> bool:
    """Return True when `url` parses with both a scheme and a network location."""
    try:
        parts = urlparse(url)
    except ValueError:
        # urlparse raises for some malformed inputs, e.g. bad IPv6 brackets.
        return False
    return bool(parts.scheme) and bool(parts.netloc)
class AWSKeyManager():
    """Manages EC2 SSH keypairs both in AWS and as local .pem files under `local_key_dir`."""

    def __init__(self, auth: AWSAuthentication, local_key_dir: Path=(key_root / 'aws')):
        self.auth = auth
        self.local_key_dir = local_key_dir

    def key_exists_aws(self, aws_region: str, key_name: str) -> bool:
        """Return True when a keypair with this name exists in the given AWS region."""
        ec2_client = self.auth.get_boto3_client('ec2', aws_region)
        return (key_name in set((p['KeyName'] for p in ec2_client.describe_key_pairs()['KeyPairs'])))

    def key_exists_local(self, key_name: str) -> bool:
        """Return True when the corresponding .pem file exists locally."""
        return (self.local_key_dir / f'{key_name}.pem').exists()

    def make_key(self, aws_region: str, key_name: str) -> Path:
        """Create a fresh RSA keypair in AWS and save its private key locally.

        Refuses to overwrite an existing key on either side. Returns the path
        to the saved .pem file.
        """
        if self.key_exists_aws(aws_region, key_name):
            logger.error(f'Key {key_name} already exists in AWS region {aws_region}')
            raise skyplane_exceptions.PermissionsException(f'Key {key_name} already exists in AWS region {aws_region}, please delete it first or use a different key name.')
        if self.key_exists_local(key_name):
            logger.error(f'Key {key_name} already exists locally')
            raise skyplane_exceptions.PermissionsException(f'Key {key_name} already exists locally, please delete it first or use a different key name.')
        ec2 = self.auth.get_boto3_resource('ec2', aws_region)
        local_key_file = (self.local_key_dir / f'{key_name}.pem')
        local_key_file.parent.mkdir(parents=True, exist_ok=True)
        logger.fs.debug(f'[AWS] Creating keypair {key_name} in {aws_region}')
        key_pair = ec2.create_key_pair(KeyName=key_name, KeyType='rsa')
        with local_key_file.open('w') as f:
            key_str = key_pair.key_material
            # Ensure PEM ends with a newline so SSH clients accept it.
            if (not key_str.endswith('\n')):
                key_str += '\n'
            f.write(key_str)
        # 384 == 0o600: private key readable/writable by owner only.
        os.chmod(local_key_file, 384)
        return local_key_file

    def delete_key(self, aws_region: str, key_name: str):
        """Delete the keypair from AWS and/or the local .pem file, whichever exists."""
        if self.key_exists_aws(aws_region, key_name):
            ec2 = self.auth.get_boto3_resource('ec2', aws_region)
            logger.fs.debug(f'[AWS] Deleting keypair {key_name} in {aws_region}')
            ec2.KeyPair(key_name).delete()
        if self.key_exists_local(key_name):
            (self.local_key_dir / f'{key_name}.pem').unlink()

    def get_key(self, key_name: str) -> Path:
        """Return the local path of the key's .pem file (without checking existence)."""
        return (self.local_key_dir / f'{key_name}.pem')

    def ensure_key_exists(self, aws_region: str, key_name: str, delete_remote: bool=True) -> Path:
        """Reconcile local and remote state so a usable keypair exists; return the .pem path.

        - Both exist: reuse.
        - Neither exists: create.
        - Local only: back up the stale local key (.pem.bak) and recreate.
        - Remote only: delete the remote key and recreate when `delete_remote`
          is set; otherwise raise, since the private key cannot be recovered.
        """
        (local_exists, remote_exists) = (self.key_exists_local(key_name), self.key_exists_aws(aws_region, key_name))
        if (local_exists and remote_exists):
            return self.get_key(key_name)
        elif ((not local_exists) and (not remote_exists)):
            return self.make_key(aws_region, key_name)
        elif (local_exists and (not remote_exists)):
            local_key_path = self.get_key(key_name)
            logger.warning(f'Key {key_name} exists locally but not in AWS region {aws_region}. Moving the local key {local_key_path}.bak')
            local_key_path.rename(local_key_path.with_suffix('.pem.bak'))
            return self.make_key(aws_region, key_name)
        elif delete_remote:
            logger.warning(f'Key {key_name} exists in AWS region {aws_region} but not locally. Deleting the remote key.')
            self.delete_key(aws_region, key_name)
            return self.make_key(aws_region, key_name)
        else:
            raise skyplane_exceptions.PermissionsException(f'Key {key_name} exists in AWS region {aws_region} but not locally. Please delete the key from AWS or move it locally.')
def resnext50(baseWidth, cardinality):
    """ResNeXt-50 (block layout 3-4-6-3) for 1000-class ImageNet classification."""
    return ResNeXt(baseWidth, cardinality, [3, 4, 6, 3], 1000)
class ResNet(nn.Module):
    """Dilated ResNet backbone.

    Layers 3 and 4 keep stride 1 with dilation 2 and 4, so the spatial
    resolution is reduced only 8x overall (common for dense prediction).
    forward() returns both the final feature map and the layer-3 features.
    """

    def __init__(self, block, layers=(3, 4, 23, 3)):
        self.inplanes = 64
        super(ResNet, self).__init__()
        # Standard ResNet stem: 7x7/2 conv + BN + ReLU + 3x3/2 max pool.
        self.conv1 = nn.Conv2d(3, 64, kernel_size=7, stride=2, padding=3, bias=False)
        self.bn1 = nn.BatchNorm2d(64)
        self.relu = nn.ReLU(inplace=True)
        self.maxpool = nn.MaxPool2d(kernel_size=3, stride=2, padding=1)
        self.layer1 = self._make_layer(block, 64, layers[0])
        self.layer2 = self._make_layer(block, 128, layers[1], stride=2)
        # Dilation replaces striding in the last two stages.
        self.layer3 = self._make_layer(block, 256, layers[2], stride=1, dilation=2)
        self.layer4 = self._make_layer(block, 512, layers[3], stride=1, dilation=4)
        # He-style init for convs, unit-scale for batch norms.
        for m in self.modules():
            if isinstance(m, nn.Conv2d):
                n = ((m.kernel_size[0] * m.kernel_size[1]) * m.out_channels)
                m.weight.data.normal_(0, math.sqrt((2.0 / n)))
            elif isinstance(m, nn.BatchNorm2d):
                m.weight.data.fill_(1)
                m.bias.data.zero_()

    def _make_layer(self, block, planes, blocks, stride=1, dilation=1):
        # 1x1 conv + BN projection when the residual shape changes.
        downsample = None
        if ((stride != 1) or (self.inplanes != (planes * block.expansion))):
            downsample = nn.Sequential(nn.Conv2d(self.inplanes, (planes * block.expansion), kernel_size=1, stride=stride, bias=False), nn.BatchNorm2d((planes * block.expansion)))
        layers = [block(self.inplanes, planes, stride, downsample)]
        self.inplanes = (planes * block.expansion)
        # Remaining blocks carry the stage's dilation.
        for i in range(1, blocks):
            layers.append(block(self.inplanes, planes, dilation=dilation))
        return nn.Sequential(*layers)

    def forward(self, x):
        x = self.conv1(x)
        x = self.bn1(x)
        x = self.relu(x)
        x = self.maxpool(x)
        x = self.layer1(x)
        x = self.layer2(x)
        x_3 = self.layer3(x)
        x = self.layer4(x_3)
        # Final features plus the intermediate layer-3 features (e.g. for skip heads).
        return (x, x_3)
def random_init(size, rng=None, rng_type=None):
    """Sample initial weights of the given shape, cast to theano's floatX.

    rng_type: None (small uniform in [-0.05, 0.05]), 'normal' (standard
    normal), or 'uniform' (variance-1 uniform on [-sqrt(3), sqrt(3)]).
    """
    if rng is None:
        rng = default_rng
    if rng_type is None:
        vals = rng.uniform(low=-0.05, high=0.05, size=size)
    elif rng_type == 'normal':
        vals = rng.standard_normal(size)
    elif rng_type == 'uniform':
        bound = 3.0 ** 0.5
        vals = rng.uniform(low=-bound, high=bound, size=size)
    else:
        raise Exception('unknown random inittype: {}'.format(rng_type))
    return vals.astype(theano.config.floatX)
class OverconvergentDistributions_class(OverconvergentDistributions_abstract):
    """Concrete space of p-adic (overconvergent) distributions."""

    def _repr_(self):
        """Return the print representation, including any determinant/character twist."""
        s = ('Space of %s-adic distributions with k=%s action and precision cap %s' % (self._p, self._k, self._prec_cap))
        twiststuff = []
        if (self._dettwist is not None):
            twiststuff.append(('det^%s' % self._dettwist))
        if (self._character is not None):
            twiststuff.append(('(%s)' % self._character))
        if twiststuff:
            # Fixed typo: previously printed "twistted by".
            s += (' twisted by ' + ' * '.join(twiststuff))
        return s

    def is_symk(self):
        """This is the full overconvergent space, never a Sym^k space."""
        return False

    def change_ring(self, new_base_ring):
        """Return the same distribution space with coefficients in `new_base_ring`."""
        return OverconvergentDistributions(k=self._k, p=self._p, prec_cap=self._prec_cap, base=new_base_ring, character=self._character, adjuster=self._adjuster, act_on_left=self._act.is_left())

    def specialize(self, new_base_ring=None):
        """Project to the finite-dimensional Sym^k space (character twists unsupported)."""
        if (self._character is not None):
            raise NotImplementedError
        if (new_base_ring is None):
            new_base_ring = self.base_ring()
        return Symk(k=self._k, base=new_base_ring, adjuster=self._adjuster, act_on_left=self._act.is_left())
class HTTPServerWithCounter(HTTPServer):
    """HTTPServer that tracks how many PUT requests it has received."""

    def __init__(self, *args, **kwargs):
        super().__init__(*args, **kwargs)
        # Incremented elsewhere (by the request handler) on each PUT.
        self.put_requests = 0
def run_aggregation_queries():
    """Run every aggregation query enabled via boolean args and dump each result to CSV.

    Relies on module-level `args` (name -> value), `queries` (query builders),
    `collection` (the data store), `start_date`/`end_date`, and `logger`.
    """
    query_list = []
    # Only flags that are literally True booleans select a query; non-bool
    # truthy values (e.g. dates or strings) are ignored here.
    for method_name in args.keys():
        requested = args[method_name]
        if (requested and isinstance(requested, bool)):
            query_list.append(getattr(queries, method_name))
    for query in query_list:
        logger.info(f"Query: '{query.__name__}', date range: ({start_date}, {end_date})")
        start_time = time.time()
        result = collection.aggregate(query(args))
        # One CSV per query, named after the query and the date range.
        filename = f'{query.__name__}_{start_date}_to_{end_date}.csv'
        df = pd.DataFrame.from_dict(result)
        df.to_csv(filename, index=False)
        logger.info(f'{query.__name__} query completed in {(time.time() - start_time):.3f} seconds.')
class VolumetricMaxUnpooling(Module):
    """Inverse of a paired VolumetricMaxPooling: scatters values back to the recorded max indices.

    Only non-overlapping pooling is supported (kernel == stride in each dim),
    which the constructor asserts.
    """

    def __init__(self, poolingModule):
        super(VolumetricMaxUnpooling, self).__init__()
        assert isinstance(poolingModule, VolumetricMaxPooling)
        # Non-overlapping pooling only: kernel must equal stride per dimension.
        assert (poolingModule.kT == poolingModule.dT)
        assert (poolingModule.kH == poolingModule.dH)
        assert (poolingModule.kW == poolingModule.dW)
        self.pooling = poolingModule

    def _setParams(self):
        # Refresh geometry from the paired pooling module: its *input* size is
        # our *output* size, and strides/padding are mirrored.
        self.indices = self.pooling.indices
        self.otime = self.pooling.itime
        self.oheight = self.pooling.iheight
        self.owidth = self.pooling.iwidth
        self.dT = self.pooling.dT
        self.dH = self.pooling.dH
        self.dW = self.pooling.dW
        self.padT = self.pooling.padT
        self.padH = self.pooling.padH
        self.padW = self.pooling.padW

    def updateOutput(self, input):
        self._setParams()
        self._backend.VolumetricMaxUnpooling_updateOutput(self._backend.library_state, input, self.output, self.indices, self.otime, self.owidth, self.oheight, self.dT, self.dW, self.dH, self.padT, self.padW, self.padH)
        return self.output

    def updateGradInput(self, input, gradOutput):
        self._setParams()
        self._backend.VolumetricMaxUnpooling_updateGradInput(self._backend.library_state, input, gradOutput, self.gradInput, self.indices, self.otime, self.owidth, self.oheight, self.dT, self.dW, self.dH, self.padT, self.padW, self.padH)
        return self.gradInput

    def __repr__(self):
        return ('nn.VolumetricMaxUnpooling associated to ' + self.pooling.__repr__())
class TensorboardXWriter(EventWriter):
    """EventWriter that pushes smoothed scalars from the global EventStorage to TensorBoard."""

    def __init__(self, log_dir: str, window_size: int=20, **kwargs):
        # window_size controls the smoothing window applied by the storage.
        self.window_size = window_size
        from torch.utils.tensorboard import SummaryWriter
        self.writer = SummaryWriter(log_dir, **kwargs)

    def write(self, **kwargs):
        storage = get_event_storage()
        smoothed = storage.latest_with_smoothing_hint(self.window_size)
        for name, value in smoothed.items():
            self.writer.add_scalar(name, value, storage.iter)

    def close(self):
        # __init__ may have raised before `writer` was assigned.
        if hasattr(self, 'writer'):
            self.writer.close()
def read_any_img(img_path: str, format='ndarray'):
    """Read any supported image from disk as RGB in the requested output format."""
    return read_rgb_image(img_path, format)
.parametrize('input_dim, output_dim, hidden_sizes, output_w_init_vals, n_heads', plain_settings)  # NOTE(review): likely '@pytest.mark.parametrize' with its prefix lost — confirm
def test_multi_headed_mlp_module_with_layernorm(input_dim, output_dim, hidden_sizes, output_w_init_vals, n_heads):
    """With layer normalization, every head's output collapses to zeros.

    All-ones hidden weights and no nonlinearity make each hidden layer's
    pre-activation constant across units, so layer norm maps it to zeros —
    hence the all-zero expectation below.
    """
    module = MultiHeadedMLPModule(n_heads=n_heads, input_dim=input_dim, output_dims=output_dim, hidden_sizes=hidden_sizes, hidden_nonlinearity=None, layer_normalization=True, hidden_w_init=nn.init.ones_, output_nonlinearities=None, output_w_inits=list(map(_helper_make_inits, output_w_init_vals)))
    input_value = torch.ones(input_dim)
    outputs = module(input_value)
    # Broadcast singleton per-head settings across all heads.
    if (len(output_w_init_vals) == 1):
        output_w_init_vals = (list(output_w_init_vals) * n_heads)
    if (len(output_dim) == 1):
        output_dim = (list(output_dim) * n_heads)
    for (i, output) in enumerate(outputs):
        # NOTE(review): `expected` is computed but never asserted against —
        # looks like dead code from a non-layernorm variant; confirm.
        expected = (input_dim * torch.Tensor(hidden_sizes).prod())
        expected *= output_w_init_vals[i]
        assert torch.equal(output, torch.zeros(output_dim[i]))
def get_quantized_kernel_by_weights_qc(fw_info: FrameworkInfo, n: BaseNode, weights_qc: NodeWeightsQuantizationConfig, fw_impl: FrameworkImplementation):
    """Quantize node `n`'s kernel according to its weights-quantization config.

    Returns a tuple of (quantized_kernel, (input_channels_axis, output_channels_axis)).
    """
    # Per-channel quantization needs to know which axis carries the channels.
    if (weights_qc.weights_per_channel_threshold and (fw_info.kernel_channels_mapping is None)):
        Logger.warning('Weights Per Channel Quantization requires channel mapping function but framework info does not contain one')
    (output_channels_axis, input_channels_axis) = get_channels_axis(weights_qc, fw_info, n.type)
    Logger.debug(f'quantizing {n.name} with {weights_qc.weights_n_bits} bits')
    # Apply the configured quantizer (signed) to the node's kernel weights.
    quantized_kernel = weights_qc.weights_quantization_fn(n.get_weights_by_keys(fw_impl.constants.KERNEL), n_bits=weights_qc.weights_n_bits, signed=True, quantization_params=weights_qc.weights_quantization_params, per_channel=weights_qc.weights_per_channel_threshold, output_channels_axis=output_channels_axis)
    return (quantized_kernel, (input_channels_axis, output_channels_axis))
.gpu  # NOTE(review): likely '@pytest.mark.gpu' with its prefix lost — confirm
def test_scalar_output():
    """A GPU-transformed SDFG returning a scalar: result must equal A[1, 1] + 5."""

    def scaltest(A: dace.float64[(20, 20)]):
        scal = dace.define_local_scalar(dace.float64)
        # Single-iteration map wrapping a tasklet that writes the scalar.
        for _ in dace.map[0:1]:
            with dace.tasklet:
                (inp << A[(1, 1)])
                (out >> scal)
                out = (inp + 5)
        return scal
    sdfg = scaltest.to_sdfg()
    sdfg.apply_gpu_transformations()
    A = np.random.rand(20, 20)
    ret = sdfg(A=A)
    assert np.allclose(ret, (A[(1, 1)] + 5))
def test_binary_closing_noninteger_brute_force_passes_when_true():
    """Non-integer `brute_force` values behave exactly like their bool() coercion."""
    data = numpy.ones([1])
    for raw in (1.5, 0.0):
        coerced = sndi.binary_erosion(data, iterations=2, brute_force=bool(raw))
        direct = sndi.binary_erosion(data, iterations=2, brute_force=raw)
        assert direct == coerced
class pAdicExtensionGeneric(pAdicGeneric):
    """Common base class for extensions of p-adic rings/fields defined by a polynomial."""

    def __init__(self, poly, prec, print_mode, names, element_class):
        # `names` packs (variable name, ?, unramified print name, ramified
        # print name); only the first is the generator name passed upward.
        self._given_poly = poly
        R = poly.base_ring()
        print_mode['unram_name'] = names[2]
        print_mode['ram_name'] = names[3]
        print_mode['var_name'] = names[0]
        names = names[0]
        pAdicGeneric.__init__(self, R, R.prime(), prec, print_mode, names, element_class)
        self._populate_coercion_lists_(coerce_list=[R])

    def _coerce_map_from_(self, R):
        """Coerce from the base ring, or from an extension whose fraction field is self."""
        if (R is self.base_ring()):
            return True
        elif (isinstance(R, pAdicExtensionGeneric) and (R.fraction_field() is self)):
            if (self._implementation == 'NTL'):
                return True
            # Choose the ring-to-fraction-field coercion matching R's precision
            # model and whether R is unramified (absolute_e() == 1) or relative.
            elif (R._prec_type() == 'capped-abs'):
                if (R.absolute_e() == 1):
                    from sage.rings.padics.qadic_flint_CA import pAdicCoercion_CA_frac_field as coerce_map
                else:
                    from sage.rings.padics.relative_ramified_CA import pAdicCoercion_CA_frac_field as coerce_map
            elif (R._prec_type() == 'capped-rel'):
                if (R.absolute_e() == 1):
                    from sage.rings.padics.qadic_flint_CR import pAdicCoercion_CR_frac_field as coerce_map
                else:
                    from sage.rings.padics.relative_ramified_CR import pAdicCoercion_CR_frac_field as coerce_map
            elif (R._prec_type() == 'floating-point'):
                if (R.absolute_e() == 1):
                    from sage.rings.padics.qadic_flint_FP import pAdicCoercion_FP_frac_field as coerce_map
                else:
                    from sage.rings.padics.relative_ramified_FP import pAdicCoercion_FP_frac_field as coerce_map
            elif (R._prec_type() == 'fixed-mod'):
                if (R.absolute_e() == 1):
                    from sage.rings.padics.qadic_flint_FM import pAdicCoercion_FM_frac_field as coerce_map
                else:
                    from sage.rings.padics.relative_ramified_FM import pAdicCoercion_FM_frac_field as coerce_map
            # NOTE(review): if none of the precision-type branches matched,
            # `coerce_map` is unbound here and this raises UnboundLocalError —
            # confirm all precision types are covered.
            return coerce_map(R, self)

    def _extension_type(self):
        """Extension-type label ('Unramified'/'Eisenstein'/...); generic base returns ''."""
        return ''

    def _repr_(self, do_latex=False):
        """Print/latex representation of the extension."""
        type = self._extension_type()
        base = self.base_ring()
        p = self.prime()
        if do_latex:
            if (self.absolute_e() == 1):
                # Unramified: render as Q_{p^f} or Z_{p^f}.
                if self.is_field():
                    letter = '\\Bold{Q}'
                else:
                    letter = '\\Bold{Z}'
                f = self.absolute_f()
                if (f == 1):
                    subscript = str(p)
                else:
                    subscript = ('%s^{%s}' % (p, f))
                return ('%s_{%s}' % (letter, subscript))
            else:
                return ('%s[%s]' % (self.base_ring()._repr_(do_latex=True), self.latex_name()))
        else:
            if (type != ''):
                type += ' '
            s = ('%s-adic %sExtension %s in %s defined by %s' % (p, type, ('Field' if self.is_field() else 'Ring'), self.variable_name(), self.defining_polynomial(exact=True)))
            if (base.absolute_degree() > 1):
                s += (' over its base ' + ('field' if base.is_field() else 'ring'))
            return s

    def _convert_map_from_(self, R):
        """Conversion (not coercion) maps: matching p-adic extensions, number-field orders, number fields, and the residue field."""
        cat = None
        if ((self._implementation == 'NTL') and (R == QQ)):
            # NTL tends to create conversion loops with QQ; disallow.
            return None
        if (isinstance(R, pAdicExtensionGeneric) and (R.prime() == self.prime()) and (R.defining_polynomial(exact=True) == self.defining_polynomial(exact=True))):
            if (R.is_field() and (not self.is_field())):
                # Field -> ring conversion is only partially defined.
                cat = SetsWithPartialMaps()
            elif (R.category() is self.category()):
                cat = R.category()
            else:
                cat = (EuclideanDomains() & MetricSpaces().Complete())
        elif (isinstance(R, sage.rings.abc.Order) and (R.number_field().defining_polynomial() == self.defining_polynomial())):
            cat = IntegralDomains()
        elif (isinstance(R, NumberField) and (R.defining_polynomial() == self.defining_polynomial())):
            if self.is_field():
                cat = Fields()
            else:
                cat = SetsWithPartialMaps()
        else:
            k = self.residue_field()
            if (R is k):
                return ResidueLiftingMap._create_(R, self)
        if (cat is not None):
            H = Hom(R, self, cat)
            return H.__make_element_class__(DefPolyConversion)(H)

    def __eq__(self, other):
        """Equal when ground ring, defining polynomial, precision cap and print modes agree."""
        if (not isinstance(other, pAdicExtensionGeneric)):
            return False
        return ((self.ground_ring() == other.ground_ring()) and (self.defining_polynomial() == other.defining_polynomial()) and (self.precision_cap() == other.precision_cap()) and self._printer.richcmp_modes(other._printer, op_EQ))

    def __ne__(self, other):
        return (not self.__eq__(other))

    def __hash__(self):
        # Consistent with __eq__ up to print modes (which hashing ignores).
        return hash((self.ground_ring(), self.defining_polynomial(exact=True), self.precision_cap()))

    def defining_polynomial(self, var=None, exact=False):
        """Return the defining polynomial (exact lift or as given), optionally renamed to `var`."""
        if exact:
            ans = self._exact_modulus
        else:
            ans = self._given_poly
        if (var is None):
            return ans
        else:
            return ans.change_variable_name(var)

    def exact_field(self):
        """Number field with the same defining polynomial (exact counterpart of this field)."""
        return self.base_ring().exact_field().extension(self._exact_modulus, self.variable_name())

    def exact_ring(self):
        """Exact counterpart of this ring (order/polynomial extension of the exact base)."""
        return self.base_ring().exact_ring().extension(self.defining_polynomial(exact=True), self.variable_name())

    def modulus(self, exact=False):
        """Alias for defining_polynomial()."""
        return self.defining_polynomial(exact=exact)

    def ground_ring(self):
        """The ring this extension was built over (base ring of the defining polynomial)."""
        return self._given_poly.base_ring()

    def ground_ring_of_tower(self):
        """Walk down a tower of extensions to the base p-adic ring/field."""
        if isinstance(self.ground_ring(), pAdicBaseGeneric):
            return self.ground_ring()
        else:
            return self.ground_ring().ground_ring_of_tower()

    def polynomial_ring(self):
        """The polynomial ring the defining polynomial lives in."""
        return self._given_poly.parent()

    def construction(self, forbid_frac_field=False):
        """Functorial construction: FractionField over the integer ring when a field, else an AlgebraicExtensionFunctor over the base."""
        from sage.categories.pushout import AlgebraicExtensionFunctor as AEF, FractionField as FF
        if ((not forbid_frac_field) and self.is_field()):
            return (FF(), self.integer_ring())
        return (AEF([self.defining_polynomial(exact=True)], [self.variable_name()], precs=[self.precision_cap()], print_mode=self._printer.dict(), implementations=[self._implementation]), self.base_ring())

    def random_element(self):
        """Random element: random base-ring coefficients against powers of the generator."""
        return reduce((lambda x, y: (x + y)), [(self.ground_ring().random_element() * (self.gen() ** i)) for i in range(self.modulus().degree())], 0)

    _method(key=(lambda self, base, basis, map: ((base or self.base_ring()), map)))  # NOTE(review): looks like a '@cached_method(key=...)' decorator that lost its prefix — as written this bare expression raises NameError at class creation; confirm against upstream
    def free_module(self, base=None, basis=None, map=True):
        """View this extension as a free module over `base` (itself, its base ring, or the tower base).

        Returns (V, from_V, to_V) when `map` handling applies; custom bases are
        not implemented.
        """
        if (basis is not None):
            raise NotImplementedError
        B = self.base_ring()
        if (base is None):
            base = B
        A = B.base_ring()
        d = self.relative_degree()
        if (base is B):
            # One-step module over the immediate base.
            V = (B ** d)
            from_V = MapFreeModuleToOneStep
            to_V = MapOneStepToFreeModule
        elif (base is A):
            # Two-step module over the base of the base.
            d *= B.relative_degree()
            V = (A ** d)
            from_V = MapFreeModuleToTwoStep
            to_V = MapTwoStepToFreeModule
        elif (base is self):
            return super().free_module(base=base, basis=basis, map=map)
        else:
            raise NotImplementedError
        FromV = Hom(V, self)
        ToV = Hom(self, V)
        from_V = FromV.__make_element_class__(from_V)(FromV)
        to_V = ToV.__make_element_class__(to_V)(ToV)
        return (V, from_V, to_V)
def preprocess_image(image, output_height, output_width, random_mirror, is_training=False, resize_side_min=_RESIZE_SIDE_MIN, resize_side_max=_RESIZE_SIDE_MAX):
    """Dispatch to training or evaluation preprocessing.

    NOTE(review): the eval path ignores output_height/output_width and the
    resize bounds, calling preprocess_for_eval(image) with defaults only —
    confirm this matches preprocess_for_eval's signature and is intended.
    """
    if is_training:
        return preprocess_for_train(image, output_height, output_width, random_mirror, resize_side_min, resize_side_max)
    else:
        return preprocess_for_eval(image)
def do_retrieval():
    """Run BM25 retrieval on each benchmark collection and dump rank results + qrels to JSON.

    Failures for one collection are reported (with their cause) and do not
    stop the remaining collections.
    """
    # TREC DL / BEIR-style English collections.
    for data in ['dl19', 'dl20', 'covid', 'nfc', 'touche', 'dbpedia', 'scifact', 'signal', 'news', 'robust04']:
        print('#' * 20)
        print(f'Evaluation on {data}')
        print('#' * 20)
        try:
            searcher = LuceneSearcher.from_prebuilt_index(THE_INDEX[data])
            topics = get_topics(THE_TOPICS[data] if data != 'dl20' else 'dl20')
            qrels = get_qrels(THE_TOPICS[data])
            rank_results = run_retriever(topics, searcher, qrels, k=100)
            with open(f'rank_results_{data}.json', 'w') as f:
                json.dump(rank_results, f, indent=2)
            with open(f'qrels_{data}.json', 'w') as f:
                json.dump(qrels, f, indent=2)
        except Exception as e:
            # Narrowed from a bare `except:` (which also swallowed
            # KeyboardInterrupt/SystemExit) and now reports the cause.
            print(f'Failed to retrieve passages for {data}: {e}')
    # Mr.TyDi multilingual collections.
    for data in ['mrtydi-ar', 'mrtydi-bn', 'mrtydi-fi', 'mrtydi-id', 'mrtydi-ja', 'mrtydi-ko', 'mrtydi-ru', 'mrtydi-sw', 'mrtydi-te', 'mrtydi-th']:
        print('#' * 20)
        print(f'Evaluation on {data}')
        print('#' * 20)
        try:
            searcher = LuceneSearcher.from_prebuilt_index(THE_INDEX[data])
            topics = get_topics(THE_TOPICS[data] if data != 'dl20' else 'dl20')
            qrels = get_qrels(THE_TOPICS[data])
            rank_results = run_retriever(topics, searcher, qrels, k=100)
            # NOTE(review): slicing to 100 assumes run_retriever returns a list
            # here — confirm.
            rank_results = rank_results[:100]
            with open(f'data/rank_results/{data}.json', 'w') as f:
                json.dump(rank_results, f, indent=2)
            with open(f'data/qrels/{data}.json', 'w') as f:
                json.dump(qrels, f, indent=2)
        except Exception as e:
            print(f'Failed to retrieve passages for {data}: {e}')
def run_make(arg):
    """Run the build tool in parallel; on failure, fall back to a serial build for a readable log.

    Raises RuntimeError when the serial build also fails. (The previous
    `assert` on the serial build would have been stripped entirely under
    `python -O`, silently skipping the recovery build's failure check.)
    """
    if system('%s -j %s' % (args.make_tool, arg)) != 0:
        print('\nBummer. Running serial build in order to recover the log and have a chance to fix the build')
        if system('%s %s' % (args.make_tool, arg)) != 0:
            raise RuntimeError('serial build failed: %s %s' % (args.make_tool, arg))
def hfft2(x, s=None, axes=((- 2), (- 1)), norm=None, overwrite_x=False, workers=None):
    """2-D FFT of a Hermitian-symmetric signal: a thin wrapper over hfftn restricted to the last two axes."""
    return hfftn(x, s=s, axes=axes, norm=norm, overwrite_x=overwrite_x, workers=workers)
def delsarte_bound_additive_hamming_space(n, d, q, d_star=1, q_base=0, return_data=False, solver='PPL', isinteger=False):
    """Delsarte LP upper bound for additive codes in the Hamming space H(n, q).

    `q` must be a power of `q_base` (q_base defaults to q). Returns the largest
    exponent m such that a code of size q_base^m is admitted by the LP bound
    (or (A, p, m) with the LP data when return_data is set). Returns False on
    an inconsistent q/q_base pair or a solver failure.
    """
    from sage.numerical.mip import MIPSolverException
    if (q_base == 0):
        q_base = q
    # Verify q is a power of q_base: find kk with q_base^kk == q.
    kk = 0
    while ((q_base ** kk) < q):
        kk += 1
    if ((q_base ** kk) != q):
        print('Wrong q_base=', q_base, ' for q=', q, kk)
        return False
    m = (kk * n)  # initial exponent (emulates a repeat/until loop)
    bd = ((q ** n) + 1)
    # Re-solve repeatedly: the size cap q_base^m is itself an LP constraint,
    # so each tightened m yields a new LP until the bound stabilizes.
    while ((q_base ** m) < bd):
        (A, p) = _delsarte_LP_building(n, d, d_star, q, isinteger, solver, (q_base ** m))
        try:
            bd = p.solve()
        except MIPSolverException as exc:
            print('Solver exception:', exc)
            return ((A, p, False) if return_data else False)
        # Round the LP bound down to the nearest power of q_base.
        m = (- 1)
        while ((q_base ** (m + 1)) < bd):
            m += 1
    # Allow equality: a code of size exactly bd is still admissible.
    if ((q_base ** (m + 1)) == bd):
        m += 1
    return ((A, p, m) if return_data else m)
class InMemoryVesselDataset(torch_geometric.data.InMemoryDataset):
    """In-memory torch_geometric dataset of vessel meshes loaded from HDF5 files.

    `pattern` selects raw files under <root>/raw, `split` is a (start, stop)
    slice over the sorted matches, and `purpose` namespaces the processed cache.
    """

    def __init__(self, root, pattern, split, purpose, transform=None, pre_transform=None):
        self.root = root
        self.pattern = pattern
        self.purpose = purpose
        self.split = split
        super(InMemoryVesselDataset, self).__init__(root, transform, pre_transform)
        (self.data, self.slices) = torch.load(self.processed_paths[0])

    def raw_file_names(self):
        # NOTE(review): torch_geometric expects raw_file_names /
        # processed_file_names to be properties; the @property decorators may
        # have been lost in transit — confirm against upstream.
        root = os.path.join(self.root, 'raw')
        absolute = glob.glob(os.path.join(root, self.pattern))
        absolute.sort()
        # Single HDF5 container: treat each top-level group as one sample path.
        if ('.hdf5' in os.path.basename(absolute[0])):
            with h5py.File(absolute[0], 'r') as f:
                absolute = [os.path.join(absolute[0], i) for i in list(f)]
        absolute = absolute[self.split[0]:self.split[1]]
        return [os.path.relpath(a, root) for a in absolute]

    def processed_file_names(self):
        root = os.path.join(self.root, 'processed', self.purpose)
        Path(root).mkdir(parents=True, exist_ok=True)
        return [os.path.join(self.purpose, 'data.pt')]

    def download(self):
        # Raw data is expected to already exist locally; nothing to fetch.
        return

    def process(self):
        """Load, filter and pre-transform all raw samples, then collate into one cache file."""
        data_list = []
        for path in tqdm.tqdm(self.raw_paths):
            data = self.load_process_hdf5(path)
            data_list.append(data)
        if (self.pre_filter is not None):
            data_list = [data for data in data_list if self.pre_filter(data)]
        if (self.pre_transform is not None):
            transformed = []
            for data in tqdm.tqdm(data_list):
                transformed.append(self.pre_transform(data))
            data_list = transformed
        (data, slices) = self.collate(data_list)
        torch.save((data, slices), self.processed_paths[0])

    def load_process_hdf5(self, path):
        """Load one sample (an HDF5 group) into a torch_geometric Data object.

        Fix: this method is called as `self.load_process_hdf5(path)` but was
        defined without `self`, so the instance landed in the `path` parameter
        and every call raised a TypeError.
        """
        (file, sample) = os.path.split(path)
        with h5py.File(file, 'r') as f:
            data = Data(
                shape_id=(torch.tensor(f[sample].attrs['shape id']) if ('shape id' in f[sample].attrs) else None),
                condition=(torch.tensor(f[sample]['cbf'][()]) if ('cbf' in f[sample]) else None),
                t=(torch.from_numpy(f[sample]['t'][()][(None, ...)]) if ('t' in f[sample]) else None),
                # wall shear stress with the sample axis moved next to last
                y=torch.from_numpy(f[sample]['wss'][()].swapaxes(0, (- 2))),
                # dataset name varies across file versions
                inlet_index=torch.from_numpy(f[sample][('inlet_idx' if ('inlet_idx' in f[sample]) else 'inlet_idcs')][()]),
                pos=torch.from_numpy(f[sample]['pos'][()]),
                face=torch.from_numpy(f[sample]['face'][()].T).long())
        return data
def produceDict():
    """Print the 150 ADE20K segmentation class names as YAML list entries.

    Each line has the form `` - <name>: 1``.
    """
    seg_name = ['wallbuilding', 'sky', 'floor', 'tree', 'ceiling', 'road', 'bed ', 'windowpane', 'grass', 'cabinet', 'sidewalk', 'person', 'earth', 'door', 'table', 'mountain', 'plant', 'curtain', 'chair', 'car', 'water', 'painting', 'sofa', 'shelf', 'house', 'sea', 'mirror', 'rug', 'field', 'armchair', 'seat', 'fence', 'desk', 'rock', 'wardrobe', 'lamp', 'bathtub', 'railing', 'cushion', 'base', 'box', 'column', 'signboard', 'chest of drawers', 'counter', 'sand', 'sink', 'skyscraper', 'fireplace', 'refrigerator', 'grandstand', 'path', 'stairs', 'runway', 'case', 'pool table', 'pillow', 'screen door', 'stairway', 'river', 'bridge', 'bookcase', 'blind', 'coffee table', 'toilet', 'flower', 'book', 'hill', 'bench', 'countertop', 'stove', 'palm', 'kitchen island', 'computer', 'swivel chair', 'boat', 'bar', 'arcade machine', 'hovel', 'bus', 'towel', 'light', 'truck', 'tower', 'chandelier', 'awning', 'streetlight', 'booth', 'television receiver', 'airplane', 'dirt track', 'apparel', 'pole', 'land', 'bannister', 'escalator', 'ottoman', 'bottle', 'buffet', 'poster', 'stage', 'van', 'ship', 'fountain', 'conveyer belt', 'canopy', 'washer', 'plaything', 'swimming pool', 'stool', 'barrel', 'basket', 'waterfall', 'tent', 'bag', 'minibike', 'cradle', 'oven', 'ball', 'food', 'step', 'tank', 'trade name', 'microwave', 'pot', 'animal', 'bicycle', 'lake', 'dishwasher', 'screen', 'blanket', 'sculpture', 'hood', 'sconce', 'vase', 'traffic light', 'tray', 'ashcan', 'fan', 'pier', 'crt screen', 'plate', 'monitor', 'bulletin board', 'shower', 'radiator', 'glass', 'clock', 'flag']
    # One print of the joined lines emits exactly the same stdout as one
    # print per name (print appends the trailing newline).
    print('\n'.join(((' - ' + seg) + ': 1') for seg in seg_name))
def bleu_count(hypothesis, references, max_n=4):
    """Accumulate corpus-level BLEU statistics.

    For each hypothesis / reference-set pair, count clipped and total n-gram
    matches for n = 1..max_n, plus the hypothesis length and the closest
    reference length (the shorter reference wins ties).

    Returns (clip_counts, total_counts, hyp_length, ref_length).
    """
    def _ngram_counts(tokens, k):
        # Frequency table of the k-grams of `tokens`, keyed by token tuple.
        table = {}
        for start in range((len(tokens) - k) + 1):
            gram = tuple(tokens[start:(start + k)])
            table[gram] = table.get(gram, 0) + 1
        return table

    clip_counts = [0] * max_n
    total_counts = [0] * max_n
    hyp_length = 0
    ref_length = 0
    for m in range(len(hypothesis)):
        hyp_tokens = hypothesis[m].split()
        ref_token_lists = [r.split() for r in references[m]]
        best_diff = 9999
        best_len = 9999
        # Per n: maximum count of each n-gram over all references.
        merged = [{} for _ in range(max_n)]
        for ref_tokens in ref_token_lists:
            diff = abs(len(ref_tokens) - len(hyp_tokens))
            if diff < best_diff or (diff == best_diff and len(ref_tokens) < best_len):
                best_diff = diff
                best_len = len(ref_tokens)
            for n in range(max_n):
                for gram, cnt in _ngram_counts(ref_tokens, n + 1).items():
                    if merged[n].get(gram, 0) < cnt:
                        merged[n][gram] = cnt
        hyp_length += len(hyp_tokens)
        ref_length += best_len
        for n in range(max_n):
            for gram, cnt in _ngram_counts(hyp_tokens, n + 1).items():
                if gram in merged[n]:
                    # Clip hypothesis counts at the best reference count.
                    clip_counts[n] += min(cnt, merged[n][gram])
                total_counts[n] += cnt
    return (clip_counts, total_counts, hyp_length, ref_length)
def to_directory(file_name, WIDTH, HEIGHT, tmp_dir, start_frame=None, end_frame=None):
    """Materialise a directory of numbered frames in `tmp_dir` and return it.

    If `file_name` is already a directory of numbered ``.png`` frames, the
    frames with 1-based index in ``(start_frame, end_frame]`` are copied into
    `tmp_dir`, renumbered from ``0001.png``.  ``None`` bounds mean "from the
    first frame" / "through the last frame" (previously they crashed with a
    TypeError in this branch).  Otherwise `file_name` is treated as a video
    and decoded with ffmpeg at WIDTH x HEIGHT.
    """
    import shutil  # local import: only needed on the directory path
    if os.path.isdir(file_name):
        first = 0 if start_frame is None else start_frame
        last = float('inf') if end_frame is None else end_frame
        for img_file in os.listdir(file_name):
            if img_file.endswith('.png'):
                img_index = int(img_file.split('.')[0])
                if (img_index >= (first + 1)) and (img_index < (last + 1)):
                    spth = os.path.join(file_name, img_file)
                    tpth = os.path.join(tmp_dir, ('%04d.png' % (img_index - first)))
                    # Keep the old log line, but copy via shutil instead of
                    # `os.system('cp ...')`: portable and safe for paths with
                    # spaces or shell metacharacters.
                    print(f'cp {spth} {tpth}')
                    shutil.copy(spth, tpth)
        return tmp_dir
    else:
        # NOTE(review): the directory branch writes '%04d.png' but this ffmpeg
        # pattern uses '%4d.png' -- confirm whether zero-padding was intended.
        os.system('ffmpeg -i {} -s {}x{} {}'.format(file_name, WIDTH, HEIGHT, os.path.join(tmp_dir, '%4d.png')))
        return tmp_dir
def optimize_pb_model_command(input_pb_file, output_pb_file):
    """Optimize a frozen TensorFlow GraphDef with nnabla's pb optimizer.

    Reads `input_pb_file`, runs nnabla_converter's OptimizePb pass, writes the
    optimized graph to `output_pb_file`, and dumps the reported optimization
    rate as JSON alongside it.
    """
    try:
        # Heavy imports kept local so the CLI loads without TF/nnabla installed.
        import tensorflow as tf
        from tensorflow.python.platform import gfile
        from nnabla.utils.converter.tensorflow.common import OptimizePb
    except ImportError:
        raise ImportError('nnabla_converter python package is not found, install nnabla_converter package with "pip install nnabla_converter"')
    with gfile.GFile(input_pb_file, 'rb') as f:
        graph_def = tf.compat.v1.GraphDef()
        graph_def.ParseFromString(f.read())
    optimize = OptimizePb(graph_def).execute()
    optimize.export_to_file(output_pb_file)
    # NOTE(review): replace() rewrites EVERY '.' in the path (including './'
    # or dotted directory names), not just the extension -- e.g. './out.pb'
    # becomes '_/out_pb.json'.  Presumably os.path.splitext was intended;
    # confirm before relying on the report location.
    doc_file = (output_pb_file.replace('.', '_') + '.json')
    with open(doc_file, 'w') as f:
        json.dump(optimize.get_optimization_rate(), f)
class CosineAnnealingRestartCyclicLR(_LRScheduler):
    """Cosine-annealing schedule with warm restarts.

    `periods[i]` is the length of cycle i, `restart_weights[i]` scales the
    learning rate at the start of cycle i, and `eta_mins[i]` is that cycle's
    LR floor.
    """

    def __init__(self, optimizer, periods, restart_weights=(1,), eta_mins=(0,), last_epoch=(- 1)):
        self.periods = periods
        self.restart_weights = restart_weights
        self.eta_mins = eta_mins
        assert (len(self.periods) == len(self.restart_weights)), 'periods and restart_weights should have the same length.'
        # Running sum of period lengths: epoch at which each cycle ends.
        running = 0
        cumulative = []
        for length in self.periods:
            running += length
            cumulative.append(running)
        self.cumulative_period = cumulative
        super(CosineAnnealingRestartCyclicLR, self).__init__(optimizer, last_epoch)

    def get_lr(self):
        # Which cycle the current epoch falls into, and where that cycle began.
        cycle = get_position_from_periods(self.last_epoch, self.cumulative_period)
        weight = self.restart_weights[cycle]
        restart_epoch = 0 if cycle == 0 else self.cumulative_period[cycle - 1]
        period = self.periods[cycle]
        floor = self.eta_mins[cycle]
        phase = math.pi * ((self.last_epoch - restart_epoch) / period)
        return [floor + ((weight * 0.5) * (base_lr - floor)) * (1 + math.cos(phase))
                for base_lr in self.base_lrs]
class ManinSymbolList_gamma0(ManinSymbolList_group):
    """Manin symbols for Gamma0(N) at a given weight."""

    def __init__(self, level, weight):
        # Symbols for Gamma0(N) are indexed by the projective line P^1(Z/NZ).
        symbols = p1list.P1List(level)
        ManinSymbolList_group.__init__(self, level, weight, symbols)

    def __repr__(self):
        template = 'Manin Symbol List of weight %s for Gamma0(%s)'
        return template % (self.weight(), self.level())
class SeqCategoryIDColumn(CategoryColumn):
    """Sequence category-ID feature column.

    Wraps a single FieldDesc whose values are hashed/bucketed into
    `bucket_size` categories.
    """

    def __init__(self, field_desc, bucket_size):
        assert isinstance(field_desc, FieldDesc)
        self.field_desc = field_desc
        self.bucket_size = bucket_size

    def get_field_desc(self):
        """Return the wrapped field descriptions (always one element)."""
        return [self.field_desc]

    def new_feature_column_from(self, field_desc):
        """Build a copy of this column around a different FieldDesc."""
        return SeqCategoryIDColumn(field_desc, self.bucket_size)

    def num_class(self):
        """Number of distinct categories (the bucket count)."""
        return self.bucket_size

    def _to_dict(self):
        # Serialized form consumed by _from_dict.
        return {'field_desc': self.field_desc.to_dict(), 'bucket_size': self.bucket_size}

    @classmethod
    def _from_dict(cls, d):
        # BUG FIX: this takes `cls` as its first parameter but was declared as
        # a plain method, so invoking it as `cls._from_dict(d)` would bind the
        # dict to `cls`.  Declared as the classmethod it was written to be.
        field_desc = FieldDesc.from_dict(d['field_desc'])
        bucket_size = d['bucket_size']
        return SeqCategoryIDColumn(field_desc, bucket_size)
def __getattr__(name):
    """Module-level attribute fallback (PEP 562).

    Routes access to names that formerly lived in the public
    ``scipy.spatial.ckdtree`` namespace through SciPy's sub-module
    deprecation helper, which forwards to the private ``_ckdtree`` module
    while emitting a deprecation warning.
    """
    return _sub_module_deprecation(sub_package='spatial', module='ckdtree', private_modules=['_ckdtree'], all=__all__, attribute=name)
def get_model_value_fn_policy(model, sim_threads, boltzmann_rationality=1):
    """Wrap a learned state-value function in a Boltzmann-rational policy.

    The returned `v_policy(mdp_state, mdp, agent_index)` evaluates the value
    of every successor state reachable by this agent's actions (the other
    agent stays put) and samples an action from softmax(beta * values), with
    beta = `boltzmann_rationality`.
    """
    v_fn = get_model_value_fn(model, sim_threads)

    def v_policy(mdp_state, mdp, agent_index):
        successor_vals = []
        for a in Action.INDEX_TO_ACTION:
            joint_action = ((a, Direction.STAY) if (agent_index == 0) else (Direction.STAY, a))
            s_prime = mdp.get_state_transition(mdp_state, joint_action)[0][0][0]
            s_prime_val = v_fn(s_prime, mdp, agent_index)
            successor_vals.append(s_prime_val)
        # BUG FIX: the rationality coefficient used to multiply exp(v) from
        # the OUTSIDE (beta * exp(v)), where it cancels during normalization
        # and has no effect.  A Boltzmann policy scales values inside the
        # exponent: softmax(beta * v).  Default beta=1 is unchanged.
        numerator = np.exp(boltzmann_rationality * np.array(successor_vals))
        normalizer = sum(numerator)
        num_actions = len(Action.INDEX_TO_ACTION)
        if (normalizer != 0):
            probability_distribution = (numerator / normalizer)
        else:
            # Degenerate case (e.g. underflow): fall back to uniform.
            probability_distribution = (np.ones(num_actions) / num_actions)
        action_idx_array = list(range(num_actions))
        sampled_action_idx = np.random.choice(action_idx_array, p=probability_distribution)
        return Action.INDEX_TO_ACTION[sampled_action_idx]
    return v_policy
def register_Ns3OlsrMprSelectorTuple_methods(root_module, cls):
    """Register Python bindings for ``ns3::olsr::MprSelectorTuple``.

    Pybindgen-generated: exposes the equality operator, the default and copy
    constructors, and the public ``expirationTime`` / ``mainAddr`` attributes.
    """
    cls.add_binary_comparison_operator('==')
    cls.add_constructor([])
    cls.add_constructor([param('ns3::olsr::MprSelectorTuple const &', 'arg0')])
    cls.add_instance_attribute('expirationTime', 'ns3::Time', is_const=False)
    cls.add_instance_attribute('mainAddr', 'ns3::Ipv4Address', is_const=False)
    return
class SDConvectTerm(Term):
    """Shape-derivative convective term, evaluated via ``terms.d_sd_convect``.

    All three arguments are parameter (known) vector fields: the state u,
    the adjoint-like field w, and the mesh velocity mv.
    """

    name = 'ev_sd_convect'
    arg_types = ('parameter_u', 'parameter_w', 'parameter_mv')
    arg_shapes = {'parameter_u': 'D', 'parameter_w': 'D', 'parameter_mv': 'D'}
    function = staticmethod(terms.d_sd_convect)

    def get_fargs(self, par_u, par_w, par_mv, mode=None, term_mode=None, diff_var=None, **kwargs):
        # Geometry mapping of the state field; the second item is unused here.
        (geo, _) = self.get_mapping(par_u)
        u_val = self.get(par_u, 'val')
        u_grad = grad_as_vector(self.get(par_u, 'grad'))
        w_val = self.get(par_w, 'val')
        mv_div = self.get(par_mv, 'div')
        mv_grad = grad_as_vector(self.get(par_mv, 'grad'))
        return (u_val, u_grad, w_val, mv_div, mv_grad, geo, get_default(term_mode, 1))

    def get_eval_shape(self, par_u, par_w, par_mv, mode=None, term_mode=None, diff_var=None, **kwargs):
        # One scalar value per element.
        n_el = self.get_data_shape(par_u)[0]
        return ((n_el, 1, 1, 1), par_u.dtype)
class deV(Sersic):
    """A de Vaucouleurs profile: a Sersic profile with index fixed at n = 4."""

    def __init__(self, x=None, y=None, q=None, pa=None, re=None, amp=None):
        super(deV, self).__init__(x, y, q, pa, re, amp, 4.0)
def inference_pytorch(args, cfg, distributed, data_loader):
    """Build the model from `cfg`, load `args.checkpoint`, and run test-time
    inference over `data_loader`, single- or multi-GPU per `distributed`.

    Returns the raw per-sample outputs collected by the test loop.
    """
    if (args.average_clips is not None):
        # Override the clip-averaging strategy.  Precedence: create
        # model.test_cfg when neither config has one, otherwise patch
        # whichever of model.test_cfg / top-level test_cfg exists.
        if ((cfg.model.get('test_cfg') is None) and (cfg.get('test_cfg') is None)):
            cfg.model.setdefault('test_cfg', dict(average_clips=args.average_clips))
        elif (cfg.model.get('test_cfg') is not None):
            cfg.model.test_cfg.average_clips = args.average_clips
        else:
            cfg.test_cfg.average_clips = args.average_clips
    # A checkpoint is loaded below, so pretrained-weight loading is disabled.
    turn_off_pretrained(cfg.model)
    model = build_model(cfg.model, train_cfg=None, test_cfg=cfg.get('test_cfg'))
    if (len(cfg.module_hooks) > 0):
        register_module_hooks(model, cfg.module_hooks)
    fp16_cfg = cfg.get('fp16', None)
    if (fp16_cfg is not None):
        # Patch the model for half-precision inference before weights load.
        wrap_fp16_model(model)
    load_checkpoint(model, args.checkpoint, map_location='cpu')
    if args.fuse_conv_bn:
        # Fold BatchNorm into the preceding convolutions for faster inference.
        model = fuse_conv_bn(model)
    if (not distributed):
        model = MMDataParallel(model, device_ids=[0])
        outputs = single_gpu_test(model, data_loader)
    else:
        model = MMDistributedDataParallel(model.cuda(), device_ids=[torch.cuda.current_device()], broadcast_buffers=False)
        outputs = multi_gpu_test(model, data_loader, args.tmpdir, args.gpu_collect)
    return outputs
class KNNOperation(Function):
    """Autograd wrapper around the k-nearest-neighbour search `pl.knn_points`.

    NOTE(review): no `backward` is defined, so gradients do not flow through
    this op (index outputs are inherently non-differentiable anyway).
    """

    @staticmethod
    def forward(ctx, pointsa, pointsb, knn):
        # BUG FIX: torch.autograd.Function requires `forward` to be a
        # @staticmethod (it is written with `ctx` as its first argument);
        # calling KNNOperation.apply(...) without the decorator fails on
        # modern PyTorch.
        nnidx = pl.knn_points(pointsa.contiguous(), pointsb.contiguous(), knn)
        return nnidx
def find_available_plugins(loaded=False):
    """Map each plugin name to the public functions it provides.

    When `loaded` is True, only plugins currently registered in
    `plugin_store` (i.e. active) are included; otherwise every plugin in
    `plugin_provides` is listed.  Names starting with '_' are filtered out.
    """
    active = {name for provided in plugin_store.values() for (name, _) in provided}
    return {name: [f for f in plugin_provides[name] if not f.startswith('_')]
            for name in plugin_provides
            if (not loaded) or (name in active)}
def arch_mnasnet_small(variant, feat_multiplier=1.0, **kwargs):
    """Build the model kwargs for the MnasNet-Small architecture.

    `variant` is accepted for interface uniformity with sibling builders but
    is not consulted.  Extra `kwargs` are forwarded into the result dict.
    """
    # Per-stage block strings: ds = depthwise-separable, ir = inverted
    # residual; r/k/s/e/c/se encode repeats, kernel, stride, expansion,
    # channels and squeeze-excite ratio.
    stage_defs = [
        ['ds_r1_k3_s1_c8'],
        ['ir_r1_k3_s2_e3_c16'],
        ['ir_r2_k3_s2_e6_c16'],
        ['ir_r4_k5_s2_e6_c32_se0.25'],
        ['ir_r3_k3_s1_e6_c32_se0.25'],
        ['ir_r3_k5_s2_e6_c88_se0.25'],
        ['ir_r1_k3_s1_e6_c144'],
    ]
    return dict(block_defs=decode_arch_def(stage_defs), stem_size=8, feat_multiplier=feat_multiplier, **kwargs)
class ClassMemDataLoader():
    """Data loader that keeps the whole dataset resident on `device`.

    Supports both shuffled full-dataset batches (`sample` / iteration) and
    class-conditional batches (`class_sample`).
    """

    def __init__(self, dataset, batch_size, drop_last=False, device='cuda'):
        self.device = device
        self.batch_size = batch_size
        self.dataset = dataset
        # Move every sample onto the target device once, up front.
        self.data = [d[0].to(device) for d in dataset]
        self.targets = torch.tensor(dataset.targets, dtype=torch.long, device=device)
        sampler = torch.utils.data.SubsetRandomSampler([i for i in range(len(dataset))])
        self.batch_sampler = torch.utils.data.BatchSampler(sampler, batch_size=batch_size, drop_last=drop_last)
        # _RepeatSampler lets us draw batches forever without re-creating iterators.
        self.iterator = iter(_RepeatSampler(self.batch_sampler))
        self.nclass = dataset.nclass
        # Per-class index lists for class-conditional sampling.
        self.cls_idx = [[] for _ in range(self.nclass)]
        for i in range(len(dataset)):
            self.cls_idx[self.targets[i]].append(i)
        self.class_sampler = ClassBatchSampler(self.cls_idx, self.batch_size, drop_last=True)
        # Constant label batch per class (row c is batch_size copies of c).
        # FIX: built with torch.arange instead of torch.tensor over a list of
        # numpy arrays (slow, deprecated construction path); values/dtype/shape
        # are identical.
        self.cls_targets = torch.arange(self.nclass, dtype=torch.long, device=self.device).unsqueeze(1).repeat(1, batch_size)
        self.convert = None
        if (self.data[0].dtype == torch.uint8):
            # uint8 images are converted to float lazily, at sample time.
            self.convert = transforms.ConvertImageDtype(torch.float)

    def class_sample(self, c, ipc=(- 1)):
        """Return (data, labels) for one batch drawn from class `c`.

        With ipc > 0 the first `ipc` examples of the class are used;
        otherwise a random class batch is drawn.
        NOTE(review): the returned label row always has batch_size entries,
        even when ipc < batch_size samples are returned -- confirm callers
        slice accordingly.
        """
        if (ipc > 0):
            indices = self.cls_idx[c][:ipc]
        else:
            indices = next(self.class_sampler.samplers[c])
        data = torch.stack([self.data[i] for i in indices])
        if (self.convert is not None):  # FIX: identity check, not != None
            data = self.convert(data)
        return (data, self.cls_targets[c])

    def sample(self):
        """Return one shuffled (data, target) batch from the whole dataset."""
        indices = next(self.iterator)
        data = torch.stack([self.data[i] for i in indices])
        if (self.convert is not None):  # FIX: identity check, not != None
            data = self.convert(data)
        target = self.targets[indices]
        return (data, target)

    def __len__(self):
        return len(self.batch_sampler)

    def __iter__(self):
        for _ in range(len(self)):
            (data, target) = self.sample()
            (yield (data, target))
class InferCell(nn.Module):
    """A fixed (discrete) NAS cell instantiated from a searched genotype.

    Takes the outputs of the two preceding cells, preprocesses them to C
    channels, then applies the genotype's pairs of operations, concatenating
    the states listed in the genotype's concat list as the cell output.
    """

    def __init__(self, genotype, C_prev_prev, C_prev, C, reduction, reduction_prev):
        super(InferCell, self).__init__()
        print(C_prev_prev, C_prev, C)
        if (reduction_prev is None):
            # No grandparent cell: feed s0 straight through.
            self.preprocess0 = Identity()
        elif reduction_prev:
            # Previous cell halved the resolution; match it with a stride-2 reduce.
            self.preprocess0 = FactorizedReduce(C_prev_prev, C, 2)
        else:
            self.preprocess0 = ReLUConvBN(C_prev_prev, C, 1, 1, 0)
        self.preprocess1 = ReLUConvBN(C_prev, C, 1, 1, 0)
        # Pick the op list and output-concat spec for this cell kind.
        if reduction:
            (step_ops, concat) = (genotype.reduce, genotype.reduce_concat)
        else:
            (step_ops, concat) = (genotype.normal, genotype.normal_concat)
        self._steps = len(step_ops)
        self._concat = concat
        self._multiplier = len(concat)
        self._ops = nn.ModuleList()
        self._indices = []  # input-state index for each op, two per step
        for operations in step_ops:
            for (name, index) in operations:
                # In a reduction cell, ops reading the two input states (index
                # 0 or 1) use stride 2 to halve the resolution.
                stride = (2 if (reduction and (index < 2)) else 1)
                if ((reduction_prev is None) and (index == 0)):
                    # s0 was not preprocessed, so it still has C_prev_prev channels.
                    op = OPS[name](C_prev_prev, C, stride, True)
                else:
                    op = OPS[name](C, C, stride, True)
                self._ops.append(op)
                self._indices.append(index)

    def extra_repr(self):
        return '{name}(steps={_steps}, concat={_concat})'.format(name=self.__class__.__name__, **self.__dict__)

    def forward(self, S0, S1, drop_prob):
        """Compute the cell output from the two predecessor feature maps.

        Each intermediate state is the sum of two op outputs selected by
        self._indices; drop-path is applied during training to non-identity
        ops when drop_prob > 0.
        """
        s0 = self.preprocess0(S0)
        s1 = self.preprocess1(S1)
        states = [s0, s1]
        for i in range(self._steps):
            h1 = states[self._indices[(2 * i)]]
            h2 = states[self._indices[((2 * i) + 1)]]
            op1 = self._ops[(2 * i)]
            op2 = self._ops[((2 * i) + 1)]
            h1 = op1(h1)
            h2 = op2(h2)
            if (self.training and (drop_prob > 0.0)):
                if (not isinstance(op1, Identity)):
                    h1 = drop_path(h1, drop_prob)
                if (not isinstance(op2, Identity)):
                    h2 = drop_path(h2, drop_prob)
            state = (h1 + h2)
            states += [state]
        # Channel-concatenate the designated states as the cell output.
        output = torch.cat([states[i] for i in self._concat], dim=1)
        return output
def ensure_config(impdb: str, branch: str, update: bool) -> bool:
    """Make sure the config for `impdb` on `branch` is present and current.

    Returns True when an acceptable local copy already exists; returns False
    after triggering a fresh download (missing, forced by `update` and found
    obsolete, or obsolete).
    """
    meta = ((config_directory() / branch) / impdb) / '_meta.json'
    if meta.exists() and (not update):
        return True
    # is_obsolete is consulted unconditionally, exactly as before (it may
    # refresh remote metadata as a side effect).
    obsolete = is_obsolete(impdb, branch)
    if meta.exists() and (not obsolete):
        return True
    download_config(impdb, branch)
    return False
class MemoryViewSliceNode(MemoryViewIndexNode):
    """Indexing of a memoryview that yields another memoryview slice
    (at least one index component is a slice)."""

    is_memview_slice = True
    # True when every index is a bare ':' (no start/stop/step), i.e. the
    # slicing is a no-op and the base object can be reused directly.
    is_ellipsis_noop = False
    is_memview_scalar_assignment = False
    is_memview_index = False
    is_memview_broadcast = False

    def analyse_ellipsis_noop(self, env, getting):
        """Mark this node as a no-op if all indices are full ':' slices."""
        self.is_ellipsis_noop = all(((index.is_slice and index.start.is_none and index.stop.is_none and index.step.is_none) for index in self.indices))
        if self.is_ellipsis_noop:
            self.type = self.base.type

    def analyse_operation(self, env, getting, axes):
        """Type-check the slicing and set up the resulting slice type."""
        from . import MemoryView
        if (not getting):
            # Assignment target: element-wise broadcasting may be needed.
            self.is_memview_broadcast = True
            self.none_error_message = 'Cannot assign to None memoryview slice'
        else:
            self.none_error_message = 'Cannot slice None memoryview slice'
        self.analyse_ellipsis_noop(env, getting)
        if self.is_ellipsis_noop:
            return
        self.index = None
        self.is_temp = True
        self.use_managed_ref = True
        if (not MemoryView.validate_axes(self.pos, axes)):
            self.type = error_type
            return
        self.type = PyrexTypes.MemoryViewSliceType(self.base.type.dtype, axes)
        # The generated slice code evaluates the base more than once, so it
        # must be a simple expression or held in a temp.
        if (not (self.base.is_simple() or self.base.result_in_temp())):
            self.base = self.base.coerce_to_temp(env)

    def analyse_assignment(self, rhs):
        """Choose scalar-broadcast assignment vs. slice-to-slice copy."""
        if ((not rhs.type.is_memoryviewslice) and (self.type.dtype.assignable_from(rhs.type) or rhs.type.is_pyobject)):
            return MemoryCopyScalar(self.pos, self)
        else:
            return MemoryCopySlice(self.pos, self)

    def merged_indices(self, indices):
        """Try to merge a subsequent indexing step into this slice's original
        indices; return the combined index list, or None if merging fails."""
        if (not indices):
            return None
        new_indices = self.original_indices[:]
        indices = indices[:]
        for (i, s) in enumerate(self.original_indices):
            if s.is_slice:
                if (s.start.is_none and s.stop.is_none and s.step.is_none):
                    # A full ':' slice absorbs the next new index.
                    new_indices[i] = indices[0]
                    indices.pop(0)
                    if (not indices):
                        return new_indices
                else:
                    # Cannot merge into a bounded slice.
                    return None
            elif (not s.type.is_int):
                return None
        if indices:
            # Remaining indices apply to trailing, so-far unindexed dimensions.
            if ((len(new_indices) + len(indices)) > self.base.type.ndim):
                return None
            new_indices += indices
        return new_indices

    def is_simple(self):
        if self.is_ellipsis_noop:
            # A no-op slice is as simple as its base.
            return (self.base.is_simple() or self.base.result_in_temp())
        return self.result_in_temp()

    def calculate_result_code(self):
        # Only reached for the no-op case: reuse the base's result directly.
        return self.base.result()

    def generate_result_code(self, code):
        if self.is_ellipsis_noop:
            return
        buffer_entry = self.buffer_entry()
        have_gil = (not self.in_nogil_context)
        have_slices = False
        # Re-attach the analysed (coerced) index expressions in self.indices
        # to the start/stop/step slots of the original syntactic indices.
        it = iter(self.indices)
        for index in self.original_indices:
            if index.is_slice:
                have_slices = True
                if (not index.start.is_none):
                    index.start = next(it)
                if (not index.stop.is_none):
                    index.stop = next(it)
                if (not index.step.is_none):
                    index.step = next(it)
            else:
                next(it)
        # Every analysed index must have been consumed.
        assert (not list(it))
        buffer_entry.generate_buffer_slice_code(code, self.original_indices, self.result(), have_gil=have_gil, have_slices=have_slices, directives=code.globalstate.directives)

    def generate_assignment_code(self, rhs, code, overloaded_assignment=False):
        if self.is_ellipsis_noop:
            # No slicing work: only the subexpressions need evaluating.
            self.generate_subexpr_evaluation_code(code)
        else:
            self.generate_evaluation_code(code)
        if self.is_memview_scalar_assignment:
            self.generate_memoryviewslice_assign_scalar_code(rhs, code)
        else:
            self.generate_memoryviewslice_setslice_code(rhs, code)
        if self.is_ellipsis_noop:
            self.generate_subexpr_disposal_code(code)
        else:
            self.generate_disposal_code(code)
        rhs.generate_disposal_code(code)
        rhs.free_temps(code)
def data_parallel(f, input, params, mode, device_ids, output_device=None):
    """Run `f(input, params, mode)` replicated across `device_ids`.

    `params` is a name->tensor dict; its tensors are broadcast to every
    device, the input is scattered, the replicas run in parallel and their
    outputs are gathered on `output_device` (defaults to the first device).
    A single device id short-circuits to a plain call.
    """
    device_ids = list(device_ids)
    if output_device is None:
        output_device = device_ids[0]
    if len(device_ids) == 1:
        # Single device: no broadcast/scatter machinery needed.
        return f(input, params, mode)
    # Broadcast.apply returns the copies device-major: replica j's copy of
    # the i-th parameter sits at flat index j * len(params) + i.
    names = list(params.keys())
    n = len(names)
    flat_copies = Broadcast.apply(device_ids, *params.values())
    per_replica_params = []
    for j in range(len(device_ids)):
        per_replica_params.append({name: flat_copies[(j * n) + i] for (i, name) in enumerate(names)})
    workers = [partial(f, params=p, mode=mode) for p in per_replica_params]
    inputs = scatter([input], device_ids)
    outputs = parallel_apply(workers, inputs)
    return gather(outputs, output_device)
class SemistandardSkewTableaux_all(SemistandardSkewTableaux):
    """All semistandard skew tableaux, optionally with a maximum entry."""

    def __init__(self, max_entry):
        SemistandardSkewTableaux.__init__(self, category=InfiniteEnumeratedSets())
        # None means "no bound": entries may be arbitrarily large.
        self.max_entry = PlusInfinity() if max_entry is None else max_entry

    def _repr_(self):
        if self.max_entry == PlusInfinity():
            return 'Semistandard skew tableaux'
        return 'Semistandard skew tableaux with maximum entry {}'.format(self.max_entry)

    def __iter__(self):
        # Enumerate by size n = 0, 1, 2, ...  Within each size, the entry
        # bound is n itself in the unbounded case (a size-n tableau never
        # needs entries above n), otherwise the fixed max_entry.
        unbounded = (self.max_entry == PlusInfinity())
        n = 0
        while True:
            cap = n if unbounded else self.max_entry
            for skew_tab in SemistandardSkewTableaux_size(n, cap):
                yield self.element_class(self, skew_tab)
            n += 1
class ClsCntRegHead(nn.Module):
    """FCOS-style shared head producing classification, center-ness and box
    regression maps for each FPN level.

    The center-ness branch hangs off the regression tower when `cnt_on_reg`
    is True, else off the classification tower.
    """

    def __init__(self, in_channel, class_num, GN=True, cnt_on_reg=True, prior=0.01):
        super(ClsCntRegHead, self).__init__()
        self.prior = prior
        self.class_num = class_num
        self.cnt_on_reg = cnt_on_reg
        cls_layers = []
        reg_layers = []
        # Two parallel 5-conv towers.  Layers are created in the same
        # interleaved order as before (cls then reg per iteration), so any
        # RNG-dependent default initialisation is unchanged.
        for _ in range(5):
            for tower in (cls_layers, reg_layers):
                tower.append(nn.Conv2d(in_channel, in_channel, kernel_size=3, padding=1, bias=True))
                if GN:
                    tower.append(nn.GroupNorm(32, in_channel))
                tower.append(nn.ReLU(True))
        self.cls_conv = nn.Sequential(*cls_layers)
        self.reg_conv = nn.Sequential(*reg_layers)
        self.cls_logits = nn.Conv2d(in_channel, class_num, kernel_size=3, padding=1)
        self.cnt_logits = nn.Conv2d(in_channel, 1, kernel_size=3, padding=1)
        self.reg_pred = nn.Conv2d(in_channel, 4, kernel_size=3, padding=1)
        self.apply(self.init_conv_RandomNormal)
        # Focal-loss prior: bias the cls logits so the initial foreground
        # probability is approximately `prior`.
        nn.init.constant_(self.cls_logits.bias, (- math.log(((1 - prior) / prior))))
        self.scale_exp = nn.ModuleList([ScaleExp(1.0) for _ in range(6)])

    def init_conv_RandomNormal(self, module, std=0.01):
        """Normal(0, std) init for every conv weight; zero bias."""
        if not isinstance(module, nn.Conv2d):
            return
        nn.init.normal_(module.weight, std=std)
        if module.bias is not None:
            nn.init.constant_(module.bias, 0)

    def forward(self, inputs):
        """Map FPN features to per-level (cls_logits, cnt_logits, reg_preds) lists."""
        cls_logits = []
        cnt_logits = []
        reg_preds = []
        for (level, feat) in enumerate(inputs):
            cls_feat = self.cls_conv(feat)
            reg_feat = self.reg_conv(feat)
            cls_logits.append(self.cls_logits(cls_feat))
            cnt_source = reg_feat if self.cnt_on_reg else cls_feat
            cnt_logits.append(self.cnt_logits(cnt_source))
            reg_preds.append(self.scale_exp[level](self.reg_pred(reg_feat)))
        return (cls_logits, cnt_logits, reg_preds)
def test_case109():
    """Batch-upsert the `subdata109` entities against the NGSI-LD broker and
    expect a 404 response.

    NOTE(review): the Link header value looks truncated -- `rel="` is not
    followed by a relation URI before `type=...`.  Confirm against the
    original suite/spec; a JSON-LD context link normally reads
    `<ctx>; rel="http://www.w3.org/ns/json-ld#context"; type="application/ld+json"`.
    The string is left untouched here because it is runtime behavior.
    """
    url = (brokerIp + '/ngsi-ld/v1/entityOperations/upsert')
    headers = {'Content-Type': 'application/json', 'Accept': 'application/ld+json', 'Link': '<{{link}}>; rel=" type="application/ld+json"'}
    r = requests.post(url, data=json.dumps(ld_data.subdata109), headers=headers)
    print(r.content)
    print(r.status_code)
    assert (r.status_code == 404)
class Meld():
    """Integer-packed open meld (chi / pon / kan) for the Mahjong environment.

    Bit layout, low to high: action [bits 0-6] | target tile id [bits 7-12]
    | src seat offset [bits 13-14].  Helpers are written jax-style so they
    work on traced values.

    NOTE(review): every method takes the packed meld (not ``self``) as its
    first argument and is only ever invoked on the class; these look like
    staticmethods whose decorators were lost -- confirm against upstream.
    """

    def init(action, target, src):
        # Pack the three fields into a single int32.
        return (((jnp.int32(src) << 13) | (jnp.int32(target) << 7)) | jnp.int32(action))

    def to_str(meld) -> str:
        """Render the meld in tenhou-like notation; brackets mark the claimed tile."""
        action = Meld.action(meld)
        target = Meld.target(meld)
        src = Meld.src(meld)
        # suit index (m/p/s/z) and 1-based number within the suit
        (suit, num) = ((target // 9), ((target % 9) + 1))
        if (action == Action.PON):
            if (src == 1):
                return '{}{}[{}]{}'.format(num, num, num, ['m', 'p', 's', 'z'][suit])
            elif (src == 2):
                return '{}[{}]{}{}'.format(num, num, num, ['m', 'p', 's', 'z'][suit])
            elif (src == 3):
                return '[{}]{}{}{}'.format(num, num, num, ['m', 'p', 's', 'z'][suit])
        elif Action.is_selfkan(action):
            if (src == 0):
                # closed kan: no claimed tile to bracket
                return '{}{}{}{}{}'.format(num, num, num, num, ['m', 'p', 's', 'z'][suit])
            if (src == 1):
                return '{}{}[{}{}]{}'.format(num, num, num, num, ['m', 'p', 's', 'z'][suit])
            elif (src == 2):
                return '{}[{}{}]{}{}'.format(num, num, num, num, ['m', 'p', 's', 'z'][suit])
            elif (src == 3):
                return '[{}{}]{}{}{}'.format(num, num, num, num, ['m', 'p', 's', 'z'][suit])
        elif (action == Action.MINKAN):
            if (src == 1):
                return '{}{}{}[{}]{}'.format(num, num, num, num, ['m', 'p', 's', 'z'][suit])
            elif (src == 2):
                return '{}[{}]{}{}{}'.format(num, num, num, num, ['m', 'p', 's', 'z'][suit])
            elif (src == 3):
                return '[{}]{}{}{}{}'.format(num, num, num, num, ['m', 'p', 's', 'z'][suit])
        elif (Action.CHI_L <= action <= Action.CHI_R):
            assert (src == 3)
            # position of the claimed tile within the run (0, 1 or 2)
            pos = (action - Action.CHI_L)
            t = [((num - pos) + i) for i in range(3)]
            # move the claimed tile to the front so it gets the brackets
            t.insert(0, t.pop(pos))
            return '[{}]{}{}{}'.format(*t, ['m', 'p', 's', 'z'][suit])
        assert False

    def src(meld) -> int:
        # seat offset of the discarder (0 = self, e.g. for closed kan)
        return ((meld >> 13) & 3)

    def target(meld) -> int:
        # tile id of the claimed/central tile
        return ((meld >> 7) & 63)

    def action(meld) -> int:
        return (meld & 127)

    def suited_pung(meld) -> int:
        """Bitmask with bit `target` set iff the meld is a pon/kan of a suited tile (id < 27)."""
        action = Meld.action(meld)
        target = Meld.target(meld)
        is_pung = (((action == Action.PON) | (action == Action.MINKAN)) | Action.is_selfkan(action))
        is_suited_pon = (is_pung & (target < 27))
        return (is_suited_pon << target)

    def chow(meld) -> int:
        """Bitmask with the bit of the run's lowest tile set iff the meld is a chi."""
        action = Meld.action(meld)
        is_chi = ((Action.CHI_L <= action) & (action <= Action.CHI_R))
        pos = (Meld.target(meld) - (action - Action.CHI_L))
        pos *= is_chi
        return (is_chi << pos)

    def is_outside(meld) -> int:
        """Whether the meld contains a terminal or honor tile."""
        action = Meld.action(meld)
        target = Meld.target(meld)
        is_chi = ((Action.CHI_L <= action) & (action <= Action.CHI_R))
        # for a chi, check both ends of the run; otherwise just the tile itself
        return jax.lax.cond(is_chi, (lambda : (Meld._is_outside((target - (action - Action.CHI_L))) | Meld._is_outside(((target - (action - Action.CHI_L)) + 2)))), (lambda : Meld._is_outside(target)))

    def fu(meld) -> int:
        """Fu (minipoints): pon=2, open kan=8, closed kan (src==0)=16;
        doubled when the meld's tile is terminal/honor."""
        action = Meld.action(meld)
        fu = ((((action == Action.PON) * 2) + ((action == Action.MINKAN) * 8)) + ((Action.is_selfkan(action) * 8) * (1 + (Meld.src(meld) == 0))))
        return (fu * (1 + Meld._is_outside(Meld.target(meld))))

    def _is_outside(tile) -> bool:
        # honors (id >= 27) or suit numbers 1/9
        num = (tile % 9)
        return (((tile >= 27) | (num == 0)) | (num == 8))
class AverageValueEstimationEvaluator(EvaluatorProtocol):
    """Average predicted Q-value over observed states.

    For each episode (the constructor-provided ones, or the dataset's when
    none were given), predicts the greedy action at every observation and
    averages the corresponding value estimates.
    """

    # episodes to evaluate on; falls back to the dataset's episodes when None
    _episodes: Optional[Sequence[EpisodeBase]]

    def __init__(self, episodes: Optional[Sequence[EpisodeBase]]=None):
        self._episodes = episodes

    def __call__(self, algo: QLearningAlgoProtocol, dataset: ReplayBuffer) -> float:
        collected: list = []
        target_episodes = self._episodes if self._episodes else dataset.episodes
        for episode in target_episodes:
            for window in make_batches(episode, WINDOW_SIZE, dataset.transition_picker):
                greedy_actions = algo.predict(window.observations)
                estimated = algo.predict_value(window.observations, greedy_actions)
                collected.extend(estimated.tolist())
        return float(np.mean(collected))
def ocp(F, bcs, J, y, u, p, config_ocp):
    """Assemble a cashocs OptimalControlProblem from the state equation F,
    boundary conditions bcs, cost functional J, state y, control u,
    adjoint p, and the given cashocs configuration."""
    return cashocs.OptimalControlProblem(F, bcs, J, y, u, p, config=config_ocp)