code
stringlengths
101
5.91M
class DetaPreTrainedModel(metaclass=DummyObject):
    """Import-time placeholder for DetaPreTrainedModel.

    Instantiating it raises an informative error unless the `torch`
    backend is installed (standard transformers dummy-object pattern).
    """

    _backends = ['torch']

    def __init__(self, *args, **kwargs):
        # Fails fast with a clear message when torch is missing.
        requires_backends(self, ['torch'])
def assign_proto(proto, name, val):
    """Recursively assign ``val`` to field ``name`` of protobuf message ``proto``.

    Repeated fields are detected by the presence of an ``extend`` attribute;
    scalars assigned to them are wrapped in a one-element list.  Dict values
    recurse into sub-message fields; lists of dicts add one sub-message per
    entry via ``add()``.

    Modernized: replaced ``six.iteritems`` with the native ``dict.items()``
    (identical behavior on Python 3; removes the six dependency from this
    function).
    """
    is_repeated_field = hasattr(getattr(proto, name), 'extend')
    if is_repeated_field and not isinstance(val, list):
        val = [val]
    if isinstance(val, list):
        if isinstance(val[0], dict):
            # Repeated message field: one sub-message per dict entry.
            for item in val:
                proto_item = getattr(proto, name).add()
                for k, v in item.items():
                    assign_proto(proto_item, k, v)
        else:
            getattr(proto, name).extend(val)
    elif isinstance(val, dict):
        # Singular message field: recurse into its sub-fields.
        for k, v in val.items():
            assign_proto(getattr(proto, name), k, v)
    else:
        setattr(proto, name, val)
def read_ter_output():
    """Collect per-block TER scores for every team/category pair and write
    them into one `.blocks` file per pair under significance-2005-DARPA-NIST.

    NOTE(review): relies on a module-level `teams` iterable — confirm it is
    defined before this is called.
    """
    params = [('all-cat', 93), ('old-cat', 48), ('new-cat', 44)]
    for team in teams:
        for cat, n_blocks in params:
            filelines = []
            out = ''
            for block_id in range(1, n_blocks + 1):
                with open(f'eval/metric_per_block/ter3ref-{team}-{cat}-{block_id}.txt', 'r') as f:
                    filelines += [line for line in f]
                # The TER value sits on the 4th line from the end,
                # third whitespace-separated column.
                ter = filelines[-4].split()[2]
                out += f'Block-{block_id}\t{ter[:4]}\n'
            with open(f'significance-2005-DARPA-NIST/ter3ref-{team}-{cat}.blocks', 'w+') as f_blocks:
                f_blocks.write(out)
    print('Scores were written to the significance-2005-DARPA-NIST directory.')
def dec_recompose(enc_wts):
    """Decrypt and CRT-decompose encrypted weight chunks, then rebuild a
    dict of per-parameter tensors.

    Relies on module-level SEAL objects (`decryptor`, `crtbuilder`, `parms`),
    the scaling constant `m`, and the layout maps `flattened_lengths` /
    `shapes`, all defined elsewhere in this file.
    """
    dec_wts = []
    global_aggregate = {}
    chunks = len(enc_wts)
    for h in range(chunks):
        plain_agg = Plaintext()
        decryptor.decrypt(enc_wts[h], plain_agg)
        crtbuilder.decompose(plain_agg)
        # Collect every CRT coefficient of this chunk.
        dec_wts += [plain_agg.coeff_at(h) for h in range(plain_agg.coeff_count())]
    for h in range(len(dec_wts)):
        # Values above (p-1)/2 encode negatives modulo the plain modulus.
        if (dec_wts[h] > (int((parms.plain_modulus().value() - 1)) / 2)):
            dec_wts[h] = (dec_wts[h] - parms.plain_modulus().value())
    for h in range(len(dec_wts)):
        # Undo the fixed-point scaling applied before encryption
        # (presumably 1000 * m — confirm against the encoding side).
        dec_wts[h] = (float(dec_wts[h]) / (1000 * m))
    pos_start = 0
    # Slice the flat vector back into named parameter tensors.
    for param_tensor in flattened_lengths:
        pos_end = (pos_start + flattened_lengths[param_tensor])
        global_aggregate.update({param_tensor: torch.tensor(dec_wts[pos_start:pos_end]).reshape(shapes[param_tensor])})
        pos_start = pos_end
    return global_aggregate
def get_logger(log_level):
    """Configure the root logger with a timestamped format and return it."""
    fmt = '%(asctime)s - %(levelname)s - %(message)s'
    logging.basicConfig(format=fmt, level=log_level)
    return logging.getLogger()
def load_checkpoint(model, pretrained_path, module=None):
    """Load a (possibly nested / DataParallel-prefixed) checkpoint into `model`.

    Args:
        model: target module; if wrapped (has `.module`), the inner module is loaded.
        pretrained_path: path to the checkpoint file.
        module: optional substring filter — when given, only keys containing it are kept.

    Returns:
        (epoch, metrics): `epoch` entry of the loaded dict (-1 if absent) and a dict
        of entries whose key mentions metric/acc/test/val.

    Raises:
        NotImplementedError: if `pretrained_path` does not exist.
    """
    if (not os.path.exists(pretrained_path)):
        raise NotImplementedError(('no checkpoint file from path %s...' % pretrained_path))
    state_dict = torch.load(pretrained_path, map_location='cpu')
    ckpt_state_dict = state_dict
    # Unwrap common container keys ('model', 'state_dict', ...) when present.
    for key in state_dict.keys():
        if (key in ['model', 'net', 'network', 'state_dict', 'base_model']):
            ckpt_state_dict = ckpt_state_dict[key]
    # Strip DataParallel's 'module.' prefix from every key.
    base_ckpt = {k.replace('module.', ''): v for (k, v) in ckpt_state_dict.items()}
    if (module is not None):
        base_ckpt = {k: v for (k, v) in base_ckpt.items() if (module in k)}
    # BERT-style checkpoints are remapped to ViT naming via a project helper.
    if ('bert' in list(ckpt_state_dict.items())[0][0]):
        base_ckpt = bert2vit_ckpt_rename(ckpt_state_dict)
    # NOTE(review): `state_dict` is rebound here, so the epoch/metric lookups
    # below read from the filtered weights dict, not the original container —
    # confirm this is intended.
    state_dict = base_ckpt
    if hasattr(model, 'module'):
        incompatible = model.module.load_state_dict(base_ckpt, strict=False)
    else:
        incompatible = model.load_state_dict(base_ckpt, strict=False)
    if incompatible.missing_keys:
        logging.info('missing_keys')
        logging.info(get_missing_parameters_message(incompatible.missing_keys))
    if incompatible.unexpected_keys:
        logging.info('unexpected_keys')
        logging.info(get_unexpected_parameters_message(incompatible.unexpected_keys))
    logging.info(f'Successful Loading the ckpt from {pretrained_path}')
    epoch = state_dict.get('epoch', (- 1))
    metrics = {}
    for key in state_dict.keys():
        # Any key mentioning metric/acc/test/val is treated as a stored metric.
        is_metric_key = (sum([(item in key) for item in ['metric', 'acc', 'test', 'val']]) > 0)
        if is_metric_key:
            metrics[key] = state_dict[key]
    logging.info(f'ckpts {epoch} epoch( {metrics} )')
    return (epoch, metrics)
def read_raw(filename): raw_messages = [] for line in open(filename): line = line.strip() tokens = line.split() assert (len(tokens) > 0), 'Blank line in text file {}'.format(filename) user = tokens[1] if (tokens[0] != '==='): user = user[1:(- 1)] if (len(user) == 0): user = tokens[2] raw_messages.append((user, line)) return raw_messages
def preprocess(ori_img):
    """Apply the module-level `transform` to `ori_img` and replicate the
    result into a batch of 8 identical tensors (stacked along dim 0)."""
    transformed = transform(ori_img)
    return torch.stack([transformed] * 8, 0)
class BaseTracker:
    """Base class for visual trackers.

    Defines the tracking interface (`initialize` / `track`) and shared
    visdom-based visualization/UI helpers.
    """

    def __init__(self, params):
        self.params = params
        self.visdom = None

    def predicts_segmentation_mask(self):
        """Whether this tracker outputs a segmentation mask (default: no)."""
        return False

    def initialize(self, image, info: dict) -> dict:
        """Initialize the tracker on the first frame. Must be overridden."""
        raise NotImplementedError

    def track(self, image, info: dict = None) -> dict:
        """Track the target in `image`. Must be overridden."""
        raise NotImplementedError

    def visdom_draw_tracking(self, image, box, segmentation=None):
        """Register the current frame (and boxes / mask) with visdom."""
        # Normalize `box` to an iterable of boxes before registering.
        if isinstance(box, OrderedDict):
            box = [v for (k, v) in box.items()]
        else:
            box = (box,)
        if segmentation is None:
            self.visdom.register((image, *box), 'Tracking', 1, 'Tracking')
        else:
            self.visdom.register((image, *box, segmentation), 'Tracking', 1, 'Tracking')

    def transform_bbox_to_crop(self, box_in, resize_factor, device, box_extract=None, crop_type='template'):
        """Map `box_in` into normalized crop coordinates and return a
        (1, 1, 4) tensor on `device`.

        Raises:
            NotImplementedError: for an unknown `crop_type`.
        """
        if crop_type == 'template':
            crop_sz = torch.Tensor([self.params.template_size, self.params.template_size])
        elif crop_type == 'search':
            crop_sz = torch.Tensor([self.params.search_size, self.params.search_size])
        else:
            raise NotImplementedError
        box_in = torch.tensor(box_in)
        if box_extract is None:
            box_extract = box_in
        else:
            box_extract = torch.tensor(box_extract)
        template_bbox = transform_image_to_crop(box_in, box_extract, resize_factor, crop_sz, normalize=True)
        template_bbox = template_bbox.view(1, 1, 4).to(device)
        return template_bbox

    def _init_visdom(self, visdom_info, debug):
        """Start a Visdom server connection when debugging is enabled."""
        visdom_info = {} if visdom_info is None else visdom_info
        self.pause_mode = False
        self.step = False
        self.next_seq = False
        if debug > 0 and visdom_info.get('use_visdom', True):
            try:
                self.visdom = Visdom(debug, {'handler': self._visdom_ui_handler, 'win_id': 'Tracking'},
                                     visdom_info=visdom_info)
            # BUGFIX: was a bare `except:`, which also swallowed
            # KeyboardInterrupt/SystemExit; narrowed to Exception.
            except Exception:
                time.sleep(0.5)
                print('!!! WARNING: Visdom could not start, so using matplotlib visualization instead !!!\n'
                      '!!! Start Visdom in a separate terminal window by typing \'visdom\' !!!')

    def _visdom_ui_handler(self, data):
        """Keyboard handler: space toggles pause, right-arrow single-steps
        while paused, 'n' skips to the next sequence."""
        if data['event_type'] == 'KeyPress':
            if data['key'] == ' ':
                self.pause_mode = not self.pause_mode
            elif data['key'] == 'ArrowRight' and self.pause_mode:
                self.step = True
            elif data['key'] == 'n':
                self.next_seq = True
def record_output(output, name, output_process, student=False):
    """Extract the interesting part of `output` according to `output_process`
    and append it to the global student/teacher feature store.

    Returns `output` unchanged so the call can be chained transparently.
    """
    recorded_output = output
    if output_process != '':
        if isinstance(output, dict) and output_process in output:
            recorded_output = output[output_process]
        elif isinstance(output, (tuple, list)) and str.isnumeric(output_process):
            recorded_output = output[int(output_process)]
        elif callable(output_process):
            recorded_output = output_process(output)
        else:
            raise NotImplementedError(
                'Current only support get the data with '
                'integer index in case the output is tuple or list and only '
                'need one item or with key in case the output is dict, '
                'or output_process is a function.')
    store = STUDENT_FEATURES if student else TEACHER_FEATURES
    store[name].append(recorded_output)
    return output
def export_onnx_model(model, path, opset=12):
    """Trace `model` with a dummy (100, 3, 224, 224) batch and export it to
    ONNX at `path`, with a dynamic batch dimension on input and output."""
    import torch
    dummy = torch.randn(100, 3, 224, 224, requires_grad=True)
    model(dummy)  # forward pass kept for parity with the original (result unused)
    dynamic = {'input': {0: 'batch_size'}, 'output': {0: 'batch_size'}}
    torch.onnx.export(
        model, dummy, path,
        export_params=True,
        opset_version=opset,
        do_constant_folding=True,
        input_names=['input'],
        output_names=['output'],
        dynamic_axes=dynamic,
    )
def check_nan(list_of_tensors, folder=None):
    """Drop into pdb if any tensor in `list_of_tensors` contains a NaN.

    When `folder` is given, also touches `NAN_ALERT.txt` there so the
    failure is visible on disk. Entries may be None and are skipped.
    """
    found_nan = any(t is not None and torch.isnan(t).any() for t in list_of_tensors)
    if found_nan:
        if folder:
            # Marker file only — contents irrelevant.
            open(folder + '/NAN_ALERT.txt', 'w').close()
        pdb.set_trace()
class BertDictionary(Dictionary):
    """Dictionary pre-populated with BERT special tokens ([CLS]/[MASK]/[SEP])."""

    def __init__(self, pad='[PAD]', unk='[UNK]', cls='[CLS]', mask='[MASK]', sep='[SEP]'):
        super().__init__(pad, unk)
        self.cls_word = cls
        self.mask_word = mask
        self.sep_word = sep
        # Lazily built lookup of which symbols begin a word (no '##' prefix).
        self.is_start = None
        self.nspecial = len(self.symbols)

    def class_positive(self):
        """Alias for cls(), kept for interface compatibility."""
        return self.cls()

    def cls(self):
        """Index of the [CLS] token (added on first use)."""
        return self.add_symbol(self.cls_word)

    def mask(self):
        """Index of the [MASK] token (added on first use)."""
        return self.add_symbol(self.mask_word)

    def sep(self):
        """Index of the [SEP] token (added on first use)."""
        return self.add_symbol(self.sep_word)

    def is_start_word(self, idx):
        """True when symbol `idx` starts a word (WordPiece continuation
        tokens carry a '##' prefix)."""
        if self.is_start is None:
            self.is_start = [not self.symbols[i].startswith('##') for i in range(len(self))]
        return self.is_start[idx]
class TFFunnelForQuestionAnswering(metaclass=DummyObject):
    """Import-time placeholder raising a helpful error unless the
    TensorFlow backend is installed."""

    _backends = ['tf']

    def __init__(self, *args, **kwargs):
        # Fails fast with a clear message when tf is missing.
        requires_backends(self, ['tf'])
class NdimGaussian:
    """Gaussian in information form: natural vector `eta` and precision `lam`.

    Mis-sized or missing parameters silently fall back to zeros.
    """

    def __init__(self, dimensionality, eta=None, lam=None):
        self.dim = dimensionality
        if eta is not None and len(eta) == self.dim:
            self.eta = eta
        else:
            self.eta = np.zeros(self.dim)
        if lam is not None and lam.shape == (self.dim, self.dim):
            self.lam = lam
        else:
            self.lam = np.zeros([self.dim, self.dim])
class deeplabv3plus_en(nn.Module):
    """DeepLabV3+ decoder with an extra class-activation-map (CAM) branch,
    built on an Xception backbone (output stride 16).
    """

    def __init__(self, num_classes=None):
        super(deeplabv3plus_en, self).__init__()
        self.MODEL_NUM_CLASSES = num_classes
        self.backbone = None
        self.backbone_layers = None
        self.aspp = ASPP(dim_in=2048, dim_out=256, rate=(16 // 16), bn_mom=0.99)
        self.dropout1 = nn.Dropout(0.5)
        # Fuses ASPP features with the (single-channel) CAM input: 256+1 -> 256.
        self.cam_conv = nn.Sequential(nn.Conv2d((256 + 1), 256, 1, 1, padding=(1 // 2), bias=True), nn.BatchNorm2d(256), nn.ReLU(inplace=True))
        # Upsample from stride 16 to stride 4 to match the shallow features.
        self.upsample_sub = nn.UpsamplingBilinear2d(scale_factor=(16 // 4))
        self.upsample4 = nn.UpsamplingBilinear2d(scale_factor=4)
        # Projects low-level backbone features down to 48 channels.
        self.shortcut_conv = nn.Sequential(nn.Conv2d(256, 48, 1, 1, padding=(1 // 2), bias=True), nn.BatchNorm2d(48), nn.ReLU(inplace=True))
        self.cat_conv = nn.Sequential(nn.Conv2d((256 + 48), 256, 3, 1, padding=1, bias=True), nn.BatchNorm2d(256), nn.ReLU(inplace=True), nn.Dropout(0.5), nn.Conv2d(256, 256, 3, 1, padding=1, bias=True), nn.BatchNorm2d(256), nn.ReLU(inplace=True), nn.Dropout(0.1))
        self.cls_conv = nn.Conv2d(256, self.MODEL_NUM_CLASSES, 1, 1, padding=0)
        # Kaiming init for convs, constant init for batch norms.
        for m in self.modules():
            if isinstance(m, nn.Conv2d):
                nn.init.kaiming_normal_(m.weight, mode='fan_out', nonlinearity='relu')
            elif isinstance(m, nn.BatchNorm2d):
                nn.init.constant_(m.weight, 1)
                nn.init.constant_(m.bias, 0)
        self.backbone = xception.Xception(os=16)
        self.backbone_layers = self.backbone.get_layers()

    def forward(self, x, cla_cam):
        """Segment `x`, conditioning the decoder on the class-activation map
        `cla_cam` (concatenated to the ASPP output)."""
        x_bottom = self.backbone(x)
        layers = self.backbone.get_layers()
        feature_aspp = self.aspp(layers[(- 1)])
        feature_aspp = self.dropout1(feature_aspp)
        feature_cat0 = torch.cat([feature_aspp, cla_cam], 1)
        feature_cam = self.cam_conv(feature_cat0)
        feature_cam = self.upsample_sub(feature_cam)
        # Skip connection from the shallowest backbone stage.
        feature_shallow = self.shortcut_conv(layers[0])
        feature_cat1 = torch.cat([feature_cam, feature_shallow], 1)
        result = self.cat_conv(feature_cat1)
        result = self.cls_conv(result)
        result = self.upsample4(result)
        return result
def get_task_dataloader(task_name, set_name, tokenizer, args, sampler, batch_size=None, knowledge=None, extra_knowledge=None):
    """Dispatch to the RACE or GLUE dataloader builder based on the task name."""
    builder_args = (task_name, set_name, tokenizer, args, sampler, batch_size, knowledge, extra_knowledge)
    if 'race' in task_name.lower():
        return get_race_task_dataloader(*builder_args)
    return get_glue_task_dataloader(*builder_args)
class TestTreeModel():
    """Integration tests for XGBoost / LightGBM Spark wrappers.

    Each test short-circuits on unsupported platforms (darwin/win32) or
    Spark versions; fixtures live under ../resources/xgbclassifier.
    """

    def setup_method(self, method):
        # Fresh local Spark context per test method.
        sparkConf = init_spark_conf().setMaster('local[1]').setAppName('testTreeModel')
        self.sc = init_nncontext(sparkConf)
        self.sqlContext = SQLContext(self.sc)
        self.resource_path = os.path.join(os.path.split(__file__)[0], '../resources')
        assert (self.sc.appName == 'testTreeModel')

    def teardown_method(self, method):
        self.sc.stop()

    def test_XGBClassifierModel_predict(self):
        """Load a pretrained XGB classifier and score the fixture CSV."""
        from sys import platform
        if (platform in ('darwin', 'win32')):
            return
        resource_path = os.path.join(os.path.split(__file__)[0], '../resources')
        path = os.path.join(resource_path, 'xgbclassifier/')
        modelPath = (path + 'XGBClassifer.bin')
        filePath = (path + 'test.csv')
        model = XGBClassifierModel.loadModel(modelPath, 2)
        df = self.sqlContext.read.csv(filePath, sep=',', inferSchema=True, header=True)
        # Pack the four feature columns into a DenseVector column.
        df = df.select(array('age', 'gender', 'jointime', 'star').alias('features')).withColumn('features', udf((lambda x: DenseVector(x)), VectorUDT())('features'))
        model.setFeaturesCol('features')
        predict = model.transform(df)
        assert (predict.count() == 14)

    def test_XGBClassifier_train(self):
        """Train an XGB classifier end-to-end on the fixture CSV."""
        from sys import platform
        if (platform in ('darwin', 'win32')):
            return
        path = os.path.join(self.resource_path, 'xgbclassifier/')
        modelPath = (path + 'XGBClassifer.bin')
        filePath = (path + 'test.csv')
        df = self.sqlContext.read.csv(filePath, sep=',', inferSchema=True, header=True)
        df = df.select(array('age', 'gender', 'jointime', 'star').alias('features'), 'label').withColumn('features', udf((lambda x: DenseVector(x)), VectorUDT())('features'))
        params = {'eta': 0.2, 'max_depth': 4, 'max_leaf_nodes': 8, 'objective': 'binary:logistic', 'num_round': 100}
        classifier = XGBClassifier(params)
        xgbmodel = classifier.fit(df)
        xgbmodel.setFeaturesCol('features')
        predicts = xgbmodel.transform(df)
        assert (predicts.count() == 14)

    def test_XGBClassfier_feature_importances(self):
        """Feature importances / fscore / gain scores are size-consistent."""
        from sys import platform
        if (platform in ('darwin', 'win32')):
            return
        path = os.path.join(self.resource_path, 'xgbclassifier/')
        modelPath = (path + 'XGBClassifer.bin')
        filePath = (path + 'test.csv')
        df = self.sqlContext.read.csv(filePath, sep=',', inferSchema=True, header=True)
        df = df.select(array('age', 'gender', 'jointime', 'star').alias('features'), 'label').withColumn('features', udf((lambda x: DenseVector(x)), VectorUDT())('features'))
        params = {'eta': 0.2, 'max_depth': 4, 'max_leaf_nodes': 8, 'objective': 'binary:logistic', 'num_round': 100}
        classifier = XGBClassifier(params)
        xgbmodel = classifier.fit(df)
        xgbmodel.setFeaturesCol('features')
        fscore = xgbmodel.getFScore()
        score = xgbmodel.getScore(importance_type='gain')
        feature_importances = xgbmodel.feature_importances
        assert (len(fscore) == len(score))
        assert (len(feature_importances) >= len(score))

    def test_XGBRegressor(self):
        """Train, save, reload an XGB regressor; reloaded predictions match."""
        from sys import platform
        if (platform in ('darwin', 'win32')):
            return
        if (self.sc.version.startswith('3.1') or self.sc.version.startswith('2.4')):
            data = self.sc.parallelize([(1.0, 2.0, 3.0, 4.0, 5.0, 1.0, 2.0, 4.0, 8.0, 3.0, 116.3668), (1.0, 3.0, 8.0, 6.0, 5.0, 9.0, 5.0, 6.0, 7.0, 4.0, 116.367), (2.0, 1.0, 5.0, 7.0, 6.0, 7.0, 4.0, 1.0, 2.0, 3.0, 116.367), (2.0, 1.0, 4.0, 3.0, 6.0, 1.0, 3.0, 2.0, 1.0, 3.0, 116.3668)])
            columns = ['f1', 'f2', 'f3', 'f4', 'f5', 'f6', 'f7', 'f8', 'f9', 'f10', 'label']
            df = data.toDF(columns)
            from pyspark.ml.feature import VectorAssembler
            vecasembler = VectorAssembler(inputCols=columns, outputCol='features')
            assembledf = vecasembler.transform(df).select('features', 'label').cache()
            assembledf.printSchema()
            testdf = vecasembler.transform(df).select('features', 'label').cache()
            params = {'eta': 0.2, 'max_depth': 4, 'max_leaf_nodes': 8}
            xgbRf0 = XGBRegressor(params)
            xgbRf0.setNthread(1)
            xgbRf0.setNumRound(10)
            xgbmodel = xgbRf0.fit(assembledf)
            xgbmodel.save('/tmp/modelfile/')
            xgbmodel.setFeaturesCol('features')
            yxgb = xgbmodel.transform(assembledf)
            model = xgbmodel.load('/tmp/modelfile/')
            model.setFeaturesCol('features')
            y0 = model.transform(assembledf)
            # Round-tripped model must produce identical rows.
            assert (y0.subtract(yxgb).count() == 0)

    def test_LGBMClassifier_fit_transform(self):
        """LightGBM classifier with setter-style configuration."""
        if (float(self.sc.version[:3]) < 3.1):
            return
        path = os.path.join(self.resource_path, 'xgbclassifier/')
        filePath = (path + 'test.csv')
        df = self.sqlContext.read.csv(filePath, sep=',', inferSchema=True, header=True)
        df = df.select(array('age', 'gender', 'jointime', 'star').alias('features'), 'label').withColumn('features', udf((lambda x: DenseVector(x)), VectorUDT())('features'))
        classifier = LightGBMClassifier()
        classifier.setObjective('binary')
        classifier.setMaxDepth(4)
        classifier.setLearningRate(0.2)
        model = classifier.fit(df)
        predicts = model.transform(df)
        print(predicts.filter((predicts['prediction'] == 1.0)).count())
        assert (predicts.count() == 14)

    def test_LGBMClassifier_param_map(self):
        """LightGBM classifier configured via a parameter dict."""
        if (float(self.sc.version[:3]) < 3.1):
            return
        path = os.path.join(self.resource_path, 'xgbclassifier/')
        filePath = (path + 'test.csv')
        df = self.sqlContext.read.csv(filePath, sep=',', inferSchema=True, header=True)
        df = df.select(array('age', 'gender', 'jointime', 'star').alias('features'), 'label').withColumn('features', udf((lambda x: DenseVector(x)), VectorUDT())('features'))
        parammap = {'boosting_type': 'gbdt', 'num_leaves': 2, 'max_depth': 2, 'learning_rate': 0.3, 'num_iterations': 10, 'bin_construct_sample_cnt': 5, 'objective': 'binary', 'min_split_gain': 0.1, 'min_sum_hessian_in_leaf': 0.01, 'min_data_in_leaf': 1, 'bagging_fraction': 0.4, 'bagging_freq': 1, 'feature_fraction': 0.4, 'lambda_l1': 0.1, 'lambda_l2': 0.1, 'num_threads': 2, 'early_stopping_round': 10, 'max_bin': 100}
        classifier = LightGBMClassifier(parammap)
        model = classifier.fit(df)
        predicts = model.transform(df)
        print(predicts.filter((predicts['prediction'] == 1.0)).count())
        assert (predicts.count() == 14)

    def test_LGBMClassifierModel_save_load(self):
        """Save/load round trip for a trained LightGBM classifier."""
        if (float(self.sc.version[:3]) < 3.1):
            return
        path = os.path.join(self.resource_path, 'xgbclassifier/')
        filePath = (path + 'test.csv')
        df = self.sqlContext.read.csv(filePath, sep=',', inferSchema=True, header=True)
        df = df.select(array('age', 'gender', 'jointime', 'star').alias('features'), 'label').withColumn('features', udf((lambda x: DenseVector(x)), VectorUDT())('features'))
        classifier = LightGBMClassifier()
        classifier.setObjective('binary')
        model = classifier.fit(df)
        predicts = model.transform(df)
        model.saveModel('/tmp/lightgbmClassifier1')
        model1 = LightGBMClassifierModel.loadModel('/tmp/lightgbmClassifier1')
        predicts1 = model1.transform(df)
        assert (predicts1.count() == 14)

    def test_LGBMRegressor_param_map(self):
        """LightGBM regressor configured via a parameter dict (dart/huber)."""
        if (float(self.sc.version[:3]) < 3.1):
            return
        data = self.sc.parallelize([(1.0, 2.0, 3.0, 4.0, 5.0, 1.0, 2.0, 4.0, 8.0, 3.0, 116.3668), (1.0, 3.0, 8.0, 6.0, 5.0, 9.0, 5.0, 6.0, 7.0, 4.0, 116.367), (2.0, 1.0, 5.0, 7.0, 6.0, 7.0, 4.0, 1.0, 2.0, 3.0, 116.367), (2.0, 1.0, 4.0, 3.0, 6.0, 1.0, 3.0, 2.0, 1.0, 3.0, 116.3668), (1.0, 2.0, 3.0, 4.0, 5.0, 1.0, 2.0, 4.0, 8.0, 3.0, 116.3668), (1.0, 3.0, 8.0, 6.0, 5.0, 9.0, 5.0, 6.0, 7.0, 4.0, 116.367), (2.0, 1.0, 5.0, 7.0, 6.0, 7.0, 4.0, 1.0, 2.0, 3.0, 116.367), (2.0, 1.0, 4.0, 3.0, 6.0, 1.0, 3.0, 2.0, 1.0, 3.0, 116.3668)])
        columns = ['f1', 'f2', 'f3', 'f4', 'f5', 'f6', 'f7', 'f8', 'f9', 'f10', 'label']
        df = data.toDF(columns)
        from pyspark.ml.feature import VectorAssembler
        vecasembler = VectorAssembler(inputCols=columns, outputCol='features')
        assembledf = vecasembler.transform(df).select('features', 'label').cache()
        assembledf.printSchema()
        testdf = vecasembler.transform(df).select('features', 'label').cache()
        parammap = {'boosting_type': 'dart', 'num_leaves': 2, 'max_depth': 2, 'learning_rate': 0.3, 'num_iterations': 10, 'bin_construct_sample_cnt': 5, 'objective': 'huber', 'min_split_gain': 0.1, 'min_sum_hessian_in_leaf': 0.01, 'min_data_in_leaf': 1, 'bagging_fraction': 0.4, 'bagging_freq': 1, 'feature_fraction': 0.4, 'lambda_l1': 0.1, 'lambda_l2': 0.1, 'num_threads': 2, 'early_stopping_round': 10, 'max_bin': 100}
        regressor = LightGBMRegressor(parammap)
        model = regressor.fit(assembledf)
        predicts = model.transform(assembledf)
        predicts.show()
        assert (predicts.count() == 8)

    def test_LGBMRegressor_train_transform(self):
        """LightGBM regressor with default parameters."""
        if (float(self.sc.version[:3]) < 3.1):
            return
        data = self.sc.parallelize([(1.0, 2.0, 3.0, 4.0, 5.0, 1.0, 2.0, 4.0, 8.0, 3.0, 116.3668), (1.0, 3.0, 8.0, 6.0, 5.0, 9.0, 5.0, 6.0, 7.0, 4.0, 116.367), (2.0, 1.0, 5.0, 7.0, 6.0, 7.0, 4.0, 1.0, 2.0, 3.0, 116.367), (2.0, 1.0, 4.0, 3.0, 6.0, 1.0, 3.0, 2.0, 1.0, 3.0, 116.3668)])
        columns = ['f1', 'f2', 'f3', 'f4', 'f5', 'f6', 'f7', 'f8', 'f9', 'f10', 'label']
        df = data.toDF(columns)
        from pyspark.ml.feature import VectorAssembler
        vecasembler = VectorAssembler(inputCols=columns, outputCol='features')
        assembledf = vecasembler.transform(df).select('features', 'label').cache()
        assembledf.printSchema()
        testdf = vecasembler.transform(df).select('features', 'label').cache()
        regressor = LightGBMRegressor()
        model = regressor.fit(assembledf)
        predicts = model.transform(assembledf)
        predicts.show()
        assert (predicts.count() == 4)

    def test_LGBMRegressorModel_save_load(self):
        """Save/load round trip for a trained LightGBM regressor."""
        if (float(self.sc.version[:3]) < 3.1):
            return
        data = self.sc.parallelize([(1.0, 2.0, 3.0, 4.0, 5.0, 1.0, 2.0, 4.0, 8.0, 3.0, 116.3668), (1.0, 3.0, 8.0, 6.0, 5.0, 9.0, 5.0, 6.0, 7.0, 4.0, 116.367), (2.0, 1.0, 5.0, 7.0, 6.0, 7.0, 4.0, 1.0, 2.0, 3.0, 116.367), (2.0, 1.0, 4.0, 3.0, 6.0, 1.0, 3.0, 2.0, 1.0, 3.0, 116.3668)])
        columns = ['f1', 'f2', 'f3', 'f4', 'f5', 'f6', 'f7', 'f8', 'f9', 'f10', 'label']
        df = data.toDF(columns)
        from pyspark.ml.feature import VectorAssembler
        vecasembler = VectorAssembler(inputCols=columns, outputCol='features')
        df = vecasembler.transform(df).select('features', 'label').cache()
        regressor = LightGBMRegressor()
        model = regressor.fit(df)
        predicts = model.transform(df)
        model.saveModel('/tmp/lightgbmRegressor1')
        model1 = LightGBMRegressorModel.loadModel('/tmp/lightgbmRegressor1')
        predicts1 = model1.transform(df)
        assert (predicts1.count() == 4)
def save_fc(fp, fc_model):
    """Serialize a fully-connected layer to file object `fp` as raw bytes:
    bias values first, then the weight matrix (numpy native byte order)."""
    for tensor in (fc_model.bias, fc_model.weight):
        tensor.data.numpy().tofile(fp)
def test_param2grid_with_iterable_types():
    """param2grid must expand list-of-iterables params into the full
    cartesian grid, slowest-varying key first, preserving order."""
    params = {'alpha': [np.array([0.1, 0.01]), np.array([0.1, 0.01])], 'regs': [(1, 2), (3, 4)]}
    expected = [
        {'alpha': [a1, a2], 'regs': [r1, r2]}
        for a1 in (0.1, 0.01)
        for a2 in (0.1, 0.01)
        for r1 in (1, 2)
        for r2 in (3, 4)
    ]
    assert list(param2grid(params)) == expected
def initialise_halo_params():
    """Return the fixed halo-simulation constants as a tuple:
    (G, epsilon, limit, radius, num_pos, num_neg, chunks_value, time_steps)."""
    G, epsilon = 1.0, 0.07
    limit, radius = 80000, 4
    num_pos_particles, num_neg_particles = 5000, 45000
    # One fifth of the total particle count (kept as float for parity).
    chunks_value = (num_pos_particles + num_neg_particles) / 5.0
    time_steps = 1000
    return (G, epsilon, limit, radius, num_pos_particles, num_neg_particles, chunks_value, time_steps)
def reset_sim(sim):
    """Re-initialize the arcsim physics from the config directory given in
    sys.argv[1] and print a probe value from the first obstacle's mesh."""
    base = sys.argv[1]
    arcsim.init_physics(base + '/conf.json', base + '/out1', False)
    print(sim.obstacles[0].curr_state_mesh.dummy_node.x)
class writer():
    """Minimal binary packet recorder: writes a one-byte mode header,
    then a sequence of hl2ss-packed packets."""

    def open(self, filename, mode):
        """Open `filename` for writing and emit the mode header byte."""
        self._data = open(filename, 'wb')
        self._data.write(struct.pack('<B', mode))

    def write(self, packet):
        """Append one packed packet to the stream."""
        self._data.write(hl2ss.pack_packet(packet))

    def close(self):
        """Close the underlying file."""
        self._data.close()
class test_dataset():
    """Sequential triple-frame test loader for MoCA / CAD2016 video data.

    Each sample is a list of three consecutive frames plus the ground-truth
    mask of the first frame; `load_data` walks the dataset cyclically.
    """

    def __init__(self, dataset='MoCA', split='TestDataset_per_sq', testsize=256):
        self.testsize = testsize
        self.image_list = []
        self.gt_list = []
        self.extra_info = []
        if (dataset == 'CAD2016'):
            root = Path.db_root_dir('CAD2016')
            img_format = '*.png'
            for scene in os.listdir(osp.join(root)):
                images = sorted(glob(osp.join(root, scene, 'frames', img_format)))
                # NOTE: local `gt_list` intentionally shadows nothing — it is the
                # per-scene mask list, appended into self.gt_list below.
                gt_list = sorted(glob(osp.join(root, scene, 'pseudo', '*.png')))
                # Forward triples (i, i+1, i+2) over the sequence.
                for i in range((len(images) - 2)):
                    self.extra_info += [(scene, i)]
                    self.gt_list += [gt_list[i]]
                    self.image_list += [[images[i], images[(i + 1)], images[(i + 2)]]]
                # Backward triples for the final two frames so every frame is covered.
                for i in range((len(images) - 1), (len(images) - 3), (- 1)):
                    self.gt_list += [gt_list[i]]
                    self.image_list += [[images[i], images[(i - 1)], images[(i - 2)]]]
        else:
            root = Path.db_root_dir('MoCA')
            img_format = '*.jpg'
            data_root = osp.join(root, split)
            print(split)
            for scene in os.listdir(osp.join(data_root)):
                # Frame directory name differs between the two MoCA splits.
                if (split == 'MoCA-Video-Test'):
                    images = sorted(glob(osp.join(data_root, scene, 'Frame', img_format)))
                elif (split == 'TestDataset_per_sq'):
                    images = sorted(glob(osp.join(data_root, scene, 'Imgs', img_format)))
                gt_list = sorted(glob(osp.join(data_root, scene, 'GT', '*.png')))
                for i in range((len(images) - 2)):
                    self.extra_info += [(scene, i)]
                    self.gt_list += [gt_list[i]]
                    self.image_list += [[images[i], images[(i + 1)], images[(i + 2)]]]
        self.transform = transforms.Compose([transforms.Resize((self.testsize, self.testsize)), transforms.ToTensor(), transforms.Normalize([0.485, 0.456, 0.406], [0.229, 0.224, 0.225])])
        self.gt_transform = transforms.ToTensor()
        # Cursor for cyclic iteration over the dataset.
        self.index = 0
        self.size = len(self.gt_list)

    def load_data(self):
        """Return (imgs, gt, names, scene) for the current index and advance
        the cursor cyclically. `imgs` are transformed, batched tensors."""
        imgs = []
        names = []
        for i in range(len(self.image_list[self.index])):
            imgs += [self.rgb_loader(self.image_list[self.index][i])]
            names += [self.image_list[self.index][i].split('/')[(- 1)]]
            # Transform in place right after loading each frame.
            imgs[i] = self.transform(imgs[i]).unsqueeze(0)
        # Scene name is the third-from-last path component.
        scene = self.image_list[self.index][0].split('/')[(- 3)]
        gt = self.binary_loader(self.gt_list[self.index])
        self.index += 1
        self.index = (self.index % self.size)
        return (imgs, gt, names, scene)

    def rgb_loader(self, path):
        """Load an image from `path` as RGB."""
        with open(path, 'rb') as f:
            img = Image.open(f)
            return img.convert('RGB')

    def binary_loader(self, path):
        """Load an image from `path` as single-channel grayscale."""
        with open(path, 'rb') as f:
            img = Image.open(f)
            return img.convert('L')

    def __len__(self):
        return self.size
def get_grasp(mask):
    """Compute a grasp point and orientation for a binary mask.

    Center: mean location of the distance-transform maxima, returned in
    (x, y) order. Orientation: half the atan2 of second-order central
    moments of the largest external contour, negated; None when the mask
    yields no contour.
    """
    dist = cv.distanceTransform(mask, cv.DIST_L2, 5)
    peak = np.argwhere(dist == dist.max()).mean(axis=0)
    row, col = (round(v) for v in peak)
    center = (col, row)  # swap to (x, y)
    contours, _ = cv.findContours(mask, cv.RETR_EXTERNAL, cv.CHAIN_APPROX_SIMPLE)
    if not contours:
        return (center, None)
    largest_contour = max(contours, key=cv.contourArea)
    moments = cv.moments(largest_contour)
    angle = 0.5 * math.atan2(2 * moments['mu11'], moments['mu20'] - moments['mu02'])
    return (center, -angle)
class TestLogger(unittest.TestCase):
    """Smoke test: every logger entry point accepts strings, dicts, lists,
    tuples, and nested/tuple-keyed structures without raising."""

    def test_logger(self):
        # Numeric-level log() calls.
        logger.log(0, 'call logger log function.')
        logger.log(1, {'msg': 'call logger log function.'})
        # Each severity with both a string and a dict payload.
        logger.debug('call logger debug function.')
        logger.debug({'msg': 'call logger debug function.'})
        logger.error('call logger error function.')
        logger.error({'msg': 'call logger error function.'})
        logger.fatal('call logger fatal function')
        logger.fatal({'msg': 'call logger fatal function'})
        logger.info('call logger info function')
        logger.info({'msg': 'call logger info function.'})
        logger.warn('call logger warn function')
        logger.warn({'msg': 'call logger warn function'})
        logger.warning('call logger warning function')
        logger.warning({'msg': 'call logger warning function'})
        # Container payloads.
        logger.warning(['call logger warning function', 'done'])
        logger.warning(('call logger warning function', 'done'))
        # Deeply nested / tuple-keyed dict payloads.
        logger.warning({'msg': {('bert', 'embedding'): {'weight': {'dtype': ['unint8', 'int8']}}}})
        logger.warning({'msg': {('bert', 'embedding'): {'op': ('a', 'b')}}})
        logger.warning([{'msg': 'call logger warning function'}, {'msg2': 'done'}])
        logger.warning(({'msg': 'call logger warning function'}, {'msg2': 'done'}))
        logger.warning(({'msg': [{'sub_msg': 'call logger'}, {'sub_msg2': 'call warning function'}]}, {'msg2': 'done'}))
def create_torchvision_biomodel(model_architecture, mode, layer_config: dict=None, pretrained: bool=False, progress: bool=True, num_classes: int=1000) -> BioModule:
    """Build a BioModule around a torchvision model constructor.

    When `pretrained`, the backbone is instantiated with the canonical 1000
    classes and the final `fc` layer is swapped for one with `num_classes`
    outputs. NOTE(review): assumes the architecture exposes `.fc` (true for
    ResNet-style models) — confirm for other architectures.
    """
    copy_weights = pretrained
    if pretrained:
        model = model_architecture(pretrained, progress, num_classes=1000)
        if num_classes != 1000:
            model.fc = nn.Linear(model.fc.in_features, num_classes)
    else:
        model = model_architecture(pretrained, progress, num_classes=num_classes)
    return BioModule(model, mode=mode, copy_weights=copy_weights, layer_config=layer_config, output_dim=num_classes)
class MSELossWiedemann(MSELoss):
    """MSELoss variant that delegates to the module-level
    `mse_loss_wiedemann`, keeping torch's standard reduction semantics."""

    def __init__(self, size_average=None, reduce=None, reduction: str = 'mean') -> None:
        super(MSELossWiedemann, self).__init__(size_average, reduce, reduction)

    def forward(self, input: Tensor, target: Tensor) -> Tensor:
        # Delegate to the functional implementation defined elsewhere.
        return mse_loss_wiedemann(input, target, reduction=self.reduction)
class TimerError(Exception):
    """Exception for timer-related failures; also exposes the message as `.message`."""

    def __init__(self, message):
        super(TimerError, self).__init__(message)
        self.message = message
def modify_lr(optimizer, iter_count, init_lr=0.001, all_iter=10000, decay=0.999931, prompt_every=1000):
    """Exponentially decay the learning rate and apply it to every param group.

    `all_iter` is unused but kept for interface compatibility. Prints the
    current rate every `prompt_every` iterations. Returns the (mutated)
    optimizer for convenient chaining.
    """
    new_lr = init_lr * decay ** float(iter_count)
    if (iter_count + 1) % prompt_every == 0:
        print('INFO: Current learning rate is: {:.6f}'.format(new_lr))
    for group in optimizer.param_groups:
        group['lr'] = new_lr
    return optimizer
class ModelDataConfig():
    """Schema of per-model data/tokenization settings.

    NOTE(review): these are bare class-level annotations — without a
    @dataclass decorator (none visible here) no attributes are actually
    created; confirm the intended decorator at the definition site.
    """
    name: str
    # Prompt text injected as the system role.
    system: str
    # Maps role name -> prefix string prepended to that role's turns.
    role_prefix: dict
    ai_role: str
    # End-of-turn token appended after each message.
    eot_token: str
    bos_token: Optional[str]
    max_tokens: int
    pad_token: int
    # Label id ignored by the loss.
    ignore_id: int
class RemBertConfig(PretrainedConfig):
    """Configuration for RemBERT models.

    Notable differences from BERT: decoupled input/output embedding sizes
    and word-embedding tying disabled.
    """
    model_type = 'rembert'

    def __init__(self, vocab_size=250300, hidden_size=1152, num_hidden_layers=32, num_attention_heads=18, input_embedding_size=256, output_embedding_size=1664, intermediate_size=4608, hidden_act='gelu', hidden_dropout_prob=0.0, attention_probs_dropout_prob=0.0, classifier_dropout_prob=0.1, max_position_embeddings=512, type_vocab_size=2, initializer_range=0.02, layer_norm_eps=1e-12, use_cache=True, is_encoder_decoder=False, pad_token_id=0, bos_token_id=312, eos_token_id=313, **kwargs):
        super().__init__(pad_token_id=pad_token_id, bos_token_id=bos_token_id, eos_token_id=eos_token_id, **kwargs)
        self.vocab_size = vocab_size
        # Input/output embeddings are sized independently of hidden_size.
        self.input_embedding_size = input_embedding_size
        self.output_embedding_size = output_embedding_size
        self.max_position_embeddings = max_position_embeddings
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.classifier_dropout_prob = classifier_dropout_prob
        self.initializer_range = initializer_range
        self.type_vocab_size = type_vocab_size
        self.layer_norm_eps = layer_norm_eps
        self.use_cache = use_cache
        # RemBERT never ties input and output word embeddings.
        self.tie_word_embeddings = False
class SSD(object):
    """Single Shot MultiBox Detector architecture (PaddlePaddle fluid graph
    builder): backbone -> multi-box head -> SSD loss (train) or output
    decoder (eval/test).
    """
    __category__ = 'architecture'
    __inject__ = ['backbone', 'multi_box_head', 'output_decoder']
    __shared__ = ['num_classes']

    def __init__(self, backbone, multi_box_head='MultiBoxHead', output_decoder=SSDOutputDecoder().__dict__, num_classes=21):
        super(SSD, self).__init__()
        self.backbone = backbone
        self.multi_box_head = multi_box_head
        self.num_classes = num_classes
        self.output_decoder = output_decoder
        # Allow the decoder to be injected as a plain config dict.
        if isinstance(output_decoder, dict):
            self.output_decoder = SSDOutputDecoder(**output_decoder)

    def build(self, feed_vars, mode='train'):
        """Assemble the detection graph; returns {'loss': ...} in train mode,
        {'bbox': ...} otherwise."""
        im = feed_vars['image']
        if ((mode == 'train') or (mode == 'eval')):
            gt_bbox = feed_vars['gt_bbox']
            gt_class = feed_vars['gt_class']
        mixed_precision_enabled = (mixed_precision_global_state() is not None)
        # Cast the image to fp16 for the backbone, back to fp32 for the head.
        if mixed_precision_enabled:
            im = fluid.layers.cast(im, 'float16')
        body_feats = self.backbone(im)
        if isinstance(body_feats, OrderedDict):
            body_feat_names = list(body_feats.keys())
            body_feats = [body_feats[name] for name in body_feat_names]
        if mixed_precision_enabled:
            body_feats = [fluid.layers.cast(v, 'float32') for v in body_feats]
        (locs, confs, box, box_var) = self.multi_box_head(inputs=body_feats, image=im, num_classes=self.num_classes)
        if (mode == 'train'):
            loss = fluid.layers.ssd_loss(locs, confs, gt_bbox, gt_class, box, box_var)
            loss = fluid.layers.reduce_sum(loss)
            return {'loss': loss}
        else:
            pred = self.output_decoder(locs, confs, box, box_var)
            return {'bbox': pred}

    def _inputs_def(self, image_shape):
        """Declare the shape/dtype/lod spec of every possible feed variable."""
        im_shape = ([None] + image_shape)
        inputs_def = {'image': {'shape': im_shape, 'dtype': 'float32', 'lod_level': 0}, 'im_id': {'shape': [None, 1], 'dtype': 'int64', 'lod_level': 0}, 'gt_bbox': {'shape': [None, 4], 'dtype': 'float32', 'lod_level': 1}, 'gt_class': {'shape': [None, 1], 'dtype': 'int32', 'lod_level': 1}, 'im_shape': {'shape': [None, 3], 'dtype': 'int32', 'lod_level': 0}, 'is_difficult': {'shape': [None, 1], 'dtype': 'int32', 'lod_level': 1}}
        return inputs_def

    def build_inputs(self, image_shape=[3, None, None], fields=['image', 'im_id', 'gt_bbox', 'gt_class'], use_dataloader=True, iterable=False):
        """Create feed variables for `fields` plus an optional DataLoader."""
        inputs_def = self._inputs_def(image_shape)
        feed_vars = OrderedDict([(key, fluid.data(name=key, shape=inputs_def[key]['shape'], dtype=inputs_def[key]['dtype'], lod_level=inputs_def[key]['lod_level'])) for key in fields])
        loader = (fluid.io.DataLoader.from_generator(feed_list=list(feed_vars.values()), capacity=16, use_double_buffer=True, iterable=iterable) if use_dataloader else None)
        return (feed_vars, loader)

    def train(self, feed_vars):
        return self.build(feed_vars, 'train')

    def eval(self, feed_vars):
        return self.build(feed_vars, 'eval')

    def test(self, feed_vars):
        return self.build(feed_vars, 'test')

    def is_bbox_normalized(self):
        # SSD predictions are in normalized [0, 1] coordinates.
        return True
def get_loss(output_dict, target, spg_thresholds):
    """Total SPG loss: classification cross-entropy plus three attention
    supervision terms, each gated by a (high, low) threshold pair."""
    (h1, l1), (h2, l2), (h3, l3) = spg_thresholds
    # Upsample B2 logits to input resolution and squash to [0, 1] so they can
    # serve as a pseudo-mask for branch B1.
    upsampler = nn.Upsample(size=(224, 224), mode='bilinear')
    b2_mask = torch.sigmoid(upsampler(output_dict['logits_b2']))
    total = nn.CrossEntropyLoss()(output_dict['logits'], target.long())
    attention_terms = (
        (output_dict['logits_b2'], output_dict['attention'], h1, l1),
        (output_dict['logits_b1'], b2_mask, h2, l2),
        (output_dict['logits_c'], output_dict['fused_attention'], h3, l3),
    )
    for logits, mask, high, low in attention_terms:
        total = total + _get_loss_attention(logits=logits, pre_mask=mask, high_thr=high, low_thr=low)
    return total
def test_individual_importances():
    """Per-term importances from get_individual_importances must match
    single-term compute_group_importance results for every term."""
    data = synthetic_regression()
    X = data['full']['X']
    y = data['full']['y']
    ebm = ExplainableBoostingRegressor()
    ebm.fit(X, y)
    contributions = ebm.eval_terms(X)
    # Renamed from `dict`, which shadowed the builtin.
    importances = get_individual_importances(ebm, X, contributions)
    for term in ('A', 'B', 'C', 'D'):
        assert importances[term] == compute_group_importance([term], ebm, X, contributions)
def dice_loss(pred, target):
    """Soft Dice loss; `pred` must be in [0, 1] and match `target`'s shape."""
    assert pred.shape == target.shape
    assert (pred.max() <= 1) and (pred.min() >= 0)
    assert one_hot(target)
    eps = 1e-10  # guards against division by zero on empty masks
    overlap = 2 * torch.sum(pred * target)
    cardinality = torch.sum(pred + target)
    return 1 - (overlap + eps) / (cardinality + eps)
def film_normalize_data(context, model_params, ds_train, ds_valid, path_output):
    """Fit FiLM metadata encoders on the train set, apply them to the
    validation set, and persist the fitted artifacts under `path_output`.

    Returns (model_params, ds_train, ds_valid, train_onehotencoder).
    """
    results = imed_film.get_film_metadata_models(ds_train=ds_train, metadata_type=model_params[ModelParamsKW.METADATA], debugging=context[ConfigKW.DEBUGGING])
    (ds_train, train_onehotencoder, metadata_clustering_models) = results
    ds_valid = imed_film.normalize_metadata(ds_valid, metadata_clustering_models, context[ConfigKW.DEBUGGING], model_params[ModelParamsKW.METADATA])
    # Record the fitted encoder and the total one-hot metadata dimensionality.
    model_params.update({ModelParamsKW.FILM_ONEHOTENCODER: train_onehotencoder, ModelParamsKW.N_METADATA: len([ll for l in train_onehotencoder.categories_ for ll in l])})
    joblib.dump(metadata_clustering_models, Path(path_output, 'clustering_models.joblib'))
    # BUGFIX: was Path(path_output + 'one_hot_encoder.joblib'), which glued the
    # filename onto the directory name without a separator; join segments like
    # the clustering-models dump above.
    joblib.dump(train_onehotencoder, Path(path_output, 'one_hot_encoder.joblib'))
    return (model_params, ds_train, ds_valid, train_onehotencoder)
def cleanup_discard(d, key, val):
    """Remove `val` from the set stored at d[key]; drop the key entirely
    once its set becomes empty. Missing keys are a no-op."""
    members = d.get(key, set())
    members.discard(val)
    if not members:
        d.pop(key, None)
class Mul2(nn.Module):
    """Elementwise product of exactly two inputs supplied as a list."""

    def __init__(self):
        super(Mul2, self).__init__()

    def forward(self, x):
        # isinstance is the idiomatic check; `type(x) == list` rejected
        # list subclasses for no benefit.
        assert isinstance(x, list) and (len(x) == 2), 'Mul2 expects a list of exactly two operands'
        return x[0] * x[1]
def evaluate(model):
    """Evaluate an object-detection TF model: COCO mAP in 'accuracy' mode,
    latency/throughput in 'performance' mode.

    `model` may be a frozen-graph path, a tf.Graph, or an already-wrapped
    neural_compressor Model. Returns the COCO mAP metric result.
    """
    from neural_compressor.model import Model
    # Normalize all accepted input kinds to a neural_compressor Model.
    if (isinstance(model, str) or isinstance(model, tf.compat.v1.Graph)):
        model = Model(model)
    model.input_tensor_names = ['image_tensor:0']
    model.output_tensor_names = ['num_detections:0', 'detection_boxes:0', 'detection_scores:0', 'detection_classes:0']
    input_tensor = model.input_tensor
    output_tensor = (model.output_tensor if (len(model.output_tensor) > 1) else model.output_tensor[0])
    warmup = 5  # iterations excluded from the latency average
    iteration = (- 1)  # -1 => consume the whole dataloader
    if (args.benchmark and (args.mode == 'performance')):
        iteration = args.iters
    metric = COCOmAPv2(output_index_mapping={'num_detections': 0, 'boxes': 1, 'scores': 2, 'classes': 3})

    def eval_func(dataloader):
        """Run the session over the loader; returns mean per-image latency."""
        latency_list = []
        for (idx, (inputs, labels)) in enumerate(dataloader):
            inputs = np.array([inputs])
            feed_dict = dict(zip(input_tensor, inputs))
            start = time.time()
            predictions = model.sess.run(output_tensor, feed_dict)
            end = time.time()
            metric.update(predictions, labels)
            latency_list.append((end - start))
            if ((idx + 1) == iteration):
                break
        latency = (np.array(latency_list[warmup:]).mean() / args.batch_size)
        return latency

    # BUGFIX: the original eagerly built an extra COCORecordDataset here that
    # was immediately overwritten in both branches below; that dead (and
    # expensive) construction is removed.
    if (args.mode == 'accuracy'):
        eval_dataset = COCORecordDataset(root=args.dataset_location, filter=None, transform=ComposeTransform(transform_list=[TensorflowResizeWithRatio(min_dim=800, max_dim=1356, padding=False)]))
        eval_dataloader = DataLoader(framework='tensorflow', dataset=eval_dataset, batch_size=1)
    else:
        eval_dataset = COCORecordDataset(root=args.dataset_location, filter=None, transform=ComposeTransform(transform_list=[TensorflowResizeWithRatio(min_dim=800, max_dim=1356, padding=True)]))
        eval_dataloader = DataLoader(framework='tensorflow', dataset=eval_dataset, batch_size=args.batch_size)
    latency = eval_func(eval_dataloader)
    if (args.benchmark and (args.mode == 'performance')):
        print('Batch size = {}'.format(args.batch_size))
        print('Latency: {:.3f} ms'.format((latency * 1000)))
        print('Throughput: {:.3f} images/sec'.format((1.0 / latency)))
    acc = metric.result()
    return acc
def event2frame(event, img_size, ts, f_span, total_span, num_frame, noise, roiTL=(0, 0)):
    """Accumulate an event stream into pre-/post-timestamp frame volumes.

    Returns (preE, postE, pre_coef, post_coef) where the coefficients are the
    normalized position of `ts` inside [f_start, f_end].
    """
    f_start, f_end = f_span
    total_start, total_end = total_span
    # Event columns arrive as floats; integer coordinates/polarity are needed
    # for frame indexing.
    for field in ('x', 'y', 't', 'p'):
        event[field] = event[field].astype(int)
    volume_shape = (num_frame, 2, img_size[0], img_size[1])
    preE = np.zeros(volume_shape)
    postE = np.zeros(volume_shape)
    interval = (total_end - total_start) / num_frame
    if event['t'].shape[0] > 0:
        preE = e2f_detail(event, preE, ts, f_start, interval, noise, roiTL, img_size)
        postE = e2f_detail(event, postE, ts, f_end, interval, noise, roiTL, img_size)
    span = f_end - f_start
    pre_coef = (ts - f_start) / span
    post_coef = (f_end - ts) / span
    return (preE, postE, pre_coef, post_coef)
class PeleeNet(nn.Module):
    """PeleeNet classification backbone: stem, dense stages with transition
    blocks, 1x1 final conv, global 7x7 average pool, dropout + linear head.

    Parameters (per the constructor): per-stage unit output channels,
    stem output channels, per-stage dense bottleneck sizes, dropout rate,
    input channels/size and class count.
    """

    def __init__(self, channels, init_block_channels, bottleneck_sizes, dropout_rate=0.5, in_channels=3, in_size=(224, 224), num_classes=1000):
        super(PeleeNet, self).__init__()
        self.in_size = in_size
        self.num_classes = num_classes
        self.features = nn.Sequential()
        self.features.add_module('init_block', StemBlock(in_channels=in_channels, out_channels=init_block_channels))
        in_channels = init_block_channels
        for (i, channels_per_stage) in enumerate(channels):
            bottleneck_size = bottleneck_sizes[i]
            stage = nn.Sequential()
            # Every stage except the first is preceded by a transition block
            # (channel-preserving; presumably downsampling — confirm in
            # TransitionBlock).
            if (i != 0):
                stage.add_module('trans{}'.format((i + 1)), TransitionBlock(in_channels=in_channels, out_channels=in_channels))
            for (j, out_channels) in enumerate(channels_per_stage):
                stage.add_module('unit{}'.format((j + 1)), DenseBlock(in_channels=in_channels, out_channels=out_channels, bottleneck_size=bottleneck_size))
                in_channels = out_channels
            self.features.add_module('stage{}'.format((i + 1)), stage)
        self.features.add_module('final_block', conv1x1_block(in_channels=in_channels, out_channels=in_channels))
        # 7x7 pool assumes 224x224 input (7 = 224 / 32 total stride) —
        # other in_size values would need a different kernel; confirm callers.
        self.features.add_module('final_pool', nn.AvgPool2d(kernel_size=7, stride=1))
        self.output = nn.Sequential()
        self.output.add_module('dropout', nn.Dropout(p=dropout_rate))
        self.output.add_module('fc', nn.Linear(in_features=in_channels, out_features=num_classes))
        self._init_params()

    def _init_params(self):
        # Kaiming-uniform init for every conv weight; zero biases.
        for (name, module) in self.named_modules():
            if isinstance(module, nn.Conv2d):
                init.kaiming_uniform_(module.weight)
                if (module.bias is not None):
                    init.constant_(module.bias, 0)

    def forward(self, x):
        x = self.features(x)
        # Flatten pooled features to (batch, channels) for the linear head.
        x = x.view(x.size(0), (- 1))
        x = self.output(x)
        return x
def api_run_func():
    """Exercise the coworker shared-memory data path end to end: coworker
    ranks publish batches (then a None sentinel), workers consume them."""
    ok = atorch.init_distributed('gloo', coworker_num_per_node=1)
    assert ok
    toy_dataset = ToyDataset(50)
    loader_kwargs = {'batch_size': 4}
    shm_ctx = create_coworker_shm_context(dataset=toy_dataset, dataloader_args=loader_kwargs, io_timeout=5, initialize_timeout=15)
    num_batches = 2
    if atorch.distributed.is_coworker():
        produced = get_sample_batch(toy_dataset, loader_kwargs, num=num_batches)
        shm_ctx.add_batch(produced)
        shm_ctx.add_batch(None)  # end-of-data sentinel
    else:
        for _ in range(num_batches):
            fetched = shm_ctx.get_data(0)
            assert fetched is not None
        # After the sentinel the queue yields None.
        assert shm_ctx.get_data(0) is None
    shm_ctx.tear_down(master_wait_for_worker=True)
    atorch.reset_distributed()
class Logger(object):
    """Minimal logger interface: holds a `fields` list and exposes no-op
    log hooks for subclasses to override.

    NOTE(review): the original pair of bare `fields` methods (an asserting
    getter and a setter whose body was the no-op expression `self._fields`)
    is the classic shape of a @property getter/setter whose decorators were
    stripped; restored here with a real assignment in the setter.
    """
    # Backing store for the `fields` property.
    _fields = None

    @property
    def fields(self):
        assert (self._fields is not None), 'self.fields is not set!'
        return self._fields

    @fields.setter
    def fields(self, value):
        self._fields = value

    def __init__(self, fields=None):
        self.fields = fields

    def log(self, *args, **kwargs):
        # No-op hook; subclasses record metrics here.
        pass

    def log_state(self, state_dict):
        # No-op hook; subclasses persist state here.
        pass
def _load_cfg_from_yaml_str(str_obj):
    """Parse a YAML document string and wrap the result in a CfgNode."""
    return CfgNode(yaml.safe_load(str_obj))
class SimpleLSTM(object):
    """Binary sequence classifier: embedding -> LSTM -> sigmoid output."""

    def __init__(self) -> None:
        super(SimpleLSTM, self).__init__()
        # Fixed hyper-parameters: sequence length, embedding width, vocabulary
        # size, LSTM width, and L2 regularization for the embedding matrix.
        self.max_len = 75
        self.emb_dim = 32
        self.max_vocab_len = 100
        self.lstm_output_size = 32
        self.W_reg = regularizers.l2(0.0001)

    def build_model(self):
        """Assemble and compile the Keras model; returns the compiled model."""
        tokens = Input(shape=(self.max_len,), dtype='int32', name='main_input')
        x = Embedding(input_dim=self.max_vocab_len, output_dim=self.emb_dim, input_length=self.max_len, embeddings_regularizer=self.W_reg)(tokens)
        x = Dropout(0.2)(x)
        x = LSTM(self.lstm_output_size)(x)
        x = Dropout(0.5)(x)
        prediction = Dense(1, activation='sigmoid', name='output')(x)
        net = Model(inputs=[tokens], outputs=[prediction])
        optimizer = Adam(lr=0.0001, beta_1=0.9, beta_2=0.999, epsilon=1e-08, decay=0.0)
        net.compile(optimizer=optimizer, loss='binary_crossentropy', metrics=['accuracy'])
        return net
def many_to_one(input_dict):
    """Expand a dict keyed by iterables of keys into a flat dict mapping each
    individual key to its value (later entries win on key collisions)."""
    flattened = {}
    for keys, val in input_dict.items():
        for key in keys:
            flattened[key] = val
    return flattened
def data_transform_3d(normalization):
    """Build 3D transform pipelines per split: train gets augmentation plus
    z-normalization; val/test get z-normalization only."""
    def _znorm():
        # Fresh instance per pipeline, matching the original behaviour.
        return T.ZNormalization(masking_method=normalization)

    train_pipeline = T.Compose([
        T.RandomFlip(),
        T.RandomBiasField(coefficients=(0.12, 0.15), order=2, p=0.2),
        # Exactly one of noise/blur, applied 20% of the time.
        T.OneOf({T.RandomNoise(): 0.5, T.RandomBlur(std=1): 0.5}, p=0.2),
        _znorm(),
    ])
    return {
        'train': train_pipeline,
        'val': T.Compose([_znorm()]),
        'test': T.Compose([_znorm()]),
    }
class BaseRayEstimator(BaseEstimator, metaclass=ABCMeta):
    """Abstract Ray-backed estimator: spins up remote runner actors (ray or
    horovod backend), synchronizes model state with them, and delegates
    training epochs / checkpointing to the workers."""

    def __init__(self, **kwargs):
        pass

    def fit(self, **kwargs):
        pass

    def predict(self, **kwargs):
        pass

    def evaluate(self, **kwargs):
        pass

    def get_model(self):
        pass

    def setup(self, params, backend='ray', runner_cls=None, workers_per_node=1):
        """Create remote workers and initialize them for `backend`
        ('ray' or 'horovod'); sets self.remote_workers / self.num_workers."""
        ray_ctx = OrcaRayContext.get()
        if (backend == 'ray'):
            self.init_ddp_process = False
            # Split each node's CPU budget evenly among its workers.
            self.cores_per_node = (ray_ctx.ray_node_cpu_cores // workers_per_node)
            self.num_nodes = (ray_ctx.num_ray_nodes * workers_per_node)
            RemoteRunner = ray.remote(num_cpus=self.cores_per_node)(runner_cls)
            self.remote_workers = [RemoteRunner.remote(**params) for i in range(self.num_nodes)]
            ray.get([worker.setup.remote(self.cores_per_node) for (i, worker) in enumerate(self.remote_workers)])
            ray.get([worker.setup_torch_estimator.remote(i, self.num_nodes) for (i, worker) in enumerate(self.remote_workers)])
        elif (backend == 'horovod'):
            from bigdl.orca.learn.horovod.horovod_ray_runner import HorovodRayRunner
            self.horovod_runner = HorovodRayRunner(ray_ctx, worker_cls=runner_cls, worker_param=params, workers_per_node=workers_per_node)
            self.remote_workers = self.horovod_runner.remote_workers
            cores_per_node = self.horovod_runner.cores_per_node
            ray.get([worker.setup.remote(cores_per_node) for (i, worker) in enumerate(self.remote_workers)])
            ray.get([worker.setup_horovod.remote() for (i, worker) in enumerate(self.remote_workers)])
        else:
            invalidInputError(False, 'Only "ray" and "horovod" are supported values of backend, but got {}'.format(backend))
        self.num_workers = len(self.remote_workers)

    def setup_torch_ddp(self):
        """Start a TCPStore on the driver and have every worker join the
        torch.distributed process group through it."""
        import torch.distributed as dist
        driver_ip = get_driver_node_ip()
        driver_tcp_store_port = find_free_port()
        # Keep-alive server-side store; workers connect as clients.
        _ = dist.TCPStore(driver_ip, driver_tcp_store_port, (- 1), True, dist.constants.default_pg_timeout)
        ray.get([worker.setup_torch_distribute.remote(driver_ip, driver_tcp_store_port, i, self.num_nodes) for (i, worker) in enumerate(self.remote_workers)])
        self.init_ddp_process = True

    def get_state_dict(self) -> Dict:
        """Fetch a state dict from whichever worker responds first."""
        stream_ids = [worker.get_state_stream.remote() for worker in self.remote_workers]
        # Any single worker's state suffices; take the first ready one.
        ([stream_id], stream_ids) = ray.wait(stream_ids, num_returns=1, timeout=None)
        byte_obj = ray.get(stream_id)
        _buffer = io.BytesIO(byte_obj)
        state_dict = torch.load(_buffer, map_location='cpu')
        return state_dict

    def load_state_dict(self, state_dict: Dict, blocking: bool=True):
        """Broadcast `state_dict` (serialized once into the object store)
        to every worker; optionally wait for completion."""
        _buffer = io.BytesIO()
        torch.save(state_dict, _buffer)
        state_stream = _buffer.getvalue()
        state_id = ray.put(state_stream)
        remote_calls = [worker.load_state_stream.remote(state_id) for worker in self.remote_workers]
        if blocking:
            ray.get(remote_calls)

    # NOTE(review): the bare `_multi_fs_save` / `_multi_fs_load` names below
    # look like decorators whose leading '@' (and possibly an 'enable' prefix)
    # was lost in extraction — confirm against upstream before relying on them.
    _multi_fs_save

    def save(self, model_path: str) -> str:
        """Save the (driver-fetched) state dict to `model_path`; returns it."""
        state_dict = self.get_state_dict()
        torch.save(state_dict, model_path)
        return model_path

    _multi_fs_load

    def load(self, model_path: str):
        """Load a state dict from `model_path` and push it to all workers."""
        state_dict = torch.load(model_path)
        self.load_state_dict(state_dict)

    def save_checkpoint(self, model_path: str):
        """Local path: save from driver; remote path: let each worker save."""
        from bigdl.dllib.utils.file_utils import is_local_path
        if is_local_path(model_path):
            self.save(model_path)
        else:
            results = [worker.save_checkpoint.remote(model_path) for worker in self.remote_workers]
            ray.get(results)

    def load_checkpoint(self, model_path: str):
        """Local path: load on driver then broadcast; remote path: workers load."""
        from bigdl.dllib.utils.file_utils import is_local_path
        if is_local_path(model_path):
            self.load(model_path)
        else:
            results = [worker.load_checkpoint.remote(model_path) for worker in self.remote_workers]
            ray.get(results)

    def shutdown(self, force: bool=False):
        """Tear down workers, gracefully unless `force`; falls back to
        ray.kill when graceful shutdown fails."""
        if (not force):
            cleanup = [worker.shutdown.remote() for worker in self.remote_workers]
            try:
                ray.get(cleanup)
                [worker.__ray_terminate__.remote() for worker in self.remote_workers]
            except RayActorError:
                logger.warning('Failed to shutdown gracefully, forcing a shutdown.')
                for worker in self.remote_workers:
                    logger.warning('Killing worker {}.'.format(worker))
                    ray.kill(worker)
        else:
            for worker in self.remote_workers:
                logger.debug('Killing worker {}.'.format(worker))
                ray.kill(worker)
        self.remote_workers = []

    def _train_epochs(self, **params):
        """Kick off train_epochs on every worker; returns (success, stats)
        where stats is None when any worker failed."""
        remote_worker_stats = []
        for (i, w) in enumerate(self.remote_workers):
            stats = w.train_epochs.remote(**params)
            remote_worker_stats.append(stats)
        success = check_for_failure(remote_worker_stats)
        if success:
            return (success, ray.get(remote_worker_stats))
        else:
            return (success, None)
def main(args):
    """Training/evaluation driver for ELFNet: seeds RNGs, builds the model,
    per-parameter-group optimizer and LR schedule, optionally resumes from a
    checkpoint, then runs the eval-only path or the training loop."""
    device = torch.device(args.device)
    # Seed all RNG sources for reproducibility.
    seed = args.seed
    torch.manual_seed(seed)
    np.random.seed(seed)
    random.seed(seed)
    model = ELFNet(args)
    model = model.to(device)
    model = torch.nn.DataParallel(model)
    print_param(model)
    # Four parameter groups with dedicated learning rates: main (default lr),
    # backbone, regression head, and pcw parameters.
    param_dicts = [{'params': [p for (n, p) in model.named_parameters() if (('backbone' not in n) and ('regression' not in n) and ('pcw' not in n) and p.requires_grad)]}, {'params': [p for (n, p) in model.named_parameters() if (('backbone' in n) and p.requires_grad)], 'lr': args.lr_backbone}, {'params': [p for (n, p) in model.named_parameters() if (('regression' in n) and p.requires_grad)], 'lr': args.lr_regression}, {'params': [p for (n, p) in model.named_parameters() if (('pcw' in n) and p.requires_grad)], 'lr': args.lr_pcw}]
    optimizer = torch.optim.AdamW(param_dicts, lr=args.lr_sttr, weight_decay=args.weight_decay)
    # args.lrepochs is a comma-separated list of epochs at which lr halves.
    downscale_epochs = [int(eid_str) for eid_str in args.lrepochs.split(',')]
    lr_scheduler = torch.optim.lr_scheduler.MultiStepLR(optimizer, downscale_epochs, gamma=0.5)
    prev_best = np.inf
    if (args.resume != ''):
        if (not os.path.isfile(args.resume)):
            raise RuntimeError(f"=> no checkpoint found at '{args.resume}'")
        checkpoint = torch.load(args.resume)
        pretrained_dict = checkpoint['state_dict']
        (missing, unexpected) = model.load_state_dict(pretrained_dict, strict=False)
        if (len(missing) > 0):
            print('Missing keys: ', ','.join(missing))
            raise Exception('Missing keys.')
        # BN running stats are tolerated as "unexpected"; anything else aborts.
        unexpected_filtered = [k for k in unexpected if (('running_mean' not in k) and ('running_var' not in k))]
        if (len(unexpected_filtered) > 0):
            print('Unexpected keys: ', ','.join(unexpected_filtered))
            raise Exception('Unexpected keys.')
        print('Pre-trained model successfully loaded.')
        # Full resume (not finetune/eval) also restores optimizer state.
        if (not (args.ft or args.eval)):
            if (len(unexpected) > 0):
                raise Exception(('Resuming legacy model with BN parameters. Not possible due to BN param change. ' + 'Do you want to finetune? If so, check your arguments.'))
            else:
                args.start_epoch = (checkpoint['epoch'] + 1)
                optimizer.load_state_dict(checkpoint['optimizer'])
                lr_scheduler.load_state_dict(checkpoint['lr_scheduler'])
                prev_best = checkpoint['best_pred']
                print('Pre-trained optimizer, lr scheduler and stats successfully loaded.')
    checkpoint_saver = Saver(args)
    summary_writer = TensorboardSummary(checkpoint_saver.experiment_dir)
    (data_loader_train, data_loader_val, _) = build_data_loader(args)
    criterion = build_criterion(args)
    set_downsample(args)
    if args.eval:
        print('Start evaluation')
        evaluate(model, criterion, data_loader_val, device, 0, summary_writer, False)
        return
    print('Start training')
    for epoch in range(args.start_epoch, args.epochs):
        print(('Epoch: %d' % epoch))
        train_one_epoch(model, data_loader_train, optimizer, criterion, device, epoch, summary_writer, args.clip_max_norm)
        # LR schedule is skipped during pre-training.
        if (not args.pre_train):
            lr_scheduler.step()
            print('current learning rate', lr_scheduler.get_lr())
        torch.cuda.empty_cache()
        # Periodic (non-best) checkpoint.
        if (args.pre_train or ((epoch % 50) == 0)):
            save_checkpoint(epoch, model, optimizer, lr_scheduler, prev_best, checkpoint_saver, False)
        eval_stats = evaluate(model, criterion, data_loader_val, device, epoch, summary_writer, False)
        # Save "best" checkpoint when both EPE and px-error improve thresholds.
        if ((prev_best > eval_stats['epe_combine']) and (0.5 > eval_stats['px_error_rate_combine'])):
            save_checkpoint(epoch, model, optimizer, lr_scheduler, prev_best, checkpoint_saver, True)
        # NOTE(review): this call passes one argument fewer than the other
        # save_checkpoint calls (no lr_scheduler) — looks like a bug; confirm
        # against save_checkpoint's signature.
        save_checkpoint(epoch, model, optimizer, prev_best, checkpoint_saver, False)
    return
class IntEmbedding(nn.Module):
    """Embedding layer with emulated integer quantization plus quant-noise.

    During forward, the weight is quantized to `bits` via `emulate_int`
    (scale/zero-point refreshed every `update_step` calls), and the
    quantization "noise" is applied to a random subset of entries
    (probability `p` during training, all entries in eval). The straight-
    through estimator keeps gradients flowing to the float weight.
    """

    def __init__(self, num_embeddings, embedding_dim, padding_idx=None, max_norm=None, norm_type=2.0, scale_grad_by_freq=False, sparse=False, _weight=None, p=0, update_step=1000, bits=8, method='histogram'):
        super(IntEmbedding, self).__init__()
        self.num_embeddings = num_embeddings
        self.embedding_dim = embedding_dim
        if (padding_idx is not None):
            if (padding_idx > 0):
                assert (padding_idx < self.num_embeddings), 'Padding_idx must be within num_embeddings'
            elif (padding_idx < 0):
                assert (padding_idx >= (- self.num_embeddings)), 'Padding_idx must be within num_embeddings'
                # Normalize negative padding indices to their positive form.
                padding_idx = (self.num_embeddings + padding_idx)
        self.padding_idx = padding_idx
        self.max_norm = max_norm
        self.norm_type = norm_type
        self.scale_grad_by_freq = scale_grad_by_freq
        if (_weight is None):
            self.weight = nn.Parameter(torch.Tensor(num_embeddings, embedding_dim))
            self.reset_parameters()
        else:
            assert (list(_weight.shape) == [num_embeddings, embedding_dim]), 'Shape of weight does not match num_embeddings and embedding_dim'
            self.weight = nn.Parameter(_weight)
        self.sparse = sparse
        # Quantization / quant-noise hyper-parameters.
        self.p = p
        self.bits = bits
        self.method = method
        self.update_step = update_step
        self.counter = 0

    def reset_parameters(self):
        nn.init.normal_(self.weight)
        if (self.padding_idx is not None):
            with torch.no_grad():
                self.weight[self.padding_idx].fill_(0)

    def forward(self, input):
        # In eval mode every entry is quantized (p=1); in training only a
        # fraction p receives quantization noise.
        p = (self.p if self.training else 1)
        # Periodically drop the cached scale/zero-point so emulate_int
        # recalibrates them from the current weights.
        if ((self.counter % self.update_step) == 0):
            self.scale = None
            self.zero_point = None
        self.counter += 1
        (weight_quantized, self.scale, self.zero_point) = emulate_int(self.weight.detach(), bits=self.bits, method=self.method, scale=self.scale, zero_point=self.zero_point)
        # mask == 1 marks entries that KEEP their float value (no noise).
        mask = torch.zeros_like(self.weight)
        mask.bernoulli_((1 - p))
        noise = (weight_quantized - self.weight).masked_fill(mask.bool(), 0)
        # Clamp the float weight to the representable quantization range,
        # then add the detached noise (straight-through estimator).
        clamp_low = ((- self.scale) * self.zero_point)
        clamp_high = (self.scale * (((2 ** self.bits) - 1) - self.zero_point))
        weight = (torch.clamp(self.weight, clamp_low.item(), clamp_high.item()) + noise.detach())
        output = F.embedding(input, weight, self.padding_idx, self.max_norm, self.norm_type, self.scale_grad_by_freq, self.sparse)
        return output

    def extra_repr(self):
        s = '{num_embeddings}, {embedding_dim}'
        if (self.padding_idx is not None):
            s += ', padding_idx={padding_idx}'
        if (self.max_norm is not None):
            s += ', max_norm={max_norm}'
        if (self.norm_type != 2):
            s += ', norm_type={norm_type}'
        if (self.scale_grad_by_freq is not False):
            s += ', scale_grad_by_freq={scale_grad_by_freq}'
        if (self.sparse is not False):
            s += ', sparse=True'
        # BUGFIX: the separator was missing, producing e.g.
        # "...sparse=Truequant_noise=..." in repr output.
        s += ', quant_noise={p}, bits={bits}, method={method}'
        return s.format(**self.__dict__)
def test_transformed_views_have_same_shape_as_original(mock_data):
    """Preprocessing must not change the shape of any view."""
    brain_view, behavior_view, _ = mock_data
    views = [brain_view, behavior_view]
    mvp = MultiViewPreprocessing([StandardScaler(), StandardScaler()])
    mvp.fit(views)
    transformed_views = mvp.transform(views)
    assert all(before.shape == after.shape for before, after in zip(views, transformed_views))
def train(train_loader, model, criterion, optimizer, epoch):
    """Run one training epoch; returns (avg_loss, avg_top1, avg_top5).

    Tracks batch/data timing and loss / top-1 / top-5 accuracy via
    AverageMeter and prints progress every `args.print_freq` batches.
    """
    batch_time = AverageMeter()
    data_time = AverageMeter()
    losses = AverageMeter()
    top1 = AverageMeter()
    top5 = AverageMeter()
    model.train()
    end = time.time()
    for (i, (input, target)) in enumerate(train_loader):
        # Time spent waiting on the data loader for this batch.
        data_time.update((time.time() - end))
        if (args.gpu is not None):
            input = input.cuda(args.gpu, non_blocking=True)
            target = target.cuda(args.gpu, non_blocking=True)
        output = model(input)
        loss = criterion(output, target)
        (prec1, prec5) = accuracy(output, target, topk=(1, 5))
        # Meters are weighted by batch size so averages are per-sample.
        losses.update(loss.item(), input.size(0))
        top1.update(prec1[0], input.size(0))
        top5.update(prec5[0], input.size(0))
        optimizer.zero_grad()
        loss.backward()
        optimizer.step()
        batch_time.update((time.time() - end))
        end = time.time()
        if ((i % args.print_freq) == 0):
            # NOTE(review): the '\ {top1...' / '\ {top5...' fragments in this
            # format string look like truncated 'Prec@1'/'Prec@5' labels lost
            # in extraction — confirm against upstream before changing.
            print('Epoch: [{0}][{1}/{2}]\tTime {batch_time.val:.3f} ({batch_time.avg:.3f})\tData {data_time.val:.3f} ({data_time.avg:.3f})\tLoss {loss.val:.4f} ({loss.avg:.4f})\ {top1.val:.3f} ({top1.avg:.3f})\ {top5.val:.3f} ({top5.avg:.3f})'.format(epoch, i, len(train_loader), batch_time=batch_time, data_time=data_time, loss=losses, top1=top1, top5=top5))
    return (losses.avg, top1.avg, top5.avg)
class PositionwiseFeedForward(rtrans.PositionwiseFeedForward):
    """Position-wise feed-forward sublayer whose two projections are
    MaskedLinear layers (prunable via learned masks) instead of nn.Linear."""

    def __init__(self, mask_type, mask_init_value, d_model, d_ff, dropout=(0.1 / 3)):
        # Deliberately bypasses the parent's __init__ (which would allocate
        # plain nn.Linear layers) and only runs nn.Module's; the inherited
        # forward presumably uses w_1/w_2/dropout defined here — confirm in
        # rtrans.PositionwiseFeedForward.
        nn.Module.__init__(self)
        self.w_1 = MaskedLinear(d_model, d_ff, mask_type, mask_init_value)
        self.w_2 = MaskedLinear(d_ff, d_model, mask_type, mask_init_value)
        self.dropout = nn.Dropout(dropout)
        # cache_output/cache: presumably activation-caching hooks read by the
        # parent class — confirm.
        self.cache_output = False
        self.cache = None
@pytest.fixture(autouse=True)
def _override_cache_config(cache_config: cache.CacheConfig) -> None:
    """Autouse fixture that redirects pybaseball's cache machinery at the
    test-provided `cache_config` and stubs out config saving.

    NOTE(review): the decorator line arrived truncated as `(autouse=True)`
    (a syntax error); restored as `@pytest.fixture(autouse=True)` — confirm
    against upstream.
    """
    def _test_auto_load() -> cache.CacheConfig:
        logger = logging.getLogger('pybaseball')
        logger.debug('_test_auto_load')
        return cache_config
    # Stash the real implementations once (first fixture run only) so they
    # are not clobbered by repeated overrides.
    if (not hasattr(cache.cache_config, '_autoload_cache')):
        cache.cache_config._autoload_cache = copy.copy(cache.cache_config.autoload_cache)
    cache.cache_config.autoload_cache = _test_auto_load
    if (not hasattr(cache.cache_config.CacheConfig, '_save')):
        cache.cache_config.CacheConfig._save = copy.copy(cache.cache_config.CacheConfig.save)
    cache.cache_config.CacheConfig.save = MagicMock()
    cache.config = cache_config
    cache.cache_record.cfg = cache_config
@registry.register_builder('msrvtt_caption')
class MSRVTTCapBuilder(BaseDatasetBuilder):
    """Dataset builder for MSRVTT video captioning.

    NOTE(review): the decorator arrived truncated as `_builder('msrvtt_caption')`;
    restored as `@registry.register_builder(...)` (the LAVIS registry
    convention) — confirm against upstream.
    """
    train_dataset_cls = VideoCaptionDataset
    eval_dataset_cls = VideoCaptionEvalDataset
    # Maps config variant name -> default dataset-config YAML path.
    DATASET_CONFIG_DICT = {'default': 'configs/datasets/msrvtt/defaults_cap.yaml'}
def test_digits_greedi_nn_object():
    """A GreeDi-optimized mixture of feature-based selectors must reproduce
    the stored reference ranking/gains on the digits data."""
    sqrt_selector = FeatureBasedSelection(100, 'sqrt')
    log_selector = FeatureBasedSelection(100, 'log')
    greedi = GreeDi(optimizer1='naive', optimizer2='naive', random_state=0)
    model = MixtureSelection(100, [sqrt_selector, log_selector], [1.0, 0.3], optimizer=greedi)
    model.fit(X_digits)
    # Only the first 85 positions are stable across environments.
    assert_array_equal(model.ranking[:85], digits_greedi_ranking[:85])
    assert_array_almost_equal(model.gains[:85], digits_greedi_gains[:85], 4)
    assert_array_almost_equal(model.subset, X_digits[model.ranking])
class Generator(nn.Module):
    """DCGAN-style generator: z -> 4 transposed-conv upsampling stages ->
    3-channel tanh image. `d_channels_in_2` derives stage widths by halving
    instead of using the explicit ct2/ct3/ct4 arguments.

    NOTE(review): `z_size` (latent dim) is a free variable defined elsewhere
    in the module — confirm it is in scope at import time.
    """

    def __init__(self, ct1_channels=512, ct2_channels=256, ct3_channels=128, ct4_channels=64, d_channels_in_2=False):
        super().__init__()
        self.ct1_channels = ct1_channels
        # pheight/pwidth: spatial size after the first transposed conv.
        self.pheight = 4
        self.pwidth = 4
        if d_channels_in_2:
            # Halve channel width at every stage instead of taking explicit args.
            self.ct2_channels = (self.ct1_channels // 2)
            self.ct3_channels = (self.ct2_channels // 2)
            self.ct4_channels = (self.ct3_channels // 2)
        else:
            self.ct2_channels = ct2_channels
            self.ct3_channels = ct3_channels
            self.ct4_channels = ct4_channels
        # 1x1 -> 4x4 projection from the latent vector.
        self.convt_0 = nn.ConvTranspose2d(in_channels=z_size, out_channels=self.ct1_channels, kernel_size=4, padding=0, stride=1, bias=False)
        self.bnorm0 = nn.BatchNorm2d(self.ct1_channels)
        # Each following stage doubles the spatial resolution (stride=2).
        self.convt_1 = nn.ConvTranspose2d(in_channels=self.ct1_channels, out_channels=self.ct2_channels, kernel_size=4, stride=2, padding=1, bias=False)
        self.bnorm1 = nn.BatchNorm2d(num_features=self.ct2_channels)
        self.convt_2 = nn.ConvTranspose2d(in_channels=self.ct2_channels, out_channels=self.ct3_channels, kernel_size=4, stride=2, padding=1, bias=False)
        self.bnorm2 = nn.BatchNorm2d(num_features=self.ct3_channels)
        self.convt_3 = nn.ConvTranspose2d(in_channels=self.ct3_channels, out_channels=self.ct4_channels, kernel_size=4, stride=2, padding=1, bias=False)
        self.bnorm3 = nn.BatchNorm2d(num_features=self.ct4_channels)
        # Final stage maps to 3 image channels; tanh squashes to [-1, 1].
        self.convt_4 = nn.ConvTranspose2d(in_channels=self.ct4_channels, out_channels=3, kernel_size=4, stride=2, padding=1, bias=False)
        self.relu = nn.ReLU()
        self.tanh = nn.Tanh()

    def forward(self, z):
        x = self.relu(self.bnorm0(self.convt_0(z)))
        x = self.relu(self.bnorm1(self.convt_1(x)))
        x = self.relu(self.bnorm2(self.convt_2(x)))
        x = self.relu(self.bnorm3(self.convt_3(x)))
        out = self.tanh(self.convt_4(x))
        return out
def test_interpolation_potential_diffinputs_c():
    """interpRZPotential (C-enabled) must match direct potential evaluation
    to 1e-6 relative error for mixed vector/scalar (R, z) inputs."""
    rzpot = potential.interpRZPotential(RZPot=potential.MWPotential, rgrid=(0.01, 2.0, 151), zgrid=(0.0, 0.2, 151), logR=False, interpPot=True, zsym=True, enable_c=True)
    rs = numpy.linspace(0.01, 2.0, 20)
    zs = numpy.linspace((- 0.2), 0.2, 40)
    # Vector R with a scalar z.
    truth_vec_r = potential.evaluatePotentials(potential.MWPotential, rs, (zs[10] * numpy.ones(len(rs))))
    assert numpy.all((numpy.fabs(((rzpot(rs, zs[10]) - truth_vec_r) / truth_vec_r)) < (10.0 ** (- 6.0)))), 'RZPot interpolation w/ interpRZPotential fails for vector R and scalar Z'
    # Scalar R with a vector z.
    # BUGFIX: this assertion's message was a copy-paste of the previous one
    # ("vector R and scalar Z") even though it tests the opposite case.
    truth_vec_z = potential.evaluatePotentials(potential.MWPotential, (rs[10] * numpy.ones(len(zs))), zs)
    assert numpy.all((numpy.fabs(((rzpot(rs[10], zs) - truth_vec_z) / truth_vec_z)) < (10.0 ** (- 6.0)))), 'RZPot interpolation w/ interpRZPotential fails for scalar R and vector Z'
    return None
class ConvLayer(nn.Sequential):
    """Conv2d -> BatchNorm2d -> ReLU block with 'same'-style padding for
    odd kernel sizes; the conv carries no bias (BN makes it redundant)."""

    def __init__(self, in_channels, out_channels, kernel=3, stride=1):
        super().__init__()
        same_padding = kernel // 2
        self.add_module('conv', nn.Conv2d(in_channels, out_channels, kernel_size=kernel, stride=stride, padding=same_padding, bias=False))
        self.add_module('norm', nn.BatchNorm2d(out_channels))
        self.add_module('relu', nn.ReLU(inplace=True))

    def forward(self, x):
        # Plain sequential pass-through, kept explicit for readability.
        return super().forward(x)
@pytest.mark.parametrize('device', list_devices())
def test_knn_search(device):
    """knn_search (k=3) returns the three nearest dataset points for a
    single query and, unchanged, for a duplicated batched query.

    NOTE(review): the decorator line arrived truncated as `.parametrize(...)`;
    restored as `@pytest.mark.parametrize` — confirm against upstream.
    """
    dtype = o3c.float32
    dataset_points = o3c.Tensor([[0.0, 0.0, 0.0], [0.0, 0.0, 0.1], [0.0, 0.0, 0.2], [0.0, 0.1, 0.0], [0.0, 0.1, 0.1], [0.0, 0.1, 0.2], [0.0, 0.2, 0.0], [0.0, 0.2, 0.1], [0.0, 0.2, 0.2], [0.1, 0.0, 0.0]], dtype=dtype, device=device)
    nns = o3c.nns.NearestNeighborSearch(dataset_points)
    nns.knn_index()
    # Single query point.
    query_points = o3c.Tensor([[0.064705, 0.043921, 0.087843]], dtype=dtype, device=device)
    (indices, distances) = nns.knn_search(query_points, 3)
    np.testing.assert_equal(indices.cpu().numpy(), np.array([[1, 4, 9]], dtype=np.int64))
    np.testing.assert_allclose(distances.cpu().numpy(), np.array([[0., 0., 0.0108912]], dtype=np.float64), rtol=1e-05, atol=0)
    # Batched query: the same point twice must yield identical rows.
    query_points = o3c.Tensor([[0.064705, 0.043921, 0.087843], [0.064705, 0.043921, 0.087843]], dtype=dtype, device=device)
    (indices, distances) = nns.knn_search(query_points, 3)
    np.testing.assert_equal(indices.cpu().numpy(), np.array([[1, 4, 9], [1, 4, 9]], dtype=np.int64))
    np.testing.assert_allclose(distances.cpu().numpy(), np.array([[0., 0., 0.0108912], [0., 0., 0.0108912]], dtype=np.float64), rtol=1e-05, atol=0)
def get_unique_smiles(valid_molecules):
    """Deduplicate molecules by their canonical SMILES string; returns the
    unique SMILES as a list (order unspecified, as in a set)."""
    return list({Chem.MolToSmiles(mol) for mol in valid_molecules})
class CellDETR(nn.Module):
    """DETR-style instance segmentation network for cell images: CNN backbone,
    transformer encoder/decoder with learned query positions and 2D positional
    embeddings, attention-based mask head and a segmentation head."""

    def __init__(self, num_classes: int=1, number_of_query_positions: int=1, hidden_features=128, backbone_channels: Tuple[(Tuple[(int, int)], ...)]=((1, 64), (64, 128), (128, 256), (256, 256)), backbone_block: Type=ResNetBlock, backbone_convolution: Type=conv, backbone_normalization: Type=nn.BatchNorm2d, backbone_activation: Type=act, backbone_pooling: Type=nn.AvgPool2d, bounding_box_head_features: Tuple[(Tuple[(int, int)], ...)]=((128, 64), (64, 16), (16, 4)), bounding_box_head_activation: Type=act, classification_head_activation: Type=act, num_encoder_layers: int=3, num_decoder_layers: int=2, dropout: float=0.0, transformer_attention_heads: int=8, transformer_activation: Type=act, segmentation_attention_heads: int=8, segmentation_head_channels: Tuple[(Tuple[(int, int)], ...)]=(((128 + 8), 64), (64, 32), (32, 16)), segmentation_head_feature_channels: Tuple[(int, ...)]=(256, 128, 64), segmentation_head_block: Type=ResPACFeaturePyramidBlock, segmentation_head_convolution: Type=conv, segmentation_head_normalization: Type=nn.InstanceNorm2d, segmentation_head_activation: Type=act, segmentation_head_final_activation: Type=nn.Sigmoid) -> None:
        super(CellDETR, self).__init__()
        self.backbone = Backbone(channels=backbone_channels, block=backbone_block, convolution=backbone_convolution, normalization=backbone_normalization, activation=backbone_activation, pooling=backbone_pooling)
        # 1x1 conv projecting backbone features to the transformer width.
        self.convolution_mapping = nn.Conv2d(in_channels=backbone_channels[(- 1)][(- 1)], out_channels=hidden_features, kernel_size=(1, 1), stride=(1, 1), padding=(0, 0), bias=True)
        # Learned object queries for the transformer decoder.
        self.query_positions = nn.Parameter(data=torch.randn(number_of_query_positions, hidden_features, dtype=torch.float), requires_grad=True)
        # Learned 2D positional embeddings, half the width per axis; the 50
        # rows/columns cap the supported feature-map size.
        self.row_embedding = nn.Parameter(data=torch.randn(50, (hidden_features // 2), dtype=torch.float), requires_grad=True)
        self.column_embedding = nn.Parameter(data=torch.randn(50, (hidden_features // 2), dtype=torch.float), requires_grad=True)
        self.transformer = Transformer(d_model=hidden_features, nhead=transformer_attention_heads, num_encoder_layers=num_encoder_layers, num_decoder_layers=num_decoder_layers, dropout=dropout, dim_feedforward=(4 * hidden_features), activation=transformer_activation)
        self.segmentation_attention_head = MultiHeadAttention(query_dimension=hidden_features, hidden_features=hidden_features, number_of_heads=segmentation_attention_heads, dropout=dropout)
        self.segmentation_head = SegmentationHead(channels=segmentation_head_channels, feature_channels=segmentation_head_feature_channels, convolution=segmentation_head_convolution, normalization=segmentation_head_normalization, activation=segmentation_head_activation, block=segmentation_head_block, number_of_query_positions=number_of_query_positions, softmax=isinstance(segmentation_head_final_activation(), nn.Softmax))
        # Softmax needs an explicit dim; other activations are used as-is.
        self.segmentation_final_activation = (segmentation_head_final_activation(dim=1) if isinstance(segmentation_head_final_activation(), nn.Softmax) else segmentation_head_final_activation())
        self.point_pre_layer = nn.Conv2d(hidden_features, 1, kernel_size=1)

    def get_parameters(self, lr_main: float=0.0001, lr_backbone: float=1e-05) -> Iterable:
        """Optimizer parameter groups: reduced LR for the backbone.

        NOTE(review): references self.bounding_box_head and self.class_head,
        which are not created in this __init__ — either they are added by a
        subclass/elsewhere or this raises AttributeError; confirm.
        """
        return [{'params': self.backbone.parameters(), 'lr': lr_backbone}, {'params': self.convolution_mapping.parameters(), 'lr': lr_main}, {'params': [self.row_embedding], 'lr': lr_main}, {'params': [self.column_embedding], 'lr': lr_main}, {'params': self.transformer.parameters(), 'lr': lr_main}, {'params': self.bounding_box_head.parameters(), 'lr': lr_main}, {'params': self.class_head.parameters(), 'lr': lr_main}, {'params': self.segmentation_attention_head.parameters(), 'lr': lr_main}, {'params': self.segmentation_head.parameters(), 'lr': lr_main}]

    def get_segmentation_head_parameters(self, lr: float=1e-05) -> Iterable:
        """Parameter groups for fine-tuning only the segmentation parts."""
        return [{'params': self.segmentation_attention_head.parameters(), 'lr': lr}, {'params': self.segmentation_head.parameters(), 'lr': lr}]

    def forward(self, input: torch.Tensor) -> Tuple[(torch.Tensor, torch.Tensor, torch.Tensor)]:
        """Forward pass.

        NOTE(review): the annotation promises a 3-tuple but the function
        returns 2 values (segmentation prediction, point map) — confirm.
        """
        (features, feature_list) = self.backbone(input)
        features = self.convolution_mapping(features)
        (height, width) = features.shape[2:]
        batch_size = features.shape[0]
        # Build per-position embeddings by concatenating column and row parts.
        # NOTE(review): column_embedding is sliced/repeated with `height` and
        # row_embedding with `width`, which looks transposed relative to the
        # names — harmless for square maps; confirm for non-square inputs.
        positional_embeddings = torch.cat([self.column_embedding[:height].unsqueeze(dim=0).repeat(height, 1, 1), self.row_embedding[:width].unsqueeze(dim=1).repeat(1, width, 1)], dim=(- 1)).permute(2, 0, 1).unsqueeze(0).repeat(batch_size, 1, 1, 1)
        (latent_tensor, features_encoded) = self.transformer(features, None, self.query_positions, positional_embeddings)
        latent_tensor = latent_tensor.permute(2, 0, 1)
        # Point-wise gating of the encoded features.
        point = self.point_pre_layer(features_encoded)
        point = torch.sigmoid(point)
        Feature = ((point * features_encoded) + features_encoded)
        bounding_box_attention_masks = self.segmentation_attention_head(latent_tensor, Feature.contiguous())
        # Feature pyramid is passed shallowest-last, hence the reversed slice.
        instance_segmentation_prediction = self.segmentation_head(features.contiguous(), bounding_box_attention_masks.contiguous(), feature_list[(- 2)::(- 1)])
        return (self.segmentation_final_activation(instance_segmentation_prediction).clone(), point)
def _make_stage(transformation_module, in_channels, bottleneck_channels, out_channels, block_count, num_groups, stride_in_1x1, first_stride, dilation=1, dcn_config={}): blocks = [] stride = first_stride max_dcn_layer = dcn_config.get('max_dcn_layer', 0) for i in range(block_count): if (i < (block_count - max_dcn_layer)): block_dcn_config = {} else: block_dcn_config = dcn_config blocks.append(transformation_module(in_channels, bottleneck_channels, out_channels, num_groups, stride_in_1x1, stride, dilation=dilation, dcn_config=block_dcn_config)) stride = 1 in_channels = out_channels return nn.Sequential(*blocks)
def log_gradient(model, global_step=None):
    """Log the gradient norm of every weight parameter of `model`.

    No-op unless the global flag ``tt.arg.log_grad`` is set; parameters whose
    name lacks 'weight' or that have no gradient yet are skipped.
    """
    if not tt.arg.log_grad:
        return
    for name, param in model.named_parameters():
        if ('weight' in name) and (param.grad is not None):
            log_scalar('gradient/' + name, param.grad.norm(), global_step)
class KMeansTransformerOriginal(object):
    """Feature generator that fits a MiniBatchKMeans on scaled continuous
    features and appends per-cluster distances plus a cluster id column."""

    def __init__(self, k_fold=None):
        self._new_features = []     # names of the generated columns
        self._input_columns = []    # columns the scaler/kmeans were fit on
        self._error = None          # sticky error message from a failed fit
        self._kmeans = None
        self._scale = None
        self._k_fold = k_fold

    def fit(self, X, y):
        """Fit scaler + clustering on X (a DataFrame); idempotent once fitted."""
        if self._new_features:
            # Already fitted.
            return
        if self._error:
            raise Exception('KMeans Features not created due to error (please check errors.md). ' + self._error)
        if X.shape[1] == 0:
            self._error = f'KMeans not created. No continous features. Input data shape: {X.shape}, {y.shape}'
            raise Exception('KMeans Features not created. No continous features.')
        # Heuristic cluster count: 8*log10(n_rows), clamped to [8, n_cols].
        cluster_count = int(np.log10(X.shape[0]) * 8)
        cluster_count = min(max(8, cluster_count), X.shape[1])
        self._input_columns = X.columns.tolist()
        self._scale = StandardScaler(copy=True, with_mean=True, with_std=True)
        X = self._scale.fit_transform(X)
        self._kmeans = MiniBatchKMeans(n_clusters=cluster_count, init='k-means++')
        self._kmeans.fit(X)
        self._create_new_features_names()

    def _create_new_features_names(self):
        # One distance column per cluster, plus the assigned-cluster column.
        count = self._kmeans.cluster_centers_.shape[0]
        self._new_features = [f'Dist_Cluster_{i}' for i in range(count)] + ['Cluster']

    def transform(self, X):
        """Append the distance and cluster columns to X (mutates X in place)."""
        if self._kmeans is None:
            raise Exception('KMeans not fitted')
        scaled = self._scale.transform(X[self._input_columns])
        X[self._new_features[:-1]] = self._kmeans.transform(scaled)
        X[self._new_features[-1]] = self._kmeans.predict(scaled)
        return X
class Similarity(nn.Module):
    """Similarity-preserving knowledge-distillation loss (per feature pair):
    compares normalized batch Gram matrices of student and teacher features."""

    def __init__(self):
        super(Similarity, self).__init__()

    def forward(self, g_s, g_t):
        """Return one loss per (student, teacher) feature-map pair."""
        return [self.similarity_loss(student, teacher) for student, teacher in zip(g_s, g_t)]

    def similarity_loss(self, f_s, f_t):
        """Squared Frobenius distance between row-normalized Gram matrices,
        divided by batch_size**2."""
        batch = f_s.shape[0]
        flat_s = f_s.view(batch, -1)
        flat_t = f_t.view(batch, -1)
        gram_s = torch.nn.functional.normalize(torch.mm(flat_s, torch.t(flat_s)))
        gram_t = torch.nn.functional.normalize(torch.mm(flat_t, torch.t(flat_t)))
        diff = gram_t - gram_s
        return (diff * diff).view(-1, 1).sum(0) / (batch * batch)
def date_generator(doc):
    # Scan `doc` token by token for date mentions -- weekday names, bare years,
    # and month names with an optional adjacent day number -- then yield merged
    # (start, end, label) spans.
    # NOTE(review): `doc` looks like a spaCy Doc (tok.lemma_, tok.tag_,
    # tok.lower_) -- confirm with the caller.
    spans = []
    i = 0
    while (i < len(doc)):
        tok = doc[i]
        if (tok.lemma_ in (DAYS | DAYS_ABBRV)):
            # Weekday name or abbreviation -> single-token span.
            spans.append((i, (i + 1), 'DATE'))
        elif (tok.is_digit and re.match('\\d+$', tok.text) and (1920 < int(tok.text) < 2040)):
            # Bare number in a plausible year range.
            spans.append((i, (i + 1), 'DATE'))
        elif (tok.lemma_ in (MONTHS | MONTHS_ABBRV)):
            if (tok.tag_ == 'MD'):
                # Modal verb ("may") that only looks like a month -- skip it.
                pass
            elif ((i > 0) and re.match('\\d+$', doc[(i - 1)].text) and (int(doc[(i - 1)].text) < 32)):
                # "12 March": include the preceding day number.
                spans.append(((i - 1), (i + 1), 'DATE'))
            elif ((i > 1) and re.match('\\d+(?:st|nd|rd|th)$', doc[(i - 2)].text) and (doc[(i - 1)].lower_ == 'of')):
                # "12th of March": include ordinal + "of".
                spans.append(((i - 2), (i + 1), 'DATE'))
            elif ((i < (len(doc) - 1)) and re.match('\\d+$', doc[(i + 1)].text) and (int(doc[(i + 1)].text) < 32)):
                # "March 12": span covers both tokens; skip the consumed number.
                spans.append((i, (i + 2), 'DATE'))
                i += 1
            else:
                # Month name on its own.
                spans.append((i, (i + 1), 'DATE'))
        i += 1
    # Merge adjacent/overlapping spans before yielding.
    for (start, end, content) in utils.merge_contiguous_spans(spans, doc):
        (yield (start, end, content))
class LeakyReluPar(nn.Module):
    """Leaky ReLU with a learnable/per-element negative slope `a`:
    f(x) = x for x >= 0 and a*x for x < 0, expressed as
    ((1-a)/2)*|x| + ((1+a)/2)*x so it is smooth in `a`."""

    def forward(self, x, a):
        pos_weight = (1.0 + a) / 2.0
        neg_weight = (1.0 - a) / 2.0
        return neg_weight * torch.abs(x) + pos_weight * x
def get_font(fonts_valid: List[str]=None, font_size: int=15) -> ImageFont:
    """Return a PIL truetype font from the first installed font whose basename
    appears in `fonts_valid`; fall back to PIL's default bitmap font (which
    ignores `font_size`) with a warning.
    """
    if fonts_valid is None:
        fonts_valid = ['FreeSerif.ttf', 'FreeSans.ttf', 'Century.ttf', 'Calibri.ttf', 'arial.ttf']
    # Deterministic pick: scan system fonts in sorted order.
    system_fonts = sorted(matplotlib.font_manager.findSystemFonts(fontpaths=None, fontext='ttf'))
    for candidate in system_fonts:
        basename = os.path.basename(candidate)
        if any((basename in valid) for valid in fonts_valid):
            return ImageFont.truetype(candidate, font_size)
    warnings.warn('No suitable fonts were found in your system. A default font will be used instead (the font size will not be adjustable).')
    return ImageFont.load_default()
def utf8_visual_to_logical(text):
    # Reorder `text` from visual order (as displayed) back to logical order
    # (as stored) using ICU's inverse BiDi algorithm.
    text_dir = determine_text_direction(text)
    bidi = icu_bidi.Bidi()
    bidi.inverse = True  # run BiDi in reverse: visual -> logical
    bidi.reordering_mode = icu_bidi.UBiDiReorderingMode.UBIDI_REORDER_INVERSE_LIKE_DIRECT
    bidi.reordering_options = icu_bidi.UBiDiReorderingOption.UBIDI_OPTION_DEFAULT
    bidi.set_para(text, text_dir, None)  # None: no explicit embedding levels
    # Mirror paired characters (brackets) and keep combining marks attached.
    res = bidi.get_reordered(((0 | icu_bidi.UBidiWriteReorderedOpt.UBIDI_DO_MIRRORING) | icu_bidi.UBidiWriteReorderedOpt.UBIDI_KEEP_BASE_COMBINING))
    return res
class OPENPOSE_18():
    # Skeleton-topology constants for an 18-keypoint OpenPose body layout:
    # lists of keypoint indices and (a, b) index pairs describing limb segments.
    # NOTE(review): the exact joint semantics (0=nose, 1=neck, 14-17=eyes/ears,
    # ...) are presumably the standard OpenPose COCO-18 convention -- confirm
    # against the code that consumes these tables.
    LEFT_LINES = [(2, 3), (3, 4), (2, 8), (8, 9), (9, 10)]
    LEFT_POINTS = [2, 3, 4, 8, 9, 10]
    RIGHT_LINES = [(5, 6), (6, 7), (5, 11), (11, 12), (12, 13)]
    RIGHT_POINTS = [5, 6, 7, 11, 12, 13]
    CENTER_LINES = [(16, 14), (14, 0), (0, 15), (15, 17), (0, 1)]
    CENTER_BODY = [1, 2, 8, 11, 5]
    CENTER_POINTS = [0, 1, 14, 15, 16, 17]
def get_eventpath_byDM(total_list, d, m):
    """Return the unique path in `total_list` containing both the '/<d>/' and
    '/<m>/' segments; asserts exactly one such path exists."""
    matches = [path for path in total_list if (f'/{d}/' in path) and (f'/{m}/' in path)]
    assert (len(matches) == 1), 'supposed only 1 here'
    return matches[0]
class Unet(nn.Module):
    """3D U-Net: five ConvD encoder stages, four ConvU decoder stages with skip
    connections.  `seg1..seg3` heads and `upsample` are built but not used in
    forward() -- kept for external/deep-supervision consumers."""

    def __init__(self, c=4, n=16, dropout=0.5, norm='gn', num_classes=5):
        super(Unet, self).__init__()
        self.upsample = nn.Upsample(scale_factor=2, mode='trilinear', align_corners=False)
        # Encoder: channel width doubles each stage.
        self.convd1 = ConvD(c, n, dropout, norm, first=True)
        self.convd2 = ConvD(n, 2 * n, dropout, norm)
        self.convd3 = ConvD(2 * n, 4 * n, dropout, norm)
        self.convd4 = ConvD(4 * n, 8 * n, dropout, norm)
        self.convd5 = ConvD(8 * n, 16 * n, dropout, norm)
        # Decoder.
        self.convu4 = ConvU(16 * n, norm, True)
        self.convu3 = ConvU(8 * n, norm, True)
        self.convu2 = ConvU(4 * n, norm, True)
        self.convu1 = ConvU(2 * n, norm, True)
        # Auxiliary segmentation heads (not called in forward()).
        self.seg3 = nn.Conv3d(8 * n, num_classes, 1)
        self.seg2 = nn.Conv3d(4 * n, num_classes, 1)
        self.seg1 = nn.Conv3d(2 * n, num_classes, 1)
        # He init for convs, unit/zero init for norm layers.
        for module in self.modules():
            if isinstance(module, nn.Conv3d):
                nn.init.kaiming_normal_(module.weight, mode='fan_out', nonlinearity='relu')
            elif isinstance(module, (nn.BatchNorm3d, nn.GroupNorm)):
                nn.init.constant_(module.weight, 1)
                nn.init.constant_(module.bias, 0)

    def forward(self, x):
        enc1 = self.convd1(x)
        enc2 = self.convd2(enc1)
        enc3 = self.convd3(enc2)
        enc4 = self.convd4(enc3)
        enc5 = self.convd5(enc4)
        dec4 = self.convu4(enc5, enc4)
        dec3 = self.convu3(dec4, enc3)
        dec2 = self.convu2(dec3, enc2)
        dec1 = self.convu1(dec2, enc1)
        return dec1
class DIAPreResUnit(nn.Module):
    """Pre-activation ResNet unit with an optional DIA attention module.

    Args:
        in_channels / out_channels / stride: standard residual-unit geometry.
        bottleneck: use PreResBottleneck instead of PreResBlock.
        conv1_stride: forwarded to the bottleneck body.
        attention: optional module called as ``attention(x, hc)`` that also
            threads a recurrent (h, c) state through the stage; may be None.
    """

    def __init__(self, in_channels, out_channels, stride, bottleneck, conv1_stride, attention=None):
        super(DIAPreResUnit, self).__init__()
        # The identity branch needs a 1x1 projection whenever shape changes.
        self.resize_identity = ((in_channels != out_channels) or (stride != 1))
        if bottleneck:
            self.body = PreResBottleneck(in_channels=in_channels, out_channels=out_channels, stride=stride, conv1_stride=conv1_stride)
        else:
            self.body = PreResBlock(in_channels=in_channels, out_channels=out_channels, stride=stride)
        if self.resize_identity:
            self.identity_conv = conv1x1(in_channels=in_channels, out_channels=out_channels, stride=stride)
        self.attention = attention

    def forward(self, x, hc=None):
        identity = x
        (x, x_pre_activ) = self.body(x)
        if self.resize_identity:
            # Project the identity from the pre-activation output, as usual in
            # pre-activation ResNets.
            identity = self.identity_conv(x_pre_activ)
        # Fix: `attention` defaults to None, but the original called
        # self.attention(x, hc) unconditionally, raising TypeError for units
        # constructed without an attention module.  Guard the call; units that
        # do receive an attention module behave exactly as before.
        if (self.attention is not None):
            (x, hc) = self.attention(x, hc)
        x = (x + identity)
        return (x, hc)
class TFAutoModelForNextSentencePrediction():
    """Factory for TF models with a next-sentence-prediction head.

    Not meant to be constructed directly: use ``from_pretrained(...)`` or
    ``from_config(...)``, which dispatch on the config class via
    TF_MODEL_FOR_NEXT_SENTENCE_PREDICTION_MAPPING.
    """

    def __init__(self):
        raise EnvironmentError('TFAutoModelForNextSentencePrediction is designed to be instantiated using the `TFAutoModelForNextSentencePrediction.from_pretrained(pretrained_model_name_or_path)` or `TFAutoModelForNextSentencePrediction.from_config(config)` methods.')
    # NOTE(review): the bare _list_option_in_docstrings/_start_docstrings calls
    # below look like decorators whose '@' was lost in extraction, and
    # from_config/from_pretrained take `cls` yet carry no @classmethod --
    # confirm against the upstream transformers source before relying on this.
    _list_option_in_docstrings(TF_MODEL_FOR_NEXT_SENTENCE_PREDICTION_MAPPING, use_model_types=False)

    def from_config(cls, config):
        # Dispatch on the exact config type to the mapped TF model class.
        if (type(config) in TF_MODEL_FOR_NEXT_SENTENCE_PREDICTION_MAPPING.keys()):
            return TF_MODEL_FOR_NEXT_SENTENCE_PREDICTION_MAPPING[type(config)](config)
        raise ValueError('Unrecognized configuration class {} for this kind of TFAutoModel: {}.\nModel type should be one of {}.'.format(config.__class__, cls.__name__, ', '.join((c.__name__ for c in TF_MODEL_FOR_NEXT_SENTENCE_PREDICTION_MAPPING.keys()))))
    _list_option_in_docstrings(TF_MODEL_FOR_NEXT_SENTENCE_PREDICTION_MAPPING)
    _start_docstrings('Instantiate one of the model classes of the library---with a next sentence prediction head---from a pretrained model.', TF_AUTO_MODEL_PRETRAINED_DOCSTRING)

    def from_pretrained(cls, pretrained_model_name_or_path, *model_args, **kwargs):
        config = kwargs.pop('config', None)
        if (not isinstance(config, PretrainedConfig)):
            # Derive the config from the checkpoint when not given explicitly.
            (config, kwargs) = AutoConfig.from_pretrained(pretrained_model_name_or_path, return_unused_kwargs=True, **kwargs)
        if (type(config) in TF_MODEL_FOR_NEXT_SENTENCE_PREDICTION_MAPPING.keys()):
            return TF_MODEL_FOR_NEXT_SENTENCE_PREDICTION_MAPPING[type(config)].from_pretrained(pretrained_model_name_or_path, *model_args, config=config, **kwargs)
        raise ValueError('Unrecognized configuration class {} for this kind of TFAutoModel: {}.\nModel type should be one of {}.'.format(config.__class__, cls.__name__, ', '.join((c.__name__ for c in TF_MODEL_FOR_NEXT_SENTENCE_PREDICTION_MAPPING.keys()))))
def generate_training_labels(data_folder: Path, resume):
    """Create train/<city>/labels directories and generate congestion-class
    labels for each supported city from the speed-class and road-graph data."""
    print(f'Processing label data in {data_folder}')
    train_folder = data_folder / 'train'
    for city in ('london', 'madrid', 'melbourne'):
        # Ensure the output directory exists before generation.
        (train_folder / city / 'labels').mkdir(exist_ok=True, parents=True)
        generate_cc_labels(
            city,
            in_folder=data_folder / 'speed_classes',
            out_folder=train_folder,
            road_graph_folder=data_folder / 'road_graph',
            resume=resume,
        )
class erf_step(PhaseGenerator):
    """Phase generator returning pre-computed erf()-based step-function phase
    lists; only n = 7 and n = 23 are available."""

    def help(self):
        return 'Step function polynomial using erf(), but only for specific pre-computed values. Argument is n, where n may be 7 or 23'

    def generate(self, n):
        """Return the pre-computed phase list for n in {7, 23}; otherwise raise."""
        tables = {
            7: [1.58019, 0.0, 0.251897, -0.834542, -0.834542, 0.251897, 0.0, 0.0],
            23: [1.5708, 2.87883e-08, 5.83909e-07, 1.84144e-06, 2.09995e-05,
                 -1.20126e-05, 0.0, -0.0022922, 0.0150024, -0.064666, 0.263754,
                 -0.926685, -0.926912, 0.263756, -0.0645576, 0.014932, -0.0,
                 0.0, -8.29284e-06, 2.00459e-05, 2.09476e-06, 5.3026e-07,
                 4.38578e-08, 3.66401e-09],
        }
        if n in tables:
            return tables[n]
        raise Exception('[pyqsp.phases.erf_step] n must be 7 or 23')
def emulate_int8_tensor(w, scale=None, zero_point=None):
    """Quantize tensor `w` to emulated int8, deriving (scale, zero_point) from a
    MinMaxObserver when not supplied.  Returns (quantized_w, scale, zero_point).
    """
    if scale is None:
        # Observe the tensor once to compute quantization parameters.
        observer = torch.quantization.observer.MinMaxObserver()
        observer(w)
        scale, zero_point = observer.calculate_qparams()
        scale = scale.cuda().type_as(w)
        zero_point = zero_point.cuda().type_as(w)
    return quantize(w, scale, zero_point), scale, zero_point
def lineset_from_pose_graph(pose_graph):
    """Build an Open3D LineSet visualizing a pose graph.

    Each node contributes a 5-point camera frustum drawn in red; each edge
    contributes one line between frustum apexes, colored green when the edge is
    uncertain (loop closure) and blue otherwise (odometry).

    NOTE(review): assumes the module-level constants POINTS_PER_FRUSTUM == 5
    and EDGES_PER_FRUSTUM == 8 to match the geometry built here -- confirm.
    """
    points = []
    colors = []
    lines = []
    cnt = 0
    for node in pose_graph.nodes:
        pose = np.array(node.pose)  # 4x4 homogeneous transform
        l = 0.1  # frustum half-size
        # Fix: the original dropped the matrix-multiplication operator between
        # `pose` and the homogeneous corner points (a SyntaxError as written);
        # restore `@` to transform each corner into world coordinates.
        points.append((pose @ np.array([0, 0, 0, 1]).T)[:3])
        points.append((pose @ np.array([l, l, (2 * l), 1]).T)[:3])
        points.append((pose @ np.array([l, (- l), (2 * l), 1]).T)[:3])
        points.append((pose @ np.array([(- l), (- l), (2 * l), 1]).T)[:3])
        points.append((pose @ np.array([(- l), l, (2 * l), 1]).T)[:3])
        # Four rays from the apex plus the far-plane rectangle.
        lines.append([(cnt + 0), (cnt + 1)])
        lines.append([(cnt + 0), (cnt + 2)])
        lines.append([(cnt + 0), (cnt + 3)])
        lines.append([(cnt + 0), (cnt + 4)])
        lines.append([(cnt + 1), (cnt + 2)])
        lines.append([(cnt + 2), (cnt + 3)])
        lines.append([(cnt + 3), (cnt + 4)])
        lines.append([(cnt + 4), (cnt + 1)])
        for i in range(0, EDGES_PER_FRUSTUM):
            colors.append(np.array([1, 0, 0]))  # frustum edges are red
        cnt += POINTS_PER_FRUSTUM
    for edge in pose_graph.edges:
        s = edge.source_node_id
        t = edge.target_node_id
        # Connect the apex (first point) of each frustum.
        lines.append([(POINTS_PER_FRUSTUM * s), (POINTS_PER_FRUSTUM * t)])
        colors.append((np.array([0, 1, 0]) if edge.uncertain else np.array([0, 0, 1])))
    lineset = o3d.geometry.LineSet()
    lineset.points = o3d.utility.Vector3dVector(np.vstack(points))
    lineset.lines = o3d.utility.Vector2iVector(np.vstack(lines).astype(int))
    lineset.colors = o3d.utility.Vector3dVector(np.vstack(colors))
    return lineset
class TrainDataset(object):
    """CIFAR-10 wrapper: loads the dataset once, scales to [0, 1], centers both
    splits on the training-set mean image, one-hot encodes labels, and exposes
    the training split through __len__/__getitem__.  `batch_size` is accepted
    but unused here (kept for interface compatibility)."""

    def __init__(self, batch_size=100):
        (train_split, test_split) = cifar10.load_data()
        (x_train, y_train) = train_split
        (x_test, y_test) = test_split
        x_train = (x_train.astype('float32') / 255)
        x_test = (x_test.astype('float32') / 255)
        # Center both splits on the *training* mean image.
        mean_image = np.mean(x_train, axis=0)
        x_train -= mean_image
        x_test -= mean_image
        print('x_train shape:', x_train.shape)
        print(x_train.shape[0], 'train samples')
        print(x_test.shape[0], 'test samples')
        print('y_train shape:', y_train.shape)
        # `num_classes` is a module-level constant.
        y_train = tensorflow.keras.utils.to_categorical(y_train, num_classes)
        y_test = tensorflow.keras.utils.to_categorical(y_test, num_classes)
        self.train_images = x_train
        self.train_labels = y_train
        self.test_images = x_test
        self.test_labels = y_test

    def __len__(self):
        return len(self.train_images)

    def __getitem__(self, idx):
        return (self.train_images[idx], self.train_labels[idx])
def main():
    """Detectron-style Mask R-CNN training driver: parse args, adapt the config
    to the available GPUs / effective batch size, build the dataloader, model
    and optimizer, optionally resume from a checkpoint, then run the training
    loop with LR warm-up, step decay and periodic checkpointing."""
    args = parse_args()
    print('Called with args:')
    print(args)
    if (not torch.cuda.is_available()):
        sys.exit('Need a CUDA device to run the code.')
    if (args.cuda or (cfg.NUM_GPUS > 0)):
        cfg.CUDA = True
    else:
        raise ValueError('Need Cuda device to run !')
    # Dataset selection fixes the training set and the class count.
    if (args.dataset == 'coco2017'):
        cfg.TRAIN.DATASETS = ('coco_2017_train',)
        cfg.MODEL.NUM_CLASSES = 81
    elif (args.dataset == 'keypoints_coco2017'):
        cfg.TRAIN.DATASETS = ('keypoints_coco_2017_train',)
        cfg.MODEL.NUM_CLASSES = 2
    else:
        raise ValueError('Unexpected args.dataset: {}'.format(args.dataset))
    cfg_from_file(args.cfg_file)
    if (args.set_cfgs is not None):
        cfg_from_list(args.set_cfgs)
    # Remember the config-file values before adapting to this machine.
    original_batch_size = (cfg.NUM_GPUS * cfg.TRAIN.IMS_PER_BATCH)
    original_ims_per_batch = cfg.TRAIN.IMS_PER_BATCH
    original_num_gpus = cfg.NUM_GPUS
    if (args.batch_size is None):
        args.batch_size = original_batch_size
    # Re-derive per-GPU batch size from the GPUs actually present.
    cfg.NUM_GPUS = torch.cuda.device_count()
    assert ((args.batch_size % cfg.NUM_GPUS) == 0), ('batch_size: %d, NUM_GPUS: %d' % (args.batch_size, cfg.NUM_GPUS))
    cfg.TRAIN.IMS_PER_BATCH = (args.batch_size // cfg.NUM_GPUS)
    # Gradient accumulation over iter_size inner iterations.
    effective_batch_size = (args.iter_size * args.batch_size)
    print(('effective_batch_size = batch_size * iter_size = %d * %d' % (args.batch_size, args.iter_size)))
    print('Adaptive config changes:')
    print((' effective_batch_size: %d --> %d' % (original_batch_size, effective_batch_size)))
    print((' NUM_GPUS: %d --> %d' % (original_num_gpus, cfg.NUM_GPUS)))
    print((' IMS_PER_BATCH: %d --> %d' % (original_ims_per_batch, cfg.TRAIN.IMS_PER_BATCH)))
    # Linear-scaling rule: LR proportional to batch-size change.
    old_base_lr = cfg.SOLVER.BASE_LR
    cfg.SOLVER.BASE_LR *= (args.batch_size / original_batch_size)
    print('Adjust BASE_LR linearly according to batch_size change:\n BASE_LR: {} --> {}'.format(old_base_lr, cfg.SOLVER.BASE_LR))
    # Rescale the LR schedule so it spans the same number of epochs.
    step_scale = (original_batch_size / effective_batch_size)
    old_solver_steps = cfg.SOLVER.STEPS
    old_max_iter = cfg.SOLVER.MAX_ITER
    cfg.SOLVER.STEPS = list(map((lambda x: int(((x * step_scale) + 0.5))), cfg.SOLVER.STEPS))
    cfg.SOLVER.MAX_ITER = int(((cfg.SOLVER.MAX_ITER * step_scale) + 0.5))
    print('Adjust SOLVER.STEPS and SOLVER.MAX_ITER linearly based on effective_batch_size change:\n SOLVER.STEPS: {} --> {}\n SOLVER.MAX_ITER: {} --> {}'.format(old_solver_steps, cfg.SOLVER.STEPS, old_max_iter, cfg.SOLVER.MAX_ITER))
    if (cfg.FPN.FPN_ON and cfg.MODEL.FASTER_RCNN):
        # Scale the FPN proposal-collect size with the per-GPU batch change.
        cfg.FPN.RPN_COLLECT_SCALE = (cfg.TRAIN.IMS_PER_BATCH / original_ims_per_batch)
        print('Scale FPN rpn_proposals collect size directly propotional to the change of IMS_PER_BATCH:\n cfg.FPN.RPN_COLLECT_SCALE: {}'.format(cfg.FPN.RPN_COLLECT_SCALE))
    if (args.num_workers is not None):
        cfg.DATA_LOADER.NUM_THREADS = args.num_workers
    print(('Number of data loading threads: %d' % cfg.DATA_LOADER.NUM_THREADS))
    # Command-line overrides for solver settings.
    if (args.optimizer is not None):
        cfg.SOLVER.TYPE = args.optimizer
    if (args.lr is not None):
        cfg.SOLVER.BASE_LR = args.lr
    if (args.lr_decay_gamma is not None):
        cfg.SOLVER.GAMMA = args.lr_decay_gamma
    assert_and_infer_cfg()
    # Build the roidb (region-of-interest database) and the dataloader.
    timers = defaultdict(Timer)
    timers['roidb'].tic()
    (roidb, ratio_list, ratio_index) = combined_roidb_for_training(cfg.TRAIN.DATASETS, cfg.TRAIN.PROPOSAL_FILES)
    timers['roidb'].toc()
    roidb_size = len(roidb)
    logger.info('{:d} roidb entries'.format(roidb_size))
    logger.info('Takes %.2f sec(s) to construct roidb', timers['roidb'].average_time)
    # Effective training-set size, truncated to whole batches.
    train_size = ((roidb_size // args.batch_size) * args.batch_size)
    batchSampler = BatchSampler(sampler=MinibatchSampler(ratio_list, ratio_index), batch_size=args.batch_size, drop_last=True)
    dataset = RoiDataLoader(roidb, cfg.MODEL.NUM_CLASSES, training=True)
    dataloader = torch.utils.data.DataLoader(dataset, batch_sampler=batchSampler, num_workers=cfg.DATA_LOADER.NUM_THREADS, collate_fn=collate_minibatch)
    dataiterator = iter(dataloader)
    maskRCNN = Generalized_RCNN()
    if cfg.CUDA:
        maskRCNN.cuda()
    # Collect the names of GroupNorm affine params; they get a separate
    # weight-decay setting below.
    gn_param_nameset = set()
    for (name, module) in maskRCNN.named_modules():
        if isinstance(module, nn.GroupNorm):
            gn_param_nameset.add((name + '.weight'))
            gn_param_nameset.add((name + '.bias'))
    gn_params = []
    gn_param_names = []
    bias_params = []
    bias_param_names = []
    nonbias_params = []
    nonbias_param_names = []
    nograd_param_names = []
    # Partition trainable parameters into bias / GN / other groups.
    for (key, value) in maskRCNN.named_parameters():
        if value.requires_grad:
            if ('bias' in key):
                bias_params.append(value)
                bias_param_names.append(key)
            elif (key in gn_param_nameset):
                gn_params.append(value)
                gn_param_names.append(key)
            else:
                nonbias_params.append(value)
                nonbias_param_names.append(key)
        else:
            nograd_param_names.append(key)
    assert (((gn_param_nameset - set(nograd_param_names)) - set(bias_param_names)) == set(gn_param_names))
    # LRs start at 0 here; update_learning_rate() sets the real value (with the
    # bias group optionally getting a doubled LR) during warm-up below.
    params = [{'params': nonbias_params, 'lr': 0, 'weight_decay': cfg.SOLVER.WEIGHT_DECAY}, {'params': bias_params, 'lr': (0 * (cfg.SOLVER.BIAS_DOUBLE_LR + 1)), 'weight_decay': (cfg.SOLVER.WEIGHT_DECAY if cfg.SOLVER.BIAS_WEIGHT_DECAY else 0)}, {'params': gn_params, 'lr': 0, 'weight_decay': cfg.SOLVER.WEIGHT_DECAY_GN}]
    param_names = [nonbias_param_names, bias_param_names, gn_param_names]
    if (cfg.SOLVER.TYPE == 'SGD'):
        optimizer = torch.optim.SGD(params, momentum=cfg.SOLVER.MOMENTUM)
    elif (cfg.SOLVER.TYPE == 'Adam'):
        optimizer = torch.optim.Adam(params)
    # Optionally resume from a checkpoint or load Detectron weights.
    if args.load_ckpt:
        load_name = args.load_ckpt
        logging.info('loading checkpoint %s', load_name)
        checkpoint = torch.load(load_name, map_location=(lambda storage, loc: storage))
        net_utils.load_ckpt(maskRCNN, checkpoint['model'])
        if args.resume:
            args.start_step = (checkpoint['step'] + 1)
            if ('train_size' in checkpoint):
                if (checkpoint['train_size'] != train_size):
                    print(('train_size value: %d different from the one in checkpoint: %d' % (train_size, checkpoint['train_size'])))
            optimizer.load_state_dict(checkpoint['optimizer'])
        del checkpoint
        torch.cuda.empty_cache()
    if args.load_detectron:
        logging.info('loading Detectron weights %s', args.load_detectron)
        load_detectron_weight(maskRCNN, args.load_detectron)
    lr = optimizer.param_groups[0]['lr']  # tracked copy of the current LR
    maskRCNN = mynn.DataParallel(maskRCNN, cpu_keywords=['im_info', 'roidb'], minibatch=True)
    # Output directory + config snapshot for this run.
    args.run_name = (misc_utils.get_run_name() + '_step')
    output_dir = misc_utils.get_output_dir(args, args.run_name)
    args.cfg_filename = os.path.basename(args.cfg_file)
    if (not args.no_save):
        if (not os.path.exists(output_dir)):
            os.makedirs(output_dir)
        blob = {'cfg': yaml.dump(cfg), 'args': args}
        with open(os.path.join(output_dir, 'config_and_args.pkl'), 'wb') as f:
            pickle.dump(blob, f, pickle.HIGHEST_PROTOCOL)
        if args.use_tfboard:
            from tensorboardX import SummaryWriter
            tblogger = SummaryWriter(output_dir)
    maskRCNN.train()
    CHECKPOINT_PERIOD = int((cfg.TRAIN.SNAPSHOT_ITERS / cfg.NUM_GPUS))
    # Find the first decay step that is still ahead of start_step (resume case).
    decay_steps_ind = None
    for i in range(1, len(cfg.SOLVER.STEPS)):
        if (cfg.SOLVER.STEPS[i] >= args.start_step):
            decay_steps_ind = i
            break
    if (decay_steps_ind is None):
        decay_steps_ind = len(cfg.SOLVER.STEPS)
    training_stats = TrainingStats(args, args.disp_interval, (tblogger if (args.use_tfboard and (not args.no_save)) else None))
    try:
        logger.info('Training starts !')
        step = args.start_step
        for step in range(args.start_step, cfg.SOLVER.MAX_ITER):
            # LR warm-up phase.
            if (step < cfg.SOLVER.WARM_UP_ITERS):
                method = cfg.SOLVER.WARM_UP_METHOD
                if (method == 'constant'):
                    warmup_factor = cfg.SOLVER.WARM_UP_FACTOR
                elif (method == 'linear'):
                    alpha = (step / cfg.SOLVER.WARM_UP_ITERS)
                    warmup_factor = ((cfg.SOLVER.WARM_UP_FACTOR * (1 - alpha)) + alpha)
                else:
                    raise KeyError('Unknown SOLVER.WARM_UP_METHOD: {}'.format(method))
                lr_new = (cfg.SOLVER.BASE_LR * warmup_factor)
                net_utils.update_learning_rate(optimizer, lr, lr_new)
                lr = optimizer.param_groups[0]['lr']
                assert (lr == lr_new)
            elif (step == cfg.SOLVER.WARM_UP_ITERS):
                # Warm-up finished: jump to the base LR.
                net_utils.update_learning_rate(optimizer, lr, cfg.SOLVER.BASE_LR)
                lr = optimizer.param_groups[0]['lr']
                assert (lr == cfg.SOLVER.BASE_LR)
            # Step decay.
            if ((decay_steps_ind < len(cfg.SOLVER.STEPS)) and (step == cfg.SOLVER.STEPS[decay_steps_ind])):
                logger.info('Decay the learning on step %d', step)
                lr_new = (lr * cfg.SOLVER.GAMMA)
                net_utils.update_learning_rate(optimizer, lr, lr_new)
                lr = optimizer.param_groups[0]['lr']
                assert (lr == lr_new)
                decay_steps_ind += 1
            training_stats.IterTic()
            optimizer.zero_grad()
            # Gradient accumulation: iter_size forward/backward passes per step.
            for inner_iter in range(args.iter_size):
                try:
                    input_data = next(dataiterator)
                except StopIteration:
                    # Epoch boundary: restart the dataloader.
                    dataiterator = iter(dataloader)
                    input_data = next(dataiterator)
                for key in input_data:
                    if (key != 'roidb'):
                        input_data[key] = list(map(Variable, input_data[key]))
                net_outputs = maskRCNN(**input_data)
                training_stats.UpdateIterStats(net_outputs, inner_iter)
                loss = net_outputs['total_loss']
                loss.backward()
            optimizer.step()
            training_stats.IterToc()
            training_stats.LogIterStats(step, lr)
            if (((step + 1) % CHECKPOINT_PERIOD) == 0):
                save_ckpt(output_dir, args, step, train_size, maskRCNN, optimizer)
        # Final checkpoint after the loop completes.
        save_ckpt(output_dir, args, step, train_size, maskRCNN, optimizer)
    except (RuntimeError, KeyboardInterrupt):
        # Best-effort checkpoint on crash or Ctrl-C, then print the traceback.
        del dataiterator
        logger.info('Save ckpt on exception ...')
        save_ckpt(output_dir, args, step, train_size, maskRCNN, optimizer)
        logger.info('Save ckpt done.')
        stack_trace = traceback.format_exc()
        print(stack_trace)
    finally:
        if (args.use_tfboard and (not args.no_save)):
            tblogger.close()
class GCNModelVAE(Model):
    """Variational graph auto-encoder: a two-layer GCN encoder produces a
    Gaussian posterior over node embeddings, decoded by inner products.
    NOTE(review): TF1-style code -- layers build a static graph at construction.
    """

    def __init__(self, placeholders, num_features, num_nodes, features_nonzero, **kwargs):
        super(GCNModelVAE, self).__init__(**kwargs)
        self.inputs = placeholders['features']  # sparse feature placeholder
        self.input_dim = num_features
        self.features_nonzero = features_nonzero  # needed for sparse dropout
        self.n_samples = num_nodes
        self.adj = placeholders['adj']
        self.dropout = placeholders['dropout']
        self.build()

    def _build(self):
        # Encoder layer 1: sparse-input GCN with ReLU.
        self.hidden1 = GraphConvolutionSparse(input_dim=self.input_dim, output_dim=FLAGS.hidden1, adj=self.adj, features_nonzero=self.features_nonzero, act=tf.nn.relu, dropout=self.dropout, logging=self.logging)(self.inputs)
        # Two parallel linear GCN heads: posterior mean and log-std.
        self.z_mean = GraphConvolution(input_dim=FLAGS.hidden1, output_dim=FLAGS.hidden2, adj=self.adj, act=(lambda x: x), dropout=self.dropout, logging=self.logging)(self.hidden1)
        self.z_log_std = GraphConvolution(input_dim=FLAGS.hidden1, output_dim=FLAGS.hidden2, adj=self.adj, act=(lambda x: x), dropout=self.dropout, logging=self.logging)(self.hidden1)
        # Reparameterization trick: z = mean + eps * std.
        self.z = (self.z_mean + (tf.random_normal([self.n_samples, FLAGS.hidden2]) * tf.exp(self.z_log_std)))
        # Decoder: inner products between embeddings give edge logits.
        self.reconstructions = InnerProductDecoder(input_dim=FLAGS.hidden2, act=(lambda x: x), logging=self.logging)(self.z)
class ArchiveImageFolder(ImageFolder):
    """ImageFolder-style dataset that serves images directly from a .tar/.zip
    archive containing ``root_in_archive/<class_name>/.../image`` entries.

    Class and sample listings can be cached on disk under ``cache_dir``.  One
    archive-filesystem handle is kept per dataloader worker (handles are not
    shared across processes; see __getstate__/get_archive_fs).
    """

    def __init__(self, archive: str, cache_dir: Optional[str]=None, transform: Optional[Callable]=None, target_transform: Optional[Callable]=None, is_valid_file: Optional[Callable[([str], bool)]]=None, root_in_archive: str='') -> None:
        assert (archive.endswith('.tar') or archive.endswith('.zip')), 'Only .tar and .zip are supported'
        self._fs_cls = (TarFS if archive.endswith('.tar') else ZipFS)
        self.root_in_archive = PurePath(root_in_archive)
        self.cache_dir = (None if (cache_dir is None) else Path(cache_dir).expanduser())
        logger = logging.getLogger(__name__)
        logger.info(f'Reading archive headers from {str(archive)} with root_in_archive {root_in_archive}')
        t = time.time()
        # Open one filesystem handle keyed by the dataloader worker id
        # (None in the main process) -- see get_archive_fs().
        worker = get_worker_info()
        worker = (worker.id if worker else None)
        self.archive_fs = {worker: self._fs_cls(str(Path(archive).expanduser()))}
        # loader=None is safe: __getitem__ below never uses self.loader.
        super().__init__(archive, loader=None, transform=transform, target_transform=target_transform, is_valid_file=is_valid_file)
        logger.info(f'Done in {float((time.time() - t)):.1f} seconds.')

    def find_classes(self, directory: str) -> Tuple[(List[str], Dict[(str, int)])]:
        """List class names (top-level dirs under root_in_archive) and index them."""
        if (self.cache_dir is not None):
            try:
                return load_from_cache(((self.cache_dir / self._cache_dir_name) / 'classes.pkl'))
            except FileNotFoundError:
                pass
        archive_fs = self.get_archive_fs().opendir(str(self.root_in_archive))
        classes = sorted((entry.name for entry in archive_fs.scandir('/') if entry.is_dir))
        if (not classes):
            raise FileNotFoundError(f"Couldn't find any class folder in {str(self.root_in_archive)} inside {self.root}.")
        class_to_idx = {cls_name: i for (i, cls_name) in enumerate(classes)}
        if (self.cache_dir is not None):
            save_to_cache(((self.cache_dir / self._cache_dir_name) / 'classes.pkl'), (classes, class_to_idx))
        return (classes, class_to_idx)

    def make_dataset(self, directory: str, class_to_idx: Dict[(str, int)], extensions: Optional[Tuple[(str, ...)]]=None, is_valid_file: Optional[Callable[([str], bool)]]=None) -> List[Tuple[(str, int)]]:
        """Walk the archive and collect (path-in-archive, class_index) samples."""
        if (self.cache_dir is not None):
            try:
                return load_from_cache(((self.cache_dir / self._cache_dir_name) / 'samples.pkl'))
            except FileNotFoundError:
                pass
        if (class_to_idx is None):
            (_, class_to_idx) = self.find_classes(directory)
        elif (not class_to_idx):
            raise ValueError("'class_to_index' must have at least one entry to collect any samples.")
        both_none = ((extensions is None) and (is_valid_file is None))
        both_something = ((extensions is not None) and (is_valid_file is not None))
        if (both_none or both_something):
            raise ValueError('Both extensions and is_valid_file cannot be None or not None at the same time')
        if (extensions is not None):
            def is_valid_file(x: str) -> bool:
                return has_file_allowed_extension(x, cast(Tuple[(str, ...)], extensions))
        is_valid_file = cast(Callable[([str], bool)], is_valid_file)
        archive_fs = self.get_archive_fs().opendir(str(self.root_in_archive))
        instances = []
        available_classes = set()
        for target_class in sorted(class_to_idx.keys()):
            class_index = class_to_idx[target_class]
            target_dir_info = archive_fs.getinfo(target_class)
            if (not target_dir_info.is_dir):
                continue
            for (root, _, fnames) in sorted(archive_fs.walk(target_class)):
                root = removeprefix(root, '/')
                # fnames are fs Info objects, hence the .name accesses below.
                for fname in sorted(fnames, key=(lambda info: info.name)):
                    if is_valid_file(fname.name):
                        path = ((self.root_in_archive / root) / fname.name)
                        instances.append((str(path), class_index))
                        if (target_class not in available_classes):
                            available_classes.add(target_class)
        empty_classes = (set(class_to_idx.keys()) - available_classes)
        if empty_classes:
            msg = f"Found no valid file for the classes {', '.join(sorted(empty_classes))}. "
            if (extensions is not None):
                msg += f"Supported extensions are: {', '.join(extensions)}"
            raise FileNotFoundError(msg)
        if (self.cache_dir is not None):
            save_to_cache(((self.cache_dir / self._cache_dir_name) / 'samples.pkl'), instances)
        return instances

    def __getitem__(self, index: int) -> Tuple[(Any, Any)]:
        """Load sample `index` from the archive and apply the transforms."""
        (path, target) = self.samples[index]
        with self.get_archive_fs().openbin(path) as f:
            sample = Image.open(f).convert('RGB')
        if (self.transform is not None):
            sample = self.transform(sample)
        if (self.target_transform is not None):
            target = self.target_transform(target)
        return (sample, target)

    @property
    def _cache_dir_name(self):
        # Fix: this must be a property.  The original defined it as a plain
        # method while find_classes/make_dataset use it as an attribute in
        # `self.cache_dir / self._cache_dir_name / ...`, which would compute
        # `Path / <bound method>` and raise TypeError at runtime.
        return f'root_in_archive-{str(self.root_in_archive)}'

    def get_archive_fs(self):
        """Return (lazily opening) the archive filesystem for the current worker."""
        worker = get_worker_info()
        worker = (worker.id if worker else None)
        if (worker not in self.archive_fs):
            self.archive_fs[worker] = self._fs_cls(str(Path(self.root).expanduser()))
        return self.archive_fs[worker]

    def __del__(self):
        # Close every per-worker filesystem handle.
        for o in self.archive_fs.values():
            o.close()

    def __getstate__(self):
        # Drop open filesystem handles when pickling (e.g. into dataloader
        # workers); each worker reopens its own via get_archive_fs().
        state = dict(self.__dict__)
        state['archive_fs'] = {}
        return state
def to_numpy(tensors):
    """Convert a tensor -- or a list/tuple of tensors -- to numpy arrays via the
    active backend (`bkd`).  A list is returned for sequence inputs."""
    if not isinstance(tensors, (list, tuple)):
        return bkd.to_numpy(tensors)
    return [bkd.to_numpy(item) for item in tensors]
# Fix: the two marker lines were mangled to bare `.xfail(...)` / `.parametrize(...)`
# calls (a SyntaxError as written); restore the `@pytest.mark.` decorator form.
# `pytest` is imported at the top of this test module.
@pytest.mark.xfail('env.PYPY', reason='getrefcount is not available')
@pytest.mark.parametrize('method', [m.test_memoryview_object, m.test_memoryview_buffer_info])
def test_memoryview_refcount(method):
    """A memoryview returned from C++ must hold a reference to its buffer."""
    buf = b'\n\x0b\x0c\r'
    ref_before = sys.getrefcount(buf)
    view = method(buf)
    ref_after = sys.getrefcount(buf)
    # The returned view keeps `buf` alive ...
    assert (ref_before < ref_after)
    # ... and exposes exactly the buffer's bytes.
    assert (list(view) == list(buf))
def parse_function_fewshot(*metrics, directory='', args=None, end_signal=None):
    # Scan few-shot training logs under `directory`, extract the best accuracy
    # per (way, shot) setting from each log file, print per-file and aggregate
    # statistics (mean, max, std or 95% CI when args.ci95), and return the
    # per-setting averages.  `metrics` and `end_signal` are accepted but unused.
    print(f'Parsing files in {directory}')
    outputs = []
    file_list = os.listdir(directory)
    file_list.sort()
    for file in file_list:
        if (('log' in file) or ('pt.txt' in file)):
            num = 0
            way = 'None'
            shot = 'None'
            fpath = osp.join(directory, file)
            with open(fpath, 'r') as f:
                lines = f.readlines()
            output = OrderedDict()
            for line in lines:
                # Lines like "args.way : 5" / "args.shot : 1" define the setting.
                if ('args.way :' in line):
                    way = line.split('args.way :')[1]
                if ('args.shot :' in line):
                    shot = line.split('args.shot :')[1]
                if ((way != 'None') and (shot != 'None')):
                    if ('acc = ' in line):
                        # Track the best accuracy seen in this file.
                        num = max(float(line.split('acc =')[1]), num)
                    else:
                        pass
            # way/shot retain a trailing newline from split(); [:-1] strips it.
            exp_setting = (((way[:(- 1)] + 'way') + shot[:(- 1)]) + 'shot')
            output[exp_setting] = num
            if ((way != 'None') and (shot != 'None')):
                outputs.append(output)
            else:
                pass
    # Group values per experiment setting across files.
    metrics_results = defaultdict(list)
    for output in outputs:
        msg = ''
        for (key, value) in output.items():
            if isinstance(value, float):
                msg += f'{key}: {value:.3f}%. '
            else:
                msg += f'{key}: {value}. '
            if (key != 'file'):
                metrics_results[key].append(value)
        print(msg)
    output_results = OrderedDict()
    print('===')
    print(f'Summary of directory: {directory}')
    for (key, values) in metrics_results.items():
        avg = np.mean(values)
        max_value = np.max(values)
        # 95% confidence interval when requested, else plain std deviation.
        std = (compute_ci95(values) if args.ci95 else np.std(values))
        print(f'* {key}: {max_value:.3f}%; {avg:.3f}% +- {std:.3f}%')
        output_results[key] = avg
    print('===')
    return output_results
def form_word_to_index_dict_from_dataset(word_vec_dict):
    """Map each vocabulary word to a 1-based index, assigned in sorted key order.

    Indexing starts at 1 (index 0 is left unassigned -- presumably reserved for
    padding/unknown; confirm with the embedding-layer caller).
    """
    return {word: index for index, word in enumerate(sorted(word_vec_dict), start=1)}
class CovidNet(nn.Module):
    """COVID-Net: a PEPX-block CNN for chest X-ray classification.

    Args:
        model: ``'small'`` or ``'large'``.  The large variant adds four 1x1
            projection convolutions and uses a densely connected forward pass.
        n_classes: number of output classes (default 3).

    NOTE(review): the ``fc1`` layer assumes the final feature map is
    7x7x424 (i.e. 224x224 inputs) — confirm against the data pipeline.
    """

    def __init__(self, model: str = 'small', n_classes: int = 3):
        super(CovidNet, self).__init__()
        # (in_channels, out_channels) for every PEPX stage.
        filters = {
            'pepx1_1': [56, 56], 'pepx1_2': [56, 56], 'pepx1_3': [56, 56],
            'pepx2_1': [56, 112], 'pepx2_2': [112, 112], 'pepx2_3': [112, 112], 'pepx2_4': [112, 112],
            'pepx3_1': [112, 216], 'pepx3_2': [216, 216], 'pepx3_3': [216, 216],
            'pepx3_4': [216, 216], 'pepx3_5': [216, 216], 'pepx3_6': [216, 216],
            'pepx4_1': [216, 424], 'pepx4_2': [424, 424], 'pepx4_3': [424, 424],
        }
        self.add_module('conv1', nn.Conv2d(in_channels=3, out_channels=56, kernel_size=7, stride=2, padding=3))
        for key in filters:
            # NOTE(review): no key currently contains 'pool', so the MaxPool2d
            # branch is dead code; kept for parity with the original layout.
            if 'pool' in key:
                self.add_module(key, nn.MaxPool2d(filters[key][0], filters[key][1]))
            else:
                self.add_module(key, PEPX(filters[key][0], filters[key][1]))
        if model == 'large':
            # 1x1 projection convolutions feeding the dense skip connections.
            self.add_module('conv1_1x1', nn.Conv2d(in_channels=56, out_channels=112, kernel_size=1))
            self.add_module('conv2_1x1', nn.Conv2d(in_channels=112, out_channels=216, kernel_size=1))
            self.add_module('conv3_1x1', nn.Conv2d(in_channels=216, out_channels=424, kernel_size=1))
            self.add_module('conv4_1x1', nn.Conv2d(in_channels=424, out_channels=424, kernel_size=1))
            self.__forward__ = self.forward_large_net
        else:
            self.__forward__ = self.forward_small_net
        self.add_module('flatten', Flatten())
        self.add_module('fc1', nn.Linear(7 * 7 * 424, 512))
        self.add_module('classifier', nn.Linear(512, n_classes))
        self._initialize_weights()

    def _initialize_weights(self):
        """Kaiming init for convolutions; unit weight / zero bias for norms."""
        for m in self.modules():
            if isinstance(m, nn.Conv2d):
                nn.init.kaiming_normal_(m.weight, mode='fan_out', nonlinearity='relu')
            elif isinstance(m, (nn.BatchNorm2d, nn.GroupNorm)):
                nn.init.constant_(m.weight, 1)
                nn.init.constant_(m.bias, 0)

    def forward(self, x: torch.Tensor) -> torch.Tensor:
        # Dispatch to the variant selected in __init__.
        return self.__forward__(x)

    def forward_large_net(self, x: torch.Tensor, target: Optional[torch.Tensor] = None) -> torch.Tensor:
        """Densely connected forward pass (the 'large' variant).

        ``target`` is accepted for interface compatibility but unused.
        Returns unnormalized class logits.
        """
        x = F.max_pool2d(F.relu(self.conv1(x)), 2)
        out_conv1_1x1 = self.conv1_1x1(x)
        pepx11 = self.pepx1_1(x)
        pepx12 = self.pepx1_2(pepx11 + out_conv1_1x1)
        pepx13 = self.pepx1_3(pepx12 + pepx11 + out_conv1_1x1)
        out_conv2_1x1 = F.max_pool2d(self.conv2_1x1(pepx12 + pepx11 + pepx13 + out_conv1_1x1), 2)
        pepx21 = self.pepx2_1(F.max_pool2d(pepx13, 2) + F.max_pool2d(pepx11, 2) + F.max_pool2d(pepx12, 2) + F.max_pool2d(out_conv1_1x1, 2))
        pepx22 = self.pepx2_2(pepx21 + out_conv2_1x1)
        pepx23 = self.pepx2_3(pepx22 + pepx21 + out_conv2_1x1)
        pepx24 = self.pepx2_4(pepx23 + pepx21 + pepx22 + out_conv2_1x1)
        out_conv3_1x1 = F.max_pool2d(self.conv3_1x1(pepx22 + pepx21 + pepx23 + pepx24 + out_conv2_1x1), 2)
        pepx31 = self.pepx3_1(F.max_pool2d(pepx24, 2) + F.max_pool2d(pepx21, 2) + F.max_pool2d(pepx22, 2) + F.max_pool2d(pepx23, 2) + F.max_pool2d(out_conv2_1x1, 2))
        pepx32 = self.pepx3_2(pepx31 + out_conv3_1x1)
        pepx33 = self.pepx3_3(pepx31 + pepx32 + out_conv3_1x1)
        pepx34 = self.pepx3_4(pepx31 + pepx32 + pepx33 + out_conv3_1x1)
        pepx35 = self.pepx3_5(pepx31 + pepx32 + pepx33 + pepx34 + out_conv3_1x1)
        pepx36 = self.pepx3_6(pepx31 + pepx32 + pepx33 + pepx34 + pepx35 + out_conv3_1x1)
        out_conv4_1x1 = F.max_pool2d(self.conv4_1x1(pepx31 + pepx32 + pepx33 + pepx34 + pepx35 + pepx36 + out_conv3_1x1), 2)
        # Bug fix: the original summed F.max_pool2d(pepx32, 2) twice and
        # omitted pepx33 from this dense skip connection.
        pepx41 = self.pepx4_1(F.max_pool2d(pepx31, 2) + F.max_pool2d(pepx32, 2) + F.max_pool2d(pepx33, 2) + F.max_pool2d(pepx34, 2) + F.max_pool2d(pepx35, 2) + F.max_pool2d(pepx36, 2) + F.max_pool2d(out_conv3_1x1, 2))
        pepx42 = self.pepx4_2(pepx41 + out_conv4_1x1)
        pepx43 = self.pepx4_3(pepx41 + pepx42 + out_conv4_1x1)
        flattened = self.flatten(pepx41 + pepx42 + pepx43 + out_conv4_1x1)
        fc1out = F.relu(self.fc1(flattened))
        logits = self.classifier(fc1out)
        return logits

    def forward_small_net(self, x: torch.Tensor) -> torch.Tensor:
        """Forward pass without the 1x1 projection convolutions ('small').

        Returns unnormalized class logits.
        """
        x = F.max_pool2d(F.relu(self.conv1(x)), 2)
        pepx11 = self.pepx1_1(x)
        pepx12 = self.pepx1_2(pepx11)
        pepx13 = self.pepx1_3(pepx12 + pepx11)
        pepx21 = self.pepx2_1(F.max_pool2d(pepx13, 2) + F.max_pool2d(pepx11, 2) + F.max_pool2d(pepx12, 2))
        pepx22 = self.pepx2_2(pepx21)
        pepx23 = self.pepx2_3(pepx22 + pepx21)
        pepx24 = self.pepx2_4(pepx23 + pepx21 + pepx22)
        pepx31 = self.pepx3_1(F.max_pool2d(pepx24, 2) + F.max_pool2d(pepx21, 2) + F.max_pool2d(pepx22, 2) + F.max_pool2d(pepx23, 2))
        pepx32 = self.pepx3_2(pepx31)
        pepx33 = self.pepx3_3(pepx31 + pepx32)
        pepx34 = self.pepx3_4(pepx31 + pepx32 + pepx33)
        pepx35 = self.pepx3_5(pepx31 + pepx32 + pepx33 + pepx34)
        pepx36 = self.pepx3_6(pepx31 + pepx32 + pepx33 + pepx34 + pepx35)
        # Bug fix: the original summed F.max_pool2d(pepx32, 2) twice and
        # omitted pepx33 from this dense skip connection.
        pepx41 = self.pepx4_1(F.max_pool2d(pepx31, 2) + F.max_pool2d(pepx32, 2) + F.max_pool2d(pepx33, 2) + F.max_pool2d(pepx34, 2) + F.max_pool2d(pepx35, 2) + F.max_pool2d(pepx36, 2))
        pepx42 = self.pepx4_2(pepx41)
        pepx43 = self.pepx4_3(pepx41 + pepx42)
        flattened = self.flatten(pepx41 + pepx42 + pepx43)
        fc1out = F.relu(self.fc1(flattened))
        logits = self.classifier(fc1out)
        return logits
class LabelMapUtilTest(tf.test.TestCase):
    """Unit tests for label_map_util helpers (loading, validation, conversion)."""

    def _generate_label_map(self, num_classes):
        # Build an in-memory StringIntLabelMap proto with ids 1..num_classes;
        # display_name is the stringified id, name is 'label_<id>'.
        label_map_proto = string_int_label_map_pb2.StringIntLabelMap()
        for i in range(1, num_classes + 1):
            item = label_map_proto.item.add()
            item.id = i
            item.name = 'label_' + str(i)
            item.display_name = str(i)
        return label_map_proto

    def test_get_label_map_dict(self):
        # A label map written to disk round-trips into a name -> id dict.
        label_map_string = "\n item {\n id:2\n name:'cat'\n }\n item {\n id:1\n name:'dog'\n }\n "
        label_map_path = os.path.join(self.get_temp_dir(), 'label_map.pbtxt')
        with tf.gfile.Open(label_map_path, 'wb') as f:
            f.write(label_map_string)
        label_map_dict = label_map_util.get_label_map_dict(label_map_path)
        self.assertEqual(label_map_dict['dog'], 1)
        self.assertEqual(label_map_dict['cat'], 2)

    def test_load_bad_label_map(self):
        # id 0 is invalid (reserved), so loading must raise ValueError.
        label_map_string = "\n item {\n id:0\n name:'class that should not be indexed at zero'\n }\n item {\n id:2\n name:'cat'\n }\n item {\n id:1\n name:'dog'\n }\n "
        label_map_path = os.path.join(self.get_temp_dir(), 'label_map.pbtxt')
        with tf.gfile.Open(label_map_path, 'wb') as f:
            f.write(label_map_string)
        with self.assertRaises(ValueError):
            label_map_util.load_labelmap(label_map_path)

    def test_keep_categories_with_unique_id(self):
        # Duplicate ids collapse: only the first category per id is kept.
        label_map_proto = string_int_label_map_pb2.StringIntLabelMap()
        label_map_string = "\n item {\n id:2\n name:'cat'\n }\n item {\n id:1\n name:'child'\n }\n item {\n id:1\n name:'person'\n }\n item {\n id:1\n name:'n'\n }\n "
        text_format.Merge(label_map_string, label_map_proto)
        categories = label_map_util.convert_label_map_to_categories(label_map_proto, max_num_classes=3)
        self.assertListEqual([{'id': 2, 'name': u'cat'}, {'id': 1, 'name': u'child'}], categories)

    def test_convert_label_map_to_categories_no_label_map(self):
        # With no label map, synthetic 'category_<i>' entries are generated.
        categories = label_map_util.convert_label_map_to_categories(None, max_num_classes=3)
        expected_categories_list = [{'name': u'category_1', 'id': 1}, {'name': u'category_2', 'id': 2}, {'name': u'category_3', 'id': 3}]
        self.assertListEqual(expected_categories_list, categories)

    def test_convert_label_map_to_coco_categories(self):
        # display_name (the stringified id) becomes the category name;
        # max_num_classes truncates the 4-class map to 3 entries.
        label_map_proto = self._generate_label_map(num_classes=4)
        categories = label_map_util.convert_label_map_to_categories(label_map_proto, max_num_classes=3)
        expected_categories_list = [{'name': u'1', 'id': 1}, {'name': u'2', 'id': 2}, {'name': u'3', 'id': 3}]
        self.assertListEqual(expected_categories_list, categories)

    def test_convert_label_map_to_coco_categories_with_few_classes(self):
        # Truncation also works when max_num_classes is below the map size.
        label_map_proto = self._generate_label_map(num_classes=4)
        cat_no_offset = label_map_util.convert_label_map_to_categories(label_map_proto, max_num_classes=2)
        expected_categories_list = [{'name': u'1', 'id': 1}, {'name': u'2', 'id': 2}]
        self.assertListEqual(expected_categories_list, cat_no_offset)

    def test_create_category_index(self):
        # The category index is keyed by category id.
        categories = [{'name': u'1', 'id': 1}, {'name': u'2', 'id': 2}]
        category_index = label_map_util.create_category_index(categories)
        self.assertDictEqual({1: {'name': u'1', 'id': 1}, 2: {'name': u'2', 'id': 2}}, category_index)
def track_infer_time(buffer: list):
    """Generator-based timer that appends the elapsed wall-clock seconds to ``buffer``.

    Advance the generator once to start timing; when it is resumed (or
    closed), the time elapsed since the first ``next()`` is appended.
    Presumably wrapped by ``contextlib.contextmanager`` at the call site —
    TODO confirm.

    Bug fix (annotation only): ``buffer: [int]`` was a list *literal*, not a
    valid type annotation; corrected to ``list``.

    Args:
        buffer: mutable list that receives the measured duration in seconds.
    """
    start = time()
    yield
    end = time()
    buffer.append(end - start)
def main():
    """CLI entry point: train a CIFAR-10 classifier with SGD, Adam, or CurveBall."""
    # Any public callable in the models module is a selectable architecture.
    all_models = [name for name in dir(models) if callable(getattr(models, name))]
    parser = argparse.ArgumentParser(description='PyTorch CIFAR10 Training')
    parser.add_argument('experiment', nargs='?', default='test')
    parser.add_argument('-model', choices=all_models, default='BasicNetBN')
    parser.add_argument('-optimizer', choices=['sgd', 'adam', 'curveball'], default='curveball')
    # -1 means "use the optimizer-specific default assigned below".
    parser.add_argument('-lr', default=-1, type=float, help='learning rate')
    parser.add_argument('-momentum', type=float, default=-1, metavar='M')
    parser.add_argument('-lambda', type=float, default=1.0)
    parser.add_argument('--no-auto-lambda', action='store_true', default=False, help='disables automatic lambda estimation')
    parser.add_argument('-batch-size', default=128, type=int)
    parser.add_argument('-epochs', default=200, type=int)
    parser.add_argument('-save-interval', default=10, type=int)
    parser.add_argument('--resume', '-r', action='store_true', help='resume from checkpoint')
    parser.add_argument('-outputdir', default='data/cifar-experiments', type=str)
    parser.add_argument('-datadir', default='data/cifar', type=str)
    parser.add_argument('-device', default='cuda', type=str)
    parser.add_argument('--parallel', action='store_true', default=False)
    args = parser.parse_args()
    # Checkpoints/logs live under outputdir/<model>/<optimizer>/<experiment>.
    args.outputdir += '/' + args.model + '/' + args.optimizer + '/' + args.experiment
    if os.path.isdir(args.outputdir):
        # Interactive guard against clobbering an existing experiment.
        input('Directory already exists. Press Enter to overwrite or Ctrl+C to cancel.')
    if not torch.cuda.is_available():
        args.device = 'cpu'
    best_acc = 0  # best validation accuracy seen so far
    start_epoch = 0
    # Standard CIFAR-10 augmentation (crop + flip) and normalization.
    transform_train = transforms.Compose([transforms.RandomCrop(32, padding=2, fill=(128, 128, 128)), transforms.RandomHorizontalFlip(), transforms.ToTensor(), transforms.Normalize((0.4914, 0.4822, 0.4465), (0.2023, 0.1994, 0.201))])
    transform_test = transforms.Compose([transforms.ToTensor(), transforms.Normalize((0.4914, 0.4822, 0.4465), (0.2023, 0.1994, 0.201))])
    train_set = torchvision.datasets.CIFAR10(root=args.datadir, train=True, download=True, transform=transform_train)
    train_loader = torch.utils.data.DataLoader(train_set, batch_size=args.batch_size, num_workers=2, shuffle=True)
    test_set = torchvision.datasets.CIFAR10(root=args.datadir, train=False, download=True, transform=transform_test)
    test_loader = torch.utils.data.DataLoader(test_set, batch_size=args.batch_size, num_workers=2, shuffle=False)
    # Instantiate the selected architecture by name.
    net = getattr(models, args.model)()
    net = net.to(args.device)
    if (args.device != 'cpu') and args.parallel:
        net = torch.nn.DataParallel(net)
        torch.backends.cudnn.benchmark = True
    if args.resume:
        # Resume network weights, best accuracy, and epoch counter from the
        # last checkpoint in the output directory.
        print('Resuming from checkpoint..')
        assert os.path.isdir(args.outputdir), 'Error: no checkpoint directory found!'
        checkpoint = torch.load(args.outputdir + '/last.t7')
        net.load_state_dict(checkpoint['net'])
        best_acc = checkpoint['acc']
        start_epoch = checkpoint['epoch']
    if args.optimizer == 'sgd':
        if args.lr < 0:
            args.lr = 0.1  # default lr for SGD
        optimizer = optim.SGD(net.parameters(), lr=args.lr, momentum=0.9)
    elif args.optimizer == 'adam':
        if args.lr < 0:
            args.lr = 0.001  # default lr for Adam
        optimizer = optim.Adam(net.parameters(), lr=args.lr)
    elif args.optimizer == 'curveball':
        # 'lambda' is a Python keyword, so the parsed value must be fetched
        # with getattr instead of attribute access.
        lambd = getattr(args, 'lambda')
        optimizer = CurveBall(net.parameters(), lr=args.lr, momentum=args.momentum, lambd=lambd, auto_lambda=(not args.no_auto_lambda))
    logger = None
    # Logger is presumably an optional dependency — it may be falsy/None when
    # logging support is unavailable; confirm upstream.
    if Logger:
        logger = Logger(args.outputdir, meta=args, resume=args.resume)
    for epoch in range(start_epoch, args.epochs):
        train(args, net, args.device, train_loader, optimizer, epoch, logger)
        test(args, net, args.device, test_loader, logger)
        if logger:
            acc = logger.average()['val.accuracy']
            logger.append()
        # NOTE(review): 'acc' is only bound when a logger exists; reaching the
        # save below without one would raise NameError — confirm Logger is
        # always available in supported configurations.
        if (epoch % args.save_interval) == 0:
            print('Saving..')
            state = {'net': net.state_dict(), 'optimizer': optimizer.state_dict(), 'acc': acc, 'epoch': epoch}
            if not os.path.isdir(args.outputdir):
                os.mkdir(args.outputdir)
            torch.save(state, args.outputdir + '/last.t7')
            # Keep a copy of the best-performing checkpoint.
            if logger and (acc > best_acc):
                shutil.copyfile(args.outputdir + '/last.t7', args.outputdir + '/best.t7')
                best_acc = acc
class SupervisedDataset():
    """Bundle of a supervised learning dataset: features ``X``, target ``y``, ``meta`` info.

    NOTE(review): the bare class-level annotations and the ``cls`` first
    parameters strongly suggest this class was authored as a ``@dataclass``
    with ``@classmethod`` serialize/deserialize — no decorators are visible
    here; confirm upstream before relying on ``SupervisedDataset(X, y, meta)``
    construction or on calling these through an instance.
    """
    # Feature matrix (pandas DataFrame).
    X: pd.DataFrame
    # Target values (pandas Series).
    y: pd.Series
    # Metadata; must contain at least a 'name' key (used by asset_names/name).
    meta: dict

    def serialize(cls, obj):
        """Serialize ``obj``'s X/y/meta into three byte streams via BytesParser.

        Returns:
            Tuple of (X_bstream, y_bstream, meta_bstream).
        """
        _, X_bstream = BytesParser.serialize(obj.X)
        _, y_bstream = BytesParser.serialize(obj.y)
        _, meta_bstream = BytesParser.serialize(obj.meta)
        return (X_bstream, y_bstream, meta_bstream)

    def deserialize(cls, X_bstream: io.BytesIO, y_bstream: io.BytesIO, meta_bstream: io.BytesIO):
        """Rebuild a SupervisedDataset from the three byte streams.

        Bug fix: the constructed instance was previously built but never
        returned, so deserialize always yielded ``None``.
        """
        X = BytesParser.deserialize(MIMETYPE_DF, X_bstream)
        y = BytesParser.deserialize(MIMETYPE_SERIES, y_bstream)
        meta = BytesParser.deserialize(MIMETYPE_JSON, meta_bstream)
        return cls(X, y, meta)

    def asset_names(self):
        """File names for the three persisted assets, derived from meta['name']."""
        X_name = f"{self.meta['name']}.X.parquet"
        y_name = f"{self.meta['name']}.y.parquet"
        meta_name = f"{self.meta['name']}.meta.json"
        return (X_name, y_name, meta_name)

    def mimetypes(self):
        """MIME types matching the tuple order returned by asset_names()."""
        X_metadata = MIMETYPE_DF
        y_metadata = MIMETYPE_SERIES
        meta_metadata = MIMETYPE_JSON
        return (X_metadata, y_metadata, meta_metadata)

    def name(self):
        """Dataset name taken from the metadata dict."""
        return self.meta['name']
class TestAutoContrast(unittest.TestCase):
    """Unit tests for the AutoContrast augmentation transform."""

    def setUp(self):
        # Keys whose values must be preserved by a no-op (prob=0) transform.
        self.check_keys = ('img', 'gt_bboxes', 'gt_bboxes_labels', 'gt_masks', 'gt_ignore_flags', 'gt_seg_map')
        self.results_mask = construct_toy_data(poly2mask=True)

    def test_autocontrast(self):
        # prob=0.0 -> transform is never applied: results must be unchanged.
        transform = AutoContrast(prob=0.0)
        results_wo_autocontrast = transform(copy.deepcopy(self.results_mask))
        check_result_same(self.results_mask, results_wo_autocontrast, self.check_keys)
        # prob=1.0 -> always applied: only checks that it runs without error.
        transform = AutoContrast(prob=1.0)
        transform(copy.deepcopy(self.results_mask))

    def test_repr(self):
        # repr must expose prob and the (unused for AutoContrast) magnitude bounds.
        transform = AutoContrast(prob=1.0)
        self.assertEqual(repr(transform), 'AutoContrast(prob=1.0, level=None, min_mag=0.1, max_mag=1.9)')
def mahalanobis_metric_fast(p, mu, covi, U):
    """Mahalanobis-style distance between the batch mean of ``p`` and ``mu``.

    The inverse covariance is approximated by the low-rank product U @ U.T.
    The single scalar distance is broadcast to length ``p.size(0)`` so every
    row of the batch carries the same value.

    Note: ``covi`` is accepted for signature compatibility but is unused here;
    the low-rank factor ``U`` is used instead.
    """
    centered = torch.mean(p, dim=0, keepdim=True) - mu
    squared = centered.mm(U.mm(U.t())).mm(centered.t())
    distances = squared.diag().sqrt().expand(p.size(0))
    return distances.data
class IBN(nn.Module):
    """Instance-Batch Normalization: BatchNorm on the first channels, InstanceNorm on the rest.

    NOTE(review): ``torch.split`` with chunk size ``self.half`` yields exactly
    two chunks only when ratio == 0.5; other ratios may produce extra chunks
    that are silently dropped — confirm intended usage.
    """

    def __init__(self, planes, ratio=0.5):
        super(IBN, self).__init__()
        # Number of channels routed through BatchNorm.
        self.half = int(planes * (1 - ratio))
        self.BN = nn.BatchNorm2d(self.half)
        self.IN = nn.InstanceNorm2d(planes - self.half, affine=True)

    def forward(self, x):
        # Split along the channel dimension, normalize each part, re-join.
        chunks = torch.split(x, self.half, 1)
        bn_out = self.BN(chunks[0].contiguous())
        in_out = self.IN(chunks[1].contiguous())
        return torch.cat((bn_out, in_out), 1)
class XLNetTokenizer(PreTrainedTokenizer):
    """SentencePiece-based tokenizer for XLNet.

    Special tokens are appended at the END of sequences (``A <sep> <cls>`` or
    ``A <sep> B <sep> <cls>``) and padding is applied on the LEFT, per the
    ``padding_side`` attribute below.
    """
    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    padding_side = 'left'  # XLNet pads sequences on the left

    def __init__(self, vocab_file, do_lower_case=False, remove_space=True, keep_accents=False, bos_token='<s>', eos_token='</s>', unk_token='<unk>', sep_token='<sep>', pad_token='<pad>', cls_token='<cls>', mask_token='<mask>', additional_special_tokens=['<eop>', '<eod>'], **kwargs):
        # NOTE(review): mutable default for additional_special_tokens is shared
        # across calls — safe only if it is never mutated; confirm upstream.
        super().__init__(bos_token=bos_token, eos_token=eos_token, unk_token=unk_token, sep_token=sep_token, pad_token=pad_token, cls_token=cls_token, mask_token=mask_token, additional_special_tokens=additional_special_tokens, **kwargs)
        # Segment id assigned to padding positions (XLNet convention: 3).
        self._pad_token_type_id = 3
        try:
            import sentencepiece as spm
        except ImportError:
            logger.warning('You need to install SentencePiece to use XLNetTokenizer: install sentencepiece')
            raise
        self.do_lower_case = do_lower_case
        self.remove_space = remove_space
        self.keep_accents = keep_accents
        self.vocab_file = vocab_file
        self.sp_model = spm.SentencePieceProcessor()
        self.sp_model.Load(vocab_file)

    def vocab_size(self):
        # NOTE(review): get_vocab() below uses self.vocab_size as an int,
        # which implies this should be a @property; no decorator is visible
        # in this rendering — confirm upstream.
        return len(self.sp_model)

    def get_vocab(self):
        """Return the full token -> id mapping, including added tokens."""
        vocab = {self.convert_ids_to_tokens(i): i for i in range(self.vocab_size)}
        vocab.update(self.added_tokens_encoder)
        return vocab

    def __getstate__(self):
        # The SentencePiece processor is not picklable: drop it from the
        # state and re-Load it from vocab_file on unpickle.
        state = self.__dict__.copy()
        state['sp_model'] = None
        return state

    def __setstate__(self, d):
        self.__dict__ = d
        try:
            import sentencepiece as spm
        except ImportError:
            logger.warning('You need to install SentencePiece to use XLNetTokenizer: install sentencepiece')
            raise
        self.sp_model = spm.SentencePieceProcessor()
        self.sp_model.Load(self.vocab_file)

    def preprocess_text(self, inputs):
        """Normalize raw text before tokenization.

        Collapses whitespace (if remove_space), unifies LaTeX-style quotes,
        optionally strips accents (NFKD + drop combining marks) and lowercases.
        """
        if self.remove_space:
            outputs = ' '.join(inputs.strip().split())
        else:
            outputs = inputs
        outputs = outputs.replace('``', '"').replace("''", '"')
        if not self.keep_accents:
            outputs = unicodedata.normalize('NFKD', outputs)
            outputs = ''.join([c for c in outputs if not unicodedata.combining(c)])
        if self.do_lower_case:
            outputs = outputs.lower()
        return outputs

    def _tokenize(self, text, sample=False):
        """Tokenize with SentencePiece; sample=True draws from the segmentation lattice."""
        text = self.preprocess_text(text)
        if not sample:
            pieces = self.sp_model.EncodeAsPieces(text)
        else:
            pieces = self.sp_model.SampleEncodeAsPieces(text, 64, 0.1)
        new_pieces = []
        for piece in pieces:
            # Split pieces like '9,' so the digits and the trailing comma
            # become separate tokens.
            if len(piece) > 1 and piece[-1] == str(',') and piece[-2].isdigit():
                cur_pieces = self.sp_model.EncodeAsPieces(piece[:-1].replace(SPIECE_UNDERLINE, ''))
                if piece[0] != SPIECE_UNDERLINE and cur_pieces[0][0] == SPIECE_UNDERLINE:
                    # Avoid duplicating the word-boundary marker on the first piece.
                    if len(cur_pieces[0]) == 1:
                        cur_pieces = cur_pieces[1:]
                    else:
                        cur_pieces[0] = cur_pieces[0][1:]
                cur_pieces.append(piece[-1])
                new_pieces.extend(cur_pieces)
            else:
                new_pieces.append(piece)
        return new_pieces

    def _convert_token_to_id(self, token):
        """Token string -> vocabulary id (via the SentencePiece model)."""
        return self.sp_model.PieceToId(token)

    def _convert_id_to_token(self, index):
        """Vocabulary id -> token string (via the SentencePiece model)."""
        return self.sp_model.IdToPiece(index)

    def convert_tokens_to_string(self, tokens):
        """Join pieces and map the SentencePiece underline back to spaces."""
        out_string = ''.join(tokens).replace(SPIECE_UNDERLINE, ' ').strip()
        return out_string

    def build_inputs_with_special_tokens(self, token_ids_0: List[int], token_ids_1: Optional[List[int]]=None) -> List[int]:
        """XLNet input format: ``A <sep> <cls>`` or ``A <sep> B <sep> <cls>``."""
        sep = [self.sep_token_id]
        cls = [self.cls_token_id]
        if token_ids_1 is None:
            return token_ids_0 + sep + cls
        return token_ids_0 + sep + token_ids_1 + sep + cls

    def get_special_tokens_mask(self, token_ids_0: List[int], token_ids_1: Optional[List[int]]=None, already_has_special_tokens: bool=False) -> List[int]:
        """Return a mask with 1 at special-token positions and 0 elsewhere."""
        if already_has_special_tokens:
            if token_ids_1 is not None:
                raise ValueError('You should not supply a second sequence if the provided sequence of ids is already formated with special tokens for the model.')
            return list(map(lambda x: 1 if x in [self.sep_token_id, self.cls_token_id] else 0, token_ids_0))
        if token_ids_1 is not None:
            # A <sep> B <sep> <cls> -> 0s for tokens, 1s for sep/sep/cls.
            return ([0] * len(token_ids_0)) + [1] + ([0] * len(token_ids_1)) + [1, 1]
        return ([0] * len(token_ids_0)) + [1, 1]

    def create_token_type_ids_from_sequences(self, token_ids_0: List[int], token_ids_1: Optional[List[int]]=None) -> List[int]:
        """Segment ids: 0 for sequence A (+sep), 1 for sequence B (+sep), 2 for <cls>."""
        sep = [self.sep_token_id]
        cls_segment_id = [2]
        if token_ids_1 is None:
            return (len(token_ids_0 + sep) * [0]) + cls_segment_id
        return (len(token_ids_0 + sep) * [0]) + (len(token_ids_1 + sep) * [1]) + cls_segment_id

    def save_vocabulary(self, save_directory):
        """Copy the SentencePiece model file into ``save_directory``.

        Returns a 1-tuple with the output path, or None (after logging an
        error) if save_directory is not a directory.
        """
        if not os.path.isdir(save_directory):
            logger.error('Vocabulary path ({}) should be a directory'.format(save_directory))
            return
        out_vocab_file = os.path.join(save_directory, VOCAB_FILES_NAMES['vocab_file'])
        # Skip the copy when the tokenizer was loaded from that exact file.
        if os.path.abspath(self.vocab_file) != os.path.abspath(out_vocab_file):
            copyfile(self.vocab_file, out_vocab_file)
        return (out_vocab_file,)
class TokenKind(object):
    """A named integer constant with a class-level registry.

    Registered kinds are reachable both via ``TokenKind.<name>`` attributes
    and via ``from_value``.

    NOTE(review): ``from_value`` and ``register`` take no ``self``/``cls`` —
    they appear intended as @staticmethods (decorators may have been lost in
    this rendering); they still work when invoked through the class object.
    """
    # Maps integer value -> registered TokenKind instance.
    _value_map = {}

    def __init__(self, value, name):
        self.value = value
        self.name = name

    def __repr__(self):
        return 'TokenKind.%s' % (self.name,)

    def from_value(value):
        """Look up a previously registered kind by its integer value."""
        kind = TokenKind._value_map.get(value, None)
        if kind is None:
            raise ValueError('Unknown TokenKind: %d' % value)
        return kind

    def register(value, name):
        """Create a kind, record it in the registry, and expose it as TokenKind.<name>."""
        if value in TokenKind._value_map:
            raise ValueError('TokenKind already registered: %d' % value)
        new_kind = TokenKind(value, name)
        TokenKind._value_map[value] = new_kind
        setattr(TokenKind, name, new_kind)
class Cora(BaseData):
    """Cora citation-network dataset descriptor (2708 vertices, 7 classes).

    Registers the on-disk assets (features, edge list, labels, split masks)
    together with their md5 checksums, loaders, and preprocessing pipelines.
    """

    def __init__(self, data_root: Optional[str]=None) -> None:
        super().__init__('cora', data_root)
        # NOTE(review): the md5 strings for 'labels' and 'val_mask' are
        # shorter than 32 hex chars and look truncated — verify against the
        # published checksums before relying on integrity verification.
        self._content = {
            'num_classes': 7,
            'num_vertices': 2708,
            'num_edges': 10858,
            'dim_features': 1433,
            # Node features: pickled matrix, converted to tensor and L1-normalized per row.
            'features': {'upon': [{'filename': 'features.pkl', 'md5': '05b45e9c38cc95f4fc44b3668cc9ddc9'}], 'loader': load_from_pickle, 'preprocess': [to_tensor, partial(norm_ft, ord=1)]},
            # Edge list: loaded as-is (no preprocessing).
            'edge_list': {'upon': [{'filename': 'edge_list.pkl', 'md5': 'f488389c1edd0d898ce273fbd27822b3'}], 'loader': load_from_pickle},
            # Class labels, cast to a long tensor for classification losses.
            'labels': {'upon': [{'filename': 'labels.pkl', 'md5': 'ec6a36cb583c28bdae1d'}], 'loader': load_from_pickle, 'preprocess': [to_long_tensor]},
            # Train/val/test split masks, cast to boolean tensors.
            'train_mask': {'upon': [{'filename': 'train_mask.pkl', 'md5': 'a11357a40e1f0b5cce728d1a961b8e13'}], 'loader': load_from_pickle, 'preprocess': [to_bool_tensor]},
            'val_mask': {'upon': [{'filename': 'val_mask.pkl', 'md5': '355544dabcfa74d30539a71'}], 'loader': load_from_pickle, 'preprocess': [to_bool_tensor]},
            'test_mask': {'upon': [{'filename': 'test_mask.pkl', 'md5': 'bbfc87d661560f55f6946f8cb9d602b9'}], 'loader': load_from_pickle, 'preprocess': [to_bool_tensor]},
        }
def getFeat(I, net, transform):
    """Extract an L2-normalized feature vector for image ``I`` using ``net``.

    The image is preprocessed with ``transform``, given a batch dimension,
    and moved to the GPU before the forward pass.

    NOTE(review): requires a CUDA device (input is moved with ``.cuda()``).
    """
    raw = net(transform(I).unsqueeze(0).cuda())
    vec = raw.data.squeeze()
    # Divide by the L2 norm, broadcast to the feature shape.
    norm = torch.sum(vec ** 2, dim=0, keepdim=True).expand(vec.size()) ** 0.5
    return vec / norm