class EsmConfig(PretrainedConfig):
model_type = 'esm'
def __init__(self, vocab_size=None, mask_token_id=None, pad_token_id=None, hidden_size=768, num_hidden_layers=12, num_attention_heads=12, intermediate_size=3072, hidden_dropout_prob=0.1, attention_probs_dropout_prob=0.1, max_position_embeddings=1026, initializer_range=0.02, layer_norm_eps=1e-12, position_embedding_type='absolute', use_cache=True, emb_layer_norm_before=None, token_dropout=False, is_folding_model=False, esmfold_config=None, vocab_list=None, **kwargs):
super().__init__(pad_token_id=pad_token_id, mask_token_id=mask_token_id, **kwargs)
self.vocab_size = vocab_size
self.hidden_size = hidden_size
self.num_hidden_layers = num_hidden_layers
self.num_attention_heads = num_attention_heads
self.intermediate_size = intermediate_size
self.hidden_dropout_prob = hidden_dropout_prob
self.attention_probs_dropout_prob = attention_probs_dropout_prob
self.max_position_embeddings = max_position_embeddings
self.initializer_range = initializer_range
self.layer_norm_eps = layer_norm_eps
self.position_embedding_type = position_embedding_type
self.use_cache = use_cache
self.emb_layer_norm_before = emb_layer_norm_before
self.token_dropout = token_dropout
self.is_folding_model = is_folding_model
if is_folding_model:
if (esmfold_config is None):
logger.info('No esmfold_config supplied for folding model, using default values.')
esmfold_config = EsmFoldConfig()
elif isinstance(esmfold_config, dict):
esmfold_config = EsmFoldConfig(**esmfold_config)
self.esmfold_config = esmfold_config
if (vocab_list is None):
logger.warning('No vocab_list supplied for folding model, assuming the ESM-2 vocabulary!')
self.vocab_list = get_default_vocab_list()
else:
self.vocab_list = vocab_list
else:
self.esmfold_config = None
self.vocab_list = None
if ((self.esmfold_config is not None) and getattr(self.esmfold_config, 'use_esm_attn_map', False)):
raise ValueError('The HuggingFace port of ESMFold does not support use_esm_attn_map at this time!')
def to_dict(self):
output = super().to_dict()
if isinstance(self.esmfold_config, EsmFoldConfig):
output['esmfold_config'] = self.esmfold_config.to_dict()
        return output
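
# Illustrative usage (not from the original source; the token ids below are
# assumed ESM-2 values): build a config and round-trip it through a dict.
config = EsmConfig(vocab_size=33, mask_token_id=32, pad_token_id=1)
assert config.to_dict()['vocab_size'] == 33
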
def convert_to_coco_dict(dataset_name):
dataset_dicts = DatasetCatalog.get(dataset_name)
metadata = MetadataCatalog.get(dataset_name)
if hasattr(metadata, 'thing_dataset_id_to_contiguous_id'):
reverse_id_mapping = {v: k for (k, v) in metadata.thing_dataset_id_to_contiguous_id.items()}
reverse_id_mapper = (lambda contiguous_id: reverse_id_mapping[contiguous_id])
else:
reverse_id_mapper = (lambda contiguous_id: contiguous_id)
categories = [{'id': reverse_id_mapper(id), 'name': name} for (id, name) in enumerate(metadata.thing_classes)]
logger.info('Converting dataset dicts into COCO format')
coco_images = []
coco_annotations = []
for (image_id, image_dict) in enumerate(dataset_dicts):
coco_image = {'id': image_dict.get('image_id', image_id), 'width': int(image_dict['width']), 'height': int(image_dict['height']), 'file_name': str(image_dict['file_name'])}
coco_images.append(coco_image)
anns_per_image = image_dict.get('annotations', [])
for annotation in anns_per_image:
coco_annotation = {}
bbox = annotation['bbox']
if isinstance(bbox, np.ndarray):
if (bbox.ndim != 1):
raise ValueError(f'bbox has to be 1-dimensional. Got shape={bbox.shape}.')
bbox = bbox.tolist()
if (len(bbox) not in [4, 5]):
                raise ValueError(f'bbox has to have length 4 or 5. Got {bbox}.')
from_bbox_mode = annotation['bbox_mode']
to_bbox_mode = (BoxMode.XYWH_ABS if (len(bbox) == 4) else BoxMode.XYWHA_ABS)
bbox = BoxMode.convert(bbox, from_bbox_mode, to_bbox_mode)
if ('segmentation' in annotation):
segmentation = annotation['segmentation']
if isinstance(segmentation, list):
polygons = PolygonMasks([segmentation])
area = polygons.area()[0].item()
elif isinstance(segmentation, dict):
area = mask_util.area(segmentation).item()
else:
raise TypeError(f'Unknown segmentation type {type(segmentation)}!')
elif (to_bbox_mode == BoxMode.XYWH_ABS):
bbox_xy = BoxMode.convert(bbox, to_bbox_mode, BoxMode.XYXY_ABS)
area = Boxes([bbox_xy]).area()[0].item()
else:
area = RotatedBoxes([bbox]).area()[0].item()
if ('keypoints' in annotation):
keypoints = annotation['keypoints']
for (idx, v) in enumerate(keypoints):
if ((idx % 3) != 2):
keypoints[idx] = (v - 0.5)
if ('num_keypoints' in annotation):
num_keypoints = annotation['num_keypoints']
else:
num_keypoints = sum(((kp > 0) for kp in keypoints[2::3]))
coco_annotation['id'] = (len(coco_annotations) + 1)
coco_annotation['image_id'] = coco_image['id']
coco_annotation['bbox'] = [round(float(x), 3) for x in bbox]
coco_annotation['area'] = float(area)
coco_annotation['iscrowd'] = int(annotation.get('iscrowd', 0))
coco_annotation['category_id'] = int(reverse_id_mapper(annotation['category_id']))
if ('keypoints' in annotation):
coco_annotation['keypoints'] = keypoints
coco_annotation['num_keypoints'] = num_keypoints
if ('segmentation' in annotation):
seg = coco_annotation['segmentation'] = annotation['segmentation']
if isinstance(seg, dict):
counts = seg['counts']
if (not isinstance(counts, str)):
seg['counts'] = counts.decode('ascii')
coco_annotations.append(coco_annotation)
logger.info(f'Conversion finished, #images: {len(coco_images)}, #annotations: {len(coco_annotations)}')
info = {'date_created': str(datetime.datetime.now()), 'description': 'Automatically generated COCO json file for Detectron2.'}
coco_dict = {'info': info, 'images': coco_images, 'categories': categories, 'licenses': None}
if (len(coco_annotations) > 0):
coco_dict['annotations'] = coco_annotations
    return coco_dict
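
# Hypothetical usage sketch (the dataset name is assumed, not from the
# original source): dump the converted dict to disk with the json module.
import json

coco_dict = convert_to_coco_dict('my_dataset_train')
with open('my_dataset_train_coco.json', 'w') as f:
    json.dump(coco_dict, f)
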
class MyNewExpectedFlux(ExpectedFlux):
def __init__(self, config):
super().__init__()
pass
def compute_expected_flux(self, forest):
        pass
def define_saver(exclude=None):
variables = []
exclude = (exclude or [])
exclude = [re.compile(regex) for regex in exclude]
for variable in tf.global_variables():
if any((regex.match(variable.name) for regex in exclude)):
continue
variables.append(variable)
saver = tf.train.Saver(variables, keep_checkpoint_every_n_hours=5)
    return saver
def build_dataset(src_trgs_pairs, opt, mode='one2one', include_original=True):
word2idx = opt.vocab['word2idx']
return_examples = []
oov_target = 0
max_oov_len = 0
max_oov_sent = ''
for (idx, (source, targets)) in enumerate(src_trgs_pairs):
src = [(word2idx[w] if ((w in word2idx) and (word2idx[w] < opt.vocab_size)) else word2idx[UNK_WORD]) for w in source]
(src_oov, oov_dict, oov_list) = extend_vocab_OOV(source, word2idx, opt.vocab_size, opt.max_unk_words)
examples = []
for target in targets:
example = {}
if include_original:
example['src_str'] = source
example['trg_str'] = target
example['src'] = src
example['src_oov'] = src_oov
example['oov_dict'] = oov_dict
example['oov_list'] = oov_list
if (len(oov_list) > max_oov_len):
max_oov_len = len(oov_list)
max_oov_sent = source
trg = [(word2idx[w] if ((w in word2idx) and (word2idx[w] < opt.vocab_size)) else word2idx[UNK_WORD]) for w in target]
example['trg'] = trg
trg_copy = []
for w in target:
if ((w in word2idx) and (word2idx[w] < opt.vocab_size)):
trg_copy.append(word2idx[w])
elif (w in oov_dict):
trg_copy.append(oov_dict[w])
else:
trg_copy.append(word2idx[UNK_WORD])
example['trg_copy'] = trg_copy
if any([(w >= opt.vocab_size) for w in trg_copy]):
oov_target += 1
if (mode == 'one2one'):
return_examples.append(example)
else:
examples.append(example)
if ((mode == 'one2many') and (len(examples) > 0)):
o2m_example = {}
keys = examples[0].keys()
for key in keys:
if (key.startswith('src') or key.startswith('oov') or key.startswith('title')):
o2m_example[key] = examples[0][key]
else:
o2m_example[key] = [e[key] for e in examples]
if include_original:
assert (len(o2m_example['src']) == len(o2m_example['src_oov']) == len(o2m_example['src_str']))
assert (len(o2m_example['oov_dict']) == len(o2m_example['oov_list']))
assert (len(o2m_example['trg']) == len(o2m_example['trg_copy']) == len(o2m_example['trg_str']))
else:
assert (len(o2m_example['src']) == len(o2m_example['src_oov']))
assert (len(o2m_example['oov_dict']) == len(o2m_example['oov_list']))
assert (len(o2m_example['trg']) == len(o2m_example['trg_copy']))
return_examples.append(o2m_example)
logging.info(('Find #(oov_target)/#(all) = %d/%d' % (oov_target, len(return_examples))))
logging.info(('Find max_oov_len = %d' % max_oov_len))
logging.info(('max_oov sentence: %s' % str(max_oov_sent)))
    return return_examples
class Params():
def __init__(self, json_path):
self.update(json_path)
def save(self, json_path):
with open(json_path, 'w') as f:
json.dump(self.__dict__, f, indent=4)
def update(self, json_path):
with open(json_path) as f:
params = json.load(f)
self.__dict__.update(params)
def dict(self):
        return self.__dict__
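
# Usage sketch with assumed file names (illustrative only): Params exposes
# every JSON key as an attribute and can persist updates back to disk.
params = Params('params.json')      # assumes params.json exists
params.learning_rate = 1e-4         # plain attribute assignment
params.save('params_updated.json')
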
class MemComputer():
def __init__(self, net_def, np_data_type):
self.net_def = net_def
self.np_data_type = np_data_type
self.const_tensor_names = []
for const_tensor in net_def.tensors:
self.const_tensor_names.append(const_tensor.name)
self.input_names = []
for input_info in net_def.input_info:
self.input_names.append(input_info.name)
def init_computer(self):
self.free_mem_list = []
self.used_mem_list = []
self.buffer_size = 0
self.ref_counts = {}
for op in self.net_def.op:
for tensor_name in op.input:
if ((tensor_name in self.const_tensor_names) or (tensor_name in self.input_names)):
continue
if (tensor_name not in self.ref_counts):
self.ref_counts[tensor_name] = 0
self.ref_counts[tensor_name] += 1
def get_mem_size(self, op, output_shape):
np_data_type = self.np_data_type
if (len(op.output_type) > 0):
np_data_type = data_type_to_np_dt(op.output_type[0], self.np_data_type)
data_type_bytes = np.dtype(np_data_type).itemsize
if ((op.type == 'WinogradTransform') or (op.type == 'GEMM')):
mace_check((len(output_shape) == 4), 'WinogradTransform and GEMM only support 4-dim')
mem_size = ((((output_shape[2] * output_shape[3]) * output_shape[0]) * int(((output_shape[1] + 3) / 4))) * 4)
else:
dim_size = len(output_shape)
if (dim_size > 0):
mem_size = (int(((output_shape[(dim_size - 1)] + 3) / 4)) * 4)
for i in range((dim_size - 1)):
mem_size *= output_shape[i]
else:
print(("the op %s's output dim size is 0" % op.type))
mem_size = 0
return (mem_size * data_type_bytes)
def remove_mem_block_by_name(self, mem_list, tensor_name):
return_mem_block = None
for mem_block in mem_list:
if (tensor_name == mem_block.tensor_name):
return_mem_block = mem_block
mem_list.remove(mem_block)
break
return return_mem_block
def fake_new(self, op):
output_size = len(op.output)
for i in range(output_size):
mem_size = self.get_mem_size(op, op.output_shape[i].dims)
final_mem_block = None
reused = False
for mem_block in self.free_mem_list:
if (mem_block.size >= mem_size):
mem_block.tensor_name = op.output[i]
final_mem_block = mem_block
self.free_mem_list.remove(mem_block)
mace_check((final_mem_block is not None), 'Error: final_mem_block should not be None')
reused = True
break
if (not reused):
final_mem_block = MemBlock(op.output[i], self.buffer_size, mem_size)
self.buffer_size += mem_size
op.mem_id.append(final_mem_block.offset)
self.used_mem_list.append(final_mem_block)
def fake_delete(self, op):
for tensor_name in op.input:
if ((tensor_name in self.const_tensor_names) or (tensor_name in self.input_names)):
continue
mace_check(((tensor_name in self.ref_counts) and (self.ref_counts[tensor_name] > 0)), 'Invalid: ref_count is 0.')
self.ref_counts[tensor_name] -= 1
            if (self.ref_counts[tensor_name] == 0):
mem_block = self.remove_mem_block_by_name(self.used_mem_list, tensor_name)
mace_check((mem_block is not None), ('error, can not find tensor: %s' % tensor_name))
self.free_mem_list.append(mem_block)
self.free_mem_list.sort(key=(lambda mem_block: mem_block.size))
    def fake_execute_op(self, op):
        # fake_new already iterates over all of the op's outputs, so a single
        # call allocates every output exactly once
        self.fake_new(op)
        self.fake_delete(op)
self.fake_delete(op)
def compute(self):
self.init_computer()
for op in self.net_def.op:
self.fake_execute_op(op)
        return self.buffer_size
class RGBD_sal(nn.Module):
def __init__(self):
super(RGBD_sal, self).__init__()
feats = list(models.vgg16_bn(pretrained=True).features.children())
self.conv0 = nn.Conv2d(4, 64, kernel_size=3, padding=1)
self.conv1 = nn.Sequential(*feats[1:6])
self.conv2 = nn.Sequential(*feats[6:13])
self.conv3 = nn.Sequential(*feats[13:23])
self.conv4 = nn.Sequential(*feats[23:33])
self.conv5 = nn.Sequential(*feats[33:43])
self.dem1 = PAFEM(512, 512)
self.dem2 = nn.Sequential(nn.Conv2d(512, 256, kernel_size=3, padding=1), nn.BatchNorm2d(256), nn.PReLU())
self.dem3 = nn.Sequential(nn.Conv2d(256, 128, kernel_size=3, padding=1), nn.BatchNorm2d(128), nn.PReLU())
self.dem4 = nn.Sequential(nn.Conv2d(128, 64, kernel_size=3, padding=1), nn.BatchNorm2d(64), nn.PReLU())
self.dem5 = nn.Sequential(nn.Conv2d(64, 32, kernel_size=3, padding=1), nn.BatchNorm2d(32), nn.PReLU())
self.fuse_1 = nn.Sequential(nn.Conv2d(512, 256, kernel_size=1), nn.BatchNorm2d(256), nn.PReLU(), nn.Conv2d(256, 1, kernel_size=3, padding=1))
self.fuse_2 = nn.Sequential(nn.Conv2d(256, 128, kernel_size=1), nn.BatchNorm2d(128), nn.PReLU(), nn.Conv2d(128, 1, kernel_size=3, padding=1))
self.fuse_3 = nn.Sequential(nn.Conv2d(128, 64, kernel_size=1), nn.BatchNorm2d(64), nn.PReLU(), nn.Conv2d(64, 1, kernel_size=3, padding=1))
self.fuse_4 = nn.Sequential(nn.Conv2d(64, 32, kernel_size=1), nn.BatchNorm2d(32), nn.PReLU(), nn.Conv2d(32, 1, kernel_size=3, padding=1))
self.fuse_5 = nn.Sequential(nn.Conv2d(32, 16, kernel_size=1), nn.BatchNorm2d(16), nn.PReLU(), nn.Conv2d(16, 1, kernel_size=3, padding=1))
self.output1 = nn.Sequential(nn.Conv2d(512, 256, kernel_size=3, padding=1), nn.BatchNorm2d(256), nn.PReLU())
self.output1_rev = nn.Sequential(nn.Conv2d(512, 256, kernel_size=3, padding=1), nn.BatchNorm2d(256), nn.PReLU())
self.output2 = nn.Sequential(nn.Conv2d(256, 128, kernel_size=3, padding=1), nn.BatchNorm2d(128), nn.PReLU())
self.output2_rev = nn.Sequential(nn.Conv2d(256, 128, kernel_size=3, padding=1), nn.BatchNorm2d(128), nn.PReLU())
self.output3 = nn.Sequential(nn.Conv2d(128, 64, kernel_size=3, padding=1), nn.BatchNorm2d(64), nn.PReLU())
self.output3_rev = nn.Sequential(nn.Conv2d(128, 64, kernel_size=3, padding=1), nn.BatchNorm2d(64), nn.PReLU())
self.output4 = nn.Sequential(nn.Conv2d(64, 32, kernel_size=3, padding=1), nn.BatchNorm2d(32), nn.PReLU())
self.output4_rev = nn.Sequential(nn.Conv2d(64, 32, kernel_size=3, padding=1), nn.BatchNorm2d(32), nn.PReLU())
self.output5 = nn.Sequential(nn.Conv2d(32, 1, kernel_size=3, padding=1))
self.output5_rev = nn.Sequential(nn.Conv2d(32, 1, kernel_size=3, padding=1))
self.fuseout = nn.Sequential(nn.Conv2d(2, 1, kernel_size=3, padding=1), nn.PReLU())
for m in self.modules():
if (isinstance(m, nn.ReLU) or isinstance(m, nn.Dropout)):
m.inplace = True
def forward(self, x, depth):
input = x
(B, _, _, _) = input.size()
c0 = self.conv0(torch.cat((x, depth), 1))
c1 = self.conv1(c0)
c2 = self.conv2(c1)
c3 = self.conv3(c2)
c4 = self.conv4(c3)
c5 = self.conv5(c4)
dem1 = self.dem1(c5)
dem2 = self.dem2(c4)
dem3 = self.dem3(c3)
dem4 = self.dem4(c2)
dem5 = self.dem5(c1)
dem1_attention = F.sigmoid(self.fuse_1((dem1 + F.upsample(depth, size=dem1.size()[2:], mode='bilinear'))))
output1 = self.output1((dem1 * (dem1_attention * (F.upsample(depth, size=dem1.size()[2:], mode='bilinear') + dem1_attention))))
output1_rev = self.output1_rev((dem1 * ((1 - dem1_attention) * (F.upsample(depth, size=dem1.size()[2:], mode='bilinear') + (1 - dem1_attention)))))
dem2_attention = F.sigmoid(self.fuse_2(((dem2 + F.upsample(output1, size=dem2.size()[2:], mode='bilinear')) + F.upsample(depth, size=dem2.size()[2:], mode='bilinear'))))
output2 = self.output2((F.upsample(output1, size=dem2.size()[2:], mode='bilinear') + (dem2 * (dem2_attention * (F.upsample(depth, size=dem2.size()[2:], mode='bilinear') + dem2_attention)))))
output2_rev = self.output2_rev((F.upsample(output1_rev, size=dem2.size()[2:], mode='bilinear') + (dem2 * ((1 - dem2_attention) * (F.upsample(depth, size=dem2.size()[2:], mode='bilinear') + (1 - dem2_attention))))))
dem3_attention = F.sigmoid(self.fuse_3(((dem3 + F.upsample(output2, size=dem3.size()[2:], mode='bilinear')) + F.upsample(depth, size=dem3.size()[2:], mode='bilinear'))))
output3 = self.output3((F.upsample(output2, size=dem3.size()[2:], mode='bilinear') + (dem3 * (dem3_attention * (F.upsample(depth, size=dem3.size()[2:], mode='bilinear') + dem3_attention)))))
output3_rev = self.output3_rev((F.upsample(output2_rev, size=dem3.size()[2:], mode='bilinear') + (dem3 * ((1 - dem3_attention) * (F.upsample(depth, size=dem3.size()[2:], mode='bilinear') + (1 - dem3_attention))))))
dem4_attention = F.sigmoid(self.fuse_4(((dem4 + F.upsample(output3, size=dem4.size()[2:], mode='bilinear')) + F.upsample(depth, size=dem4.size()[2:], mode='bilinear'))))
output4 = self.output4((F.upsample(output3, size=dem4.size()[2:], mode='bilinear') + (dem4 * (dem4_attention * (F.upsample(depth, size=dem4.size()[2:], mode='bilinear') + dem4_attention)))))
output4_rev = self.output4_rev((F.upsample(output3_rev, size=dem4.size()[2:], mode='bilinear') + (dem4 * ((1 - dem4_attention) * (F.upsample(depth, size=dem4.size()[2:], mode='bilinear') + (1 - dem4_attention))))))
dem5_attention = F.sigmoid(self.fuse_5(((dem5 + F.upsample(output4, size=dem5.size()[2:], mode='bilinear')) + F.upsample(depth, size=dem5.size()[2:], mode='bilinear'))))
output5 = self.output5((F.upsample(output4, size=dem5.size()[2:], mode='bilinear') + (dem5 * (dem5_attention * (F.upsample(depth, size=dem5.size()[2:], mode='bilinear') + dem5_attention)))))
output5_rev = self.output5_rev((F.upsample(output4_rev, size=dem5.size()[2:], mode='bilinear') + (dem5 * ((1 - dem5_attention) * (F.upsample(depth, size=dem5.size()[2:], mode='bilinear') + (1 - dem5_attention))))))
output5 = F.upsample(output5, size=input.size()[2:], mode='bilinear')
output5_rev = F.upsample(output5_rev, size=input.size()[2:], mode='bilinear')
output = self.fuseout(torch.cat((output5, (- output5_rev)), 1))
output = ((- output5_rev) + output)
if self.training:
return (output, output5, output5_rev, dem1_attention, dem2_attention, dem3_attention, dem4_attention, dem5_attention)
        return F.sigmoid(output)
class Conv2dGaussian(ConvNdGaussianMixin, torch.nn.Conv2d):
def forward(self, input):
        return self._forward_impl(input, F.conv2d)
class MissingPackageError(Exception):
error_message = "Mandatory package '{name}' not found!"
def __init__(self, package_name: str):
self.package_name = package_name
        super(MissingPackageError, self).__init__(self.error_message.format(name=package_name))
def _megatron_glm_attn_com(ranks, tensor_shape, orig_module):
    return ([('all_reduce', ranks, tensor_shape)] * 4)
def other_class(n_classes, current_class):
if ((current_class < 0) or (current_class >= n_classes)):
        error_str = 'current_class must be within the range [0, n_classes - 1]'
raise ValueError(error_str)
other_class_list = list(range(n_classes))
other_class_list.remove(current_class)
other_class = np.random.choice(other_class_list)
    return other_class
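
# Quick illustrative check (numpy must already be imported as np, as the
# function above requires): the sampled class always differs from the input.
sampled = other_class(n_classes=10, current_class=3)
assert sampled != 3 and 0 <= sampled < 10
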
def make_dataset(dir, class_to_idx, extensions=None, is_valid_file=None):
images = []
dir = os.path.expanduser(dir)
if (not ((extensions is None) ^ (is_valid_file is None))):
raise ValueError('Both extensions and is_valid_file cannot be None or not None at the same time')
if (extensions is not None):
def is_valid_file(x):
return has_file_allowed_extension(x, extensions)
for target in sorted(class_to_idx.keys()):
d = os.path.join(dir, target)
if (not os.path.isdir(d)):
continue
for (root, _, fnames) in sorted(os.walk(d)):
for fname in sorted(fnames):
path = os.path.join(root, fname)
if is_valid_file(path):
item = (path, class_to_idx[target])
images.append(item)
    return images
class TACREDProcessor(DataProcessor):
def __init__(self):
super().__init__(labels=['no_relation', 'org:founded', 'org:subsidiaries', 'per:date_of_birth', 'per:cause_of_death', 'per:age', 'per:stateorprovince_of_birth', 'per:countries_of_residence', 'per:country_of_birth', 'per:stateorprovinces_of_residence', 'org:website', 'per:cities_of_residence', 'per:parents', 'per:employee_of', 'per:city_of_birth', 'org:parents', 'org:political/religious_affiliation', 'per:schools_attended', 'per:country_of_death', 'per:children', 'org:top_members/employees', 'per:date_of_death', 'org:members', 'org:alternate_names', 'per:religion', 'org:member_of', 'org:city_of_headquarters', 'per:origin', 'org:shareholders', 'per:charges', 'per:title', 'org:number_of_employees/members', 'org:dissolved', 'org:country_of_headquarters', 'per:alternate_names', 'per:siblings', 'org:stateorprovince_of_headquarters', 'per:spouse', 'per:other_family', 'per:city_of_death', 'per:stateorprovince_of_death', 'org:founded_by'])
def get_examples(self, data_dir, split):
path = os.path.join(data_dir, '{}.json'.format(split))
examples = []
with open(path, encoding='utf8') as f:
example_jsons = json.load(f)
for example_json in example_jsons:
guid = example_json['id']
label = self.get_label_id(example_json['relation'])
tokens = example_json['token']
text_a = ' '.join(tokens)
meta = {'head': ' '.join(tokens[example_json['subj_start']:(example_json['subj_end'] + 1)]), 'tail': ' '.join(tokens[example_json['obj_start']:(example_json['obj_end'] + 1)])}
example = InputExample(guid=guid, text_a=text_a, meta=meta, label=label)
examples.append(example)
        return examples
class BaseProgressBar(Subscriber):
def __init__(self):
super().__init__()
self.type = 'progressbar'
self.touched = False
self.iter = None
self.t_start = None
self.t_done = None
def start(self, iterations):
self.touched = True
self.iter = int(iterations)
self.t_start = time.time()
def update(self, n):
pass
def time_elapsed(self):
return ('%6.2fs' % (time.time() - self.t_start))
def time_remaining_est(self, completed_iter):
if completed_iter:
t_r_est = (((time.time() - self.t_start) / completed_iter) * (self.iter - completed_iter))
else:
t_r_est = 0
date_time = (datetime.datetime(1, 1, 1) + datetime.timedelta(seconds=t_r_est))
time_string = ('%02d:%02d:%02d:%02d' % ((date_time.day - 1), date_time.hour, date_time.minute, date_time.second))
return time_string
def finished(self):
        pass
class PrefixTuning(GPT2PreTrainedModel):
def __init__(self, config, model_gpt2, optim_prefix=False, preseqlen=5, use_infix=False, deep_param=False):
super().__init__(config)
print('under the PrefixTuning model')
self.match_n_layer = config.n_layer
self.match_n_head = config.n_head
self.match_n_embd = (config.n_embd // config.n_head)
self.n_embd = config.n_embd
if hasattr(config, 'optim_prefix'):
self.optim_prefix = config.optim_prefix
else:
self.optim_prefix = optim_prefix
if (hasattr(config, 'preseqlen') and self.optim_prefix):
self.preseqlen = config.preseqlen
elif self.optim_prefix:
self.preseqlen = preseqlen
if hasattr(config, 'use_infix'):
self.use_infix = config.use_infix
else:
self.use_infix = use_infix
if hasattr(config, '_my_arg_tune_mode'):
self.tuning_mode = config._my_arg_tune_mode
else:
self.tuning_mode = 'prefixtune'
if hasattr(config, '_my_arg_task_mode'):
self.task_mode = config._my_arg_task_mode
else:
self.task_mode = 'underspecified'
assert False, 'the task is underspecified'
if hasattr(config, 'train_weights'):
self.train_weights = (config.train_weights == 'yes')
else:
assert False, 'unspecified train weights'
if hasattr(config, 'format_mode'):
self.format_mode = config.format_mode
else:
self.format_mode = 'cat'
if hasattr(config, 'prefix_dropout'):
self.prefix_dropout = config.prefix_dropout
else:
self.prefix_dropout = 0.0
if hasattr(config, 'init_random'):
self.init_random = (config.init_random == 'yes')
else:
self.init_random = False
if hasattr(config, 'mid_dim'):
self.mid_dim = config.mid_dim
else:
self.mid_dim = 512
if hasattr(config, 'lowdata'):
self.lowdata = config.lowdata
else:
self.lowdata = False
if hasattr(config, 'lowdata_token'):
self.lowdata_token = config.lowdata_token
else:
self.lowdata_token = None
if hasattr(config, 'init_shallow'):
self.init_shallow = (config.init_shallow == 'yes')
else:
self.init_shallow = False
if hasattr(config, 'init_shallow_word'):
self.init_shallow_word = config.init_shallow_word
else:
self.init_shallow_word = None
if True:
self.mode_para = 0
print('PrefixTuning')
print('preseqlen is {}, optimizing the prefix directly'.format(self.preseqlen))
if (self.lowdata and (self.lowdata_token is not None)):
low_data_init = 3
assert (self.lowdata_token is not None)
self.preseqlen = len(self.lowdata_token[0])
print('LOW DATA SETTING, UNDER PARAMETRIZATION 1, low_data_init=3, preseqlen = {} Unifying with FINETUNE'.format(self.preseqlen))
self.input_tokens = torch.arange(self.preseqlen).long()
self.wte = nn.Embedding(self.preseqlen, config.n_embd)
self.control_trans = nn.Sequential(nn.Linear(config.n_embd, self.mid_dim), nn.Tanh(), nn.Linear(self.mid_dim, ((config.n_layer * 2) * config.n_embd)))
self.get_prompt = self.get_prompt_p5
elif ((not deep_param) and (not self.init_shallow)):
low_data_init = 0
print('[Full prefix-tuning Setting :) ]')
self.input_tokens = torch.arange(self.preseqlen).long()
self.wte = nn.Embedding(self.preseqlen, config.n_embd)
self.control_trans = nn.Sequential(nn.Linear(config.n_embd, self.mid_dim), nn.Tanh(), nn.Linear(self.mid_dim, ((config.n_layer * 2) * config.n_embd)))
if self.use_infix:
self.wte2 = nn.Embedding(self.preseqlen, config.n_embd)
self.get_prompt = self.get_prompt_p5_infix
else:
self.get_prompt = self.get_prompt_p5
elif self.init_shallow:
low_data_init = 0
print('[DOUBLE CHECK]: ABLATION STUDY on no parametrization trick... [shallow]')
if (self.init_shallow_word is not None):
assert (self.init_shallow_word is not None)
self.preseqlen = len(self.init_shallow_word[0])
init_val = self.get_gold_init(model_gpt2, torch.LongTensor(self.init_shallow_word))
print(init_val.shape)
self.control_trans = nn.Parameter(init_val)
self.get_prompt = self.get_prompt_p2_shallow
else:
print('random init of the prefix')
self.control_trans = nn.Parameter(torch.randn((((self.preseqlen * config.n_layer) * 2) * config.n_embd)))
self.get_prompt = self.get_prompt_p2
else:
low_data_init = 0
print('[DOUBLE CHECK]: DEEP MLP')
self.input_tokens = torch.arange(self.preseqlen).long()
self.wte = nn.Embedding(self.preseqlen, config.n_embd)
self.control_trans = nn.Sequential(nn.Linear(config.n_embd, self.mid_dim), nn.Tanh(), nn.Linear(self.mid_dim, self.mid_dim), nn.Tanh(), nn.Linear(self.mid_dim, ((config.n_layer * 2) * config.n_embd)))
if self.use_infix:
self.get_prompt = self.get_prompt_p5_infix
else:
self.get_prompt = self.get_prompt_p5
self.dropout = nn.Dropout(self.prefix_dropout)
if self.use_infix:
self.forward = self.forward_infix
total_param = 0
for (name, param) in self.named_parameters():
print(param.shape)
total_param += param.numel()
print('total param is {}'.format(total_param))
if (low_data_init == 3):
print('use pt for this tensor', torch.LongTensor(self.lowdata_token))
self.lowdata_init_train3(gpt2=model_gpt2, sample_input=torch.LongTensor(self.lowdata_token))
def get_gold_init(self, gpt2, sample_input):
gpt2 = gpt2.cuda()
with torch.no_grad():
output = gpt2(sample_input.to(gpt2.device), return_dict=True, use_cache=True)
output = output.past_key_values
print(len(output), output[0].shape)
output = torch.cat(output, dim=0)
return output
def lowdata_init_train3(self, gpt2, sample_input, epochs=500):
self = self.cuda()
gpt2 = gpt2.cuda()
with torch.no_grad():
output = gpt2(sample_input.to(gpt2.device), return_dict=True, use_cache=True)
output = output.past_key_values
print(len(output), output[0].shape)
output = torch.cat(output, dim=0)
optimizer_temp = torch.optim.Adam(self.control_trans.parameters(), lr=0.0001)
for e in range(epochs):
our_prompt = self.get_prompt_p5(bsz=1)
our_prompt = torch.cat(our_prompt, dim=0)
loss_metrics = nn.MSELoss()
loss = loss_metrics(our_prompt.to(gpt2.device), output)
print(loss)
loss.backward()
optimizer_temp.step()
self.control_trans.zero_grad()
return
def get_prompt_p2(self, control_code=None, gpt2=None, bsz=None):
assert (bsz is not None)
temp_control = self.control_trans.view(1, self.preseqlen, (self.match_n_layer * 2), self.match_n_head, self.match_n_embd).expand(bsz, (- 1), (- 1), (- 1), (- 1))
temp_control = self.dropout(temp_control)
past_key_values = temp_control.permute([2, 0, 3, 1, 4]).split(2)
return past_key_values
def get_prompt_p2_shallow(self, control_code=None, gpt2=None, bsz=None):
assert (bsz is not None)
temp = self.control_trans.expand((- 1), bsz, (- 1), (- 1), (- 1))
return temp.split(2)
def get_prompt_p5(self, control_code=None, gpt2=None, bsz=None):
input_tokens = self.input_tokens.unsqueeze(0).expand(bsz, (- 1)).to(self.device)
temp_control = self.wte(input_tokens)
past_key_values = self.control_trans(temp_control)
(bsz, seqlen, _) = past_key_values.shape
past_key_values = past_key_values.view(bsz, seqlen, (self.match_n_layer * 2), self.match_n_head, self.match_n_embd)
past_key_values = self.dropout(past_key_values)
past_key_values = past_key_values.permute([2, 0, 3, 1, 4]).split(2)
return past_key_values
def get_prompt_p5_infix(self, src, control_code=None, gpt2=None, bsz=None, attn_mask=None):
input_tokens = self.input_tokens.unsqueeze(0).expand(bsz, (- 1)).to(self.device)
temp_control = self.wte(input_tokens)
past_key_values = self.control_trans(temp_control)
(bsz, seqlen, _) = past_key_values.shape
past_key_values = past_key_values.view(bsz, seqlen, (self.match_n_layer * 2), self.match_n_head, self.match_n_embd)
past_key_values = self.dropout(past_key_values)
past_key_values = past_key_values.permute([2, 0, 3, 1, 4])
temp_emb = self.wte2(input_tokens)
src_emb = gpt2.transformer.wte(src)
total_emb = torch.cat([src_emb, temp_emb], dim=1)
src_out = gpt2(inputs_embeds=total_emb, attention_mask=attn_mask, use_cache=True, return_dict=True)
src_past_key_vals = src_out.past_key_values
src_past_key_vals = torch.cat(src_past_key_vals, dim=0)
(_, src_len) = src.shape
(nl, nb, nh, _, ndim) = past_key_values.shape
zero_mask = torch.zeros(nl, nb, nh, src_len, ndim).to(self.device)
past_key_values = torch.cat([zero_mask, past_key_values], dim=3)
past_key_values = (past_key_values + src_past_key_vals)
past_key_values = past_key_values.split(2)
return past_key_values
def forward(self, input_ids=None, weights=None, control_code=None, emb_match=None, past_key_values=None, attention_mask=None, token_type_ids=None, position_ids=None, head_mask=None, inputs_embeds=None, encoder_hidden_states=None, encoder_attention_mask=None, labels=None, use_cache=None, output_attentions=None, output_hidden_states=None, return_dict=None, gpt2_model=None, src=None, tgt=None, src_attn=None, tgt_attn=None, **kwargs):
bsz = input_ids.shape[0]
if (self.mode_para == 2):
past_key_values_prompt = self.get_prompt(src, gpt2=gpt2_model, bsz=bsz)
else:
past_key_values_prompt = self.get_prompt(control_code, gpt2=gpt2_model, bsz=bsz)
if (past_key_values is not None):
assert False, 'Attention, use past_key_values for other things'
else:
past_key_values = past_key_values_prompt
if (gpt2_model is None):
assert False, "Didn't specify gpt2 model"
if ((self.mode_para == 2) and (src_attn is not None) and (tgt_attn is not None)):
attention_mask = torch.cat([src_attn, tgt_attn], dim=1)
output = gpt2_model(input_ids=input_ids, control_code=None, weights=weights, emb_match=emb_match, past_key_values=past_key_values, attention_mask=attention_mask, token_type_ids=token_type_ids, position_ids=position_ids, head_mask=head_mask, inputs_embeds=inputs_embeds, encoder_hidden_states=encoder_hidden_states, encoder_attention_mask=encoder_attention_mask, labels=labels, use_cache=use_cache, output_attentions=output_attentions, output_hidden_states=output_hidden_states, return_dict=return_dict, **kwargs)
return output
def forward_infix(self, input_ids=None, weights=None, control_code=None, emb_match=None, past_key_values=None, attention_mask=None, token_type_ids=None, position_ids=None, head_mask=None, inputs_embeds=None, encoder_hidden_states=None, encoder_attention_mask=None, labels=None, use_cache=None, output_attentions=None, output_hidden_states=None, return_dict=None, gpt2_model=None, src=None, tgt=None, src_attn=None, tgt_attn=None, cate_batch=None, cate_attn=None, **kwargs):
bsz = input_ids.shape[0]
if (self.mode_para == 2):
past_key_values_prompt = self.get_prompt(src, None, gpt2=gpt2_model, bsz=bsz)
attention_mask = torch.cat([src_attn, src_attn, tgt_attn], dim=1)
else:
infix_attn = torch.ones(bsz, self.preseqlen).bool().to(self.device)
attention_mask = torch.cat([src_attn, infix_attn, tgt_attn], dim=1)
partial_attn_mask = torch.cat([src_attn, infix_attn], dim=1)
past_key_values_prompt = self.get_prompt(src, None, gpt2=gpt2_model, bsz=bsz, attn_mask=partial_attn_mask)
if (past_key_values is not None):
assert False, 'Attention, use past_key_values for other things'
else:
past_key_values = past_key_values_prompt
if (gpt2_model is None):
assert False, "Didn't specify gpt2 model"
output = gpt2_model(input_ids=input_ids, control_code=None, weights=weights, emb_match=emb_match, past_key_values=past_key_values, attention_mask=attention_mask, token_type_ids=token_type_ids, position_ids=position_ids, head_mask=head_mask, inputs_embeds=inputs_embeds, encoder_hidden_states=encoder_hidden_states, encoder_attention_mask=encoder_attention_mask, labels=labels, use_cache=use_cache, output_attentions=output_attentions, output_hidden_states=output_hidden_states, return_dict=return_dict, **kwargs)
        return output
def bessel_basis(n, k):
zeros = Jn_zeros(n, k)
normalizer = []
for order in range(n):
normalizer_tmp = []
for i in range(k):
normalizer_tmp += [(0.5 * (Jn(zeros[(order, i)], (order + 1)) ** 2))]
normalizer_tmp = (1 / (np.array(normalizer_tmp) ** 0.5))
normalizer += [normalizer_tmp]
f = spherical_bessel_formulas(n)
x = sym.symbols('x')
bess_basis = []
for order in range(n):
bess_basis_tmp = []
for i in range(k):
bess_basis_tmp += [sym.simplify((normalizer[order][i] * f[order].subs(x, (zeros[(order, i)] * x))))]
bess_basis += [bess_basis_tmp]
    return bess_basis
class QDA(AutotabularClassificationAlgorithm):
def __init__(self, reg_param, random_state=None):
self.reg_param = float(reg_param)
self.estimator = None
def fit(self, X, Y):
import sklearn.discriminant_analysis
estimator = sklearn.discriminant_analysis.QuadraticDiscriminantAnalysis(reg_param=self.reg_param)
if ((len(Y.shape) == 2) and (Y.shape[1] > 1)):
import sklearn.multiclass
self.estimator = sklearn.multiclass.OneVsRestClassifier(estimator, n_jobs=1)
else:
self.estimator = estimator
self.estimator.fit(X, Y)
if ((len(Y.shape) == 2) and (Y.shape[1] > 1)):
problems = []
for est in self.estimator.estimators_:
problem = np.any(np.any([np.any((s <= 0.0)) for s in est.scalings_]))
problems.append(problem)
problem = np.any(problems)
else:
problem = np.any(np.any([np.any((s <= 0.0)) for s in self.estimator.scalings_]))
if problem:
raise ValueError('Numerical problems in QDA. QDA.scalings_ contains values <= 0.0')
return self
def predict(self, X):
if (self.estimator is None):
raise NotImplementedError()
return self.estimator.predict(X)
def predict_proba(self, X):
if (self.estimator is None):
raise NotImplementedError()
df = self.estimator.predict_proba(X)
return softmax(df)
    @staticmethod
    def get_properties(dataset_properties=None):
return {'shortname': 'QDA', 'name': 'Quadratic Discriminant Analysis', 'handles_regression': False, 'handles_classification': True, 'handles_multiclass': True, 'handles_multilabel': True, 'handles_multioutput': False, 'is_deterministic': True, 'input': (DENSE, UNSIGNED_DATA), 'output': (PREDICTIONS,)}
    @staticmethod
    def get_hyperparameter_search_space(dataset_properties=None):
reg_param = UniformFloatHyperparameter('reg_param', 0.0, 1.0, default_value=0.0)
cs = ConfigurationSpace()
cs.add_hyperparameter(reg_param)
        return cs
class Cluster(object):
def __init__(self, root, img_path, combine_all=True):
self.images_dir = osp.join(root)
self.img_path = img_path
self.train_path = self.img_path
self.gallery_path = ''
self.query_path = ''
self.train = []
self.gallery = []
self.query = []
self.num_train_ids = 0
self.has_time_info = False
self.load()
def preprocess(self):
fpaths = sorted(glob(osp.join(self.images_dir, self.train_path, '*')))
data = []
all_pids = {}
for fpath in fpaths:
fname = osp.basename(fpath)
pid = len(all_pids)
all_pids[len(all_pids)] = len(all_pids)
camid = 0
time = 0
data.append((fname, pid, camid, time))
return (data, int(len(all_pids)))
def load(self):
(self.train, self.num_train_ids) = self.preprocess()
print(self.__class__.__name__, 'dataset loaded')
print(' subset | # ids | # images')
print(' ')
        print(' all | {:5d} | {:8d}'.format(self.num_train_ids, len(self.train)))
class AngleLoss(nn.Module):
def __init__(self, gamma=0):
super(AngleLoss, self).__init__()
self.gamma = gamma
self.it = 0
self.LambdaMin = 5.0
self.LambdaMax = 1500.0
self.lamb = 1500.0
def forward(self, input, target):
self.it += 1
(cos_theta, phi_theta) = input
target = target.view((- 1), 1)
index = (cos_theta.data * 0.0)
index.scatter_(1, target.data.view((- 1), 1), 1)
index = index.byte()
index = Variable(index)
self.lamb = max(self.LambdaMin, (self.LambdaMax / (1 + (0.1 * self.it))))
output = (cos_theta * 1.0)
output[index] -= ((cos_theta[index] * (1.0 + 0)) / (1 + self.lamb))
output[index] += ((phi_theta[index] * (1.0 + 0)) / (1 + self.lamb))
logpt = F.log_softmax(output, dim=1)
logpt = logpt.gather(1, target)
logpt = logpt.view((- 1))
pt = Variable(logpt.data.exp())
loss = (((- 1) * ((1 - pt) ** self.gamma)) * logpt)
loss = loss.mean()
        return loss
def train_model(config, exp_dir):
torch.manual_seed(config.random_seed)
(tokenizer, max_len_token) = get_tokenizer(config)
vocab = get_vocab(config, tokenizer, max_len_token)
model = get_model(config, vocab, max_len_token)
model = model.cuda()
optimizer = optim.Adam(filter((lambda p: p.requires_grad), model.parameters()), lr=config.learning_rate, weight_decay=config.l2penalty)
train_batcher = Batcher(config, 'train', tokenizer)
dev_evaluator = Evaluator(config, vocab, tokenizer, 'dev', exp_dir, list_k=[5])
for (train_num_batches, (qry_tk, pos_tk, neg_tk)) in enumerate(train_batcher.get_train_batches()):
optimizer.zero_grad()
model.train()
loss = model.compute_loss(qry_tk, pos_tk, neg_tk)
loss.backward()
torch.nn.utils.clip_grad_norm_(model.parameters(), config.clip)
optimizer.step()
if (train_num_batches == config.num_minibatches):
break
if ((train_num_batches > 0) and ((train_num_batches % config.eval_every_minibatch) == 0)):
model.eval()
            dev_evaluator.evaluate(model, train_num_batches)
def jsonline_iter(path) -> Iterable[Dict]:
with open(path) as file:
for line in file:
obj = json.loads(line)
if obj:
                yield obj
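
# Usage sketch with an assumed file name: stream records one at a time
# instead of loading the whole JSON-lines file into memory.
for record in jsonline_iter('data.jsonl'):
    print(record.keys())
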
class MyRandomCrop(object):
def __init__(self, size):
self.size = size
def __call__(self, img):
(width, height) = img.size
(target_width, target_height) = self.size
pad_width = 0
pad_height = 0
do_padding = False
if (width < target_width):
pad_width = (target_width - width)
do_padding = True
if (height < target_height):
pad_height = (target_height - height)
do_padding = True
pad = (0, 0, pad_width, pad_height)
if do_padding:
img = ImageOps.expand(img, border=pad, fill=0)
(width, height) = img.size
if ((width == target_width) and (height == target_height)):
return img
x1 = random.randint(0, (width - target_width))
y1 = random.randint(0, (height - target_height))
        return img.crop((x1, y1, (x1 + target_width), (y1 + target_height)))
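
# Usage sketch (Pillow's Image is an assumed import here; ImageOps and
# random are already required by the class): pad, then randomly crop to a
# fixed 224x224 output.
from PIL import Image

crop = MyRandomCrop((224, 224))
img = Image.new('RGB', (200, 300))  # narrower than the target width
assert crop(img).size == (224, 224)
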
def _status_file(key, host=None):
if (host is not None):
return os.path.join(_status_path(key), ('status-%s.txt' % host))
else:
        return os.path.join(_status_path(key), 'status.txt')
def get_macro_recall(guess_entities, gold_entities, mode='strong'):
(guess_entities, gold_entities) = get_doc_level_guess_gold_entities(guess_entities, gold_entities)
all_scores = [get_micro_recall(guess_entities[k], gold_entities[k], mode) for k in guess_entities]
    return ((sum(all_scores) / len(all_scores)) if len(all_scores) else 0)
class MemoryDataParameter(message.Message):
__metaclass__ = reflection.GeneratedProtocolMessageType
    DESCRIPTOR = _MEMORYDATAPARAMETER
class SquaredLR(LambdaStepLR):
def __init__(self, optimizer, max_iter, last_step=(- 1)):
        super(SquaredLR, self).__init__(optimizer, (lambda s: ((1 - (s / (max_iter + 1))) ** 2)), last_step)
class LongformerSelfAttentionForBart(nn.Module):
def __init__(self, config, layer_id):
super().__init__()
self.embed_dim = config.d_model
self.longformer_self_attn = LongformerSelfAttention(config, layer_id=layer_id)
self.output = nn.Linear(self.embed_dim, self.embed_dim)
def forward(self, query, key: Optional[Tensor], key_padding_mask: Optional[Tensor]=None, layer_state: Optional[Dict[(str, Optional[Tensor])]]=None, attn_mask: Optional[Tensor]=None, need_weights=False, output_attentions=False) -> Tuple[(Tensor, Optional[Tensor])]:
(tgt_len, bsz, embed_dim) = query.size()
assert (embed_dim == self.embed_dim)
assert (list(query.size()) == [tgt_len, bsz, embed_dim])
assert (attn_mask is None)
outputs = self.longformer_self_attn(query.transpose(0, 1), attention_mask=(key_padding_mask.unsqueeze(dim=1).unsqueeze(dim=1) * (- 1)), head_mask=None, encoder_hidden_states=None, encoder_attention_mask=None, output_attentions=output_attentions)
attn_output = self.output(outputs[0].transpose(0, 1))
        return (((attn_output,) + outputs[1:]) if (len(outputs) == 2) else (attn_output, None))
def dglane_from_position(p: T2value, network: LaneletNetwork, init_lane_selection: int=0, succ_lane_selection: int=0) -> DgLanelet:
lane_id = network.find_lanelet_by_position([p])
assert (len(lane_id[0]) > 0), p
lane = network.find_lanelet_by_id(lane_id[0][init_lane_selection])
merged_lane = Lanelet.all_lanelets_by_merging_successors_from_lanelet(lanelet=lane, network=network)[0][succ_lane_selection]
    return DgLanelet.from_commonroad_lanelet(merged_lane)
def get_model_size(model: Union[(nn.Module, torch.jit.ScriptModule)]):
tmp_model_path = Path('temp.p')
if isinstance(model, torch.jit.ScriptModule):
torch.jit.save(model, tmp_model_path)
else:
torch.save(model.state_dict(), tmp_model_path)
size = tmp_model_path.stat().st_size
os.remove(tmp_model_path)
    return (size / 1000000.0)
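
# Usage sketch (torch.nn assumed imported as nn, consistent with the type
# hints above): report the serialized size of a small module in megabytes.
model = nn.Linear(128, 10)
print('%.3f MB' % get_model_size(model))
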
class TFData2VecVisionForSemanticSegmentation(metaclass=DummyObject):
_backends = ['tf']
def __init__(self, *args, **kwargs):
        requires_backends(self, ['tf'])
_module()
class mit_b2(MixVisionTransformer):
def __init__(self, **kwargs):
        super(mit_b2, self).__init__(patch_size=4, embed_dims=[64, 128, 320, 512], num_heads=[1, 2, 5, 8], mlp_ratios=[4, 4, 4, 4], qkv_bias=True, norm_layer=partial(nn.LayerNorm, eps=1e-06), depths=[3, 4, 6, 3], sr_ratios=[8, 4, 2, 1], **kwargs)
class ErrorMetricsAverager(object):
def __init__(self):
(self.rmse_avg, self.mae_avg, self.absrel_avg) = (0, 0, 0)
(self.inv_rmse_avg, self.inv_mae_avg, self.inv_absrel_avg) = (0, 0, 0)
self.total_count = 0
def accumulate(self, error_metrics):
assert isinstance(error_metrics, ErrorMetrics)
self.rmse_avg += error_metrics.rmse
self.mae_avg += error_metrics.mae
self.absrel_avg += error_metrics.absrel
self.inv_rmse_avg += error_metrics.inv_rmse
self.inv_mae_avg += error_metrics.inv_mae
self.inv_absrel_avg += error_metrics.inv_absrel
self.total_count += 1
def average(self):
self.rmse_avg = (self.rmse_avg / self.total_count)
self.mae_avg = (self.mae_avg / self.total_count)
self.absrel_avg = (self.absrel_avg / self.total_count)
self.inv_rmse_avg = (self.inv_rmse_avg / self.total_count)
self.inv_mae_avg = (self.inv_mae_avg / self.total_count)
        self.inv_absrel_avg = (self.inv_absrel_avg / self.total_count)
def main(argv):
if (len(argv) > 1):
raise RuntimeError('generate_copts needs no command line args')
generate_copt_file(StarlarkStyle())
    generate_copt_file(CMakeStyle())
def get_pretraining_stl10(data_dir):
train_data = CIFAR10Pair(numpy_file=(data_dir + 'train_unlabeled.npz'), class_type=classes, transform=train_transform)
memory_data = CIFAR10Mem(numpy_file=(data_dir + 'train.npz'), class_type=classes, transform=test_transform_stl10)
test_data = CIFAR10Mem(numpy_file=(data_dir + 'test.npz'), class_type=classes, transform=test_transform_stl10)
    return (train_data, memory_data, test_data)
def from_txt(txt):
captions = []
with open(txt, 'rb') as f:
for line in f:
captions.append(line.strip())
    return captions
def build_vision_tower(vision_tower_cfg, **kwargs):
vision_tower = getattr(vision_tower_cfg, 'vision_tower', getattr(vision_tower_cfg, 'mm_vision_tower', None))
is_absolute_path_exists = os.path.exists(vision_tower)
if (is_absolute_path_exists or vision_tower.startswith('openai') or vision_tower.startswith('laion')):
return CLIPVisionTower(vision_tower, args=vision_tower_cfg, **kwargs)
    raise ValueError(f'Unknown vision tower: {vision_tower}')
(signature3, parallel=True)
def erf_numba3(x):
t = (1 / (1 + (p * np.abs(x))))
    return (np.sign(x) * (1 - ((t * ((a1 + (a2 * t)) + (a3 * (t ** 2)))) * np.exp((- (x ** 2))))))
def compute_jittered_speed(factor: float, speed: int) -> float:
min_speed = (speed * (1 - factor))
max_speed = (speed * (1 + factor))
jittered_speed = np.random.uniform(min_speed, max_speed)
    return jittered_speed
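
# Illustrative call (np is already required above): with a 10% jitter
# factor the result stays within [0.9 * speed, 1.1 * speed].
jittered = compute_jittered_speed(factor=0.1, speed=30)
assert 27.0 <= jittered <= 33.0
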
class PPOTransition(NamedTuple):
done: Done
action: Action
value: Value
reward: chex.Array
log_prob: chex.Array
obs: chex.Array
    info: Dict
def add_resizing_arguments(parser):
group = parser.add_mutually_exclusive_group()
group.add_argument('--resize_factor', type=float, help="Option to resize the images. Examples: '0.5' downscale half, '2' upscale twice, '1' no resizing (default)")
    group.add_argument('--image_size', type=int, help='Option to resize the images so that the average worm of the dataset fits into a square image of side: image_size')
def sql_dataframe_writer_api(spark):
print('Start running dataframe writer API')
sc = spark.sparkContext
sqlContext = SQLContext(sc)
df = spark.createDataFrame([(2, 'Alice'), (5, 'Bob')], ['age', 'name'])
df.write.format('parquet').bucketBy(100, 'age', 'name').mode('overwrite').saveAsTable('bucketed_table', path='work/spark-warehouse/bucketed_table/')
print('bucketBy and saveAsTable API finished')
df = spark.createDataFrame([(2, 'Alice'), (5, 'Bob')], ['age', 'name'])
df.write.option('header', 'true').csv(os.path.join(tempfile.mkdtemp(), 'data'))
print('csv and option API finished')
df.write.format('json').save(os.path.join(tempfile.mkdtemp(), 'data'))
print('format API finished')
df2 = spark.createDataFrame([(3, 'Alice')], ['age', 'name'])
df2.write.insertInto('bucketed_table')
print('insertInto API finished')
df.write.json(os.path.join(tempfile.mkdtemp(), 'data'))
print('json API finished')
df.write.mode('append').parquet(os.path.join(tempfile.mkdtemp(), 'data'))
print('mode API finished')
orc_df = spark.read.orc('/ppml/trusted-big-data-ml/work/spark-3.1.2/python/test_support/sql/orc_partitioned')
orc_df.write.orc(os.path.join(tempfile.mkdtemp(), 'data'))
print('orc API finished')
df.write.parquet(os.path.join(tempfile.mkdtemp(), 'data'))
print('parquet API finished')
df.write.partitionBy('age').parquet(os.path.join(tempfile.mkdtemp(), 'data'))
print('partitionBy API finished')
df.write.mode('append').save(os.path.join(tempfile.mkdtemp(), 'data'))
print('save API finished')
df.write.format('parquet').bucketBy(100, 'name').sortBy('age').mode('overwrite').saveAsTable('sorted_bucketed_table', path='work/spark-warehouse/sorted_bucketed_table/')
print('sortBy API finished')
df = spark.createDataFrame([1.0, 2.0, 3.0], StringType())
df.write.text(os.path.join(tempfile.mkdtemp(), 'data'))
print('text API finished')
    print('Finish running dataframe writer API')
def extract_graph(ebm, feature_index, normalization='none', use_feature_bounds=True):
feature_name = ebm.feature_names_in_[feature_index]
feature_type = ebm.feature_types_in_[feature_index]
scores = ebm.term_scores_[feature_index][1:(- 1)]
stds = ebm.standard_deviations_[feature_index][1:(- 1)]
normalization_constant = None
if (normalization == 'mean'):
normalization_constant = np.mean(scores)
elif (normalization == 'min'):
normalization_constant = np.min(scores)
elif (normalization == 'none'):
normalization_constant = 0
else:
raise Exception(f'Unknown normalization {normalization}')
scores = (scores - normalization_constant)
if (feature_type == 'continuous'):
x_vals = convert_to_intervals(ebm.bins_[feature_index][0])
if use_feature_bounds:
x_vals[0] = (ebm.feature_bounds_[feature_index][0], x_vals[0][1])
x_vals[(- 1)] = (x_vals[(- 1)][0], ebm.feature_bounds_[feature_index][1])
elif (feature_type == 'nominal'):
x_vals = ebm.bins_[feature_index][0]
x_vals = {(v - 1): k for (k, v) in x_vals.items()}
x_vals = [x_vals[idx] for idx in range(len(x_vals.keys()))]
else:
raise Exception(f'Feature {feature_index} is of unknown feature_type {feature_type}.')
assert (len(x_vals) == len(scores)), 'The number of bins and scores does not match.'
    return EBMGraph(feature_name, feature_type, x_vals, scores, stds)
class NormalizedEnv(Serializable):
def __init__(self, env, scale_reward=1.0, normalize_obs=False, normalize_reward=False, obs_alpha=0.001, reward_alpha=0.001, normalization_scale=1.0, dummy_flag=False):
Serializable.quick_init(self, locals())
self._scale_reward = 1
self._wrapped_env = env
self._normalize_obs = normalize_obs
self._normalize_reward = normalize_reward
self._obs_alpha = obs_alpha
self._obs_mean = np.zeros(self.observation_space.shape)
self._obs_var = np.ones(self.observation_space.shape)
self._reward_alpha = reward_alpha
self._reward_mean = 0.0
self._reward_var = 1.0
self._normalization_scale = normalization_scale
self._dummy_flag = dummy_flag
    @property
    def action_space(self):
if isinstance(self._wrapped_env.action_space, Box):
ub = (np.ones(self._wrapped_env.action_space.shape) * self._normalization_scale)
return Box(((- 1) * ub), ub, dtype=np.float32)
elif isinstance(self._wrapped_env.action_space, CustomBox):
ub = (np.ones(self._wrapped_env.action_space.shape) * self._normalization_scale)
return Box(((- 1) * ub), ub, dtype=np.float32)
return self._wrapped_env.action_space
def __getattr__(self, attr):
orig_attr = self._wrapped_env.__getattribute__(attr)
if callable(orig_attr):
def hooked(*args, **kwargs):
result = orig_attr(*args, **kwargs)
return result
return hooked
else:
return orig_attr
def _update_obs_estimate(self, obs):
o_a = self._obs_alpha
self._obs_mean = (((1 - o_a) * self._obs_mean) + (o_a * obs))
self._obs_var = (((1 - o_a) * self._obs_var) + (o_a * np.square((obs - self._obs_mean))))
def _update_reward_estimate(self, reward):
r_a = self._reward_alpha
self._reward_mean = (((1 - r_a) * self._reward_mean) + (r_a * reward))
self._reward_var = (((1 - r_a) * self._reward_var) + (r_a * np.square((reward - self._reward_mean))))
def _apply_normalize_obs(self, obs):
self._update_obs_estimate(obs)
return ((obs - self._obs_mean) / (np.sqrt(self._obs_var) + 1e-08))
def _apply_normalize_reward(self, reward):
self._update_reward_estimate(reward)
return (reward / (np.sqrt(self._reward_var) + 1e-08))
def reset(self):
obs = self._wrapped_env.reset()
if self._normalize_obs:
return self._apply_normalize_obs(obs)
else:
return obs
def __getstate__(self):
d = Serializable.__getstate__(self)
d['_obs_mean'] = self._obs_mean
d['_obs_var'] = self._obs_var
return d
def __setstate__(self, d):
Serializable.__setstate__(self, d)
self._obs_mean = d['_obs_mean']
self._obs_var = d['_obs_var']
def step(self, action):
if (isinstance(self._wrapped_env.action_space, Box) or isinstance(self._wrapped_env.action_space, CustomBox)):
(lb, ub) = (self._wrapped_env.action_space.low, self._wrapped_env.action_space.high)
scaled_action = (lb + (((action + self._normalization_scale) * (ub - lb)) / (2 * self._normalization_scale)))
scaled_action = np.clip(scaled_action, lb, ub)
else:
scaled_action = action
wrapped_step = self._wrapped_env.step(scaled_action)
(next_obs, reward, done, info) = wrapped_step
if getattr(self, '_normalize_obs', False):
next_obs = self._apply_normalize_obs(next_obs)
if getattr(self, '_normalize_reward', False):
reward = self._apply_normalize_reward(reward)
return (next_obs, (reward * self._scale_reward), done, info)
def get_sim_parameters(self):
if self._dummy_flag:
original = self._wrapped_env.get_sim_parameters()
original = np.zeros(original.shape)
return original
else:
            return self._wrapped_env.get_sim_parameters()
def sample_normal_ig(prior):
(mu, lambda0, alpha, beta) = prior
tau = np.random.gamma(shape=alpha, scale=(1.0 / beta))
var = (1.0 / (lambda0 * tau))
mean = np.random.normal(loc=mu, scale=np.sqrt(var))
    return (mean, tau)
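
# Usage sketch with assumed hyperparameters: draw one (mean, precision)
# pair from a Normal-Inverse-Gamma prior (mu, lambda0, alpha, beta).
mean, tau = sample_normal_ig((0.0, 1.0, 2.0, 2.0))
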
class MLP4(nn.Module):
def __init__(self, nin, nout, nh):
super().__init__()
self.net = nn.Sequential(nn.Linear(nin, nh), nn.LeakyReLU(0.2), nn.Linear(nh, nh), nn.LeakyReLU(0.2), nn.Linear(nh, nh), nn.LeakyReLU(0.2), nn.Linear(nh, nout))
def forward(self, x):
        return self.net(x)
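
# Usage sketch (torch assumed imported alongside torch.nn): one forward
# pass through the 4-layer MLP defined above.
import torch

mlp = MLP4(nin=16, nout=2, nh=64)
out = mlp(torch.randn(8, 16))  # -> shape (8, 2)
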
class BaseLoss(nn.Module):
def __init__(self):
super(BaseLoss, self).__init__()
def forward(self, preds, targets, weight=None):
if isinstance(preds, list):
N = len(preds)
if (weight is None):
weight = preds[0].new_ones(1)
errs = [self._forward(preds[n], targets[n], weight) for n in range(N)]
err = torch.mean(torch.stack(errs))
        elif isinstance(preds, torch.Tensor):
            if (weight is None):
                weight = preds.new_ones(1)
            err = self._forward(preds, targets, weight)
        else:
            raise TypeError(f'Unsupported preds type: {type(preds)}')
        return err
def skip(app, what, name, obj, would_skip, options):
if (name == '__init__'):
return False
    return would_skip
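
# This handler is normally registered from a Sphinx conf.py so that __init__
# docstrings survive autodoc's default skipping; a minimal hook:
def setup(app):
    app.connect('autodoc-skip-member', skip)
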
class TransformerDecoderLayer(Module):
def __init__(self, self_attention, cross_attention, d_model, d_ff=None, dropout=0.1, activation='relu', event_dispatcher=''):
super(TransformerDecoderLayer, self).__init__()
d_ff = (d_ff or (4 * d_model))
self.self_attention = self_attention
self.cross_attention = cross_attention
self.linear1 = Linear(d_model, d_ff)
self.linear2 = Linear(d_ff, d_model)
self.norm1 = LayerNorm(d_model)
self.norm2 = LayerNorm(d_model)
self.norm3 = LayerNorm(d_model)
self.dropout = Dropout(dropout)
self.activation = getattr(F, activation)
self.event_dispatcher = EventDispatcher.get(event_dispatcher)
def forward(self, x, memory, x_mask=None, x_length_mask=None, memory_mask=None, memory_length_mask=None):
N = x.shape[0]
L = x.shape[1]
L_prime = memory.shape[1]
x_mask = (x_mask or FullMask(L, device=x.device))
x_length_mask = (x_length_mask or LengthMask(x.new_full((N,), L, dtype=torch.int64)))
memory_mask = (memory_mask or FullMask(L, L_prime, device=x.device))
memory_length_mask = (memory_length_mask or LengthMask(x.new_full((N,), L_prime, dtype=torch.int64)))
x = (x + self.dropout(self.self_attention(x, x, x, attn_mask=x_mask, query_lengths=x_length_mask, key_lengths=x_length_mask)))
x = self.norm1(x)
x = (x + self.dropout(self.cross_attention(x, memory, memory, attn_mask=memory_mask, query_lengths=x_length_mask, key_lengths=memory_length_mask)))
y = x = self.norm2(x)
y = self.dropout(self.activation(self.linear1(y)))
y = self.dropout(self.linear2(y))
        return self.norm3((x + y))
def intervals_to_boundaries(intervals):
boundaries = np.zeros(intervals[(- 1)][1], dtype=bool)
boundaries[[(i[1] - 1) for i in intervals]] = True
    return boundaries
def total_incorrect_edges(true_adj, pred_adj, abs_tol=0.5):
diff = remove_diag(tf.math.abs((true_adj - pred_adj)))
    return num_incorrect(diff, abs_tol)
def condense_complex_conv1x1(in_channels, out_channels, groups):
    return CondenseComplexConv(in_channels=in_channels, out_channels=out_channels, kernel_size=1, stride=1, padding=0, groups=groups)
class Instance():
def __init__(self, gate_type, name):
self.gate_type = gate_type
self.name = name
self.ipins = list()
self.opins = list()
self.ipin_name_to_net = dict()
self.opin_name_to_net = dict()
def __str__(self):
return ('%s %s %s %s' % (self.gate_type, self.name, self.ipin_name_to_net, self.opin_name_to_net))
def get_instantiation_string(self):
val = ('%-8s %s ( ' % (self.gate_type, self.name))
(output_pin_string, input_pin_string) = (list(), list())
for (k, v) in self.opin_name_to_net.items():
output_pin_string.append(('.%s(%s)' % (k, v.name)))
for (k, v) in self.ipin_name_to_net.items():
input_pin_string.append(('.%s(%s)' % (k, v.name)))
input_pin_string.sort()
if ((len(output_pin_string) + len(input_pin_string)) == 1):
val += ((output_pin_string + input_pin_string)[0] + ' );')
else:
output_pins = ', '.join(sorted(output_pin_string))
input_pins = ', '.join(sorted(input_pin_string))
val += (((output_pins + ', ') + input_pins) + ' );')
        return val
class AutoModelForCTC(metaclass=DummyObject):
_backends = ['torch']
def __init__(self, *args, **kwargs):
        requires_backends(self, ['torch'])
def deeplabv3_resnetd101b_voc(pretrained_backbone=False, num_classes=21, aux=True, **kwargs):
backbone = resnetd101b(pretrained=pretrained_backbone, ordinary_init=False, multi_output=True).features
del backbone[(- 1)]
    return get_deeplabv3(backbone=backbone, num_classes=num_classes, aux=aux, model_name='deeplabv3_resnetd101b_voc', **kwargs)
def load_superres_from_checkpoint(checkpoint_path, load_weights=True, load_ema_if_available=False):
model_path = Path(checkpoint_path)
full_model_path = str(model_path.resolve())
assert model_path.exists(), f'checkpoint not found at {full_model_path}'
loaded = torch.load(str(model_path), map_location='cpu')
superres_params = safeget(loaded, 'superres_params')
superres_type = safeget(loaded, 'superres_type')
if (superres_type == 'original'):
superres_klass = SuperresConfig
elif (superres_type == 'elucidated'):
superres_klass = ElucidatedSuperresConfig
else:
raise ValueError(f'unknown superres type {superres_type} - you need to instantiate your superres with configurations, using classes SuperresConfig or ElucidatedSuperresConfig')
assert (exists(superres_params) and exists(superres_type)), 'superres type and configuration not saved in this checkpoint'
superres = superres_klass(**superres_params).create()
if (not load_weights):
return superres
has_ema = ('ema' in loaded)
should_load_ema = (has_ema and load_ema_if_available)
superres.load_state_dict(loaded['model'])
if (not should_load_ema):
print('loading non-EMA version of unets')
return superres
ema_unets = nn.ModuleList([])
for unet in superres.unets:
ema_unets.append(EMA(unet))
ema_unets.load_state_dict(loaded['ema'])
for (unet, ema_unet) in zip(superres.unets, ema_unets):
unet.load_state_dict(ema_unet.ema_model.state_dict())
print('loaded EMA version of unets')
return superres |
class DummyFloatProblem(FloatProblem):
def __init__(self):
super(DummyFloatProblem, self).__init__()
def number_of_objectives(self) -> int:
return 2
def number_of_constraints(self) -> int:
return 0
def evaluate(self, solution: FloatSolution) -> FloatSolution:
return solution
def name(self) -> str:
return 'Dummy float problem' |
def build_dataset(fields, data_type=None, data_iter=None, data_path=None, total_token_length=500, src_seq_length=100, src_sent_length=100, seq_length_trunc=0, use_filter_pred=True):
(examples_iter, num_feats) = TextDataset.make_text_examples_nfeats_tpl(data_iter, data_path, seq_length_trunc)
dataset = TextDataset(fields, data_type, examples_iter, num_feats, total_token_length=total_token_length, src_seq_length=src_seq_length, src_sent_length=src_sent_length, use_filter_pred=use_filter_pred)
return dataset |
class Bottleneck(Module):
expansion = 4
def __init__(self, inplanes, planes, stride=1, downsample=None):
super(Bottleneck, self).__init__()
self.conv1 = nn.Conv2d(inplanes, planes, kernel_size=1, bias=False)
self.bn1 = nn.BatchNorm2d(planes)
self.conv2 = nn.Conv2d(planes, planes, kernel_size=3, stride=stride, padding=1, bias=False)
self.bn2 = nn.BatchNorm2d(planes)
self.conv3 = nn.Conv2d(planes, (planes * self.expansion), kernel_size=1, bias=False)
self.bn3 = nn.BatchNorm2d((planes * self.expansion))
self.relu = nn.ReLU(inplace=True)
self.downsample = downsample
self.stride = stride
def forward(self, x):
residual = x
out = self.conv1(x)
out = self.bn1(out)
out = self.relu(out)
out = self.conv2(out)
out = self.bn2(out)
out = self.relu(out)
out = self.conv3(out)
out = self.bn3(out)
if (self.downsample is not None):
residual = self.downsample(x)
out += residual
out = self.relu(out)
return out |
def sigmoid(tensor: Tensor, temp: float) -> Tensor:
exponent = ((- tensor) / temp)
exponent = torch.clamp(exponent, min=(- 50), max=50)
y = (1.0 / (1.0 + torch.exp(exponent)))
return y |
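# A quick sketch of the temperature's effect: a smaller temp sharpens the
# transition around zero, and the clamp keeps torch.exp from overflowing.
import torch

t = torch.tensor([-1000.0, -1.0, 0.0, 1.0, 1000.0])
print(sigmoid(t, temp=1.0))  # ~[0.0000, 0.2689, 0.5000, 0.7311, 1.0000]
print(sigmoid(t, temp=0.1))  # near-step function: ~[0, 0, 0.5, 1, 1] |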
def bbox_ohem_orginal(bbox_pred, bbox_target, label):
zeros_index = tf.zeros_like(label, dtype=tf.float32)
valid_inds = tf.where((label != zeros_index), tf.ones_like(label, dtype=tf.float32), zeros_index)
square_error = tf.reduce_sum(tf.square((bbox_pred - bbox_target)), axis=1)
keep_num = tf.cast((tf.reduce_sum(valid_inds) * num_keep_radio), dtype=tf.int32)
square_error = (square_error * valid_inds)
(_, k_index) = tf.nn.top_k(square_error, k=keep_num)
square_error = tf.gather(square_error, k_index)
return tf.reduce_mean(square_error) |
@click.command()
@click.option('--data_root', default='../../ultrasound/train')
@click.option('--output_path', default='./unet_trained_ultrasound')
@click.option('--training_iters', default=20)
@click.option('--epochs', default=100)
@click.option('--restore', default=False)
@click.option('--layers', default=3)
@click.option('--features_root', default=32)
def launch(data_root, output_path, training_iters, epochs, restore, layers, features_root):
print(('Using data from: %s' % data_root))
if (not os.path.exists(data_root)):
raise IOError('Kaggle Ultrasound Dataset not found')
data_provider = DataProvider(search_path=(data_root + '/*.tif'), mean=100, std=56)
net = unet.Unet(channels=data_provider.channels, n_class=data_provider.n_class, layers=layers, features_root=features_root)
path = (output_path if restore else util.create_training_path(output_path))
trainer = unet.Trainer(net, batch_size=1, norm_grads=False, optimizer='adam')
path = trainer.train(data_provider, path, training_iters=training_iters, epochs=epochs, dropout=0.5, display_step=2, restore=restore)
(x_test, y_test) = data_provider(1)
prediction = net.predict(path, x_test)
print('Testing error rate: {:.2f}%'.format(unet.error_rate(prediction, util.crop_to_shape(y_test, prediction.shape)))) |
@torch.jit.script
def CCA_CV(representations: List[torch.Tensor]):
latent_dimensions = representations[0].shape[1]
C = torch.zeros(latent_dimensions, latent_dimensions, device=representations[0].device)
V = torch.zeros(latent_dimensions, latent_dimensions, device=representations[0].device)
for (i, zi) in enumerate(representations):
V.add_(torch.cov(zi.T))
for (j, zj) in enumerate(representations):
C.add_(torch_cross_cov(zi, zj))
C.div_(len(representations))
V.div_(len(representations))
return (C, V) |
class StochasticPolicy(Policy):
    @property
    def distribution(self):
        raise NotImplementedError
    def dist_info(self, obs, state_infos):
        raise NotImplementedError |
def _convert_tokens_to_string_with_added_encoders(tokenizer: Union[(PreTrainedTokenizer, PreTrainedTokenizerFast)], output_tokens: List[str], skip_special_tokens: bool, spaces_between_special_tokens: bool) -> str:
sub_texts = []
current_sub_text = []
all_special_tokens = set(tokenizer.all_special_tokens)
for token in output_tokens:
if (skip_special_tokens and (token in all_special_tokens)):
continue
if (token in tokenizer.get_added_vocab()):
if current_sub_text:
sub_text = tokenizer.convert_tokens_to_string(current_sub_text)
sub_texts.append(sub_text)
current_sub_text = []
sub_texts.append(token)
else:
current_sub_text.append(token)
if current_sub_text:
sub_text = tokenizer.convert_tokens_to_string(current_sub_text)
sub_texts.append(sub_text)
if spaces_between_special_tokens:
return ' '.join(sub_texts)
else:
return ''.join(sub_texts) |
def apu_enabled(configs):
if (RuntimeType.apu in get_runtimes(configs)):
return True
return False |
def test_digits_naive_init():
model1 = FeatureBasedSelection(100, 'sqrt')
model2 = FeatureBasedSelection(100, 'log')
model = MixtureSelection(100, [model1, model2], [1.0, 0.3], optimizer='naive', initial_subset=digits_ranking[:5])
model.fit(X_digits)
assert_array_equal(model.ranking[:20], digits_ranking[5:25])
assert_array_almost_equal(model.gains[:20], digits_gains[5:25], 4)
assert_array_almost_equal(model.subset, X_digits[model.ranking]) |
def _get_lazy_chamfer_dataset(inf_cloud_dataset, cat_id, n_samples):
return get_lazy_evaluation_dataset(inf_cloud_dataset, cat_id, n_samples, (lambda c0, c1: (np_metrics.chamfer(c0, c1) / n_samples))) |
@register_bpe('sentencepiece', dataclass=SentencepieceConfig)
class SentencepieceBPE(object):
def __init__(self, cfg):
self.enable_sampling = cfg.sentencepiece_enable_sampling
self.alpha = cfg.sentencepiece_alpha
sentencepiece_model = file_utils.cached_path(cfg.sentencepiece_model)
try:
import sentencepiece as spm
self.sp = spm.SentencePieceProcessor()
self.sp.Load(sentencepiece_model)
except ImportError:
raise ImportError('Please install sentencepiece with: pip install sentencepiece')
def encode(self, x: str) -> str:
return ' '.join(self.sp.Encode(x, out_type=str, enable_sampling=self.enable_sampling, alpha=self.alpha))
def decode(self, x: str) -> str:
        return x.replace(' ', '').replace('\u2581', ' ').strip()
def is_beginning_of_word(self, x: str) -> bool:
if (x in ['<unk>', '<s>', '</s>', '<pad>']):
return True
        return x.startswith('\u2581') |
class LGBOptimizerOptuna(object):
def __init__(self, objective: str='binary', verbose: bool=False):
self.objective = objective
self.verbose = verbose
self.best: Dict[(str, Any)] = {}
def optimize(self, dtrain: lgbDataset, deval: lgbDataset):
params: Dict = {'objective': self.objective}
if self.verbose:
params['verbosity'] = 1
else:
params['verbosity'] = (- 1)
train_set = lgb.Dataset(data=pd.concat([dtrain.data, deval.data]).reset_index(drop=True), label=pd.concat([dtrain.label, deval.label]).reset_index(drop=True), categorical_feature=dtrain.categorical_feature, free_raw_data=False)
train_index = range(len(dtrain.data))
valid_index = range(len(dtrain.data), len(train_set.data))
self.tuner = LightGBMTunerCV(params=params, train_set=train_set, folds=[(train_index, valid_index)], verbose_eval=False, num_boost_round=100, early_stopping_rounds=50)
self.tuner.run()
self.best = self.tuner.best_params
self.best['n_estimators'] = 1000 |
class VecTaskPython(VecTask):
def get_state(self):
return torch.clamp(self.task.states_buf, (- self.clip_obs), self.clip_obs).to(self.rl_device)
def step(self, actions):
actions_tensor = torch.clamp(actions, (- self.clip_actions), self.clip_actions)
self.task.step(actions_tensor)
return (torch.clamp(self.task.obs_buf, (- self.clip_obs), self.clip_obs).to(self.rl_device), self.task.rew_buf.to(self.rl_device), self.task.reset_buf.to(self.rl_device), self.task.extras)
def reset(self):
actions = (0.01 * (1 - (2 * torch.rand([self.task.num_envs, self.task.num_actions], dtype=torch.float32, device=self.rl_device))))
self.task.step(actions)
return torch.clamp(self.task.obs_buf, (- self.clip_obs), self.clip_obs).to(self.rl_device) |
def temporal_sampling(frames, start_idx, end_idx, num_samples):
index = torch.linspace(start_idx, end_idx, num_samples)
index = torch.clamp(index, 0, (len(frames) - 1)).long().tolist()
frames = [frames[idx] for idx in index]
return frames |
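# Usage sketch: pick num_samples indices spaced evenly between start_idx
# and end_idx (inclusive), clamped to the valid frame range.
import torch

frames = list(range(10))  # stand-in for a list of decoded video frames
print(temporal_sampling(frames, start_idx=0, end_idx=9, num_samples=4))
# -> [0, 3, 6, 9] |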
def get_git_version():
result = subprocess.run(['git', '--version'], stdout=subprocess.PIPE).stdout.decode('utf-8')
version = [int(c) for c in result.replace('git version ', '').replace('\n', '').split('.')]
return version |
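# Usage sketch: the parsed version is a list of ints, so it compares
# lexicographically; the 2.20 minimum below is just an arbitrary example.
if get_git_version() < [2, 20]:
    raise RuntimeError('a newer git is required') |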
def _test():
import torch
pretrained = False
models = [fishnet99, fishnet150]
for model in models:
net = model(pretrained=pretrained)
net.eval()
weight_count = _calc_width(net)
print('m={}, {}'.format(model.__name__, weight_count))
        assert ((model != fishnet99) or (weight_count == 16628904))
        assert ((model != fishnet150) or (weight_count == 24959400))
x = torch.randn(1, 3, 224, 224)
y = net(x)
y.sum().backward()
assert (tuple(y.size()) == (1, 1000)) |
@torch.no_grad()
def from_importance_weights(log_rhos, discounts, rewards, values, bootstrap_value, clip_rho_threshold=1.0, clip_pg_rho_threshold=1.0):
with torch.no_grad():
rhos = torch.exp(log_rhos)
if (clip_rho_threshold is not None):
clipped_rhos = torch.clamp(rhos, max=clip_rho_threshold)
else:
clipped_rhos = rhos
cs = torch.clamp(rhos, max=1.0)
values_t_plus_1 = torch.cat([values[1:], torch.unsqueeze(bootstrap_value, 0)], dim=0)
deltas = (clipped_rhos * ((rewards + (discounts * values_t_plus_1)) - values))
acc = torch.zeros_like(bootstrap_value)
result = []
for t in range((discounts.shape[0] - 1), (- 1), (- 1)):
acc = (deltas[t] + ((discounts[t] * cs[t]) * acc))
result.append(acc)
result.reverse()
vs_minus_v_xs = torch.stack(result)
vs = torch.add(vs_minus_v_xs, values)
broadcasted_bootstrap_values = (torch.ones_like(vs[0]) * bootstrap_value)
vs_t_plus_1 = torch.cat([vs[1:], broadcasted_bootstrap_values.unsqueeze(0)], dim=0)
if (clip_pg_rho_threshold is not None):
clipped_pg_rhos = torch.clamp(rhos, max=clip_pg_rho_threshold)
else:
clipped_pg_rhos = rhos
pg_advantages = (clipped_pg_rhos * ((rewards + (discounts * vs_t_plus_1)) - values))
return VTraceReturns(vs=vs, pg_advantages=pg_advantages) |
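# A toy on-policy sketch (log_rhos == 0, so importance ratios are all 1),
# assuming VTraceReturns is a namedtuple with fields vs / pg_advantages.
import torch

T, B = 3, 2  # timesteps x batch
out = from_importance_weights(
    log_rhos=torch.zeros(T, B),
    discounts=torch.full((T, B), 0.99),
    rewards=torch.ones(T, B),
    values=torch.zeros(T, B),
    bootstrap_value=torch.zeros(B))
print(out.vs.shape, out.pg_advantages.shape)  # both torch.Size([3, 2]) |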
@LOSSES.register_module
class GHMC(nn.Module):
def __init__(self, bins=10, momentum=0, use_sigmoid=True, loss_weight=1.0):
super(GHMC, self).__init__()
self.bins = bins
self.momentum = momentum
self.edges = (torch.arange((bins + 1)).float().cuda() / bins)
self.edges[(- 1)] += 1e-06
if (momentum > 0):
self.acc_sum = torch.zeros(bins).cuda()
self.use_sigmoid = use_sigmoid
if (not self.use_sigmoid):
raise NotImplementedError
self.loss_weight = loss_weight
def forward(self, pred, target, label_weight, *args, **kwargs):
if (pred.dim() != target.dim()):
(target, label_weight) = _expand_binary_labels(target, label_weight, pred.size((- 1)))
(target, label_weight) = (target.float(), label_weight.float())
edges = self.edges
mmt = self.momentum
weights = torch.zeros_like(pred)
g = torch.abs((pred.sigmoid().detach() - target))
valid = (label_weight > 0)
tot = max(valid.float().sum().item(), 1.0)
n = 0
for i in range(self.bins):
inds = (((g >= edges[i]) & (g < edges[(i + 1)])) & valid)
num_in_bin = inds.sum().item()
if (num_in_bin > 0):
if (mmt > 0):
self.acc_sum[i] = ((mmt * self.acc_sum[i]) + ((1 - mmt) * num_in_bin))
weights[inds] = (tot / self.acc_sum[i])
else:
weights[inds] = (tot / num_in_bin)
n += 1
if (n > 0):
weights = (weights / n)
loss = (F.binary_cross_entropy_with_logits(pred, target, weights, reduction='sum') / tot)
return (loss * self.loss_weight) |
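# A toy usage sketch; the bin edges are created with .cuda(), so a GPU is
# required. pred holds logits, target binary labels, and label_weight
# masks out ignored positions.
import torch

ghmc = GHMC(bins=10, momentum=0.75)
pred = torch.randn(8, 4, device='cuda')
target = torch.randint(0, 2, (8, 4), device='cuda').float()
label_weight = torch.ones(8, 4, device='cuda')
loss = ghmc(pred, target, label_weight)  # scalar tensor |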
def checkLabelledGraph(g, *, string: str, vertexString: str, edgeString: str, graphNameInElements: str=None, vIdFull: bool=True):
checkGraph(g, string=string, vertexString=vertexString, edgeString=edgeString, graphNameInElements=graphNameInElements, vIdFull=vIdFull)
vNull = g.Vertex()
fail((lambda : vNull.stringLabel), 'Can not get string label on a null vertex.')
fail((lambda : vNull.atomId), 'Can not get atom id on a null vertex.')
fail((lambda : vNull.isotope), 'Can not get isotope on a null vertex.')
fail((lambda : vNull.charge), 'Can not get charge on a null vertex.')
fail((lambda : vNull.radical), 'Can not get radical status on a null vertex.')
fail((lambda : vNull.printStereo()), 'Can not print stereo on a null vertex.')
fail((lambda : vNull.printStereo(GraphPrinter())), 'Can not print stereo on a null vertex.')
for v in g.vertices:
assert (type(v.stringLabel) == str)
assert (type(v.atomId) == AtomId)
assert (type(v.isotope) == Isotope)
assert (type(v.charge) == Charge)
assert (type(v.radical) == bool)
v.printStereo()
v.printStereo(GraphPrinter())
eNull = g.Edge()
fail((lambda : eNull.stringLabel), 'Can not get string label on a null edge.')
fail((lambda : eNull.bondType), 'Can not get bond type on a null edge.')
for e in g.edges:
assert (type(e.stringLabel) == str)
assert (type(e.bondType) == BondType) |
class PILCutout(object):
def __init__(self, min_box: int, max_box: int, pad_value: int=0) -> None:
super().__init__()
self.min_box = int(min_box)
self.max_box = int(max_box)
self.pad_value = int(pad_value)
def __call__(self, img: Image.Image) -> Image.Image:
r_img = img.copy()
(w, h) = img.size
box_sz = np.random.randint(self.min_box, (self.max_box + 1))
half_box_sz = int(np.floor((box_sz / 2.0)))
x_c = np.random.randint(half_box_sz, (w - half_box_sz))
y_c = np.random.randint(half_box_sz, (h - half_box_sz))
box = ((x_c - half_box_sz), (y_c - half_box_sz), (x_c + half_box_sz), (y_c + half_box_sz))
r_img.paste(self.pad_value, box=box)
return r_img |
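# Usage sketch: paste one randomly placed square of pad_value into a copy
# of the image; the image must be larger than max_box for the centre
# sampling to be valid.
from PIL import Image

aug = PILCutout(min_box=8, max_box=16, pad_value=0)
img = Image.new('L', (64, 64), color=255)  # white grayscale test image
out = aug(img)  # copy of img with one black square cut out |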
def dummy_input_layer():
from lasagne.layers.input import InputLayer
input_layer = InputLayer((2, 3, 4))
mock = Mock(input_layer)
mock.shape = input_layer.shape
mock.input_var = input_layer.input_var
mock.output_shape = input_layer.output_shape
return mock |
def evaluate(data_source, batch_size=10):
model.eval()
model_now = model.module
criterion_now = criterion.module
if (args.model == 'QRNN'):
model_now.reset()
total_loss = 0
ntokens = len(corpus.dictionary)
hidden = model_now.init_hidden(batch_size)
for i in range(0, (data_source.size(0) - 1), args.bptt):
(data, targets) = get_batch(data_source, i, args, evaluation=True)
(output, hidden) = model_now(data, hidden)
criterion_now.replicate_weight_and_bias(model.module.decoder.weight, model.module.decoder.bias)
total_loss += (len(data) * criterion_now(hiddens=output, targets=targets).data)
hidden = repackage_hidden(hidden)
return (total_loss.item() / len(data_source)) |
def plot_model(model, ratio, keep_x_axis, keep_y_axis):
(adapted, original) = ([], [])
filename = f'{model}-results/{model}_evaluate_ratio={ratio}_run='
(adapted1, original1) = interpret(open((filename + '1.txt')).readlines())
(adapted2, original2) = interpret(open((filename + '2.txt')).readlines())
(adapted3, original3) = interpret(open((filename + '3.txt')).readlines())
adapted_average = np.mean(np.array([adapted1, adapted2, adapted3]), axis=0)
adapted_average = np.convolve(adapted_average, (np.ones((3,)) / 3), mode='valid')[:26]
adapted.append(adapted_average)
adapted_average = [(1 - x) for x in adapted_average]
adapted_std = np.std(np.array([adapted1, adapted2, adapted3]), axis=0)[:26]
original_average = np.mean(np.array([original1, original2, original3]), axis=0)
original_average = np.convolve(original_average, (np.ones((3,)) / 3), mode='valid')[:26]
original_std = np.std(np.array([original1, original2, original3]), axis=0)[:26]
original.append(original_average)
steps = range(0, len(original_average))
top = [1 for x in steps]
sns.set()
sns.set_context('paper')
sns.set_style('whitegrid')
sns.set_context(rc={'lines.linewidth': 1.5})
f = plt.figure(figsize=(6, 3))
sns.set(font_scale=1.75)
plt.ylim(0, 1)
plt.xlim(0, 25)
ax = sns.lineplot(steps, adapted_average, color='blue')
ax = sns.lineplot(steps, original_average, color='red', legend=False, ax=ax)
ax.fill_between(steps, original_average, adapted_average, alpha=0.2, color='grey')
ax.fill_between(steps, adapted_average, top, alpha=0.12, color='blue')
ax.fill_between(steps, original_average, alpha=0.2, color='red')
ax.fill_between(steps, (original_average - original_std), (original_average + original_std), alpha=0.2, color='red')
ax.fill_between(steps, (adapted_average - adapted_std), (adapted_average + adapted_std), alpha=0.2, color='blue')
ax.grid(False)
if keep_x_axis:
plt.xlabel('epoch')
else:
ax.set_xticklabels([])
if keep_y_axis:
plt.ylabel('accuracy')
else:
ax.set_yticklabels([])
if (max(original_average) > 0.5):
plt.text(0.5, 0.07, 'overgeneralisation', horizontalalignment='left', size='medium', color='black')
if (min(adapted_average) < 0.5):
plt.text(15, 0.8, 'memorisation', horizontalalignment='left', size='medium', color='black')
f.savefig(f"{model}ratio={str((ratio * 100)).replace('.', '')}.pdf", bbox_inches='tight')
plt.clf() |
class SVHNClusteringDatasetInterface(ClusterDatasetInterface):
ALLOWED_SPLIT = ['train', 'test']
def __init__(self, data_root=None, split_partitions: List[str]=[], batch_size: int=1, shuffle: bool=False, num_workers: int=1, pin_memory: bool=True) -> None:
super().__init__(SVHN, data_root, split_partitions, batch_size, shuffle, num_workers, pin_memory)
def _creat_concatDataset(self, image_transform: Callable, target_transform: Callable, dataset_dict: dict={}):
for split in self.split_partitions:
assert (split in self.ALLOWED_SPLIT), f'Allowed split in SVHN:{self.ALLOWED_SPLIT}, given {split}.'
_datasets = []
for split in self.split_partitions:
dataset = self.DataClass(self.data_root, split=split, transform=image_transform, target_transform=target_transform, download=True, **dataset_dict)
_datasets.append(dataset)
serial_dataset = reduce((lambda x, y: (x + y)), _datasets)
return serial_dataset |
def register_embedding(embedding_tensor_name, meta_data_fname, log_dir):
config = projector.ProjectorConfig()
embedding = config.embeddings.add()
embedding.tensor_name = embedding_tensor_name
embedding.metadata_path = meta_data_fname
projector.visualize_embeddings(log_dir, config) |
def set_config(new_config):
global config
config = new_config
import crypten.mpc
crypten.mpc.mpc.config = new_config |
def sequence_to_text(sequence):
result = ''
for symbol_id in sequence:
s = _id_to_symbol[symbol_id]
result += s
return result |
def make_lm_config(data_dir=None, extra_flags=None, task='language_modeling', arch='transformer_lm_gpt2_tiny'):
task_args = [task]
if (data_dir is not None):
task_args += [data_dir]
train_parser = options.get_training_parser()
train_args = options.parse_args_and_arch(train_parser, (['--task', *task_args, '--arch', arch, '--optimizer', 'adam', '--lr', '0.0001', '--max-tokens', '500', '--tokens-per-sample', '500', '--save-dir', data_dir, '--max-epoch', '1'] + (extra_flags or [])))
cfg = convert_namespace_to_omegaconf(train_args)
return cfg |
def test_init_shared_network(dataloaders_with_covariates):
dataset = dataloaders_with_covariates['train'].dataset
net = TemporalFusionTransformer.from_dataset(dataset, share_single_variable_networks=True)
net.predict(dataset, fast_dev_run=True) |
def create_critic_model(opt, fields):
encoder_src = EncoderRNN('GRU', opt.embedding_size, opt.hidden_size, opt.num_layers, opt.dropout, opt.bidirectional)
encoder_tgt = EncoderRNN('GRU', opt.embedding_size, opt.hidden_size, opt.num_layers, opt.dropout, opt.bidirectional)
model = Critic(encoder_src, encoder_tgt, opt.dropout)
model.apply(weights_init)
return model |
def require_pytesseract(test_case):
return unittest.skipUnless(is_pytesseract_available(), 'test requires PyTesseract')(test_case) |
class InvLrUpdaterHook(LrUpdaterHook):
def __init__(self, gamma, power=1.0, **kwargs):
self.gamma = gamma
self.power = power
super(InvLrUpdaterHook, self).__init__(**kwargs)
def get_lr(self, runner, base_lr):
progress = (runner.epoch if self.by_epoch else runner.iter)
return (base_lr * ((1 + (self.gamma * progress)) ** (- self.power))) |
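# A small sketch of the inverse-decay schedule this hook implements:
# lr = base_lr * (1 + gamma * progress) ** -power.
base_lr, gamma, power = 0.01, 0.001, 0.75
for step in (0, 1000, 10000):
    print(base_lr * (1 + gamma * step) ** -power)
# -> 0.01, ~0.00595, ~0.00166 |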
class Ensemble(nn.ModuleList):
def __init__(self):
super(Ensemble, self).__init__()
def forward(self, x, augment=False):
y = []
for module in self:
y.append(module(x, augment)[0])
y = torch.cat(y, 1)
return (y, None) |
def _download_images(label_path: PathOrStr, img_tuples: list, max_workers: int=defaults.cpus, timeout: int=4) -> FilePathList:
os.makedirs(Path(label_path), exist_ok=True)
parallel(partial(_download_single_image, label_path, timeout=timeout), img_tuples, max_workers=max_workers)
return get_image_files(label_path) |
def get_classes(filename='../tiny-imagenet-200/val/val_annotations.txt'):
class_dict = {}
for line in open(filename):
line_array = line.rstrip('\n').split('\t')
class_dict[line_array[0]] = line_array[1]
return class_dict |
class InputWire(DrawElement):
def __init__(self, label):
super().__init__(label)
    @staticmethod
    def fillup_layer(names):
longest = max([len(name) for name in names])
inputs_wires = []
for name in names:
inputs_wires.append(InputWire(name.rjust(longest)))
return inputs_wires |
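# Usage sketch: names are right-justified to the longest one so shorter
# wire labels get left-padded; assumes DrawElement keeps the label text.
wires = InputWire.fillup_layer(['q_0: ', 'c: '])
# the second label becomes '  c: ', padded to the width of 'q_0: ' |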
class OHEMSampler(BaseSampler):
def __init__(self, num, pos_fraction, context, neg_pos_ub=(- 1), add_gt_as_proposals=True, **kwargs):
super(OHEMSampler, self).__init__(num, pos_fraction, neg_pos_ub, add_gt_as_proposals)
if (not hasattr(context, 'num_stages')):
self.bbox_roi_extractor = context.bbox_roi_extractor
self.bbox_head = context.bbox_head
else:
self.bbox_roi_extractor = context.bbox_roi_extractor[context.current_stage]
self.bbox_head = context.bbox_head[context.current_stage]
def hard_mining(self, inds, num_expected, bboxes, labels, feats):
with torch.no_grad():
rois = bbox2roi([bboxes])
bbox_feats = self.bbox_roi_extractor(feats[:self.bbox_roi_extractor.num_inputs], rois)
(cls_score, _) = self.bbox_head(bbox_feats)
loss = self.bbox_head.loss(cls_score=cls_score, bbox_pred=None, labels=labels, label_weights=cls_score.new_ones(cls_score.size(0)), bbox_targets=None, bbox_weights=None, reduction_override='none')['loss_cls']
(_, topk_loss_inds) = loss.topk(num_expected)
return inds[topk_loss_inds]
def _sample_pos(self, assign_result, num_expected, bboxes=None, feats=None, **kwargs):
pos_inds = torch.nonzero((assign_result.gt_inds > 0))
if (pos_inds.numel() != 0):
pos_inds = pos_inds.squeeze(1)
if (pos_inds.numel() <= num_expected):
return pos_inds
else:
return self.hard_mining(pos_inds, num_expected, bboxes[pos_inds], assign_result.labels[pos_inds], feats)
def _sample_neg(self, assign_result, num_expected, bboxes=None, feats=None, **kwargs):
neg_inds = torch.nonzero((assign_result.gt_inds == 0))
if (neg_inds.numel() != 0):
neg_inds = neg_inds.squeeze(1)
if (len(neg_inds) <= num_expected):
return neg_inds
else:
return self.hard_mining(neg_inds, num_expected, bboxes[neg_inds], assign_result.labels[neg_inds], feats) |
class TestValidSubsetsErrors(unittest.TestCase):
def _test_case(self, paths, extra_flags):
with tempfile.TemporaryDirectory() as data_dir:
[write_empty_file(os.path.join(data_dir, f'{p}.bin')) for p in (paths + ['train'])]
cfg = make_lm_config(data_dir, extra_flags=extra_flags)
raise_if_valid_subsets_unintentionally_ignored(cfg)
def test_default_raises(self):
with self.assertRaises(ValueError):
self._test_case(['valid', 'valid1'], [])
with self.assertRaises(ValueError):
self._test_case(['valid', 'valid1', 'valid2'], ['--valid-subset', 'valid,valid1'])
    def test_partially_specified_valid_subsets(self):
with self.assertRaises(ValueError):
self._test_case(['valid', 'valid1', 'valid2'], ['--valid-subset', 'valid,valid1'])
self._test_case(['valid', 'valid1', 'valid2'], ['--valid-subset', 'valid,valid1', '--ignore-unused-valid-subsets'])
def test_legal_configs(self):
self._test_case(['valid'], [])
self._test_case(['valid', 'valid1'], ['--ignore-unused-valid-subsets'])
self._test_case(['valid', 'valid1'], ['--combine-val'])
self._test_case(['valid', 'valid1'], ['--valid-subset', 'valid,valid1'])
self._test_case(['valid', 'valid1'], ['--valid-subset', 'valid1'])
self._test_case(['valid', 'valid1'], ['--combine-val', '--ignore-unused-valid-subsets'])
self._test_case(['valid1'], ['--valid-subset', 'valid1'])
def test_disable_validation(self):
self._test_case([], ['--disable-validation'])
self._test_case(['valid', 'valid1'], ['--disable-validation'])
def test_dummy_task(self):
cfg = make_lm_config(task='dummy_lm')
raise_if_valid_subsets_unintentionally_ignored(cfg)
def test_masked_dummy_task(self):
cfg = make_lm_config(task='dummy_masked_lm')
raise_if_valid_subsets_unintentionally_ignored(cfg) |
def _tokenize(text_path, dictionary):
print('Tokenizing {}'.format(text_path))
assert os.path.exists(text_path)
ids = []
with open(text_path, 'r', encoding='utf8') as f:
for line in f:
tokens = (line.split() + ['<eos>'])
for token in tokens:
ids.append(dictionary[token])
ids = torch.LongTensor(ids)
return ids |
class ExternalProcess(object):
_ACCESS = 1
_CALL = 2
_RESULT = 3
_EXCEPTION = 4
_CLOSE = 5
def __init__(self, constructor):
(self._conn, conn) = multiprocessing.Pipe()
self._process = multiprocessing.Process(target=self._worker, args=(constructor, conn))
atexit.register(self.close)
self._process.start()
self._observ_space = None
self._action_space = None
    @property
    def observation_space(self):
if (not self._observ_space):
self._observ_space = self.__getattr__('observation_space')
return self._observ_space
    @property
    def action_space(self):
if (not self._action_space):
self._action_space = self.__getattr__('action_space')
return self._action_space
def __getattr__(self, name):
self._conn.send((self._ACCESS, name))
return self._receive()
def call(self, name, *args, **kwargs):
payload = (name, args, kwargs)
self._conn.send((self._CALL, payload))
return self._receive
def close(self):
try:
self._conn.send((self._CLOSE, None))
self._conn.close()
except IOError:
pass
self._process.join()
def step(self, action, blocking=True):
promise = self.call('step', action)
if blocking:
return promise()
else:
return promise
def reset(self, blocking=True):
promise = self.call('reset')
if blocking:
return promise()
else:
return promise
def _receive(self):
(message, payload) = self._conn.recv()
if (message == self._EXCEPTION):
stacktrace = payload
raise Exception(stacktrace)
if (message == self._RESULT):
return payload
raise KeyError('Received message of unexpected type {}'.format(message))
def _worker(self, constructor, conn):
try:
env = constructor()
while True:
try:
if (not conn.poll(0.1)):
continue
(message, payload) = conn.recv()
except (EOFError, KeyboardInterrupt):
break
if (message == self._ACCESS):
name = payload
result = getattr(env, name)
conn.send((self._RESULT, result))
continue
if (message == self._CALL):
(name, args, kwargs) = payload
result = getattr(env, name)(*args, **kwargs)
conn.send((self._RESULT, result))
continue
if (message == self._CLOSE):
assert (payload is None)
break
raise KeyError('Received message of unknown type {}'.format(message))
except Exception:
stacktrace = ''.join(traceback.format_exception(*sys.exc_info()))
tf.logging.error('Error in environment process: {}'.format(stacktrace))
conn.send((self._EXCEPTION, stacktrace))
conn.close() |
def default_flist_reader(flist):
imlist = []
with open(flist, 'r') as rf:
for line in rf.readlines():
impath = line.strip()
imlist.append(impath)
return imlist |
def resnet_ole18(pretrained=False, **kwargs):
model = Resnet_Ole(BasicBlock, [2, 2, 2, 2], **kwargs)
if pretrained:
model.load_state_dict(model_zoo.load_url(model_urls['resnet_ole18']))
return model |