code stringlengths 101 5.91M |
|---|
class Adafactor(torch.optim.Optimizer):
    """Adafactor optimizer (Shazeer & Stern, 2018): Adam-like training with
    sub-linear memory via a factored second-moment estimate for >=2D params.

    Args:
        params: iterable of parameters or param groups.
        lr: external learning rate; ``None`` enables relative-step sizing.
        eps: regularization constant added to squared gradients.
        eps_scale: floor for the parameter-scale factor.
        clip_threshold: threshold for RMS-based update clipping.
        decay_rate: exponent for the step-dependent second-moment decay.
        betas: if given, ``betas[0]`` enables first-moment (momentum) tracking.
        weight_decay: decoupled weight decay coefficient.
        scale_parameter: scale relative steps by the parameter RMS.
        warmup_init: linear LR warmup (requires relative-step mode).

    Fixes vs. the original block: ``_get_lr`` / ``_get_options`` / ``_rms``
    were plain functions called through ``self.`` (their ``@staticmethod``
    decorators had been lost), which would pass ``self`` as the first
    argument; the deprecated ``Tensor.add_(scalar, tensor)`` overload is
    replaced by the supported ``add_(tensor, alpha=scalar)`` form.
    """

    def __init__(self, params, lr=None, eps=1e-30, eps_scale=0.001, clip_threshold=1.0,
                 decay_rate=-0.8, betas=None, weight_decay=0.0, scale_parameter=True,
                 warmup_init=False):
        # lr=None switches to the paper's relative-step schedule.
        relative_step = lr is None
        if warmup_init and not relative_step:
            raise ValueError('warmup_init requires relative_step=True')
        beta1 = None if betas is None else betas[0]
        defaults = dict(lr=lr, eps=eps, eps_scale=eps_scale, clip_threshold=clip_threshold,
                        decay_rate=decay_rate, beta1=beta1, weight_decay=weight_decay,
                        scale_parameter=scale_parameter, relative_step=relative_step,
                        warmup_init=warmup_init)
        super(Adafactor, self).__init__(params, defaults)

    @staticmethod
    def _get_lr(param_group, param_state):
        """Return the step size, updating ``param_group['lr']`` in relative-step mode."""
        if param_group['relative_step']:
            min_step = 1e-06 * param_state['step'] if param_group['warmup_init'] else 0.01
            lr_t = min(min_step, 1.0 / math.sqrt(param_state['step']))
            param_scale = 1.0
            if param_group['scale_parameter']:
                # Scale by the parameter RMS, floored at eps_scale.
                param_scale = max(param_group['eps_scale'], param_state['RMS'])
            param_group['lr'] = lr_t * param_scale
        return param_group['lr']

    @staticmethod
    def _get_options(param_group, param_shape):
        """Decide whether to factor the second moment and track a first moment."""
        factored = len(param_shape) >= 2
        use_first_moment = param_group['beta1'] is not None
        return factored, use_first_moment

    @staticmethod
    def _rms(tensor):
        """Root-mean-square of a tensor."""
        return tensor.norm(2) / (tensor.numel() ** 0.5)

    def _approx_sq_grad(self, exp_avg_sq_row, exp_avg_sq_col):
        """Reconstruct the rsqrt of the factored second moment (outer product)."""
        r_factor = (exp_avg_sq_row / exp_avg_sq_row.mean(dim=-1, keepdim=True)).rsqrt_().unsqueeze(-1)
        c_factor = exp_avg_sq_col.unsqueeze(-2).rsqrt()
        return torch.mul(r_factor, c_factor)

    def step(self, closure=None):
        """Perform a single optimization step.

        Args:
            closure: optional callable that re-evaluates the model and returns the loss.
        """
        loss = None
        if closure is not None:
            loss = closure()
        for group in self.param_groups:
            for p in group['params']:
                if p.grad is None:
                    continue
                grad = p.grad.data
                # Do statistics in fp32 even for low-precision params/grads.
                if grad.dtype in {torch.float16, torch.bfloat16}:
                    grad = grad.float()
                if grad.is_sparse:
                    raise RuntimeError('Adafactor does not support sparse gradients.')
                state = self.state[p]
                grad_shape = grad.shape
                factored, use_first_moment = self._get_options(group, grad_shape)
                if len(state) == 0:
                    # Lazy state init; factored mode stores row/col stats only.
                    state['step'] = 0
                    if use_first_moment:
                        state['exp_avg'] = torch.zeros_like(grad)
                    if factored:
                        state['exp_avg_sq_row'] = torch.zeros(grad_shape[:-1]).to(grad)
                        state['exp_avg_sq_col'] = torch.zeros(grad_shape[:-2] + grad_shape[-1:]).to(grad)
                    else:
                        state['exp_avg_sq'] = torch.zeros_like(grad)
                    state['RMS'] = 0
                else:
                    # Keep state on the same device/dtype as the (possibly cast) grad.
                    if use_first_moment:
                        state['exp_avg'] = state['exp_avg'].to(grad)
                    if factored:
                        state['exp_avg_sq_row'] = state['exp_avg_sq_row'].to(grad)
                        state['exp_avg_sq_col'] = state['exp_avg_sq_col'].to(grad)
                    else:
                        state['exp_avg_sq'] = state['exp_avg_sq'].to(grad)
                p_data_fp32 = p.data
                if p.data.dtype in {torch.float16, torch.bfloat16}:
                    p_data_fp32 = p_data_fp32.float()
                state['step'] += 1
                state['RMS'] = self._rms(p_data_fp32)
                lr_t = self._get_lr(group, state)
                # Step-dependent second-moment decay: beta2_t = 1 - t^decay_rate.
                beta2t = 1.0 - math.pow(state['step'], group['decay_rate'])
                update = (grad ** 2) + group['eps']
                if factored:
                    exp_avg_sq_row = state['exp_avg_sq_row']
                    exp_avg_sq_col = state['exp_avg_sq_col']
                    exp_avg_sq_row.mul_(beta2t).add_(update.mean(dim=-1), alpha=1.0 - beta2t)
                    exp_avg_sq_col.mul_(beta2t).add_(update.mean(dim=-2), alpha=1.0 - beta2t)
                    update = self._approx_sq_grad(exp_avg_sq_row, exp_avg_sq_col)
                    update.mul_(grad)
                else:
                    exp_avg_sq = state['exp_avg_sq']
                    exp_avg_sq.mul_(beta2t).add_(update, alpha=1.0 - beta2t)
                    update = exp_avg_sq.rsqrt().mul_(grad)
                # Clip by the update RMS relative to clip_threshold, then scale by lr.
                update.div_((self._rms(update) / group['clip_threshold']).clamp_(min=1.0))
                update.mul_(lr_t)
                if use_first_moment:
                    exp_avg = state['exp_avg']
                    exp_avg.mul_(group['beta1']).add_(update, alpha=1 - group['beta1'])
                    update = exp_avg
                if group['weight_decay'] != 0:
                    # Decoupled weight decay, scaled by the current step size.
                    p_data_fp32.add_(p_data_fp32, alpha=-group['weight_decay'] * lr_t)
                p_data_fp32.add_(-update)
                if p.data.dtype in {torch.float16, torch.bfloat16}:
                    p.data.copy_(p_data_fp32)
        return loss
def main():
    """Load the test split, build the model, and report proposal recall area."""
    print('loading dataset')
    test_loader, text_proc = get_dataset(args)
    print('building model')
    model = get_model(text_proc, args)
    recall_area = validate(model, test_loader, args)
    print('proposal recall area: {:.6f}'.format(recall_area))
class CamVid(data.Dataset):
    """CamVid segmentation dataset: pairs each image with its annotation map.

    Expects the standard CamVid layout under ``root_dir`` (train/trainannot,
    val/valannot, test/testannot), all files sharing ``img_extension``.
    """

    # Split sub-directories (images / labels).
    train_folder = 'train'
    train_lbl_folder = 'trainannot'
    val_folder = 'val'
    val_lbl_folder = 'valannot'
    test_folder = 'test'
    test_lbl_folder = 'testannot'
    img_extension = '.png'
    # Class name -> RGB color in the annotation images.
    color_encoding = OrderedDict([('sky', (128, 128, 128)), ('building', (128, 0, 0)), ('pole', (192, 192, 128)), ('road_marking', (255, 69, 0)), ('road', (128, 64, 128)), ('pavement', (60, 40, 222)), ('tree', (128, 128, 0)), ('sign_symbol', (192, 128, 128)), ('fence', (64, 64, 128)), ('car', (64, 0, 128)), ('pedestrian', (64, 64, 0)), ('bicyclist', (0, 128, 192)), ('unlabeled', (0, 0, 0))])

    def __init__(self, root_dir, mode='train', transform=None, label_transform=None,
                 loader=utils.pil_loader):
        self.root_dir = root_dir
        self.mode = mode
        self.transform = transform
        self.label_transform = label_transform
        self.loader = loader
        mode_name = self.mode.lower()
        if mode_name == 'train':
            self.train_data = utils.get_files(
                os.path.join(root_dir, self.train_folder), extension_filter=self.img_extension)
            self.train_labels = utils.get_files(
                os.path.join(root_dir, self.train_lbl_folder), extension_filter=self.img_extension)
        elif mode_name == 'val':
            self.val_data = utils.get_files(
                os.path.join(root_dir, self.val_folder), extension_filter=self.img_extension)
            self.val_labels = utils.get_files(
                os.path.join(root_dir, self.val_lbl_folder), extension_filter=self.img_extension)
        elif mode_name == 'test':
            self.test_data = utils.get_files(
                os.path.join(root_dir, self.test_folder), extension_filter=self.img_extension)
            self.test_labels = utils.get_files(
                os.path.join(root_dir, self.test_lbl_folder), extension_filter=self.img_extension)
        else:
            raise RuntimeError('Unexpected dataset mode. Supported modes are: train, val and test')

    def __getitem__(self, index):
        """Return the (image, label) pair at ``index``, with transforms applied."""
        mode_name = self.mode.lower()
        if mode_name == 'train':
            data_path, label_path = self.train_data[index], self.train_labels[index]
        elif mode_name == 'val':
            data_path, label_path = self.val_data[index], self.val_labels[index]
        elif mode_name == 'test':
            data_path, label_path = self.test_data[index], self.test_labels[index]
        else:
            raise RuntimeError('Unexpected dataset mode. Supported modes are: train, val and test')
        img, label = self.loader(data_path, label_path)
        if self.transform is not None:
            img = self.transform(img)
        if self.label_transform is not None:
            label = self.label_transform(label)
        return img, label

    def __len__(self):
        """Number of samples in the selected split."""
        mode_name = self.mode.lower()
        if mode_name == 'train':
            return len(self.train_data)
        elif mode_name == 'val':
            return len(self.val_data)
        elif mode_name == 'test':
            return len(self.test_data)
        else:
            raise RuntimeError('Unexpected dataset mode. Supported modes are: train, val and test')
class StarTransEnc(nn.Module):
    """Star-Transformer encoder: embed tokens, project to the hidden size,
    then run the StarTransformer over the masked sequence.

    NOTE(review): ``emb_dropout`` is accepted but unused, matching the
    original implementation.
    """

    def __init__(self, embed, hidden_size, num_layers, num_head, head_dim,
                 max_len, emb_dropout, dropout):
        super(StarTransEnc, self).__init__()
        self.embedding = get_embeddings(embed)
        # Project embeddings into the encoder's hidden dimension.
        self.emb_fc = nn.Linear(self.embedding.embedding_dim, hidden_size)
        self.encoder = StarTransformer(hidden_size=hidden_size,
                                       num_layers=num_layers,
                                       num_head=num_head,
                                       head_dim=head_dim,
                                       dropout=dropout,
                                       max_len=max_len)

    def forward(self, x, mask):
        """Encode token ids ``x`` under ``mask``; returns (nodes, relay)."""
        hidden = self.emb_fc(self.embedding(x))
        nodes, relay = self.encoder(hidden, mask)
        return nodes, relay
def precision_recall_f1_report(list_tuples_gold: List[List[tuple]],
                               list_tuples_pred: List[List[tuple]],
                               macro_over='types', **kwargs):
    """Compute per-group and averaged precision/recall/F1 scores.

    ``macro_over`` selects grouping by entity types or by samples. Returns
    ``(scores, ave_scores)`` where ``ave_scores`` holds 'macro' (mean of the
    per-group metrics) and 'micro' (metrics recomputed from summed counts).
    """
    assert len(list_tuples_gold) == len(list_tuples_pred)
    if macro_over == 'types':
        scores = _prf_scores_over_types(list_tuples_gold, list_tuples_pred, **kwargs)
    elif macro_over == 'samples':
        scores = _prf_scores_over_samples(list_tuples_gold, list_tuples_pred, **kwargs)
    else:
        raise ValueError(f'Invalid `macro_over` {macro_over}')
    ave_scores = {
        # Macro: average the per-group metrics directly.
        'macro': {key: _agg_scores_by_key(scores, key, agg_mode='mean')
                  for key in ['precision', 'recall', 'f1']},
        # Micro: sum raw counts, then recompute the metrics from them.
        'micro': {key: _agg_scores_by_key(scores, key, agg_mode='sum')
                  for key in ['n_gold', 'n_pred', 'n_true_positive']},
    }
    micro_precision, micro_recall, micro_f1 = _precision_recall_f1(
        ave_scores['micro']['n_gold'],
        ave_scores['micro']['n_pred'],
        ave_scores['micro']['n_true_positive'], **kwargs)
    ave_scores['micro'].update({'precision': micro_precision,
                                'recall': micro_recall,
                                'f1': micro_f1})
    return scores, ave_scores
def linear_algebra_heuristic(d):
    """Heuristically decide whether linear-algebra (faugere-style) reduction
    should be enabled for a Groebner-basis computation, and set related
    defaults in (a copy of) the option dictionary ``d``.

    NOTE(review): ``used_vars_set`` and ``dense_system`` are defined elsewhere
    in the project — their exact semantics are assumed from usage here.
    """
    d = copy(d)
    I = d['I']  # the polynomial system under consideration

    def want_la():
        # Vote for linear algebra when the system uses few enough variables
        # (checked under a degree order and/or for dense systems).
        if not I:
            return False
        n_used_vars = None
        bound = None
        if next(iter(I)).ring().has_degree_order():
            new_bound = 200
            n_used_vars = used_vars_set(I, bound=new_bound).deg()
            if n_used_vars < new_bound:
                return True
            bound = new_bound
        if dense_system(I):
            new_bound = 100
            # Only recount when the tighter bound could change the answer.
            if not (bound and new_bound < bound):
                n_used_vars = used_vars_set(I, bound=new_bound).deg()
                bound = new_bound
            if n_used_vars < bound:
                return True
        return False
    # Respect an explicit faugere=False or noro=True; otherwise enable faugere
    # when requested or when the heuristic votes for it, filling in
    # linear-algebra-friendly defaults for unset options.
    if not ((('faugere' in d) and (not d['faugere'])) or (('noro' in d) and d['noro'])):
        if (('faugere' in d) and d['faugere']) or want_la():
            d['faugere'] = True
            if 'red_tail' not in d:
                d['red_tail'] = False
            if 'selection_size' not in d:
                d['selection_size'] = 10000
            if 'll' not in d:
                d['ll'] = True
    return d
def redirect(location, code=302, Response=None):
    """Return an HTTP response that redirects the client to ``location``.

    ``code`` is the redirect status (302 by default); a custom ``Response``
    class may be supplied, otherwise the package's own is imported lazily.
    """
    if Response is None:
        from .wrappers import Response
    # Escape for display before the URL is IRI->URI converted.
    display_location = escape(location)
    if isinstance(location, text_type):
        from .urls import iri_to_uri
        location = iri_to_uri(location, safe_conversion=True)
    body = (
        '<!DOCTYPE HTML PUBLIC "-//W3C//DTD HTML 3.2 Final//EN">\n'
        '<title>Redirecting...</title>\n'
        '<h1>Redirecting...</h1>\n'
        '<p>You should be redirected automatically to target URL: '
        '<a href="%s">%s</a>. If not click the link.'
        % (escape(location), display_location)
    )
    response = Response(body, code, mimetype='text/html')
    response.headers['Location'] = location
    return response
def _format(val: Any, output_format: str = 'standard', errors: str = 'coarse') -> Any:
    """Normalize one Mexican RFC value into the requested output format.

    Returns a single-element list: NaN for null/invalid values (or the raw
    value when ``errors == 'ignore'``), the formatted RFC otherwise. With
    ``errors == 'raise'`` an invalid value raises ``ValueError``.
    """
    val = str(val)
    if val in NULL_VALUES:
        return [np.nan]
    if not validate_mx_rfc(val):
        if errors == 'raise':
            raise ValueError(f'Unable to parse value {val}')
        return [val if errors == 'ignore' else np.nan]
    if output_format == 'compact':
        return [rfc.compact(val)]
    if output_format == 'standard':
        return [rfc.format(val)]
    # Unknown output format: nothing to emit (matches original behavior).
    return []
def get_logger():
    """Configure and return the root logger with a single DEBUG stream handler.

    Any previously attached handlers are discarded so repeated calls do not
    duplicate output.
    """
    root = logging.getLogger()
    root.handlers = []
    handler = logging.StreamHandler()
    handler.setFormatter(
        logging.Formatter('[%(levelname)s %(asctime)s] %(name)s %(message)s', '%H:%M:%S'))
    root.addHandler(handler)
    root.setLevel('DEBUG')
    return root
class TestAnalyseDeclarationsTransform(unittest.TestCase):
    """Tests for the declarations-analysis transform helpers."""

    def test_calculate_pickle_checksums(self):
        # Expect between two and three checksum variants for the member list.
        members = ['member1', 'member2', 'member3']
        checksums = _calculate_pickle_checksums(members)
        assert 2 <= len(checksums) <= 3, checksums
def format_baseline(retrievals, kg_type='atomic'):
    """Parse per-dimension relations out of ATOMIC-style retrievals.

    Fix: the per-dimension score list was ``[(0 * len(rels))]`` — always the
    single-element list ``[0]`` — where one zero per relation was clearly
    intended (``[0] * len(rels)``).

    NOTE(review): ``saved_rels`` is populated but never returned; the
    function returns ``retrievals`` unchanged, preserving the original
    interface — confirm upstream whether ``saved_rels`` should be returned.
    """
    saved_rels = {}
    if kg_type == 'atomic':
        for i in range(len(retrievals)):
            # The stored relation strings are literal-eval'd back into objects.
            relations = [ast.literal_eval(r) for r in retrievals[i][1][0][0][1:-1]]
            saved_rels[i] = {}
            for d in range(len(dimensions_of_interest)):
                saved_rels[i][dimensions_of_interest[d]] = {
                    'relations': relations[d],
                    'scores': [0] * len(relations[d]),  # one zero score per relation
                }
    return retrievals
# Placeholder class emitted when the "tokenizers" backend is not installed;
# any instantiation goes through requires_backends, which raises a helpful
# import error pointing at the missing backend.
class ReformerTokenizerFast(metaclass=DummyObject):
    _backends = ['tokenizers']

    def __init__(self, *args, **kwargs):
        requires_backends(self, ['tokenizers'])
class BroadcastParameterRule(str, enum.Enum):
    """Closed set of parameter-broadcast matching strategies.

    Subclasses ``str`` so members compare and serialize as their plain
    string values.
    """
    INTERSECT = 'intersect'
    ONE_TO_ONE = 'one_to_one'
    ALL_OR_NOTHING = 'all_or_nothing'
    NONE = 'none'
class JoinAcceptPayload(Payload):
    """LoRaWAN Join-Accept payload: AppNonce | NetID | DevAddr | DLSettings |
    RxDelay | [CFList], followed by a 4-byte MIC.

    Fix: the ``@property`` / ``@x.setter`` / ``@staticmethod`` decorators had
    been stripped (bare ``_decrypted.setter`` / ``.setter`` lines remained,
    the latter being syntax errors); they are restored here. The dead
    ``msg.rootKeys.appKey`` line in the ``_decrypted`` getter referenced an
    undefined name and is removed — the getter still raises
    ``NotImplementedError`` as before.
    """

    # Byte layout of the (decrypted) payload.
    _OFFSET_APPNONCE = 0
    _LEN_APPNONCE = 3
    _OFFSET_NETID = _OFFSET_APPNONCE + _LEN_APPNONCE
    _LEN_NETID = 3
    _OFFSET_DEVADDR = _OFFSET_NETID + _LEN_NETID
    _LEN_DEVADDR = 4
    _OFFSET_DLSETTINGS = _OFFSET_DEVADDR + _LEN_DEVADDR
    _LEN_DLSETTINGS = 1
    _MASK_DLSETTINGS_RX1DROFFSET = 0x70  # == 112: upper nibble bits of DLSettings
    _MASK_DLSETTINGS_RX2DATARATE = 0x0F  # == 15: lower nibble of DLSettings
    _OFFSET_RXDELAY = _OFFSET_DLSETTINGS + _LEN_DLSETTINGS
    _LEN_RXDELAY = 1
    _OFFSET_CFLIST = _OFFSET_RXDELAY + _LEN_RXDELAY
    _LEN_MIC = 4

    def __init__(self, msg):
        super().__init__(msg)

    @staticmethod
    def defaultPayload():
        """Return an all-zero payload of the minimal (no CFList) length."""
        pLength = (JoinAcceptPayload._LEN_APPNONCE + JoinAcceptPayload._LEN_NETID
                   + JoinAcceptPayload._LEN_DEVADDR + JoinAcceptPayload._LEN_DLSETTINGS
                   + JoinAcceptPayload._LEN_RXDELAY + JoinAcceptPayload._LEN_MIC)
        return [0] * pLength

    @property
    def _decrypted(self):
        # Decryption of the join-accept payload is not implemented yet.
        raise NotImplementedError()

    @_decrypted.setter
    def _decrypted(self, decrypted):
        raise NotImplementedError()

    @property
    def appNonce(self):
        """AppNonce as a little-endian number."""
        return extractNumber(self._decrypted, self._OFFSET_APPNONCE, self._LEN_APPNONCE,
                             isLittleEndian=True)

    @appNonce.setter
    def appNonce(self, appNonce):
        self._decrypted = replaceNumber(self._decrypted, self._OFFSET_APPNONCE,
                                        self._LEN_APPNONCE, appNonce, True)

    @property
    def netID(self):
        """NetID bytes."""
        return extractBytes(self._decrypted, self._OFFSET_NETID, self._LEN_NETID, True, True)

    @netID.setter
    def netID(self, netID):
        self._decrypted = replaceBytes(self._decrypted, self._OFFSET_NETID,
                                       self._LEN_NETID, netID, True, True)

    @property
    def devAddr(self):
        """Device address bytes."""
        return extractBytes(self._decrypted, self._OFFSET_DEVADDR, self._LEN_DEVADDR, True, True)

    @devAddr.setter
    def devAddr(self, devAddr):
        self._decrypted = replaceBytes(self._decrypted, self._OFFSET_DEVADDR,
                                       self._LEN_DEVADDR, reversed(devAddr), True)

    @property
    def _dlSettings(self):
        """Raw DLSettings byte."""
        return self._decrypted[self._OFFSET_DLSETTINGS]

    @_dlSettings.setter
    def _dlSettings(self, dlSettings):
        self._decrypted = replaceBytes(self._decrypted, self._OFFSET_DLSETTINGS,
                                       self._LEN_DLSETTINGS, [dlSettings], True)

    @property
    def rx1drOffset(self):
        """RX1 data-rate offset, decoded via the message's region."""
        region = self._msg.region
        return region.binToRx1DrOffset(
            getWithMask(self._dlSettings, self._MASK_DLSETTINGS_RX1DROFFSET))

    @rx1drOffset.setter
    def rx1drOffset(self, rx1drOffset):
        region = self._msg.region
        # NOTE(review): preserved from the original — the computed value is
        # returned rather than written back to _dlSettings; looks incomplete.
        return region.rx1DrOffsetToBin(
            setWithMask(self._dlSettings, rx1drOffset, self._MASK_DLSETTINGS_RX1DROFFSET))

    @property
    def rx2dr(self):
        """RX2 data rate, decoded via the message's region."""
        region = self._msg.region
        return region.binToDataRate(
            getWithMask(self._dlSettings, self._MASK_DLSETTINGS_RX2DATARATE))

    @rx2dr.setter
    def rx2dr(self, rx2dr):
        region = self._msg.region
        # NOTE(review): same pattern as rx1drOffset's setter — returns the
        # encoded value without storing it.
        return region.dataRateToBin(
            setWithMask(self._dlSettings, rx2dr, self._MASK_DLSETTINGS_RX2DATARATE))
def module_has_exports(mod):
    """Return True if *mod* exposes any callable marked with the EXPORT
    torchscript modifier."""
    for attr_name in dir(mod):
        # Missing/raising attributes resolve to None and are skipped,
        # mirroring the original hasattr guard.
        attr = getattr(mod, attr_name, None)
        if not callable(attr):
            continue
        if get_torchscript_modifier(attr) is FunctionModifiers.EXPORT:
            return True
    return False
def set_values(params, values):
    """Generator that temporarily assigns ``values`` to ``params``.

    On first advance the new values are installed; resuming the generator
    restores each parameter's original value.
    """
    saved = [param.value() for param in params]
    for param, new_value in zip(params, values):
        param.set_value(new_value)
    yield
    # Restore the originals captured before the swap.
    for param, old_value in zip(params, saved):
        param.set_value(old_value)
def main():
    """Run COCO keypoint evaluation for the Hourglass human-pose model over a
    directory of JPEG images and print the COCO eval summary.

    Returns the COCOeval stats array, or None when no predictions were made.
    NOTE(review): behavior of HourglassNetwork / HpDetDecode /
    LetterboxTransformer / normalize_image is assumed from this call site.
    """
    parser = argparse.ArgumentParser()
    parser.add_argument('--output', default='output', type=str)
    parser.add_argument('--data', default='val2017', type=str)  # image dir; also names the GT file
    parser.add_argument('--annotations', default='annotations', type=str)
    parser.add_argument('--inres', default='512,512', type=str)  # "H,W" input resolution
    parser.add_argument('--no-full-resolution', action='store_true')
    (args, _) = parser.parse_known_args()
    args.inres = tuple((int(x) for x in args.inres.split(',')))
    if (not args.no_full_resolution):
        # Full-resolution mode ignores --inres (letterbox pads to a stride instead).
        args.inres = (None, None)
    os.makedirs(args.output, exist_ok=True)
    kwargs = {'num_stacks': 2, 'cnv_dim': 256, 'weights': 'hpdet_coco', 'inres': args.inres}
    # Output head channel counts (heatmaps, keypoint heatmaps, offsets, ...).
    heads = {'hm': 1, 'hm_hp': 17, 'hp_offset': 2, 'hps': 34, 'reg': 2, 'wh': 2}
    out_fn_keypoints = os.path.join(args.output, (args.data + ('_keypoints_results_%s_%s.json' % (args.inres[0], args.inres[1]))))
    model = HourglassNetwork(heads=heads, **kwargs)
    model = HpDetDecode(model)
    if args.no_full_resolution:
        letterbox_transformer = LetterboxTransformer(args.inres[0], args.inres[1])
    else:
        letterbox_transformer = LetterboxTransformer(mode='testing', max_stride=128)
    fns = sorted(glob(os.path.join(args.data, '*.jpg')))
    results = []
    for fn in tqdm(fns):
        img = cv2.imread(fn)
        # COCO image ids are the numeric stem of the file name.
        image_id = int(os.path.splitext(os.path.basename(fn))[0])
        pimg = letterbox_transformer(img)
        pimg = normalize_image(pimg)
        pimg = np.expand_dims(pimg, 0)
        detections = model.predict(pimg)[0]
        for d in detections:
            score = d[4]
            (x1, y1, x2, y2) = d[:4]
            # Map the box and keypoints back from letterbox to image coordinates.
            (x1, y1, x2, y2) = letterbox_transformer.correct_box(x1, y1, x2, y2)
            (x1, y1, x2, y2) = (float(x1), float(y1), float(x2), float(y2))
            kps = d[5:(- 1)]
            kps_x = kps[:17]
            kps_y = kps[17:]
            kps = letterbox_transformer.correct_coords(np.vstack([kps_x, kps_y])).T
            # Append a constant visibility of 1 to form COCO (x, y, v) triples.
            kps = np.concatenate([kps, np.ones((17, 1), dtype='float32')], (- 1))
            kps = list(map(float, kps.flatten()))
            image_result = {'image_id': image_id, 'category_id': 1, 'score': float(score), 'bbox': [x1, y1, (x2 - x1), (y2 - y1)], 'keypoints': kps}
            results.append(image_result)
    if (not len(results)):
        print('No predictions were generated.')
        return
    # Persist predictions, then score them against the matching GT file.
    with open(out_fn_keypoints, 'w') as f:
        json.dump(results, f, indent=2)
    print(('Predictions saved to: %s' % out_fn_keypoints))
    gt_fn = os.path.join(args.annotations, ('person_keypoints_%s.json' % args.data))
    print(('Loading GT: %s' % gt_fn))
    coco_true = COCO(gt_fn)
    coco_pred = coco_true.loadRes(out_fn_keypoints)
    coco_eval = COCOeval(coco_true, coco_pred, 'keypoints')
    coco_eval.evaluate()
    coco_eval.accumulate()
    coco_eval.summarize()
    return coco_eval.stats
def module_init():
    """Create the root binding module for ns.csma_layout in the ns3 namespace."""
    return Module('ns.csma_layout', cpp_namespace='::ns3')
class Partition(object):
    """One hyper-rectangular partition of a normalized table, used for
    MaxDiff-histogram-style cardinality estimation.

    Per-column metadata lives in ``self.meta`` as
    ``[left, right, left_inclusive, spread, distinct]`` lists, indexed via
    the external ``M`` constants (M.LEFT, M.RIGHT, M.LEFT_IN, M.SPREAD,
    M.DISTINCT). NOTE(review): ``M`` is defined elsewhere — the index
    meanings above are inferred from usage in this class; confirm against
    its definition.
    """

    def __init__(self, pid=0):
        self.pid = pid          # partition id, used in diagnostics
        self.meta = []          # per-column [left, right, left_in, spread, distinct]
        self.density = None     # rows per distinct value combination (set later)
        self.data = None        # the partition's rows; dropped by clean()
        self.maxdiff = None     # cached (max_area, column_id, split_value)

    def __str__(self):
        # Before density is computed show size/split info; afterwards only the
        # density summary (the raw data may already have been cleaned).
        if (self.density is None):
            return f'''{self.pid}: # : {len(self.data)}
MaxDiff: {self.maxdiff}
Metadata: {self.meta}'''
        else:
            return f'''{self.pid}: Density: {self.density}
Metadata: {self.meta}'''

    def clean(self):
        # Release the raw data once the partition summary is final.
        self.data = None
        self.maxdiff = None

    def construct_from_table(self, table):
        """Initialize from a table: keep normalized data and seed column meta."""
        self.data = table.normalize()
        for c in self.data.columns:
            # Initial range is closed on the left: [min, max].
            self.meta.append([self.data[c].min(), self.data[c].max(), True, None, None])

    def get_maxdiff(self):
        """Return (and cache) the largest count*gap "area" over all columns.

        Area = count(v) * (next_value - v); the classic MaxDiff criterion.
        """
        if (self.maxdiff is not None):
            return self.maxdiff[0]
        for (cid, c) in enumerate(self.data.columns):
            counter = self.data[c].value_counts().sort_index()
            areas = (counter.iloc[:(- 1)] * (counter.index[1:] - counter.index[:(- 1)]))
            if (len(areas) > 0):
                c_max = areas.max()
                if ((self.maxdiff is None) or (c_max > self.maxdiff[0])):
                    self.maxdiff = (c_max, cid, areas.idxmax())
        if (self.maxdiff is None):
            # Every column has a single distinct value: nothing to split on.
            self.maxdiff = (0, None, None)
        return self.maxdiff[0]

    def split_partition(self):
        """Split at the cached MaxDiff point; returns (left, right) partitions."""
        if (self.maxdiff is None):
            self.get_maxdiff()
        assert ((self.maxdiff is not None) and (self.maxdiff[0] > 0))
        (_, cid, split) = self.maxdiff
        c = self.data.columns[cid]
        p1 = Partition()
        p1.data = self.data[(self.data[c] <= split)]
        p1.meta = copy.deepcopy(self.meta)
        # Left child keeps its left bound; its right bound becomes the split.
        p1.meta[cid] = [p1.meta[cid][0], split, p1.meta[cid][2], None, None]
        p2 = Partition()
        p2.data = self.data[(self.data[c] > split)]
        p2.meta = copy.deepcopy(self.meta)
        # Right child is left-open: it starts strictly above the split value.
        p2.meta[cid] = [split, p2.meta[cid][1], False, None, None]
        return (p1, p2)

    def calculate_spread_density(self):
        """Fill per-column spread/distinct counts and the partition density."""
        total_distinct = 1
        for cid in range(len(self.meta)):
            c = self.data.columns[cid]
            unique = self.data[c].unique()
            distinct = len(unique)
            self.meta[cid][M.DISTINCT] = distinct
            if (distinct == 1):
                # Single value: spread is its offset from the left bound.
                self.meta[cid][M.SPREAD] = float((unique.item() - self.meta[cid][M.LEFT]))
                continue
            if (self.meta[cid][M.LEFT_IN] is True):
                # Closed interval: the distinct points include both endpoints.
                self.meta[cid][M.SPREAD] = float(((self.meta[cid][M.RIGHT] - self.meta[cid][M.LEFT]) / (distinct - 1)))
            else:
                self.meta[cid][M.SPREAD] = float(((self.meta[cid][M.RIGHT] - self.meta[cid][M.LEFT]) / distinct))
            total_distinct *= distinct
        # Uniform-spread assumption: rows per distinct value combination.
        self.density = (len(self.data) / total_distinct)

    def query(self, columns, operators, values):
        """Estimate the number of rows matching the conjunctive predicate
        given by parallel lists of column ids, operators and values.

        Assumes distinct values are evenly spread over each column's range
        (as computed by calculate_spread_density()).
        """
        def get_points_on_left(v, closed=False):
            # Count assumed-evenly-spread distinct points at or below v;
            # `closed` controls whether a point exactly at v is included.
            if ((v < left) or ((v == left) and (not closed))):
                return 0
            if ((v > right) or ((v == right) and closed)):
                return distinct
            (covered, remains) = divmod((v - left), spread)
            if ((not closed) and (remains < 1e-10)):
                # v sits (numerically) on a grid point: exclude it for open bounds.
                covered -= 1
            covered = (int(covered) + 1)
            return covered
        total_covered = 1
        for (cid, op, val) in zip(columns, operators, values):
            (left, right, left_in, spread, distinct) = self.meta[cid]
            if (distinct == 1):
                # Collapse the range onto the single stored value.
                left = right = (left + spread)
            elif (not left_in):
                # Left-open interval: first actual point is one spread in.
                left += spread
            assert (left <= right), f'{self.pid}-{cid}: {self.meta[cid]}'
            c_covered = None
            if (op == '<'):
                c_covered = get_points_on_left(val, closed=False)
            elif (op == '<='):
                c_covered = get_points_on_left(val, closed=True)
            elif (op == '>'):
                c_covered = (distinct - get_points_on_left(val, closed=True))
            elif (op == '>='):
                c_covered = (distinct - get_points_on_left(val, closed=False))
            elif (op == '[]'):
                # Closed range [val[0], val[1]].
                c_covered = get_points_on_left(val[1], closed=True)
                if (c_covered > 0):
                    c_covered -= get_points_on_left(val[0], closed=False)
            elif (op == '='):
                if ((val < left) or (val > right)):
                    c_covered = 0
                else:
                    c_covered = 1
            assert ((type(c_covered) == int) and (c_covered >= 0)), f'{self.pid}-{cid}-{op}-{val}:{self.meta[cid]}, c_cover: {c_covered}'
            total_covered *= c_covered
            if (total_covered == 0):
                break
        if (total_covered == 0):
            return 0
        # Unconstrained columns contribute all of their distinct values.
        for cid in range(len(self.meta)):
            if (not (cid in columns)):
                total_covered *= self.meta[cid][M.DISTINCT]
        return (total_covered * self.density)
class QuotientOfSimplicialSet(PushoutOfSimplicialSets):
    """The quotient X/A of a simplicial set X by a subcomplex A, realized as
    the pushout of the inclusion A -> X with the constant map A -> point.

    ``inclusion`` is the inclusion map of the subcomplex into the ambient
    simplicial set; ``vertex_name`` names the collapsed base point.
    """

    def __init__(self, inclusion, vertex_name='*'):
        subcomplex = inclusion.domain()
        PushoutOfSimplicialSets.__init__(self, [inclusion, subcomplex.constant_map()], vertex_name=vertex_name)
        ambient = inclusion.codomain()
        # If the ambient space is pointed/finite and its base point is NOT
        # collapsed, keep (the image of) that base point as the quotient's
        # base point instead of the new collapsed vertex.
        if (ambient.is_pointed() and ambient.is_finite()):
            if (ambient.base_point() not in subcomplex):
                self._basepoint = self.structure_map(0)(ambient.base_point())

    def ambient(self):
        """The ambient simplicial set X of the quotient X/A."""
        return self._maps[0].codomain()

    def subcomplex(self):
        """The collapsed subcomplex A of the quotient X/A."""
        return self._maps[0].domain()

    def n_skeleton(self, n):
        """The n-skeleton, computed as (n-skeleton of X) / (n-skeleton of A).

        For infinite complexes the result is cached in ``self._n_skeleton``
        as a (dimension, skeleton) pair and reused/restricted when possible.
        """
        if self.is_finite():
            ambient = SimplicialSet_finite.n_skeleton(self.ambient(), n)
            subcomplex = SimplicialSet_finite.n_skeleton(self.subcomplex(), n)
            subcomplex = ambient.subsimplicial_set(subcomplex.nondegenerate_simplices())
            return QuotientOfSimplicialSet_finite(subcomplex.inclusion_map(), vertex_name=self._vertex_name)
        (start, skel) = self._n_skeleton
        if (start == n):
            return skel
        elif (start > n):
            # A higher skeleton is cached; restrict it down to dimension n.
            return skel.n_skeleton(n)
        ambient = self.ambient().n_skeleton(n)
        subcomplex = ambient.subsimplicial_set(self.subcomplex().nondegenerate_simplices(n))
        ans = QuotientOfSimplicialSet_finite(subcomplex.inclusion_map(), vertex_name=self._vertex_name)
        self._n_skeleton = (n, ans)
        return ans

    def _repr_(self):
        return 'Quotient: ({}/{})'.format(self.ambient(), self.subcomplex())

    def _latex_(self):
        return '{} / {}'.format(latex(self.ambient()), latex(self.subcomplex()))
# Fix: the decorator arrived as a bare ``.parametrize(...)`` line (a syntax
# error) — the ``@pytest.mark`` prefix was stripped by extraction and is
# restored here.
@pytest.mark.parametrize('interval', [
    Interval(0, 1, False, False), Interval(0, 1, False, True),
    Interval(0, 1, True, False), Interval(0, 1, True, True),
    Interval(-np.inf, np.inf, False, False), Interval(-np.inf, np.inf, False, True),
    Interval(-np.inf, np.inf, True, False), Interval(-np.inf, np.inf, True, True),
    Interval(-10, -1, False, False), Interval(-10, -1, False, True),
    Interval(-10, -1, True, False), Interval(-10, -1, True, True),
])
def test_is_in_range(interval):
    """Interior points are always included; each endpoint only when the
    corresponding bound is inclusive."""
    (low, high) = _inclusive_low_high(interval)
    x = np.linspace(low, high, num=10)
    assert interval.includes(x)
    assert (interval.includes(np.r_[(x, interval.low)]) == interval.low_inclusive)
    assert (interval.includes(np.r_[(x, interval.high)]) == interval.high_inclusive)
    assert (interval.includes(np.r_[(x, interval.low, interval.high)]) == (interval.low_inclusive and interval.high_inclusive))
def mask_v2(val, m, multi_head=False, high_dim=False, name=None):
    """Multiply ``val`` by mask ``m`` (cast to val's dtype).

    ``multi_head`` prepends a broadcast axis; ``high_dim`` appends one so the
    mask lines up with an extra trailing feature dimension.
    """
    with tf.name_scope(name or 'new_exp_mask'):
        if multi_head:
            m = tf.expand_dims(m, 0)
        if high_dim:
            m = tf.expand_dims(m, -1)
        return val * tf.cast(m, val.dtype)
def hexists(file_path: str) -> bool:
    """Return True if ``file_path`` exists; HDFS paths are probed via the
    hadoop CLI, everything else via the local filesystem."""
    if file_path.startswith('hdfs'):
        # `hdfs dfs -test -e` exits 0 exactly when the path exists.
        command = '{} dfs -test -e {}'.format(HADOOP_BIN, file_path)
        return os.system(command) == 0
    return os.path.exists(file_path)
def get_ft_output_directory(params, makedirs=True):
    """Build (and optionally create) the fine-tuning output directory.

    Layout: base output dir [/ target_dataset unless params.ut] /
    "{n_way}way_{n_shot}shot_{ft_parts}_{ft_tag}".
    """
    path = get_output_directory(params, makedirs=makedirs)
    if not params.ut:
        path = os.path.join(path, params.target_dataset)
    ft_basename = '{:02d}way_{:03d}shot_{}_{}'.format(
        params.n_way, params.n_shot, params.ft_parts, params.ft_tag)
    path = os.path.join(path, ft_basename)
    if makedirs:
        os.makedirs(path, exist_ok=True)
    return path
class Softmin(Module):
    """Module wrapper around ``F.softmin`` applied along ``dim``.

    NOTE(review): ``Module`` / ``F`` are assumed to be the usual
    ``torch.nn.Module`` / ``torch.nn.functional`` aliases — confirm imports.
    """

    def __init__(self, dim=None):
        # dim=None defers the choice of dimension to F.softmin.
        super(Softmin, self).__init__()
        self.dim = dim

    def forward(self, input):
        # _stacklevel=5 keeps any implicit-dim warning pointing at the caller.
        return F.softmin(input, self.dim, _stacklevel=5)
def resnet50(num_classes=1000, pretrained=None):
    """Construct a ResNet-50 model; optionally load weights from ``pretrained``
    (a path accepted by ``torch.load``)."""
    # [3, 4, 6, 3] bottleneck blocks per stage is the ResNet-50 configuration.
    model = ResNet(Bottleneck, [3, 4, 6, 3], num_classes)
    if pretrained is None:
        return model
    load_pretrained_model(model, torch.load(pretrained))
    return model
def store_recommendation(recommendations, path=''):
    """Write recommendations to ``path`` as "user<TAB>item<TAB>score" lines.

    ``recommendations`` maps each user to an iterable of (item, score) pairs.
    """
    with open(path, 'w') as sink:
        for user, recs in recommendations.items():
            sink.writelines('%s\t%s\t%s\n' % (user, item, score)
                            for item, score in recs)
def indent(str, indent=4):
    """Prefix every line of ``str`` with ``indent`` spaces.

    A ``None`` input yields just the indent string itself. (The parameter
    name ``str`` shadows the builtin but is kept for interface stability.)
    """
    pad = ' ' * indent
    if str is None:
        return pad
    return '\n'.join(pad + line for line in str.split('\n'))
# NOTE(review): a route decorator was stripped by extraction, leaving the
# no-op expression ``('/settings')`` — presumably ``@app.route('/settings')``
# or similar; restore it against the original application object. The bare
# string expression has been removed since it had no effect.
def settings():
    """Redirect logged-in users to the confidential page; otherwise show a notice."""
    if g.user:
        return redirect('/confidential')
    else:
        return 'You are not logged in'
def calculate_scores(gold_annotations, system_annotations):
    """Tally coreference TP/FP/TN/FN per gender (and overall under key None).

    For every example, the name-A and name-B decisions of the gold and system
    annotations are compared; a missing system decision (None) is counted as
    a false negative. Returns a dict mapping gender (or None for the overall
    bucket) to a ``Scores`` accumulator.

    Fix: ``dict.iteritems()`` is Python 2 only — replaced with ``items()``.
    """
    scores = {}
    for example_id, gold_annotation in gold_annotations.items():
        system_annotation = system_annotations[example_id]
        name_a_annotations = [gold_annotation.name_a_coref, system_annotation.name_a_coref]
        name_b_annotations = [gold_annotation.name_b_coref, system_annotation.name_b_coref]
        # The None bucket aggregates all examples regardless of gender.
        for gender in [None, gold_annotation.gender]:
            if gender not in scores:
                scores[gender] = Scores()
            for gold, system in [name_a_annotations, name_b_annotations]:
                if system is None:
                    # No system output counts against recall.
                    print('Missing output for', example_id)
                    scores[gender].false_negatives += 1
                elif gold and system:
                    scores[gender].true_positives += 1
                elif (not gold) and system:
                    scores[gender].false_positives += 1
                elif (not gold) and (not system):
                    scores[gender].true_negatives += 1
                elif gold and (not system):
                    scores[gender].false_negatives += 1
    return scores
class OmniSourceDistSamplerSeedHook(Hook):
    """Hook that forwards the current epoch to each loader's distributed
    sampler so shuffling differs across epochs."""

    def before_epoch(self, runner):
        for loader in runner.data_loaders:
            sampler = loader.sampler
            if hasattr(sampler, 'set_epoch'):
                sampler.set_epoch(runner.epoch)
            else:
                # Fall back to the sampler nested inside the batch sampler.
                inner = loader.batch_sampler.sampler
                if hasattr(inner, 'set_epoch'):
                    inner.set_epoch(runner.epoch)
def isogenies_2(E, minimal_models=True):
    """Return all 2-isogenies from the elliptic curve ``E``.

    Each rational root of the 2-division polynomial gives a kernel polynomial
    ``x - root``. Over number fields, codomains use minimal models when
    ``minimal_models`` is True.
    """
    from sage.rings.number_field.number_field_base import NumberField
    f2 = E.division_polynomial(2)
    roots = sorted(f2.roots(multiplicities=False))
    x = f2.parent().gen()
    if minimal_models and isinstance(E.base_field(), NumberField):
        model = 'minimal'
    else:
        model = None
    return [E.isogeny(x - root, model=model) for root in roots]
class BaselineTrain(nn.Module):
    """Standard supervised baseline: feature extractor plus a linear (or
    distance-based) classification head, trained with cross-entropy.

    NOTE(review): inputs are moved to CUDA unconditionally, as in the
    original — a GPU is required.
    """

    def __init__(self, model_func, num_class, loss_type='softmax'):
        super(BaselineTrain, self).__init__()
        self.feature = model_func
        if loss_type == 'softmax':
            self.classifier = nn.Linear(self.feature.final_feat_dim, num_class)
            self.classifier.bias.data.fill_(0)
        elif loss_type == 'dist':
            # Cosine-distance classifier variant.
            self.classifier = backbone.distLinear(self.feature.final_feat_dim, num_class)
        self.loss_type = loss_type
        self.num_class = num_class
        self.loss_fn = nn.CrossEntropyLoss()
        self.top1 = utils.AverageMeter()  # running top-1 accuracy

    def forward(self, x):
        """Return class scores for a batch of inputs."""
        features = self.feature.forward(Variable(x.cuda()))
        return self.classifier.forward(features)

    def forward_loss(self, x, y):
        """Compute the cross-entropy loss, updating the top-1 accuracy meter."""
        y = Variable(y.cuda())
        scores = self.forward(x)
        _, predicted = torch.max(scores.data, 1)
        correct = predicted.eq(y.data).cpu().sum()
        self.top1.update(correct.item() * 100 / (y.size(0) + 0.0), y.size(0))
        return self.loss_fn(scores, y)

    def train_loop(self, epoch, train_loader, optimizer, scheduler):
        """Train for one epoch, logging loss/accuracy every 10 batches."""
        print_freq = 10
        avg_loss = 0
        for i, (x, y) in enumerate(train_loader):
            optimizer.zero_grad()
            loss = self.forward_loss(x, y)
            loss.backward()
            optimizer.step()
            avg_loss = avg_loss + loss.item()
            if i % print_freq == 0:
                print('Epoch {:d} | Batch {:d}/{:d} | Loss {:f} | Top1 Val {:f} | Top1 Avg {:f}'.format(
                    epoch, i, len(train_loader), avg_loss / float(i + 1),
                    self.top1.val, self.top1.avg))
        if scheduler is not None:
            scheduler.step()

    def test_loop(self, val_loader):
        # Baseline pre-training has no episodic validation; -1 means "skip".
        return -1
def text_clean_phi(text_cleaned, alphabet):
    """Normalize a raw PHI-style Greek inscription text down to the character
    set defined by ``alphabet``.

    NOTE(review): several literals below contain empty strings or odd
    character classes — the original non-ASCII characters (Greek letters,
    editorial brackets, numeral signs) appear to have been lost in an
    encoding round-trip. They are preserved byte-for-byte here; restore them
    from the upstream source before relying on this function, since e.g.
    ``.replace('', '[')`` inserts '[' between every character.
    """
    # Drop editorial header lines and vacat (blank space) markers.
    text_cleaned = re.sub('^(IG|SEG|BCH|Agora|vacat) .*\\n?', '', text_cleaned, flags=re.MULTILINE)
    text_cleaned = text_cleaned.replace('', '[').replace('', ']')
    text_cleaned = re.sub('vacat .*\\n?', '\n', text_cleaned, flags=re.MULTILINE)
    text_cleaned = re.sub(' [:]+ ', '. ', text_cleaned)
    # Re-join words hyphenated across line breaks.
    text_cleaned = re.sub('-\\n', '', text_cleaned)
    text_cleaned = re.sub('[\\w]+', '', text_cleaned)
    word_boundary = '([\\s\\.\\\\\\-\\-\\,.\\[\\]]|$|^)'
    greek_numerals = re.escape('T')
    # Remove bracketed numerals, then collapse remaining numerals to 0
    # (run twice so adjacent matches sharing a boundary are both caught).
    text_cleaned = re.sub(f'\[[{greek_numerals}]+\]', '', text_cleaned)
    text_cleaned = re.sub(f'{word_boundary}([{greek_numerals}]+){word_boundary}', (lambda m: ('%s0%s' % (m.group(1), m.group(3)))), text_cleaned)
    text_cleaned = re.sub(f'{word_boundary}([{greek_numerals}]+){word_boundary}', (lambda m: ('%s0%s' % (m.group(1), m.group(3)))), text_cleaned)
    text_cleaned = re.sub('(\\s*)[\\|\\|\\|\\|\\;]+(\\s*)', ' ', text_cleaned)
    text_cleaned = re.sub('\\s*\\(\\?\\)', '', text_cleaned)
    # Strip editorial braces; unwrap angle-bracket corrections.
    text_cleaned = re.sub('{[^}]*}', '', text_cleaned)
    text_cleaned = re.sub('<([^>]*)>', '\\1', text_cleaned)
    # Remove a trailing bracketed Roman numeral (e.g. section number).
    text_cleaned = re.sub('\\[M{0,4}(CM|CD|D?C{0,3})(XC|XL|L?X{0,3})(IX|IV|V?I{0,3})\\]\\s*$', '', text_cleaned)
    text_cleaned = text_cleaned.lower()
    text_cleaned = alphabet.filter(text_cleaned)
    text_cleaned = re.sub('(\\d+\\s)?\\s*v[\\w\\.\\?]*(\\s\\d+(\\.\\d+)?)?', '', text_cleaned)
    # Unwrap parenthesized runs of alphabet characters; drop other parentheticals.
    text_cleaned = re.sub('\\(([{}]+)\\)'.format(''.join(alphabet.alphabet)), '\\1', text_cleaned)
    text_cleaned = re.sub('\\([^\\)]*\\)', '', text_cleaned)
    text_cleaned = text_cleaned.replace('', '')
    # Keep only lines that are not predominantly Latin characters.
    text_cleaned = '\n'.join([line for line in text_cleaned.splitlines() if (len(line) and ((len(re.findall('[a-z]', line)) / len(line)) < 0.1))])
    text_cleaned = text_cleaned.replace('', '')
    text_cleaned = text_cleaned.replace('', '')
    text_cleaned = text_cleaned.replace('-', '')
    text_cleaned = re.sub('(?:[\\s]+)+', (lambda g: re.sub('[\\s]+', '', g.group(0))), text_cleaned, flags=re.MULTILINE)
    # Expand "lost N characters" notations into N missing-character symbols.
    text_cleaned = re.sub('(?:\\.|.|)+\\s?(?:c\\.)?(\\d+)(?:(\\-|-|)\\d+)?\\s?(?:\\.|.|)*', (lambda g: (alphabet.missing * int(g.group(1)))), text_cleaned, flags=re.MULTILINE)
    text_cleaned = text_cleaned.replace(u'', alphabet.missing)
    text_cleaned = text_cleaned.replace(u'', alphabet.missing)
    text_cleaned = text_cleaned.replace('.', alphabet.missing)
    text_cleaned = text_cleaned.replace('][', '').replace('[]', '')
    # Whitelist pass: everything outside the alphabet becomes a space.
    chars = re.escape(''.join((((alphabet.alphabet + alphabet.numerals) + alphabet.punctuation) + [alphabet.space, alphabet.missing, alphabet.sog, alphabet.eog])))
    text_cleaned = re.sub(f'[^{chars}]', ' ', text_cleaned)
    # Collapse digit runs to a single 0 token.
    text_cleaned = re.sub('\\d+', '0', text_cleaned)
    text_cleaned = re.sub('(\\s+0)+', ' 0', text_cleaned)
    text_cleaned = re.sub('\\[\\s*\\]', '', text_cleaned)
    # Pull punctuation/end-of-group markers flush against the preceding token.
    chars = re.escape(''.join((alphabet.punctuation + [alphabet.eog])))
    text_cleaned = re.sub(f'\s+([{chars}])', '\\1', text_cleaned)
    text_cleaned = text_cleaned.lstrip(''.join((alphabet.punctuation + [alphabet.space])))
    # De-duplicate runs of identical punctuation and normalize whitespace.
    punc = re.escape(''.join(alphabet.punctuation))
    text_cleaned = re.sub(f'([{punc}])+', '\\1', text_cleaned)
    text_cleaned = re.sub('\\s+', ' ', text_cleaned).strip()
    return text_cleaned
def load_caviar(data_path, val_split=0.5, canonical_split=True, verbose=0):
    """Load the CAVIAR head-pose dataset from a gzipped pickle and split the
    held-out portion into validation and test sets.

    Returns ((xtr, ytr_deg, info_tr), (xval, yval_deg, info_val),
    (xte, yte_deg, info_te)); images are converted from NCHW to NHWC.
    With ``canonical_split=True`` a fixed seed yields a reproducible 50/50
    val/test split.
    """
    (xtr, ytr_deg, *info_tr), (xvalte, yvalte_deg, *info_valte) = pickle.load(
        gzip.open(data_path, 'rb'))

    def _parse_info(info):
        # Positional metadata layout: x coord, y coord, bbox size, image name.
        return {
            'x_coord': info[0],
            'y_coord': info[1],
            'size': info[2],
            'image_name': np.asarray(info[3]),
        }

    info_tr = _parse_info(info_tr)
    info_valte = _parse_info(info_valte)
    # NCHW -> NHWC.
    xtr = xtr.transpose([0, 2, 3, 1])
    xvalte = xvalte.transpose([0, 2, 3, 1])
    n_valtest_images = xvalte.shape[0]
    if canonical_split:
        val_split = 0.5
    # Fixed seed for a reproducible permutation; re-seed randomly afterwards.
    np.random.seed(13)
    val_size = int(n_valtest_images * val_split)
    rix = np.random.choice(n_valtest_images, n_valtest_images, replace=False)
    np.random.seed(None)
    val_ix = rix[:val_size]
    te_ix = rix[val_size:]
    xval = xvalte[val_ix]
    yval_deg = yvalte_deg[val_ix]
    info_val = _parse_info([info_valte[key][val_ix] for key in info_valte.keys()])
    xte = xvalte[te_ix]
    yte_deg = yvalte_deg[te_ix]
    info_te = _parse_info([info_valte[key][te_ix] for key in info_valte.keys()])
    return (xtr, ytr_deg, info_tr), (xval, yval_deg, info_val), (xte, yte_deg, info_te)
class VigenereCryptosystem(SymmetricKeyCryptosystem):
    """Vigenere cryptosystem over a string monoid ``S`` with key period ``n``.

    Plaintext, ciphertext and key spaces all coincide with ``S``; a key is a
    string of length ``n`` whose symbols give per-position rotations.
    """

    def __init__(self, S, n):
        # Keys, plaintexts and ciphertexts all live in the same monoid S.
        if not isinstance(S, StringMonoid_class):
            raise TypeError('S (= %s) must be a string monoid.' % S)
        SymmetricKeyCryptosystem.__init__(self, S, S, S, block_length=1, period=n)

    def __call__(self, K):
        """Create a VigenereCipher for key ``K`` (a key-space string or symbol list)."""
        S = self.key_space()
        m = self.period()
        if isinstance(K, list):
            # Coerce a raw list of symbols into the key monoid first.
            try:
                K = S(K)
            except Exception:
                raise TypeError('K (= %s) must specify a string of length %s.' % (K, m))
        if len(K) != m:
            raise TypeError('K (= %s) must specify a string of length %s.' % (K, m))
        return VigenereCipher(self, K)

    def _repr_(self):
        return 'Vigenere cryptosystem on %s of period %s' % (self.cipher_domain(), self.period())

    def random_key(self):
        """Return a uniformly random key of length ``period()``."""
        S = self.key_space()
        n = S.ngens()
        return S([randint(0, n - 1) for _ in range(self.period())])

    def inverse_key(self, K):
        """Return the deciphering key: each symbol negated modulo the alphabet size."""
        S = self.key_space()
        n = S.ngens()
        return S([(-i) % n for i in K._element_list])

    def encoding(self, M):
        """Encode plaintext ``M`` into the cipher domain."""
        S = self.cipher_domain()
        if isinstance(S, AlphabeticStringMonoid):
            return S(strip_encoding(M))
        try:
            return S.encoding(M)
        except Exception:
            raise TypeError('Argument M = %s does not encode in the cipher domain' % M)

    def deciphering(self, K, C):
        """Decipher ciphertext ``C`` under key ``K``."""
        return self(self.inverse_key(K))(C)

    def enciphering(self, K, M):
        """Encipher plaintext ``M`` under key ``K``."""
        return self(K)(M)
class Entity(object):
    """Container for per-mention entity features: type ids, type
    probabilities, and knowledge-base qids (each optional)."""

    def __init__(self, type_id: List[int] = None, type_prob: List[float] = None, qid: List[int] = None):
        # Each attribute is either None (absent) or a flat list of features.
        self.type_id = type_id
        self.type_prob = type_prob
        self.qid = qid

    def __eq__(self, other):
        # Two entities are equal when all of their feature lists match.
        return self.__dict__ == other.__dict__

    def flatten(self):
        """Concatenate all populated feature lists, in VALID_ENTITY_ATTRIBUTES order."""
        result = []
        for field in VALID_ENTITY_ATTRIBUTES:
            field_val = getattr(self, field)
            if field_val:
                result += field_val
        return result

    @staticmethod
    def get_pad_entity(max_features_size):
        """Return a padding Entity whose attributes are all zero-lists of
        ``max_features_size`` elements.

        Declared ``@staticmethod``: the original definition took no ``self``,
        so calling it on an instance passed the instance as the size argument.
        """
        pad_feature = Entity()
        for field in VALID_ENTITY_ATTRIBUTES:
            setattr(pad_feature, field, [0] * max_features_size)
        return pad_feature
def annotate_fps(image: Image.Image, fps: int) -> None:
    """Draw an FPS counter (with a quit hint) at the top-left of *image*, in place."""
    canvas = ImageDraw.Draw(image)
    label_font = ImageFont.truetype('fonts/arial.ttf', 25)
    canvas.text((0, 0), f'FPS: {fps} (Press q to exit.)', fill=(0, 0, 255), font=label_font)
def GenerateSM90_TensorOp_tf32_WGMMA_gemm(manifest, cuda_version):
    """Register SM90 WGMMA tensor-op GEMM kernels for tf32 inputs.

    Adds GEMM operations to ``manifest`` built around the 64x128x8 tf32 math
    instruction with f32 accumulation, plus an f32-input variant restricted to
    one layout combination.  Does nothing unless the CUDA toolkit is >= 12.0.
    """
    # SM90 WGMMA kernels require CUDA 12.0 or newer.
    if (not CudaToolkitVersionSatisfies(cuda_version, 12, 0)):
        return
    # Each entry is [[A layout, align], [B layout, align], [C layout, align]].
    layouts_tf32 = [[[LayoutType.ColumnMajor, 1], [LayoutType.ColumnMajor, 4], [LayoutType.ColumnMajor, 1]], [[LayoutType.ColumnMajor, 1], [LayoutType.RowMajor, 1], [LayoutType.ColumnMajor, 1]], [[LayoutType.RowMajor, 4], [LayoutType.ColumnMajor, 4], [LayoutType.ColumnMajor, 1]], [[LayoutType.RowMajor, 4], [LayoutType.RowMajor, 1], [LayoutType.ColumnMajor, 1]]]
    math_inst = MathInstruction([64, 128, 8], DataType.tf32, DataType.tf32, DataType.f32, OpcodeClass.TensorOp, MathOperation.multiply_add)
    # Hopper (compute capability 9.0) only.
    min_cc = 90
    max_cc = 90
    # Tiles of 128/64 x N x 4*K, sweeping cluster shapes [2,1,1], [1,2,1], [1,1,1].
    tile_descriptions = [TileDescription([128, math_inst.instruction_shape[1], (math_inst.instruction_shape[2] * 4)], 0, [4, 1, 1], math_inst, min_cc, max_cc, [2, 1, 1]), TileDescription([64, math_inst.instruction_shape[1], (math_inst.instruction_shape[2] * 4)], 0, [4, 1, 1], math_inst, min_cc, max_cc, [2, 1, 1]), TileDescription([128, math_inst.instruction_shape[1], (math_inst.instruction_shape[2] * 4)], 0, [4, 1, 1], math_inst, min_cc, max_cc, [1, 2, 1]), TileDescription([64, math_inst.instruction_shape[1], (math_inst.instruction_shape[2] * 4)], 0, [4, 1, 1], math_inst, min_cc, max_cc, [1, 2, 1]), TileDescription([128, math_inst.instruction_shape[1], (math_inst.instruction_shape[2] * 4)], 0, [4, 1, 1], math_inst, min_cc, max_cc, [1, 1, 1]), TileDescription([64, math_inst.instruction_shape[1], (math_inst.instruction_shape[2] * 4)], 0, [4, 1, 1], math_inst, min_cc, max_cc, [1, 1, 1])]
    # Element types for [A, B, accumulator, C/D].
    data_type_tf32 = [math_inst.element_a, math_inst.element_b, math_inst.element_accumulator, math_inst.element_accumulator]
    CreateGemmUniversal3xOperator(manifest, layouts_tf32, tile_descriptions, data_type_tf32)
    # f32-in/f32-out variant, restricted to the RowMajor-A x ColumnMajor-B layout.
    layouts_f32 = [layouts_tf32[2]]
    data_type_f32 = [DataType.f32, DataType.f32, math_inst.element_accumulator, DataType.f32]
    CreateGemmUniversal3xOperator(manifest, layouts_f32, tile_descriptions, data_type_f32)
def eval_policy(policy, eval_env, seed, eval_episodes=10):
    """Run ``policy`` in ``eval_env`` for several episodes and report statistics.

    The first state component is zeroed before the state is fed to the policy;
    both the original and the masked states are collected so the per-dimension
    L1 gap over all steps so far can be printed after each episode.

    Returns the average (undiscounted) episode return.
    """
    eval_env.seed(seed + 100)  # offset so evaluation never reuses the training seed
    total_reward = 0.0
    originals = []
    masked = []
    for _ in range(eval_episodes):
        state, done = eval_env.reset(), False
        while not done:
            originals.append(state.copy())
            state[0] = 0  # hide the first component from the policy
            masked.append(state.copy())
            action = policy.select_action(np.array(state))
            state, reward, done, _ = eval_env.step(action)
            total_reward += reward
        # Mean absolute gap per state dimension, accumulated over all episodes so far.
        print(abs(np.array(masked) - np.array(originals)).mean(0))
    avg_reward = total_reward / eval_episodes
    print('')
    print(f'Evaluation over {eval_episodes} episodes: {avg_reward:.3f}')
    print('')
    return avg_reward
class SEResNetBottleneck(Bottleneck):
    """ResNet bottleneck block with a Squeeze-and-Excitation module.

    Note that the spatial stride is applied by the 1x1 ``conv1`` rather than
    the 3x3 ``conv2``.  The forward pass is presumably inherited from
    ``Bottleneck`` — confirm against the base class definition.
    """
    # Output channels = planes * expansion.
    expansion = 4
    def __init__(self, inplanes, planes, groups, reduction, stride=1, downsample=None):
        super(SEResNetBottleneck, self).__init__()
        # 1x1 reduction conv; carries this block's stride.
        self.conv1 = nn.Conv2d(inplanes, planes, kernel_size=1, bias=False, stride=stride)
        self.bn1 = nn.BatchNorm2d(planes)
        # 3x3 (grouped) conv at stride 1.
        self.conv2 = nn.Conv2d(planes, planes, kernel_size=3, padding=1, groups=groups, bias=False)
        self.bn2 = nn.BatchNorm2d(planes)
        # 1x1 expansion conv to planes * 4 channels.
        self.conv3 = nn.Conv2d(planes, (planes * 4), kernel_size=1, bias=False)
        self.bn3 = nn.BatchNorm2d((planes * 4))
        self.relu = nn.ReLU(inplace=True)
        # Channel-wise recalibration applied to the expanded features.
        self.se_module = SEModule((planes * 4), reduction=reduction)
        self.downsample = downsample
        self.stride = stride
        # He-style initialization: conv weights ~ N(0, sqrt(2/fan_out)),
        # batch-norm set to the identity transform.
        for m in self.modules():
            if isinstance(m, nn.Conv2d):
                n = ((m.kernel_size[0] * m.kernel_size[1]) * m.out_channels)
                m.weight.data.normal_(0, math.sqrt((2.0 / n)))
            elif isinstance(m, nn.BatchNorm2d):
                m.weight.data.fill_(1)
                m.bias.data.zero_()
            elif isinstance(m, nn.Linear):
                # NOTE(review): only the bias is zeroed here — the Linear
                # weight keeps its default init; confirm this is intended.
                m.bias.data.zero_()
def test_context_window(device):
    """ContextWindow gathers left/right frames and is a no-op for zero context."""
    from speechbrain.processing.features import ContextWindow

    # 3-frame, 1-channel signal; with one frame of context on each side the
    # output rows are [prev, cur, next] with zero padding at the edges.
    signal = torch.tensor([1, 2, 3], device=device).unsqueeze(0).unsqueeze(-1).float()
    cw = ContextWindow(left_frames=1, right_frames=1).to(device)
    expected = torch.tensor([[0, 1, 2], [1, 2, 3], [2, 3, 0]], device=device).unsqueeze(0).float()
    assert torch.sum(cw(signal) == expected) == 9

    # Zero context frames must leave the input untouched.
    signal = torch.rand([2, 10, 5], device=device)
    cw = ContextWindow(left_frames=0, right_frames=0).to(device)
    assert torch.sum(cw(signal) == signal) == signal.numel()

    # The module must be traceable by TorchScript.
    assert torch.jit.trace(cw, signal)
class PdfArray(list):
    """A PDF array object: serializes its items as ``[ a b c ]``."""

    def __bytes__(self):
        # Each element is rendered by pdf_repr and separated by single spaces.
        body = b' '.join(pdf_repr(item) for item in self)
        return b'[ ' + body + b' ]'
class SniffTest(AllenNlpTestCase):
    """End-to-end smoke tests: run each default pretrained model on a known
    input and pin its exact output."""
    def test_config(self):
        """The default model registry exposes exactly the five expected task keys."""
        assert (set(DEFAULT_MODELS.keys()) == {'machine-comprehension', 'semantic-role-labeling', 'textual-entailment', 'coreference-resolution', 'named-entity-recognition'})
    def test_machine_comprehension(self):
        """Reading comprehension: extract the cast span from the Matrix passage."""
        predictor = DEFAULT_MODELS['machine-comprehension'].predictor()
        passage = 'The Matrix is a 1999 science fiction action film written and directed by The Wachowskis, starring Keanu Reeves, Laurence Fishburne, Carrie-Anne Moss, Hugo Weaving, and Joe Pantoliano. It depicts a dystopian future in which reality as perceived by most humans is actually a simulated reality called "the Matrix", created by sentient machines to subdue the human population, while their bodies\' heat and electrical activity are used as an energy source. Computer programmer Neo" learns this truth and is drawn into a rebellion against the machines, which involves other people who have been freed from the "dream world". '
        question = 'Who stars in The Matrix?'
        result = predictor.predict_json({'passage': passage, 'question': question})
        correct = 'Keanu Reeves, Laurence Fishburne, Carrie-Anne Moss, Hugo Weaving, and Joe Pantoliano'
        assert (correct == result['best_span_str'])
    def test_semantic_role_labeling(self):
        """SRL: check tokens, words, and the full per-verb frame output."""
        predictor = DEFAULT_MODELS['semantic-role-labeling'].predictor()
        sentence = "If you liked the music we were playing last night, you will absolutely love what we're playing tomorrow!"
        result = predictor.predict_json({'sentence': sentence})
        assert (result['tokens'] == ['If', 'you', 'liked', 'the', 'music', 'we', 'were', 'playing', 'last', 'night', ',', 'you', 'will', 'absolutely', 'love', 'what', 'we', "'re", 'playing', 'tomorrow', '!'])
        assert (result['words'] == ['If', 'you', 'liked', 'the', 'music', 'we', 'were', 'playing', 'last', 'night', ',', 'you', 'will', 'absolutely', 'love', 'what', 'we', "'re", 'playing', 'tomorrow', '!'])
        # Exact expected BIO frames for all seven predicates in the sentence.
        assert (result['verbs'] == [{'verb': 'liked', 'description': "If [ARG0: you] [V: liked] [ARG1: the music we were playing last night] , you will absolutely love what we 're playing tomorrow !", 'tags': ['O', 'B-ARG0', 'B-V', 'B-ARG1', 'I-ARG1', 'I-ARG1', 'I-ARG1', 'I-ARG1', 'I-ARG1', 'I-ARG1', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O']}, {'verb': 'were', 'description': "If you liked the music we [V: were] playing last night , you will absolutely love what we 're playing tomorrow !", 'tags': ['O', 'O', 'O', 'O', 'O', 'O', 'B-V', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O']}, {'verb': 'playing', 'description': "If you liked [ARG1: the music] [ARG0: we] were [V: playing] [ARGM-TMP: last night] , you will absolutely love what we 're playing tomorrow !", 'tags': ['O', 'O', 'O', 'B-ARG1', 'I-ARG1', 'B-ARG0', 'O', 'B-V', 'B-ARGM-TMP', 'I-ARGM-TMP', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O']}, {'verb': 'will', 'description': "[ARGM-ADV: If you liked the music we were playing last night] , [ARG0: you] [V: will] [ARG1: absolutely love what we 're playing tomorrow] !", 'tags': ['B-ARGM-ADV', 'I-ARGM-ADV', 'I-ARGM-ADV', 'I-ARGM-ADV', 'I-ARGM-ADV', 'I-ARGM-ADV', 'I-ARGM-ADV', 'I-ARGM-ADV', 'I-ARGM-ADV', 'I-ARGM-ADV', 'O', 'B-ARG0', 'B-V', 'B-ARG1', 'I-ARG1', 'I-ARG1', 'I-ARG1', 'I-ARG1', 'I-ARG1', 'I-ARG1', 'O']}, {'verb': 'love', 'description': "[ARGM-ADV: If you liked the music we were playing last night] , [ARG0: you] [ARGM-MOD: will] [ARGM-ADV: absolutely] [V: love] [ARG1: what we 're playing tomorrow] !", 'tags': ['B-ARGM-ADV', 'I-ARGM-ADV', 'I-ARGM-ADV', 'I-ARGM-ADV', 'I-ARGM-ADV', 'I-ARGM-ADV', 'I-ARGM-ADV', 'I-ARGM-ADV', 'I-ARGM-ADV', 'I-ARGM-ADV', 'O', 'B-ARG0', 'B-ARGM-MOD', 'B-ARGM-ADV', 'B-V', 'B-ARG1', 'I-ARG1', 'I-ARG1', 'I-ARG1', 'I-ARG1', 'O']}, {'verb': "'re", 'description': "If you liked the music we were playing last night , you will absolutely love what we [V: 're] playing tomorrow !", 'tags': ['O', 'O', 'O', 
        'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'B-V', 'O', 'O', 'O']}, {'verb': 'playing', 'description': "If you liked the music we were playing last night , you will absolutely love [ARG1: what] [ARG0: we] 're [V: playing] [ARGM-TMP: tomorrow] !", 'tags': ['O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'B-ARG1', 'B-ARG0', 'O', 'B-V', 'B-ARGM-TMP', 'O']}])
    def test_textual_entailment(self):
        """NLI: label_probs order is presumably [entailment, contradiction, neutral] — confirm against the model card."""
        predictor = DEFAULT_MODELS['textual-entailment'].predictor()
        result = predictor.predict_json({'premise': "An interplanetary spacecraft is in orbit around a gas giant's icy moon.", 'hypothesis': 'The spacecraft has the ability to travel between planets.'})
        assert (result['label_probs'][0] > 0.7)
        result = predictor.predict_json({'premise': 'Two women are wandering along the shore drinking iced tea.', 'hypothesis': 'Two women are sitting on a blanket near some rocks talking about politics.'})
        assert (result['label_probs'][1] > 0.8)
        result = predictor.predict_json({'premise': 'A large, gray elephant walked beside a herd of zebras.', 'hypothesis': 'The elephant was lost.'})
        assert (result['label_probs'][2] > 0.7)
    def test_coreference_resolution(self):
        """Coref: clusters are lists of [start, end] token spans into 'document'."""
        predictor = DEFAULT_MODELS['coreference-resolution'].predictor()
        document = "We 're not going to skimp on quality , but we are very focused to make next year . The only problem is that some of the fabrics are wearing out - since I was a newbie I skimped on some of the fabric and the poor quality ones are developing holes . For some , an awareness of this exit strategy permeates the enterprise , allowing them to skimp on the niceties they would more or less have to extend toward a person they were likely to meet again ."
        result = predictor.predict_json({'document': document})
        assert (result['clusters'] == [[[0, 0], [10, 10]], [[33, 33], [37, 37]], [[26, 27], [42, 43]], [[63, 64], [67, 67], [73, 73], [84, 84]], [[5, 5], [69, 69]]])
        assert (result['document'] == ['We', "'re", 'not', 'going', 'to', 'skimp', 'on', 'quality', ',', 'but', 'we', 'are', 'very', 'focused', 'to', 'make', 'next', 'year', '.', 'The', 'only', 'problem', 'is', 'that', 'some', 'of', 'the', 'fabrics', 'are', 'wearing', 'out', '-', 'since', 'I', 'was', 'a', 'newbie', 'I', 'skimped', 'on', 'some', 'of', 'the', 'fabric', 'and', 'the', 'poor', 'quality', 'ones', 'are', 'developing', 'holes', '.', 'For', 'some', ',', 'an', 'awareness', 'of', 'this', 'exit', 'strategy', 'permeates', 'the', 'enterprise', ',', 'allowing', 'them', 'to', 'skimp', 'on', 'the', 'niceties', 'they', 'would', 'more', 'or', 'less', 'have', 'to', 'extend', 'toward', 'a', 'person', 'they', 'were', 'likely', 'to', 'meet', 'again', '.'])
    def test_ner(self):
        """NER with BILOU tags on a short ambiguous sentence."""
        predictor = DEFAULT_MODELS['named-entity-recognition'].predictor()
        sentence = 'Michael Jordan is a professor at Berkeley.'
        result = predictor.predict_json({'sentence': sentence})
        assert (result['words'] == ['Michael', 'Jordan', 'is', 'a', 'professor', 'at', 'Berkeley', '.'])
        assert (result['tags'] == ['B-PER', 'L-PER', 'O', 'O', 'O', 'O', 'U-LOC', 'O'])
# NOTE(review): the call below looks like a mangled `@add_start_docstrings(...)`
# decorator — as written its return value is discarded and the class is left
# undecorated; confirm against the upstream transformers source.
_start_docstrings('CamemBERT Model with a token classification head on top (a linear layer on top of\n the hidden-states output) e.g. for Named-Entity-Recognition (NER) tasks. ', CAMEMBERT_START_DOCSTRING)
class CamembertForTokenClassification(RobertaForTokenClassification):
    """CamemBERT token-classification model: reuses the RoBERTa implementation
    wholesale, swapping in only CamemBERT's config class and weight map."""
    config_class = CamembertConfig
    pretrained_model_archive_map = CAMEMBERT_PRETRAINED_MODEL_ARCHIVE_MAP
def get_args(argv=None):
    """Build and parse the GNMT training argument set.

    Parameters
    ----------
    argv : list[str] or None
        Argument vector to parse.  Defaults to ``None``, which preserves the
        historical behavior of parsing an EMPTY list (i.e. all defaults; the
        real command line is ignored).  Pass ``sys.argv[1:]`` explicitly to
        honor the command line.

    Returns
    -------
    argparse.Namespace
        Parsed arguments; ``warmup_steps``, ``remain_steps`` and
        ``decay_interval`` are post-processed with ``literal_eval`` so that
        e.g. the string ``'None'`` becomes ``None``.
    """
    def exclusive_group(group, name, default, help):
        # Registers a --name / --no-name boolean flag pair sharing one dest.
        destname = name.replace('-', '_')
        subgroup = group.add_mutually_exclusive_group(required=False)
        subgroup.add_argument(f'--{name}', dest=f'{destname}', action='store_true', help=f"{help} (use '--no-{name}' to disable)")
        subgroup.add_argument(f'--no-{name}', dest=f'{destname}', action='store_false', help=argparse.SUPPRESS)
        subgroup.set_defaults(**{destname: default})
    parser = argparse.ArgumentParser(description='GNMT training', formatter_class=argparse.ArgumentDefaultsHelpFormatter)
    dataset = parser.add_argument_group('dataset setup')
    dataset.add_argument('--dataset-dir', default='data/wmt16_de_en', help='path to the directory with training/test data')
    dataset.add_argument('--max-size', default=None, type=int, help='use at most MAX_SIZE elements from training dataset (useful for benchmarking), by default uses entire dataset')
    results = parser.add_argument_group('results setup')
    results.add_argument('--results-dir', default='results', help='path to directory with results, it will be automatically created if it does not exist')
    results.add_argument('--save', default='gnmt', help='defines subdirectory within RESULTS_DIR for results from this training run')
    results.add_argument('--print-freq', default=10, type=int, help='print log every PRINT_FREQ batches')
    model = parser.add_argument_group('model setup')
    model.add_argument('--hidden-size', default=1024, type=int, help='model hidden size')
    model.add_argument('--num-layers', default=4, type=int, help='number of RNN layers in encoder and in decoder')
    model.add_argument('--dropout', default=0.2, type=float, help='dropout applied to input of RNN cells')
    exclusive_group(group=model, name='share-embedding', default=True, help='use shared embeddings for encoder and decoder')
    model.add_argument('--smoothing', default=0.1, type=float, help='label smoothing, if equal to zero model will use CrossEntropyLoss, if not zero model will be trained with label smoothing loss')
    general = parser.add_argument_group('general setup')
    general.add_argument('--math', default='fp32', choices=['fp16', 'fp32'], help='arithmetic type')
    general.add_argument('--seed', default=None, type=int, help='master seed for random number generators, if "seed" is undefined then the master seed will be sampled from random.SystemRandom()')
    exclusive_group(group=general, name='eval', default=True, help='run validation and test after every epoch')
    exclusive_group(group=general, name='env', default=False, help='print info about execution env')
    exclusive_group(group=general, name='cuda', default=True, help='enables cuda')
    exclusive_group(group=general, name='cudnn', default=True, help='enables cudnn')
    training = parser.add_argument_group('training setup')
    training.add_argument('--train-batch-size', default=128, type=int, help='training batch size per worker')
    training.add_argument('--train-global-batch-size', default=None, type=int, help='global training batch size, this argument does not have to be defined, if it is defined it will be used to automatically compute train_iter_size using the equation: train_iter_size = train_global_batch_size // (train_batch_size * world_size)')
    training.add_argument('--train-iter-size', metavar='N', default=1, type=int, help='training iter size, training loop will accumulate gradients over N iterations and execute optimizer every N steps')
    training.add_argument('--epochs', default=8, type=int, help='max number of training epochs')
    training.add_argument('--grad-clip', default=5.0, type=float, help='enables gradient clipping and sets maximum norm of gradients')
    training.add_argument('--max-length-train', default=50, type=int, help='maximum sequence length for training (including special BOS and EOS tokens)')
    training.add_argument('--min-length-train', default=0, type=int, help='minimum sequence length for training (including special BOS and EOS tokens)')
    training.add_argument('--train-loader-workers', default=2, type=int, help='number of workers for training data loading')
    training.add_argument('--batching', default='bucketing', type=str, choices=['random', 'sharding', 'bucketing'], help='select batching algorithm')
    training.add_argument('--shard-size', default=80, type=int, help='shard size for "sharding" batching algorithm, in multiples of global batch size')
    training.add_argument('--num-buckets', default=5, type=int, help='number of buckets for "bucketing" batching algorithm')
    optimizer = parser.add_argument_group('optimizer setup')
    optimizer.add_argument('--optimizer', type=str, default='Adam', help='training optimizer')
    optimizer.add_argument('--lr', type=float, default=0.001, help='learning rate')
    scheduler = parser.add_argument_group('learning rate scheduler setup')
    scheduler.add_argument('--warmup-steps', type=str, default='200', help='number of learning rate warmup iterations')
    scheduler.add_argument('--remain-steps', type=str, default='0.666', help='starting iteration for learning rate decay')
    scheduler.add_argument('--decay-interval', type=str, default='None', help='interval between learning rate decay steps')
    scheduler.add_argument('--decay-steps', type=int, default=4, help='max number of learning rate decay steps')
    scheduler.add_argument('--decay-factor', type=float, default=0.5, help='learning rate decay factor')
    val = parser.add_argument_group('validation setup')
    val.add_argument('--val-batch-size', default=64, type=int, help='batch size for validation')
    val.add_argument('--max-length-val', default=125, type=int, help='maximum sequence length for validation (including special BOS and EOS tokens)')
    val.add_argument('--min-length-val', default=0, type=int, help='minimum sequence length for validation (including special BOS and EOS tokens)')
    val.add_argument('--val-loader-workers', default=0, type=int, help='number of workers for validation data loading')
    test = parser.add_argument_group('test setup')
    test.add_argument('--test-batch-size', default=128, type=int, help='batch size for test')
    test.add_argument('--max-length-test', default=150, type=int, help='maximum sequence length for test (including special BOS and EOS tokens)')
    test.add_argument('--min-length-test', default=0, type=int, help='minimum sequence length for test (including special BOS and EOS tokens)')
    test.add_argument('--beam-size', default=5, type=int, help='beam size')
    test.add_argument('--len-norm-factor', default=0.6, type=float, help='length normalization factor')
    test.add_argument('--cov-penalty-factor', default=0.1, type=float, help='coverage penalty factor')
    test.add_argument('--len-norm-const', default=5.0, type=float, help='length normalization constant')
    test.add_argument('--intra-epoch-eval', metavar='N', default=0, type=int, help='evaluate within training epoch, this option will enable extra N equally spaced evaluations executed during each training epoch')
    test.add_argument('--test-loader-workers', default=0, type=int, help='number of workers for test data loading')
    chkpt = parser.add_argument_group('checkpointing setup')
    chkpt.add_argument('--start-epoch', default=0, type=int, help='manually set initial epoch counter')
    chkpt.add_argument('--resume', default=None, type=str, metavar='PATH', help='resumes training from checkpoint from PATH')
    chkpt.add_argument('--save-all', action='store_true', default=False, help='saves checkpoint after every epoch')
    chkpt.add_argument('--save-freq', default=5000, type=int, help='save checkpoint every SAVE_FREQ batches')
    chkpt.add_argument('--keep-checkpoints', default=0, type=int, help='keep only last KEEP_CHECKPOINTS checkpoints, affects only checkpoints controlled by --save-freq option')
    benchmark = parser.add_argument_group('benchmark setup')
    benchmark.add_argument('--target-bleu', default=24.0, type=float, help='target accuracy, training will be stopped when the target is achieved')
    distributed = parser.add_argument_group('distributed setup')
    distributed.add_argument('--rank', default=0, type=int, help='global rank of the process, do not set!')
    distributed.add_argument('--local_rank', default=0, type=int, help='local rank of the process, do not set!')
    # Fix/generalization: the original hard-coded parse_args([]) and therefore
    # always ignored the real command line.  The default still parses [] for
    # backward compatibility, but callers may now pass an explicit argv.
    args = parser.parse_args([] if argv is None else argv)
    # These three accept Python literals ('None', ints, floats) as strings.
    args.warmup_steps = literal_eval(args.warmup_steps)
    args.remain_steps = literal_eval(args.remain_steps)
    args.decay_interval = literal_eval(args.decay_interval)
    return args
def test_predict_proba():
    """ConstantClassifier must assign probability 1 to its single class for every sample."""
    features = np.array([1, 2, 3])
    model = ConstantClassifier()
    expected = np.array([[1], [1], [1]])
    assert_array_equal(expected, model.predict_proba(features))
def ShuffleV1(**kwargs):
    """Build the canonical ShuffleNet v1 configuration (groups=3)."""
    config = {
        'out_planes': [240, 480, 960],
        'num_blocks': [4, 8, 4],
        'groups': 3,
    }
    return ShuffleNet(config, **kwargs)
class CorefResult():
    """Coreference output for one text: predicted clusters plus pairwise logits."""

    def __init__(self, text, clusters, char_map, reverse_char_map, coref_logit, text_idx):
        """Bundle the model output for a single document.

        Args:
            text: The raw input text.
            clusters: Lists of mention keys, one list per coreference cluster.
            char_map: mention key -> pair whose second item is the
                (char_start, char_end) span of the mention in ``text``.
            reverse_char_map: (char_start, char_end) -> tuple whose first item
                is the mention's index into the logit structure.
            coref_logit: Pairwise coreference scores, indexed as
                ``[(larger_idx, smaller_idx)]`` (lower-triangular).
            text_idx: Index of this text within its batch/corpus.
        """
        self.text = text
        self.clusters = clusters
        self.char_map = char_map
        self.reverse_char_map = reverse_char_map
        self.coref_logit = coref_logit
        self.text_idx = text_idx

    def get_clusters(self, as_strings=True):
        """Return clusters as substrings of ``text`` (default) or as char spans."""
        if (not as_strings):
            return [[self.char_map[mention][1] for mention in cluster] for cluster in self.clusters]
        # Mentions with a None component in their char_map entry are skipped.
        return [[self.text[self.char_map[mention][1][0]:self.char_map[mention][1][1]] for mention in cluster if (None not in self.char_map[mention])] for cluster in self.clusters]

    def get_logit(self, span_i, span_j):
        """Return the pairwise coreference logit for two character spans.

        Raises:
            ValueError: If either span is not a recognized mention.
        """
        if (span_i not in self.reverse_char_map):
            raise ValueError(f'span_i="{self.text[span_i[0]:span_i[1]]}" is not an entity in this model!')
        if (span_j not in self.reverse_char_map):
            # Bug fix: this message previously labeled a bad span_j as "span_i".
            raise ValueError(f'span_j="{self.text[span_j[0]:span_j[1]]}" is not an entity in this model!')
        span_i_idx = self.reverse_char_map[span_i][0]
        span_j_idx = self.reverse_char_map[span_j][0]
        # The logit structure is lower-triangular: index with (larger, smaller).
        if (span_i_idx < span_j_idx):
            return self.coref_logit[(span_j_idx, span_i_idx)]
        return self.coref_logit[(span_i_idx, span_j_idx)]

    def __str__(self):
        # Truncate long texts so the repr stays readable.
        if (len(self.text) > 50):
            text_to_print = f'{self.text[:50]}...'
        else:
            text_to_print = self.text
        return f'CorefResult(text="{text_to_print}", clusters={self.get_clusters()})'

    def __repr__(self):
        return self.__str__()
def override_options(opt, opt_over, key_stack=None, safe_check=False):
    """Recursively merge ``opt_over`` into ``opt`` and return the result.

    Nested dicts are merged key by key; all other values overwrite.  With
    ``safe_check`` the user is prompted before a key that does not already
    exist in ``opt`` is added (answering 'n' aborts the program).

    Args:
        opt: Base options dict (mutated in place).
        opt_over: Overriding options.
        key_stack: Path of parent keys, used only in prompt messages.
            Defaults to the root (empty path).
        safe_check: Ask interactively before introducing unknown keys.

    Returns:
        The merged ``opt`` dict.
    """
    if key_stack is None:
        # Bug fix: the old code evaluated `key_stack + [key]` with the default
        # None, raising TypeError on any nested override from the default call.
        key_stack = []
    for (key, value) in opt_over.items():
        if isinstance(value, dict):
            opt[key] = override_options(opt.get(key, dict()), value, key_stack=(key_stack + [key]), safe_check=safe_check)
        else:
            if (safe_check and (key not in opt)):
                add_new = None
                while (add_new not in ['y', 'n']):
                    key_str = '.'.join((key_stack + [key]))
                    add_new = input('"{}" not found in original opt, add? (y/n) '.format(key_str))
                if (add_new == 'n'):
                    print('safe exiting...')
                    exit()
            opt[key] = value
    return opt
def assert_allclose(a, b, rtol=1e-05, atol=1e-08):
    """Assert that *a* and *b* are element-wise close (thin numpy wrapper with
    project-default tolerances); raises AssertionError otherwise."""
    np.testing.assert_allclose(a, b, rtol=rtol, atol=atol)
class LessThanInfinity(_uniq, RingElement):
    """The unique element of the unsigned-infinity ring representing an
    unspecified finite quantity ("a number less than infinity")."""
    def __init__(self, parent=UnsignedInfinityRing):
        RingElement.__init__(self, parent)
    def _repr_(self):
        return 'A number less than infinity'
    def _latex_(self):
        return '(<\\infty)'
    def _add_(self, other):
        # finite + oo = oo; finite + finite remains this finite element.
        if isinstance(other, UnsignedInfinity):
            return other
        return self
    def _sub_(self, other):
        # finite - oo = oo (unsigned); finite - finite remains finite.
        if isinstance(other, UnsignedInfinity):
            return other
        return self
    def _mul_(self, other):
        # finite * oo is undefined because the finite value could be zero.
        if isinstance(other, UnsignedInfinity):
            raise ValueError('oo times number < oo not defined')
        return self
    def _div_(self, other):
        # finite / oo = 0; finite / finite is undefined (divisor could be 0).
        if isinstance(other, UnsignedInfinity):
            return Integer(0)
        raise ValueError('quotient of number < oo by number < oo not defined')
    def _richcmp_(self, other, op):
        # Strictly below unsigned infinity; equal to any other finite element.
        if isinstance(other, UnsignedInfinity):
            return rich_to_bool(op, (- 1))
        return rich_to_bool(op, 0)
    def sign(self):
        # The represented finite value is unspecified, so its sign is unknown.
        raise NotImplementedError('sign of number < oo is not well defined')
class List(Type):
    """Type describing a homogeneous list with a configurable element type."""

    def __init__(self, elem_type):
        # Element parser/converter; must expose `from_str`.
        self.elem_type = elem_type

    def __eq__(self, other):
        return self.__class__ == other.__class__ and self.elem_type == other.elem_type

    def from_str(self, s):
        """Parse *s* into a list, trying ';', then ',', then ' ' as separator."""
        for sep in (';', ','):
            if sep in s:
                segments = s.split(sep)
                break
        else:
            segments = s.split(' ')
        return [self.elem_type.from_str(segment) for segment in segments]
# NOTE(review): the line below appears to be a truncated pytest marker
# (likely `@pytest.mark.verilator`) — as written it is not valid Python;
# confirm against the original test file.
.verilator
def test_multi_tasklet():
    """End-to-end test of two chained SystemVerilog RTL tasklets in a DaCe SDFG.

    tasklet0 latches A and counts B up to 80; tasklet1 latches B and counts C
    up to 100.  The final values of b and c are asserted after execution.
    """
    sdfg = dace.SDFG('rtl_multi_tasklet')
    state = sdfg.add_state()
    # Three scalar int32 arrays wired A -> tasklet0 -> B -> tasklet1 -> C.
    sdfg.add_array('A', [1], dtype=dace.int32)
    sdfg.add_array('B', [1], dtype=dace.int32)
    sdfg.add_array('C', [1], dtype=dace.int32)
    # Counter FSM: load a on valid, then increment b until it reaches 80.
    tasklet0 = state.add_tasklet(name='rtl_tasklet0', inputs={'a'}, outputs={'b'}, code="\n typedef enum [1:0] {READY, BUSY, DONE} state_e;\n state_e state;\n\n (posedge ap_aclk) begin\n if (ap_areset) begin // case: reset\n m_axis_b_tdata <= 0;\n s_axis_a_tready <= 1'b1;\n state <= READY;\n end else if (s_axis_a_tvalid && state == READY) begin // case: load a\n m_axis_b_tdata <= s_axis_a_tdata;\n s_axis_a_tready <= 1'b0;\n state <= BUSY;\n end else if (m_axis_b_tdata < 80) // case: increment counter b\n m_axis_b_tdata <= m_axis_b_tdata + 1;\n else\n m_axis_b_tdata <= m_axis_b_tdata;\n state <= DONE;\n end\n\n assign m_axis_b_tvalid = (m_axis_b_tdata >= 80) ? 1'b1:1'b0;\n ", language=dace.Language.SystemVerilog)
    # Same FSM shape: load b on valid, then increment c until it reaches 100.
    tasklet1 = state.add_tasklet(name='rtl_tasklet1', inputs={'b'}, outputs={'c'}, code="\n typedef enum [1:0] {READY, BUSY, DONE} state_e;\n state_e state;\n\n (posedge ap_aclk) begin\n if (ap_areset) begin // case: reset\n m_axis_c_tdata <= 0;\n s_axis_b_tready <= 1'b1;\n state <= READY;\n end else if (s_axis_b_tvalid && state == READY) begin // case: load a\n m_axis_c_tdata <= s_axis_b_tdata;\n s_axis_b_tready <= 1'b0;\n state <= BUSY;\n end else if (m_axis_c_tdata < 100) // case: increment counter b\n m_axis_c_tdata <= m_axis_c_tdata + 1;\n else\n m_axis_c_tdata <= m_axis_c_tdata;\n state <= DONE;\n end\n\n assign m_axis_c_tvalid = (m_axis_c_tdata >= 100) ? 1'b1:1'b0;\n ", language=dace.Language.SystemVerilog)
    # Access nodes: B is both written (by tasklet0) and read (by tasklet1).
    A = state.add_read('A')
    B_w = state.add_write('B')
    B_r = state.add_read('B')
    C = state.add_write('C')
    state.add_edge(A, None, tasklet0, 'a', dace.Memlet('A[0]'))
    state.add_edge(tasklet0, 'b', B_w, None, dace.Memlet('B[0]'))
    state.add_edge(B_r, None, tasklet1, 'b', dace.Memlet('B[0]'))
    state.add_edge(tasklet1, 'c', C, None, dace.Memlet('C[0]'))
    sdfg.validate()
    # Any starting value below 80 must be counted up to exactly 80, then 100.
    a = np.random.randint(0, 80, 1).astype(np.int32)
    b = np.array([0]).astype(np.int32)
    c = np.array([0]).astype(np.int32)
    sdfg(A=a, B=b, C=c)
    assert (b == 80)
    assert (c == 100)
# NOTE(review): the line below appears to be a truncated decorator
# (likely `@test_utils.test()`) — as written it is not valid Python;
# confirm against the original test file.
_utils.test()
def test_check_grad_struct_field_not_placed():
    """A struct field created with needs_grad=True but whose grad field is
    never placed must raise RuntimeError when a kernel is launched."""
    d = ti.Struct.field({'pos': ti.types.vector(3, float), 'vel': ti.types.vector(3, float), 'acc': ti.types.vector(3, float), 'mass': ti.f32}, needs_grad=True)
    # Place only the primal field; the grad fields are deliberately omitted.
    ti.root.dense(ti.i, 1).place(d)
    def foo():
        pass
    with pytest.raises(RuntimeError, match='These field\\(s\\) requrie `needs_grad=True`, however their grad field\\(s\\) are not placed.*'):
        foo()
def build_scheduler(optimizer, warmup_epoches, start_epoches, end_epoches, scale=0.1):
    """Wrap *optimizer* in a LambdaLR with exponential decay.

    The LR multiplier after ``epoch`` steps is ``0.1 ** ((epoch + 1) / 250000)``,
    i.e. the learning rate decays by 10x every 250k steps.

    Note: ``warmup_epoches``, ``start_epoches``, ``end_epoches`` and ``scale``
    are currently unused; they are kept for interface compatibility.
    """
    decay_rate = 0.1
    decay_steps = 250 * 1000

    def _lr_multiplier(epoch):
        return decay_rate ** ((epoch + 1.0) / decay_steps)

    return torch.optim.lr_scheduler.LambdaLR(optimizer=optimizer, lr_lambda=_lr_multiplier)
class CTRLTokenizer(PreTrainedTokenizer):
    """BPE tokenizer for the CTRL model.

    Splits text on single spaces and applies byte-pair encoding to each piece,
    using the ``</w>`` end-of-word marker convention.
    """
    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    control_codes = CONTROL_CODES
    def __init__(self, vocab_file, merges_file, unk_token='<unk>', **kwargs):
        """Load the JSON vocabulary and the BPE merge ranks (one merge per line)."""
        super(CTRLTokenizer, self).__init__(unk_token=unk_token, **kwargs)
        self.max_len_single_sentence = self.max_len
        self.max_len_sentences_pair = self.max_len
        # token -> id map and its inverse.
        self.encoder = json.load(open(vocab_file, encoding='utf-8'))
        self.decoder = {v: k for (k, v) in self.encoder.items()}
        # Skip the version header line and the trailing empty line.
        merges = open(merges_file, encoding='utf-8').read().split('\n')[1:(- 1)]
        merges = [tuple(merge.split()) for merge in merges]
        # Merge priority: lower rank merges first.
        self.bpe_ranks = dict(zip(merges, range(len(merges))))
        # Memoizes bpe() results per token.
        self.cache = {}
    def vocab_size(self):
        # NOTE(review): upstream tokenizers usually declare this as a
        # @property — confirm whether callers here invoke it as a method.
        return len(self.encoder)
    def bpe(self, token):
        """Apply byte-pair encoding to one token; returns space-joined subwords."""
        if (token in self.cache):
            return self.cache[token]
        word = tuple(token)
        # Mark the final character as word-terminal with the '</w>' suffix.
        word = tuple((list(word[:(- 1)]) + [(word[(- 1)] + '</w>')]))
        pairs = get_pairs(word)
        if (not pairs):
            return token
        while True:
            # Greedily merge the lowest-ranked adjacent symbol pair.
            bigram = min(pairs, key=(lambda pair: self.bpe_ranks.get(pair, float('inf'))))
            if (bigram not in self.bpe_ranks):
                break
            (first, second) = bigram
            new_word = []
            i = 0
            while (i < len(word)):
                try:
                    j = word.index(first, i)
                    new_word.extend(word[i:j])
                    i = j
                except:
                    # 'first' not found in the remainder: copy the tail and stop.
                    new_word.extend(word[i:])
                    break
                if ((word[i] == first) and (i < (len(word) - 1)) and (word[(i + 1)] == second)):
                    new_word.append((first + second))
                    i += 2
                else:
                    new_word.append(word[i])
                    i += 1
            new_word = tuple(new_word)
            word = new_word
            if (len(word) == 1):
                break
            else:
                pairs = get_pairs(word)
        word = ' '.join(word)
        # Strip the trailing '</w>' marker (4 characters).
        word = word[:(- 4)]
        self.cache[token] = word
        return word
    def _tokenize(self, text):
        """Tokenize by splitting on spaces, then BPE-splitting each piece."""
        split_tokens = []
        text = text.split(' ')
        for token in text:
            split_tokens.extend([t for t in self.bpe(token).split(' ')])
        return split_tokens
    def _convert_token_to_id(self, token):
        # Unknown tokens map to the <unk> id.
        return self.encoder.get(token, self.encoder.get(self.unk_token))
    def _convert_id_to_token(self, index):
        return self.decoder.get(index, self.unk_token)
    def convert_tokens_to_string(self, tokens):
        """Join subword tokens back into a single string.

        NOTE(review): this removes ALL spaces from the joined tokens; upstream
        CTRL removes only the '@@ ' continuation marker — confirm which
        behavior is intended here.
        """
        out_string = ' '.join(tokens).replace(' ', '').strip()
        return out_string
    def save_vocabulary(self, save_directory):
        """Write vocab and merges files into *save_directory*; returns their paths
        (or None if the directory is invalid)."""
        if (not os.path.isdir(save_directory)):
            logger.error('Vocabulary path ({}) should be a directory'.format(save_directory))
            return
        vocab_file = os.path.join(save_directory, VOCAB_FILES_NAMES['vocab_file'])
        merge_file = os.path.join(save_directory, VOCAB_FILES_NAMES['merges_file'])
        with open(vocab_file, 'w', encoding='utf-8') as f:
            f.write(json.dumps(self.encoder, ensure_ascii=False))
        index = 0
        with open(merge_file, 'w', encoding='utf-8') as writer:
            writer.write(u'#version: 0.2\n')
            # Merges are written in rank order; non-consecutive ranks indicate
            # a corrupted tokenizer and are only warned about.
            for (bpe_tokens, token_index) in sorted(self.bpe_ranks.items(), key=(lambda kv: kv[1])):
                if (index != token_index):
                    logger.warning('Saving vocabulary to {}: BPE merge indices are not consecutive. Please check that the tokenizer is not corrupted!'.format(merge_file))
                    index = token_index
                writer.write((' '.join(bpe_tokens) + u'\n'))
                index += 1
        return (vocab_file, merge_file)
def cam_loss(source, non_source):
    """Binary CAM loss: push *source* logits toward 1 and *non_source* toward 0.

    Both terms are mean sigmoid cross-entropy; the result is their sum.
    """
    pos_term = tf.reduce_mean(tf.nn.sigmoid_cross_entropy_with_logits(labels=tf.ones_like(source), logits=source))
    neg_term = tf.reduce_mean(tf.nn.sigmoid_cross_entropy_with_logits(labels=tf.zeros_like(non_source), logits=non_source))
    return pos_term + neg_term
class ConjugateGradientOptimizer(Serializable):
    """Trust-region optimizer that computes the step direction with the
    conjugate-gradient method (TRPO-style).  (Class continues beyond this
    excerpt.)"""
    def __init__(self, cg_iters=10, reg_coeff=1e-05, subsample_factor=1.0, backtrack_ratio=0.8, max_backtracks=15, debug_nan=False, accept_violation=False, hvp_approach=None, num_slices=1):
        """Configure the optimizer.

        Args:
            cg_iters: Number of conjugate-gradient iterations.
            reg_coeff: Regularization coefficient passed to the HVP approach.
            subsample_factor: Fraction of samples to use (stored; usage is
                outside this excerpt).
            backtrack_ratio: Step-size shrink factor for the backtracking line
                search.
            max_backtracks: Maximum number of line-search shrink steps.
            debug_nan: Stored debug flag.
            accept_violation: Stored flag for accepting constraint violations.
            hvp_approach: Hessian-vector-product strategy; defaults to
                finite differences when None.
            num_slices: Number of slices used for sliced function evaluation.
        """
        Serializable.quick_init(self, locals())
        self._cg_iters = cg_iters
        self._reg_coeff = reg_coeff
        self._subsample_factor = subsample_factor
        self._backtrack_ratio = backtrack_ratio
        self._max_backtracks = max_backtracks
        self._num_slices = num_slices
        # Populated later by update_opt().
        self._opt_fun = None
        self._target = None
        self._max_constraint_val = None
        self._constraint_name = None
        self._debug_nan = debug_nan
        self._accept_violation = accept_violation
        # Default to a finite-difference Hessian-vector product.
        if (hvp_approach is None):
            hvp_approach = FiniteDifferenceHvp(num_slices)
        self._hvp_approach = hvp_approach
def update_opt(self, loss, target, leq_constraint, inputs, extra_inputs=None, constraint_name='constraint', *args, **kwargs):
inputs = tuple(inputs)
if (extra_inputs is None):
extra_inputs = tuple()
else:
extra_inputs = tuple(extra_inputs)
(constraint_term, constraint_value) = leq_constraint
params = target.get_params(trainable=True)
grads = tf.gradients(loss, xs=params)
for (idx, (grad, param)) in enumerate(zip(grads, params)):
if (grad is None):
grads[idx] = tf.zeros_like(param)
flat_grad = tensor_utils.flatten_tensor_variables(grads)
self._hvp_approach.update_opt(f=constraint_term, target=target, inputs=(inputs + extra_inputs), reg_coeff=self._reg_coeff)
self._target = target
self._max_constraint_val = constraint_value
self._constraint_name = constraint_name
self._opt_fun = ext.lazydict(f_loss=(lambda : tensor_utils.compile_function(inputs=(inputs + extra_inputs), outputs=loss, log_name='f_loss')), f_grad=(lambda : tensor_utils.compile_function(inputs=(inputs + extra_inputs), outputs=flat_grad, log_name='f_grad')), f_constraint=(lambda : tensor_utils.compile_function(inputs=(inputs + extra_inputs), outputs=constraint_term, log_name='constraint')), f_loss_constraint=(lambda : tensor_utils.compile_function(inputs=(inputs + extra_inputs), outputs=[loss, constraint_term], log_name='f_loss_constraint')))
def loss(self, inputs, extra_inputs=None):
inputs = tuple(inputs)
if (extra_inputs is None):
extra_inputs = tuple()
return sliced_fun(self._opt_fun['f_loss'], self._num_slices)(inputs, extra_inputs)
def constraint_val(self, inputs, extra_inputs=None):
inputs = tuple(inputs)
if (extra_inputs is None):
extra_inputs = tuple()
return sliced_fun(self._opt_fun['f_constraint'], self._num_slices)(inputs, extra_inputs)
def optimize(self, inputs, extra_inputs=None, subsample_grouped_inputs=None):
prev_param = np.copy(self._target.get_param_values(trainable=True))
inputs = tuple(inputs)
if (extra_inputs is None):
extra_inputs = tuple()
if (self._subsample_factor < 1):
if (subsample_grouped_inputs is None):
subsample_grouped_inputs = [inputs]
subsample_inputs = tuple()
for inputs_grouped in subsample_grouped_inputs:
n_samples = len(inputs_grouped[0])
inds = np.random.choice(n_samples, int((n_samples * self._subsample_factor)), replace=False)
subsample_inputs += tuple([x[inds] for x in inputs_grouped])
else:
subsample_inputs = inputs
logger.log(('Start CG optimization: #parameters: %d, #inputs: %d, #subsample_inputs: %d' % (len(prev_param), len(inputs[0]), len(subsample_inputs[0]))))
logger.log('computing loss before')
loss_before = sliced_fun(self._opt_fun['f_loss'], self._num_slices)(inputs, extra_inputs)
logger.log('performing update')
logger.log('computing gradient')
flat_g = sliced_fun(self._opt_fun['f_grad'], self._num_slices)(inputs, extra_inputs)
logger.log('gradient computed')
logger.log('computing descent direction')
Hx = self._hvp_approach.build_eval((subsample_inputs + extra_inputs))
descent_direction = krylov.cg(Hx, flat_g, cg_iters=self._cg_iters)
initial_step_size = np.sqrt(((2.0 * self._max_constraint_val) * (1.0 / (descent_direction.dot(Hx(descent_direction)) + 1e-08))))
if np.isnan(initial_step_size):
initial_step_size = 1.0
flat_descent_step = (initial_step_size * descent_direction)
logger.log('descent direction computed')
n_iter = 0
for (n_iter, ratio) in enumerate((self._backtrack_ratio ** np.arange(self._max_backtracks))):
cur_step = (ratio * flat_descent_step)
cur_param = (prev_param - cur_step)
self._target.set_param_values(cur_param, trainable=True)
(loss, constraint_val) = sliced_fun(self._opt_fun['f_loss_constraint'], self._num_slices)(inputs, extra_inputs)
if (self._debug_nan and np.isnan(constraint_val)):
import ipdb
ipdb.set_trace()
if ((loss < loss_before) and (constraint_val <= self._max_constraint_val)):
break
if ((np.isnan(loss) or np.isnan(constraint_val) or (loss >= loss_before) or (constraint_val >= self._max_constraint_val)) and (not self._accept_violation)):
logger.log('Line search condition violated. Rejecting the step!')
if np.isnan(loss):
logger.log('Violated because loss is NaN')
if np.isnan(constraint_val):
logger.log(('Violated because constraint %s is NaN' % self._constraint_name))
if (loss >= loss_before):
logger.log('Violated because loss not improving')
if (constraint_val >= self._max_constraint_val):
logger.log(('Violated because constraint %s is violated' % self._constraint_name))
self._target.set_param_values(prev_param, trainable=True)
logger.log(('backtrack iters: %d' % n_iter))
logger.log('computing loss after')
logger.log('optimization finished') |
def y_scatter(file=None, query=None, y=None, save=False, title='', label=None):
    """Scatter-plot column `y` of a CSV file against row index.

    Optionally filters rows with a pandas `query`, annotates points with the
    first two characters of column `label`, and saves to PNG instead of
    showing when `save` is True. Failures are reported, never raised
    (best-effort visualization helper).
    """
    try:
        frame = pd.read_csv(file).query(query) if query else pd.read_csv(file)
        xs = np.arange(frame.shape[0])
        plt.rcParams['figure.figsize'] = [8, 8]
        fig, ax = plt.subplots(1, 1)
        ax.scatter(xs, frame[y], color='black', s=30)
        ax.set_title(title)
        if label is not None:
            for idx, txt in enumerate(frame[label]):
                ax.annotate(txt[:2], (xs[idx], frame[y].iloc[idx]), xytext=(xs[idx] - 0.4, frame[y].iloc[idx] + 0.006))
        ax.set_ylabel(y)
        ax.set_ylim(0.4, 1)
        ax.grid(True, axis='y')
        if save:
            plt.savefig(file.split('.')[0] + '-' + title + '_' + y + '.png')
        else:
            plt.show()
        plt.close('all')
    except Exception as e:
        # Deliberate best-effort behavior: warn and continue.
        print('[NVIZ-WARN]', e)
class DryRunMetric(Metric):
    """Metric for dry runs: aggregates estimated token costs per request
    instead of evaluating model outputs."""

    def __init__(self):
        # Estimates how many tokens each request would consume.
        self.token_cost_estimator = AutoTokenCostEstimator()

    def __repr__(self):
        return 'DryRunMetric'

    def evaluate(self, scenario_state: ScenarioState, metric_service: MetricService, eval_cache_path: str, parallelism: int) -> MetricResult:
        """Compute per-request stats in parallel and merge them.

        Returns a MetricResult containing the merged aggregate stats (plus a
        `num_requests` count) and the per-instance stats.
        """
        processor = Processor(token_cost_estimator=self.token_cost_estimator, metric_service=metric_service)
        results: List[List[Stat]] = parallel_map(processor.process, scenario_state.request_states, parallelism=parallelism)
        per_instance_stats = [PerInstanceStats(cast(str, request_state.instance.id), request_state.instance.perturbation, request_state.train_trial_index, stats) for (request_state, stats) in zip(scenario_state.request_states, results)]
        stats: Dict[(MetricName, Stat)] = {}
        # Merge every per-instance stat into one aggregate dict keyed by metric name.
        for instance_stats in results:
            for stat in instance_stats:
                merge_stat(stats, stat)
        merge_stat(stats, Stat(MetricName('num_requests')).add(len(scenario_state.request_states)))
        return MetricResult(list(stats.values()), per_instance_stats)
class TestGaussianMLPEncoder(TfGraphTestCase):
    """Tests for GaussianMLPEncoder: latent sampling, pickling, cloning and
    auxiliary introspection properties.

    BUG FIX: the parametrize decorators had been mangled into bare
    `.parametrize(...)` expressions (a SyntaxError); restored as
    `@pytest.mark.parametrize`. Assumes `import pytest` at module top, as is
    standard for this test module — TODO confirm.
    """

    @pytest.mark.parametrize('obs_dim, embedding_dim', [((1,), (1,)), ((1,), (2,)), ((2,), (2,)), ((1, 1), (1, 1)), ((1, 1), (2, 2)), ((2, 2), (2, 2))])
    def test_get_embedding(self, obs_dim, embedding_dim):
        """Latents sampled from the encoder must lie in the output space."""
        env = GarageEnv(DummyBoxEnv(obs_dim=obs_dim, action_dim=embedding_dim))
        embedding_spec = InOutSpec(input_space=env.spec.observation_space, output_space=env.spec.action_space)
        embedding = GaussianMLPEncoder(embedding_spec)
        task_input = tf.compat.v1.placeholder(tf.float32, shape=(None, None, embedding.input_dim))
        embedding.build(task_input, name='task_input')
        env.reset()
        (obs, _, _, _) = env.step(1)
        (latent, _) = embedding.get_latent(obs)
        (latents, _) = embedding.get_latents(([obs] * 5))
        assert env.action_space.contains(latent)
        for latent in latents:
            assert env.action_space.contains(latent)

    @pytest.mark.parametrize('obs_dim, embedding_dim', [((1,), (1,)), ((1,), (2,)), ((2,), (2,)), ((1, 1), (1, 1)), ((1, 1), (2, 2)), ((2, 2), (2, 2))])
    def test_is_pickleable(self, obs_dim, embedding_dim):
        """Pickling round-trip must preserve the distribution parameters."""
        env = GarageEnv(DummyBoxEnv(obs_dim=obs_dim, action_dim=embedding_dim))
        embedding_spec = InOutSpec(input_space=env.spec.observation_space, output_space=env.spec.action_space)
        embedding = GaussianMLPEncoder(embedding_spec)
        env.reset()
        (obs, _, _, _) = env.step(1)
        obs_dim = env.spec.observation_space.flat_dim
        # Perturb a weight so the test does not pass trivially on defaults.
        with tf.compat.v1.variable_scope('GaussianMLPEncoder/GaussianMLPModel', reuse=True):
            bias = tf.compat.v1.get_variable('dist_params/mean_network/hidden_0/bias')
            bias.load(tf.ones_like(bias).eval())
        output1 = self.sess.run([embedding.distribution.loc, embedding.distribution.stddev()], feed_dict={embedding.model.input: [[obs.flatten()]]})
        p = pickle.dumps(embedding)
        with tf.compat.v1.Session(graph=tf.Graph()) as sess:
            embedding_pickled = pickle.loads(p)
            output2 = sess.run([embedding_pickled.distribution.loc, embedding_pickled.distribution.stddev()], feed_dict={embedding_pickled.model.input: [[obs.flatten()]]})
        assert np.array_equal(output1, output2)

    def test_clone(self):
        """A clone must preserve input/output dimensions."""
        env = GarageEnv(DummyBoxEnv(obs_dim=(2,), action_dim=(2,)))
        embedding_spec = InOutSpec(input_space=env.spec.observation_space, output_space=env.spec.action_space)
        embedding = GaussianMLPEncoder(embedding_spec)
        clone_embedding = embedding.clone(name='cloned')
        assert (clone_embedding.input_dim == embedding.input_dim)
        assert (clone_embedding.output_dim == embedding.output_dim)

    def test_auxiliary(self):
        """Check parameter counts, tensor shapes and flat get/set round-trip."""
        input_space = akro.Box(np.array([(- 1), (- 1)]), np.array([1, 1]))
        latent_space = akro.Box(np.array([(- 2), (- 2), (- 2)]), np.array([2, 2, 2]))
        embedding_spec = InOutSpec(input_space=input_space, output_space=latent_space)
        embedding = GaussianMLPEncoder(embedding_spec, hidden_sizes=[32, 32, 32])
        # 3 hidden layers (W+b each) + output W+b + std parameter = 9 variables.
        assert (len(embedding.get_params()) == 9)
        assert (len(embedding.get_global_vars()) == 9)
        assert (embedding.distribution.loc.get_shape().as_list()[(- 1)] == latent_space.shape[0])
        assert (embedding.input.shape.as_list() == [None, None, input_space.shape[0]])
        assert (embedding.latent_mean.shape.as_list() == [None, None, latent_space.shape[0]])
        assert (embedding.latent_std_param.shape.as_list() == [None, 1, latent_space.shape[0]])
        embedding.reset()
        assert (embedding.input_dim == embedding_spec.input_space.flat_dim)
        assert (embedding.output_dim == embedding_spec.output_space.flat_dim)
        var_shapes = [(2, 32), (32,), (32, 32), (32,), (32, 32), (32,), (32, 3), (3,), (3,)]
        assert (sorted(embedding.get_param_shapes()) == sorted(var_shapes))
        var_count = sum(list(map(np.prod, var_shapes)))
        embedding.set_param_values(np.ones(var_count))
        assert (embedding.get_param_values() == np.ones(var_count)).all()
        assert (sorted(map(np.shape, embedding.flat_to_params(np.ones(var_count)))) == sorted(var_shapes))
class TrainingConfig(object):
    """Container of training hyper-parameters (SGD schedule, clipping,
    checkpointing) exposed as plain instance attributes."""

    def __init__(self):
        defaults = {
            # Size of the training set; drives the decay schedule length.
            'num_examples_per_epoch': 586363,
            'optimizer': 'SGD',
            'initial_learning_rate': 2.0,
            'learning_rate_decay_factor': 0.5,
            'num_epochs_per_decay': 8.0,
            # Lower rate used when fine-tuning the Inception backbone.
            'train_inception_learning_rate': 0.0005,
            'clip_gradients': 5.0,
            'max_checkpoints_to_keep': 5,
        }
        for attr, value in defaults.items():
            setattr(self, attr, value)
def save_train_history(args, train_loss, train_acc, val_loss, val_acc, test_loss, test_acc):
    """Dump the train/val/test loss and accuracy histories as JSON.

    The file is written to <args.out_dir>/dicts/train_hist_<args.experiment_id>.json,
    creating the directory if necessary.
    """
    history = {
        'train_loss': train_loss,
        'train_acc': train_acc,
        'val_loss': val_loss,
        'val_acc': val_acc,
        'test_loss': test_loss,
        'test_acc': test_acc,
    }
    target = os.path.join(args.out_dir, 'dicts', 'train_hist_{}.json'.format(args.experiment_id))
    os.makedirs(os.path.dirname(target), exist_ok=True)
    with open(target, 'w') as fp:
        json.dump(history, fp)
class WithinVisitLabeler(Labeler):
    """Base labeler that assigns one boolean label per visit: True iff an
    outcome occurs within the (adjusted) visit window.

    Subclasses are expected to override `get_outcome_times` and
    `get_visit_events`; the base implementations return empty lists, so this
    class on its own produces no labels.
    """

    def __init__(self, ontology: extension_datasets.Ontology, visit_start_adjust_func: Callable=identity, visit_end_adjust_func: Callable=identity):
        # The adjust functions map a raw visit start/end datetime to the
        # prediction-window boundary actually used for labeling.
        self.ontology: extension_datasets.Ontology = ontology
        self.visit_start_adjust_func: Callable = visit_start_adjust_func
        self.visit_end_adjust_func: Callable = visit_end_adjust_func

    def get_outcome_times(self, patient: Patient) -> List[datetime.datetime]:
        """Outcome event times for `patient`; override in subclasses (base: none)."""
        return []

    def get_visit_events(self, patient: Patient) -> List[Event]:
        """Visit events for `patient`; override in subclasses (base: none)."""
        return []

    def label(self, patient: Patient) -> List[Label]:
        """Emit one boolean Label (anchored at the adjusted visit start) per visit.

        Raises RuntimeError when any outcome/start/end time is None, or when an
        adjusted start falls after the adjusted end.
        Assumes `get_outcome_times` returns times sorted ascending — TODO confirm.
        """
        visits: List[Event] = self.get_visit_events(patient)
        prediction_start_times: List[datetime.datetime] = [self.visit_start_adjust_func(visit.start) for visit in visits]
        prediction_end_times: List[datetime.datetime] = [self.visit_end_adjust_func(visit.end) for visit in visits]
        outcome_times: List[datetime.datetime] = self.get_outcome_times(patient)
        results: List[Label] = []
        curr_outcome_idx: int = 0
        for (prediction_start, prediction_end) in zip(prediction_start_times, prediction_end_times):
            # NOTE(review): the message below is a plain string — the
            # {curr_outcome_idx} placeholder is not interpolated (missing
            # f-prefix in the original).
            if ((curr_outcome_idx < len(outcome_times)) and (outcome_times[curr_outcome_idx] is None)):
                raise RuntimeError('Outcome times must be of type `datetime.datetime`, but value of `None` provided for `self.get_outcome_times(patient)[{curr_outcome_idx}]')
            if (prediction_start is None):
                raise RuntimeError('Prediction start times must be of type `datetime.datetime`, but value of `None` provided for `prediction_start_time`')
            if (prediction_end is None):
                raise RuntimeError('Prediction end times must be of type `datetime.datetime`, but value of `None` provided for `prediction_end_time`')
            if (prediction_start > prediction_end):
                raise RuntimeError(f'Prediction start time must be before prediction end time, but `prediction_start_time` is {prediction_start} and `prediction_end_time` is {prediction_end}. Maybe you `visit_start_adjust_func()` or `visit_end_adjust_func()` in such a way that the `start` time got pushed after the `end` time?')
            # Advance past outcomes that occurred before this visit's window.
            while ((curr_outcome_idx < len(outcome_times)) and (outcome_times[curr_outcome_idx] < prediction_start)):
                curr_outcome_idx += 1
            is_outcome_occurs_in_time_horizon: bool = ((curr_outcome_idx < len(outcome_times)) and (prediction_start <= outcome_times[curr_outcome_idx]) and (outcome_times[curr_outcome_idx] <= prediction_end))
            # Placeholder: censoring is not modeled here, so this is always False.
            is_censored: bool = False
            if is_outcome_occurs_in_time_horizon:
                results.append(Label(time=prediction_start, value=True))
            elif (not is_censored):
                # No outcome inside the window and not censored -> negative label.
                results.append(Label(time=prediction_start, value=False))
        return results

    def get_labeler_type(self) -> LabelType:
        """All labels emitted by this labeler are boolean."""
        return 'boolean'
class TestBuiltinEntityParser(SnipsTest):
    """Tests for BuiltinEntityParser: grammar parsing, gazetteer parsing,
    language coverage and parser-instance sharing."""

    # Local import so the restored @patch decorator below resolves even if the
    # module-level mock import differs.
    from unittest.mock import patch as _patch

    def setUp(self):
        # Each test starts from an empty parser cache.
        _BUILTIN_ENTITY_PARSERS.clear()

    def test_should_parse_grammar_entities(self):
        text = "we'll be 2 at the meeting"
        language = 'en'
        parser = BuiltinEntityParser.build(language=language)
        parse = parser.parse(text)
        expected_parse = [{'resolved_value': {'kind': 'Number', 'value': 2.0}, 'entity_kind': 'snips/number', 'range': {'end': 10, 'start': 9}, 'value': '2'}]
        self.assertEqual(parse, expected_parse)

    def test_should_parse_gazetteer_entities(self):
        text = "je veux ecouter daft punk s'il vous plait"
        parser = BuiltinEntityParser.build(language='fr', gazetteer_entity_scope=['snips/musicArtist'])
        parse = parser.parse(text)
        expected_parse = [{'resolved_value': {'kind': 'MusicArtist', 'value': 'Daft Punk'}, 'entity_kind': 'snips/musicArtist', 'range': {'end': 25, 'start': 16}, 'value': 'daft punk'}]
        self.assertEqual(parse, expected_parse)

    def test_should_support_all_languages(self):
        text = ''
        for language in get_all_languages():
            parser = BuiltinEntityParser.build(language=language)
            msg = ('get_builtin_entities does not support %s.' % language)
            with self.fail_if_exception(msg):
                parser.parse(text)

    def test_should_not_disambiguate_grammar_and_gazetteer_entities(self):
        text = 'trois nuits par semaine'
        gazetteer_entities = ['snips/musicTrack']
        parser = BuiltinEntityParser.build(language='fr', gazetteer_entity_scope=gazetteer_entities)
        result = parser.parse(text)
        expected_result = [{'value': 'trois', 'range': {'start': 0, 'end': 5}, 'resolved_value': {'kind': 'Number', 'value': 3.0}, 'entity_kind': 'snips/number'}, {'value': 'trois nuits par semaine', 'range': {'start': 0, 'end': 23}, 'resolved_value': {'kind': 'MusicTrack', 'value': 'Trois nuits par semaine'}, 'entity_kind': 'snips/musicTrack'}]
        self.assertListEqual(expected_result, result)

    # BUG FIX: the original source had the bare string expression
    # ('snips_nlu.entity_parser.builtin_entity_parser._build_builtin_parser')
    # on its own line — a stripped decorator. Restored as @patch so the mock
    # argument the method signature already declares is actually injected.
    @_patch('snips_nlu.entity_parser.builtin_entity_parser._build_builtin_parser')
    def test_should_share_parser(self, mocked_build_builtin_parser):
        def mock_build_builtin_parser(language, gazetteer_entity_scope):
            return None
        mocked_build_builtin_parser.side_effect = mock_build_builtin_parser
        dataset1 = {LANGUAGE: 'fr', ENTITIES: {'snips/musicArtist': {}, 'snips/musicTrack': {}, 'snips/number': {}}}
        dataset2 = {LANGUAGE: 'fr', ENTITIES: {'snips/musicTrack': {}, 'snips/musicAlbum': {}, 'snips/amountOfMoney': {}}}
        dataset3 = {LANGUAGE: 'fr', ENTITIES: {'snips/musicTrack': {}, 'snips/musicArtist': {}}}
        BuiltinEntityParser.build(dataset=dataset1)
        BuiltinEntityParser.build(dataset=dataset2)
        BuiltinEntityParser.build(dataset=dataset3)
        # dataset1 and dataset3 share the same gazetteer scope, so only two
        # underlying parsers are ever built.
        self.assertEqual(2, mocked_build_builtin_parser.call_count)

    del _patch
class SPADEResBlock(nn.Module):
    """Residual block whose normalization layers are SPADE-conditioned on a
    segmentation map (and optionally a misalignment mask).

    Submodules are created in the original order so state-dict keys of
    existing checkpoints remain valid.
    """

    def __init__(self, opt, input_nc, output_nc, use_mask_norm=True):
        super(SPADEResBlock, self).__init__()
        self.param_opt = opt
        # A 1x1 projection shortcut is only needed when channel counts differ.
        self.learned_shortcut = (input_nc != output_nc)
        middle_nc = min(input_nc, output_nc)
        self.conv_0 = nn.Conv2d(input_nc, middle_nc, kernel_size=3, padding=1)
        self.conv_1 = nn.Conv2d(middle_nc, output_nc, kernel_size=3, padding=1)
        if self.learned_shortcut:
            self.conv_s = nn.Conv2d(input_nc, output_nc, kernel_size=1, bias=False)
        subnorm_type = opt.norm_G
        if subnorm_type.startswith('spectral'):
            # A 'spectral' prefix wraps the convs in spectral normalization;
            # the remainder of the string is the actual norm type.
            subnorm_type = subnorm_type[len('spectral'):]
            self.conv_0 = spectral_norm(self.conv_0)
            self.conv_1 = spectral_norm(self.conv_1)
            if self.learned_shortcut:
                self.conv_s = spectral_norm(self.conv_s)
        gen_semantic_nc = opt.gen_semantic_nc
        if use_mask_norm:
            # Mask-aware normalization consumes one extra conditioning channel.
            subnorm_type = 'aliasmask'
            gen_semantic_nc = (gen_semantic_nc + 1)
        self.norm_0 = SPADENorm(opt, subnorm_type, input_nc, gen_semantic_nc)
        self.norm_1 = SPADENorm(opt, subnorm_type, middle_nc, gen_semantic_nc)
        if self.learned_shortcut:
            self.norm_s = SPADENorm(opt, subnorm_type, input_nc, gen_semantic_nc)
        self.relu = nn.LeakyReLU(0.2)

    def shortcut(self, x, seg, misalign_mask):
        """Identity, or normalized 1x1 projection when channel counts differ."""
        if self.learned_shortcut:
            return self.conv_s(self.norm_s(x, seg, misalign_mask))
        else:
            return x

    def forward(self, x, seg, misalign_mask=None):
        """Conditioned residual computation: shortcut(x) + conv_1(conv_0(x))."""
        # Resize the conditioning inputs to x's spatial size.
        seg = F.interpolate(seg, size=x.size()[2:], mode='nearest')
        if (misalign_mask is not None):
            misalign_mask = F.interpolate(misalign_mask, size=x.size()[2:], mode='nearest')
        x_s = self.shortcut(x, seg, misalign_mask)
        dx = self.conv_0(self.relu(self.norm_0(x, seg, misalign_mask)))
        dx = self.conv_1(self.relu(self.norm_1(dx, seg, misalign_mask)))
        output = (x_s + dx)
        return output
def classify(images, model, adversarial_attack):
    """Run a frozen TF graph classifier on a batch of torch images.

    Returns the logits as a CUDA torch tensor.
    NOTE(review): `adversarial_attack` is accepted but unused in this body —
    confirm callers rely only on the signature.
    """
    # NCHW torch layout -> NHWC numpy layout expected by the TF graph.
    batch = images.cpu().numpy().transpose(0, 2, 3, 1)
    with TFHider.tf.Session(graph=model) as sess:
        logits = sess.run('import/logits/output:0', feed_dict={'import/Placeholder:0': batch})
    return torch.from_numpy(logits).cuda()
class TrajectoryTransformerPreTrainedModel(metaclass=DummyObject):
    """Auto-generated placeholder used when torch is not installed.

    Instantiation raises an informative backend-requirement error via
    `requires_backends` instead of an opaque ImportError.
    """
    # Backends that must be available for the real implementation.
    _backends = ['torch']

    def __init__(self, *args, **kwargs):
        requires_backends(self, ['torch'])
def get_data_cache_path(args):
    """Return (output directory, dataset file name) used for caching.

    The name is the final path component of `args.data.path`; the directory
    comes from `args.data.output.path`.
    """
    dataset_name = Path(args.data.path).name
    return (args.data.output.path, dataset_name)
def parse_schema_kind(schema: str, app: (str | None)) -> SchemaInputKind:
    """Classify a raw schema location string.

    Resolution order: URL (has a network location) > filesystem path >
    in-app path (when `app` is given) > bare name.
    Raises click.UsageError for unparseable, empty, or NUL-containing input.
    """
    try:
        parsed_netloc = urlparse(schema).netloc
    except ValueError as exc:
        raise click.UsageError(INVALID_SCHEMA_MESSAGE) from exc
    if not schema or '\x00' in schema:
        raise click.UsageError(INVALID_SCHEMA_MESSAGE)
    if parsed_netloc:
        return SchemaInputKind.URL
    if file_exists(schema) or is_filename(schema):
        return SchemaInputKind.PATH
    if app is not None:
        return SchemaInputKind.APP_PATH
    return SchemaInputKind.NAME
class Camera():
    """Pinhole-family camera model (COLMAP conventions) with optional
    radial / OpenCV distortion.

    Supported types: 0/'SIMPLE_PINHOLE', 1/'PINHOLE', 2/'SIMPLE_RADIAL',
    3/'RADIAL', 4/'OPENCV'.
    """

    @staticmethod
    def GetNumParams(type_):
        """Return the number of intrinsic parameters for a camera type
        (numeric id or name).

        BUG FIX: declared as @staticmethod. Previously this helper (and
        GetNameFromType) took only `type_`, so instance calls such as
        `self.GetNameFromType(self.camera_type)` in __str__ passed `self`
        as `type_` and raised TypeError.
        """
        if ((type_ == 0) or (type_ == 'SIMPLE_PINHOLE')):
            return 3
        if ((type_ == 1) or (type_ == 'PINHOLE')):
            return 4
        if ((type_ == 2) or (type_ == 'SIMPLE_RADIAL')):
            return 4
        if ((type_ == 3) or (type_ == 'RADIAL')):
            return 5
        if ((type_ == 4) or (type_ == 'OPENCV')):
            return 8
        raise Exception('Camera type not supported')

    @staticmethod
    def GetNameFromType(type_):
        """Return the canonical name for a numeric camera-type id.

        BUG FIX: @staticmethod for the same reason as GetNumParams.
        """
        if (type_ == 0):
            return 'SIMPLE_PINHOLE'
        if (type_ == 1):
            return 'PINHOLE'
        if (type_ == 2):
            return 'SIMPLE_RADIAL'
        if (type_ == 3):
            return 'RADIAL'
        if (type_ == 4):
            return 'OPENCV'
        raise Exception('Camera type not supported')

    def __init__(self, type_, width_, height_, params):
        """Create a camera of `type_` (id or name) with the given image size.

        `params` layout per type:
            SIMPLE_PINHOLE: (fx, cx, cy)                 — fy := fx
            PINHOLE:        (fx, fy, cx, cy)
            SIMPLE_RADIAL:  (fx, cx, cy, k1)             — fy := fx
            RADIAL:         (fx, cx, cy, k1, k2)         — fy := fx
            OPENCV:         (fx, fy, cx, cy, k1, k2, p1, p2)
        Raises Exception for any other type.
        """
        self.width = width_
        self.height = height_
        if ((type_ == 0) or (type_ == 'SIMPLE_PINHOLE')):
            (self.fx, self.cx, self.cy) = params
            self.fy = self.fx
            self.distortion_func = None
            self.camera_type = 0
        elif ((type_ == 1) or (type_ == 'PINHOLE')):
            (self.fx, self.fy, self.cx, self.cy) = params
            self.distortion_func = None
            self.camera_type = 1
        elif ((type_ == 2) or (type_ == 'SIMPLE_RADIAL')):
            (self.fx, self.cx, self.cy, self.k1) = params
            self.fy = self.fx
            self.distortion_func = simple_radial_distortion
            self.camera_type = 2
        elif ((type_ == 3) or (type_ == 'RADIAL')):
            (self.fx, self.cx, self.cy, self.k1, self.k2) = params
            self.fy = self.fx
            self.distortion_func = radial_distortion
            self.camera_type = 3
        elif ((type_ == 4) or (type_ == 'OPENCV')):
            (self.fx, self.fy, self.cx, self.cy) = params[:4]
            (self.k1, self.k2, self.p1, self.p2) = params[4:]
            self.distortion_func = opencv_distortion
            self.camera_type = 4
        else:
            raise Exception('Camera type not supported')

    def __str__(self):
        """Return a COLMAP-style text description: name, size, intrinsics."""
        s = (self.GetNameFromType(self.camera_type) + ' {} {} {}'.format(self.width, self.height, self.fx))
        if (self.camera_type in (1, 4)):
            # Only PINHOLE and OPENCV carry an independent fy.
            s += ' {}'.format(self.fy)
        s += ' {} {}'.format(self.cx, self.cy)
        if (self.camera_type == 2):
            s += ' {}'.format(self.k1)
        elif (self.camera_type == 3):
            s += ' {} {}'.format(self.k1, self.k2)
        elif (self.camera_type == 4):
            s += ' {} {} {} {}'.format(self.k1, self.k2, self.p1, self.p2)
        return s

    def get_params(self):
        """Return the intrinsic parameter vector in the type's canonical order."""
        if (self.camera_type == 0):
            return np.array((self.fx, self.cx, self.cy))
        if (self.camera_type == 1):
            return np.array((self.fx, self.fy, self.cx, self.cy))
        if (self.camera_type == 2):
            return np.array((self.fx, self.cx, self.cy, self.k1))
        if (self.camera_type == 3):
            return np.array((self.fx, self.cx, self.cy, self.k1, self.k2))
        if (self.camera_type == 4):
            return np.array((self.fx, self.fy, self.cx, self.cy, self.k1, self.k2, self.p1, self.p2))

    def get_camera_matrix(self):
        """Return the 3x3 intrinsic matrix K."""
        return np.array(((self.fx, 0, self.cx), (0, self.fy, self.cy), (0, 0, 1)))

    def get_inverse_camera_matrix(self):
        """Return K^-1 in closed form."""
        return np.array((((1.0 / self.fx), 0, ((- self.cx) / self.fx)), (0, (1.0 / self.fy), ((- self.cy) / self.fy)), (0, 0, 1)))

    def K(self):
        """Alias for get_camera_matrix()."""
        return self.get_camera_matrix()

    def K_inv(self):
        """Alias for get_inverse_camera_matrix()."""
        return self.get_inverse_camera_matrix()

    def get_inv_camera_matrix(self):
        """Return K^-1 (equivalent alternative formulation)."""
        (inv_fx, inv_fy) = ((1.0 / self.fx), (1.0 / self.fy))
        return np.array(((inv_fx, 0, ((- inv_fx) * self.cx)), (0, inv_fy, ((- inv_fy) * self.cy)), (0, 0, 1)))

    def get_image_grid(self):
        """Return a meshgrid of normalized pixel-center coordinates covering the image."""
        xmin = ((0.5 - self.cx) / self.fx)
        xmax = (((self.width - 0.5) - self.cx) / self.fx)
        ymin = ((0.5 - self.cy) / self.fy)
        ymax = (((self.height - 0.5) - self.cy) / self.fy)
        return np.meshgrid(np.linspace(xmin, xmax, self.width), np.linspace(ymin, ymax, self.height))

    def distort_points(self, x, normalized=True, denormalize=True):
        """Apply the camera's distortion model to points `x` (Nx2).

        NOTE(review): when normalized=False the in-place `-=`/`/=` below can
        mutate the caller's array if `np.atleast_2d` returns a view — confirm
        callers expect this.
        """
        x = np.atleast_2d(x)
        if (not normalized):
            x -= np.array([[self.cx, self.cy]])
            x /= np.array([[self.fx, self.fy]])
        if (self.distortion_func is not None):
            x = self.distortion_func(self, x)
        if denormalize:
            x *= np.array([[self.fx, self.fy]])
            x += np.array([[self.cx, self.cy]])
        return x

    def undistort_points(self, x, normalized=False, denormalize=True):
        """Invert the distortion model for points `x` (Nx2) via a root solve."""
        x = np.atleast_2d(x)
        if (not normalized):
            x = (x - np.array([self.cx, self.cy]))
            x /= np.array([self.fx, self.fy])
        if (self.distortion_func is not None):
            # Solve distort(xu) = x for the undistorted points xu.
            def objective(xu):
                return (x - self.distortion_func(self, xu.reshape(*x.shape))).ravel()
            xu = root(objective, x).x.reshape(*x.shape)
        else:
            xu = x
        if denormalize:
            xu *= np.array([[self.fx, self.fy]])
            xu += np.array([[self.cx, self.cy]])
        return xu
def encode_dataset2(*splits, encoder):
    """Tokenize/encode every field of every split with `encoder`.

    Field handling depends on the field's position within each split
    (tracked by `field_t`):
      - field 0 (strings): the part before the first '<|' marker is
        tokenized to ids, then the ids of the two '<|...' control markers
        (whitespace removed) are appended.
      - later string fields: plain tokenize + convert to ids.
      - field 2: each element is a dict; every value is run through
        `convert_list`, and the per-key (values, scores) pairs are flattened
        into one (values, scores) tuple per element.

    NOTE(review): assumes each split's fields arrive in a fixed order with
    the dict field at index 2 — confirm against callers.
    """
    encoded_splits = []
    for split in splits:
        fields = []
        field_t = 0
        for field in split:
            if isinstance(field[0], str):
                if (field_t == 0):
                    # Ids of the two '<|...' control markers in each string
                    # (spaces stripped before the vocabulary lookup).
                    special = [[encoder.encoder[('<|' + x.split('<|')[1].replace(' ', ''))], encoder.encoder[('<|' + x.split('<|')[2].replace(' ', ''))]] for x in field]
                    field = [(encoder.convert_tokens_to_ids(encoder.tokenize(field[i].split('<|')[0])) + special[i]) for i in range(len(field))]
                else:
                    field = [encoder.convert_tokens_to_ids(encoder.tokenize(field[i])) for i in range(len(field))]
            if (field_t == 2):
                # Encode every dict value, then flatten values and scores
                # across keys into parallel flat lists.
                field = [[convert_list(x[key], encoder) for key in x.keys()] for x in field]
                field_vals = [list(itertools.chain.from_iterable([s[0] for s in x])) for x in field]
                field_scores = [list(itertools.chain.from_iterable([s[1] for s in x])) for x in field]
                field = [(field_vals[i], field_scores[i]) for i in range(len(field))]
            fields.append(field)
            field_t += 1
        encoded_splits.append(fields)
    return encoded_splits
class Test__ExoDataEqn(TestCase):
    """Unit tests for the _ExoDataEqn helper."""

    def test__repr__(self):
        """A default-constructed equation has the canonical empty repr."""
        self.assertEqual(repr(_ExoDataEqn()), '_ExoDataEqn()')
def test_Detector_get():
    """Exercise Detector.get: efficiency, dark counts, dead time, and time resolution.

    NOTE(review): several numeric literals below appear as `.0` (zero) — e.g.
    `tl.time = (i * .0)`, `stop_time = .0`, `interval = (.0 / count_rate)`,
    and the randint upper bound — which makes the timeline times degenerate
    and `stop_time / .0` a division by zero. They look like large constants
    (e.g. powers of ten in picoseconds) mangled during extraction; restore
    them from the original test before relying on this code.
    """
    efficiency = 0.5
    (detector, parent, tl) = create_detector(efficiency=efficiency)
    tl.init()
    for i in range(1000):
        tl.time = (i * .0)  # NOTE(review): mangled constant?
        detector.get()
    # Detected fraction should be close to the configured efficiency.
    assert (((len(parent.log) / 1000) - efficiency) < 0.1)
    dark_count = 100
    stop_time = .0  # NOTE(review): mangled constant? (causes division by zero below)
    (detector, parent, tl) = create_detector(dark_count=dark_count)
    tl.init()
    tl.stop_time = stop_time
    tl.run()
    # Observed dark counts should be within 10% of the expected number.
    assert (((len(parent.log) - ((stop_time / .0) * dark_count)) / ((stop_time / .0) * dark_count)) < 0.1)
    count_rate = .0  # NOTE(review): mangled constant?
    interval = (.0 / count_rate)
    (detector, parent, tl) = create_detector(efficiency=1, count_rate=count_rate)
    # Photons arriving within one dead-time interval of the previous
    # detection must be dropped (hence 4.5*interval adds no count).
    arrive_times = [0, (2 * interval), (4 * interval), (4.5 * interval), (5.1 * interval)]
    expect_len = [1, 2, 3, 3, 4]
    for (time, log_len) in zip(arrive_times, expect_len):
        tl.time = time
        detector.get()
        assert (len(parent.log) == log_len)
    time_resolution = 233
    (detector, parent, tl) = create_detector(efficiency=1, count_rate=.0, time_resolution=time_resolution)
    times = np.random.randint(0, .0, 100, dtype=np.int64)  # NOTE(review): mangled upper bound?
    times.sort()
    for t in times:
        tl.time = t
        detector.get()
    # Recorded times must be quantized to the detector's time resolution.
    assert ((parent.log[(- 1)][1] % time_resolution) == 0)
class Resnet18Triplet(nn.Module):
    """ResNet-18 backbone whose classifier head is replaced by an
    L2-normalised embedding projection (for triplet-loss training)."""

    def __init__(self, embedding_dimension=512, pretrained=False):
        super(Resnet18Triplet, self).__init__()
        self.model = resnet18(pretrained=pretrained)
        in_features = self.model.fc.in_features
        # Swap the 1000-way classifier for a bias-free embedding layer.
        self.model.fc = nn.Linear(in_features, embedding_dimension, bias=False)

    def forward(self, images):
        """Return unit-norm embeddings of shape (batch, embedding_dimension)."""
        raw_embedding = self.model(images)
        return F.normalize(raw_embedding, p=2, dim=1)
def random_sparse_pd_matrix(matrix_size, density=0.01, **kwargs):
    """Return a random sparse symmetric positive-definite matrix as a torch
    sparse COO tensor.

    Starts from a strictly positive diagonal (entry (i, i) = (i+1)/matrix_size)
    and applies random Givens plane rotations from both sides until the number
    of nonzeros reaches density * matrix_size**2; orthogonal similarity
    transforms preserve positive definiteness.

    Keyword args: `torch` (module override, e.g. for testing), `dtype`
    (default torch.double), `device` (default 'cpu').
    """
    import math
    # Allow callers to inject an alternative torch module.
    torch = kwargs.get('torch', globals()['torch'])
    dtype = kwargs.get('dtype', torch.double)
    device = kwargs.get('device', 'cpu')
    # Sparse dict-of-coordinates representation: {(i, j): value}.
    data = dict([((i, i), (float((i + 1)) / matrix_size)) for i in range(matrix_size)])

    def multiply(data, N, i, j, cs, sn, left=True):
        # Apply the plane rotation in place: mixes columns i and j when
        # left=True (indices (k, i)/(k, j)), rows i and j otherwise.
        for k in range(N):
            if left:
                (ik, jk) = ((k, i), (k, j))
            else:
                (ik, jk) = ((i, k), (j, k))
            (aik, ajk) = (data.get(ik, 0), data.get(jk, 0))
            (aik, ajk) = (((cs * aik) + (sn * ajk)), (((- sn) * aik) + (cs * ajk)))
            # Keep the dict sparse: drop entries rotated to exactly zero.
            if aik:
                data[ik] = aik
            else:
                data.pop(ik, None)
            if ajk:
                data[jk] = ajk
            else:
                data.pop(jk, None)

    target_nnz = ((density * matrix_size) * matrix_size)
    # Rotate random (i, j) planes until enough nonzeros accumulate.
    while (len(data) < target_nnz):
        i = random.randint(0, (matrix_size - 1))
        j = random.randint(0, (matrix_size - 1))
        if (i != j):
            theta = random.uniform(0, (2 * math.pi))
            cs = math.cos(theta)
            sn = math.sin(theta)
            multiply(data, matrix_size, i, j, cs, sn, left=True)
            multiply(data, matrix_size, i, j, cs, sn, left=False)
    (icoords, jcoords, values) = ([], [], [])
    # Emit coordinates in sorted order for a deterministic layout.
    for ((i, j), v) in sorted(data.items()):
        icoords.append(i)
        jcoords.append(j)
        values.append(v)
    indices_tensor = torch.tensor([icoords, jcoords])
    return torch.sparse_coo_tensor(indices_tensor, values, (matrix_size, matrix_size), dtype=dtype, device=device)
class RecordQueue(object):
    """A Caffe2 blobs queue wrapped with schema-aware reader/writer helpers.

    Wraps CreateBlobsQueue with a Struct schema so records can be produced by
    multiple threads and consumed field-by-field.
    """

    def __init__(self, fields, name=None, capacity=1, enforce_unique_name=False, num_threads=1):
        assert (isinstance(fields, list) or isinstance(fields, Struct)), 'fields must be either a Struct or a list of raw field names.'
        if isinstance(fields, list):
            # Promote raw column names to a Struct schema.
            fields = from_column_list(fields)
        self.schema = fields
        self.name = (name or 'queue')
        self.num_threads = num_threads
        num_blobs = len(self.schema.field_names())
        # The queue blob is created eagerly by running a one-off init net.
        init_net = core.Net((self.name + '/init_net'))
        self.blobs_queue = init_net.CreateBlobsQueue([], 1, capacity=capacity, num_blobs=num_blobs, enforce_unique_name=enforce_unique_name)
        core.workspace.RunNetOnce(init_net)
        self.writer = _QueueWriter(self.blobs_queue, self.schema)
        reader_name = (self.name + '_reader')
        self.reader = _QueueReader(self.blobs_queue, self.schema, reader_name)
        # Closing the queue on exit unblocks any waiting consumers.
        exit_net = core.Net((self.name + '/exit_net'))
        exit_net.CloseBlobsQueue(self.blobs_queue, 0)
        self.exit_step = core.execution_step('{}_close_step'.format(str(exit_net)), exit_net)

    def build(self, reader, process=None):
        """Build `num_threads` producer steps that read records from `reader`,
        optionally transform them with `process(net, fields)`, and write them
        into the queue.

        Returns (queue reader, concurrent producers step, exit step, schema).
        """
        producer_steps = []
        for i in range(self.num_threads):
            name = ('reader_' + str(i))
            net_reader = core.Net(name)
            (should_stop, fields) = reader.read_record(net_reader)
            step_read = core.execution_step(name, net_reader)
            name = ('queue_writer' + str(i))
            net_prod = core.Net(name)
            field_blobs = fields.field_blobs()
            if process:
                # Optional per-record transformation before enqueueing.
                field_blobs = process(net_prod, fields).field_blobs()
            self.writer.write(net_prod, field_blobs)
            step_prod = core.execution_step(name, net_prod)
            # Each producer loops read -> write until the reader signals stop.
            step = core.execution_step(('producer_' + str(i)), [step_read, step_prod], should_stop_blob=should_stop)
            producer_steps.append(step)
        producer_step = core.execution_step('producers', producer_steps, concurrent_substeps=True)
        return (self.reader, producer_step, self.exit_step, self.schema)
def get_BertAdam_optimizer(cfg, model):
    """Build a BertAdam optimizer with weight decay disabled for bias and
    normalization (gamma/beta) parameters.

    `cfg` must provide learning_rate, warm_up and total_steps.
    """
    param_optimizer = list(model.named_parameters())
    no_decay = ['bias', 'gamma', 'beta']
    # BUG FIX: parameter names are dotted paths such as 'encoder.layer.0.bias',
    # so the original exact-membership test `n in no_decay` never matched and
    # every parameter received weight decay. Substring matching is required.
    optimizer_grouped_parameters = [
        {'params': [p for (n, p) in param_optimizer if not any(nd in n for nd in no_decay)], 'weight_decay_rate': 0.01},
        {'params': [p for (n, p) in param_optimizer if any(nd in n for nd in no_decay)], 'weight_decay_rate': 0.0},
    ]
    return BertAdam(optimizer_grouped_parameters, lr=cfg.learning_rate, warmup=cfg.warm_up, t_total=cfg.total_steps)
def tokenize_corpus(filename, np_filename, print_interval=10000):
    """Tokenize a JSON-lines corpus and save the per-document token arrays
    to an .npy file.

    Each input line must be a JSON object with 'url' and 'text' fields;
    malformed lines are skipped with a warning (best-effort). Progress is
    printed every `print_interval` documents.

    NOTE(review): token ids are stored as np.uint16 and will silently wrap
    for vocabularies larger than 65535 — confirm the tokenizer's vocab size.
    """
    print(' > tokenizing {}'.format(filename))
    tokenizer = Tokenizer(cache_dir='./cache')
    tokenized_docs = []
    num_docs = 0
    num_tokens = 0
    start_time = time.time()
    with open(filename, 'r') as f:
        for line in f:
            try:
                myjson = json.loads(line)
                # Accessing 'url' validates the record even though the value
                # is unused; a missing key routes the line to the skip path.
                url = myjson['url']
                sample = myjson['text']
                tokens = tokenizer.tokenize_document(sample)
                tokenized_docs.append(np.array(tokens, dtype=np.uint16))
                num_docs += 1
                num_tokens += len(tokens)
                if ((num_docs % print_interval) == 0):
                    print(' processed {:9d} documents in {:.2f} (s) so far'.format(num_docs, (time.time() - start_time)), flush=True)
            except Exception as e:
                # Deliberate best-effort: report and skip malformed lines.
                print(' skipping ', line, e)
    # BUG FIX: typos in the summary messages ('document' -> 'documents',
    # 'tokenzed' -> 'tokenized').
    print(' >> processed {} documents with total of {} tokens ...'.format(num_docs, num_tokens))
    tokenized_docs = np.array(tokenized_docs, dtype=object)
    np.save(np_filename, tokenized_docs, allow_pickle=True)
    print(' >> saved the tokenized document to {} ...'.format(np_filename))
def AUNP_calc(classes, P, POP, AUC_dict):
    """AUNP: per-class AUC averaged with prevalence weights P[i] / POP[i].

    Returns the string 'None' if any lookup or arithmetic fails (library
    convention for undefined statistics).
    """
    try:
        return sum((P[cls] / POP[cls]) * AUC_dict[cls] for cls in classes)
    except Exception:
        return 'None'
def erase_3D_path(path, base_pos=5, item=AIR, offset=(0, 0, 0)):
    """Overwrite every voxel along `path` with `item` (AIR by default).

    `path` holds (x, z, y) triples; `base_pos` is the vertical base height
    added to each third coordinate; `offset` is added per axis. No-op for an
    empty path.
    """
    if len(path) == 0:
        return
    blocks = []
    for pos in path:
        # BUG FIX: the y coordinate previously used the hard-coded literal 5,
        # silently ignoring the `base_pos` parameter.
        blocks.append(Block(position=Point(x=(pos[0] + offset[0]),
                                           y=((pos[2] + base_pos) + offset[2]),
                                           z=(pos[1] + offset[1])),
                            type=item))
    CLIENT.spawnBlocks(Blocks(blocks=blocks))
    return
def get_real(input, input_type='linear', channels_axis=1):
    """Return the real half of a complex-packed tensor.

    For 'linear' inputs the real part is the first half of the last dimension
    (2-D and 3-D tensors supported); otherwise the split is taken along
    `channels_axis`.
    """
    if input_type == 'linear':
        half = input.size(-1) // 2
        if input.dim() == 2:
            return input[:, :half]
        elif input.dim() == 3:
            return input[:, :, :half]
    else:
        half = input.size(channels_axis) // 2
        return input.narrow(channels_axis, 0, half)
def _random_dataset(n_samples=1000, n_features=1000, representation='dense', dtype=np.float32):
    """Build a reproducible random matrix and split it 90/10 into train/val.

    'dense' yields a uniform ndarray; any other representation yields a
    5%-density CSR sparse matrix. Both use fixed random_state=0.
    Returns (X_train, X_val, None, None).
    """
    if representation == 'dense':
        rng = np.random.RandomState(0)
        X = rng.random_sample((n_samples, n_features)).astype(dtype, copy=False)
    else:
        X = sp.random(n_samples, n_features, density=0.05, format='csr', dtype=dtype, random_state=0)
    X, X_val = train_test_split(X, test_size=0.1, random_state=0)
    return (X, X_val, None, None)
class GardensPointDataset(Dataset):
    """GardensPoint Walking dataset loader for visual place recognition:
    day_right traverse as database, night_right traverse as queries."""
    def __init__(self, destination: str='images/GardensPoint/'):
        # Local directory the dataset lives in (downloaded on first load()).
        self.destination = destination
    def load(self) -> Tuple[(List[np.ndarray], List[np.ndarray], np.ndarray, np.ndarray)]:
        """Return (database images, query images, GThard, GTsoft).

        Frame i of the day traverse matches frame i of the night traverse,
        so the hard ground truth is the identity matrix; the soft ground
        truth additionally accepts matches within +/-8 frames (a 17x1 box
        filter applied to the identity).
        """
        print('===== Load dataset GardensPoint day_right--night_right')
        # Download on first use.
        if (not os.path.exists(self.destination)):
            self.download(self.destination)
        fns_db = sorted(glob((self.destination + 'day_right/*.jpg')))
        fns_q = sorted(glob((self.destination + 'night_right/*.jpg')))
        imgs_db = [np.array(Image.open(fn)) for fn in fns_db]
        imgs_q = [np.array(Image.open(fn)) for fn in fns_q]
        GThard = np.eye(len(imgs_db)).astype('bool')
        GTsoft = convolve2d(GThard.astype('int'), np.ones((17, 1), 'int'), mode='same').astype('bool')
        return (imgs_db, imgs_q, GThard, GTsoft)
    def download(self, destination: str):
        """Download and unpack the dataset zip archive into ``destination``."""
        print((('===== GardensPoint dataset does not exist. Download to ' + destination) + '...'))
        fn = 'GardensPoint_Walking.zip'
        # NOTE(review): the base-URL string literal on the next line is
        # truncated/garbled (unterminated quote) -- the line is not valid
        # Python as written. The original download URL prefix must be
        # restored before this method can run.
        url = (' + fn)
        path = os.path.expanduser(destination)
        os.makedirs(path, exist_ok=True)
        urllib.request.urlretrieve(url, (path + fn))
        with zipfile.ZipFile((path + fn), 'r') as zip_ref:
            zip_ref.extractall(destination)
        # Remove the downloaded archive after extraction.
        os.remove((destination + fn))
def random_fgp_morphism_0(*args, **kwds):
    """Return a random endomorphism of a random finitely generated module.

    Each Smith-form generator is sent to a random integer multiple of
    itself, which always defines a valid homomorphism A -> A. Arguments
    are forwarded to ``random_fgp_module``.
    """
    domain = random_fgp_module(*args, **kwds)
    images = [ZZ.random_element() * gen for gen in domain.smith_form_gens()]
    return domain.hom(images)
def waterfall_legacy(expected_value, shap_values=None, features=None, feature_names=None, max_display=10, show=True):
    """Draw a waterfall plot explaining a single model prediction.

    Bars walk from the base value E[f(X)] to the actual output f(x), one
    SHAP value per feature, largest magnitude first. Positive
    contributions are drawn as red right-pointing arrows, negative ones
    as blue left-pointing arrows.

    Parameters
    ----------
    expected_value : scalar or Explanation
        Base value of the model output; if an Explanation object is
        passed, all remaining inputs are taken from it.
    shap_values : 1-D array
        SHAP values for one sample (a 2-D matrix is rejected).
    features : 1-D array or pd.Series, optional
        Feature values shown in the tick labels.
    feature_names : sequence of str, optional
        Names per feature; auto-generated when omitted.
    max_display : int
        Maximum number of bars; the rest are collapsed into a single
        "other features" bar.
    show : bool
        If False, interactive mode is disabled and the figure is returned.
    """
    if (show is False):
        plt.ioff()
    upper_bounds = None
    lower_bounds = None
    # Unpack an Explanation object passed as the first argument.
    if str(type(expected_value)).endswith("Explanation'>"):
        shap_exp = expected_value
        expected_value = shap_exp.expected_value
        shap_values = shap_exp.values
        features = shap_exp.data
        feature_names = shap_exp.feature_names
        lower_bounds = getattr(shap_exp, 'lower_bounds', None)
        upper_bounds = getattr(shap_exp, 'upper_bounds', None)
    # Only a scalar base value and a single explanation row are supported.
    if ((isinstance(expected_value, np.ndarray) and (len(expected_value) > 0)) or isinstance(expected_value, list)):
        raise Exception('waterfall_plot requires a scalar expected_value of the model output as the first parameter, but you have passed an array as the first parameter! Try shap.waterfall_plot(explainer.expected_value[0], shap_values[0], X[0]) or for multi-output models try shap.waterfall_plot(explainer.expected_value[0], shap_values[0][0], X[0]).')
    if (len(shap_values.shape) == 2):
        raise Exception('The waterfall_plot can currently only plot a single explanation but a matrix of explanations was passed!')
    # A pandas Series supplies both the values and (by default) the names.
    if isinstance(features, pd.Series):
        if (feature_names is None):
            feature_names = list(features.index)
        features = features.values
    if (feature_names is None):
        feature_names = np.array([(labels['FEATURE'] % str(i)) for i in range(len(shap_values))])
    num_features = min(max_display, len(shap_values))
    row_height = 0.5
    # Row indices top-to-bottom; features sorted by decreasing |SHAP value|.
    rng = range((num_features - 1), (- 1), (- 1))
    order = np.argsort((- np.abs(shap_values)))
    pos_lefts = []
    pos_inds = []
    pos_widths = []
    pos_low = []
    pos_high = []
    neg_lefts = []
    neg_inds = []
    neg_widths = []
    neg_low = []
    neg_high = []
    # loc walks backwards from f(x) toward E[f(X)] as bars are placed.
    loc = (expected_value + shap_values.sum())
    yticklabels = ['' for i in range((num_features + 1))]
    plt.gcf().set_size_inches(8, ((num_features * row_height) + 1.5))
    # When features are hidden, the last row aggregates them.
    if (num_features == len(shap_values)):
        num_individual = num_features
    else:
        num_individual = (num_features - 1)
    # Collect the geometry of each individual bar; the sign of the SHAP
    # value decides its list (positive/red vs negative/blue).
    for i in range(num_individual):
        sval = shap_values[order[i]]
        loc -= sval
        if (sval >= 0):
            pos_inds.append(rng[i])
            pos_widths.append(sval)
            if (lower_bounds is not None):
                pos_low.append(lower_bounds[order[i]])
                pos_high.append(upper_bounds[order[i]])
            pos_lefts.append(loc)
        else:
            neg_inds.append(rng[i])
            neg_widths.append(sval)
            if (lower_bounds is not None):
                neg_low.append(lower_bounds[order[i]])
                neg_high.append(upper_bounds[order[i]])
            neg_lefts.append(loc)
        # Dashed connector between consecutive bars.
        if ((num_individual != num_features) or ((i + 4) < num_individual)):
            plt.plot([loc, loc], [((rng[i] - 1) - 0.4), (rng[i] + 0.4)], color='#bbbbbb', linestyle='--', linewidth=0.5, zorder=(- 1))
        if (features is None):
            yticklabels[rng[i]] = feature_names[order[i]]
        else:
            yticklabels[rng[i]] = ((format_value(features[order[i]], '%0.03f') + ' = ') + feature_names[order[i]])
    # Aggregate bar covering all remaining (hidden) features.
    if (num_features < len(shap_values)):
        yticklabels[0] = ('%d other features' % ((len(shap_values) - num_features) + 1))
        remaining_impact = (expected_value - loc)
        if (remaining_impact < 0):
            pos_inds.append(0)
            pos_widths.append((- remaining_impact))
            pos_lefts.append((loc + remaining_impact))
        else:
            neg_inds.append(0)
            neg_widths.append((- remaining_impact))
            neg_lefts.append((loc + remaining_impact))
    points = (((pos_lefts + list((np.array(pos_lefts) + np.array(pos_widths)))) + neg_lefts) + list((np.array(neg_lefts) + np.array(neg_widths))))
    dataw = (np.max(points) - np.min(points))
    # Invisible (alpha=0) padded bars reserve horizontal space so the
    # value labels fit inside the axes limits.
    label_padding = np.array([((0.1 * dataw) if (w < 1) else 0) for w in pos_widths])
    plt.barh(pos_inds, ((np.array(pos_widths) + label_padding) + (0.02 * dataw)), left=(np.array(pos_lefts) - (0.01 * dataw)), color=colors.red_rgb, alpha=0)
    label_padding = np.array([(((- 0.1) * dataw) if ((- w) < 1) else 0) for w in neg_widths])
    plt.barh(neg_inds, ((np.array(neg_widths) + label_padding) - (0.02 * dataw)), left=(np.array(neg_lefts) + (0.01 * dataw)), color=colors.blue_rgb, alpha=0)
    head_length = 0.08
    bar_width = 0.8
    # Convert the arrow head length from figure inches into data units.
    xlen = (plt.xlim()[1] - plt.xlim()[0])
    fig = plt.gcf()
    ax = plt.gca()
    bbox = ax.get_window_extent().transformed(fig.dpi_scale_trans.inverted())
    width = bbox.width
    bbox_to_xscale = (xlen / width)
    hl_scaled = (bbox_to_xscale * head_length)
    renderer = fig.canvas.get_renderer()
    # Positive (red) arrows with optional error bars and value labels.
    for i in range(len(pos_inds)):
        dist = pos_widths[i]
        arrow_obj = plt.arrow(pos_lefts[i], pos_inds[i], max((dist - hl_scaled), 1e-06), 0, head_length=min(dist, hl_scaled), color=colors.red_rgb, width=bar_width, head_width=bar_width)
        if ((pos_low is not None) and (i < len(pos_low))):
            plt.errorbar((pos_lefts[i] + pos_widths[i]), pos_inds[i], xerr=np.array([[(pos_widths[i] - pos_low[i])], [(pos_high[i] - pos_widths[i])]]), ecolor=colors.light_red_rgb)
        txt_obj = plt.text((pos_lefts[i] + (0.5 * dist)), pos_inds[i], format_value(pos_widths[i], '%+0.02f'), horizontalalignment='center', verticalalignment='center', color='white', fontsize=12)
        text_bbox = txt_obj.get_window_extent(renderer=renderer)
        arrow_bbox = arrow_obj.get_window_extent(renderer=renderer)
        # If the label does not fit inside the arrow, redraw it outside.
        if (text_bbox.width > arrow_bbox.width):
            txt_obj.remove()
            txt_obj = plt.text(((pos_lefts[i] + ((5 / 72) * bbox_to_xscale)) + dist), pos_inds[i], format_value(pos_widths[i], '%+0.02f'), horizontalalignment='left', verticalalignment='center', color=colors.red_rgb, fontsize=12)
    # Negative (blue) arrows, mirrored to point left.
    for i in range(len(neg_inds)):
        dist = neg_widths[i]
        arrow_obj = plt.arrow(neg_lefts[i], neg_inds[i], (- max(((- dist) - hl_scaled), 1e-06)), 0, head_length=min((- dist), hl_scaled), color=colors.blue_rgb, width=bar_width, head_width=bar_width)
        if ((neg_low is not None) and (i < len(neg_low))):
            plt.errorbar((neg_lefts[i] + neg_widths[i]), neg_inds[i], xerr=np.array([[(neg_widths[i] - neg_low[i])], [(neg_high[i] - neg_widths[i])]]), ecolor=colors.light_blue_rgb)
        txt_obj = plt.text((neg_lefts[i] + (0.5 * dist)), neg_inds[i], format_value(neg_widths[i], '%+0.02f'), horizontalalignment='center', verticalalignment='center', color='white', fontsize=12)
        text_bbox = txt_obj.get_window_extent(renderer=renderer)
        arrow_bbox = arrow_obj.get_window_extent(renderer=renderer)
        if (text_bbox.width > arrow_bbox.width):
            txt_obj.remove()
            txt_obj = plt.text(((neg_lefts[i] - ((5 / 72) * bbox_to_xscale)) + dist), neg_inds[i], format_value(neg_widths[i], '%+0.02f'), horizontalalignment='right', verticalalignment='center', color=colors.blue_rgb, fontsize=12)
    # Duplicate y tick labels: "value = name" plus the bare name.
    plt.yticks((list(range(num_features)) * 2), (yticklabels[:(- 1)] + [label.split('=')[(- 1)] for label in yticklabels[:(- 1)]]), fontsize=13)
    for i in range(num_features):
        plt.axhline(i, color='#cccccc', lw=0.5, dashes=(1, 5), zorder=(- 1))
    # Dashed reference lines at the base value E[f(X)] and the output f(x).
    plt.axvline(expected_value, 0, (1 / num_features), color='#bbbbbb', linestyle='--', linewidth=0.5, zorder=(- 1))
    fx = (expected_value + shap_values.sum())
    plt.axvline(fx, 0, 1, color='#bbbbbb', linestyle='--', linewidth=0.5, zorder=(- 1))
    plt.gca().xaxis.set_ticks_position('bottom')
    plt.gca().yaxis.set_ticks_position('none')
    plt.gca().spines['right'].set_visible(False)
    plt.gca().spines['top'].set_visible(False)
    plt.gca().spines['left'].set_visible(False)
    ax.tick_params(labelsize=13)
    (xmin, xmax) = ax.get_xlim()
    # Twin x-axes carry the E[f(X)] and f(x) annotations above the plot;
    # the two near-identical tick positions hold the symbol and the value.
    ax2 = ax.twiny()
    ax2.set_xlim(xmin, xmax)
    ax2.set_xticks([expected_value, (expected_value + 1e-08)])
    ax2.set_xticklabels(['\n$E[f(X)]$', (('\n$ = ' + format_value(expected_value, '%0.03f')) + '$')], fontsize=12, ha='left')
    ax2.spines['right'].set_visible(False)
    ax2.spines['top'].set_visible(False)
    ax2.spines['left'].set_visible(False)
    ax3 = ax2.twiny()
    ax3.set_xlim(xmin, xmax)
    ax3.set_xticks([(expected_value + shap_values.sum()), ((expected_value + shap_values.sum()) + 1e-08)])
    ax3.set_xticklabels(['$f(x)$', (('$ = ' + format_value(fx, '%0.03f')) + '$')], fontsize=12, ha='left')
    # Nudge annotation labels into place (offsets are in typographic points).
    tick_labels = ax3.xaxis.get_majorticklabels()
    tick_labels[0].set_transform((tick_labels[0].get_transform() + matplotlib.transforms.ScaledTranslation(((- 10) / 72.0), 0, fig.dpi_scale_trans)))
    tick_labels[1].set_transform((tick_labels[1].get_transform() + matplotlib.transforms.ScaledTranslation((12 / 72.0), 0, fig.dpi_scale_trans)))
    tick_labels[1].set_color('#999999')
    ax3.spines['right'].set_visible(False)
    ax3.spines['top'].set_visible(False)
    ax3.spines['left'].set_visible(False)
    tick_labels = ax2.xaxis.get_majorticklabels()
    tick_labels[0].set_transform((tick_labels[0].get_transform() + matplotlib.transforms.ScaledTranslation(((- 20) / 72.0), 0, fig.dpi_scale_trans)))
    tick_labels[1].set_transform((tick_labels[1].get_transform() + matplotlib.transforms.ScaledTranslation((22 / 72.0), ((- 1) / 72.0), fig.dpi_scale_trans)))
    tick_labels[1].set_color('#999999')
    tick_labels = ax.yaxis.get_majorticklabels()
    for i in range(num_features):
        tick_labels[i].set_color('#999999')
    if show:
        plt.show()
    else:
        return plt.gcf()
def prepare_bounds(bounds, n):
    """Broadcast scalar lower/upper bounds to length-``n`` float arrays.

    ``bounds`` is a ``(lb, ub)`` pair; each member may be a scalar or an
    array-like. Scalars are tiled to shape ``(n,)``; array-likes are
    converted to float arrays and passed through unchanged.
    """
    prepared = []
    for bound in bounds:
        arr = np.asarray(bound, dtype=float)
        if arr.ndim == 0:
            arr = np.resize(arr, n)
        prepared.append(arr)
    return tuple(prepared)
class BertAdam(Optimizer):
    """Adam variant used for BERT fine-tuning.

    Differences from standard Adam (all visible in ``step``): no bias
    correction of the moment estimates, an optional warmup learning-rate
    schedule driven by the module-level ``SCHEDULES`` table, weight decay
    added directly to the update (not folded into the moments), and
    per-parameter gradient-norm clipping.

    Parameters
    ----------
    params : iterable
        Parameters to optimize, or dicts defining parameter groups.
    lr : float
        Peak learning rate (required, must be >= 0).
    warmup : float
        Fraction of ``t_total`` spent in warmup, in [0, 1); -1 disables it.
    t_total : int
        Total number of optimization steps for the schedule; -1 means a
        constant learning rate.
    schedule : str
        Key into ``SCHEDULES`` selecting the warmup schedule function.
    b1, b2 : float
        Adam beta coefficients, each in [0, 1).
    e : float
        Denominator epsilon (>= 0).
    weight_decay : float
        L2 weight-decay factor applied to the update.
    max_grad_norm : float
        Per-parameter gradient-norm clip threshold; <= 0 disables clipping.
    """

    def __init__(self, params, lr=required, warmup=(- 1), t_total=(- 1), schedule='warmup_linear', b1=0.9, b2=0.999, e=1e-06, weight_decay=0.01, max_grad_norm=1.0):
        if ((lr is not required) and (lr < 0.0)):
            raise ValueError('Invalid learning rate: {} - should be >= 0.0'.format(lr))
        if (schedule not in SCHEDULES):
            raise ValueError('Invalid schedule parameter: {}'.format(schedule))
        if ((not (0.0 <= warmup < 1.0)) and (not (warmup == (- 1)))):
            raise ValueError('Invalid warmup: {} - should be in [0.0, 1.0[ or -1'.format(warmup))
        if (not (0.0 <= b1 < 1.0)):
            raise ValueError('Invalid b1 parameter: {} - should be in [0.0, 1.0['.format(b1))
        if (not (0.0 <= b2 < 1.0)):
            raise ValueError('Invalid b2 parameter: {} - should be in [0.0, 1.0['.format(b2))
        if (not (e >= 0.0)):
            raise ValueError('Invalid epsilon value: {} - should be >= 0.0'.format(e))
        defaults = dict(lr=lr, schedule=schedule, warmup=warmup, t_total=t_total, b1=b1, b2=b2, e=e, weight_decay=weight_decay, max_grad_norm=max_grad_norm)
        super(BertAdam, self).__init__(params, defaults)

    def get_lr(self):
        """Return the current (scheduled) learning rate for each parameter.

        Returns ``[0]`` if no optimization step has been taken yet (the
        per-parameter state is still empty).
        """
        lr = []
        for group in self.param_groups:
            for p in group['params']:
                state = self.state[p]
                if (len(state) == 0):
                    return [0]
                if (group['t_total'] != (- 1)):
                    schedule_fct = SCHEDULES[group['schedule']]
                    lr_scheduled = (group['lr'] * schedule_fct((state['step'] / group['t_total']), group['warmup']))
                else:
                    lr_scheduled = group['lr']
                lr.append(lr_scheduled)
        return lr

    def step(self, closure=None):
        """Perform a single optimization step.

        Parameters
        ----------
        closure : callable, optional
            Re-evaluates the model and returns the loss.
        """
        loss = None
        if (closure is not None):
            loss = closure()
        for group in self.param_groups:
            for p in group['params']:
                if (p.grad is None):
                    continue
                grad = p.grad.data
                if grad.is_sparse:
                    raise RuntimeError('Adam does not support sparse gradients, please consider SparseAdam instead')
                state = self.state[p]
                # Lazy state initialization on first step for this parameter.
                if (len(state) == 0):
                    state['step'] = 0
                    state['next_m'] = torch.zeros_like(p.data)
                    state['next_v'] = torch.zeros_like(p.data)
                (next_m, next_v) = (state['next_m'], state['next_v'])
                (beta1, beta2) = (group['b1'], group['b2'])
                # In-place clip of p.grad; `grad` aliases the same storage,
                # so the clipped values are used below.
                if (group['max_grad_norm'] > 0):
                    clip_grad_norm_(p, group['max_grad_norm'])
                # Decay the first and second moment running averages.
                # Fix: use the keyword alpha/value forms -- the positional
                # overloads add_(scalar, tensor) / addcmul_(scalar, t1, t2)
                # are deprecated and removed in recent PyTorch releases.
                next_m.mul_(beta1).add_(grad, alpha=(1 - beta1))
                next_v.mul_(beta2).addcmul_(grad, grad, value=(1 - beta2))
                # NOTE: unlike standard Adam, no bias correction is applied
                # (intentional for BERT fine-tuning).
                update = (next_m / (next_v.sqrt() + group['e']))
                # Weight decay is added to the update directly, so it is not
                # distorted by the sqrt(v) denominator.
                if (group['weight_decay'] > 0.0):
                    update += (group['weight_decay'] * p.data)
                if (group['t_total'] != (- 1)):
                    schedule_fct = SCHEDULES[group['schedule']]
                    lr_scheduled = (group['lr'] * schedule_fct((state['step'] / group['t_total']), group['warmup']))
                else:
                    lr_scheduled = group['lr']
                update_with_lr = (lr_scheduled * update)
                p.data.add_((- update_with_lr))
                state['step'] += 1
        return loss
class TestBroadcast(object):
    """Parameter-broadcasting tests for Generator distribution methods.

    Pattern used throughout: for each distribution, each parameter in turn
    is tiled to length 3 while the others stay length 1; the broadcast draw
    (from a freshly re-seeded Generator) must equal the fixed ``desired``
    array, and invalid parameter values must raise ValueError after
    broadcasting.
    """
    def setup(self):
        # NOTE(review): the seed value is missing here -- `self.seed =` has
        # no right-hand side and is not valid Python as written. Restore the
        # original integer seed literal.
        self.seed =
    def test_uniform(self):
        random = Generator(MT19937(self.seed))
        low = [0]
        high = [1]
        uniform = random.uniform
        desired = np.array([0., 0., 0.])
        random = Generator(MT19937(self.seed))
        actual = random.uniform((low * 3), high)
        assert_array_almost_equal(actual, desired, decimal=14)
        random = Generator(MT19937(self.seed))
        actual = random.uniform(low, (high * 3))
        assert_array_almost_equal(actual, desired, decimal=14)
    def test_normal(self):
        loc = [0]
        scale = [1]
        bad_scale = [(- 1)]
        random = Generator(MT19937(self.seed))
        desired = np.array([(- 0.), 0., 0.])
        random = Generator(MT19937(self.seed))
        actual = random.normal((loc * 3), scale)
        assert_array_almost_equal(actual, desired, decimal=14)
        assert_raises(ValueError, random.normal, (loc * 3), bad_scale)
        random = Generator(MT19937(self.seed))
        normal = random.normal
        actual = normal(loc, (scale * 3))
        assert_array_almost_equal(actual, desired, decimal=14)
        assert_raises(ValueError, normal, loc, (bad_scale * 3))
    def test_beta(self):
        a = [1]
        b = [2]
        bad_a = [(- 1)]
        bad_b = [(- 2)]
        desired = np.array([0., 0., 0.])
        random = Generator(MT19937(self.seed))
        beta = random.beta
        actual = beta((a * 3), b)
        assert_array_almost_equal(actual, desired, decimal=14)
        assert_raises(ValueError, beta, (bad_a * 3), b)
        assert_raises(ValueError, beta, (a * 3), bad_b)
        random = Generator(MT19937(self.seed))
        actual = random.beta(a, (b * 3))
        assert_array_almost_equal(actual, desired, decimal=14)
    def test_exponential(self):
        scale = [1]
        bad_scale = [(- 1)]
        desired = np.array([0., 0., 0.])
        random = Generator(MT19937(self.seed))
        actual = random.exponential((scale * 3))
        assert_array_almost_equal(actual, desired, decimal=14)
        assert_raises(ValueError, random.exponential, (bad_scale * 3))
    def test_standard_gamma(self):
        shape = [1]
        bad_shape = [(- 1)]
        desired = np.array([0., 0., 0.])
        random = Generator(MT19937(self.seed))
        std_gamma = random.standard_gamma
        actual = std_gamma((shape * 3))
        assert_array_almost_equal(actual, desired, decimal=14)
        assert_raises(ValueError, std_gamma, (bad_shape * 3))
    def test_gamma(self):
        shape = [1]
        scale = [2]
        bad_shape = [(- 1)]
        bad_scale = [(- 2)]
        desired = np.array([1., 0., 1.])
        random = Generator(MT19937(self.seed))
        gamma = random.gamma
        actual = gamma((shape * 3), scale)
        assert_array_almost_equal(actual, desired, decimal=14)
        assert_raises(ValueError, gamma, (bad_shape * 3), scale)
        assert_raises(ValueError, gamma, (shape * 3), bad_scale)
        random = Generator(MT19937(self.seed))
        gamma = random.gamma
        actual = gamma(shape, (scale * 3))
        assert_array_almost_equal(actual, desired, decimal=14)
        assert_raises(ValueError, gamma, bad_shape, (scale * 3))
        assert_raises(ValueError, gamma, shape, (bad_scale * 3))
    def test_f(self):
        dfnum = [1]
        dfden = [2]
        bad_dfnum = [(- 1)]
        bad_dfden = [(- 2)]
        desired = np.array([0., 7., 0.])
        random = Generator(MT19937(self.seed))
        f = random.f
        actual = f((dfnum * 3), dfden)
        assert_array_almost_equal(actual, desired, decimal=14)
        assert_raises(ValueError, f, (bad_dfnum * 3), dfden)
        assert_raises(ValueError, f, (dfnum * 3), bad_dfden)
        random = Generator(MT19937(self.seed))
        f = random.f
        actual = f(dfnum, (dfden * 3))
        assert_array_almost_equal(actual, desired, decimal=14)
        assert_raises(ValueError, f, bad_dfnum, (dfden * 3))
        assert_raises(ValueError, f, dfnum, (bad_dfden * 3))
    def test_noncentral_f(self):
        dfnum = [2]
        dfden = [3]
        nonc = [4]
        bad_dfnum = [0]
        bad_dfden = [(- 1)]
        bad_nonc = [(- 2)]
        desired = np.array([2., 12., 1.])
        random = Generator(MT19937(self.seed))
        nonc_f = random.noncentral_f
        actual = nonc_f((dfnum * 3), dfden, nonc)
        assert_array_almost_equal(actual, desired, decimal=14)
        # nan noncentrality must propagate to nan output, not raise.
        assert np.all(np.isnan(nonc_f(dfnum, dfden, ([np.nan] * 3))))
        assert_raises(ValueError, nonc_f, (bad_dfnum * 3), dfden, nonc)
        assert_raises(ValueError, nonc_f, (dfnum * 3), bad_dfden, nonc)
        assert_raises(ValueError, nonc_f, (dfnum * 3), dfden, bad_nonc)
        random = Generator(MT19937(self.seed))
        nonc_f = random.noncentral_f
        actual = nonc_f(dfnum, (dfden * 3), nonc)
        assert_array_almost_equal(actual, desired, decimal=14)
        assert_raises(ValueError, nonc_f, bad_dfnum, (dfden * 3), nonc)
        assert_raises(ValueError, nonc_f, dfnum, (bad_dfden * 3), nonc)
        assert_raises(ValueError, nonc_f, dfnum, (dfden * 3), bad_nonc)
        random = Generator(MT19937(self.seed))
        nonc_f = random.noncentral_f
        actual = nonc_f(dfnum, dfden, (nonc * 3))
        assert_array_almost_equal(actual, desired, decimal=14)
        assert_raises(ValueError, nonc_f, bad_dfnum, dfden, (nonc * 3))
        assert_raises(ValueError, nonc_f, dfnum, bad_dfden, (nonc * 3))
        assert_raises(ValueError, nonc_f, dfnum, dfden, (bad_nonc * 3))
    def test_noncentral_f_small_df(self):
        random = Generator(MT19937(self.seed))
        desired = np.array([0., 0.])
        actual = random.noncentral_f(0.9, 0.9, 2, size=2)
        assert_array_almost_equal(actual, desired, decimal=14)
    def test_chisquare(self):
        df = [1]
        bad_df = [(- 1)]
        desired = np.array([0., 1., 2.])
        random = Generator(MT19937(self.seed))
        actual = random.chisquare((df * 3))
        assert_array_almost_equal(actual, desired, decimal=14)
        assert_raises(ValueError, random.chisquare, (bad_df * 3))
    def test_noncentral_chisquare(self):
        df = [1]
        nonc = [2]
        bad_df = [(- 1)]
        bad_nonc = [(- 2)]
        desired = np.array([0., 5., 0.])
        random = Generator(MT19937(self.seed))
        nonc_chi = random.noncentral_chisquare
        actual = nonc_chi((df * 3), nonc)
        assert_array_almost_equal(actual, desired, decimal=14)
        assert_raises(ValueError, nonc_chi, (bad_df * 3), nonc)
        assert_raises(ValueError, nonc_chi, (df * 3), bad_nonc)
        random = Generator(MT19937(self.seed))
        nonc_chi = random.noncentral_chisquare
        actual = nonc_chi(df, (nonc * 3))
        assert_array_almost_equal(actual, desired, decimal=14)
        assert_raises(ValueError, nonc_chi, bad_df, (nonc * 3))
        assert_raises(ValueError, nonc_chi, df, (bad_nonc * 3))
    def test_standard_t(self):
        df = [1]
        bad_df = [(- 1)]
        desired = np.array([(- 1.), (- 1.), 0.])
        random = Generator(MT19937(self.seed))
        actual = random.standard_t((df * 3))
        assert_array_almost_equal(actual, desired, decimal=14)
        assert_raises(ValueError, random.standard_t, (bad_df * 3))
    def test_vonmises(self):
        mu = [2]
        kappa = [1]
        bad_kappa = [(- 1)]
        desired = np.array([2., 2., (- 2.)])
        random = Generator(MT19937(self.seed))
        actual = random.vonmises((mu * 3), kappa)
        assert_array_almost_equal(actual, desired, decimal=14)
        assert_raises(ValueError, random.vonmises, (mu * 3), bad_kappa)
        random = Generator(MT19937(self.seed))
        actual = random.vonmises(mu, (kappa * 3))
        assert_array_almost_equal(actual, desired, decimal=14)
        assert_raises(ValueError, random.vonmises, mu, (bad_kappa * 3))
    def test_pareto(self):
        a = [1]
        bad_a = [(- 1)]
        desired = np.array([0., 0., 1.])
        random = Generator(MT19937(self.seed))
        actual = random.pareto((a * 3))
        assert_array_almost_equal(actual, desired, decimal=14)
        assert_raises(ValueError, random.pareto, (bad_a * 3))
    def test_weibull(self):
        a = [1]
        bad_a = [(- 1)]
        desired = np.array([0., 0., 0.])
        random = Generator(MT19937(self.seed))
        actual = random.weibull((a * 3))
        assert_array_almost_equal(actual, desired, decimal=14)
        assert_raises(ValueError, random.weibull, (bad_a * 3))
    def test_power(self):
        a = [1]
        bad_a = [(- 1)]
        desired = np.array([0., 0., 0.])
        random = Generator(MT19937(self.seed))
        actual = random.power((a * 3))
        assert_array_almost_equal(actual, desired, decimal=14)
        assert_raises(ValueError, random.power, (bad_a * 3))
    def test_laplace(self):
        loc = [0]
        scale = [1]
        bad_scale = [(- 1)]
        desired = np.array([(- 1.), (- 0.), 0.])
        random = Generator(MT19937(self.seed))
        laplace = random.laplace
        actual = laplace((loc * 3), scale)
        assert_array_almost_equal(actual, desired, decimal=14)
        assert_raises(ValueError, laplace, (loc * 3), bad_scale)
        random = Generator(MT19937(self.seed))
        laplace = random.laplace
        actual = laplace(loc, (scale * 3))
        assert_array_almost_equal(actual, desired, decimal=14)
        assert_raises(ValueError, laplace, loc, (bad_scale * 3))
    def test_gumbel(self):
        loc = [0]
        scale = [1]
        bad_scale = [(- 1)]
        desired = np.array([1., 1., (- 0.)])
        random = Generator(MT19937(self.seed))
        gumbel = random.gumbel
        actual = gumbel((loc * 3), scale)
        assert_array_almost_equal(actual, desired, decimal=14)
        assert_raises(ValueError, gumbel, (loc * 3), bad_scale)
        random = Generator(MT19937(self.seed))
        gumbel = random.gumbel
        actual = gumbel(loc, (scale * 3))
        assert_array_almost_equal(actual, desired, decimal=14)
        assert_raises(ValueError, gumbel, loc, (bad_scale * 3))
    def test_logistic(self):
        loc = [0]
        scale = [1]
        bad_scale = [(- 1)]
        desired = np.array([(- 1.), (- 1.), 1.])
        random = Generator(MT19937(self.seed))
        actual = random.logistic((loc * 3), scale)
        assert_array_almost_equal(actual, desired, decimal=14)
        assert_raises(ValueError, random.logistic, (loc * 3), bad_scale)
        random = Generator(MT19937(self.seed))
        actual = random.logistic(loc, (scale * 3))
        assert_array_almost_equal(actual, desired, decimal=14)
        assert_raises(ValueError, random.logistic, loc, (bad_scale * 3))
        # Zero scale is a valid degenerate case: the draw equals loc.
        assert_equal(random.logistic(1.0, 0.0), 1.0)
    def test_lognormal(self):
        mean = [0]
        sigma = [1]
        bad_sigma = [(- 1)]
        desired = np.array([0., 2., 1.])
        random = Generator(MT19937(self.seed))
        lognormal = random.lognormal
        actual = lognormal((mean * 3), sigma)
        assert_array_almost_equal(actual, desired, decimal=14)
        assert_raises(ValueError, lognormal, (mean * 3), bad_sigma)
        random = Generator(MT19937(self.seed))
        # NOTE(review): `actual` is computed here but never compared to
        # `desired` -- an assert_array_almost_equal appears to have been
        # dropped (every sibling test asserts after this draw); confirm.
        actual = random.lognormal(mean, (sigma * 3))
        assert_raises(ValueError, random.lognormal, mean, (bad_sigma * 3))
    def test_rayleigh(self):
        scale = [1]
        bad_scale = [(- 1)]
        desired = np.array([0., 0., 1.])
        random = Generator(MT19937(self.seed))
        actual = random.rayleigh((scale * 3))
        assert_array_almost_equal(actual, desired, decimal=14)
        assert_raises(ValueError, random.rayleigh, (bad_scale * 3))
    def test_wald(self):
        mean = [0.5]
        scale = [1]
        bad_mean = [0]
        bad_scale = [(- 2)]
        desired = np.array([0., 0., 0.])
        random = Generator(MT19937(self.seed))
        actual = random.wald((mean * 3), scale)
        assert_array_almost_equal(actual, desired, decimal=14)
        assert_raises(ValueError, random.wald, (bad_mean * 3), scale)
        assert_raises(ValueError, random.wald, (mean * 3), bad_scale)
        random = Generator(MT19937(self.seed))
        actual = random.wald(mean, (scale * 3))
        assert_array_almost_equal(actual, desired, decimal=14)
        assert_raises(ValueError, random.wald, bad_mean, (scale * 3))
        assert_raises(ValueError, random.wald, mean, (bad_scale * 3))
    def test_triangular(self):
        left = [1]
        right = [3]
        mode = [2]
        bad_left_one = [3]
        bad_mode_one = [4]
        (bad_left_two, bad_mode_two) = (right * 2)
        desired = np.array([1., 1., 2.])
        random = Generator(MT19937(self.seed))
        triangular = random.triangular
        actual = triangular((left * 3), mode, right)
        assert_array_almost_equal(actual, desired, decimal=14)
        assert_raises(ValueError, triangular, (bad_left_one * 3), mode, right)
        assert_raises(ValueError, triangular, (left * 3), bad_mode_one, right)
        assert_raises(ValueError, triangular, (bad_left_two * 3), bad_mode_two, right)
        random = Generator(MT19937(self.seed))
        triangular = random.triangular
        actual = triangular(left, (mode * 3), right)
        assert_array_almost_equal(actual, desired, decimal=14)
        assert_raises(ValueError, triangular, bad_left_one, (mode * 3), right)
        assert_raises(ValueError, triangular, left, (bad_mode_one * 3), right)
        assert_raises(ValueError, triangular, bad_left_two, bad_mode_two, (right * 3))
        random = Generator(MT19937(self.seed))
        triangular = random.triangular
        actual = triangular(left, mode, (right * 3))
        assert_array_almost_equal(actual, desired, decimal=14)
        assert_raises(ValueError, triangular, bad_left_one, mode, (right * 3))
        assert_raises(ValueError, triangular, left, bad_mode_one, (right * 3))
        assert_raises(ValueError, triangular, bad_left_two, bad_mode_two, (right * 3))
        # Degenerate scalar orderings must also be rejected.
        assert_raises(ValueError, triangular, 10.0, 0.0, 20.0)
        assert_raises(ValueError, triangular, 10.0, 25.0, 20.0)
        assert_raises(ValueError, triangular, 10.0, 10.0, 10.0)
    def test_binomial(self):
        n = [1]
        p = [0.5]
        bad_n = [(- 1)]
        bad_p_one = [(- 1)]
        bad_p_two = [1.5]
        desired = np.array([0, 0, 1])
        random = Generator(MT19937(self.seed))
        binom = random.binomial
        actual = binom((n * 3), p)
        assert_array_equal(actual, desired)
        assert_raises(ValueError, binom, (bad_n * 3), p)
        assert_raises(ValueError, binom, (n * 3), bad_p_one)
        assert_raises(ValueError, binom, (n * 3), bad_p_two)
        random = Generator(MT19937(self.seed))
        actual = random.binomial(n, (p * 3))
        assert_array_equal(actual, desired)
        assert_raises(ValueError, binom, bad_n, (p * 3))
        assert_raises(ValueError, binom, n, (bad_p_one * 3))
        assert_raises(ValueError, binom, n, (bad_p_two * 3))
    def test_negative_binomial(self):
        n = [1]
        p = [0.5]
        bad_n = [(- 1)]
        bad_p_one = [(- 1)]
        bad_p_two = [1.5]
        desired = np.array([0, 2, 1], dtype=np.int64)
        random = Generator(MT19937(self.seed))
        neg_binom = random.negative_binomial
        actual = neg_binom((n * 3), p)
        assert_array_equal(actual, desired)
        assert_raises(ValueError, neg_binom, (bad_n * 3), p)
        assert_raises(ValueError, neg_binom, (n * 3), bad_p_one)
        assert_raises(ValueError, neg_binom, (n * 3), bad_p_two)
        random = Generator(MT19937(self.seed))
        neg_binom = random.negative_binomial
        actual = neg_binom(n, (p * 3))
        assert_array_equal(actual, desired)
        assert_raises(ValueError, neg_binom, bad_n, (p * 3))
        assert_raises(ValueError, neg_binom, n, (bad_p_one * 3))
        assert_raises(ValueError, neg_binom, n, (bad_p_two * 3))
    def test_poisson(self):
        lam = [1]
        bad_lam_one = [(- 1)]
        desired = np.array([0, 0, 3])
        random = Generator(MT19937(self.seed))
        # Values above the generator's lam cap must be rejected too.
        max_lam = random._poisson_lam_max
        bad_lam_two = [(max_lam * 2)]
        poisson = random.poisson
        actual = poisson((lam * 3))
        assert_array_equal(actual, desired)
        assert_raises(ValueError, poisson, (bad_lam_one * 3))
        assert_raises(ValueError, poisson, (bad_lam_two * 3))
    def test_zipf(self):
        a = [2]
        bad_a = [0]
        desired = np.array([1, 8, 1])
        random = Generator(MT19937(self.seed))
        zipf = random.zipf
        actual = zipf((a * 3))
        assert_array_equal(actual, desired)
        assert_raises(ValueError, zipf, (bad_a * 3))
        # nan parameters must raise rather than warn about invalid values.
        with np.errstate(invalid='ignore'):
            assert_raises(ValueError, zipf, np.nan)
            assert_raises(ValueError, zipf, [0, 0, np.nan])
    def test_geometric(self):
        p = [0.5]
        bad_p_one = [(- 1)]
        bad_p_two = [1.5]
        desired = np.array([1, 1, 3])
        random = Generator(MT19937(self.seed))
        geometric = random.geometric
        actual = geometric((p * 3))
        assert_array_equal(actual, desired)
        assert_raises(ValueError, geometric, (bad_p_one * 3))
        assert_raises(ValueError, geometric, (bad_p_two * 3))
    def test_hypergeometric(self):
        ngood = [1]
        nbad = [2]
        nsample = [2]
        bad_ngood = [(- 1)]
        bad_nbad = [(- 2)]
        bad_nsample_one = [(- 1)]
        bad_nsample_two = [4]
        desired = np.array([0, 0, 1])
        random = Generator(MT19937(self.seed))
        actual = random.hypergeometric((ngood * 3), nbad, nsample)
        assert_array_equal(actual, desired)
        assert_raises(ValueError, random.hypergeometric, (bad_ngood * 3), nbad, nsample)
        assert_raises(ValueError, random.hypergeometric, (ngood * 3), bad_nbad, nsample)
        assert_raises(ValueError, random.hypergeometric, (ngood * 3), nbad, bad_nsample_one)
        assert_raises(ValueError, random.hypergeometric, (ngood * 3), nbad, bad_nsample_two)
        random = Generator(MT19937(self.seed))
        actual = random.hypergeometric(ngood, (nbad * 3), nsample)
        assert_array_equal(actual, desired)
        assert_raises(ValueError, random.hypergeometric, bad_ngood, (nbad * 3), nsample)
        assert_raises(ValueError, random.hypergeometric, ngood, (bad_nbad * 3), nsample)
        assert_raises(ValueError, random.hypergeometric, ngood, (nbad * 3), bad_nsample_one)
        assert_raises(ValueError, random.hypergeometric, ngood, (nbad * 3), bad_nsample_two)
        random = Generator(MT19937(self.seed))
        hypergeom = random.hypergeometric
        actual = hypergeom(ngood, nbad, (nsample * 3))
        assert_array_equal(actual, desired)
        assert_raises(ValueError, hypergeom, bad_ngood, nbad, (nsample * 3))
        assert_raises(ValueError, hypergeom, ngood, bad_nbad, (nsample * 3))
        assert_raises(ValueError, hypergeom, ngood, nbad, (bad_nsample_one * 3))
        assert_raises(ValueError, hypergeom, ngood, nbad, (bad_nsample_two * 3))
        # Scalar argument validation, including oversized populations.
        assert_raises(ValueError, hypergeom, (- 1), 10, 20)
        assert_raises(ValueError, hypergeom, 10, (- 1), 20)
        assert_raises(ValueError, hypergeom, 10, 10, (- 1))
        assert_raises(ValueError, hypergeom, 10, 10, 25)
        assert_raises(ValueError, hypergeom, (2 ** 30), 10, 20)
        assert_raises(ValueError, hypergeom, 999, (2 ** 31), 50)
        assert_raises(ValueError, hypergeom, 999, [(2 ** 29), (2 ** 30)], 1000)
    def test_logseries(self):
        p = [0.5]
        bad_p_one = [2]
        bad_p_two = [(- 1)]
        desired = np.array([1, 1, 1])
        random = Generator(MT19937(self.seed))
        logseries = random.logseries
        actual = logseries((p * 3))
        assert_array_equal(actual, desired)
        assert_raises(ValueError, logseries, (bad_p_one * 3))
        assert_raises(ValueError, logseries, (bad_p_two * 3))
    def test_multinomial(self):
        # Broadcasting the n parameter across two rows, with and without an
        # explicit size.
        random = Generator(MT19937(self.seed))
        actual = random.multinomial([5, 20], ([(1 / 6.0)] * 6), size=(3, 2))
        desired = np.array([[[0, 0, 2, 1, 2, 0], [2, 3, 6, 4, 2, 3]], [[1, 0, 1, 0, 2, 1], [7, 2, 2, 1, 4, 4]], [[0, 2, 0, 1, 2, 0], [3, 2, 3, 3, 4, 5]]], dtype=np.int64)
        assert_array_equal(actual, desired)
        random = Generator(MT19937(self.seed))
        actual = random.multinomial([5, 20], ([(1 / 6.0)] * 6))
        desired = np.array([[0, 0, 2, 1, 2, 0], [2, 3, 6, 4, 2, 3]], dtype=np.int64)
        assert_array_equal(actual, desired)
class REO(BaseMetric):
def __init__(self, recommendations, config, params, eval_objects, additional_data):
super().__init__(recommendations, config, params, eval_objects, additional_data)
self._cutoff = self._evaluation_objects.cutoff
self._relevance = self._evaluation_objects.relevance.binary_relevance
self._train = self._evaluation_objects.data.train_dict
self._item_clustering_path = self._additional_data.get('clustering_file', False)
if self._item_clustering_path:
self._item_clustering = pd.read_csv(self._item_clustering_path, sep='\t', header=None, names=['id', 'cluster'])
self._item_n_clusters = self._item_clustering['cluster'].nunique()
self._item_clustering = self._item_clustering.groupby('cluster')['id'].apply(set).to_dict()
self._item_clustering_name = self._additional_data['clustering_name']
else:
self._item_n_clusters = 1
self._item_clustering = {}
self._item_clustering_name = ''
self._num = np.zeros(self._item_n_clusters)
self._den = np.zeros(self._item_n_clusters)
self.process()
def name(self):
return f'REO_items:{self._item_clustering_name}'
def __user_pop_reo(self, user_recommendations, user_train, cutoff, user_relevant_items):
recommended_items = set([i for (i, _) in user_recommendations[:cutoff] if (i in user_relevant_items)])
for (i, i_set) in self._item_clustering.items():
self._num[i] += len((recommended_items & i_set))
self._den[i] += len(((i_set & user_relevant_items) - user_train))
def eval(self):
pass
def process(self):
for (u, u_r) in self._recommendations.items():
if len(self._relevance.get_user_rel(u)):
self.__user_pop_reo(u_r, set(self._train[u].keys()), self._cutoff, set(self._relevance.get_user_rel(u)))
PR = (self._num / self._den)
self._metric_objs_list = []
for i_category in range(self._item_n_clusters):
self._metric_objs_list.append(ProxyMetric(name=f'REO-ProbToBeRanked_items:{self._item_clustering_name}-{i_category}', val=PR[i_category], needs_full_recommendations=False))
self._metric_objs_list.append(ProxyMetric(name=f'REO_items:{self._item_clustering_name}', val=(np.std(PR) / np.mean(PR)), needs_full_recommendations=False))
def get(self):
    # Return the ProxyMetric objects built by process() (one per cluster plus
    # the aggregate REO score).
    return self._metric_objs_list
def test_wrap_scalar_function_with_validation():
    """The wrapper must count calls, enforce maxfun, and reject non-scalar output."""
    def identity(x):
        return x

    fcalls, wrapped = optimize._optimize._wrap_scalar_function_maxfun_validation(identity, np.asarray(1), 5)
    # Each of the first five calls bumps the counter by exactly one.
    for call_idx in range(5):
        wrapped(np.asarray(call_idx))
        assert fcalls[0] == call_idx + 1
    # The sixth call exceeds maxfun and must be rejected.
    msg = 'Too many function calls'
    with assert_raises(optimize._optimize._MaxFuncCallError, match=msg):
        wrapped(np.asarray(call_idx))

    # A fresh wrapper: non-scalar objective output is a ValueError.
    fcalls, wrapped = optimize._optimize._wrap_scalar_function_maxfun_validation(identity, np.asarray(1), 5)
    msg = 'The user-provided objective function must return a scalar value.'
    with assert_raises(ValueError, match=msg):
        wrapped(np.array([1, 1]))
def phi_on_basis(L):
    """Image of a basis element, given as a list of compositions: apply phi to
    each composition and multiply the factors in F_algebra(QQ)."""
    F = F_algebra(QQ)
    factors = [phi_on_multiplicative_basis(compo) for compo in L]
    return F.prod(factors)
def get_top5_vertices(hgraph):
    """Return the five bipartite-0 vertices incident to the most distinct hyperedges."""
    # Vertices are the bipartite-0 nodes, in natural-sort order.
    vertices = sorted((node['id'] for node in hgraph['nodes'] if node['bipartite'] == 0), key=natural_keys)
    # For each vertex, collect the distinct hyperedges (link sources) touching
    # it, preserving first-seen order.
    incident = collections.OrderedDict((v, []) for v in vertices)
    for link in hgraph['links']:
        edges = incident[link['target']]
        if link['source'] not in edges:
            edges.append(link['source'])
    degree = collections.OrderedDict((v, len(edges)) for v, edges in incident.items())
    # Stable sort: ties keep the natural vertex order.
    return sorted(degree, key=degree.get, reverse=True)[:5]
def AFMEstimator(linear_feature_columns, dnn_feature_columns, use_attention=True, attention_factor=8, l2_reg_linear=1e-05, l2_reg_embedding=1e-05, l2_reg_att=1e-05, afm_dropout=0, seed=1024, task='binary', model_dir=None, config=None, linear_optimizer='Ftrl', dnn_optimizer='Adagrad', training_chief_hooks=None):
    """Build a tf.estimator.Estimator for the Attentional FM (AFM) model.

    :param linear_feature_columns: feature columns for the linear part.
    :param dnn_feature_columns: feature columns for the embedding/FM part.
    :param use_attention: if True use the AFMLayer (attention over pairwise
        interactions); otherwise fall back to a plain FM layer.
    :param attention_factor: attention network hidden size.
    :param l2_reg_linear / l2_reg_embedding / l2_reg_att: L2 penalties for the
        linear weights, embeddings, and attention network respectively.
    :param afm_dropout: dropout rate inside the AFM layer.
    :param seed: random seed passed to the AFM layer.
    :param task: 'binary' or 'regression', forwarded to deepctr_model_fn.
    :param model_dir / config: forwarded to tf.estimator.Estimator.
    :param linear_optimizer / dnn_optimizer: optimizer names for the two parts.
    :param training_chief_hooks: optional hooks forwarded to the model_fn.
    :return: a configured tf.estimator.Estimator.
    """
    def _model_fn(features, labels, mode, config):
        # Dropout/attention behave differently at train vs. eval time.
        train_flag = (mode == tf.estimator.ModeKeys.TRAIN)
        linear_logits = get_linear_logit(features, linear_feature_columns, l2_reg_linear=l2_reg_linear)
        # Embedding + FM variables live under the DNN scope so the two parts
        # can be optimized separately by deepctr_model_fn.
        with variable_scope(DNN_SCOPE_NAME):
            (sparse_embedding_list, _) = input_from_feature_columns(features, dnn_feature_columns, l2_reg_embedding=l2_reg_embedding)
            if use_attention:
                fm_logit = AFMLayer(attention_factor, l2_reg_att, afm_dropout, seed)(sparse_embedding_list, training=train_flag)
            else:
                fm_logit = FM()(concat_func(sparse_embedding_list, axis=1))
        # Final logit = wide (linear) part + FM/AFM interaction part.
        logits = (linear_logits + fm_logit)
        return deepctr_model_fn(features, mode, logits, labels, task, linear_optimizer, dnn_optimizer, training_chief_hooks=training_chief_hooks)
    return tf.estimator.Estimator(_model_fn, model_dir=model_dir, config=config)
def _subsample_by_classes(all_examples, labels, num_per_class=None):
    """Randomly keep at most num_per_class[label] examples for each label.

    With num_per_class=None the input list is returned unchanged. Examples
    whose label is not in `labels` are dropped. Uses random.shuffle, so the
    selection depends on the global random state.
    """
    if num_per_class is None:
        return all_examples
    # Bucket the examples by label, ignoring labels we were not asked for.
    buckets = {label: [] for label in labels}
    for example in all_examples:
        if example.label in buckets:
            buckets[example.label].append(example)
    picked_examples = []
    for label in labels:
        bucket = buckets[label]
        random.shuffle(bucket)
        kept = bucket[:num_per_class[label]]
        picked_examples.extend(kept)
        print(f"number of examples with label '{label}': {len(kept)}")
    return picked_examples
def mock_database():
    """Build a SingleDatabase stand-in whose schema/table getters yield DataFrame mocks."""
    db = Mock(spec=SingleDatabase)
    schema_mock = Mock(name='schema', spec=pd.DataFrame)
    table_mock = Mock(name='table', spec=pd.DataFrame)
    db.get_schema_given.return_value = schema_mock
    db.get_table_given.return_value = table_mock
    return db
def getintegrator(rhs, u0, solver, context):
    """Return a zero-argument stepper closure for the integrator named in solver.params.

    Supported values of params.integrator:
      'RK4'                       - classic fixed-step Runge-Kutta 4.
      'BS5_adaptive'/'BS5_fixed'  - Bogacki-Shampine 5(4) embedded pair,
                                    adaptive or fixed step.
      'ForwardEuler', 'AB2'       - first-order Euler / two-step Adams-Bashforth.

    Raises NotImplementedError for an unknown integrator name (previously the
    function silently returned None, deferring the crash to the caller).
    """
    params = solver.params
    u1 = u0.copy()
    if params.integrator == 'RK4':
        a = np.array([1.0 / 6.0, 1.0 / 3.0, 1.0 / 3.0, 1.0 / 6.0], dtype=context.float)
        b = np.array([0.5, 0.5, 1.0], dtype=context.float)
        u2 = u0.copy()

        def func():
            return RK4(u0, u1, u2, rhs, a, b, params.dt, solver, context)
        return func

    elif params.integrator in ('BS5_adaptive', 'BS5_fixed'):
        # Bogacki-Shampine 5(4) Butcher tableau. The denominators 19744439 and
        # 15491840 were garbled in the original source (bare "x / )"); restored
        # from the published BS5 coefficients.
        A = np.array([
            [0, 0, 0, 0, 0, 0, 0, 0],
            [1 / 6, 0, 0, 0, 0, 0, 0, 0],
            [2 / 27, 4 / 27, 0, 0, 0, 0, 0, 0],
            [183 / 1372, -162 / 343, 1053 / 1372, 0, 0, 0, 0, 0],
            [68 / 297, -4 / 11, 42 / 143, 1960 / 3861, 0, 0, 0, 0],
            [597 / 22528, 81 / 352, 63099 / 585728, 58653 / 366080, 4617 / 20480, 0, 0, 0],
            [174197 / 959244, -30942 / 79937, 8152137 / 19744439, 666106 / 1039181, -29421 / 29068, 482048 / 414219, 0, 0],
            [587 / 8064, 0, 4440339 / 15491840, 24353 / 124800, 387 / 44800, 2152 / 5985, 7267 / 94080, 0]],
            dtype=context.float)
        # 5th-order weights (same as the last A row: FSAL pair).
        b = np.array([587 / 8064, 0, 4440339 / 15491840, 24353 / 124800, 387 / 44800, 2152 / 5985, 7267 / 94080, 0], dtype=context.float)
        # Embedded 4th-order weights for the error estimate.
        bhat = np.array([2479 / 34992, 0, 123 / 416, 612941 / 3411720, 43 / 1440, 2272 / 6561, 79937 / 1113912, 3293 / 556956], dtype=context.float)
        err_order = 4
        errnorm = '2'
        fsal = True  # First-Same-As-Last: the 8th stage is reused as stage 1 of the next step.
        adaptive = params.integrator == 'BS5_adaptive'
        offset = [0]
        s = A.shape[0]
        # Work arrays: one RHS slot per stage, plus scale and error buffers.
        fY_hat = np.zeros((s,) + u0.shape, dtype=u0.dtype)
        sc = np.zeros_like(u0)
        err = np.zeros_like(u0)

        def func():
            return adaptiveRK(A, b, bhat, err_order, fY_hat, u1, sc, err, fsal, offset, params.TOL, params.TOL, adaptive, errnorm, rhs, u0, solver, params.dt, params.tstep, context, solver.additional_callback, params)
        return func

    elif params.integrator == 'ForwardEuler':
        def func():
            return ForwardEuler(u0, rhs, params.dt, solver, context)
        return func

    elif params.integrator == 'AB2':
        def func():
            return AB2(u0, u1, rhs, params.dt, params.tstep, solver, context)
        return func

    raise NotImplementedError('Integrator %s is not implemented' % params.integrator)
def check_gradient_numerical(channels=4, grad_value=True, grad_sampling_loc=True, grad_attn_weight=True):
    """Run torch.autograd.gradcheck on MSDeformAttnFunction for the given channel count.

    NOTE(review): relies on module-level N, S, M, Lq, L, P, shapes and
    level_start_index, and requires a CUDA device.
    """
    value = torch.rand(N, S, M, channels).cuda() * 0.01
    sampling_locations = torch.rand(N, Lq, M, L, P, 2).cuda()
    # Strictly positive weights, normalised jointly over the last two dims
    # (levels x points) so they sum to one per query/head.
    attention_weights = torch.rand(N, Lq, M, L, P).cuda() + 1e-05
    attention_weights /= attention_weights.sum(-1, keepdim=True).sum(-2, keepdim=True)
    value.requires_grad = grad_value
    sampling_locations.requires_grad = grad_sampling_loc
    attention_weights.requires_grad = grad_attn_weight
    im2col_step = 2
    func = MSDeformAttnFunction.apply
    # gradcheck needs double precision for its finite-difference comparison.
    gradok = gradcheck(func, (value.double(), shapes, level_start_index, sampling_locations.double(), attention_weights.double(), im2col_step))
    print(f'* {gradok} check_gradient_numerical(D={channels})')
class XLMRobertaTokenizer(PreTrainedTokenizer):
    """XLM-RoBERTa tokenizer backed by SentencePiece.

    Token ids are shifted by ``fairseq_offset`` (1) relative to the raw
    SentencePiece ids so that the fairseq special tokens <s>/<pad>/</s>/<unk>
    occupy ids 0-3; <mask> is appended after the SentencePiece vocabulary.
    """

    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    # NOTE(review): upstream XLM-R lists ['input_ids', 'attention_mask'] here —
    # confirm the omission of 'input_ids' is intentional before relying on it.
    model_input_names = ['attention_mask']

    def __init__(self, vocab_file, bos_token='<s>', eos_token='</s>', sep_token='</s>', cls_token='<s>', unk_token='<unk>', pad_token='<pad>', mask_token='<mask>', **kwargs):
        super().__init__(bos_token=bos_token, eos_token=eos_token, unk_token=unk_token, sep_token=sep_token, cls_token=cls_token, pad_token=pad_token, mask_token=mask_token, **kwargs)
        self.sp_model = spm.SentencePieceProcessor()
        self.sp_model.Load(str(vocab_file))
        self.vocab_file = vocab_file
        # fairseq reserves ids 0-3; every SentencePiece id is shifted up by one.
        self.fairseq_tokens_to_ids = {'<s>': 0, '<pad>': 1, '</s>': 2, '<unk>': 3}
        self.fairseq_offset = 1
        # <mask> is placed right after the (shifted) SentencePiece vocabulary.
        self.fairseq_tokens_to_ids['<mask>'] = (len(self.sp_model) + self.fairseq_offset)
        self.fairseq_ids_to_tokens = {v: k for (k, v) in self.fairseq_tokens_to_ids.items()}

    def __getstate__(self):
        # The SentencePiece processor is not picklable; drop it and reload
        # from self.vocab_file in __setstate__.
        state = self.__dict__.copy()
        state['sp_model'] = None
        return state

    def __setstate__(self, d):
        self.__dict__ = d
        self.sp_model = spm.SentencePieceProcessor()
        self.sp_model.Load(self.vocab_file)

    def build_inputs_with_special_tokens(self, token_ids_0: List[int], token_ids_1: Optional[List[int]]=None) -> List[int]:
        """Single sequence: <s> X </s>; pair: <s> A </s></s> B </s>."""
        if (token_ids_1 is None):
            return (([self.cls_token_id] + token_ids_0) + [self.sep_token_id])
        cls = [self.cls_token_id]
        sep = [self.sep_token_id]
        return (((((cls + token_ids_0) + sep) + sep) + token_ids_1) + sep)

    def get_special_tokens_mask(self, token_ids_0: List[int], token_ids_1: Optional[List[int]]=None, already_has_special_tokens: bool=False) -> List[int]:
        """Return a mask with 1 for special tokens and 0 for sequence tokens."""
        if already_has_special_tokens:
            if (token_ids_1 is not None):
                raise ValueError('You should not supply a second sequence if the provided sequence of ids is already formated with special tokens for the model.')
            return list(map((lambda x: (1 if (x in [self.sep_token_id, self.cls_token_id]) else 0)), token_ids_0))
        if (token_ids_1 is None):
            return (([1] + ([0] * len(token_ids_0))) + [1])
        return (((([1] + ([0] * len(token_ids_0))) + [1, 1]) + ([0] * len(token_ids_1))) + [1])

    def create_token_type_ids_from_sequences(self, token_ids_0: List[int], token_ids_1: Optional[List[int]]=None) -> List[int]:
        """XLM-R does not use token type ids: always a list of zeros."""
        sep = [self.sep_token_id]
        cls = [self.cls_token_id]
        if (token_ids_1 is None):
            return (len(((cls + token_ids_0) + sep)) * [0])
        return (len((((((cls + token_ids_0) + sep) + sep) + token_ids_1) + sep)) * [0])

    @property
    def vocab_size(self):
        """Full vocabulary size: SentencePiece + fairseq offset + <mask>.

        Fix: restored the @property decorator (as upstream declares it) —
        get_vocab() iterates range(self.vocab_size) without calling it, which
        raised TypeError while this was a plain method.
        """
        return ((len(self.sp_model) + self.fairseq_offset) + 1)

    def get_vocab(self):
        """Map every known token string to its id, including added tokens."""
        vocab = {self.convert_ids_to_tokens(i): i for i in range(self.vocab_size)}
        vocab.update(self.added_tokens_encoder)
        return vocab

    def _tokenize(self, text):
        return self.sp_model.EncodeAsPieces(text)

    def _convert_token_to_id(self, token):
        """Token string -> id, honoring the fairseq special tokens and offset."""
        if (token in self.fairseq_tokens_to_ids):
            return self.fairseq_tokens_to_ids[token]
        spm_id = self.sp_model.PieceToId(token)
        # SentencePiece returns 0 for unknown pieces; map that to unk_token_id.
        return ((spm_id + self.fairseq_offset) if spm_id else self.unk_token_id)

    def _convert_id_to_token(self, index):
        """Id -> token string, undoing the fairseq offset for SentencePiece ids."""
        if (index in self.fairseq_ids_to_tokens):
            return self.fairseq_ids_to_tokens[index]
        return self.sp_model.IdToPiece((index - self.fairseq_offset))

    def convert_tokens_to_string(self, tokens):
        """Join pieces and turn the SentencePiece underline back into spaces."""
        out_string = ''.join(tokens).replace(SPIECE_UNDERLINE, ' ').strip()
        return out_string

    def save_vocabulary(self, save_directory: str, filename_prefix: Optional[str]=None) -> Tuple[str]:
        """Copy the SentencePiece model file into save_directory; return its path."""
        if (not os.path.isdir(save_directory)):
            logger.error('Vocabulary path ({}) should be a directory'.format(save_directory))
            return
        out_vocab_file = os.path.join(save_directory, (((filename_prefix + '-') if filename_prefix else '') + VOCAB_FILES_NAMES['vocab_file']))
        # Avoid copying a file onto itself.
        if (os.path.abspath(self.vocab_file) != os.path.abspath(out_vocab_file)):
            copyfile(self.vocab_file, out_vocab_file)
        return (out_vocab_file,)
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.