code stringlengths 281 23.7M |
|---|
class Logger(object):
    """Process-wide logging facade configured once via config_logger().

    All state (paths, formatter, cached metric rows) lives on the class, so
    every method is a classmethod and no instance is ever created.

    NOTE(review): the original methods took ``cls`` but carried no
    ``@classmethod`` decorators (and internally called ``cls.info(value=...)``,
    which only works for bound classmethods) -- the decorators were evidently
    stripped and are restored here.
    """

    # Numeric severity aliases (mirrors the stdlib ordering).
    INFO = 0
    DEBUG = 1
    WARNING = 2
    ERROR = 3
    CRITICAL = 4

    @classmethod
    def config_logger(cls, file_folder='.', level='info', save_log=False, display_source=False):
        """Configure the root logger and class-level file paths.

        Args:
            file_folder: directory receiving ``log.log`` and ``log-N.json``.
            level: 'info', 'debug', 'warning', 'error' or 'critical'.
            save_log: when True, also attach a FileHandler for ``log.log``.
            display_source: include ``[filename:lineno]`` in each record.
        """
        cls.file_folder = file_folder
        cls.file_json = os.path.join(file_folder, 'log-1.json')
        cls.file_log = os.path.join(file_folder, 'log.log')
        cls.values = []  # in-memory metric rows flushed by save_json()
        cls.save_log = save_log
        logger = logging.getLogger()
        if display_source:
            cls.formatter = logging.Formatter('%(asctime)s [%(filename)s:%(lineno)d] %(levelname)s %(message)s')
        else:
            cls.formatter = logging.Formatter('%(asctime)s %(levelname)s %(message)s')
        cls.level = level
        # Map the textual level onto the stdlib constant; unknown levels
        # leave the root logger's level untouched (as in the original).
        level_map = {'info': logging.INFO, 'debug': logging.DEBUG, 'warning': logging.WARNING,
                     'error': logging.ERROR, 'critical': logging.CRITICAL}
        if level in level_map:
            logger.setLevel(level_map[level])
        strhdlr = logging.StreamHandler()
        strhdlr.setFormatter(cls.formatter)
        logger.addHandler(strhdlr)
        if save_log:
            cls.add_log_file(cls.file_log)
        cls.logger = logger

    @classmethod
    def add_log_file(cls, logfile):
        """Attach an extra FileHandler writing to ``logfile``."""
        assert (cls.save_log is True)
        hdlr = logging.FileHandler(logfile)
        hdlr.setFormatter(cls.formatter)
        cls.logger.addHandler(hdlr)

    @classmethod
    def display_metric(cls, name, values, tags):
        # BUG FIX: the original format string referenced {tags} without
        # supplying it, raising KeyError on every call.
        cls.info('{name} ({tags}): {values} '.format(name=name, tags=tags, values=values))

    @classmethod
    def cache_metric_in_memory(cls, name, values, tags):
        """Queue one metric row (flushed later by save_json)."""
        cls.values.append({'measurement': name, **tags, **values})

    @classmethod
    def log_timer(cls, name, values, tags):
        # BUG FIX: same missing ``tags`` substitution as display_metric.
        cls.info('{name} ({tags}): {values} '.format(name=name, tags=tags, values=values))

    @classmethod
    def info(cls, value):
        cls.logger.info(value)

    @classmethod
    def debug(cls, value):
        cls.logger.debug(value)

    @classmethod
    def warning(cls, value):
        cls.logger.warning(value)

    @classmethod
    def error(cls, value):
        cls.logger.error(value)

    @classmethod
    def critical(cls, value):
        cls.logger.critical(value)

    @classmethod
    def save_json(cls):
        """Dump cached rows to the current json file; rotate to a fresh file
        once more than 1000 rows have accumulated."""
        with open(cls.file_json, 'w') as fp:
            json.dump(cls.values, fp, indent=' ')
        if len(cls.values) > 1000:  # was the float literal 1000.0
            cls.values = []
            cls.redirect_new_json()

    @classmethod
    def redirect_new_json(cls):
        """Point file_json at the next free log-N.json slot in file_folder."""
        existing_json_files = [file for file in os.listdir(cls.file_folder) if ('json' in file)]
        cls.file_json = os.path.join(cls.file_folder, 'log-{}.json'.format((len(existing_json_files) + 1)))
class JavaScriptLoader(ScriptLoader):
    """Injects project .js files into every rendered Gradio page.

    On construction, all JavaScript sources are read and
    ``gradio.routes.templates.TemplateResponse`` is monkey-patched so each
    page gets the scripts spliced in just before ``</head>``.
    """

    def __init__(self):
        super().__init__('js')
        # Keep a handle on the untouched factory so template_response can
        # delegate to it.
        self.original_template = gradio.routes.templates.TemplateResponse
        self.load_js()
        gradio.routes.templates.TemplateResponse = self.template_response

    def load_js(self):
        """Read every discovered .js file into self.loaded_scripts as a <script> tag."""
        scripts = ScriptLoader.get_scripts(self.path, self.script_type)
        for script_path, script_name in scripts:
            with open(script_path, 'r', encoding='utf-8') as source:
                self.loaded_scripts.append(f'''
<!--{script_name}-->
<script>
{source.read()}
</script>''')

    def template_response(self, *args, **kwargs):
        """Replacement TemplateResponse: renders via the original factory,
        then splices the cached scripts into the page head."""
        response = self.original_template(*args, **kwargs)
        payload = f'''{''.join(self.loaded_scripts)}
</head>'''
        response.body = response.body.replace('</head>'.encode('utf-8'), payload.encode('utf-8'))
        response.init_headers()
        return response
def manager_function(input_queue: Queue, output_queue: Queue, worker_function: Callable) -> None:
    """Long-running manager loop: pull tasks from ``input_queue``, run each in
    a fresh child process, and forward the worker's result to ``output_queue``.

    The loop terminates when a task arrives with ``do_exit`` set.
    """
    logging.getLogger().setLevel(logging.ERROR)
    warnings.filterwarnings(action='ignore', category=UserWarning, module=MODULE_ADDONS_INSTALL)
    logging.info('MANAGER: initializing')
    worker_result_queue = Queue()

    def worker_function_with_queue(*worker_args) -> None:
        # Runs inside the child process; ships the result back to the manager.
        worker_result_queue.put(worker_function(*worker_args))

    while True:
        logging.info('MANAGER: waiting for a task')
        child_task = input_queue.get()
        logging.info('MANAGER: Got a task')
        if child_task.do_exit:
            break
        child = Process(target=worker_function_with_queue, args=child_task.args)
        child.start()
        logging.info('MANAGER: Waiting for result')
        output_queue.put(worker_result_queue.get())
        child.join()
    logging.info('MANAGER: done, reached end of function.')
class EnumList(EnumType):
    """Convenience EnumType builder.

    Positional arguments are constant names (or ``(name, alias)`` tuples) and
    are turned into rank-valued keyword arguments before delegating to
    EnumType. Keyword extras: ``ctype`` (default 'int') and optional ``cname``.
    """

    def __init__(self, *args, **kwargs):
        assert (len(kwargs) in (0, 1, 2)), (type(self).__name__ + ': expected 0 to 2 extra parameters ("ctype", "cname").')
        ctype = kwargs.pop('ctype', 'int')
        cname = kwargs.pop('cname', None)
        for rank, spec in enumerate(args):
            if isinstance(spec, (list, tuple)):
                # (name, alias) pair: the value carries the alias plus rank.
                if len(spec) != 2:
                    raise TypeError(f'{type(self).__name__}: when using a tuple to define a constant, your tuple should contain 2 values: constant name followed by constant alias.')
                name, alias = spec
                if not isinstance(alias, str):
                    raise TypeError(f'{type(self).__name__}: constant alias should be a string, got "{alias}".')
                value = (alias, rank)
            else:
                name = spec
                value = rank
            if not isinstance(name, str):
                raise TypeError(f'{type(self).__name__}: constant name should be a string, got "{name}".')
            if name in kwargs:
                raise TypeError(f'{type(self).__name__}: constant name already used ("{name}").')
            kwargs[name] = value
        kwargs.update(ctype=ctype)
        if cname is not None:
            kwargs.update(cname=cname)
        super().__init__(**kwargs)
def read_tables(data_dir, bc):
    """Register the five parquet-backed tables under ``data_dir`` with ``bc``.

    ``bc`` is any object exposing ``create_table(name, path)`` (e.g. a SQL
    context); tables are registered in a fixed order with glob paths.
    """
    for table in ('web_sales', 'web_returns', 'date_dim', 'item', 'warehouse'):
        bc.create_table(table, os.path.join(data_dir, table + '/*.parquet'))
class Robot(namedtuple('Robot', ['name', 'password', 'created', 'last_accessed', 'description', 'unstructured_metadata'])):
    """Immutable robot-account record with a JSON-friendly dict view."""

    def to_dict(self, include_metadata=False, include_token=False):
        """Serialize the record. Dates go through format_date; None stays None.

        Args:
            include_metadata: also expose ``unstructured_metadata``.
            include_token: also expose the password under the ``token`` key.
        """
        created = None if self.created is None else format_date(self.created)
        accessed = None if self.last_accessed is None else format_date(self.last_accessed)
        data = {
            'name': self.name,
            'created': created,
            'last_accessed': accessed,
            'description': self.description,
        }
        if include_token:
            data['token'] = self.password
        if include_metadata:
            data['unstructured_metadata'] = self.unstructured_metadata
        return data
def set_settings(**new_settings):
    """Decorator factory: run a test (or TestCase class) under overridden settings.

    Applied to a class, builds a SettingsTestCase subclass carrying the
    overrides; applied to a function, wraps it so the settings are overridden
    for the duration of the call and restored afterwards.
    """
    import functools  # local import: restored wraps() below needs it

    def decorator(testcase):
        if type(testcase) is type:
            namespace = {'OVERRIDE_SETTINGS': new_settings, 'ORIGINAL_SETTINGS': {}}
            wrapper = type(testcase.__name__, (SettingsTestCase, testcase), namespace)
        else:
            # BUG FIX: the original had a bare ``(testcase)`` expression here,
            # almost certainly a stripped ``@functools.wraps(testcase)``
            # decorator; restored so the wrapper keeps the test's identity.
            @functools.wraps(testcase)
            def wrapper(*args, **kwargs):
                old_settings = override_settings(new_settings)
                try:
                    testcase(*args, **kwargs)
                finally:
                    # Always restore, even when the test raises.
                    override_settings(old_settings)
        return wrapper
    return decorator
def train(train_loader, model, criterion, optimizer, epoch, cfg, logger, writer):
    """Run one epoch of two-view self-supervised pretraining.

    Args:
        train_loader: yields ``(images, _)`` where ``images`` is a pair of
            augmented views; the label component is unused.
        model: called as ``model(x1=..., x2=...)`` and returning
            ``(p1, p2, z1, z2)`` predictor/projector outputs.
        criterion: per-sample similarity; applied as ``criterion(p, z)``.
        optimizer: optimizer over the model's parameters (lr read for logs).
        epoch: current epoch index (used for logging only).
        cfg: config object providing ``log_interval``.
        logger: optional text logger; pass None to disable text logs.
        writer: optional scalar writer (tensorboard-style); None disables it.
    """
    model.train()
    batch_time = AverageMeter()
    data_time = AverageMeter()
    losses = AverageMeter()
    num_iter = len(train_loader)
    end = time.time()
    time1 = time.time()
    for (idx, (images, _)) in enumerate(train_loader):
        # Move both views to the GPU; non_blocking overlaps with compute.
        images[0] = images[0].cuda(non_blocking=True)
        images[1] = images[1].cuda(non_blocking=True)
        data_time.update((time.time() - end))
        (p1, p2, z1, z2) = model(x1=images[0], x2=images[1])
        # Symmetric negative-similarity loss over both (predictor, projector)
        # pairings; higher similarity -> lower loss.
        loss = ((- 2) * (criterion(p1, z2).mean() + criterion(p2, z1).mean()))
        losses.update(loss.item(), images[0].size(0))
        optimizer.zero_grad()
        loss.backward()
        optimizer.step()
        batch_time.update((time.time() - end))
        end = time.time()
        if ((((idx + 1) % cfg.log_interval) == 0) and (logger is not None)):
            lr = optimizer.param_groups[0]['lr']
            logger.info(f'Epoch [{epoch}][{(idx + 1)}/{num_iter}] - data_time: {data_time.avg:.3f}, batch_time: {batch_time.avg:.3f}, lr: {lr:.5f}, loss: {loss:.3f}({losses.avg:.3f})')
    if (logger is not None):
        time2 = time.time()
        epoch_time = format_time((time2 - time1))
        logger.info(f'Epoch [{epoch}] - epoch_time: {epoch_time}, train_loss: {losses.avg:.3f}')
    if (writer is not None):
        lr = optimizer.param_groups[0]['lr']
        writer.add_scalar('Pretrain/lr', lr, epoch)
        writer.add_scalar('Pretrain/loss', losses.avg, epoch)
def test_generate_env_name_ignores_case_for_case_insensitive_fs(poetry: Poetry, tmp_path: Path) -> None:
    """Names derived from paths differing only in letter case must collide on
    Windows (case-insensitive filesystem) and stay distinct elsewhere."""
    name_mixed = EnvManager.generate_env_name(poetry.package.name, 'MyDiR')
    name_swapped = EnvManager.generate_env_name(poetry.package.name, 'mYdIr')
    if sys.platform == 'win32':
        assert name_mixed == name_swapped
    else:
        assert name_mixed != name_swapped
def test_marker_union_intersect_marker_union() -> None:
    """Intersecting two 2-term marker unions must distribute into the
    4-term disjunctive normal form."""
    left = parse_marker('sys_platform == "darwin" or python_version < "3.4"')
    right = parse_marker('implementation_name == "cpython" or os_name == "Windows"')
    expected = (
        'sys_platform == "darwin" and implementation_name == "cpython" '
        'or sys_platform == "darwin" and os_name == "Windows" '
        'or python_version < "3.4" and implementation_name == "cpython" '
        'or python_version < "3.4" and os_name == "Windows"'
    )
    assert str(left.intersect(right)) == expected
# NOTE(review): the original first line was the bare (and syntactically
# invalid) ``(m2m_changed, sender=Topic.mirrors.through)`` -- a Django
# signal decorator with its leading ``@receiver`` stripped. Restored below;
# confirm ``receiver`` is imported at the top of this file.
@receiver(m2m_changed, sender=Topic.mirrors.through)
def update_topic_disambiguation(instance, action, pk_set, **kwargs):
    """m2m_changed handler keeping the ``mirrors`` relation transitively
    consistent: when topics are linked to (or removed from)
    ``instance.mirrors``, also link/unlink the mirrors-of-mirrors so every
    related topic agrees."""
    appended_topics = Topic.objects.filter(pk__in=pk_set)
    current = instance.mirrors.all()
    # Only react once the relation change has actually been committed.
    if (action not in ('post_add', 'post_remove')):
        return
    for topic in appended_topics:
        related = topic.mirrors.all()
        # Propagate the other topic's mirrors onto this instance.
        for mirror in related:
            if (mirror != instance):
                if (action == 'post_add'):
                    if (mirror not in current):
                        instance.mirrors.add(mirror)
                elif (mirror in current):
                    instance.mirrors.remove(mirror)
        # Propagate this instance's mirrors onto the other topic.
        for neighbor in current:
            if (neighbor != topic):
                if (neighbor not in related):
                    if (action == 'post_add'):
                        topic.mirrors.add(neighbor)
                    elif (action == 'post_remove'):
                        topic.mirrors.remove(neighbor)
class Torus(DynSys):
    # NOTE(review): ``_rhs`` takes neither ``self`` nor ``cls`` -- a
    # decorator (e.g. ``@staticjit`` in the dysts code base) appears to have
    # been stripped here; confirm against the upstream source.
    def _rhs(x, y, z, t, a, n, r):
        """Right-hand side of the torus curve: a point winding around a torus
        of major radius ``r`` and tube radius ``a``, completing ``n`` tube
        windings per revolution. Returns (xdot, ydot, zdot)."""
        # d/dt of (r + a*cos(n*t)) * cos(t)
        xdot = (((((- a) * n) * np.sin((n * t))) * np.cos(t)) - ((r + (a * np.cos((n * t)))) * np.sin(t)))
        # d/dt of (r + a*cos(n*t)) * sin(t)
        ydot = (((((- a) * n) * np.sin((n * t))) * np.sin(t)) + ((r + (a * np.cos((n * t)))) * np.cos(t)))
        # d/dt of a*sin(n*t) -- vertical oscillation around the tube.
        zdot = ((a * n) * np.cos((n * t)))
        return (xdot, ydot, zdot)
class TestNumeric:
    """Tests for configtypes._Numeric bound handling.

    NOTE(review): two decorators in the original had their leading ``@``
    stripped (the ``klass`` fixture, and the bare ``.parametrize`` line which
    was a syntax error); both are restored here.
    """

    @pytest.fixture
    def klass(self):
        return configtypes._Numeric

    def test_minval_gt_maxval(self, klass):
        # An inverted bound pair must be rejected at construction time.
        with pytest.raises(ValueError):
            klass(minval=2, maxval=1)

    def test_special_bounds(self, klass):
        # 'maxint'/'maxint64' resolve to the MAXVALS table entries.
        numeric = klass(minval='maxint', maxval='maxint64')
        assert (numeric.minval == qtutils.MAXVALS['int'])
        assert (numeric.maxval == qtutils.MAXVALS['int64'])

    @pytest.mark.parametrize('kwargs, val, valid', [({}, 1337, True), ({}, 0, True), ({'minval': 2}, 2, True), ({'maxval': 2}, 2, True), ({'minval': 2, 'maxval': 3}, 2, True), ({'minval': 2, 'maxval': 3}, 3, True), ({}, None, True), ({'minval': 2}, 1, False), ({'maxval': 2}, 3, False), ({'minval': 2, 'maxval': 3}, 1, False), ({'minval': 2, 'maxval': 3}, 4, False), ({'zero_ok': False}, 0, False), ({'minval': (- 1)}, 0, True)])
    def test_validate_bounds_invalid(self, klass, kwargs, val, valid):
        if valid:
            klass(**kwargs)._validate_bounds(val)
        else:
            with pytest.raises(configexc.ValidationError):
                klass(**kwargs)._validate_bounds(val)

    def test_suffix(self, klass):
        # The suffix is interpolated into the validation error message.
        with pytest.raises(configexc.ValidationError, match='must be 2% or smaller'):
            klass(maxval=2)._validate_bounds(3, suffix='%')
def test_walsh_control():
    """new_wamf1_control: reject over-rotation, and check the exact segment
    tables for pi, pi/2 and pi/4 rotations."""
    # A Rabi rotation that the maximum rate cannot achieve must be rejected.
    with pytest.raises(ArgumentsValueError):
        _ = new_wamf1_control(rabi_rotation=0.3, maximum_rabi_rate=np.pi)

    def as_segments(control):
        # Per-segment columns: [amplitude_x, amplitude_y, detuning, duration].
        return np.vstack((control.amplitude_x, control.amplitude_y, control.detunings, control.durations)).T

    walsh_pi = new_wamf1_control(rabi_rotation=np.pi, azimuthal_angle=(- 0.35), maximum_rabi_rate=(2 * np.pi))
    expected_pi = np.array([[5., (- 2.), 0.0, 0.5], [2., (- 1.), 0.0, 0.5], [2., (- 1.), 0.0, 0.5], [5., (- 2.), 0.0, 0.5]])
    assert np.allclose(as_segments(walsh_pi), expected_pi)

    walsh_pi_on_2 = new_wamf1_control(rabi_rotation=(np.pi / 2.0), azimuthal_angle=0.57, maximum_rabi_rate=(2 * np.pi))
    expected_pi_on_2 = np.array([[5., 3., 0.0, 0.], [3., 1.9799236, 0.0, 0.], [3., 1.9799236, 0.0, 0.], [5., 3., 0.0, 0.]])
    assert np.allclose(as_segments(walsh_pi_on_2), expected_pi_on_2)

    walsh_pi_on_4 = new_wamf1_control(rabi_rotation=(np.pi / 4.0), azimuthal_angle=(- 0.273), maximum_rabi_rate=(2 * np.pi))
    expected_pi_on_4 = np.array([[6., (- 1.), 0.0, 0.3265702], [4., (- 1.), 0.0, 0.3265702], [4., (- 1.), 0.0, 0.3265702], [6., (- 1.), 0.0, 0.3265702]])
    assert np.allclose(as_segments(walsh_pi_on_4), expected_pi_on_4)
def test_renext_bottleneck():
    """BottleneckX argument validation plus a forward-pass shape check."""
    # Only the known styles are accepted.
    with pytest.raises(AssertionError):
        BottleneckX(64, 64, groups=32, base_width=4, style='tensorflow')

    # In pytorch style the stride is carried by conv2.
    strided = BottleneckX(64, 64, groups=32, base_width=4, stride=2, style='pytorch')
    assert strided.conv2.stride == (2, 2)
    assert strided.conv2.groups == 32
    assert strided.conv2.out_channels == 128

    # A DCN config together with an explicit conv_cfg must be rejected.
    dcn = dict(type='DCN', deform_groups=1, fallback_on_stride=False)
    with pytest.raises(AssertionError):
        BottleneckX(64, 64, groups=32, base_width=4, dcn=dcn, conv_cfg=dict(type='Conv'))
    BottleneckX(64, 64, dcn=dcn)

    # A forward pass preserves the input's channel and spatial shape.
    block = BottleneckX(64, 16, groups=32, base_width=4)
    features = block(torch.randn(1, 64, 56, 56))
    assert features.shape == torch.Size([1, 64, 56, 56])
def set_client(scheduler=None):
    """(Re)connect the module-level client to ``scheduler``.

    When the scheduler address changes, the previous client is shut down
    first; passing None simply tears the client down.

    Returns:
        The connected Client, or None when no scheduler was given.
    """
    if (scheduler != config._SCHEDULER) and (config._CLIENT is not None):
        # Best-effort shutdown of the stale client. BUG FIX: the original
        # only cleared config._CLIENT when shutdown() succeeded, leaving a
        # dead client cached after a failed shutdown.
        try:
            config._CLIENT.shutdown()
        except Exception:
            pass
        finally:
            config._CLIENT = None
    config._SCHEDULER = scheduler
    if scheduler is not None:
        config._CLIENT = Client(scheduler)
    return config._CLIENT
def _get_builtin_metadata():
    """Build metadata from the module-level ``categories`` list: a raw-id ->
    contiguous-id mapping plus class names, both ordered by raw id."""
    ordered = sorted(categories, key=lambda cat: cat['id'])
    return {
        'thing_dataset_id_to_contiguous_id': {cat['id']: idx for idx, cat in enumerate(ordered)},
        'thing_classes': [cat['name'] for cat in ordered],
    }
def mnist_generator(data, batch_size, n_labelled, limit=None, selecting_label=None, bias=None, portion=1):
    """Build an epoch-generator factory over an MNIST-style dataset.

    Args:
        data: ``(images, targets)`` numpy arrays; each image row has 784 values.
        batch_size: rows per yielded batch (must divide the kept row count).
        n_labelled: when not None, additionally yield a 0/1 mask marking the
            first ``n_labelled`` rows as labelled.
        limit: when not None, truncate to a bucketed size (see NOTE below).
        selecting_label: iterable of labels whose rows are duplicated
            (oversampled) before shuffling.
        bias: a single label whose rows are dropped entirely.
        portion: multiplier applied to the truncation size.

    Returns:
        A zero-argument function; each call reshuffles the data and yields
        ``(images, targets[, labelled])`` batches for one epoch.
    """
    (images, targets) = data
    if bias is not None:
        # Drop every sample of the excluded class.
        images = images[(targets != bias)]
        targets = targets[(targets != bias)]
    if selecting_label is None:
        # Shuffle images/targets in lockstep by replaying the RNG state.
        rng_state = numpy.random.get_state()
        numpy.random.shuffle(images)
        numpy.random.set_state(rng_state)
        numpy.random.shuffle(targets)
    else:
        # Oversample the selected labels by appending their rows once more.
        for label in range(0, len(selecting_label)):
            images = numpy.concatenate([images, images[(targets == selecting_label[label])]])
            targets = numpy.concatenate([targets, targets[(targets == selecting_label[label])]])
        rng_state = numpy.random.get_state()
        numpy.random.shuffle(images)
        numpy.random.set_state(rng_state)
        numpy.random.shuffle(targets)
    if limit is not None:
        # NOTE(review): these bucket thresholds ignore the actual *value* of
        # ``limit`` (only its presence matters) -- preserved as-is, but
        # confirm this is intended.
        if len(images) > 4500:
            L = 4500
        elif (len(images) > 4400) and (len(images) < 4500):
            L = 4400
        elif (len(images) > 4300) and (len(images) < 4400):
            L = 4300
        elif (len(images) > 4200) and (len(images) < 4300):
            L = 4200
        else:
            L = 1000
        L = L * portion
        print('WARNING ONLY FIRST {} MNIST DIGITS'.format(L))
        images = images.astype('float32')[:L]
        targets = targets.astype('int32')[:L]
    if n_labelled is not None:
        labelled = numpy.zeros(len(images), dtype='int32')
        labelled[:n_labelled] = 1

    def get_epoch():
        # Fresh lockstep shuffle for every epoch.
        rng_state = numpy.random.get_state()
        numpy.random.shuffle(images)
        numpy.random.set_state(rng_state)
        numpy.random.shuffle(targets)
        if n_labelled is not None:
            numpy.random.set_state(rng_state)
            numpy.random.shuffle(labelled)
        image_batches = images.reshape((- 1), batch_size, 784)
        target_batches = targets.reshape((- 1), batch_size)
        if n_labelled is not None:
            labelled_batches = labelled.reshape((- 1), batch_size)
            # BUG FIX: the original yielded the *whole* ``labelled`` array for
            # every batch instead of the matching batch slice, and used the
            # Python-2-only ``xrange`` (the file is Python 3: it uses f-strings).
            for i in range(len(image_batches)):
                yield (numpy.copy(image_batches[i]), numpy.copy(target_batches[i]), numpy.copy(labelled_batches[i]))
        else:
            for i in range(len(image_batches)):
                yield (numpy.copy(image_batches[i]), numpy.copy(target_batches[i]))
    return get_epoch
class BoundaryValue(BoundaryOperator):
    """Value of a child symbol evaluated on one boundary side."""

    def __init__(self, child, side):
        super().__init__('boundary value', child, side)

    def _unary_new_copy(self, child):
        """Rebuild through the boundary_value factory so its simplifications
        get re-applied to the new child."""
        return boundary_value(child, self.side)

    def _sympy_operator(self, child):
        """Render for sympy: particle right-boundary values get a 'surf'
        superscript, otherwise the side name is used as the superscript."""
        sympy = have_optional_dependency('sympy')
        in_particle = self.child.domain[0] in ['negative particle', 'positive particle']
        if in_particle and (self.side == 'right'):
            if str(child) == '1':
                return child
            return sympy.Symbol(sympy.latex(child) + '^{surf}')
        if self.side == 'positive tab':
            return child
        return sympy.Symbol(sympy.latex(child) + '^{' + sympy.latex(self.side) + '}')
def wildcard_file_resolution(glob_search_string):
    """Resolve a glob pattern to exactly one existing file path.

    Raises:
        FileNotFoundError: no file matches the pattern.
        TypeError: more than one file matches (ambiguous pattern).
    """
    matches = glob(glob_search_string)
    if not matches:
        raise FileNotFoundError('No file found that matches the provided path')
    if len(matches) > 1:
        raise TypeError('More than one file found that matches the search string')
    return matches[0]
def match(y_true, y_pred):
    """Align predicted cluster ids with ground-truth labels via Hungarian
    assignment over the co-occurrence matrix; returns the relabelled
    predictions as a long tensor on the module-level ``device``."""
    y_true = y_true.astype(np.int64)
    y_pred = y_pred.astype(np.int64)
    assert y_pred.size == y_true.size
    n_ids = max(y_pred.max(), y_true.max()) + 1
    # Contingency table: counts[p, t] = how often prediction p meets truth t.
    counts = np.zeros((n_ids, n_ids), dtype=np.int64)
    for k in range(y_pred.size):
        counts[(y_pred[k], y_true[k])] += 1
    # Maximize agreement == minimize (max - counts).
    (row_ind, col_ind) = linear_sum_assignment(counts.max() - counts)
    remapped = np.zeros(y_true.shape[0])
    for k in range(y_pred.size):
        for j in row_ind:
            if y_true[k] == col_ind[j]:
                remapped[k] = row_ind[j]
    remapped = torch.from_numpy(remapped).long().to(device)
    remapped = remapped.view(remapped.size()[0])
    return remapped
def cv_select_tune_param(cv_results, metric='score', rule='best', prefer_larger_param=True):
    """Pick a tuning-parameter setting from sklearn-style ``cv_results``.

    Args:
        cv_results: mapping containing 'mean_test_<metric>' scores and a
            'params' list of per-candidate parameter dicts.
        metric: metric suffix to read; None falls back to 'score'.
        rule: 'best' = highest mean score; '1se' = candidates within one
            standard error of the best (single tuning parameter only).
        prefer_larger_param: with a single parameter, tie-break toward the
            larger (True) or smaller (False) parameter value.

    Returns:
        ``(selected_index, selected_params)``.

    Raises:
        ValueError: the metric column is missing, or ``rule`` is unknown.
        NotImplementedError: rule='1se' with multiple tuning parameters.
    """
    if metric is None:
        metric = 'score'
    test_key = 'mean_test_' + metric
    if test_key not in cv_results:
        raise ValueError('{} was not found in cv_results'.format(test_key))
    if rule not in ['best', '1se']:
        raise ValueError("rule must be one of ['best', '1se'], not {}".format(rule))
    cols_we_need = [test_key, 'params']
    if rule == '1se':
        se_key = 'se_test_' + metric
        cols_we_need.append(se_key)
        if se_key not in cv_results:
            # Derive standard errors when the caller didn't provide them.
            cv_results = _add_se(cv_results)
    df = pd.DataFrame({c: cv_results[c] for c in cols_we_need})
    param_names = list(cv_results['params'][0].keys())
    if len(param_names) == 1:
        single_param = True
        param_name = param_names[0]
        n_param_values = len(cv_results['params'])
        df['param_values'] = [cv_results['params'][i][param_name] for i in range(n_param_values)]
    else:
        single_param = False
        if rule == '1se':
            raise NotImplementedError('1se rule not currently implemented for multiple tuning parameters')
    best_score = df['mean_test_' + metric].max()
    # BUG FIX: pandas query() needs the '@' prefix to reference Python
    # locals; the original used bare '_score'/'_lbd' names, which raise
    # UndefinedVariableError (no such columns exist).
    if rule == 'best':
        candidates = df.query('mean_test_{} == @best_score'.format(metric))
    elif rule == '1se':
        score_se = df.query('mean_test_{} == @best_score'.format(metric))['se_test_{}'.format(metric)].max()
        score_lbd = best_score - score_se
        candidates = df.query('mean_test_{} >= @score_lbd'.format(metric))
    if single_param:
        # With a single parameter, break ties by the parameter value itself.
        if prefer_larger_param:
            tune_idx_selected = candidates['param_values'].idxmax()
        else:
            tune_idx_selected = candidates['param_values'].idxmin()
    else:
        tune_idx_selected = candidates.index.values[0]
    params_selected = cv_results['params'][tune_idx_selected]
    return (tune_idx_selected, params_selected)
def eval_det(pred_all, gt_all, ovthresh=0.25, use_07_metric=False, get_iou_func=get_iou):
    """Per-class detection evaluation (recall, precision, average precision).

    Args:
        pred_all: ``{img_id: [(classname, bbox, score), ...]}`` detections.
        gt_all: ``{img_id: [(classname, bbox), ...]}`` ground truth.
        ovthresh: IoU threshold for counting a detection as a true positive.
        use_07_metric: use the 11-point VOC07 AP interpolation.
        get_iou_func: callable computing IoU between two boxes.

    Returns:
        ``(rec, prec, ap)`` dicts keyed by classname.
    """
    pred = {}
    gt = {}
    # Re-key predictions as pred[classname][img_id] -> [(bbox, score), ...],
    # pre-creating empty gt entries for every (classname, img_id) seen.
    for img_id in pred_all.keys():
        for (classname, bbox, score) in pred_all[img_id]:
            if classname not in pred:
                pred[classname] = {}
            if img_id not in pred[classname]:
                pred[classname][img_id] = []
            if classname not in gt:
                gt[classname] = {}
            if img_id not in gt[classname]:
                gt[classname][img_id] = []
            pred[classname][img_id].append((bbox, score))
    # Re-key ground truth as gt[classname][img_id] -> [bbox, ...].
    for img_id in gt_all.keys():
        for (classname, bbox) in gt_all[img_id]:
            if classname not in gt:
                gt[classname] = {}
            if img_id not in gt[classname]:
                gt[classname][img_id] = []
            gt[classname][img_id].append(bbox)
    rec = {}
    prec = {}
    ap = {}
    for classname in gt.keys():
        # BUG FIX: a class present in the ground truth but never predicted has
        # no pred[classname] entry; fall back to an empty dict instead of
        # raising KeyError.
        (rec[classname], prec[classname], ap[classname]) = eval_det_cls(pred.get(classname, {}), gt[classname], ovthresh, use_07_metric, get_iou_func)
    return (rec, prec, ap)
# NOTE(review): the original first line was a bare ``_fixtures(...)`` call --
# a decorator with its leading ``@with`` stripped; restored as reahl's
# ``@with_fixtures``. Confirm the import at the top of this file.
@with_fixtures(ReahlSystemFixture, PartyAccountFixture)
def test_request_new_password(reahl_system_fixture, party_account_fixture):
    """Password-reset requests are refused for unknown, not-yet-active and
    disabled accounts; for an active account a single NewPasswordRequest is
    recorded and mail is sent (and re-sent) from the configured templates."""
    fixture = party_account_fixture
    system_account = fixture.new_system_account(activated=False)
    account_management_interface = fixture.new_account_management_interface(system_account=system_account)
    mailer_stub = fixture.mailer
    # Unknown email address: refused outright.
    account_management_interface.email = ('another.%s' % system_account.email)
    with expected(NoSuchAccountException):
        account_management_interface.request_password_reset()
    # Known but not-yet-activated account: refused with the account status.
    assert isinstance(system_account.status, AccountNotActivated)
    account_management_interface.email = system_account.email
    def exception_matches_system_account_status(ex):
        assert (ex.account_status.as_user_message() == system_account.status.as_user_message())
    with expected(AccountNotActiveException, test=exception_matches_system_account_status):
        account_management_interface.request_password_reset()
    # Activated-then-disabled account: also refused with the account status.
    system_account.activate()
    system_account.disable()
    account_management_interface.email = system_account.email
    assert isinstance(system_account.status, AccountDisabled)
    with expected(AccountNotActiveException, test=exception_matches_system_account_status):
        account_management_interface.request_password_reset()
    # Active account: the request succeeds and sends the templated mail.
    system_account.enable()
    assert isinstance(system_account.status, AccountActive)
    assert (Session.query(NewPasswordRequest).count() == 0)
    account_management_interface.email = system_account.email
    account_management_interface.request_password_reset()
    [new_password_request] = Session.query(NewPasswordRequest).filter_by(system_account=system_account).all()
    assert (mailer_stub.mail_recipients == [system_account.email])
    assert (mailer_stub.mail_sender == reahl_system_fixture.config.accounts.admin_email)
    substitutions = {'email': system_account.email, 'secret_key': new_password_request.as_secret_key()}
    expected_subject = Template(reahl_system_fixture.config.accounts.new_password_subject).substitute(substitutions)
    assert (mailer_stub.mail_subject == expected_subject)
    expected_message = Template(reahl_system_fixture.config.accounts.new_password_email).substitute(substitutions)
    assert (mailer_stub.mail_message == expected_message)
    # A second request re-sends the mail but does not create another
    # NewPasswordRequest.
    assert isinstance(system_account.status, AccountActive)
    assert (Session.query(NewPasswordRequest).count() == 1)
    mailer_stub.reset()
    account_management_interface.request_password_reset()
    assert (Session.query(NewPasswordRequest).count() == 1)
    [new_password_request] = Session.query(NewPasswordRequest).filter_by(system_account=system_account).all()
    assert (mailer_stub.mail_recipients == [system_account.email])
    assert (mailer_stub.mail_sender == reahl_system_fixture.config.accounts.admin_email)
    substitutions = {'email': system_account.email, 'secret_key': new_password_request.as_secret_key()}
    expected_subject = Template(reahl_system_fixture.config.accounts.new_password_subject).substitute(substitutions)
    assert (mailer_stub.mail_subject == expected_subject)
    expected_message = Template(reahl_system_fixture.config.accounts.new_password_email).substitute(substitutions)
    assert (mailer_stub.mail_message == expected_message)
class DataloaderAsyncGPUWrapper(DataloaderWrapper):
    """Wrap a dataloader so each batch is copied to the GPU on a side CUDA
    stream while the previous batch is being consumed (one-batch prefetch).
    """

    def __init__(self, dataloader: Iterable) -> None:
        assert torch.cuda.is_available(), 'This Dataloader wrapper needs a CUDA setup'
        super().__init__(dataloader)
        self.cache = None  # GPU-resident batch, ready to be served
        self.cache_next = None  # host-side batch currently being transferred
        self.stream = torch.cuda.Stream()  # side stream for async H2D copies
        self._iter = None

    def __iter__(self) -> Iterator[Any]:
        self._iter = iter(self.dataloader)
        # Warm the pipeline so the first __next__ already has a batch ready.
        self.preload()
        return self

    def preload(self):
        """Fetch the next host batch and launch its GPU copy on the side stream."""
        try:
            self.cache_next = next(self._iter)
            with torch.cuda.stream(self.stream):
                # non_blocking=True lets the copy overlap with main-stream work.
                self.cache = recursive_copy_to_gpu(self.cache_next, non_blocking=True)
        except StopIteration:
            # Underlying loader exhausted; __next__ observes cache is None.
            self.cache = None
            return

    def __next__(self) -> Any:
        # Ensure the async copy for the cached batch has completed.
        torch.cuda.current_stream().wait_stream(self.stream)
        result = self.cache
        if (self.cache is None):
            raise StopIteration
        # Kick off the transfer of the following batch before returning.
        self.preload()
        return result

    def __len__(self) -> int:
        return len(self.dataloader)
def amsgrad(func, x, n_iter, learning_rate=0.001, beta1=0.9, beta2=0.999, eps=1e-07):
    """Minimize with AMSGrad: Adam using a non-decreasing second moment.

    Args:
        func: callable returning a ``(value, gradient)`` pair at ``x``.
        x: initial iterate (scalar or numpy array; updated via ``-=``).
        n_iter: step count (the loop runs n_iter + 1 times, as in the original).
        learning_rate, beta1, beta2, eps: standard Adam hyper-parameters.

    Returns:
        The final iterate.
    """
    first_moment = 0.0
    second_moment = 0.0
    second_moment_max = 0.0
    for _ in range(n_iter + 1):
        _, grad = func(x)
        first_moment = beta1 * first_moment + (1 - beta1) * grad
        second_moment = beta2 * second_moment + (1 - beta2) * (grad ** 2)
        # AMSGrad twist: the denominator never shrinks between steps.
        second_moment_max = np.maximum(second_moment, second_moment_max)
        x -= learning_rate * first_moment / (np.sqrt(second_moment_max) + eps)
    return x
class Emeter(Usage):
    """Energy-meter module: realtime readings plus daily/monthly statistics.

    NOTE(review): ``realtime``/``emeter_today``/``emeter_this_month`` are
    plain methods here although upstream python-kasa exposes such accessors
    as properties -- a decorator may have been stripped; confirm against the
    call sites before changing.
    """

    def realtime(self) -> EmeterStatus:
        """Current consumption as reported under 'get_realtime'."""
        return EmeterStatus(self.data['get_realtime'])

    def emeter_today(self) -> Optional[float]:
        """Today's consumption, or None when the device has no entry for it."""
        stats = self._convert_stat_data(self.daily_data, entry_key='day')
        return stats.get(datetime.now().day)

    def emeter_this_month(self) -> Optional[float]:
        """This month's consumption, or None when unreported."""
        stats = self._convert_stat_data(self.monthly_data, entry_key='month')
        return stats.get(datetime.now().month)

    async def erase_stats(self):
        """Clear the device-side consumption statistics."""
        return await self.call('erase_emeter_stat')

    async def get_realtime(self):
        """Query the device for its realtime consumption."""
        return await self.call('get_realtime')

    async def get_daystat(self, *, year=None, month=None, kwh=True) -> Dict:
        """Daily statistics for year/month as ``{day: consumption}``."""
        raw = await self.get_raw_daystat(year=year, month=month)
        return self._convert_stat_data(raw['day_list'], entry_key='day', kwh=kwh)

    async def get_monthstat(self, *, year=None, kwh=True) -> Dict:
        """Monthly statistics for a year as ``{month: consumption}``."""
        raw = await self.get_raw_monthstat(year=year)
        return self._convert_stat_data(raw['month_list'], entry_key='month', kwh=kwh)

    def _convert_stat_data(self, data, entry_key, kwh=True) -> Dict:
        """Flatten a day_list/month_list into ``{entry_key: value}``,
        converting between Wh and kWh as requested."""
        if not data:
            return {}
        scale: float = 1
        if 'energy_wh' in data[0]:
            value_key = 'energy_wh'
            if kwh:
                scale = 1 / 1000
        else:
            value_key = 'energy'
            if not kwh:
                scale = 1000
        return {entry[entry_key]: entry[value_key] * scale for entry in data}
def ensure_trees_loaded(manager: BuildManager, graph: dict[(str, State)], initial: Sequence[str]) -> None:
    """Parse ASTs for any transitive dependencies of ``initial`` that are in
    the graph but not yet loaded (no-op when everything is loaded)."""
    pending = find_unloaded_deps(manager, graph, initial)
    if not pending:
        return
    if is_verbose(manager):
        manager.log_fine_grained('Calling process_fresh_modules on set of size {} ({})'.format(len(pending), sorted(pending)))
    process_fresh_modules(graph, pending, manager)
class CourseraOAuth2Test(OAuth2Test):
    """Login/partial-pipeline tests for the Coursera OAuth2 backend.

    NOTE(review): the URL string literals in this block were stripped by
    whatever produced this file (``user_data_url`` and the ``redirect_uri``
    value were left as unterminated strings, a syntax error). The values
    below are plausible reconstructions -- confirm against the upstream
    social-core test suite before relying on them.
    """
    backend_path = 'social_core.backends.coursera.CourseraOAuth2'
    user_data_url = 'https://api.coursera.org/api/externalBasicProfiles.v1?q=me'  # TODO confirm
    expected_username = '560e7ed2076e0d589e88bd74b6aad4b7'
    access_token_body = json.dumps({'access_token': 'foobar', 'token_type': 'Bearer', 'expires_in': 1795})
    request_token_body = json.dumps({'code': 'foobar-code', 'client_id': 'foobar-client-id', 'client_secret': 'foobar-client-secret', 'redirect_uri': 'http://myapp.com', 'grant_type': 'authorization_code'})  # TODO confirm redirect_uri
    user_data_body = json.dumps({'token_type': 'Bearer', 'paging': None, 'elements': [{'id': '560e7ed2076e0d589e88bd74b6aad4b7'}], 'access_token': 'foobar', 'expires_in': 1800, 'linked': None})

    def test_login(self):
        self.do_login()

    def test_partial_pipeline(self):
        self.do_partial_pipeline()
def _unparse_paren(level_lst):
line = level_lst[0][0][0]
for level in level_lst[1:]:
for group in level:
new_string = group[(- 1)]
if ((new_string[:2] == '((') and (new_string[(- 2):] == '))')):
new_string = new_string[1:(- 1)]
line = line.replace(group[(- 2)], new_string)
return line |
class XMLResponse(HttpResponse):
    """HttpResponse serving pretty-printed XML, optionally as a download."""

    def __init__(self, xml, name=None):
        super().__init__(prettify_xml(xml), content_type='application/xml')
        # Only force a download when the project is configured for attachments;
        # slashes in the name are sanitized for the filename.
        if name and settings.EXPORT_CONTENT_DISPOSITION == 'attachment':
            safe_name = name.replace('/', '_')
            self['Content-Disposition'] = 'attachment; filename="{}.xml"'.format(safe_name)
def create_context(default_value: _Type) -> Context[_Type]:
    """Return a new context component whose providers default to ``default_value``."""

    def context(*children: Any, value: _Type = default_value, key: (Key | None) = None) -> _ContextProvider[_Type]:
        # type=context ties every provider instance back to this context object.
        return _ContextProvider(*children, value=value, key=key, type=context)

    # Present the factory under a stable public name.
    context.__qualname__ = 'context'
    return context
# NOTE(review): the original first line was the bare (and syntactically
# invalid) ``(frozen=True)`` -- a stripped ``@dataclass(frozen=True)``
# decorator, restored below. ``make`` takes ``cls`` and is restored as a
# classmethod for the same reason.
@dataclass(frozen=True)
class AndConstraint(AbstractConstraint):
    """Conjunction of constraints; apply() yields every sub-constraint's
    application in order."""

    constraints: Tuple[(AbstractConstraint, ...)]

    def apply(self) -> Iterable['Constraint']:
        for cons in self.constraints:
            yield from cons.apply()

    def invert(self) -> 'OrConstraint':
        # De Morgan: NOT (A AND B) == (NOT A) OR (NOT B).
        return OrConstraint(tuple((cons.invert() for cons in self.constraints)))

    @classmethod
    def make(cls, constraints: Iterable[AbstractConstraint]) -> AbstractConstraint:
        """Build a simplified conjunction: flatten nested ANDs, de-duplicate
        by object identity, drop ORs already implied by another conjunct, and
        collapse empty/singleton results."""
        processed = {}
        for cons in constraints:
            if isinstance(cons, AndConstraint):
                # Flatten nested conjunctions.
                for subcons in cons.constraints:
                    processed[id(subcons)] = subcons
                continue
            processed[id(cons)] = cons
        final = []
        for constraint in processed.values():
            if isinstance(constraint, OrConstraint):
                # An OR that contains an already-required conjunct adds nothing.
                if any(((id(subcons) in processed) for subcons in constraint.constraints)):
                    continue
            final.append(constraint)
        if (not final):
            return NULL_CONSTRAINT
        if (len(final) == 1):
            (cons,) = final
            return cons
        return cls(tuple(final))

    def __str__(self) -> str:
        children = ' AND '.join(map(str, self.constraints))
        return f'({children})'
def LFP_electrolyte_exchange_current_density_kashkooli2017(c_e, c_s_surf, c_s_max, T):
    """Exchange-current density for an LFP electrode (Kashkooli et al., 2017).

    Butler-Volmer-style prefactor: m_ref * arrhenius * c_e^0.5 *
    c_s_surf^0.5 * (c_s_max - c_s_surf)^0.5, where the Arrhenius factor is
    referenced to 298.15 K with activation energy E_r.
    """
    m_ref = (6 * (10 ** (- 7)))  # reference reaction-rate prefactor
    E_r = 39570  # activation energy
    arrhenius = np.exp((E_r / pybamm.constants.R) * ((1 / 298.15) - (1 / T)))
    return m_ref * arrhenius * c_e ** 0.5 * c_s_surf ** 0.5 * (c_s_max - c_s_surf) ** 0.5
def scatter(inputs, target_gpus, dim=0, chunk_sizes=None):
    """Recursively slice ``inputs`` across ``target_gpus``.

    Variables are split via Scatter; tuples/lists/dicts are descended into
    element-wise and re-zipped per GPU; any other object is replicated (by
    reference) once per GPU.
    """
    def split(item):
        if isinstance(item, Variable):
            return Scatter.apply(target_gpus, chunk_sizes, dim, item)
        assert not torch.is_tensor(item), 'Tensors not supported in scatter.'
        if isinstance(item, tuple):
            return list(zip(*map(split, item)))
        if isinstance(item, list):
            return list(map(list, zip(*map(split, item))))
        if isinstance(item, dict):
            return list(map(type(item), zip(*map(split, item.items()))))
        # Non-collection payloads are shared by reference with every GPU.
        return [item for _ in target_gpus]
    return split(inputs)
def link_AGL(name, restype, argtypes, requires=None, suggestions=None):
    """Bind ``name`` from the AGL library with the given ctypes signature.

    Returns the decorated function, or a missing_function stub when the
    symbol is absent from agl_lib.
    """
    try:
        symbol = getattr(agl_lib, name)
        symbol.restype = restype
        symbol.argtypes = argtypes
        decorate_function(symbol, name)
        return symbol
    except AttributeError:
        # Symbol not exported by this platform's AGL build.
        return missing_function(name, requires, suggestions)
def transform_2d(arr, kpts, ki, rmat, label, trans):
    """Rotate the IBZ-stored 2D block of ``arr`` to the BZ k-point ``ki``.

    ``label`` is a pair of index tags picking the rotation-matrix family for
    each side of the 2D array; ``trans`` marks which sides to conjugate.
    Returns ``arr[ki_ibz]`` untouched when ``ki`` is its own IBZ
    representative.
    """
    ki_ibz = kpts.bz2ibz[ki]
    ibz_rep = kpts.ibz2bz[ki_ibz]
    if ki == ibz_rep:
        # ki is already the IBZ representative: no rotation needed.
        return arr[ki_ibz]
    pi, pj = label
    iop = kpts.stars_ops_bz[ki]
    rot_left = getattr(rmat, pi * 2)[ibz_rep][iop]
    rot_right = getattr(rmat, pj * 2)[ibz_rep][iop]
    ti, tj = trans
    if ti == 'c':
        rot_left = rot_left.conj()
    if tj == 'c':
        rot_right = rot_right.conj()
    return reduce(np.dot, (rot_left.T, arr[ki_ibz], rot_right))
def test_tdm_fmcw_tx():
    """Smoke-test the TDM FMCW transmitter fixture: waveform, RF and
    per-channel properties.

    NOTE(review): several numeric literals in this test were reduced to a
    bare ``.0`` -- the ``bandwidth`` assertion and the ``/ .0`` divisor in
    the channel-location check. ``const.c / .0`` raises ZeroDivisionError,
    so these are almost certainly corrupted constants; restore them from the
    original test suite before relying on this test.
    """
    print('#### TDM FMCW transmitter ####')
    tdm = tdm_fmcw_tx()
    print('# TDM FMCW transmitter parameters #')
    assert (tdm.waveform_prop['pulse_length'] == 8e-05)
    assert (tdm.waveform_prop['bandwidth'] == .0)
    assert (tdm.rf_prop['tx_power'] == 20)
    assert (tdm.waveform_prop['prp'][0] == 0.0002)
    assert (tdm.waveform_prop['pulses'] == 2)
    print('# TDM FMCW transmitter channel #')
    assert (tdm.txchannel_prop['size'] == 2)
    assert np.array_equal(tdm.txchannel_prop['locations'], np.array([[0, (((- 4) * const.c) / .0), 0], [0, 0, 0]]))
    assert np.array_equal(tdm.txchannel_prop['az_angles'], [np.arange((- 90), 91, 180), np.arange((- 90), 91, 180)])
    assert np.array_equal(tdm.txchannel_prop['az_patterns'], [np.zeros(2), np.zeros(2)])
    assert np.array_equal(tdm.txchannel_prop['el_angles'], [np.arange((- 90), 91, 180), np.arange((- 90), 91, 180)])
    assert np.array_equal(tdm.txchannel_prop['el_patterns'], [np.zeros(2), np.zeros(2)])
    print('# TDM FMCW transmitter modulation #')
    assert np.array_equal(tdm.txchannel_prop['pulse_mod'], [np.ones(tdm.waveform_prop['pulses']), np.ones(tdm.waveform_prop['pulses'])])
# NOTE(review): this bare string expression is a no-op; it looks like a
# stripped decorator (e.g. a pytest marker gating on the optional bandmat
# dependency). Confirm against the original source.
('requires_bandmat')
def test_linalg_choleskey_inv():
    """cholesky_inv / cholesky_inv_banded must reproduce numpy's dense
    inverse of a banded positive-definite matrix, from either the lower or
    the upper Cholesky factor."""
    from nnmnkwii.paramgen import build_win_mats
    for windows in _get_windows_set():
        for T in [5, 10]:
            win_mats = build_win_mats(windows, T)
            # Dense PD test matrix assembled from the window matrices.
            P = _get_banded_test_mat(win_mats, T).full()
            L = scipy.linalg.cholesky(P, lower=True)
            U = scipy.linalg.cholesky(P, lower=False)
            # Sanity: both factorizations reproduce P.
            assert np.allclose(L.dot(L.T), P)
            assert np.allclose(U.T.dot(U), P)
            Pinv = np.linalg.inv(P)
            Pinv_hat = cholesky_inv(L, lower=True)
            assert np.allclose(Pinv, Pinv_hat)
            Pinv_hat = cholesky_inv(U, lower=False)
            assert np.allclose(Pinv, Pinv_hat)
            Pinv_hat = cholesky_inv_banded(L, width=3)
            assert np.allclose(Pinv, Pinv_hat)
def L2_PGD(x_in, y_true, net, steps, eps):
    """Generate an L2-bounded adversarial example via PGD with an Adam inner loop.

    Maximizes the cross-entropy loss of ``net`` around ``x_in``, then projects
    the perturbation back onto the L2 ball of radius ``eps`` and clamps pixels
    to [0, 1].

    Args:
        x_in: input batch (N, C, H, W) with values in [0, 1], on CUDA.
        y_true: ground-truth labels for ``x_in``.
        net: model; assumed to return a ``(logits, aux)`` tuple.
        steps: number of Adam steps.
        eps: L2 radius of the perturbation ball; 0 disables the attack.

    Returns:
        Adversarial batch of the same shape as ``x_in`` (``x_in`` when eps == 0).
    """
    if (eps == 0):
        return x_in
    # Remember training mode so it can be restored; the attack runs in eval mode.
    training = net.training
    if training:
        net.eval()
    x_adv = x_in.clone().requires_grad_()
    optimizer = Adam([x_adv], lr=0.01)
    eps = torch.tensor(eps).view(1, 1, 1, 1).cuda()
    for _ in range(steps):
        optimizer.zero_grad()
        net.zero_grad()
        (out, _) = net(x_adv)
        # Ascend the loss surface by minimizing its negation.
        loss = (- F.cross_entropy(out, y_true))
        loss.backward()
        optimizer.step()
    diff = (x_adv - x_in)
    norm = torch.sqrt(torch.sum((diff * diff), (1, 2, 3)))
    norm = norm.view(norm.size(0), 1, 1, 1)
    norm_out = torch.min(norm, eps)
    # BUG FIX: guard against a zero-norm perturbation — dividing by an
    # unclamped norm produced NaNs (0/0) for samples the optimizer left
    # unchanged. Clamping only affects that degenerate case.
    diff = ((diff / norm.clamp(min=1e-12)) * norm_out)
    # Write the projected example back in-place without tracking gradients.
    x_adv.detach().copy_((diff + x_in).clamp_(0, 1))
    net.zero_grad()
    if training:
        net.train()
    return x_adv
def add_single_database_parameters(add_in_memory=False):
    """Click decorator factory: attach a DATABASE argument (and, optionally,
    an in-memory flag) and repackage them into a ``database_options`` kwarg."""
    def decorator(command):
        # Register the click parameters directly on the original command.
        click.argument('database', metavar='DATABASE')(command)
        if add_in_memory:
            _in_memory_option(command)

        def wrapped_command(**kwargs):
            # Pull the raw CLI values out and hand the command a DatabaseOptions.
            db_kwargs = {'database': kwargs.pop('database')}
            if add_in_memory:
                db_kwargs['copy_to_memory'] = kwargs.pop('in_memory')
            kwargs['database_options'] = DatabaseOptions(**db_kwargs)
            return command(**kwargs)

        # Preserve click metadata (params, help, name) on the wrapper.
        set_click_attrs(wrapped_command, command)
        return wrapped_command
    return decorator
class FrontPage(Element):
    """Report front-page element rendered from the 'frontpage.html' template."""

    def __init__(self, header: str, title: str, subtitle: str, logo: str, grid_proportion: GridProportion=GridProportion.Eight):
        super().__init__(grid_proportion)
        # Text/branding fields consumed by generate_html().
        self.header = header
        self.title = title
        self.subtitle = subtitle
        self.logo = logo

    def generate_html(self, document: Optional[Document]=None) -> str:
        """Render the front page; ``document`` is accepted for interface parity but unused."""
        template = templates.environment.get_template('frontpage.html')
        return template.render(
            header=self.header,
            title=self.title,
            subtitle=self.subtitle,
            logo_url=self.logo,
        )
def conv3x3(mode, in_planes, out_planes, k_in_mask, k_out_mask, output, stride=1):
    """Build a 3x3 convolution appropriate for the given pruning ``mode``.

    Args:
        mode: 'finetune'/'full' -> plain nn.Conv2d; 'sparse' -> SparseConv2d
            (learned sparsity); 'inference' -> PrunedConv2d using the masks.
        in_planes, out_planes: channel counts.
        k_in_mask, k_out_mask, output: pruning masks / output spec, only used
            in 'inference' mode.
        stride: convolution stride.

    Returns:
        The constructed convolution module.

    Raises:
        ValueError: for an unrecognized ``mode`` (previously this fell through
            and silently returned None, deferring the failure to first use).
    """
    if ((mode == 'finetune') or (mode == 'full')):
        return nn.Conv2d(in_planes, out_planes, kernel_size=3, stride=stride, padding=1, bias=False)
    if (mode == 'sparse'):
        return SparseConv2d(in_planes, out_planes, kernel_size=3, k=out_planes, rho_init=0.001, rho_maximum=6, mu=1.005, stride=stride, padding=1, bias=False)
    if (mode == 'inference'):
        return PrunedConv2d(in_planes, out_planes, k_in_mask, k_out_mask, output, kernel_size=3, stride=stride, padding=1, bias=False)
    raise ValueError('conv3x3: unknown mode {!r}'.format(mode))
def get_plot(title):
    """Fetch the plot section text of a Wikipedia page.

    Tries every candidate section name in the module-level ``possibles`` list
    (plus their 'Edit'-suffixed variants) and returns the first non-empty
    section with newlines and apostrophes stripped, or None on any failure.
    """
    possibles_edit = [(i + 'Edit') for i in possibles]
    all_possibles = (possibles + possibles_edit)
    try:
        title = urllib.parse.unquote(title.replace('_', ' '))
        wik = wikipedia.WikipediaPage(title)
    except Exception:
        # Page lookup failed; the sentinel makes the loop below fail too.
        wik = np.NaN
    plot = None
    try:
        for j in all_possibles:
            section_text = wik.section(j)
            if (section_text is not None):
                # BUG FIX: previously called the undefined name `section(wik, j)`,
                # which always raised NameError — swallowed by the bare except,
                # so plot was always None.
                plot = section_text.replace('\n', '').replace("'", '')
                break
    except Exception:
        # Best-effort: any lookup failure simply yields plot = None.
        pass
    return plot
def test_bad_extra(base_object: dict[(str, Any)]) -> None:
    """An extras entry containing regex metacharacters must fail schema validation
    with exactly one pattern-mismatch error."""
    invalid_extra = 'a{[*+'
    base_object['extras'] = {'test': [invalid_extra]}
    errors = validate_object(base_object, 'poetry-schema')
    assert len(errors) == 1
    assert errors[0] == 'data.extras.test[0] must match pattern ^[a-zA-Z-_.0-9]+$'
def train_and_validate(args):
    """Entry point for training: build vocab and model, load or initialize
    weights, then dispatch to distributed, single-GPU, or CPU workers.

    Attributes read from ``args``: seed, vocab_path, pretrained_model_path,
    dist_train, ranks_num, gpu_ranks, single_gpu, gpu_id.
    """
    set_seed(args.seed)
    # Load the vocabulary and stash it on args so downstream builders see it.
    vocab = Vocab()
    vocab.load(args.vocab_path)
    args.vocab = vocab
    model = build_model(args)
    if (args.pretrained_model_path is not None):
        # Warm-start from a pretrained checkpoint.
        model = load_model(model, args.pretrained_model_path)
    else:
        # Fresh init: N(0, 0.02) for all parameters except those whose names
        # contain 'gamma'/'beta' (LayerNorm scales/offsets keep defaults).
        for (n, p) in list(model.named_parameters()):
            if (('gamma' not in n) and ('beta' not in n)):
                p.data.normal_(0, 0.02)
    if args.dist_train:
        # One spawned process per rank for distributed training.
        mp.spawn(worker, nprocs=args.ranks_num, args=(args.gpu_ranks, args, model), daemon=False)
    elif args.single_gpu:
        worker(args.gpu_id, None, args, model)
    else:
        # CPU fallback.
        worker(None, None, args, model)
def test_pattern_str() -> None:
    """String rendering of Pattern alternations: plain union, duplicate
    collapsing, and parenthesization of nested alternations."""
    # Two distinct single-char branches render as an alternation.
    assert (str(Pattern(Conc(Mult(Charclass('a'), ONE)), Conc(Mult(Charclass('b'), ONE)))) == 'a|b')
    # Duplicate branches are deduplicated.
    assert (str(Pattern(Conc(Mult(Charclass('a'), ONE)), Conc(Mult(Charclass('a'), ONE)))) == 'a')
    # A nested alternation inside a concatenation must be parenthesized.
    assert (str(Pattern(Conc(Mult(Charclass('a'), ONE), Mult(Charclass('b'), ONE), Mult(Charclass('c'), ONE)), Conc(Mult(Charclass('d'), ONE), Mult(Charclass('e'), ONE), Mult(Charclass('f'), ONE), Mult(Pattern(Conc(Mult(Charclass('g'), ONE), Mult(Charclass('h'), ONE), Mult(Charclass('i'), ONE)), Conc(Mult(Charclass('j'), ONE), Mult(Charclass('k'), ONE), Mult(Charclass('l'), ONE))), ONE)))) == 'abc|def(ghi|jkl)')
def test_slicing_basic(do_test):
    """RTLIR translation of basic part-select slicing: the upblk swaps the
    lower and upper 16-bit halves between in_ and out."""
    a = CaseBits32Bits64SlicingBasicComp.DUT()
    # Expected IR: out[0:16] = in_[16:32] and out[16:32] = in_[0:16].
    a._rtlir_test_ref = {'slicing_basic': CombUpblk('slicing_basic', [Assign([Slice(Attribute(Base(a), 'out'), Number(0), Number(16))], Slice(Attribute(Base(a), 'in_'), Number(16), Number(32)), True), Assign([Slice(Attribute(Base(a), 'out'), Number(16), Number(32))], Slice(Attribute(Base(a), 'in_'), Number(0), Number(16)), True)])}
    do_test(a)
_torch
_vision
class DPTFeatureExtractionTest(FeatureExtractionSavingTestMixin, unittest.TestCase):
    """Tests for DPTFeatureExtractor: config attribute presence and __call__
    with PIL, numpy, and torch inputs, single image and batched."""

    # Class under test; None when vision deps are unavailable (tests then skip).
    feature_extraction_class = (DPTFeatureExtractor if is_vision_available() else None)

    def setUp(self):
        self.feature_extract_tester = DPTFeatureExtractionTester(self)

    @property
    def feat_extract_dict(self):
        # BUG FIX: this must be a @property — the tests read
        # `self.feat_extract_dict` as an attribute and unpack it with `**`,
        # which fails on a bound method.
        return self.feature_extract_tester.prepare_feat_extract_dict()

    def test_feat_extract_properties(self):
        """The extractor exposes the expected config attributes."""
        feature_extractor = self.feature_extraction_class(**self.feat_extract_dict)
        self.assertTrue(hasattr(feature_extractor, 'image_mean'))
        self.assertTrue(hasattr(feature_extractor, 'image_std'))
        self.assertTrue(hasattr(feature_extractor, 'do_normalize'))
        self.assertTrue(hasattr(feature_extractor, 'do_resize'))
        self.assertTrue(hasattr(feature_extractor, 'size'))

    def test_call_pil(self):
        """PIL inputs produce correctly-shaped pixel_values (single and batched)."""
        feature_extractor = self.feature_extraction_class(**self.feat_extract_dict)
        image_inputs = prepare_image_inputs(self.feature_extract_tester, equal_resolution=False)
        for image in image_inputs:
            self.assertIsInstance(image, Image.Image)
        # Single image -> batch dimension of 1.
        encoded_images = feature_extractor(image_inputs[0], return_tensors='pt').pixel_values
        self.assertEqual(encoded_images.shape, (1, self.feature_extract_tester.num_channels, self.feature_extract_tester.size, self.feature_extract_tester.size))
        # Batched input.
        encoded_images = feature_extractor(image_inputs, return_tensors='pt').pixel_values
        self.assertEqual(encoded_images.shape, (self.feature_extract_tester.batch_size, self.feature_extract_tester.num_channels, self.feature_extract_tester.size, self.feature_extract_tester.size))

    def test_call_numpy(self):
        """numpy inputs behave identically to PIL inputs."""
        feature_extractor = self.feature_extraction_class(**self.feat_extract_dict)
        image_inputs = prepare_image_inputs(self.feature_extract_tester, equal_resolution=False, numpify=True)
        for image in image_inputs:
            self.assertIsInstance(image, np.ndarray)
        encoded_images = feature_extractor(image_inputs[0], return_tensors='pt').pixel_values
        self.assertEqual(encoded_images.shape, (1, self.feature_extract_tester.num_channels, self.feature_extract_tester.size, self.feature_extract_tester.size))
        encoded_images = feature_extractor(image_inputs, return_tensors='pt').pixel_values
        self.assertEqual(encoded_images.shape, (self.feature_extract_tester.batch_size, self.feature_extract_tester.num_channels, self.feature_extract_tester.size, self.feature_extract_tester.size))

    def test_call_pytorch(self):
        """torch.Tensor inputs behave identically to PIL inputs."""
        feature_extractor = self.feature_extraction_class(**self.feat_extract_dict)
        image_inputs = prepare_image_inputs(self.feature_extract_tester, equal_resolution=False, torchify=True)
        for image in image_inputs:
            self.assertIsInstance(image, torch.Tensor)
        encoded_images = feature_extractor(image_inputs[0], return_tensors='pt').pixel_values
        self.assertEqual(encoded_images.shape, (1, self.feature_extract_tester.num_channels, self.feature_extract_tester.size, self.feature_extract_tester.size))
        encoded_images = feature_extractor(image_inputs, return_tensors='pt').pixel_values
        self.assertEqual(encoded_images.shape, (self.feature_extract_tester.batch_size, self.feature_extract_tester.num_channels, self.feature_extract_tester.size, self.feature_extract_tester.size))
def get_unbalanced(task, pos_pos, pos_neg, neg_pos, neg_neg):
    """Assemble unbalanced train/test splits from four labelled buckets.

    The majority buckets (pos_pos, neg_neg) and minority buckets (pos_neg,
    neg_pos) are cut at task-dependent sizes; each item is tagged with its
    (label_a, label_b) pair. Returns (x_train, x_test).
    """
    # Cut points depend on the task family.
    if (('age' in task) or ('mention2' in task)):
        major_train, major_test = 40000, 42000
        minor_train, minor_test = 10000, 11000
    else:
        major_train, major_test = 66400, 70400
        minor_train, minor_test = 16600, 17600
    # (bucket, label_a, label_b, train cut, test cut) — order matters: it
    # fixes the ordering of the output lists.
    spec = [
        (pos_pos, 1, 0, major_train, major_test),
        (pos_neg, 1, 1, minor_train, minor_test),
        (neg_pos, 0, 0, minor_train, minor_test),
        (neg_neg, 0, 1, major_train, major_test),
    ]
    x_train = []
    x_test = []
    for bucket, label_a, label_b, cut_train, cut_test in spec:
        x_train.extend((item, label_a, label_b) for item in bucket[:cut_train])
        x_test.extend((item, label_a, label_b) for item in bucket[cut_train:cut_test])
    return (x_train, x_test)
def prepare_dataloader(device: torch.device) -> torch.utils.data.DataLoader:
    """Build a DataLoader of random 128-dim features with binary labels on ``device``."""
    total = (NUM_BATCHES * BATCH_SIZE)
    features = torch.randn(total, 128, device=device)
    targets = torch.randint(low=0, high=2, size=(total,), device=device)
    dataset = TensorDataset(features, targets)
    return torch.utils.data.DataLoader(dataset, batch_size=BATCH_SIZE)
def _load_info(root, basename='info'):
    """Load dataset metadata from ``<root>/<basename>.json`` or ``.yaml``.

    JSON is tried first, then YAML. Returns the parsed dict, or {} (with a
    warning) when neither file can be read.
    """
    info_json = os.path.join(root, (basename + '.json'))
    info_yaml = os.path.join(root, (basename + '.yaml'))
    errors = []
    try:
        with wds.gopen.gopen(info_json) as f:
            return json.load(f)
    except Exception as e:
        errors.append(str(e))
    try:
        with wds.gopen.gopen(info_yaml) as f:
            return yaml.safe_load(f)
    except Exception as e:
        # BUG FIX: this failure used to be silently dropped, so the warning
        # below only ever reported the JSON error; record both.
        errors.append(str(e))
    _logger.warning(f'Dataset info file not found at {info_json} or {info_yaml}. Error: {"; ".join(errors)}. Falling back to provided split and size arg.')
    return {}
class Migration(migrations.Migration):
    """Django migration: redefine DocumentationLink.base_url with blank=True,
    help text, and the trailing-slash validator."""
    # Must run after the message-content widening migration.
    dependencies = [('api', '0071_increase_message_content_4000')]
    operations = [migrations.AlterField(model_name='documentationlink', name='base_url', field=models.URLField(blank=True, help_text='The base URL from which documentation will be available for this project. Used to generate links to various symbols within this package.', validators=[pydis_site.apps.api.models.bot.documentation_link.ends_with_slash_validator]))]
class ShardingPlan():
    """Top-level sharding plan: maps module paths to per-module sharding plans."""
    # module path -> that module's sharding plan
    plan: Dict[(str, ModuleShardingPlan)]

    def get_plan_for_module(self, module_path: str) -> Optional[ModuleShardingPlan]:
        """Return the plan registered for ``module_path``, or None when absent."""
        return self.plan.get(module_path, None)

    def __str__(self) -> str:
        """Render every module's plan, separated by blank lines."""
        sections = [
            ('module: ' + module_path) + str(module_plan)
            for (module_path, module_plan) in self.plan.items()
        ]
        return '\n\n'.join(sections)
class ChannelAttention(nn.Module):
def __init__(self, channel, reduction=16):
super().__init__()
self.maxpool = nn.AdaptiveMaxPool2d(1)
self.avgpool = nn.AdaptiveAvgPool2d(1)
self.se = nn.Sequential(nn.Conv2d(channel, (channel // reduction), 1, bias=False), nn.ReLU(), nn.Conv2d((channel // reduction), channel, 1, bias=False))
self.sigmoid = nn.Sigmoid()
def forward(self, x):
max_result = self.maxpool(x)
avg_result = self.avgpool(x)
max_out = self.se(max_result)
avg_out = self.se(avg_result)
output = self.sigmoid((max_out + avg_out))
return output |
def printFlakeOutput(text):
    """Pretty-print flake diagnostics, colour-coded by severity category.

    Mandatory errors print red and set bit 2 of the return value; recommended
    print yellow, optional green, unknown codes cyan, ignored codes are
    suppressed. Non-diagnostic lines are echoed verbatim.
    Returns the accumulated bitmask (0 when no mandatory error was seen).
    """
    RED = '\x1b[0;31m'
    YELLOW = '\x1b[0;33m'
    GREEN = '\x1b[0;32m'
    CYAN = '\x1b[0;36m'
    RESET = '\x1b[0m'
    ret = 0
    gotError = False
    for line in text.split('\n'):
        m = re.match('[^\\:]+\\:\\d+\\:\\d+\\: (\\w+) .*', line)
        if m is None:
            # Not a "file:line:col: CODE ..." diagnostic; echo as-is.
            print(line)
            continue
        gotError = True
        code = m.group(1)
        if code in FLAKE_MANDATORY:
            print(RED + line + RESET)
            ret |= 2
        elif code in FLAKE_RECOMMENDED:
            print(YELLOW + line + RESET)
        elif code in FLAKE_OPTIONAL:
            print(GREEN + line + RESET)
        elif code in FLAKE_IGNORE:
            pass
        else:
            print(CYAN + line + RESET)
    if not gotError:
        print(' [ no errors ]\n')
    return ret
def assert_named_modules_identical(actual, desired, equality_sufficient=False):
    """Assert two (name, module) sequences carry the same names, in order,
    and pairwise-identical modules (delegated to assert_modules_identical)."""
    # zip(*...) transposes the pair sequences into names and modules.
    (names_actual, modules_actual) = zip(*actual)
    (names_desired, modules_desired) = zip(*desired)
    assert (names_actual == names_desired)
    assert_modules_identical(modules_actual, modules_desired, equality_sufficient=equality_sufficient)
_config
def test_spiral_left_anticlockwise(manager):
    """Spiral layout (left/anticlockwise): each new window takes half the
    remaining space, spiralling inward. Expected geometry assumes an
    800x600 screen with 1px borders (hence the 798/598-style dimensions)."""
    manager.test_window('one')
    assert_dimensions(manager, 0, 0, 798, 598)
    manager.test_window('two')
    assert_dimensions(manager, 400, 0, 398, 598)
    manager.test_window('three')
    assert_dimensions(manager, 400, 0, 398, 298)
    manager.test_window('four')
    assert_dimensions(manager, 400, 0, 198, 298)
    manager.test_window('five')
    assert_dimensions(manager, 400, 150, 198, 148)
class VisibilityTracing(nn.Module):
    """Determine, by sphere tracing an SDF, which points are visible from each
    camera center: a point is visible from a camera when the ray from the
    point toward the camera exits the bounding sphere without re-entering the
    surface."""

    def __init__(self, object_bounding_sphere=1.0, sphere_tracing_iters=30, initial_epsilon=0.001):
        super().__init__()
        self.object_bounding_sphere = object_bounding_sphere  # scene bounding-sphere radius
        self.sphere_tracing_iters = sphere_tracing_iters  # max marching iterations
        self.start_epsilon = initial_epsilon  # initial offset off the surface

    def forward(self, sdf, unique_camera_centers, points):
        """Return a (num_points, num_cams) boolean visibility mask.

        Args:
            sdf: callable mapping point tensors to signed distances.
            unique_camera_centers: (num_cams, 3) camera positions — TODO confirm shape.
            points: (num_points, 3) surface points to test.
        """
        assert (sdf is not None), 'sdf cannot be None'
        assert (unique_camera_centers is not None), 'camera_centers cannot be None'
        assert (points is not None), 'points cannot be None'
        (num_points, _) = points.shape
        # Rays from every point toward every camera: (num_points, num_cams, 3).
        ray_directions = (unique_camera_centers.to(device).unsqueeze(0) - points.unsqueeze(1))
        point_to_camera_distance = ray_directions.norm(dim=(- 1))
        unit_ray_directions = (ray_directions / point_to_camera_distance.unsqueeze((- 1)))
        visibility_mask = self.sphere_tracing_for_visibility(sdf, points, unit_ray_directions, unique_camera_centers)
        return visibility_mask

    def sphere_tracing_for_visibility(self, sdf, points, unit_ray_directions, unique_camera_centers):
        """March each point->camera ray through the SDF; a ray counts as
        visible when it passes the bounding-sphere exit distance unobstructed."""
        (num_points, _) = points.shape
        (num_cams, _) = unique_camera_centers.shape
        # True while the corresponding ray is still being marched.
        in_progress_mask = torch.ones(num_points, num_cams).to(device).bool()
        (dist_from_sphere_intersection_to_points, mask_intersection) = get_sphere_intersection_for_visibility(points, unit_ray_directions, r=self.object_bounding_sphere)
        # Start slightly off the surface to avoid immediate self-intersection.
        current_points = (points.unsqueeze(1) + (self.start_epsilon * unit_ray_directions))
        current_sdf = sdf(current_points)
        # Rays that start inside the surface are occluded from the outset.
        in_progress_mask[(current_sdf < 0)] = 0
        current_distance_to_start_points = (self.start_epsilon + current_sdf)
        in_progress_mask_old = in_progress_mask.detach().clone()
        iters = 0
        while True:
            current_points = (points.unsqueeze(1) + (current_distance_to_start_points.unsqueeze((- 1)) * unit_ray_directions))
            # Evaluate the SDF only for rays still marching.
            current_sdf[in_progress_mask] = sdf(current_points[in_progress_mask])
            # Converged onto the surface -> occluded; stop marching this ray.
            in_progress_mask[(current_sdf < 5e-07)] = 0
            current_distance_to_start_points[in_progress_mask] = (current_distance_to_start_points[in_progress_mask] + current_sdf[in_progress_mask])
            # Marched past the bounding-sphere exit -> nothing was hit.
            in_progress_mask[(current_distance_to_start_points > dist_from_sphere_intersection_to_points)] = 0
            # NOTE(review): `in_progress_mask_old` is never refreshed inside the
            # loop, so this early-exit only fires when every ray was culled
            # before the first iteration; presumably `in_progress_mask` was
            # intended — confirm (results are unaffected since iterations
            # become no-ops once the mask empties, only time is wasted).
            if ((iters == self.sphere_tracing_iters) or (in_progress_mask_old.sum() == 0)):
                break
            iters += 1
        # Visible = travelled past the sphere exit AND the ray actually
        # intersected the bounding sphere in the first place.
        visibility_mask = (current_distance_to_start_points > dist_from_sphere_intersection_to_points)
        visibility_mask = (visibility_mask & mask_intersection)
        return visibility_mask
def add_flops_counting_methods(net_main_module):
    """Attach the FLOPs-accounting functions to ``net_main_module`` as bound
    methods, reset its counters, and return the same module for chaining."""
    counting_funcs = (
        ('start_flops_count', start_flops_count),
        ('stop_flops_count', stop_flops_count),
        ('reset_flops_count', reset_flops_count),
        ('compute_average_flops_cost', compute_average_flops_cost),
    )
    for attr_name, func in counting_funcs:
        # __get__ produces a method bound to this specific module instance.
        setattr(net_main_module, attr_name, func.__get__(net_main_module))
    net_main_module.reset_flops_count()
    return net_main_module
def _report_unserialization_failure(type_name: str, report_class: Type[BaseReport], reportdict) -> NoReturn:
    """Raise a RuntimeError describing an unknown serialized report entry.

    Formats the offending type name, report class, and raw dict into a
    banner-delimited message asking the user to file a bug.

    Raises:
        RuntimeError: always.
    """
    # BUG FIX: the URL literal was truncated to a bare quote (syntax error);
    # restore the pytest issue-tracker address it points users at.
    url = 'https://github.com/pytest-dev/pytest/issues'
    stream = StringIO()
    pprint(('-' * 100), stream=stream)
    pprint(('INTERNALERROR: Unknown entry type returned: %s' % type_name), stream=stream)
    pprint(('report_name: %s' % report_class), stream=stream)
    pprint(reportdict, stream=stream)
    pprint(('Please report this bug at %s' % url), stream=stream)
    pprint(('-' * 100), stream=stream)
    raise RuntimeError(stream.getvalue())
def main(cmdline=None):
    """Devlink port-split selftest driver.

    Picks a devlink device (from --dev or the first one discovered), then for
    every port verifies that unsplittable ports refuse to split and that
    splittable ports split at every power-of-two lane count.
    """
    parser = make_parser()
    args = parser.parse_args(cmdline)
    dev = args.dev
    if (not dev):
        # No device given: autodetect the first devlink device.
        cmd = 'devlink -j dev show'
        (stdout, stderr) = run_command(cmd)
        assert (stderr == '')
        devs = json.loads(stdout)['dev']
        if devs:
            dev = list(devs.keys())[0]
        else:
            print('no devlink device was found, test skipped')
            sys.exit(KSFT_SKIP)
    # Sanity-check the chosen device actually exists.
    cmd = ('devlink dev show %s' % dev)
    (stdout, stderr) = run_command(cmd)
    if (stderr != ''):
        print(('devlink device %s can not be found' % dev))
        sys.exit(1)
    ports = devlink_ports(dev)
    for port in ports.if_names:
        max_lanes = get_max_lanes(port.name)
        if (max_lanes == 0):
            # Port exposes no lane information; nothing to test.
            continue
        elif (max_lanes == 1):
            # Single-lane ports must not be splittable.
            test((not get_split_ability(port)), ('%s should not be able to split' % port.name))
            split_unsplittable_port(port, max_lanes)
        else:
            lane = max_lanes
            test(get_split_ability(port), ('%s should be able to split' % port.name))
            # Exercise every power-of-two split count down to 2 lanes.
            while (lane > 1):
                split_splittable_port(port, lane, max_lanes, dev)
                lane //= 2
class LLaMATokenizer():
    """Thin wrapper around a SentencePiece model for LLaMA tokenization."""

    def __init__(self, model_path: str):
        """Load the SentencePiece model at ``model_path`` and cache special ids."""
        assert os.path.isfile(model_path), model_path
        self.sp_model = SentencePieceProcessor(model_file=model_path)
        logger.info(f'Reloaded SentencePiece model from {model_path}')
        # Cache the vocabulary size and special-token ids.
        self.n_words: int = self.sp_model.vocab_size()
        self.bos_id: int = self.sp_model.bos_id()
        self.eos_id: int = self.sp_model.eos_id()
        self.pad_id: int = self.sp_model.pad_id()
        logger.info(f'#words: {self.n_words} - BOS ID: {self.bos_id} - EOS ID: {self.eos_id}')
        assert (self.sp_model.vocab_size() == self.sp_model.get_piece_size())

    def encode(self, s: str, bos: bool, eos: bool) -> List[int]:
        """Tokenize ``s``, optionally wrapping the ids with BOS/EOS markers."""
        assert (type(s) is str)
        token_ids = self.sp_model.encode(s)
        if bos:
            token_ids = ([self.bos_id] + token_ids)
        if eos:
            token_ids = (token_ids + [self.eos_id])
        return token_ids

    def decode(self, t: List[int]) -> str:
        """Map token ids back to text."""
        return self.sp_model.decode(t)
class Bert4RecDataloader():
    """Builds distributed train/val/test DataLoaders for BERT4Rec.

    ``dataset`` must provide 'train', 'val' and 'test' DataFrames.
    """

    def __init__(self, dataset: Dict[(str, Any)], train_batch_size: int, val_batch_size: int, test_batch_size: int) -> None:
        self.train: pd.DataFrame = dataset['train']
        self.val: pd.DataFrame = dataset['val']
        self.test: pd.DataFrame = dataset['test']
        self.train_batch_size: int = train_batch_size
        self.val_batch_size: int = val_batch_size
        self.test_batch_size: int = test_batch_size

    def get_pytorch_dataloaders(self, rank: int, world_size: int) -> Tuple[(data_utils.DataLoader, data_utils.DataLoader, data_utils.DataLoader)]:
        """Return (train, val, test) loaders sharded for this rank."""
        train_loader = self._get_train_loader(rank, world_size)
        val_loader = self._get_val_loader(rank, world_size)
        test_loader = self._get_test_loader(rank, world_size)
        return (train_loader, val_loader, test_loader)

    def _get_train_loader(self, rank: int, world_size: int) -> data_utils.DataLoader:
        # Construct the dataset once so the sampler and the loader index the
        # exact same object (the original built it twice).
        dataset = Bert4RecTrainDataset(self.train)
        sampler = DistributedSampler(dataset, num_replicas=world_size, rank=rank, shuffle=True, drop_last=False)
        return data_utils.DataLoader(dataset, batch_size=self.train_batch_size, pin_memory=True, sampler=sampler)

    def _get_val_loader(self, rank: int, world_size: int) -> data_utils.DataLoader:
        # BUG FIX: the sampler was built over Bert4RecTrainDataset while the
        # loader served BertEvalDataset — if their lengths differ the sampler
        # produces wrong or missing indices per rank. Sample the eval dataset.
        dataset = BertEvalDataset(self.val)
        sampler = DistributedSampler(dataset, num_replicas=world_size, rank=rank, shuffle=False, drop_last=False)
        return data_utils.DataLoader(dataset, batch_size=self.val_batch_size, pin_memory=True, sampler=sampler)

    def _get_test_loader(self, rank: int, world_size: int) -> data_utils.DataLoader:
        # Same dataset/sampler mismatch fix as _get_val_loader.
        dataset = BertEvalDataset(self.test)
        sampler = DistributedSampler(dataset, num_replicas=world_size, rank=rank, shuffle=False, drop_last=False)
        return data_utils.DataLoader(dataset, batch_size=self.test_batch_size, pin_memory=True, sampler=sampler)
class _Coefficients():
LUTS: list[np.ndarray] = []
COEFF_INDEX_MAP: dict[(int, dict[(Union[(tuple, str)], int)])] = {}
def __init__(self, wavelength_range, resolution=0):
self._wv_range = wavelength_range
self._resolution = resolution
def __call__(self):
idx = self._find_coefficient_index(self._wv_range, resolution=self._resolution)
band_luts = [lut_array[idx] for lut_array in self.LUTS]
return band_luts
def _find_coefficient_index(self, wavelength_range, resolution=0):
index_map = self.COEFF_INDEX_MAP
for res in sorted(index_map.keys()):
if (resolution <= res):
index_map = index_map[res]
break
else:
raise ValueError('Unrecognized data resolution: {}', resolution)
if isinstance(wavelength_range, str):
return index_map[wavelength_range]
for (lut_wvl_range, v) in index_map.items():
if isinstance(lut_wvl_range, str):
continue
if (wavelength_range[1] in lut_wvl_range):
return v
raise ValueError(f"Can't find LUT for {wavelength_range}.") |
def numbered_glob(pattern, last=True, decimal=False, every=False):
    """Glob ``pattern`` (containing one '*') and pick the file whose starred
    part parses to the largest (``last=True``) or smallest number.

    Args:
        pattern: glob pattern whose single '*' stands for a number.
        last: pick the maximum when True, the minimum when False.
        decimal: parse the number as a fraction (bare digits mean '0.<digits>').
        every: return all (filename, number) pairs instead of the best one.

    Returns:
        (best_number, best_filename), or the full list when ``every`` is set.

    Raises:
        IOError: when nothing matches the pattern.
    """
    number_group = '(\\d+(?:\\.\\d*)?)'
    # Turn the glob into a regex: escape the literal halves around the '*'.
    file_regex = number_group.join((re.escape(part) for part in pattern.split('*', 1)))
    best_name = None
    best_value = None
    matches = []
    for candidate in glob.glob(pattern):
        hit = re.match(file_regex, candidate)
        if not hit:
            continue
        text = hit.group(1)
        if decimal:
            # Bare digits are interpreted as the fractional part.
            value = float(text) if ('.' in text) else float(('0.' + text))
        else:
            value = int(text.strip('.'))
        matches.append((candidate, value))
        # `(best < value) == last` picks max when last=True, min otherwise.
        if (best_value is None) or ((best_value < value) == last):
            best_value = value
            best_name = candidate
    if every:
        return matches
    if best_name is None:
        raise IOError(pattern)
    return (best_value, best_name)
class MBConvBlockWithoutDepthwise(MBConvBlock):
    """MBConv variant with the depthwise convolution removed: expansion is a
    full 3x3 conv and projection a 1x1 conv that carries the block's stride."""

    def _build(self):
        """Create the block's layers (invoked once before `call`)."""
        filters = (self._block_args.input_filters * self._block_args.expand_ratio)
        if (self._block_args.expand_ratio != 1):
            # Expansion phase: 3x3 conv replaces the usual 1x1 + depthwise pair.
            self._expand_conv = tf.layers.Conv2D(filters, kernel_size=[3, 3], strides=[1, 1], kernel_initializer=conv_kernel_initializer, padding='same', use_bias=False)
            self._bn0 = self._batch_norm(axis=self._channel_axis, momentum=self._batch_norm_momentum, epsilon=self._batch_norm_epsilon)
        filters = self._block_args.output_filters
        # Projection phase: 1x1 conv applies the block stride.
        self._project_conv = tf.layers.Conv2D(filters, kernel_size=[1, 1], strides=self._block_args.strides, kernel_initializer=conv_kernel_initializer, padding='same', use_bias=False)
        self._bn1 = self._batch_norm(axis=self._channel_axis, momentum=self._batch_norm_momentum, epsilon=self._batch_norm_epsilon)

    def call(self, inputs, training=True, survival_prob=None):
        """Forward pass; returns the block's output tensor.

        Args:
            inputs: input feature tensor.
            training: batch-norm training-mode flag.
            survival_prob: stochastic-depth keep probability (optional).
        """
        logging.info('Block input: %s shape: %s', inputs.name, inputs.shape)
        if (self._block_args.expand_ratio != 1):
            x = self._relu_fn(self._bn0(self._expand_conv(inputs), training=training))
        else:
            x = inputs
        logging.info('Expand: %s shape: %s', x.name, x.shape)
        # Expose the expansion output for feature extraction.
        self.endpoints = {'expansion_output': x}
        x = self._bn1(self._project_conv(x), training=training)
        x = tf.identity(x)
        if self._clip_projection_output:
            # Optionally clip to [-6, 6] (quantization friendliness).
            x = tf.clip_by_value(x, (- 6), 6)
        if self._block_args.id_skip:
            # Residual connection only when the shape is preserved
            # (stride 1 everywhere and matching channel counts).
            if (all(((s == 1) for s in self._block_args.strides)) and (self._block_args.input_filters == self._block_args.output_filters)):
                if survival_prob:
                    x = utils.drop_connect(x, training, survival_prob)
                x = tf.add(x, inputs)
        logging.info('Project: %s shape: %s', x.name, x.shape)
        return x
class TestModelValidation(TestCase):
    """Validate SMT-LIB model parsing + substitution: a parsed model, applied
    to a formula, must simplify to TRUE."""

    def setUp(self):
        super(TestModelValidation, self).setUp()
        self.env = get_env()
        self.tm = self.env.type_manager
        self.fm = self.env.formula_manager

    def test_basic(self):
        """Symbols declared before the model is parsed."""
        model_source = '(model\n ;; universe for U:\n ;; (as U) (as U)\n (define-fun b () U\n (as U))\n (define-fun a () U\n (as U))\n (define-fun f ((x!0 U)) U\n (ite (= x!0 (as U)) (as U)\n (as U)))\n)\n'
        model_buf = StringIO(model_source)
        parser = SmtLibParser()
        simplifier = SmtLibModelValidationSimplifier(self.env)
        # Uninterpreted sort U with constants a, b and a unary function f.
        U = self.tm.Type('U', 0)
        a = self.fm.Symbol('a', U)
        b = self.fm.Symbol('b', U)
        f = self.fm.Symbol('f', FunctionType(U, [U]))
        formula = self.fm.And(self.fm.Not(self.fm.Equals(a, b)), self.fm.Equals(self.fm.Function(f, [a]), b))
        (model, interpretations) = parser.parse_model(model_buf)
        simp = simplifier.simplify(formula.substitute(model, interpretations))
        self.assertEqual(simp, self.fm.TRUE())

    def test_basic2(self):
        """Same check, but the model is parsed before the symbols are declared
        (exercises symbol resolution order)."""
        model_source = '(model\n ;; universe for U:\n ;; (as U) (as U)\n (define-fun b () U\n (as U))\n (define-fun a () U\n (as U))\n (define-fun f ((x!0 U)) U\n (ite (= x!0 (as U)) (as U)\n (as U)))\n)\n'
        model_buf = StringIO(model_source)
        parser = SmtLibParser()
        simplifier = SmtLibModelValidationSimplifier(self.env)
        U = self.tm.Type('U', 0)
        (model, interpretations) = parser.parse_model(model_buf)
        a = self.fm.Symbol('a', U)
        b = self.fm.Symbol('b', U)
        f = self.fm.Symbol('f', FunctionType(U, [U]))
        formula = self.fm.And(self.fm.Not(self.fm.Equals(a, b)), self.fm.Equals(self.fm.Function(f, [a]), b))
        simp = simplifier.simplify(formula.substitute(model, interpretations))
        self.assertEqual(simp, self.fm.TRUE())
def test_filerewriter_files_in_to_out_edit_dir_slash(temp_dir, temp_file_creator):
    """files_in_to_out with a glob input and a trailing-slash directory output
    formats both files and logs a single summary line."""
    formatter_rewriter = ArbRewriter('formatter')
    temp_file_creator()
    temp_file_creator()
    glob_in = temp_dir.joinpath('*')
    dir_out = str(temp_dir) + os.sep
    with patch_logger('pypyr.utils.filesystem', logging.INFO) as mock_logger_info:
        formatter_rewriter.files_in_to_out(glob_in, dir_out)
    expected_message = f'read {glob_in}, formatted and wrote 2 file(s) to {dir_out}'
    assert mock_logger_info.mock_calls == [call(expected_message)]
    formatter_rewriter.assert_in_to_out_call_count(2)
def test_date_convert_parity():
    """Both date-conversion backends (dateutil-based and pandas-based) must
    agree on a real TRF header timestamp."""
    # Uses a packaged sample file; pymedphys.data_path may download it.
    path = pymedphys.data_path('negative-metersetmap.trf')
    (header, _) = pymedphys.trf.read(path)
    utc_date = header['date'][0]
    timezone = 'Australia/Sydney'
    dateutil_version = identify._date_convert_using_dateutil(utc_date, timezone)
    pandas_version = identify.date_convert(utc_date, timezone)
    assert (dateutil_version == pandas_version)
class DNN(Network):
    """Plain fully-connected network for the Network family: optional shared
    trunk, a shrinking Dense/BatchNorm/Dropout head, and a final Dense output."""

    def __init__(self, *args, **kwargs):
        """Assemble input -> head -> output Dense layer and compile with SGD."""
        super().__init__(*args, **kwargs)
        inp = None
        output = None
        if (self.shared_network is None):
            inp = Input((self.input_dim,))
            output = self.get_network_head(inp).output
        else:
            # Reuse the shared trunk's input/output tensors.
            inp = self.shared_network.input
            output = self.shared_network.output
        output = Dense(self.output_dim, activation=self.activation, kernel_initializer='random_normal')(output)
        self.model = Model(inp, output)
        self.model.compile(optimizer=SGD(learning_rate=self.lr), loss=self.loss)

    @staticmethod
    def get_network_head(inp):
        """Build the 256-128-64-32 sigmoid/BatchNorm/Dropout trunk as a Model.

        BUG FIX: this was declared without @staticmethod although it takes no
        `self`, so `self.get_network_head(inp)` in __init__ passed the DNN
        instance as `inp` and construction failed.
        """
        output = Dense(256, activation='sigmoid', kernel_initializer='random_normal')(inp)
        output = BatchNormalization()(output)
        output = Dropout(0.1)(output)
        output = Dense(128, activation='sigmoid', kernel_initializer='random_normal')(output)
        output = BatchNormalization()(output)
        output = Dropout(0.1)(output)
        output = Dense(64, activation='sigmoid', kernel_initializer='random_normal')(output)
        output = BatchNormalization()(output)
        output = Dropout(0.1)(output)
        output = Dense(32, activation='sigmoid', kernel_initializer='random_normal')(output)
        output = BatchNormalization()(output)
        output = Dropout(0.1)(output)
        return Model(inp, output)

    def train_on_batch(self, x, y):
        """Flatten samples to (batch, input_dim), then defer to the base class."""
        x = np.array(x).reshape(((- 1), self.input_dim))
        return super().train_on_batch(x, y)

    def predict(self, sample):
        """Predict for a single sample reshaped to (1, input_dim)."""
        sample = np.array(sample).reshape((1, self.input_dim))
        return super().predict(sample)
def test_custom_dataset():
    """BaseDataset smoke test: length/flags, train/test item preparation,
    index stepping, side-effect-free format_results, and NotImplementedError
    from the abstract evaluate()."""
    tmp_dir = tempfile.TemporaryDirectory()
    ann_file = osp.join(tmp_dir.name, 'fake_data.txt')
    _create_dummy_ann_file(ann_file)
    loader = _create_dummy_loader()
    # Exercise both test_mode settings with an empty pipeline.
    for mode in [True, False]:
        dataset = BaseDataset(ann_file, loader, pipeline=[], test_mode=mode)
        assert (len(dataset) == len(dataset.data_infos))
        assert np.allclose(dataset.flag, [0, 0])
        expect_results = {'img_info': {'file_name': 'sample1.jpg', 'text': 'hello'}, 'img_prefix': ''}
        # With an empty pipeline both preparation paths return the raw info.
        assert (dataset.prepare_train_img(0) == expect_results)
        assert (dataset.prepare_test_img(0) == expect_results)
        assert (dataset[0] == expect_results)
        assert (dataset._get_next_index(0) == 1)
        # format_results must not mutate its argument.
        expect_results_copy = {key: value for (key, value) in expect_results.items()}
        dataset.format_results(expect_results)
        assert (expect_results_copy == expect_results)
        with pytest.raises(NotImplementedError):
            dataset.evaluate(expect_results)
    tmp_dir.cleanup()
def roll(*args, **kwargs):
    """Dispatch a rolling-window computation to the ndarray or pandas backend.

    Positional args are one or two return series (both must share a type).

    Keyword Args:
        function: callable applied to each window.
        window: window length.

    Raises:
        ValueError: on more than two return sets, or mismatched arg types.
    """
    func = kwargs.pop('function')
    window = kwargs.pop('window')
    num_sets = len(args)
    if num_sets > 2:
        raise ValueError('Cannot pass more than 2 return sets')
    if num_sets == 2 and not isinstance(args[0], type(args[1])):
        raise ValueError('The two returns arguments are not the same.')
    # ndarrays go to the numpy backend, everything else to pandas.
    if isinstance(args[0], np.ndarray):
        return _roll_ndarray(func, window, *args, **kwargs)
    return _roll_pandas(func, window, *args, **kwargs)
class TVolume(TestCase):
    """Volume widget tests against a NullPlayer backend: set/get round-trips,
    +=/-= deltas, and clamping at both ends of [0, 1]."""

    def setUp(self):
        self.p = NullPlayer()
        self.v = Volume(self.p)

    def test_setget(self):
        # Round-trip several levels (including one past 1.0).
        for level in [0.0, 1.2, 0.24, 1.0, 0.9]:
            self.v.set_value(level)
            self.assertAlmostEqual(self.p.volume, self.v.get_value())

    def test_add(self):
        self.v.set_value(0.5)
        self.v += 0.1
        self.assertAlmostEqual(self.p.volume, 0.6)

    def test_sub(self):
        self.v.set_value(0.5)
        self.v -= 0.1
        self.assertAlmostEqual(self.p.volume, 0.4)

    def test_add_boundry(self):
        # Increment past the top clamps to 1.0.
        self.v.set_value(0.95)
        self.v += 0.1
        self.assertAlmostEqual(self.p.volume, 1.0)

    def test_sub_boundry(self):
        # Decrement past the bottom clamps to 0.0.
        self.v.set_value(0.05)
        self.v -= 0.1
        self.assertAlmostEqual(self.p.volume, 0.0)

    def tearDown(self):
        self.p.destroy()
        self.v.destroy()
def cached_function(inputs, outputs):
    """Compile a Theano function, memoizing the compiled result on disk.

    The cache key hashes the pretty-printed output expression(s); entries live
    under ~/.hierctrl_cache. A corrupt or unreadable cache entry falls through
    to a fresh compile, which then overwrites it.
    """
    import theano
    with Message('Hashing theano fn'):
        # Pretty-print the graph(s) to get a stable, hashable description.
        if hasattr(outputs, '__len__'):
            hash_content = tuple(map(theano.pp, outputs))
        else:
            hash_content = theano.pp(outputs)
    # Mask to 64 bits and drop the trailing 'L'-era character from hex().
    cache_key = hex((hash(hash_content) & ((2 ** 64) - 1)))[:(- 1)]
    # NOTE(review): mkdir_p suggests path.py's Path rather than pathlib — confirm.
    cache_dir = Path('~/.hierctrl_cache')
    cache_dir = cache_dir.expanduser()
    cache_dir.mkdir_p()
    cache_file = (cache_dir / ('%s.pkl' % cache_key))
    if cache_file.exists():
        with Message('unpickling'):
            with open(cache_file, 'rb') as f:
                try:
                    return pickle.load(f)
                except Exception:
                    # Corrupt/incompatible pickle: fall through and recompile.
                    pass
    with Message('compiling'):
        fun = compile_function(inputs, outputs)
    with Message('picking'):
        with open(cache_file, 'wb') as f:
            pickle.dump(fun, f, protocol=pickle.HIGHEST_PROTOCOL)
    return fun
class Line(entity):
def __init__(self):
self.parent = False
self.children = []
self.feats = {}
self.featpaths = {}
self.finished = False
self.__parses = {}
self.__bestparse = {}
self.__boundParses = {}
def parse(self, meter=None, init=None):
wordtoks = self.wordtokens(include_punct=False)
numSyll = 0
if (not wordtoks):
return None
for wordtok in wordtoks:
wordtok_words = wordtok.children
if ((not wordtok_words) or (True in [word.isBroken() for word in wordtok_words])):
return None
numSyll += wordtok_words[0].getNumSyll()
(self.__parses[meter.id], self.__boundParses[meter.id]) = meter.parse(wordtoks, numSyll)
self.__bestparse[meter.id] = None
try:
self.__bestparse[meter.id] = self.__parses[meter.id][0]
except (KeyError, IndexError) as e:
try:
self.__bestparse[meter.id] = self.__boundParses[meter.id][0]
except (KeyError, IndexError) as e:
self.__bestparse[meter.id] = None
bp = self.__bestparse[meter.id]
if bp:
bp.set_wordtokens_to_best_word_options()
'def store_stats(self,meter,init):\n\t\ttextname=init.getName()\n\t\tif not textname: textname=str(self).replace(" ","_")\n\n\t\t## store stats\n\t\tif (not textname in init.meter_stats[\'lines\']):\n\t\t\tinit.meter_stats[\'lines\'][textname]={}\n\t\tif (not textname in init.meter_stats[\'positions\']):\n\t\t\tinit.meter_stats[\'positions\'][textname]={}\n\t\tif (not textname in init.meter_stats[\'texts\']):\n\t\t\tinit.meter_stats[\'texts\'][textname]={}\n\t\tif (not textname in init.meter_stats[\'_ot\']):\n\t\t\tinit.meter_stats[\'_ot\'][textname]=makeminlength("line",being.linelen)+"\tmeter\t"+init.ckeys+"\n"\n\n\t\ttry:\n\n\t\t\tparsedat=[]\n\t\t\tfor k,v in sorted(self.__bestparse[meter.id].constraintScores.items()):\n\t\t\t\tif (not k in init.meter_stats[\'texts\'][textname]):\n\t\t\t\t\tinit.meter_stats[\'texts\'][textname][k]=[]\n\t\t\t\tinit.meter_stats[\'texts\'][textname][k].append(v)\n\n\t\t\t\t#parsedat.append(v/len(self.__bestparse.positions))\t#???\n\t\t\t\tparsedat.append(v)\n\n\t\t\tlinekey=str(len(init.meter_stats[\'lines\'][textname])+1).zfill(6)+"_"+str(self.__bestparse[meter.id].posString())\n\t\t\tinit.meter_stats[\'lines\'][textname][linekey]=parsedat\n\n\t\t\t## OT stats\n\t\t\tparses=self.__parses[meter.id]\n\t\t\tinit.meter_stats[\'_ot\'][textname]+=makeminlength(str(self),being.linelen)+"\t"+parses[0].str_ot()+"\n"\n\t\t\tif len(parses)>1:\n\t\t\t\tfor parse in parses[1:]:\n\t\t\t\t\tinit.meter_stats[\'_ot\'][textname]+=makeminlength("",being.linelen)+"\t"+parse.str_ot()+"\n"\n\n\n\n\t\t\tfor posn in range(len(self.__bestparse[meter.id].positions)):\n\t\t\t\tpos=self.__bestparse[meter.id].positions[posn]\n\t\t\t\t(posdat,ckeys)=pos.formatConstraints(normalize=True,getKeys=True)\n\n\t\t\t\tfor cnum in range(len(ckeys)):\n\t\t\t\t\tif (not posn in init.meter_stats[\'positions\'][textname]):\n\t\t\t\t\t\tinit.meter_stats[\'positions\'][textname][posn]={}\n\t\t\t\t\tif (not ckeys[cnum] in 
init.meter_stats[\'positions\'][textname][posn]):\n\t\t\t\t\t\tinit.meter_stats[\'positions\'][textname][posn][ckeys[cnum]]=[]\n\t\t\t\t\tinit.meter_stats[\'positions\'][textname][posn][ckeys[cnum]].append(posdat[cnum])\n\t\texcept (IndexError,KeyError,AttributeError) as e:\n\t\t\t#print "!! no lines successfully parsed with any meter"\n\t\t\tpass\n\t'
def scansion(self, meter=None, conscious=False):
    """Emit a one-line, tab-separated scansion report for this line.

    Columns: padded line text, padded best parse, the parse's meter
    string, the number of parses, the parse's violation count, its
    score, and its OT-style constraint string.  Output goes through
    self.om (the object's output method, defined elsewhere).
    """
    bp = self.bestParse(meter)
    config = being.config  # module-level configuration (defined elsewhere)
    # Defaults used when the line failed to parse (bp is None).
    lowestScore = ''
    str_ot = ''
    count = ''
    meterstr = ''
    if bp:
        meterstr = bp.str_meter()
        str_ot = bp.str_ot()
        lowestScore = bp.score()
        count = bp.totalCount
    from tools import makeminlength  # pads a string to a fixed column width
    data = [makeminlength(str(self), config['linelen']), makeminlength((str(bp) if bp else ''), config['linelen']), meterstr, len(self.allParses(meter)), count, lowestScore, str_ot]
    data_unicode = [str(x) for x in data]
    self.om('\t'.join(data_unicode), conscious=conscious)
def allParses(self, meter=None, one_per_meter=True):
    """Return the list of parses for *meter*.

    With no meter, return the parses of the first meter stored (or []
    when nothing has been parsed).  With one_per_meter, collapse
    parses sharing a meter string, dropping a parse that is bounded by
    another parse with the same meter string (a duplicate scansion).

    Always returns a list; [] on failure, consistent with
    boundParses().
    """
    if not meter:
        itms = list(self.__parses.items())
        if not itms:
            # BUG FIX: was a bare `return` (None) -- inconsistent with
            # the KeyError branch below and crashes callers that do
            # len(self.allParses(...)), e.g. scansion().
            return []
        for _mtr, parses in itms:
            return parses
    try:
        parses = self.__parses[meter.id]
        if one_per_meter:
            toreturn = []
            sofar = set()
            for _p in parses:
                _pm = _p.str_meter()
                if _pm not in sofar:
                    sofar.add(_pm)
                    # Skip a parse bounded by another parse with the
                    # same meter string.
                    if not (_p.isBounded and _p.boundedBy.str_meter() == _pm):
                        toreturn.append(_p)
            parses = toreturn
        return parses
    except KeyError:
        return []
def boundParses(self, meter=None):
    """Return the bounded (pruned) parses for *meter*; [] when none.

    With no meter, return the bounded parses of the first meter in
    sorted order, or [] when nothing has been parsed.
    """
    if not meter:
        for _key, bounded in sorted(self.__boundParses.items()):
            return bounded
        return []
    try:
        return self.__boundParses[meter.id]
    except KeyError:
        return []
def bestParse(self, meter=None):
    """Return the best parse for *meter*, or None when unavailable.

    With no meter, return the best parse of the first meter stored.
    """
    if not meter:
        for _mtr, best in self.__bestparse.items():
            return best
        return None
    try:
        return self.__bestparse[meter.id]
    except KeyError:
        return None
def finish(self):
    """Mark this object finished and compute its `broken` flag.

    The object is broken when it has no children or any of its words
    is broken.  Idempotent: returns immediately once `finished` is set.
    """
    if getattr(self, 'finished', False):
        return
    self.finished = True
    if not hasattr(self, 'broken'):
        self.broken = False
    if len(self.children) == 0:
        self.broken = True
    if not self.broken:
        for word_group in self.words(flattenList=False):
            assert type(word_group) == list
            if any(w.isBroken() for w in word_group):
                self.broken = True
def __repr__(self):
    """Return the line's plain text (see txt())."""
    # BUG FIX: txt is a method (defined below), not a property, so it
    # must be called; returning the bound method makes repr() raise
    # TypeError (__repr__ returned non-string).
    return self.txt()
def txt(self):
    """Rebuild the line's text from its word tokens.

    Word tokens are separated by single spaces; punctuation tokens
    attach directly to the preceding token.  Leading/trailing
    whitespace is stripped.
    """
    pieces = []
    for tok in self.wordtokens():
        pieces.append(tok.token if tok.is_punct else ' ' + tok.token)
    return ''.join(pieces).strip()
def str_wordbound(self):
    """Return one 'X' per syllable, with '#' marking word boundaries."""
    return '#'.join('X' * len(word.children) for word in self.words())
def str_weight(self, word_sep=''):
    """Concatenate every syllable's weight string, words joined by *word_sep*."""
    parts = []
    for word in self.words():
        parts.append(''.join(syll.str_weight() for syll in word.children))
    return word_sep.join(parts)
def str_stress(self, word_sep=''):
    """Concatenate every syllable's stress string, words joined by *word_sep*."""
    parts = []
    for word in self.words():
        parts.append(''.join(syll.str_stress() for syll in word.children))
    return word_sep.join(parts)
def str_sonority(self, word_sep=''):
    """Concatenate every syllable's sonority string, words joined by *word_sep*."""
    parts = []
    for word in self.words():
        parts.append(''.join(syll.str_sonority() for syll in word.children))
    return word_sep.join(parts)
def test_tagulous_in_migrations(apps, schema_editor):
    """RunPython migration callback: assert that the historical model
    rendered by the migration framework still carries tagulous's tag
    descriptors and tag models.

    NOTE(review): assertIsSubclass / assertIsInstance are module-level
    helper functions defined elsewhere in this test module, not
    TestCase methods.
    """
    model = apps.get_model('tagulous_tests_migration', 'MigrationTestModel')
    assertIsSubclass(model, tagulous.models.TaggedModel)
    # Single-tag field: descriptor plus a flat tag model.
    assertIsInstance(model.singletag, tagulous.models.SingleTagDescriptor)
    assertIsSubclass(model.singletag.tag_model, tagulous.models.BaseTagModel)
    # Multi-tag field: descriptor plus a tree-capable tag model.
    assertIsInstance(model.tags, tagulous.models.TagDescriptor)
    assertIsSubclass(model.tags.tag_model, tagulous.models.BaseTagTreeModel)
class TestInline():
    """Content-Disposition header tests for `inline` dispositions,
    exercised through the header_checker fixture (defined elsewhere)."""

    def test_inlonly(self, header_checker):
        # A bare disposition type without a filename is ignored.
        header_checker.check_ignored('inline')

    def test_inlonlyquoted(self, header_checker):
        # A quoted disposition type is malformed and therefore ignored.
        header_checker.check_ignored('"inline"')

    def test_inlwithasciifilename(self, header_checker):
        header_checker.check_filename('inline; filename="foo.html"', 'foo.html', expected_inline=True)

    def test_inlwithfnattach(self, header_checker):
        # The filename may contain spaces and punctuation; still inline.
        header_checker.check_filename('inline; filename="Not an attachment!"', 'Not an attachment!', expected_inline=True)

    def test_inlwithasciifilenamepdf(self, header_checker):
        header_checker.check_filename('inline; filename="foo.pdf"', 'foo.pdf', expected_inline=True)
class Discriminator(nn.Module):
    """Spectral-norm CNN discriminator.

    A stack of stride-2 4x4 convolutions halves the spatial size
    `repeat_num` times, then a single convolution with a kernel
    covering the whole remaining map produces (num_classes + 1) logits.
    """

    def __init__(self, num_classes, image_size=224, conv_dim=64, repeat_num=5):
        super(Discriminator, self).__init__()
        blocks = [
            SpectralNorm(nn.Conv2d(3, conv_dim, kernel_size=4, stride=2, padding=1)),
            nn.LeakyReLU(0.01),
        ]
        channels = conv_dim
        for _ in range(1, repeat_num):
            # Each stage halves H/W and doubles the channel count.
            blocks.append(SpectralNorm(nn.Conv2d(channels, channels * 2, kernel_size=4, stride=2, padding=1)))
            blocks.append(nn.LeakyReLU(0.01))
            channels *= 2
        final_kernel = int(image_size / (2 ** repeat_num))
        self.main = nn.Sequential(*blocks)
        self.fc = nn.Conv2d(channels, num_classes + 1, kernel_size=final_kernel, bias=False)

    def forward(self, x):
        """Return squeezed per-class logits for image batch *x*."""
        features = self.main(x)
        logits = self.fc(features)
        return logits.squeeze()
class BookSettings():
    """UI state for opening-book configuration: the sort order and the
    raw multi-line books text, plus parsing that text into Book objects."""

    def __init__(self) -> None:
        self.order: Order = Order.SHORT_TO_LONG
        self.books_string: str = 'Book A\n1. e4 e5\n\nBook B\n1. e4 e5 2. f4'

    def order_callback(self, _, order_value):
        # Widget callback: coerce the raw value into the Order enum.
        self.order = Order(order_value)

    def books_string_callback(self, _, books_string):
        # Widget callback: keep the text exactly as typed.
        self.books_string = books_string

    def get_books(self) -> List[Book]:
        """Parse books_string into Books.

        Non-empty lines are consumed in pairs: a name line followed by
        a moves line (None when the moves line is missing).
        """
        stripped = (line.strip() for line in self.books_string.splitlines())
        meaningful = iter([line for line in stripped if line])
        return [Book(name, next(meaningful, None)) for name in meaningful]
class CDAE(nn.Module):
    """Cross-domain denoising autoencoder for recommendation.

    Two encoder/decoder pairs (x = movies, y = books) share a per-user
    embedding added to the encoded interaction vector.  Two square
    matrices map the latent codes across domains; an L1 cycle penalty
    encourages the maps to invert each other.
    """

    def __init__(self, NUM_USER, NUM_MOVIE, NUM_BOOK, EMBED_SIZE, dropout, is_sparse=False):
        super(CDAE, self).__init__()
        self.NUM_MOVIE = NUM_MOVIE
        self.NUM_BOOK = NUM_BOOK
        self.NUM_USER = NUM_USER
        self.emb_size = EMBED_SIZE
        self.user_embeddings = nn.Embedding(self.NUM_USER, EMBED_SIZE, sparse=is_sparse)
        # N(0, 0.01) initialisation, matching the original numpy-based init.
        self.user_embeddings.weight.data = torch.from_numpy(np.random.normal(0, 0.01, size=[self.NUM_USER, EMBED_SIZE])).float()
        self.encoder_x = nn.Sequential(nn.Linear(self.NUM_MOVIE, EMBED_SIZE), nn.ReLU(), nn.Linear(EMBED_SIZE, EMBED_SIZE))
        self.decoder_x = nn.Sequential(nn.Linear(EMBED_SIZE, EMBED_SIZE), nn.ReLU(), nn.Linear(EMBED_SIZE, self.NUM_MOVIE))
        self.encoder_y = nn.Sequential(nn.Linear(self.NUM_BOOK, EMBED_SIZE), nn.ReLU(), nn.Linear(EMBED_SIZE, EMBED_SIZE))
        self.decoder_y = nn.Sequential(nn.Linear(EMBED_SIZE, EMBED_SIZE), nn.ReLU(), nn.Linear(EMBED_SIZE, self.NUM_BOOK))
        device = torch.device('cuda' if torch.cuda.is_available() else 'cpu')
        # xavier_uniform_ replaces the deprecated nn.init.xavier_uniform.
        self.orthogonal_w1 = nn.Parameter(nn.init.xavier_uniform_(torch.empty(EMBED_SIZE, EMBED_SIZE, device=device), gain=np.sqrt(2.0)), requires_grad=True)
        self.orthogonal_w2 = nn.Parameter(nn.init.xavier_uniform_(torch.empty(EMBED_SIZE, EMBED_SIZE, device=device), gain=np.sqrt(2.0)), requires_grad=True)
        self.dropout = nn.Dropout(dropout)
        # BUG FIX: was `nn.ReLU` (the class object); instantiate the module
        # so `self.relu(x)` applies the activation instead of building a module.
        self.relu = nn.ReLU()

    def orthogonal_map(self, z_x, z_y):
        """Map each domain's latent code into the other domain's space."""
        mapped_z_x = torch.matmul(z_x, self.orthogonal_w1)
        mapped_z_y = torch.matmul(z_y, self.orthogonal_w2)
        return (mapped_z_x, mapped_z_y)

    def forward(self, batch_user, batch_user_x, batch_user_y):
        """Run both autoencoders plus the cross-domain reconstructions.

        Returns in-domain predictions, cross-domain predictions, the
        pre-activation features, and per-sample L1 cycle losses.
        """
        h_user_x = self.encoder_x(self.dropout(batch_user_x))
        h_user_y = self.encoder_y(self.dropout(batch_user_y))
        h_user = self.user_embeddings(batch_user)
        feature_x = torch.add(h_user_x, h_user)
        feature_y = torch.add(h_user_y, h_user)
        z_x = F.relu(feature_x)
        z_y = F.relu(feature_y)
        preds_x = self.decoder_x(z_x)
        preds_y = self.decoder_y(z_y)
        (mapped_z_x, mapped_z_y) = self.orthogonal_map(z_x, z_y)
        preds_x2y = self.decoder_y(mapped_z_x)
        preds_y2x = self.decoder_x(mapped_z_y)
        # Cycle consistency: mapping forth and back should recover z.
        z_x_ = torch.matmul(mapped_z_x, self.orthogonal_w2)
        z_y_ = torch.matmul(mapped_z_y, self.orthogonal_w1)
        z_x_reg_loss = torch.norm(z_x - z_x_, p=1, dim=1)
        z_y_reg_loss = torch.norm(z_y - z_y_, p=1, dim=1)
        return (preds_x, preds_y, preds_x2y, preds_y2x, feature_x, feature_y, z_x_reg_loss, z_y_reg_loss)

    def get_user_embedding(self, batch_user_x, batch_user_y):
        """Return the encoded (pre-fusion) interaction features of both domains."""
        h_user_x = self.encoder_x(self.dropout(batch_user_x))
        h_user_y = self.encoder_y(self.dropout(batch_user_y))
        return (h_user_x, h_user_y)
def assert_mirror(original: NettingChannelState, mirror: NettingChannelState) -> None:
    """Assert that *mirror* is the counterparty's view of *original*.

    Locked amounts, balances, current balance proofs and distributable
    amounts must agree once the our/partner roles are swapped.
    """
    original_locked_amount = channel.get_amount_locked(original.our_state)
    mirror_locked_amount = channel.get_amount_locked(mirror.partner_state)
    assert (original_locked_amount == mirror_locked_amount)
    balance0 = channel.get_balance(original.our_state, original.partner_state)
    balance1 = channel.get_balance(mirror.partner_state, mirror.our_state)
    assert (balance0 == balance1)
    balanceproof0 = channel.get_current_balanceproof(original.our_state)
    balanceproof1 = channel.get_current_balanceproof(mirror.partner_state)
    assert (balanceproof0 == balanceproof1)
    distributable0 = channel.get_distributable(original.our_state, original.partner_state)
    distributable1 = channel.get_distributable(mirror.partner_state, mirror.our_state)
    assert (distributable0 == distributable1)
def affiliation_recall_distance(Is=[(1, 2), (3, 4), (5, 6)], J=(2, 5.5)):
    """Affiliation-based recall distance from ground-truth intervals
    *Is* to the prediction *J*, normalised by the length of J.

    Returns math.inf when no ground-truth interval remains after
    filtering out None entries.

    NOTE(review): the mutable list default is never mutated here (Is is
    rebound to a new list), so it is harmless, but callers should pass
    Is explicitly.  get_all_E_gt_func / affiliation_partition /
    integral_interval_distance / interval_length are defined elsewhere
    in this module.
    """
    Is = [I for I in Is if (I is not None)]
    if (len(Is) == 0):
        return math.inf
    # Affinity zones covering the whole real line, one per ground-truth event.
    E_gt_recall = get_all_E_gt_func(Is, ((- math.inf), math.inf))
    # Split the single prediction J across those zones.
    Js = affiliation_partition([J], E_gt_recall)
    # The comprehension rebinds J locally; the denominator uses the outer J.
    return (sum([integral_interval_distance(J[0], I) for (I, J) in zip(Is, Js)]) / interval_length(J))
# NOTE(review): the line below looks like a handler-registration
# decorator that lost its leading '@' (and possibly an object prefix,
# e.g. '@Client.on_callback_query(...)') during extraction; as written
# it is a bare expression statement.  Confirm against the original.
_callback_query((tools.option_filter('show') & tools.is_admin))
def show_option(bot: AutoPoster, callback_query: CallbackQuery):
    """Callback-query handler: render the settings screen for one option.

    callback_query.data is '<action> <domain> <option>': data[1] is the
    domain ('global' or a specific source), data[2] the option name.

    NOTE(review): several user-facing strings below appear to have lost
    their non-ASCII text during extraction; verify before reuse.
    """
    data = callback_query.data.split()
    bot.reload_config()
    if (data[2] == 'send_reposts'):
        info = '** :**\n\n'
        # Yes / No toggle buttons for the repost setting.
        button_list = [InlineKeyboardButton('', callback_data='reposts {} 0'.format(data[1])), InlineKeyboardButton('', callback_data='reposts {} 1'.format(data[1]))]
        footer_buttons = [InlineKeyboardButton(' ', callback_data='reposts {} post_only'.format(data[1]))]
        button_list = tools.build_menu(button_list, n_cols=2, footer_buttons=footer_buttons)
        # Start from the global default; a domain may override below.
        option = bot.config['settings'].get('send_reposts', False)
        if (data[1] != 'global'):
            # Domain-specific view gets an extra "reset to global" button.
            button_list.append([InlineKeyboardButton(' ', callback_data='reposts {} reset'.format(data[1]))])
            if ('send_reposts' in bot.config['domains'][data[1]].keys()):
                option = bot.config['domains'][data[1]].get('send_reposts')
            else:
                info = messages.SOURCE_USE_GLOBAL_SETTINGS
        if (option == 'post_only'):
            info += (' ' + messages.PARTIAL_REPOSTS)
        elif (not option):
            info += ' '
        elif option:
            info += ' '
        reply_markup = InlineKeyboardMarkup(button_list)
        callback_query.edit_message_text(info, reply_markup=reply_markup)
    elif (data[2] == 'wts'):
        # "What to send" screen is built by a shared helper.
        (info, reply_markup) = tools.generate_what_to_send_info(bot, data[1])
        callback_query.edit_message_text(info, reply_markup=reply_markup, disable_web_page_preview=True)
def test_transform_matrix():
    """Check that wp.mat44(t, r, s) composes translation, rotation and
    scale consistently with transform_point / transform_vector.

    transform_point applies the full affine transform; transform_vector
    must apply only the linear part (no translation).
    """
    r = wp.quat_from_axis_angle(wp.vec3(1.0, 0.0, 0.0), 0.5)
    t = wp.vec3(0.25, 0.5, (- 0.75))
    s = wp.vec3(2.0, 0.5, 0.75)
    # Affine matrix built from translation / rotation / scale.
    m = wp.mat44(t, r, s)
    p = wp.vec3(1.0, 2.0, 3.0)
    # Manual reference: scale component-wise, rotate, then translate.
    r_0 = (wp.quat_rotate(r, wp.cw_mul(s, p)) + t)
    r_1 = wp.transform_point(m, p)
    r_2 = wp.transform_vector(m, p)
    wp.expect_near(r_0, r_1, 0.0001)
    wp.expect_near(r_2, (r_0 - t), 0.0001)
# NOTE(review): the three lines below look like pytest marker
# decorators (e.g. '@requires_solaranywhere_credentials',
# '@pytest.mark.remote_data', '@pytest.mark.flaky(...)') whose '@' and
# prefixes were lost during extraction; as written they are bare
# expression statements.  Confirm against the original source.
_solaranywhere_credentials
.remote_data
.flaky(reruns=RERUNS, reruns_delay=RERUNS_DELAY)
def test_get_solaranywhere_probability_exceedance_error(solaranywhere_api_key):
    """Requesting probability_of_exceedance with explicit start/end must
    raise: SolarAnywhere POE requests require start and end to be null."""
    with pytest.raises(ValueError, match='start and end time must be null'):
        (data, meta) = pvlib.iotools.get_solaranywhere(latitude=44.4675, longitude=(- 73.2075), api_key=solaranywhere_api_key, start=pd.Timestamp('2020-01-01 00:00:00+0000'), end=pd.Timestamp('2020-01-05 12:00:00+0000'), source='SolarAnywherePOELatest', probability_of_exceedance=20)
# NOTE(review): the two lines below look like garbled decorators --
# likely '@pytest.mark.skipif(...)' and a hypothesis
# '@given(everythings(min_int=..., max_int=..., allow_inf=False))' --
# whose '@' prefixes and integer literals were lost during extraction
# (note the empty 'min_int=(- )' and 'max_int='). As written this is
# not valid Python; restore from the original source.
.skipif((python_implementation() == 'PyPy'), reason='no orjson on PyPy')
(everythings(min_int=(- ), max_int=, allow_inf=False))
def test_orjson_converter_unstruct_collection_overrides(everything: Everything):
    """Unstructuring with AbstractSet -> sorted must serialise every
    set-like field as a sorted list (orjson cannot encode sets)."""
    from cattrs.preconf.orjson import make_converter as orjson_make_converter
    converter = orjson_make_converter(unstruct_collection_overrides={AbstractSet: sorted})
    raw = converter.unstructure(everything)
    assert (raw['a_set'] == sorted(raw['a_set']))
    assert (raw['a_mutable_set'] == sorted(raw['a_mutable_set']))
    assert (raw['a_frozenset'] == sorted(raw['a_frozenset']))
class Solution(object):
    """LeetCode 270: closest BST value."""

    def closestValue(self, root, target):
        """Return the value in the BST rooted at *root* closest to *target*.

        Walks the search path iteratively; on a distance tie the deeper
        node wins, matching the recursive formulation's preference for
        the child's result.
        """
        best = root.val
        node = root
        while node is not None:
            # '<=' so a deeper node wins ties (same as the recursive min()).
            if abs(node.val - target) <= abs(best - target):
                best = node.val
            node = node.left if target < node.val else node.right
        return best
class TestFlumeCollector(CollectorTestCase):
    """Diamond collector tests for FlumeCollector, with urllib2.urlopen
    patched to serve fixture JSON.

    NOTE(review): the bare '(Collector, ...)' tuples between methods
    look like '@patch.object(Collector, ...)' decorators whose '@' and
    'patch.object' prefix were lost during extraction, and several
    numeric values in the metrics dict below were dropped (empty
    "key: ," entries).  Restore from the original source.
    """

    def setUp(self):
        config = get_collector_config('FlumeCollector', {'interval': 10})
        self.collector = FlumeCollector(config, None)

    def test_import(self):
        self.assertTrue(FlumeCollector)

    (Collector, 'publish')
    (Collector, 'publish_gauge')
    (Collector, 'publish_counter')
    def test_collect_should_work(self, publish_mock, publish_gauge_mock, publish_counter_mock):
        # Serve the 'metrics' fixture instead of hitting the network.
        patch_urlopen = patch('urllib2.urlopen', Mock(return_value=self.getFixture('metrics')))
        patch_urlopen.start()
        self.collector.collect()
        patch_urlopen.stop()
        metrics = {'CHANNEL.channel1.ChannelFillPercentage': 0.0, 'CHANNEL.channel1.EventPutAttempt': , 'CHANNEL.channel1.EventPutSuccess': , 'CHANNEL.channel1.EventTakeAttempt': , 'CHANNEL.channel1.EventTakeSuccess': , 'SINK.sink1.BatchComplete': 251705, 'SINK.sink1.BatchEmpty': 76250, 'SINK.sink1.BatchUnderflow': 379, 'SINK.sink1.ConnectionClosed': 6, 'SINK.sink1.ConnectionCreated': 7, 'SINK.sink1.ConnectionFailed': 0, 'SINK.sink1.EventDrainAttempt': , 'SINK.sink1.EventDrainSuccess': , 'SOURCE.source1.AppendAccepted': 0, 'SOURCE.source1.AppendBatchAccepted': 56227, 'SOURCE.source1.AppendBatchReceived': 56258, 'SOURCE.source1.AppendReceived': 0, 'SOURCE.source1.EventAccepted': , 'SOURCE.source1.EventReceived': , 'SOURCE.source1.OpenConnection': 0}
        self.setDocExample(collector=self.collector.__class__.__name__, metrics=metrics, defaultpath=self.collector.config['path'])
        self.assertPublishedMany([publish_mock, publish_gauge_mock, publish_counter_mock], metrics)

    (Collector, 'publish')
    def test_blank_should_fail_gracefully(self, publish_mock):
        # An empty response must publish nothing rather than raise.
        patch_urlopen = patch('urllib2.urlopen', Mock(return_value=self.getFixture('metrics_blank')))
        patch_urlopen.start()
        self.collector.collect()
        patch_urlopen.stop()
        self.assertPublishedMany(publish_mock, {})

    (Collector, 'publish')
    def test_invalid_should_fail_gracefully(self, publish_mock):
        # Malformed JSON must publish nothing rather than raise.
        patch_urlopen = patch('urllib2.urlopen', Mock(return_value=self.getFixture('metrics_invalid')))
        patch_urlopen.start()
        self.collector.collect()
        patch_urlopen.stop()
        self.assertPublishedMany(publish_mock, {})
class CategoricalConditionalBatchNorm2d(ConditionalBatchNorm2d):
    """Conditional batch norm whose per-class gain and bias are looked
    up in embedding tables indexed by integer class ids."""

    def __init__(self, num_classes, num_features, eps=1e-05, momentum=0.1, affine=False, track_running_stats=True):
        super(CategoricalConditionalBatchNorm2d, self).__init__(num_features, eps, momentum, affine, track_running_stats)
        self.weights = nn.Embedding(num_classes, num_features)
        self.biases = nn.Embedding(num_classes, num_features)
        self._initialize()

    def _initialize(self):
        # Start as the identity transform: gain 1 and bias 0 for every class.
        init.ones_(self.weights.weight.data)
        init.zeros_(self.biases.weight.data)

    def forward(self, input, c, **kwargs):
        """Normalise *input* using the gain/bias rows selected by class ids *c*."""
        gain = self.weights(c)
        shift = self.biases(c)
        return super(CategoricalConditionalBatchNorm2d, self).forward(input, gain, shift)
class CheckCommand(Command):
    """The `poetry check` command: validate pyproject.toml content and,
    with --lock, its consistency with poetry.lock."""

    name = 'check'
    description = 'Validates the content of the <comment>pyproject.toml</> file and its consistency with the poetry.lock file.'
    options = [option('lock', None, 'Checks that <comment>poetry.lock</> exists for the current version of <comment>pyproject.toml</>.')]

    def _validate_classifiers(self, project_classifiers: set[str]) -> tuple[(list[str], list[str])]:
        """Check trove classifiers.

        Returns (errors, warnings): unrecognized classifiers are
        errors; deprecated ones yield replace/remove warnings.
        """
        from trove_classifiers import classifiers
        from trove_classifiers import deprecated_classifiers
        errors = []
        warnings = []
        unrecognized = sorted(((project_classifiers - set(classifiers)) - set(deprecated_classifiers)))
        # 'Private ::' classifiers are a PyPI convention to prevent upload; allow them.
        unrecognized = [u for u in unrecognized if (not u.startswith('Private ::'))]
        if unrecognized:
            errors.append(f'Unrecognized classifiers: {unrecognized!r}.')
        deprecated = sorted(project_classifiers.intersection(set(deprecated_classifiers)))
        if deprecated:
            for old_classifier in deprecated:
                new_classifiers = deprecated_classifiers[old_classifier]
                if new_classifiers:
                    message = f'Deprecated classifier {old_classifier!r}. Must be replaced by {new_classifiers!r}.'
                else:
                    message = f'Deprecated classifier {old_classifier!r}. Must be removed.'
                warnings.append(message)
        return (errors, warnings)

    def _validate_readme(self, readme: (str | list[str]), poetry_file: Path) -> list[str]:
        """Return an error per declared README file that does not exist
        next to pyproject.toml."""
        readmes = ([readme] if isinstance(readme, str) else readme)
        errors = []
        for name in readmes:
            if (not (poetry_file.parent / name).exists()):
                errors.append(f'Declared README file does not exist: {name}')
        return errors

    def _validate_dependencies_source(self, config: dict[(str, Any)]) -> list[str]:
        """Return an error per dependency `source` that does not match a
        repository declared under [[tool.poetry.source]]."""
        sources = {k['name'] for k in config.get('source', [])}
        dependency_declarations: list[dict[(str, ((str | dict[(str, str)]) | list[dict[(str, str)]]))]] = []
        # Main dependencies plus every dependency group.
        if ('dependencies' in config):
            dependency_declarations.append(config['dependencies'])
        for group in config.get('group', {}).values():
            if ('dependencies' in group):
                dependency_declarations.append(group['dependencies'])
        all_referenced_sources: set[str] = set()
        for dependency_declaration in dependency_declarations:
            for declaration in dependency_declaration.values():
                # A declaration is a version string, a table, or a list
                # of tables (multiple-constraint dependencies).
                if isinstance(declaration, list):
                    for item in declaration:
                        if ('source' in item):
                            all_referenced_sources.add(item['source'])
                elif (isinstance(declaration, dict) and ('source' in declaration)):
                    all_referenced_sources.add(declaration['source'])
        return [f'Invalid source "{source}" referenced in dependencies.' for source in sorted((all_referenced_sources - sources))]

    def handle(self) -> int:
        """Run all validations; print errors/warnings and return 1 on
        any error or warning, 0 when everything is clean."""
        from poetry.factory import Factory
        from poetry.pyproject.toml import PyProjectTOML
        poetry_file = self.poetry.file.path
        config = PyProjectTOML(poetry_file).poetry_config
        check_result = Factory.validate(config, strict=True)
        project_classifiers = set(config.get('classifiers', []))
        (errors, warnings) = self._validate_classifiers(project_classifiers)
        check_result['errors'].extend(errors)
        check_result['warnings'].extend(warnings)
        if ('readme' in config):
            errors = self._validate_readme(config['readme'], poetry_file)
            check_result['errors'].extend(errors)
        check_result['errors'] += self._validate_dependencies_source(config)
        # Lock-file checks: existence (only with --lock) and freshness.
        if (self.option('lock') and (not self.poetry.locker.is_locked())):
            check_result['errors'] += ['poetry.lock was not found.']
        if (self.poetry.locker.is_locked() and (not self.poetry.locker.is_fresh())):
            check_result['errors'] += ['poetry.lock is not consistent with pyproject.toml. Run `poetry lock [--no-update]` to fix it.']
        if ((not check_result['errors']) and (not check_result['warnings'])):
            self.info('All set!')
            return 0
        for error in check_result['errors']:
            self.line_error(f'<error>Error: {error}</error>')
        for error in check_result['warnings']:
            self.line_error(f'<warning>Warning: {error}</warning>')
        return 1
class TestAssertIs(TestCase):
    """Fixture code exercising assertIs call shapes (positional msg,
    keyword msg, compound expressions as arguments).

    NOTE(review): names such as `abc`, `xxx`, `aaa`, `You` are
    undefined here -- this looks like input for a code-rewriting or
    lint tool, not a runnable test suite.
    """

    def test_you(self):
        self.assertIs(abc, 'xxx')

    def test_me(self):
        # Arithmetic, boolean and conditional expressions as the second argument.
        self.assertIs(123, (xxx + y))
        self.assertIs(456, (aaa and bbb))
        self.assertIs(789, (ccc or ddd))
        self.assertIs(123, (True if You else False))

    def test_everybody(self):
        self.assertIs('abc', 'def')

    def test_message(self):
        # Message passed as keyword and as the third positional argument.
        self.assertIs((123 + z), (xxx + z), msg='This is wrong!')
        self.assertIs(123, (xxx + z), 'This is wrong!')
def recall_cap(qrels: Dict[str, Dict[str, int]], results: Dict[str, Dict[str, float]], k_values: List[int]) -> Dict[str, float]:
    """Compute capped recall@k ('R_cap@k') averaged over all queries.

    Per query, recall@k divides by min(#relevant, k) instead of
    #relevant, so a query with fewer than k relevant documents can
    still score 1.0.

    Args:
        qrels: query_id -> {doc_id: relevance}; relevance > 0 counts as relevant.
        results: query_id -> {doc_id: score}; higher scores rank earlier.
        k_values: the cutoffs k to evaluate.

    Returns:
        {'R_{k}': mean capped recall rounded to 5 decimals} per k.
        (Fixed return annotation: the function returns a dict, not a tuple.)
    """
    capped_recall = {f'R_{k}': 0.0 for k in k_values}
    k_max = max(k_values)
    logging.info('\n')
    for (query_id, doc_scores) in results.items():
        top_hits = sorted(doc_scores.items(), key=(lambda item: item[1]), reverse=True)[0:k_max]
        query_relevant_docs = [doc_id for doc_id in qrels[query_id] if (qrels[query_id][doc_id] > 0)]
        for k in k_values:
            retrieved_docs = [row[0] for row in top_hits[0:k] if (qrels[query_id].get(row[0], 0) > 0)]
            denominator = min(len(query_relevant_docs), k)
            # Guard: a query with no relevant documents would otherwise
            # raise ZeroDivisionError; it contributes 0 to the average.
            if denominator > 0:
                capped_recall[f'R_{k}'] += (len(retrieved_docs) / denominator)
    for k in k_values:
        capped_recall[f'R_{k}'] = round((capped_recall[f'R_{k}'] / len(results)), 5)
        logging.info('R_{}: {:.4f}'.format(k, capped_recall[f'R_{k}']))
    return capped_recall
class DecoderSPP(nn.Module):
    """DeepLabV3+-style decoder: upsample high-level features, fuse
    them with channel-reduced low-level features, and refine the result
    with two separable convolutions."""

    def __init__(self):
        super().__init__()
        # 1x1 projection shrinks the low-level map from 256 to 48 channels.
        self.conv = nn.Conv2d(256, 48, 1, bias=False)
        self.bn = nn.BatchNorm2d(48)
        self.relu = nn.ReLU(inplace=True)
        # 256 + 48 = 304 channels enter the first separable conv.
        self.sep1 = SeparableConv2d(304, 256, relu_first=False)
        self.sep2 = SeparableConv2d(256, 256, relu_first=False)

    def forward(self, x, low_level_feat):
        """Fuse *x* (high-level) with *low_level_feat*; returns 256 channels."""
        upsampled = F.interpolate(x, size=low_level_feat.shape[2:], mode='bilinear', align_corners=True)
        skip = self.relu(self.bn(self.conv(low_level_feat)))
        fused = torch.cat((upsampled, skip), dim=1)
        return self.sep2(self.sep1(fused))
class Model(nn.Module):
    """BiLSTM with additive self-attention for text classification."""

    def __init__(self, config):
        super(Model, self).__init__()
        if config.embedding_pretrained is not None:
            self.embedding = nn.Embedding.from_pretrained(config.embedding_pretrained, freeze=False)
        else:
            # The last vocabulary index serves as the padding token.
            self.embedding = nn.Embedding(config.n_vocab, config.embed, padding_idx=config.n_vocab - 1)
        self.lstm = nn.LSTM(config.embed, config.hidden_size, config.num_layers, bidirectional=True, batch_first=True, dropout=config.dropout)
        self.tanh1 = nn.Tanh()
        # Attention query vector over the 2*hidden bidirectional states.
        self.w = nn.Parameter(torch.zeros(config.hidden_size * 2))
        self.tanh2 = nn.Tanh()  # unused in forward; kept to mirror the original module layout
        self.fc1 = nn.Linear(config.hidden_size * 2, config.hidden_size2)
        self.fc = nn.Linear(config.hidden_size2, config.num_classes)

    def forward(self, x):
        """x is a (token_ids, lengths) pair; returns class logits."""
        tokens, _ = x
        embedded = self.embedding(tokens)                     # [B, T, E]
        hidden_states, _ = self.lstm(embedded)                # [B, T, 2H]
        scores = torch.matmul(self.tanh1(hidden_states), self.w)
        attn = F.softmax(scores, dim=1).unsqueeze(-1)         # [B, T, 1]
        pooled = torch.sum(hidden_states * attn, 1)           # [B, 2H]
        activated = F.relu(pooled)
        return self.fc(self.fc1(activated))
class CustomCategorical(Categorical):
    """Categorical distribution whose log_prob also accepts values that
    carry a trailing singleton dimension."""

    def __init__(self, *args, **kwargs):
        super(CustomCategorical, self).__init__(*args, **kwargs)

    def log_prob(self, value):
        """Like Categorical.log_prob, but when *value* has the same rank
        as the logits its trailing size-1 axis is squeezed first."""
        if value.ndim == self.logits.ndim:
            assert value.shape[-1] == 1, f'Shape error {value.shape}'
            value = value.squeeze(-1)
        return super(CustomCategorical, self).log_prob(value)
class _TAACFileMixin():
    """Shared assertions for AAC file-format tests.

    Concrete TestCases are expected to set self.song (an AACFile
    instance) in their own setUp before these run.
    """

    def test_basic(self):
        # Round-trip a tag value through write + reload.
        self.song['title'] = 'SomeTestValue'
        self.song.write()
        self.song.reload()
        self.assertEqual(self.song('title'), 'SomeTestValue')

    def test_write(self):
        self.song.write()

    def test_can_change(self):
        # 'title' is editable; unknown keys are not.
        self.assertTrue(self.song.can_change('title'))
        self.assertFalse(self.song.can_change('foobar'))
        self.assertTrue(('title' in self.song.can_change()))

    def test_can_multiple_values(self):
        self.assertEqual(self.song.can_multiple_values(), True)
        self.assertTrue(self.song.can_multiple_values('title'))

    def test_invalid(self):
        # Loading a non-AAC file must raise.
        path = get_data_path('empty.xm')
        self.assertTrue(os.path.exists(path))
        self.assertRaises(Exception, AACFile, path)

    def test_format_codec(self):
        self.assertEqual(self.song('~format'), 'AAC')
        self.assertEqual(self.song('~codec'), 'AAC')
        self.assertEqual(self.song('~encoding'), '')

    def test_channels(self):
        assert (self.song('~#channels') == 2)
class _BotUnpickler(pickle.Unpickler):
    """Unpickler that resolves persistent ids written by the matching
    pickler back into live Bot references."""

    __slots__ = ('_bot',)

    def __init__(self, bot: Bot, *args: Any, **kwargs: Any):
        self._bot = bot
        super().__init__(*args, **kwargs)

    def persistent_load(self, pid: str) -> Optional[Bot]:
        # Placeholder for a bot we don't know -> drop the reference.
        if pid == _REPLACED_UNKNOWN_BOT:
            return None
        # Placeholder for the known bot -> substitute the live instance.
        if pid == _REPLACED_KNOWN_BOT:
            return self._bot
        raise pickle.UnpicklingError('Found unknown persistent id when unpickling!')
class PhaseFitEstimator(_VPEEstimator):
    """Estimate an expectation value by fitting known eigenfrequencies
    to a measured phase function."""

    def __init__(self, evals: numpy.ndarray, ref_eval: float=0):
        # evals: known eigenvalues of the operator; ref_eval: reference
        # energy used to de-rotate the phase function.
        self.evals = evals
        self.ref_eval = ref_eval

    def get_simulation_points(self, safe: bool=True) -> numpy.ndarray:
        """Return the time points at which to sample the phase function.

        The time step is pi over the spectral range; `safe` doubles the
        number of samples, giving an overdetermined fit.
        """
        spread = max(self.evals) - min(self.evals)
        step_size = numpy.pi / spread
        numsteps = (2 * len(self.evals)) if safe else len(self.evals)
        maxtime = step_size * (numsteps - 1)
        return numpy.linspace(0, maxtime, numsteps)

    def get_amplitudes(self, phase_function: numpy.ndarray) -> numpy.ndarray:
        """Fit amplitudes of the known frequencies to the phase function
        (after removing the reference-energy rotation)."""
        times = self.get_simulation_points()
        shifted = numpy.array(phase_function) * numpy.exp((1j * times) * self.ref_eval)
        return fit_known_frequencies(shifted, times, self.evals)

    def get_expectation_value(self, phase_function: numpy.ndarray) -> numpy.ndarray:
        """Return the |amplitude|-weighted average of the eigenvalues."""
        weights = numpy.abs(self.get_amplitudes(phase_function))
        return numpy.dot(weights, self.evals) / numpy.sum(weights)
class TestComment(unittest.TestCase):
    """Tests for the test_comment() PO-entry checker (a module-level
    helper defined elsewhere in this file)."""

    def test_ok(self):
        # Self-explanatory msgids, or cryptic ones with a translator
        # comment, must pass.
        test_comment(POEntry(msgid='Hello, I am a test string'))
        test_comment(POEntry(msgid='c', comment="TRANSLATORS: 'c' to continue"))

    def test_no_comment(self):
        # A cryptic one-letter msgid without a translator comment must fail.
        self.assertRaises(AssertionError, test_comment, POEntry(msgid='c'))
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.