def _include_extra(req: str, extra: str, condition: str) -> Requirement:
r = Requirement(req)
parts = ((f'({r.marker})' if r.marker else None), (f'({condition})' if condition else None), (f'extra == {extra!r}' if extra else None))
r.marker = Marker(' and '.join((x for x in parts if x)))
return r
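# Minimal usage sketch for _include_extra, assuming Requirement/Marker above
# come from `packaging` (an assumption based on the names used):
# req = _include_extra('numpy>=1.21; python_version < "3.12"', 'fast', 'os_name == "posix"')
# str(req.marker) now joins the original marker, the extra condition,
# and `extra == "fast"` with `and`.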
class Identity(UnaryScalarOp):
def impl(self, input):
return input
def c_code(self, node, name, inputs, outputs, sub):
(x,) = inputs
(z,) = outputs
return f'{z} = {x};'
def grad(self, inputs, gout):
(x,) = inputs
(gz,) = gout
if (x.type in continuous_types):
return (gz,)
else:
return (x.zeros_like(dtype=config.floatX),)
def LibriSpeech(root: Union[str, Path], url: str=URL, folder_in_archive: str=FOLDER_IN_ARCHIVE):
if (url in ['dev-clean', 'dev-other', 'test-clean', 'test-other', 'train-clean-100', 'train-clean-360', 'train-other-500']):
url = ((BASE_URL + url) + '.tar.gz')
root = os.fspath(root)
checksum_dict = {os.path.join(root, key): value for (key, value) in _CHECKSUMS.items()}
url_dp = IterableWrapper([url])
cache_compressed_dp = url_dp.on_disk_cache(filepath_fn=(lambda url: os.path.join(root, os.path.basename(url))), hash_dict=checksum_dict, hash_type='sha256')
cache_compressed_dp = HttpReader(cache_compressed_dp).end_caching(same_filepath_fn=True)
cache_decompressed_dp = cache_compressed_dp.on_disk_cache(filepath_fn=(lambda tar_path: os.path.join(root, folder_in_archive, tar_path.split('.')[0])))
cache_decompressed_dp = FileOpener(cache_decompressed_dp, mode='b').load_from_tar()
cache_decompressed_dp = cache_decompressed_dp.end_caching(filepath_fn=functools.partial(decompress_filepath_fn, root_path=os.path.join(root, folder_in_archive)))
(audio_dp, txt_dp) = cache_decompressed_dp.demux(2, classify_file_fn, drop_none=True, buffer_size=(- 1))
txt_dp = FileOpener(txt_dp, mode='t').readlines(return_path=False).map(text_split_fn)
transcript_map_dp = txt_dp.to_map_datapipe()
audio_transcript_dp = audio_dp.zip_with_map(transcript_map_dp, key_fn=audio_key_fn)
return audio_transcript_dp.map(load_librispeech_item)
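# Hypothetical usage of the datapipe above (the 'dev-clean' subset name comes
# from the URL list in the function; everything else here is an assumption):
# dp = LibriSpeech(root='./data', url='dev-clean')
# first_item = next(iter(dp))  # one decoded audio sample paired with its transcript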
def stop_evennia():
def _portal_stopped(*args):
print('... Portal stopped.\nEvennia shut down.')
_reactor_stop()
def _server_stopped(*args):
print('... Server stopped.\nStopping Portal ...')
send_instruction(PSHUTD, {})
wait_for_status(False, None, _portal_stopped)
def _portal_running(response):
(prun, srun, ppid, spid, _, _) = _parse_status(response)
if srun:
print('Server stopping ...')
send_instruction(SSHUTD, {})
wait_for_status_reply(_server_stopped)
else:
print('Server already stopped.\nStopping Portal ...')
send_instruction(PSHUTD, {})
wait_for_status(False, None, _portal_stopped)
def _portal_not_running(fail):
print('Evennia not running.')
_reactor_stop()
send_instruction(PSTATUS, None, _portal_running, _portal_not_running)
def add_eval_sample_opts(parser):
parser.add_argument('--sample_method', type=str, default='greedy', help='greedy; sample; gumbel; top<int>, top<0-1>')
parser.add_argument('--beam_size', type=int, default=1, help='used when sample_method = greedy, indicates number of beams in beam search. Usually 2 or 3 works well. More is not better. Set this to 1 for faster runtime but a bit worse performance.')
parser.add_argument('--max_length', type=int, default=20, help='Maximum length during sampling')
parser.add_argument('--length_penalty', type=str, default='', help='wu_X or avg_X, X is the alpha')
parser.add_argument('--group_size', type=int, default=1, help="used for diverse beam search. if group_size is 1, then it's normal beam search")
parser.add_argument('--diversity_lambda', type=float, default=0.5, help='used for diverse beam search. Usually from 0.2 to 0.8. Higher value of lambda produces a more diverse list')
parser.add_argument('--temperature', type=float, default=1.0, help='temperature when sampling from distributions (i.e. when sample_method = sample). Lower = "safer" predictions.')
parser.add_argument('--decoding_constraint', type=int, default=0, help='If 1, not allowing same word in a row')
parser.add_argument('--block_trigrams', type=int, default=0, help='block repeated trigram.')
parser.add_argument('--remove_bad_endings', type=int, default=0, help='Remove bad endings')
parser.add_argument('--suppress_UNK', type=int, default=1, help='Not predicting UNK')
def tail_events(benchmark_tms: QFSeries, examined_tms: QFSeries, tail_percentile: float) -> Tuple[QFSeries, QFSeries]:
assert benchmark_tms.index.equals(examined_tms.index)
percentile = np.percentile(benchmark_tms, tail_percentile)
indices_of_tail_events = (benchmark_tms < percentile)
benchmark_tail_tms = benchmark_tms[indices_of_tail_events]
examined_tail_tms = examined_tms[indices_of_tail_events]
return (benchmark_tail_tms, examined_tail_tms)
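# Usage sketch for tail_events with plain pandas Series standing in for QFSeries
# (an assumption; QFSeries appears Series-like given the indexing used above):
# import numpy as np
# import pandas as pd
# idx = pd.date_range('2020-01-01', periods=250, freq='B')
# benchmark = pd.Series(np.random.default_rng(0).normal(0, 0.01, 250), index=idx)
# examined = pd.Series(np.random.default_rng(1).normal(0, 0.01, 250), index=idx)
# bench_tail, exam_tail = tail_events(benchmark, examined, tail_percentile=5)
# # both outputs keep only the dates where the benchmark fell below its 5th percentile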
@login_required
def feed_delete(request, feed_pk, template='yarr/confirm.html'):
feed = get_object_or_404(models.Feed, pk=feed_pk, user=request.user)
if (request.method == 'POST'):
feed.delete()
messages.success(request, 'Feed deleted')
return HttpResponseRedirect(reverse(settings.INDEX_URL))
return render(request, template, {'title': 'Delete feed', 'message': ('Are you sure you want to delete the feed "%s"?' % feed.title), 'submit_label': 'Delete feed', 'DEV_MODE': settings.DEV_MODE})
class LinkedinOAuth2(BaseOAuth2):
name = 'linkedin-oauth2'
AUTHORIZATION_URL = 'https://www.linkedin.com/oauth/v2/authorization'
ACCESS_TOKEN_URL = 'https://www.linkedin.com/oauth/v2/accessToken'
USER_DETAILS_URL = 'https://api.linkedin.com/v2/me?projection=({projection})'
USER_EMAILS_URL = 'https://api.linkedin.com/v2/emailAddress?q=members&projection=(elements*(handle~))'
ACCESS_TOKEN_METHOD = 'POST'
REDIRECT_STATE = False
DEFAULT_SCOPE = ['r_liteprofile']
EXTRA_DATA = [('id', 'id'), ('expires_in', 'expires'), ('firstName', 'first_name'), ('lastName', 'last_name')]
def user_details_url(self):
fields_selectors = list(set((['id', 'firstName', 'lastName'] + self.setting('FIELD_SELECTORS', []))))
fields_selectors.sort()
fields_selectors = ','.join(fields_selectors)
return self.USER_DETAILS_URL.format(projection=fields_selectors)
def user_emails_url(self):
return self.USER_EMAILS_URL
def user_data(self, access_token, *args, **kwargs):
response = self.get_json(self.user_details_url(), headers=self.user_data_headers(access_token))
if ('emailAddress' in set(self.setting('FIELD_SELECTORS', []))):
emails = self.email_data(access_token, *args, **kwargs)
if emails:
response['emailAddress'] = emails[0]
return response
def email_data(self, access_token, *args, **kwargs):
response = self.get_json(self.user_emails_url(), headers=self.user_data_headers(access_token))
email_addresses = []
for element in response.get('elements', []):
email_address = element.get('handle~', {}).get('emailAddress')
email_addresses.append(email_address)
return list(filter(None, email_addresses))
def get_user_details(self, response):
def get_localized_name(name):
locale = '{}_{}'.format(name['preferredLocale']['language'], name['preferredLocale']['country'])
return name['localized'].get(locale, '')
(fullname, first_name, last_name) = self.get_user_names(first_name=get_localized_name(response['firstName']), last_name=get_localized_name(response['lastName']))
email = response.get('emailAddress', '')
return {'username': (first_name + last_name), 'fullname': fullname, 'first_name': first_name, 'last_name': last_name, 'email': email}
def user_data_headers(self, access_token):
headers = {}
lang = self.setting('FORCE_PROFILE_LANGUAGE')
if lang:
headers['Accept-Language'] = (lang if (lang is not True) else self.strategy.get_language())
headers['Authorization'] = 'Bearer {access_token}'.format(access_token=access_token)
return headers
def request_access_token(self, *args, **kwargs):
kwargs['params'] = kwargs.pop('data')
return super().request_access_token(*args, **kwargs)
def process_error(self, data):
super().process_error(data)
if data.get('serviceErrorCode'):
raise AuthCanceled(self, (data.get('message') or data.get('status')))
def get_distribution(dist):
if isinstance(dist, str):
dist = Requirement.parse(dist)
if isinstance(dist, Requirement):
dist = get_provider(dist)
if (not isinstance(dist, Distribution)):
raise TypeError('Expected string, Requirement, or Distribution', dist)
return dist
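# Usage sketch (pkg_resources-style API, which get_provider/Distribution suggest):
# dist = get_distribution('pip')                         # a project name string...
# dist = get_distribution(Requirement.parse('pip>=20'))  # ...or a parsed Requirement
# print(dist.project_name, dist.version)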
def makeRunCommand(cmd, case_path, source_env=True):
installation_path = getFoamDir()
if (installation_path is None):
raise IOError('OpenFOAM installation directory not found')
source = ''
if source_env:
env_setup_script = '{}/etc/bashrc'.format(installation_path)
source = 'source "{}"'.format(env_setup_script)
if case_path:
if (not os.path.exists(case_path)):
raise IOError('case path: `{}` does not exist'.format(case_path))
cd = 'cd "{}"'.format(translatePath(case_path))
else:
cd = 'cd'
if isinstance(cmd, list):
cmd = ' '.join(cmd)
if (getFoamRuntime() == 'BashWSL'):
cmdline = 'wsl {{ {}; {}; {}; }}'.format(source, cd, cmd)
return cmdline
elif (getFoamRuntime() == 'BlueCFD'):
short_bluecfd_path = getShortWindowsPath('{}\\..'.format(installation_path))
with open('{}\\..\\msys64\\home\\ofuser\\.blueCFDOrigin'.format(installation_path), 'w') as f:
f.write(short_bluecfd_path)
cmdline = ' '.join(['{}\\msys64\\usr\\bin\\bash'.format(short_bluecfd_path), '--login', '-O', 'expand_aliases', '-c', ((cd + ' && ') + cmd)])
return cmdline
else:
cmdline = "bash -c '{} && {} && {}'".format(source, cd, cmd)
print('cmdline = ', cmdline)
return cmdline
class TestTransform:
@pytest.mark.parametrize('ndim', (0, 1))
def test_fallback_log_jac_det(self, ndim):
class SquareTransform(Transform):
name = 'square'
ndim_supp = ndim
def forward(self, value, *inputs):
return pt.power(value, 2)
def backward(self, value, *inputs):
return pt.sqrt(value)
square_tr = SquareTransform()
value = pt.vector('value')
value_tr = square_tr.forward(value)
log_jac_det = square_tr.log_jac_det(value_tr)
test_value = np.r_[(3, 4)]
expected_log_jac_det = (- np.log((2 * test_value)))
if (ndim == 1):
expected_log_jac_det = expected_log_jac_det.sum()
np.testing.assert_array_equal(log_jac_det.eval({value: test_value}), expected_log_jac_det)
@pytest.mark.parametrize('ndim', (None, 2))
def test_fallback_log_jac_det_undefined_ndim(self, ndim):
class SquareTransform(Transform):
name = 'square'
ndim_supp = ndim
def forward(self, value, *inputs):
return pt.power(value, 2)
def backward(self, value, *inputs):
return pt.sqrt(value)
with pytest.raises(NotImplementedError, match='only implemented for ndim_supp in \\(0, 1\\)'):
SquareTransform().log_jac_det(0)
def test_chained_transform(self):
loc = 5
scale = 0.1
ch = ChainedTransform(transform_list=[ScaleTransform(transform_args_fn=(lambda *inputs: pt.constant(scale))), ExpTransform(), LocTransform(transform_args_fn=(lambda *inputs: pt.constant(loc)))])
x = pt.random.multivariate_normal(np.zeros(3), np.eye(3))
x_val = x.eval()
x_val_forward = ch.forward(x_val, *x.owner.inputs).eval()
np.testing.assert_allclose(x_val_forward, (np.exp((x_val * scale)) + loc))
x_val_backward = ch.backward(x_val_forward, *x.owner.inputs, scale, loc).eval()
np.testing.assert_allclose(x_val_backward, x_val)
log_jac_det = ch.log_jac_det(x_val_forward, *x.owner.inputs, scale, loc)
np.testing.assert_allclose(pt.sum(log_jac_det).eval(), np.sum(((- np.log(scale)) - np.log((x_val_forward - loc)))))
@pytest.mark.parametrize('transform', [ErfTransform(), ErfcTransform(), ErfcxTransform(), SinhTransform(), CoshTransform(), TanhTransform(), ArcsinhTransform(), ArccoshTransform(), ArctanhTransform(), LogTransform(), ExpTransform()])
def test_check_jac_det(self, transform):
check_jacobian_det(transform, Vector(Rplusbig, 2), pt.dvector, [0.1, 0.1], elemwise=True, rv_var=pt.random.normal(0.5, 1, name='base_rv'))
def gen_candidate(level):
# Apriori join step: pair frequent level-itemsets that overlap on (level - 1)
# items to build the next level's candidates; binary search locates the block
# of itemsets sharing the required prefix.
global candidate
size = len(freArr[(level - 1)])
start = 0
for i in range(size):
Q = freArr[(level - 1)][start][0:(level - 1)]
R = freArr[(level - 1)][i][1:level]
if (Q != R):
start = binary_search(level, R, 0, (size - 1))
if ((start < 0) or (start >= size)):
start = 0
else:
Q = freArr[(level - 1)][start][0:(level - 1)]
while (Q == R):
cand = (freArr[(level - 1)][i][0:level] + freArr[(level - 1)][start][(level - 1):level])
candidate.append(cand)
start = (start + 1)
if (start >= size):
start = 0
break
Q = freArr[(level - 1)][start][0:(level - 1)]
candidate = sorted(candidate)
class clean(distutils.command.clean.clean):
def run(self):
distutils.command.clean.clean.run(self)
for path in (ROOT_DIR / 'torcharrow').glob('**/*.so'):
print(f"removing '{path}'")
path.unlink()
build_dirs = [(ROOT_DIR / 'build')]
for path in build_dirs:
if path.exists():
print(f"removing '{path}' (and everything under it)")
shutil.rmtree(str(path), ignore_errors=True)
class GHMCLoss(nn.Module):
def __init__(self, bins=30, momentum=0.5):
super(GHMCLoss, self).__init__()
self.bins = bins
self.momentum = momentum
self.edges = [(t / bins) for t in range((bins + 1))]
self.edges[(- 1)] += 1e-06
if (momentum > 0):
self.acc_sum = [0.0 for _ in range(bins)]
def forward(self, input, target):
edges = self.edges
mmt = self.momentum
weights = torch.zeros_like(input)
g = torch.abs((input.sigmoid().detach() - target))
tot = input.numel()
n = 0
for i in range(self.bins):
inds = ((g >= edges[i]) & (g < edges[(i + 1)]))
num_in_bin = inds.sum().item()
if (num_in_bin > 0):
if (mmt > 0):
self.acc_sum[i] = ((mmt * self.acc_sum[i]) + ((1 - mmt) * num_in_bin))
weights[inds] = (tot / self.acc_sum[i])
else:
weights[inds] = (tot / num_in_bin)
n += 1
if (n > 0):
weights /= weights.mean()
loss = (F.binary_cross_entropy_with_logits(input, target, weights, reduction='sum') / tot)
return loss
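# Minimal usage sketch for GHMCLoss on random logits and binary targets:
# import torch
# criterion = GHMCLoss(bins=30, momentum=0.5)
# logits = torch.randn(8, 1)
# targets = torch.randint(0, 2, (8, 1)).float()
# loss = criterion(logits, targets)  # scalar, normalised by the element count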
class TestLogging(QiskitChemistryTestCase):
def setUp(self):
super().setUp()
self.current_level = get_qiskit_chemistry_logging()
set_qiskit_chemistry_logging(logging.INFO)
def tearDown(self):
set_qiskit_chemistry_logging(self.current_level)
super().tearDown()
def test_logging_emit(self):
with self.assertLogs(QiskitLogDomains.DOMAIN_CHEMISTRY.value, level='INFO') as log:
try:
driver = PySCFDriver(atom='H .0 .0 .0; H .0 .0 0.735', unit=UnitsType.ANGSTROM, basis='sto3g')
except QiskitChemistryError:
self.skipTest('PYSCF driver does not appear to be installed')
return
_ = driver.run()
self.assertIn('PySCF', log.output[0])
def main() -> None:
application = Application.builder().token('TOKEN').build()
application.add_handler(ChatMemberHandler(track_chats, ChatMemberHandler.MY_CHAT_MEMBER))
application.add_handler(CommandHandler('show_chats', show_chats))
application.add_handler(ChatMemberHandler(greet_chat_members, ChatMemberHandler.CHAT_MEMBER))
application.add_handler(MessageHandler(filters.ALL, start_private_chat))
application.run_polling(allowed_updates=Update.ALL_TYPES)
class NDCGMetricValueTest(unittest.TestCase):
def setUp(self) -> None:
self.non_exponential_ndcg = NDCGMetric(world_size=WORLD_SIZE, my_rank=0, batch_size=BATCH_SIZE, tasks=[DefaultTaskInfo], exponential_gain=False, session_key=SESSION_KEY)
self.exponential_ndcg = NDCGMetric(world_size=WORLD_SIZE, my_rank=0, batch_size=BATCH_SIZE, tasks=[DefaultTaskInfo], exponential_gain=True, session_key=SESSION_KEY)
def test_single_session(self) -> None:
model_output = generate_model_output_single_session()
self.non_exponential_ndcg.update(predictions={DefaultTaskInfo.name: model_output['predictions'][0]}, labels={DefaultTaskInfo.name: model_output['labels'][0]}, weights={DefaultTaskInfo.name: model_output['weights'][0]}, required_inputs={SESSION_KEY: model_output['session_ids'][0]})
metric = self.non_exponential_ndcg.compute()
actual_metric = metric[f'ndcg-{DefaultTaskInfo.name}|lifetime_ndcg']
expected_metric = model_output['expected_ndcg_non_exp']
torch.testing.assert_close(actual_metric, expected_metric, atol=0.0001, rtol=0.0001, check_dtype=False, equal_nan=True, msg=f'Actual: {actual_metric}, Expected: {expected_metric}')
self.exponential_ndcg.update(predictions={DefaultTaskInfo.name: model_output['predictions'][0]}, labels={DefaultTaskInfo.name: model_output['labels'][0]}, weights={DefaultTaskInfo.name: model_output['weights'][0]}, required_inputs={SESSION_KEY: model_output['session_ids'][0]})
metric = self.exponential_ndcg.compute()
actual_metric = metric[f'ndcg-{DefaultTaskInfo.name}|lifetime_ndcg']
expected_metric = model_output['expected_ndcg_exp']
torch.testing.assert_close(actual_metric, expected_metric, atol=0.0001, rtol=0.0001, check_dtype=False, equal_nan=True, msg=f'Actual: {actual_metric}, Expected: {expected_metric}')
def test_multiple_sessions(self) -> None:
model_output = generate_model_output_multiple_sessions()
self.non_exponential_ndcg.update(predictions={DefaultTaskInfo.name: model_output['predictions'][0]}, labels={DefaultTaskInfo.name: model_output['labels'][0]}, weights={DefaultTaskInfo.name: model_output['weights'][0]}, required_inputs={SESSION_KEY: model_output['session_ids'][0]})
metric = self.non_exponential_ndcg.compute()
actual_metric = metric[f'ndcg-{DefaultTaskInfo.name}|lifetime_ndcg']
expected_metric = model_output['expected_ndcg_non_exp']
torch.testing.assert_close(actual_metric, expected_metric, atol=0.0001, rtol=0.0001, check_dtype=False, equal_nan=True, msg=f'Actual: {actual_metric}, Expected: {expected_metric}')
self.exponential_ndcg.update(predictions={DefaultTaskInfo.name: model_output['predictions'][0]}, labels={DefaultTaskInfo.name: model_output['labels'][0]}, weights={DefaultTaskInfo.name: model_output['weights'][0]}, required_inputs={SESSION_KEY: model_output['session_ids'][0]})
metric = self.exponential_ndcg.compute()
actual_metric = metric[f'ndcg-{DefaultTaskInfo.name}|lifetime_ndcg']
expected_metric = model_output['expected_ndcg_exp']
torch.testing.assert_close(actual_metric, expected_metric, atol=0.0001, rtol=0.0001, check_dtype=False, equal_nan=True, msg=f'Actual: {actual_metric}, Expected: {expected_metric}')
def test_negative_sessions(self) -> None:
model_output = generate_model_output_negative_sessions()
self.non_exponential_ndcg.update(predictions={DefaultTaskInfo.name: model_output['predictions'][0]}, labels={DefaultTaskInfo.name: model_output['labels'][0]}, weights={DefaultTaskInfo.name: model_output['weights'][0]}, required_inputs={SESSION_KEY: model_output['session_ids'][0]})
metric = self.non_exponential_ndcg.compute()
actual_metric = metric[f'ndcg-{DefaultTaskInfo.name}|lifetime_ndcg']
expected_metric = model_output['expected_ndcg_non_exp']
torch.testing.assert_close(actual_metric, expected_metric, atol=0.0001, rtol=0.0001, check_dtype=False, equal_nan=True, msg=f'Actual: {actual_metric}, Expected: {expected_metric}')
self.exponential_ndcg.update(predictions={DefaultTaskInfo.name: model_output['predictions'][0]}, labels={DefaultTaskInfo.name: model_output['labels'][0]}, weights={DefaultTaskInfo.name: model_output['weights'][0]}, required_inputs={SESSION_KEY: model_output['session_ids'][0]})
metric = self.exponential_ndcg.compute()
actual_metric = metric[f'ndcg-{DefaultTaskInfo.name}|lifetime_ndcg']
expected_metric = model_output['expected_ndcg_exp']
torch.testing.assert_close(actual_metric, expected_metric, atol=0.0001, rtol=0.0001, check_dtype=False, equal_nan=True, msg=f'Actual: {actual_metric}, Expected: {expected_metric}')
class BamBlock(nn.Module):
def __init__(self, channels):
super(BamBlock, self).__init__()
self.ch_att = ChannelGate(channels=channels)
self.sp_att = SpatialGate(channels=channels)
self.sigmoid = nn.Sigmoid()
def forward(self, x):
att = (1 + self.sigmoid((self.ch_att(x) * self.sp_att(x))))
x = (x * att)
return x
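# Usage sketch, assuming ChannelGate/SpatialGate (defined elsewhere) broadcast
# to the input shape so the combined attention map matches x:
# feats = torch.randn(2, 64, 32, 32)
# out = BamBlock(channels=64)(feats)  # same shape as feats, rescaled by 1 + sigmoid(att)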
class TestVMStatCollector(CollectorTestCase):
def setUp(self):
config = get_collector_config('VMStatCollector', {'interval': 10})
self.collector = VMStatCollector(config, None)
def test_import(self):
self.assertTrue(VMStatCollector)
@patch('__builtin__.open')
@patch('os.access', Mock(return_value=True))
@patch.object(Collector, 'publish')
def test_should_open_proc_vmstat(self, publish_mock, open_mock):
open_mock.return_value = StringIO('')
self.collector.collect()
open_mock.assert_called_once_with('/proc/vmstat')
@patch.object(Collector, 'publish')
def test_should_work_with_real_data(self, publish_mock):
VMStatCollector.PROC = self.getFixturePath('proc_vmstat_1')
self.collector.collect()
self.assertPublishedMany(publish_mock, {})
VMStatCollector.PROC = self.getFixturePath('proc_vmstat_2')
self.collector.collect()
metrics = {'pgfault': 71.1, 'pgmajfault': 0.0, 'pgpgin': 0.0, 'pgpgout': 9.2, 'pswpin': 0.0, 'pswpout': 0.0}
self.setDocExample(collector=self.collector.__class__.__name__, metrics=metrics, defaultpath=self.collector.config['path'])
self.assertPublishedMany(publish_mock, metrics)
def create_door_frame(bm, face, prop):
normal = face.normal.copy()
min_frame_size = (min(calc_face_dimensions(face)) / 2)
prop.frame_thickness = clamp(prop.frame_thickness, 0.01, (min_frame_size - 0.001))
(door_face, frame_faces) = make_door_inset(bm, face, prop)
arch_face = None
if prop.add_arch:
frame_faces.remove(get_top_faces(frame_faces).pop())
top_edges = get_top_edges({e for f in get_bottom_faces(frame_faces, n=2) for e in f.edges}, n=2)
(arch_face, arch_frame_faces) = create_arch(bm, top_edges, frame_faces, prop.arch, prop.frame_thickness, local_xyz(face))
frame_faces += arch_frame_faces
else:
merge_loose_split_verts(bm, door_face, prop)
bmesh.ops.recalc_face_normals(bm, faces=list(bm.faces))
if prop.add_arch:
([door_face], _, [arch_face], frame_faces) = add_frame_depth(bm, [door_face], [], [arch_face], frame_faces, prop.frame_depth, normal)
(arch_face, new_frame_faces) = add_arch_depth(bm, arch_face, prop.arch.depth, normal)
frame_faces += new_frame_faces
else:
([door_face], _, _, frame_faces) = add_frame_depth(bm, [door_face], [], [], frame_faces, prop.frame_depth, normal)
(door_face, new_frame_faces) = add_door_depth(bm, door_face, prop.door_depth, normal)
frame_faces += new_frame_faces
add_faces_to_group(bm, [door_face], MaterialGroup.DOOR)
add_faces_to_group(bm, validate(frame_faces), MaterialGroup.FRAME)
if prop.add_arch:
add_faces_to_group(bm, [arch_face], MaterialGroup.DOOR)
return (door_face, arch_face)
class BaseBatteryModel(pybamm.BaseModel):
def __init__(self, options=None, name='Unnamed battery model'):
super().__init__(name)
self.options = options
@classmethod
def deserialise(cls, properties: dict):
instance = cls.__new__(cls)
instance.__init__(options=properties['options'], name=(properties['name'] + '_saved'))
instance._concatenated_rhs = properties['concatenated_rhs']
instance._concatenated_algebraic = properties['concatenated_algebraic']
instance._concatenated_initial_conditions = properties['concatenated_initial_conditions']
instance.len_rhs = instance.concatenated_rhs.size
instance.len_alg = instance.concatenated_algebraic.size
instance.len_rhs_and_alg = (instance.len_rhs + instance.len_alg)
instance.bounds = properties['bounds']
instance.events = properties['events']
instance.mass_matrix = properties['mass_matrix']
instance.mass_matrix_inv = properties['mass_matrix_inv']
if properties['variables']:
instance._variables = pybamm.FuzzyDict(properties['variables'])
for var in instance._variables.values():
if (var.domain != []):
var.mesh = properties['mesh'][var.domain]
else:
var.mesh = None
if (var.domains['secondary'] != []):
var.secondary_mesh = properties['mesh'][var.domains['secondary']]
else:
var.secondary_mesh = None
instance._geometry = pybamm.Geometry(properties['geometry'])
else:
instance._variables = pybamm.FuzzyDict({})
instance.is_discretised = True
return instance
@property
def default_geometry(self):
return pybamm.battery_geometry(options=self.options)
@property
def default_var_pts(self):
base_var_pts = {'x_n': 20, 'x_s': 20, 'x_p': 20, 'r_n': 20, 'r_p': 20, 'r_n_prim': 20, 'r_p_prim': 20, 'r_n_sec': 20, 'r_p_sec': 20, 'y': 10, 'z': 10, 'R_n': 30, 'R_p': 30}
if (self.options['dimensionality'] == 2):
base_var_pts.update({'x_n': 10, 'x_s': 10, 'x_p': 10})
return base_var_pts
@property
def default_submesh_types(self):
base_submeshes = {'negative electrode': pybamm.Uniform1DSubMesh, 'separator': pybamm.Uniform1DSubMesh, 'positive electrode': pybamm.Uniform1DSubMesh, 'negative particle': pybamm.Uniform1DSubMesh, 'positive particle': pybamm.Uniform1DSubMesh, 'negative primary particle': pybamm.Uniform1DSubMesh, 'positive primary particle': pybamm.Uniform1DSubMesh, 'negative secondary particle': pybamm.Uniform1DSubMesh, 'positive secondary particle': pybamm.Uniform1DSubMesh, 'negative particle size': pybamm.Uniform1DSubMesh, 'positive particle size': pybamm.Uniform1DSubMesh}
if (self.options['dimensionality'] == 0):
base_submeshes['current collector'] = pybamm.SubMesh0D
elif (self.options['dimensionality'] == 1):
base_submeshes['current collector'] = pybamm.Uniform1DSubMesh
elif (self.options['dimensionality'] == 2):
base_submeshes['current collector'] = pybamm.ScikitUniform2DSubMesh
return base_submeshes
@property
def default_spatial_methods(self):
base_spatial_methods = {'macroscale': pybamm.FiniteVolume(), 'negative particle': pybamm.FiniteVolume(), 'positive particle': pybamm.FiniteVolume(), 'negative primary particle': pybamm.FiniteVolume(), 'positive primary particle': pybamm.FiniteVolume(), 'negative secondary particle': pybamm.FiniteVolume(), 'positive secondary particle': pybamm.FiniteVolume(), 'negative particle size': pybamm.FiniteVolume(), 'positive particle size': pybamm.FiniteVolume()}
if (self.options['dimensionality'] == 0):
base_spatial_methods['current collector'] = pybamm.ZeroDimensionalSpatialMethod()
elif (self.options['dimensionality'] == 1):
base_spatial_methods['current collector'] = pybamm.FiniteVolume()
elif (self.options['dimensionality'] == 2):
base_spatial_methods['current collector'] = pybamm.ScikitFiniteElement()
return base_spatial_methods
@property
def options(self):
return self._options
@options.setter
def options(self, extra_options):
if ((extra_options is None) or (type(extra_options) == dict)):
options = BatteryModelOptions(extra_options)
else:
options = extra_options
if isinstance(self, pybamm.lithium_ion.BaseModel):
if (options['convection'] != 'none'):
raise pybamm.OptionError('convection not implemented for lithium-ion models')
if isinstance(self, pybamm.lithium_ion.SPMe):
if (options['electrolyte conductivity'] not in ['default', 'composite', 'integrated']):
raise pybamm.OptionError("electrolyte conductivity '{}' not suitable for SPMe".format(options['electrolyte conductivity']))
if (isinstance(self, pybamm.lithium_ion.SPM) and (not isinstance(self, pybamm.lithium_ion.SPMe))):
if (options['x-average side reactions'] == 'false'):
raise pybamm.OptionError("x-average side reactions cannot be 'false' for SPM models")
if isinstance(self, pybamm.lithium_ion.SPM):
if (('distribution' in options['particle size']) and (options['surface form'] == 'false')):
raise pybamm.OptionError("surface form must be 'algebraic' or 'differential' if 'particle size' contains a 'distribution'")
if isinstance(self, pybamm.lead_acid.BaseModel):
if ((options['thermal'] != 'isothermal') and (options['dimensionality'] != 0)):
raise pybamm.OptionError('Lead-acid models can only have thermal effects if dimensionality is 0.')
if ((options['SEI'] != 'none') or (options['SEI film resistance'] != 'none')):
raise pybamm.OptionError('Lead-acid models cannot have SEI formation')
if (options['lithium plating'] != 'none'):
raise pybamm.OptionError('Lead-acid models cannot have lithium plating')
if (options['open-circuit potential'] == 'MSMR'):
raise pybamm.OptionError('Lead-acid models cannot use the MSMR open-circuit potential model')
if (isinstance(self, pybamm.lead_acid.LOQS) and (options['surface form'] == 'false') and (options['hydrolysis'] == 'true')):
raise pybamm.OptionError(f'must use surface formulation to solve {self!s} with hydrolysis')
self._options = options
def set_standard_output_variables(self):
self.variables.update({'Time [s]': pybamm.t, 'Time [min]': (pybamm.t / 60), 'Time [h]': (pybamm.t / 3600)})
var = pybamm.standard_spatial_vars
self.variables.update({'x [m]': var.x, 'x_n [m]': var.x_n, 'x_s [m]': var.x_s, 'x_p [m]': var.x_p})
if (self.options['dimensionality'] == 1):
self.variables.update({'z [m]': var.z})
elif (self.options['dimensionality'] == 2):
self.variables.update({'y [m]': var.y, 'z [m]': var.z})
def build_model_equations(self):
for (submodel_name, submodel) in self.submodels.items():
pybamm.logger.verbose(f'Setting rhs for {submodel_name} submodel ({self.name})')
submodel.set_rhs(self.variables)
pybamm.logger.verbose(f'Setting algebraic for {submodel_name} submodel ({self.name})')
submodel.set_algebraic(self.variables)
pybamm.logger.verbose('Setting boundary conditions for {} submodel ({})'.format(submodel_name, self.name))
submodel.set_boundary_conditions(self.variables)
pybamm.logger.verbose(f'Setting initial conditions for {submodel_name} submodel ({self.name})')
submodel.set_initial_conditions(self.variables)
submodel.set_events(self.variables)
pybamm.logger.verbose(f'Updating {submodel_name} submodel ({self.name})')
self.update(submodel)
self.check_no_repeated_keys()
def build_model(self):
self._build_model()
pybamm.logger.debug(f'Setting voltage variables ({self.name})')
self.set_voltage_variables()
pybamm.logger.debug(f'Setting SoC variables ({self.name})')
self.set_soc_variables()
pybamm.logger.debug(f'Setting degradation variables ({self.name})')
self.set_degradation_variables()
self.set_summary_variables()
self._built = True
pybamm.logger.info(f'Finish building {self.name}')
@property
def summary_variables(self):
return self._summary_variables
@summary_variables.setter
def summary_variables(self, value):
for var in value:
if (var not in self.variables):
raise KeyError(f"No cycling variable defined for summary variable '{var}'")
self._summary_variables = value
def set_summary_variables(self):
self._summary_variables = []
def get_intercalation_kinetics(self, domain):
options = getattr(self.options, domain)
if (options['intercalation kinetics'] == 'symmetric Butler-Volmer'):
return pybamm.kinetics.SymmetricButlerVolmer
elif (options['intercalation kinetics'] == 'asymmetric Butler-Volmer'):
return pybamm.kinetics.AsymmetricButlerVolmer
elif (options['intercalation kinetics'] == 'linear'):
return pybamm.kinetics.Linear
elif (options['intercalation kinetics'] == 'Marcus'):
return pybamm.kinetics.Marcus
elif (options['intercalation kinetics'] == 'Marcus-Hush-Chidsey'):
return pybamm.kinetics.MarcusHushChidsey
elif (options['intercalation kinetics'] == 'MSMR'):
return pybamm.kinetics.MSMRButlerVolmer
def get_inverse_intercalation_kinetics(self):
if (self.options['intercalation kinetics'] == 'symmetric Butler-Volmer'):
return pybamm.kinetics.InverseButlerVolmer
else:
raise pybamm.OptionError("Inverse kinetics are only implemented for symmetric Butler-Volmer. Use option {'surface form': 'algebraic'} to use forward kinetics instead.")
def set_external_circuit_submodel(self):
if (self.options['operating mode'] == 'current'):
model = pybamm.external_circuit.ExplicitCurrentControl(self.param, self.options)
elif (self.options['operating mode'] == 'voltage'):
model = pybamm.external_circuit.VoltageFunctionControl(self.param, self.options)
elif (self.options['operating mode'] == 'power'):
model = pybamm.external_circuit.PowerFunctionControl(self.param, self.options, 'algebraic')
elif (self.options['operating mode'] == 'differential power'):
model = pybamm.external_circuit.PowerFunctionControl(self.param, self.options, 'differential without max')
elif (self.options['operating mode'] == 'explicit power'):
model = pybamm.external_circuit.ExplicitPowerControl(self.param, self.options)
elif (self.options['operating mode'] == 'resistance'):
model = pybamm.external_circuit.ResistanceFunctionControl(self.param, self.options, 'algebraic')
elif (self.options['operating mode'] == 'differential resistance'):
model = pybamm.external_circuit.ResistanceFunctionControl(self.param, self.options, 'differential without max')
elif (self.options['operating mode'] == 'explicit resistance'):
model = pybamm.external_circuit.ExplicitResistanceControl(self.param, self.options)
elif (self.options['operating mode'] == 'CCCV'):
model = pybamm.external_circuit.CCCVFunctionControl(self.param, self.options)
elif callable(self.options['operating mode']):
model = pybamm.external_circuit.FunctionControl(self.param, self.options['operating mode'], self.options)
self.submodels['external circuit'] = model
def set_transport_efficiency_submodels(self):
self.submodels['electrolyte transport efficiency'] = pybamm.transport_efficiency.Bruggeman(self.param, 'Electrolyte', self.options)
self.submodels['electrode transport efficiency'] = pybamm.transport_efficiency.Bruggeman(self.param, 'Electrode', self.options)
def set_thermal_submodel(self):
if (self.options['thermal'] == 'isothermal'):
thermal_submodel = pybamm.thermal.isothermal.Isothermal
elif (self.options['thermal'] == 'lumped'):
thermal_submodel = pybamm.thermal.Lumped
elif (self.options['thermal'] == 'x-lumped'):
if (self.options['dimensionality'] == 0):
thermal_submodel = pybamm.thermal.Lumped
elif (self.options['dimensionality'] == 1):
thermal_submodel = pybamm.thermal.pouch_cell.CurrentCollector1D
elif (self.options['dimensionality'] == 2):
thermal_submodel = pybamm.thermal.pouch_cell.CurrentCollector2D
elif (self.options['thermal'] == 'x-full'):
if (self.options['dimensionality'] == 0):
thermal_submodel = pybamm.thermal.pouch_cell.OneDimensionalX
self.submodels['thermal'] = thermal_submodel(self.param, self.options)
def set_current_collector_submodel(self):
if (self.options['current collector'] in ['uniform']):
submodel = pybamm.current_collector.Uniform(self.param)
elif (self.options['current collector'] == 'potential pair'):
if (self.options['dimensionality'] == 1):
submodel = pybamm.current_collector.PotentialPair1plus1D(self.param)
elif (self.options['dimensionality'] == 2):
submodel = pybamm.current_collector.PotentialPair2plus1D(self.param)
self.submodels['current collector'] = submodel
def set_interface_utilisation_submodel(self):
for domain in ['negative', 'positive']:
Domain = domain.capitalize()
util = getattr(self.options, domain)['interface utilisation']
if (util == 'full'):
submodel = pybamm.interface_utilisation.Full(self.param, domain, self.options)
elif (util == 'constant'):
submodel = pybamm.interface_utilisation.Constant(self.param, domain, self.options)
elif (util == 'current-driven'):
if (self.options.electrode_types[domain] == 'planar'):
reaction_loc = 'interface'
elif self.x_average:
reaction_loc = 'x-average'
else:
reaction_loc = 'full electrode'
submodel = pybamm.interface_utilisation.CurrentDriven(self.param, domain, self.options, reaction_loc)
self.submodels[f'{Domain} interface utilisation'] = submodel
def set_voltage_variables(self):
if (self.options.negative['particle phases'] == '1'):
phase_n = ''
else:
phase_n = 'primary '
if (self.options.positive['particle phases'] == '1'):
phase_p = ''
else:
phase_p = 'primary '
ocp_surf_n_av = self.variables[f'X-averaged negative electrode {phase_n}open-circuit potential [V]']
ocp_surf_p_av = self.variables[f'X-averaged positive electrode {phase_p}open-circuit potential [V]']
ocp_n_bulk = self.variables[f'Negative electrode {phase_n}bulk open-circuit potential [V]']
ocp_p_bulk = self.variables[f'Positive electrode {phase_p}bulk open-circuit potential [V]']
eta_particle_n = self.variables[f'Negative {phase_n}particle concentration overpotential [V]']
eta_particle_p = self.variables[f'Positive {phase_p}particle concentration overpotential [V]']
ocv_surf = (ocp_surf_p_av - ocp_surf_n_av)
ocv_bulk = (ocp_p_bulk - ocp_n_bulk)
eta_particle = (eta_particle_p - eta_particle_n)
if (self.options.electrode_types['negative'] == 'planar'):
eta_r_n_av = self.variables['Lithium metal interface reaction overpotential [V]']
else:
eta_r_n_av = self.variables[f'X-averaged negative electrode {phase_n}reaction overpotential [V]']
eta_r_p_av = self.variables[f'X-averaged positive electrode {phase_p}reaction overpotential [V]']
eta_r_av = (eta_r_p_av - eta_r_n_av)
delta_phi_s_n_av = self.variables['X-averaged negative electrode ohmic losses [V]']
delta_phi_s_p_av = self.variables['X-averaged positive electrode ohmic losses [V]']
delta_phi_s_av = (delta_phi_s_p_av - delta_phi_s_n_av)
if (self.options.electrode_types['negative'] == 'planar'):
eta_sei_n_av = self.variables['Negative electrode SEI film overpotential [V]']
else:
eta_sei_n_av = self.variables[f'X-averaged negative electrode {phase_n}SEI film overpotential [V]']
eta_sei_p_av = self.variables[f'X-averaged positive electrode {phase_p}SEI film overpotential [V]']
eta_sei_av = (eta_sei_n_av + eta_sei_p_av)
self.variables.update({'Surface open-circuit voltage [V]': ocv_surf, 'Bulk open-circuit voltage [V]': ocv_bulk, 'Particle concentration overpotential [V]': eta_particle, 'X-averaged reaction overpotential [V]': eta_r_av, 'X-averaged SEI film overpotential [V]': eta_sei_av, 'X-averaged solid phase ohmic losses [V]': delta_phi_s_av})
V = self.variables['Voltage [V]']
eta_e_av = self.variables['X-averaged electrolyte ohmic losses [V]']
eta_c_av = self.variables['X-averaged concentration overpotential [V]']
num_cells = pybamm.Parameter('Number of cells connected in series to make a battery')
self.variables.update({'Battery open-circuit voltage [V]': (ocv_bulk * num_cells), 'Battery negative electrode bulk open-circuit potential [V]': (ocp_n_bulk * num_cells), 'Battery positive electrode bulk open-circuit potential [V]': (ocp_p_bulk * num_cells), 'Battery particle concentration overpotential [V]': (eta_particle * num_cells), 'Battery negative particle concentration overpotential [V]': (eta_particle_n * num_cells), 'Battery positive particle concentration overpotential [V]': (eta_particle_p * num_cells), 'X-averaged battery reaction overpotential [V]': (eta_r_av * num_cells), 'X-averaged battery negative reaction overpotential [V]': (eta_r_n_av * num_cells), 'X-averaged battery positive reaction overpotential [V]': (eta_r_p_av * num_cells), 'X-averaged battery solid phase ohmic losses [V]': (delta_phi_s_av * num_cells), 'X-averaged battery negative solid phase ohmic losses [V]': (delta_phi_s_n_av * num_cells), 'X-averaged battery positive solid phase ohmic losses [V]': (delta_phi_s_p_av * num_cells), 'X-averaged battery electrolyte ohmic losses [V]': (eta_e_av * num_cells), 'X-averaged battery concentration overpotential [V]': (eta_c_av * num_cells), 'Battery voltage [V]': (V * num_cells)})
v_ecm = (ocv_bulk - V)
def x_not_zero(x):
return ((((x > 0) + (x < 0)) * x) + ((x >= 0) * (x <= 0)))
i_cc = self.variables['Current collector current density [A.m-2]']
i_cc_not_zero = x_not_zero(i_cc)
A_cc = self.param.A_cc
self.variables.update({'Local ECM resistance [Ohm]': ((pybamm.sign(i_cc) * v_ecm) / (i_cc_not_zero * A_cc))})
self.events.append(pybamm.Event('Minimum voltage [V]', (V - self.param.voltage_low_cut), pybamm.EventType.TERMINATION))
self.events.append(pybamm.Event('Maximum voltage [V]', (self.param.voltage_high_cut - V), pybamm.EventType.TERMINATION))
tol = 0.1
self.events.append(pybamm.Event('Minimum voltage switch [V]', (V - (self.param.voltage_low_cut - tol)), pybamm.EventType.SWITCH))
self.events.append(pybamm.Event('Maximum voltage switch [V]', (V - (self.param.voltage_high_cut + tol)), pybamm.EventType.SWITCH))
I = self.variables['Current [A]']
I_not_zero = x_not_zero(I)
self.variables.update({'Terminal power [W]': (I * V), 'Power [W]': (I * V), 'Resistance [Ohm]': ((pybamm.sign(I) * V) / I_not_zero)})
def set_degradation_variables(self):
pass
def set_soc_variables(self):
pass
def save_model(self, filename=None, mesh=None, variables=None):
if (variables and (not mesh)):
raise ValueError('Serialisation: Please provide the mesh if variables are required')
Serialise().save_model(self, filename=filename, mesh=mesh, variables=variables)
def test_do():
rng = np.random.default_rng(seed=435)
with pm.Model() as m_old:
x = pm.Normal('x', 0, 0.001)
y = pm.Normal('y', x, 0.001)
z = pm.Normal('z', (y + x), 0.001)
assert ((- 5) < pm.draw(z, random_seed=rng) < 5)
m_new = do(m_old, {y: (x + 100)})
assert (len(m_new.free_RVs) == 2)
assert (m_new['x'] in m_new.free_RVs)
assert (m_new['y'] in m_new.deterministics)
assert (m_new['z'] in m_new.free_RVs)
assert (95 < pm.draw(m_new['z'], random_seed=rng) < 105)
with m_old:
switch = pm.MutableData('switch', 1)
m_new = do(m_old, {y: (100 * switch), x: (100 * switch)})
assert (len(m_new.free_RVs) == 1)
assert (m_new['y'] not in m_new.deterministics)
assert (m_new['x'] not in m_new.deterministics)
assert (m_new['z'] in m_new.free_RVs)
assert (195 < pm.draw(m_new['z'], random_seed=rng) < 205)
with m_new:
pm.set_data({'switch': 0})
assert ((- 5) < pm.draw(m_new['z'], random_seed=rng) < 5)
def action_modify(actions):
triple = ['intent', 'slot', 'value1', 'value2']
res = ''
temp = {}
for action in actions:
if (('value1' in action.keys()) and (action['value1'] != '')):
temp[('' + action['value1'])] = random_modify(action['value1'])
for x in triple:
if (x in action.keys()):
res += (action[('' + x)] + ' ')
else:
res += "''"
res = res[0:(- 1)]
res += ' <|continue|> '
return (res[:(- 14)], temp)
def optimalK(data, num_fold, maxClusters=5, THRE_PS=0.9):
num_data = data.shape[0]
num_feat = data.shape[1]
pred_strength_avg = np.zeros((maxClusters + 1))
for nf in range(num_fold):
inds_train = np.random.choice(num_data, int((num_data * 0.5)), replace=False)
inds_test = list(set(range(num_data)).difference(inds_train))
data_train = data[inds_train]
data_test = data[inds_test]
pred_strength_cur = np.zeros((maxClusters + 1))
for c in range(1, (maxClusters + 1)):
train_cluster = KMeans(n_clusters=c).fit(data_train)
test_cluster = KMeans(n_clusters=c).fit(data_test)
pred_strength_cur[c] = PredictionStrength(data_test, test_cluster.labels_, train_cluster.cluster_centers_, c)
pred_strength_avg += pred_strength_cur
pred_strength_avg /= num_fold
k_optimal = max([i for (i, j) in enumerate(pred_strength_avg) if (j > THRE_PS)])
return k_optimal
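# Usage sketch for optimalK; PredictionStrength (defined elsewhere) must score
# how well train-fold centroids reproduce the test-fold clustering:
# X = np.random.default_rng(0).normal(size=(200, 2))
# k = optimalK(X, num_fold=5, maxClusters=6, THRE_PS=0.8)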
def _create_dummy_icdar_json(json_name):
image_1 = {'id': 0, 'width': 640, 'height': 640, 'file_name': 'fake_name.jpg'}
image_2 = {'id': 1, 'width': 640, 'height': 640, 'file_name': 'fake_name1.jpg'}
annotation_1 = {'id': 1, 'image_id': 0, 'category_id': 0, 'area': 400, 'bbox': [50, 60, 20, 20], 'iscrowd': 0, 'segmentation': [[50, 60, 70, 60, 70, 80, 50, 80]]}
annotation_2 = {'id': 2, 'image_id': 0, 'category_id': 0, 'area': 900, 'bbox': [100, 120, 30, 30], 'iscrowd': 0, 'segmentation': [[100, 120, 130, 120, 120, 150, 100, 150]]}
annotation_3 = {'id': 3, 'image_id': 0, 'category_id': 0, 'area': 1600, 'bbox': [150, 160, 40, 40], 'iscrowd': 1, 'segmentation': [[150, 160, 190, 160, 190, 200, 150, 200]]}
annotation_4 = {'id': 4, 'image_id': 0, 'category_id': 0, 'area': 10000, 'bbox': [250, 260, 100, 100], 'iscrowd': 1, 'segmentation': [[250, 260, 350, 260, 350, 360, 250, 360]]}
annotation_5 = {'id': 5, 'image_id': 1, 'category_id': 0, 'area': 10000, 'bbox': [250, 260, 100, 100], 'iscrowd': 1, 'segmentation': [[250, 260, 350, 260, 350, 360, 250, 360]]}
categories = [{'id': 0, 'name': 'text', 'supercategory': 'text'}]
fake_json = {'images': [image_1, image_2], 'annotations': [annotation_1, annotation_2, annotation_3, annotation_4, annotation_5], 'categories': categories}
mmcv.dump(fake_json, json_name)
def check_match(op_list, op_map=None):
if (not op_list):
raise ValueError('Empty op_list passed to check_match')
if (not op_map):
op_map = default_op_map
op_type_list = [op.type for op in op_list]
_log.debug('Checking matches for op_type_list: %s', op_type_list)
op_index = op_type_list[0]
if (op_index in op_map):
ret_max_len = 0
ret_match_len = 0
op_map_entry = op_map[op_index]
for (_, sequence) in op_map_entry.items():
match_len = _check_sequence(op_type_list, sequence)
max_len = len(sequence)
if (match_len > ret_match_len):
ret_match_len = match_len
ret_max_len = max_len
elif (match_len == ret_match_len):
ret_max_len = max(max_len, ret_max_len)
return (ret_match_len, ret_max_len)
return (0, 0)
class ContainerPage(HTML5Page):
def __init__(self, view):
super().__init__(view)
page_layout = PageLayout(contents_layout=CenteredLayout(), header_layout=Container(fluid=True), footer_layout=Container(fluid=True))
self.use_layout(page_layout)
self.layout.header.add_child(P(view, text='The header'))
self.layout.footer.add_child(P(view, text='The footer'))
columns = page_layout.contents_layout.columns
left = columns['left']
left.add_child(P(view, text='To the left'))
right = columns['right']
right.add_child(P(view, text='To the right'))
@register_infer_shape
@register_useless
@register_canonicalize
@node_rewriter([SpecifyShape])
def local_merge_consecutive_specify_shape(fgraph, node):
if (not isinstance(node.op, SpecifyShape)):
return False
obj = node.inputs[0]
if (not (obj.owner and isinstance(obj.owner.op, SpecifyShape))):
return False
(inner_obj, *shape) = obj.owner.inputs
for (dim, sh) in enumerate(node.inputs[1:]):
if (not NoneConst.equals(sh)):
shape[dim] = sh
return [specify_shape(inner_obj, shape)]
class TestDOTARSDet(TestDOTA):
def eval(self):
txt_name = '{}.txt'.format(self.cfgs.VERSION)
real_test_img_list = self.get_test_image()
rsdet = build_whole_network_5p.DetectionNetworkRSDet(cfgs=self.cfgs, is_training=False)
self.test_dota(det_net=rsdet, real_test_img_list=real_test_img_list, txt_name=txt_name)
if (not self.args.show_box):
os.remove(txt_name)
def test_dependency_from_pep_508_with_python_full_version_pep440_compatible_release_tilde() -> None:
name = 'pathlib2 ; python_version ~= "3.4" or python_version < "3"'
dep = Dependency.create_from_pep_508(name)
assert (dep.name == 'pathlib2')
assert (str(dep.constraint) == '*')
assert (dep.python_versions == '~=3.4 || <3')
class _GoogleDocstringToMarkdown(GoogleDocstring):
def _load_custom_sections(self) -> None:
super()._load_custom_sections()
self._sections['registers'] = self._parse_registers_section
def _parse_parameters_section(self, section: str) -> List[str]:
def _template(name, desc_lines):
desc = ' '.join(desc_lines)
return f' - `{name}`: {desc}'
return ['#### Parameters', *[_template(name, desc) for (name, _type, desc) in self._consume_fields()], '']
def _parse_references_section(self, section: str) -> List[str]:
lines = self._dedent(self._consume_to_next_section())
return ['#### References', '\n'.join(lines), '']
def _parse_registers_section(self, section: str) -> List[str]:
def _template(name, desc_lines):
desc = ' '.join(desc_lines)
return f' - `{name}`: {desc}'
return ['#### Registers', *[_template(name, desc) for (name, _type, desc) in self._consume_fields()], '']
def test_basic_chain_alt_az(sam_data, cec_inverter_parameters, sapm_temperature_cs5p_220m):
times = pd.date_range(start='20160101 1200-0700', end='20160101 1800-0700', freq='6H')
latitude = 32.2
longitude = (- 111)
surface_tilt = 0
surface_azimuth = 0
modules = sam_data['sandiamod']
module_parameters = modules['Canadian_Solar_CS5P_220M___2009_']
temp_model_params = sapm_temperature_cs5p_220m.copy()
with pytest.warns(pvlibDeprecationWarning, match='with_pvwatts'):
(dc, ac) = modelchain.basic_chain(times, latitude, longitude, surface_tilt, surface_azimuth, module_parameters, temp_model_params, cec_inverter_parameters)
expected = pd.Series(np.array([111.621405, (- 0.02)]), index=times)
assert_series_equal(ac, expected)
class BaseOptions:
def __init__(self):
self._parser = argparse.ArgumentParser()
self._initialized = False
def initialize(self):
self._parser.add_argument('--load_epoch', type=int, default=(- 1), help='which epoch to load? set to -1 to use latest cached model')
self._parser.add_argument('--temperature', type=float, default=1.5, help='temperature in distillation loss')
self._parser.add_argument('--AU_label_size', type=int, default=8, help='# of AUs')
self._parser.add_argument('--EXPR_label_size', type=int, default=7, help='# of EXpressions')
self._parser.add_argument('--VA_label_size', type=int, default=2, help='# of VA ')
self._parser.add_argument('--digitize_num', type=int, default=20, choices=[1, 20], help='1 means no digitization, 20 means to digitize continuous label to 20 one hot vector ')
self._parser.add_argument('--AU_criterion', type=str, default='BCE', choices=['FocalLoss', 'BCE'])
self._parser.add_argument('--EXPR_criterion', type=str, default='CE', choices=['FocalLoss', 'CE'])
self._parser.add_argument('--VA_criterion', type=str, default='CCC_CE', choices=['CCC', 'CCC_CE', 'CCC_FocalLoss'])
self._parser.add_argument('--lambda_teacher', type=float, default=0.4, help='weight for distillation loss when the ground truth exists (between 0 to 1)')
self._parser.add_argument('--lambda_AU', type=float, default=8.0, help='weight for AU.')
self._parser.add_argument('--lambda_EXPR', type=float, default=1.0, help='weight for EXPR.')
self._parser.add_argument('--lambda_V', type=float, default=1.0, help='weight for valence.')
self._parser.add_argument('--lambda_A', type=float, default=1.0, help='weight for arousal.')
self._parser.add_argument('--lambda_ccc', type=float, default=1.0, help='weight for ccc loss in (CE + lambda_ccc*ccc).')
self._parser.add_argument('--dataset_names', type=str, default=['Mixed_EXPR', 'Mixed_AU', 'Mixed_VA'], nargs='+')
self._parser.add_argument('--tasks', type=str, default=['EXPR', 'AU', 'VA'], nargs='+')
self._parser.add_argument('--seq_len', type=int, default=64, help='length of input seq ')
self._parser.add_argument('--frozen', action='store_true')
self._parser.add_argument('--hidden_size', type=int, default=128, help='the embedding size of each output head')
self._parser.add_argument('--batch_size', type=int, default=20, help='input batch size per task')
self._parser.add_argument('--image_size', type=int, default=224, help='input image size')
self._parser.add_argument('--gpu_ids', type=str, default='0', help='gpu ids: e.g. 0 0,1,2, 0,2. use -1 for CPU')
self._parser.add_argument('--name', type=str, default='experiment_1', help='name of the experiment. It decides where to store samples and models')
self._parser.add_argument('--n_threads_train', default=8, type=int, help='# threads for loading data')
self._parser.add_argument('--n_threads_test', default=8, type=int, help='# threads for loading data')
self._parser.add_argument('--checkpoints_dir', type=str, default='./checkpoints', help='models are saved here')
self._parser.add_argument('--loggings_dir', type=str, default='./loggings', help='loggings are saved here')
self._parser.add_argument('--model_name', type=str, default='resnet50', help='the name of model')
self._parser.add_argument('--pretrained_dataset', type=str, default='ferplus', choices=['ferplus', 'sfew', 'imagenet'], help="the pretrained_dataset of the face feature extractor, choices:['ferplus', 'sfew','imagenet']")
self._parser.add_argument('--pretrained_resnet50_model', type=str, default='', help='pretrained model')
self._parser.add_argument('--pretrained_teacher_model', type=str, default='')
self._initialized = True
def parse(self):
if (not self._initialized):
self.initialize()
self._opt = self._parser.parse_args()
self._opt.is_train = self.is_train
self._set_and_check_load_epoch()
self._get_set_gpus()
args = vars(self._opt)
self._print(args)
self._save(args)
return self._opt
def _set_and_check_load_epoch(self):
models_dir = os.path.join(self._opt.checkpoints_dir, self._opt.name)
if os.path.exists(models_dir):
if (self._opt.load_epoch == (- 1)):
load_epoch = 0
if self.is_train:
for file in os.listdir(models_dir):
if file.startswith('net_epoch_'):
load_epoch = max(load_epoch, int(file.split('_')[2]))
self._opt.load_epoch = load_epoch
else:
found = False
for file in os.listdir(models_dir):
if file.startswith('net_epoch_'):
found = (int(file.split('_')[2]) == self._opt.load_epoch)
if found:
break
assert found, ('Model for epoch %i not found' % self._opt.load_epoch)
else:
assert (self._opt.load_epoch < 1), ('Model for epoch %i not found' % self._opt.load_epoch)
self._opt.load_epoch = 0
def _get_set_gpus(self):
str_ids = self._opt.gpu_ids.split(',')
self._opt.gpu_ids = []
for str_id in str_ids:
id = int(str_id)
if (id >= 0):
self._opt.gpu_ids.append(id)
if (len(self._opt.gpu_ids) > 0):
torch.cuda.set_device(self._opt.gpu_ids[0])
def _print(self, args):
print('------------ Options -------------')
for (k, v) in sorted(args.items()):
print(('%s: %s' % (str(k), str(v))))
print('-------------- End ----------------')
def _save(self, args):
expr_dir = os.path.join(self._opt.checkpoints_dir, self._opt.name)
print(expr_dir)
if self.is_train:
os.makedirs(expr_dir)
else:
assert os.path.exists(expr_dir)
file_name = os.path.join(expr_dir, ('opt_%s.txt' % ('train' if self.is_train else 'test')))
with open(file_name, 'wt') as opt_file:
opt_file.write('------------ Options -------------\n')
for (k, v) in sorted(args.items()):
opt_file.write(('%s: %s\n' % (str(k), str(v))))
opt_file.write('-------------- End ----------------\n')
def conv2d(input_, output_dim, k_h=5, k_w=5, d_h=2, d_w=2, stddev=0.02, name='conv2d'):
with tf.variable_scope(name):
w = tf.get_variable('w', [k_h, k_w, input_.get_shape()[(- 1)], output_dim], initializer=tf.truncated_normal_initializer(stddev=stddev))
conv = tf.nn.conv2d(input_, w, strides=[1, d_h, d_w, 1], padding='SAME')
biases = tf.get_variable('biases', [output_dim], initializer=tf.constant_initializer(0.0))
conv = tf.reshape(tf.nn.bias_add(conv, biases), conv.get_shape())
return conv
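# Usage sketch (TF1 graph mode, which the variable_scope/get_variable API implies):
# x = tf.placeholder(tf.float32, [None, 64, 64, 3])
# h = conv2d(x, output_dim=32)  # stride-2 'SAME' conv: shape becomes (None, 32, 32, 32)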
@pytest.mark.parametrize(('given', 'tag', 'number', 'node', 'dirty'), [('3.3.1-rc26-0-g9df187b', '3.3.1-rc26', 0, 'g9df187b', False), ('17.33.0-rc-17-g38c3047c0', '17.33.0-rc', 17, 'g38c3047c0', False)])
def test_parse_describe_output(given: str, tag: str, number: int, node: str, dirty: bool) -> None:
parsed = git._git_parse_describe(given)
assert (parsed == (tag, number, node, dirty))
class CleanChannels(Converter):
_channel_converter = TextChannelConverter()
async def convert(self, ctx: Context, argument: str) -> (Literal['*'] | list[TextChannel]):
if (argument == '*'):
return '*'
return [(await self._channel_converter.convert(ctx, channel)) for channel in argument.split()]
def test_get_srv_pn(np_junction):
from solcore.sesame_drift_diffusion.process_structure import get_srv, process_structure
from solcore import material, si
from solcore.structure import Junction, Layer
from solcore.state import State
options = State(T=300)
GaAs_p = material('GaAs')(T=300, Na=1e+24, hole_minority_lifetime=1e-06, electron_minority_lifetime=1e-06)
GaAs_n = material('GaAs')(T=300, Nd=1e+24, hole_minority_lifetime=1e-06, electron_minority_lifetime=1e-06)
junction = Junction([Layer(si('200nm'), GaAs_p, role='emitter'), Layer(si('2000nm'), GaAs_n, role='base')])
junction.sn = 5
junction.sp = 8
process_structure(junction, options)
(Sfront_e, Sfront_h, Sback_e, Sback_h) = get_srv(junction)
assert (Sfront_e == (junction.sp * 100))
assert (Sfront_h == (junction.sp * 100))
assert (Sback_e == (junction.sn * 100))
assert (Sback_h == (junction.sn * 100))
junction.sn_e = 2
junction.sn_h = 3
junction.sp_e = 4
junction.sp_h = 5
(Sfront_e, Sfront_h, Sback_e, Sback_h) = get_srv(junction)
assert (Sfront_e == (junction.sp_e * 100))
assert (Sfront_h == (junction.sp_h * 100))
assert (Sback_e == (junction.sn_e * 100))
assert (Sback_h == (junction.sn_h * 100))
def summarize_ratings(ratings_file, out_dir=None):
ratings_file = Path(ratings_file).resolve()
if (not pexists(ratings_file)):
raise IOError('Ratings file does not exist! : {}'.format(ratings_file))
if (out_dir is None):
out_dir = ratings_file.parents[0]
import re
clean = (lambda lbl: re.sub('\\W+', '_', lbl.lower()))
(rating_dict, notes) = load_ratings_csv(ratings_file)
rating_list = list()
uniq_labels = set()
all_labels = list()
for (sid, labels) in rating_dict.items():
sid_labels = [clean(lbl) for lbl in labels.split(cfg.rating_joiner)]
for lbl in sid_labels:
rating_list.append((sid, lbl))
uniq_labels.add(lbl)
all_labels.append(lbl)
if uniq_labels:
max_width = (1 + max([len(rt) for rt in uniq_labels]))
else:
print('No ratings to summarize! Returning empty dictionaries.')
return (dict(), dict())
counter = Counter(all_labels)
print('Ratings summary\n Counts (note some IDs can have multiple ratings):')
for (label, count) in counter.items():
print('\t{lbl:>{mw}} : {cnt:>7}'.format(lbl=label, cnt=count, mw=max_width))
id_lists = dict()
for label in uniq_labels:
id_lists[label] = list()
for (sid, label) in rating_list:
id_lists[label].append(sid)
print('\nList of IDs by rating:')
for label in uniq_labels:
print(' {lbl:>{mw}} (n={cnt:>}) : {lst}'.format(lbl=label, cnt=len(id_lists[label]), lst=id_lists[label], mw=max_width))
out_path = out_dir.joinpath('id_list_rating_{}.txt'.format(label))
with open(out_path, 'w') as of:
of.write('\n'.join(id_lists[label]))
return (counter, id_lists)
@dataclass(frozen=True)
class ContractSendChannelWithdraw(ContractSendEvent):
canonical_identifier: CanonicalIdentifier
total_withdraw: WithdrawAmount
expiration: BlockExpiration
partner_signature: Signature
@property
def channel_identifier(self) -> ChannelID:
return self.canonical_identifier.channel_identifier
@property
def token_network_address(self) -> TokenNetworkAddress:
return self.canonical_identifier.token_network_address
def __repr__(self) -> str:
return f'{self.__class__.__name__}< canonical_identifier: {self.canonical_identifier} total_withdraw: {self.total_withdraw} expiration: {self.expiration} partner_signature: {to_hex(self.partner_signature)} >'
def EfficientNet(width_coefficient, depth_coefficient, default_resolution, dropout_rate=0.2, drop_connect_rate=0.2, depth_divisor=8, blocks_args=DEFAULT_BLOCKS_ARGS, model_name='efficientnet', include_top=True, weights='imagenet', input_tensor=None, input_shape=None, pooling=None, classes=1000, freeze_bn=False, **kwargs):
global backend, layers, models, keras_utils
(backend, layers, models, keras_utils) = get_submodules_from_kwargs(kwargs)
features = []
if (not ((weights in {'imagenet', None}) or os.path.exists(weights))):
raise ValueError('The `weights` argument should be either `None` (random initialization), `imagenet` (pre-training on ImageNet), or the path to the weights file to be loaded.')
if ((weights == 'imagenet') and include_top and (classes != 1000)):
raise ValueError('If using `weights` as `"imagenet"` with `include_top` as true, `classes` should be 1000')
input_shape = _obtain_input_shape(input_shape, default_size=default_resolution, min_size=32, data_format=backend.image_data_format(), require_flatten=include_top, weights=weights)
if (input_tensor is None):
img_input = layers.Input(shape=input_shape)
else:
if (backend.backend() == 'tensorflow'):
from tensorflow.python.keras.backend import is_keras_tensor
else:
is_keras_tensor = backend.is_keras_tensor
if (not is_keras_tensor(input_tensor)):
img_input = layers.Input(tensor=input_tensor, shape=input_shape)
else:
img_input = input_tensor
bn_axis = (3 if (backend.image_data_format() == 'channels_last') else 1)
activation = get_swish(**kwargs)
x = img_input
x = layers.Conv2D(round_filters(32, width_coefficient, depth_divisor), 3, strides=(2, 2), padding='same', use_bias=False, kernel_initializer=CONV_KERNEL_INITIALIZER, name='stem_conv')(x)
x = BatchNormalization(freeze=freeze_bn, axis=bn_axis, name='stem_bn')(x)
x = layers.Activation(activation, name='stem_activation')(x)
num_blocks_total = sum((block_args.num_repeat for block_args in blocks_args))
block_num = 0
for (idx, block_args) in enumerate(blocks_args):
assert (block_args.num_repeat > 0)
block_args = block_args._replace(input_filters=round_filters(block_args.input_filters, width_coefficient, depth_divisor), output_filters=round_filters(block_args.output_filters, width_coefficient, depth_divisor), num_repeat=round_repeats(block_args.num_repeat, depth_coefficient))
drop_rate = ((drop_connect_rate * float(block_num)) / num_blocks_total)
x = mb_conv_block(x, block_args, activation=activation, drop_rate=drop_rate, prefix='block{}a_'.format((idx + 1)), freeze_bn=freeze_bn)
block_num += 1
if (block_args.num_repeat > 1):
block_args = block_args._replace(input_filters=block_args.output_filters, strides=[1, 1])
            for bidx in range((block_args.num_repeat - 1)):
drop_rate = ((drop_connect_rate * float(block_num)) / num_blocks_total)
block_prefix = 'block{}{}_'.format((idx + 1), string.ascii_lowercase[(bidx + 1)])
x = mb_conv_block(x, block_args, activation=activation, drop_rate=drop_rate, prefix=block_prefix, freeze_bn=freeze_bn)
block_num += 1
if ((idx < (len(blocks_args) - 1)) and (blocks_args[(idx + 1)].strides[0] == 2)):
features.append(x)
elif (idx == (len(blocks_args) - 1)):
features.append(x)
return features |
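# Usage sketch: this variant returns the intermediate feature maps (a backbone
# for detection heads) rather than classification logits. With B0 coefficients
# (width 1.0, depth 1.0, resolution 224) it would be called roughly as:
# feats = EfficientNet(1.0, 1.0, 224, model_name='efficientnet-b0',
#                      backend=keras.backend, layers=keras.layers,
#                      models=keras.models, utils=keras.utils)
# where the keras submodules are whatever get_submodules_from_kwargs expects. |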
def test_discover_cosine(local_client, grpc_client):
def f(client: QdrantBase, **kwargs: Dict[(str, Any)]) -> List[models.ScoredPoint]:
return client.discover(collection_name=COLLECTION_NAME, target=10, context=[models.ContextExamplePair(positive=11, negative=19)], with_payload=True, limit=10, using='image')
compare_client_results(grpc_client, f)
compare_client_results(local_client, f) |
class StateHandler(object):
def __init__(self, room):
self.room = room
self.current_state_name = (room.db.state or _FIRST_STATE)
self.prev_state_name = room.db.prev_state
self.current_state = None
self.current_state = self.load_state(self.current_state_name)
def load_state(self, statename):
try:
mod = utils.mod_import(f'{_ROOMSTATE_PACKAGE}.{statename}')
except Exception as err:
logger.log_trace()
self.room.msg_room(None, f'|rBUG: Could not load state {statename}: {err}!')
self.room.msg_room(None, f'|rBUG: Falling back to {self.current_state_name}')
return
state = mod.State(self, self.room)
return state
def init_state(self):
self.current_state.init()
def next_state(self, next_state=None):
next_state_name = self.current_state.next(next_state)
if next_state_name:
next_state = self.load_state(next_state_name)
if (not next_state):
raise RuntimeError(f'Could not load new state {next_state_name}!')
self.prev_state_name = self.current_state_name
self.current_state_name = next_state_name
self.current_state.clean()
self.prev_state = self.current_state
self.current_state = next_state
self.init_state()
self.room.db.prev_state = self.prev_state_name
self.room.db.state = self.current_state_name
return True
return False |
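# A state module loaded by load_state() is assumed to expose a State class
# shaped roughly like this (a sketch; names inferred from the handler above):
# class State:
#     def __init__(self, handler, room): ...
#     def init(self): ...               # run when the state becomes active
#     def next(self, next_state=None):  # return the next state module's name
#         return next_state
#     def clean(self): ...              # teardown before leaving the state |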
class TraceLocalSpanObserverTests(TraceTestBase):
def setUp(self):
super().setUp()
self.recorder = NullRecorder()
self.mock_context = mock.Mock()
self.span = ServerSpan('test-id', 'test-parent-id', 'test-span-id', None, 0, 'test', self.mock_context)
def test_init_local_component(self):
local_trace_observer = TraceLocalSpanObserver('test-service', 'test-component', 'test-host', self.span, self.recorder)
self.assertEqual(local_trace_observer.component_name, 'test-component')
def test_local_component_structure(self):
local_trace_observer = TraceLocalSpanObserver('test-service', 'test-component', 'test-host', self.span, self.recorder)
self.assertListEqual(local_trace_observer.binary_annotations, [{'key': ANNOTATIONS['COMPONENT'], 'value': 'baseplate', 'endpoint': {'ipv4': 'test-host', 'serviceName': 'test-service'}}, {'key': ANNOTATIONS['LOCAL_COMPONENT'], 'value': 'test-component', 'endpoint': {'ipv4': 'test-host', 'serviceName': 'test-service'}}])
def test_serialize(self):
local_trace_observer = TraceLocalSpanObserver('test-service', 'test-component', 'test-host', self.span, self.recorder)
local_trace_observer.on_start()
local_trace_observer.on_finish(None)
serialized_span = local_trace_observer._serialize()
self.assertIsNotNone(serialized_span['duration'])
self.assertEqual(serialized_span['name'], self.span.name)
annotations = serialized_span['binaryAnnotations']
for annotation in annotations:
self.assertTrue(('key' in annotation))
self.assertTrue(('value' in annotation))
self.assertTrue(('endpoint' in annotation)) |
class CoinCollectorLevel(gym.Env):
metadata = {'render.modes': ['human', 'ansi']}
def __init__(self, level, n_games, game_generator_seed, grammar_flags={}, request_infos=[]):
self.level = level
self.n_games = n_games
self.grammar_flags = grammar_flags
self.game_generator_seed = game_generator_seed
self.request_infos = request_infos
self.current_game = None
self.textworld_env = None
self.seed(1234)
vocab = textworld.text_utils.extract_vocab(self.grammar_flags)
vocab += ['coin']
self.action_space = text_spaces.Word(max_length=8, vocab=vocab)
self.observation_space = text_spaces.Word(max_length=200, vocab=vocab)
self.last_command = None
self.textworld_env = None
self.infos = None
def _get_seeds_per_game(self):
seeds_per_game = []
for i in range(self.n_games):
seeds = {}
seeds['map'] = self.rng_make.randint(65635)
seeds['objects'] = self.rng_make.randint(65635)
seeds['quest'] = self.rng_make.randint(65635)
seeds['grammar'] = self.rng_make.randint(65635)
seeds['inform7'] = self.seed_inform7
seeds_per_game.append(seeds)
return seeds_per_game
def _make_game(self, seeds):
options = GameOptions()
options.seeds = seeds
options.grammar = GrammarOptions(self.grammar_flags)
game = make_game_from_level(self.level, options)
hashid = encode_seeds(([self.game_generator_seed, self.level] + [seeds[k] for k in sorted(seeds)]))
game_name = '{}_{}'.format(self.spec.id, hashid)
game_file = textworld.generator.compile_game(game, path=pjoin('gen_games', str(self.spec.id), (game_name + '.ulx')))
return game_file
def _next_game(self):
seeds = next(self._game_seeds_iterator)
if (seeds not in self.games_collection):
self.games_collection[seeds] = self._make_game(dict(seeds))
self.rng_cmds = np.random.RandomState(self.seed_cmds)
return self.games_collection[seeds]
def seed(self, seed=None):
self.rng_games = np.random.RandomState((self.game_generator_seed + 1))
self.rng_make = np.random.RandomState((self.game_generator_seed + 2))
self.seed_cmds = (self.game_generator_seed + 3)
self.seed_map = self.rng_make.randint(65635)
self.seed_objects = self.rng_make.randint(65635)
self.seed_quest = self.rng_make.randint(65635)
self.seed_grammar = self.rng_make.randint(65635)
self.seed_inform7 = self.rng_make.randint(65635)
seeds_per_game = self._get_seeds_per_game()
self.seeds_per_game = [frozenset(seeds.items()) for seeds in seeds_per_game]
rng = np.random.RandomState(seed)
rng.shuffle(self.seeds_per_game)
self.games_collection = {}
self._game_seeds_iterator = make_infinite_shuffled_iterator(self.seeds_per_game, rng=self.rng_games)
return [seed]
def reset(self):
self.current_game = self._next_game()
self.infos = {}
self.infos['game_file'] = os.path.basename(self.current_game)
if (self.textworld_env is not None):
self.textworld_env.close()
self.textworld_env = textworld.start(self.current_game)
if ('admissible_commands' in self.request_infos):
self.textworld_env.activate_state_tracking()
if ('intermediate_reward' in self.request_infos):
self.textworld_env.activate_state_tracking()
self.textworld_env.compute_intermediate_reward()
self.infos['directions_names'] = self.textworld_env.game.directions_names
self.infos['verbs'] = self.textworld_env.game.verbs
self.infos['objects_names'] = self.textworld_env.game.objects_names
self.infos['objects_types'] = self.textworld_env.game.objects_types
self.infos['objects_names_and_types'] = self.textworld_env.game.objects_names_and_types
self.infos['max_score'] = 1
self.performed_actions = set()
self.game_state = self.textworld_env.reset()
ob = self.game_state.feedback
self._update_requested_infos()
return (ob, self.infos)
def _update_requested_infos(self):
for attr in self.request_infos:
self.infos[attr] = getattr(self.game_state, attr)
def skip(self, ngames=1):
for i in range(ngames):
next(self._game_seeds_iterator)
def step(self, action):
self.last_command = action
(self.game_state, reward, done) = self.textworld_env.step(self.last_command)
ob = self.game_state.feedback
self._update_requested_infos()
return (ob, reward, done, self.infos)
def render(self, mode='human'):
outfile = (StringIO() if (mode == 'ansi') else sys.stdout)
if (self.last_command is not None):
command = colorize(('> ' + self.last_command), 'yellow', highlight=False)
outfile.write((command + '\n\n'))
outfile.write((self.game_state.feedback + '\n'))
if (mode != 'human'):
return outfile
def close(self):
if (self.textworld_env is not None):
self.textworld_env.close()
self.textworld_env = None |
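# Usage sketch following the usual gym loop. Note _make_game reads self.spec,
# so the env is normally created through gym.make on a registered id; direct
# construction is shown only for illustration:
# env = CoinCollectorLevel(level=1, n_games=10, game_generator_seed=20180514)
# ob, infos = env.reset()
# ob, reward, done, infos = env.step('go east')
# env.render()
# env.close() |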
def get_externsheet_local_range(bk, refx, blah=0):
try:
info = bk._externsheet_info[refx]
except IndexError:
print(('!!! get_externsheet_local_range: refx=%d, not in range(%d)' % (refx, len(bk._externsheet_info))), file=bk.logfile)
return ((- 101), (- 101))
(ref_recordx, ref_first_sheetx, ref_last_sheetx) = info
if (ref_recordx == bk._supbook_addins_inx):
if blah:
print(('/// get_externsheet_local_range(refx=%d) -> addins %r' % (refx, info)), file=bk.logfile)
assert (ref_first_sheetx == 65534 == ref_last_sheetx)
return ((- 5), (- 5))
if (ref_recordx != bk._supbook_locals_inx):
if blah:
print(('/// get_externsheet_local_range(refx=%d) -> external %r' % (refx, info)), file=bk.logfile)
return ((- 4), (- 4))
if (ref_first_sheetx == 65534 == ref_last_sheetx):
if blah:
print(('/// get_externsheet_local_range(refx=%d) -> unspecified sheet %r' % (refx, info)), file=bk.logfile)
return ((- 1), (- 1))
if (ref_first_sheetx == 65535 == ref_last_sheetx):
if blah:
print(('/// get_externsheet_local_range(refx=%d) -> deleted sheet(s)' % (refx,)), file=bk.logfile)
return ((- 2), (- 2))
nsheets = len(bk._all_sheets_map)
if (not (0 <= ref_first_sheetx <= ref_last_sheetx < nsheets)):
if blah:
print(('/// get_externsheet_local_range(refx=%d) -> %r' % (refx, info)), file=bk.logfile)
print(('--- first/last sheet not in range(%d)' % nsheets), file=bk.logfile)
return ((- 102), (- 102))
xlrd_sheetx1 = bk._all_sheets_map[ref_first_sheetx]
xlrd_sheetx2 = bk._all_sheets_map[ref_last_sheetx]
if (not (0 <= xlrd_sheetx1 <= xlrd_sheetx2)):
return ((- 3), (- 3))
return (xlrd_sheetx1, xlrd_sheetx2) |
def verify_interface(test_interface, nodelst, template, kubecli: KrknKubernetes):
pod_index = random.randint(0, (len(nodelst) - 1))
pod_body = yaml.safe_load(template.render(nodename=nodelst[pod_index]))
logging.info(('Creating pod to query interface on node %s' % nodelst[pod_index]))
kubecli.create_pod(pod_body, 'default', 300)
try:
if (test_interface == []):
cmd = "ip r | grep default | awk '/default/ {print $5}'"
output = kubecli.exec_cmd_in_pod(cmd, 'fedtools', 'default')
test_interface = [output.replace('\n', '')]
else:
cmd = "ip -br addr show|awk -v ORS=',' '{print $1}'"
output = kubecli.exec_cmd_in_pod(cmd, 'fedtools', 'default')
interface_lst = output[:(- 1)].split(',')
for interface in test_interface:
if (interface not in interface_lst):
logging.error(('Interface %s not found in node %s interface list %s' % (interface, nodelst[pod_index], interface_lst)))
raise RuntimeError()
return test_interface
finally:
        logging.info('Deleting pod used to query interface on node')
kubecli.delete_pod('fedtools', 'default') |
def add_pyscaffold(config: ConfigUpdater, opts: ScaffoldOpts) -> ConfigUpdater:
if ('pyscaffold' not in config):
config.add_section('pyscaffold')
pyscaffold = config['pyscaffold']
pyscaffold['version'] = pyscaffold_version
extensions = {ext.name for ext in opts.get('extensions', []) if ext.persist}
old = cast(str, pyscaffold.get('extensions', Object(value='')).value)
new = list(sorted((parse_extensions(old) | extensions)))
if new:
pyscaffold.set('extensions')
pyscaffold['extensions'].set_values(new)
allowed = ((k, v) for (k, v) in opts.items() if any(map(k.startswith, extensions)))
allowed_ = {k: ('' if (v is None) else v) for (k, v) in allowed}
pyscaffold.update(allowed_)
return config |
class MachoParser():
def __init__(self, ql, path, arch=None):
self.ql = ql
self.binary_file = self.readFile(path)
self.raw_data = self.binary_file
self.archtype = ql.arch.type
self.parseFile()
self.page_zero_size = 0
self.header_address = 0
for seg in self.segments:
if ((seg.vm_address == 0) and (seg.file_size == 0)):
ql.log.info('PageZero Size: {:X}'.format(seg.vm_size))
self.page_zero_size = seg.vm_size
self.header_address = seg.vm_size
    @staticmethod
    def readFile(path):
with open(path, 'rb') as f:
return f.read()
def parseFile(self):
if (not self.binary_file):
return
if (not self.parseHeader()):
return
if (not self.parseLoadCommand()):
return
if (not self.parseData()):
return
def parseHeader(self):
self.magic = self.getMagic(self.binary_file)
if (self.magic in MAGIC_64):
self.ql.log.debug('Got a 64bit Header ')
self.header = BinaryHeader(self.binary_file)
elif (self.magic in MAGIC_FAT):
self.ql.log.debug('Got a fat header')
fat = FatHeader(self.binary_file)
file_info = fat.getBinary(self.archtype)
self.binary_file = self.binary_file[file_info.offset:(file_info.offset + file_info.size)]
self.header = BinaryHeader(self.binary_file)
else:
            self.ql.log.info('unknown header!')
return False
if (not self.header):
self.ql.log.info('parse header error')
return False
return True
def parseLoadCommand(self):
self.ql.log.debug('Parse LoadCommand')
if ((not self.header.lc_num) or (not self.header.lc_size) or (not self.header.header_size)):
return False
FR = FileReader(self.binary_file)
FR.setOffset(self.header.header_size)
self.lc_raw = FR.read(self.header.lc_size)
self.commands = []
offset = 0
for i in range(self.header.lc_num):
if (self.header.lc_size >= 8):
lc = LoadCommand(self.lc_raw[offset:])
else:
self.ql.log.info('cmd size overflow')
return False
            if (self.header.lc_size >= (offset + lc.cmd_size)):
                complete_cmd = lc.get_complete()
else:
self.ql.log.info('cmd size overflow')
return False
self.commands.append(complete_cmd)
offset += lc.cmd_size
return True
def parseData(self):
self.segments = []
self.sections = [None]
for command in self.commands:
if (command.cmd_id == LC_SEGMENT_64):
tmp = Segment(command, self.binary_file)
tmp.sections_index += range(len(self.sections), (len(self.sections) + len(tmp.sections)))
self.segments.append(tmp)
for section in tmp.sections:
self.sections.append(section)
elif (command.cmd_id == LC_SEGMENT):
tmp = Segment(command, self.binary_file)
self.segments.append(tmp)
for section in tmp.sections:
self.sections.append(section)
elif (command.cmd_id == LC_FUNCTION_STARTS):
self.function_starts = FunctionStarts(command, self.binary_file)
elif (command.cmd_id == LC_SYMTAB):
self.symbol_table = SymbolTable(command, self.binary_file)
self.string_table = StringTable(command, self.binary_file)
elif (command.cmd_id == LC_DATA_IN_CODE):
self.data_in_code = DataInCode(command, self.binary_file)
elif (command.cmd_id == LC_CODE_SIGNATURE):
self.code_signature = CodeSignature(command, self.binary_file)
elif (command.cmd_id == LC_SEGMENT_SPLIT_INFO):
self.seg_split_info = SegmentSplitInfo(command, self.binary_file)
elif (command.cmd_id == LC_DYSYMTAB):
self.dysymbol_table = DySymbolTable(command, self.binary_file)
return True
    @staticmethod
    def getMagic(binary):
return unpack('<L', binary[:4])[0]
def get_segment(self, name):
for seg in self.segments:
if (seg.name == name):
return seg
return None |
class Audio_Visual_Separation():
def __init__(self):
self.Video_Path = ''
self.Video_Name = ''
self.Audio_Path = ''
self.Audio_Name = ''
    @staticmethod
    def _path_check(path):
        # Path.is_file()/is_dir() imply existence here, so one check suffices.
        return Path(path).exists()
def Audio_Visual_Separation(self, Video_Path, Video_Name, Audio_Path=None, Audio_Name=None):
self.Video_Path = Video_Path
self.Video_Name = Video_Name
if (Audio_Path is not None):
self.Audio_Path = Audio_Path
else:
self.Audio_Path = Video_Path
if (Audio_Name is not None):
self.Audio_Name = Audio_Name
else:
self.Audio_Name = Video_Name.replace('.mp4', '.mp3')
if self._path_check((self.Video_Path + self.Video_Name)):
if self._path_check(self.Audio_Path):
__video_path = (self.Video_Path + self.Video_Name)
__audio_path = (self.Audio_Path + self.Audio_Name)
my_audio_clip = AudioFileClip(__video_path)
my_audio_clip.write_audiofile(__audio_path)
else:
                print("error: audio file path can't be found")
else:
            print("error: video file or file path can't be found") |
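# Usage sketch (hypothetical file names; AudioFileClip comes from moviepy):
# sep = Audio_Visual_Separation()
# sep.Audio_Visual_Separation('/videos/', 'clip.mp4')  # writes /videos/clip.mp3 |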
class SequentialGraphRewriter(GraphRewriter, UserList):
    @classmethod
    def warn(cls, exc, self, rewriter):
_logger.error(f'{cls.__name__} apply {rewriter}')
_logger.error('Traceback:')
_logger.error(traceback.format_exc())
if (config.on_opt_error == 'raise'):
raise exc
elif (config.on_opt_error == 'pdb'):
pdb.post_mortem(sys.exc_info()[2])
def __init__(self, *rewrites, failure_callback=None):
if ((len(rewrites) == 1) and isinstance(rewrites[0], (list, tuple))):
rewrites = rewrites[0]
super().__init__(rewrites)
self.failure_callback = failure_callback
def apply(self, fgraph):
l = []
if fgraph.profile:
validate_before = fgraph.profile.validate_time
sub_validate_time = [validate_before]
callbacks_before = fgraph.execute_callbacks_times.copy()
else:
sub_validate_time = []
callbacks_before = []
callback_before = fgraph.execute_callbacks_time
nb_node_before = len(fgraph.apply_nodes)
sub_profs = []
nb_nodes = []
self.pre_profile = (self, l, (- 1), (- 1), nb_node_before, (- 1), sub_profs, sub_validate_time, nb_nodes, {})
try:
for rewriter in self.data:
try:
nb_nodes_before = len(fgraph.apply_nodes)
t0 = time.perf_counter()
sub_prof = rewriter.apply(fgraph)
l.append(float((time.perf_counter() - t0)))
sub_profs.append(sub_prof)
nb_nodes.append((nb_nodes_before, len(fgraph.apply_nodes)))
if fgraph.profile:
sub_validate_time.append(fgraph.profile.validate_time)
except AssertionError:
raise
except Exception as e:
if self.failure_callback:
self.failure_callback(e, self, rewriter)
continue
else:
raise
finally:
if fgraph.profile:
validate_time = (fgraph.profile.validate_time - validate_before)
callbacks_time = {}
for (k, v) in fgraph.execute_callbacks_times.items():
if (k in callbacks_before):
t = (v - callbacks_before[k])
if (t > 0):
callbacks_time[k] = t
else:
callbacks_time[k] = v
else:
validate_time = None
callbacks_time = {}
callback_time = (fgraph.execute_callbacks_time - callback_before)
self.pre_profile = (self, l, validate_time, callback_time, nb_node_before, len(fgraph.apply_nodes), sub_profs, sub_validate_time, nb_nodes, callbacks_time)
return self.pre_profile
def __repr__(self):
return f'{type(self).__name__}({self.data})'
def add_requirements(self, fgraph):
for rewrite in self.data:
rewrite.add_requirements(fgraph)
def print_summary(self, stream=sys.stdout, level=0, depth=(- 1)):
name = getattr(self, 'name', None)
print(f"{(' ' * level)}{self.__class__.__name__} {name} id={id(self)}", file=stream)
if (depth != 0):
depth -= 1
for rewrite in self.data:
rewrite.print_summary(stream, level=(level + 2), depth=depth)
    @classmethod
    def print_profile(cls, stream, prof, level=0):
(rewrites, prof, validate_time, callback_time, nb_node_before, nb_node_after, sub_profs, sub_validate_time, nb_nodes, callbacks_time) = prof
validate_time = (validate_time or float('nan'))
callback_time = (callback_time or float('nan'))
blanc = (' ' * level)
print(blanc, cls.__name__, end=' ', file=stream)
if hasattr(rewrites, 'name'):
print(blanc, rewrites.name, end=' ', file=stream)
elif hasattr(rewrites, '__name__'):
print(blanc, rewrites.__name__, end=' ', file=stream)
print(f' time {sum(prof):.3f}s for {int(nb_node_before)}/{int(nb_node_after)} nodes before/after rewriting', file=stream)
print(blanc, f' {callback_time:.3f}s for callback', file=stream)
print(blanc, f' {validate_time:.3f}s for fgraph.validate()', file=stream)
if (callback_time > 1):
print(blanc, ' callbacks_time', file=stream)
for i in sorted(callbacks_time.items(), key=(lambda a: (- a[1]))):
if (i[1] > 0):
print(blanc, ' ', i[0], ',', i[1], file=stream)
if (level == 0):
print(blanc, ' time - (name, class, index, nodes before, nodes after) - validate time', file=stream)
ll = []
for (rewrite, nb_n) in zip(rewrites, nb_nodes):
if hasattr(rewrite, '__name__'):
name = rewrite.__name__
else:
name = rewrite.name
idx = rewrites.index(rewrite)
ll.append(((name, rewrite.__class__.__name__, idx) + nb_n))
lll = sorted(zip(prof, ll), key=(lambda a: a[0]))
for (t, rewrite) in lll[::(- 1)]:
i = rewrite[2]
if sub_validate_time:
val_time = (sub_validate_time[(i + 1)] - sub_validate_time[i])
print(blanc, f' {t:.6f}s - {rewrite} - {val_time:.3f}s', file=stream)
else:
print(blanc, f' {t:.6f}s - {rewrite}', file=stream)
if sub_profs[i]:
rewrites[i].print_profile(stream, sub_profs[i], level=(level + 1))
print(file=stream)
    @staticmethod
    def merge_profile(prof1, prof2):
new_t = []
new_l = []
new_sub_profile = []
for l in set(prof1[0]).intersection(set(prof2[0])):
idx1 = prof1[0].index(l)
idx2 = prof2[0].index(l)
new_t.append((prof1[1][idx1] + prof2[1][idx2]))
new_l.append(l)
if hasattr(l, 'merge_profile'):
assert (len(prof1[6][idx1]) == len(prof2[6][idx2]))
new_sub_profile.append(l.merge_profile(prof1[6][idx1], prof2[6][idx2]))
else:
new_sub_profile.append(None)
from io import StringIO
for l in set(prof1[0]).symmetric_difference(set(prof2[0])):
new_l_names = [o.name for o in new_l]
if (l.name in new_l_names):
idx = new_l_names.index(l.name)
io1 = StringIO()
io2 = StringIO()
l.print_summary(io1)
new_l[idx].print_summary(io2)
                if (io1.getvalue() == io2.getvalue()):
if (l in prof1[0]):
p = prof1
else:
p = prof2
new_t[idx] += p[1][p[0].index(l)]
if hasattr(l, 'merge_profile'):
assert (len(p[6][p[0].index(l)]) == len(new_sub_profile[idx]))
new_sub_profile[idx] = l.merge_profile(new_sub_profile[idx], p[6][p[0].index(l)])
else:
new_sub_profile[idx] = None
continue
if (l in prof1[0]):
p = prof1
else:
p = prof2
new_t.append(p[1][p[0].index(l)])
idx = p[0].index(l)
new_l.append(l)
new_sub_profile.append(p[6][idx])
new_rewrite = SequentialGraphRewriter(*new_l)
new_nb_nodes = []
for (p1, p2) in zip(prof1[8], prof2[8]):
new_nb_nodes.append(((p1[0] + p2[0]), (p1[1] + p2[1])))
new_nb_nodes.extend(prof1[8][len(new_nb_nodes):])
new_nb_nodes.extend(prof2[8][len(new_nb_nodes):])
new_callbacks_times = merge_dict(prof1[9], prof2[9])
assert {l.name for l in prof1[0]}.issubset({l.name for l in new_l})
assert {l.name for l in prof2[0]}.issubset({l.name for l in new_l})
assert (len(new_t) == len(new_rewrite) == len(new_sub_profile))
return (new_rewrite, new_t, (prof1[2] + prof2[2]), (prof1[3] + prof2[3]), (- 1), (- 1), new_sub_profile, [], new_nb_nodes, new_callbacks_times) |
class GraphicsLayoutWidget(GraphicsView):
def __init__(self, **kwds):
super().__init__(**kwds)
self.gfxLayout = graphicsItems.GraphicsLayout.GraphicsLayout()
for n in ['nextRow', 'nextCol', 'nextColumn', 'addItem', 'getItem', 'addLayout', 'addLabel', 'removeItem', 'itemIndex', 'clear']:
setattr(self, n, getattr(self.gfxLayout, n))
self.gfxView.setCentralItem(self.gfxLayout)
def addPlot(self, *args, **kwds):
kwds['enableMenu'] = False
plotItem = self.gfxLayout.addPlot(*args, **kwds)
connect_viewbox_redraw(plotItem.getViewBox(), self.request_draw)
return plotItem
def addViewBox(self, *args, **kwds):
kwds['enableMenu'] = False
vb = self.gfxLayout.addViewBox(*args, **kwds)
connect_viewbox_redraw(vb, self.request_draw)
return vb |
@_REGISTRY.register()
class SDLModel(SRModel):
def init_training_settings(self):
self.net_g.train()
train_opt = self.opt['train']
self.ema_decay = train_opt.get('ema_decay', 0)
if (self.ema_decay > 0):
logger = get_root_logger()
logger.info(f'Use Exponential Moving Average with decay: {self.ema_decay}')
self.net_g_ema = build_network(self.opt['network_g']).to(self.device)
load_path = self.opt['path'].get('pretrain_network_g', None)
if (load_path is not None):
self.load_network(self.net_g_ema, load_path, self.opt['path'].get('strict_load_g', True), 'params_ema')
else:
self.model_ema(0)
self.net_g_ema.eval()
if train_opt.get('pixel_opt'):
self.cri_pix = build_loss(train_opt['pixel_opt']).to(self.device)
else:
self.cri_pix = None
if train_opt.get('charbonnier_opt'):
            self.cri_cb = build_loss(train_opt['charbonnier_opt']).to(self.device)
else:
self.cri_cb = None
if train_opt.get('perceptual_opt'):
self.cri_perceptual = build_loss(train_opt['perceptual_opt']).to(self.device)
else:
self.cri_perceptual = None
if ((self.cri_pix is None) and (self.cri_perceptual is None)):
raise ValueError('Both pixel and perceptual losses are None.')
self.setup_optimizers()
self.setup_schedulers()
def feed_data(self, data):
self.input = data['in'].to(self.device)
self.t = data['t'].to(self.device)
if ('gt' in data):
self.gt = data['gt'].to(self.device)
def optimize_parameters(self, current_iter):
self.optimizer_g.zero_grad()
self.output = self.net_g(self.input, self.t)
l_total = 0
loss_dict = OrderedDict()
if self.cri_pix:
l_pix = self.cri_pix(self.output, self.gt)
l_total += l_pix
loss_dict['l_pix'] = l_pix
if self.cri_cb:
l_cb = self.cri_cb(self.output, self.gt)
l_total += l_cb
loss_dict['l_cb'] = l_cb
if self.cri_perceptual:
(l_percep, l_style) = self.cri_perceptual(self.output, self.gt)
if (l_percep is not None):
l_total += l_percep
loss_dict['l_percep'] = l_percep
if (l_style is not None):
l_total += l_style
loss_dict['l_style'] = l_style
l_total.backward()
self.optimizer_g.step()
self.log_dict = self.reduce_loss_dict(loss_dict)
if (self.ema_decay > 0):
self.model_ema(decay=self.ema_decay)
def test(self):
if hasattr(self, 'net_g_ema'):
self.net_g_ema.eval()
with torch.no_grad():
self.output = self.net_g_ema(self.input, self.t)
else:
self.net_g.eval()
with torch.no_grad():
self.output = self.net_g(self.input, self.t)
self.net_g.train()
def nondist_validation(self, dataloader, current_iter, tb_logger, save_img):
dataset_name = dataloader.dataset.opt['name']
with_metrics = (self.opt['val'].get('metrics') is not None)
if with_metrics:
self.metric_results = {metric: 0 for metric in self.opt['val']['metrics'].keys()}
for (idx, val_data) in enumerate(dataloader):
img_name = osp.splitext(osp.basename(val_data['lq_path'][0]))[0]
self.feed_data(val_data)
self.test()
visuals = self.get_current_visuals()
res_img = tensor2img([visuals['result']])
if ('gt' in visuals):
gt_img = tensor2img([visuals['gt']])
del self.gt
del self.input
del self.output
torch.cuda.empty_cache()
if save_img:
if self.opt['is_train']:
save_img_path = osp.join(self.opt['path']['visualization'], img_name, f'{img_name}_{current_iter}.png')
elif self.opt['val']['suffix']:
save_img_path = osp.join(self.opt['path']['visualization'], dataset_name, f"{img_name}_{self.opt['val']['suffix']}.png")
else:
save_img_path = osp.join(self.opt['path']['visualization'], dataset_name, f"{img_name}_{self.opt['name']}.png")
imwrite(res_img, save_img_path)
if with_metrics:
for (name, opt_) in self.opt['val']['metrics'].items():
metric_data = dict(img=res_img, img2=gt_img)
self.metric_results[name] += calculate_metric(metric_data, opt_)
if with_metrics:
for metric in self.metric_results.keys():
self.metric_results[metric] /= (idx + 1)
self._log_validation_metric_values(current_iter, dataset_name, tb_logger)
def get_current_visuals(self):
out_dict = OrderedDict()
out_dict['result'] = self.output.detach().cpu()
if hasattr(self, 'gt'):
out_dict['gt'] = self.gt.detach().cpu()
return out_dict |
def check_mopidy_extensions() -> Dict[(str, Tuple[(bool, str)])]:
try:
subprocess.check_call(['systemctl', 'is-active', 'mopidy'], stdout=subprocess.DEVNULL)
except subprocess.CalledProcessError:
extensions = _check_mopidy_extensions_user()
else:
extensions = _check_mopidy_extensions_service()
return extensions |
class BucketStopwatchMeter(object):
def __init__(self, increment, max_length, sentences_per_batch):
self.increment = increment
self.n_buckets = ((max_length // increment) + 1)
self.sentences_per_batch = sentences_per_batch
self.reset()
def start(self):
self.start_time = time.time()
def stop(self, n=1):
if (self.start_time is not None):
delta = (time.time() - self.start_time)
bucket_id = min((self.n_buckets - 1), (n // self.increment))
self.sum[bucket_id] += delta
self.n[bucket_id] += n
self.count[bucket_id] += 1
self.start_time = None
def reset(self):
self.sum = ([0] * self.n_buckets)
self.n = ([0] * self.n_buckets)
self.count = ([0] * self.n_buckets)
self.start_time = None
def reset_bucket(self, bucket_id):
if (self.start_time is None):
self.sum[bucket_id] = 0
self.n[bucket_id] = 0
self.count[bucket_id] = 0
def avg(self):
return (sum(self.sum) / sum(self.n))
def avgs(self):
result = ([0] * self.n_buckets)
for i in range(self.n_buckets):
if (self.n[i] != 0):
result[i] = (self.sum[i] / self.n[i])
else:
result[i] = 0
return result |
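# Self-contained usage sketch: bucket timings by item count and report average
# seconds per item, overall and per bucket.
import time
meter = BucketStopwatchMeter(increment=10, max_length=50, sentences_per_batch=8)
for n_items in (7, 23, 48):
    meter.start()
    time.sleep(0.01)  # stand-in for processing n_items items
    meter.stop(n=n_items)
print(meter.avg(), meter.avgs()) |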
class WebKitCaret(browsertab.AbstractCaret):
_widget: webview.WebView
def __init__(self, tab: 'WebKitTab', mode_manager: modeman.ModeManager, parent: QWidget=None) -> None:
super().__init__(tab, mode_manager, parent)
self._selection_state = browsertab.SelectionState.none
    @pyqtSlot(usertypes.KeyMode)
def _on_mode_entered(self, mode):
if (mode != usertypes.KeyMode.caret):
return
if self._widget.hasSelection():
self._selection_state = browsertab.SelectionState.normal
else:
self._selection_state = browsertab.SelectionState.none
self.selection_toggled.emit(self._selection_state)
settings = self._widget.settings()
settings.setAttribute(QWebSettings.WebAttribute.CaretBrowsingEnabled, True)
if self._widget.isVisible():
self._widget.clearFocus()
self._widget.setFocus(Qt.FocusReason.OtherFocusReason)
if (self._selection_state is browsertab.SelectionState.none):
self._widget.page().currentFrame().evaluateJavaScript(resources.read_file('javascript/position_caret.js'))
    @pyqtSlot(usertypes.KeyMode)
def _on_mode_left(self, _mode):
settings = self._widget.settings()
if settings.testAttribute(QWebSettings.WebAttribute.CaretBrowsingEnabled):
if ((self._selection_state is not browsertab.SelectionState.none) and self._widget.hasSelection()):
self._widget.triggerPageAction(QWebPage.WebAction.MoveToNextChar)
settings.setAttribute(QWebSettings.WebAttribute.CaretBrowsingEnabled, False)
self._selection_state = browsertab.SelectionState.none
def move_to_next_line(self, count=1):
if (self._selection_state is not browsertab.SelectionState.none):
act = QWebPage.WebAction.SelectNextLine
else:
act = QWebPage.WebAction.MoveToNextLine
for _ in range(count):
self._widget.triggerPageAction(act)
if (self._selection_state is browsertab.SelectionState.line):
self._select_line_to_end()
def move_to_prev_line(self, count=1):
if (self._selection_state is not browsertab.SelectionState.none):
act = QWebPage.WebAction.SelectPreviousLine
else:
act = QWebPage.WebAction.MoveToPreviousLine
for _ in range(count):
self._widget.triggerPageAction(act)
if (self._selection_state is browsertab.SelectionState.line):
self._select_line_to_start()
def move_to_next_char(self, count=1):
if (self._selection_state is browsertab.SelectionState.normal):
act = QWebPage.WebAction.SelectNextChar
elif (self._selection_state is browsertab.SelectionState.line):
return
else:
act = QWebPage.WebAction.MoveToNextChar
for _ in range(count):
self._widget.triggerPageAction(act)
def move_to_prev_char(self, count=1):
if (self._selection_state is browsertab.SelectionState.normal):
act = QWebPage.WebAction.SelectPreviousChar
elif (self._selection_state is browsertab.SelectionState.line):
return
else:
act = QWebPage.WebAction.MoveToPreviousChar
for _ in range(count):
self._widget.triggerPageAction(act)
def move_to_end_of_word(self, count=1):
if (self._selection_state is browsertab.SelectionState.normal):
act = [QWebPage.WebAction.SelectNextWord]
if utils.is_windows:
act.append(QWebPage.WebAction.SelectPreviousChar)
elif (self._selection_state is browsertab.SelectionState.line):
return
else:
act = [QWebPage.WebAction.MoveToNextWord]
if utils.is_windows:
act.append(QWebPage.WebAction.MoveToPreviousChar)
for _ in range(count):
for a in act:
self._widget.triggerPageAction(a)
def move_to_next_word(self, count=1):
if (self._selection_state is browsertab.SelectionState.normal):
act = [QWebPage.WebAction.SelectNextWord]
if (not utils.is_windows):
act.append(QWebPage.WebAction.SelectNextChar)
elif (self._selection_state is browsertab.SelectionState.line):
return
else:
act = [QWebPage.WebAction.MoveToNextWord]
if (not utils.is_windows):
act.append(QWebPage.WebAction.MoveToNextChar)
for _ in range(count):
for a in act:
self._widget.triggerPageAction(a)
def move_to_prev_word(self, count=1):
if (self._selection_state is browsertab.SelectionState.normal):
act = QWebPage.WebAction.SelectPreviousWord
elif (self._selection_state is browsertab.SelectionState.line):
return
else:
act = QWebPage.WebAction.MoveToPreviousWord
for _ in range(count):
self._widget.triggerPageAction(act)
def move_to_start_of_line(self):
if (self._selection_state is browsertab.SelectionState.normal):
act = QWebPage.WebAction.SelectStartOfLine
elif (self._selection_state is browsertab.SelectionState.line):
return
else:
act = QWebPage.WebAction.MoveToStartOfLine
self._widget.triggerPageAction(act)
def move_to_end_of_line(self):
if (self._selection_state is browsertab.SelectionState.normal):
act = QWebPage.WebAction.SelectEndOfLine
elif (self._selection_state is browsertab.SelectionState.line):
return
else:
act = QWebPage.WebAction.MoveToEndOfLine
self._widget.triggerPageAction(act)
def move_to_start_of_next_block(self, count=1):
if (self._selection_state is not browsertab.SelectionState.none):
act = [QWebPage.WebAction.SelectNextLine, QWebPage.WebAction.SelectStartOfBlock]
else:
act = [QWebPage.WebAction.MoveToNextLine, QWebPage.WebAction.MoveToStartOfBlock]
for _ in range(count):
for a in act:
self._widget.triggerPageAction(a)
if (self._selection_state is browsertab.SelectionState.line):
self._select_line_to_end()
def move_to_start_of_prev_block(self, count=1):
if (self._selection_state is not browsertab.SelectionState.none):
act = [QWebPage.WebAction.SelectPreviousLine, QWebPage.WebAction.SelectStartOfBlock]
else:
act = [QWebPage.WebAction.MoveToPreviousLine, QWebPage.WebAction.MoveToStartOfBlock]
for _ in range(count):
for a in act:
self._widget.triggerPageAction(a)
if (self._selection_state is browsertab.SelectionState.line):
self._select_line_to_start()
def move_to_end_of_next_block(self, count=1):
if (self._selection_state is not browsertab.SelectionState.none):
act = [QWebPage.WebAction.SelectNextLine, QWebPage.WebAction.SelectEndOfBlock]
else:
act = [QWebPage.WebAction.MoveToNextLine, QWebPage.WebAction.MoveToEndOfBlock]
for _ in range(count):
for a in act:
self._widget.triggerPageAction(a)
if (self._selection_state is browsertab.SelectionState.line):
self._select_line_to_end()
def move_to_end_of_prev_block(self, count=1):
if (self._selection_state is not browsertab.SelectionState.none):
act = [QWebPage.WebAction.SelectPreviousLine, QWebPage.WebAction.SelectEndOfBlock]
else:
act = [QWebPage.WebAction.MoveToPreviousLine, QWebPage.WebAction.MoveToEndOfBlock]
for _ in range(count):
for a in act:
self._widget.triggerPageAction(a)
if (self._selection_state is browsertab.SelectionState.line):
self._select_line_to_start()
def move_to_start_of_document(self):
if (self._selection_state is not browsertab.SelectionState.none):
act = QWebPage.WebAction.SelectStartOfDocument
else:
act = QWebPage.WebAction.MoveToStartOfDocument
self._widget.triggerPageAction(act)
if (self._selection_state is browsertab.SelectionState.line):
self._select_line()
def move_to_end_of_document(self):
if (self._selection_state is not browsertab.SelectionState.none):
act = QWebPage.WebAction.SelectEndOfDocument
else:
act = QWebPage.WebAction.MoveToEndOfDocument
self._widget.triggerPageAction(act)
def toggle_selection(self, line=False):
if line:
self._selection_state = browsertab.SelectionState.line
self._select_line()
self.reverse_selection()
self._select_line()
self.reverse_selection()
elif (self._selection_state is not browsertab.SelectionState.normal):
self._selection_state = browsertab.SelectionState.normal
else:
self._selection_state = browsertab.SelectionState.none
self.selection_toggled.emit(self._selection_state)
def drop_selection(self):
self._widget.triggerPageAction(QWebPage.WebAction.MoveToNextChar)
def selection(self, callback):
callback(self._widget.selectedText())
def reverse_selection(self):
self._tab.run_js_async('{\n const sel = window.getSelection();\n sel.setBaseAndExtent(\n sel.extentNode, sel.extentOffset, sel.baseNode,\n sel.baseOffset\n );\n }')
def _select_line(self):
self._widget.triggerPageAction(QWebPage.WebAction.SelectStartOfLine)
self.reverse_selection()
self._widget.triggerPageAction(QWebPage.WebAction.SelectEndOfLine)
self.reverse_selection()
def _select_line_to_end(self):
if self._js_selection_left_to_right():
self._widget.triggerPageAction(QWebPage.WebAction.SelectEndOfLine)
def _select_line_to_start(self):
if (not self._js_selection_left_to_right()):
self._widget.triggerPageAction(QWebPage.WebAction.SelectStartOfLine)
def _js_selection_left_to_right(self):
return self._tab.private_api.run_js_sync('\n var sel = window.getSelection();\n var position = sel.anchorNode.compareDocumentPosition(sel.focusNode);\n (!position && sel.anchorOffset < sel.focusOffset ||\n position === Node.DOCUMENT_POSITION_FOLLOWING);\n ')
def _follow_selected(self, *, tab=False):
if QWebSettings.globalSettings().testAttribute(QWebSettings.WebAttribute.JavascriptEnabled):
if tab:
self._tab.data.override_target = usertypes.ClickTarget.tab
self._tab.run_js_async('\n const aElm = document.activeElement;\n if (window.getSelection().anchorNode) {\n window.getSelection().anchorNode.parentNode.click();\n } else if (aElm && aElm !== document.body) {\n aElm.click();\n }\n ')
else:
selection = self._widget.selectedHtml()
if (not selection):
self._follow_enter(tab)
return
try:
selected_element = xml.etree.ElementTree.fromstring('<html>{}</html>'.format(selection)).find('a')
except xml.etree.ElementTree.ParseError:
raise browsertab.WebTabError('Could not parse selected element!')
if (selected_element is not None):
try:
href = selected_element.attrib['href']
except KeyError:
raise browsertab.WebTabError('Anchor element without href!')
url = self._tab.url().resolved(QUrl(href))
if tab:
self._tab.new_tab_requested.emit(url)
else:
self._tab.load_url(url)
def follow_selected(self, *, tab=False):
try:
self._follow_selected(tab=tab)
finally:
self.follow_selected_done.emit() |
class TensorboardLoggerHook(LoggerHook):
def __init__(self, log_dir=None, interval=10, ignore_last=True, reset_flag=True):
super(TensorboardLoggerHook, self).__init__(interval, ignore_last, reset_flag)
self.log_dir = log_dir
def before_run(self, runner):
if ((torch.__version__ >= '1.1') and ('.' in torch.__version__)):
try:
from torch.utils.tensorboard import SummaryWriter
except ImportError:
raise ImportError('Please run "pip install future tensorboard" to install the dependencies to use torch.utils.tensorboard (applicable to PyTorch 1.1 or higher)')
else:
try:
from tensorboardX import SummaryWriter
except ImportError:
raise ImportError('Please install tensorboardX to use TensorboardLoggerHook.')
if (self.log_dir is None):
self.log_dir = osp.join(runner.work_dir, 'tf_logs')
self.writer = SummaryWriter(self.log_dir)
def log(self, runner):
for var in runner.log_buffer.output:
if (var in ['time', 'data_time']):
continue
tag = '{}/{}'.format(var, runner.mode)
record = runner.log_buffer.output[var]
if isinstance(record, str):
self.writer.add_text(tag, record, runner.iter)
else:
self.writer.add_scalar(tag, runner.log_buffer.output[var], runner.iter)
def after_run(self, runner):
self.writer.close() |
def _test():
import torch
pretrained = False
models = [(shakedropresnet20_cifar10, 10), (shakedropresnet20_cifar100, 100), (shakedropresnet20_svhn, 10)]
for (model, num_classes) in models:
net = model(pretrained=pretrained)
net.eval()
weight_count = _calc_width(net)
print('m={}, {}'.format(model.__name__, weight_count))
assert ((model != shakedropresnet20_cifar10) or (weight_count == 272474))
assert ((model != shakedropresnet20_cifar100) or (weight_count == 278324))
assert ((model != shakedropresnet20_svhn) or (weight_count == 272474))
x = torch.randn(14, 3, 32, 32)
y = net(x)
y.sum().backward()
assert (tuple(y.size()) == (14, num_classes)) |
def test_asdict_modify_dict_does_not_change_object(fake_object):
result = fake_object.asdict()
result['attr1'] = 'testing'
result['alist'].append(4)
assert (result == {'attr1': 'testing', 'alist': [1, 2, 3, 4]})
assert (fake_object.attr1 == 'foo')
assert (fake_object.alist == [1, 2, 3]) |
def _parsemeta_tmy2(columns, line):
rawmeta = ' '.join(line.split()).split(' ')
meta = rawmeta[:3]
meta.append(int(rawmeta[3]))
    # rawmeta[4] is the N/S flag (latitude); rawmeta[7] is the E/W flag (longitude)
    latitude = ((float(rawmeta[5]) + (float(rawmeta[6]) / 60)) * ((2 * (rawmeta[4] == 'N')) - 1))
    longitude = ((float(rawmeta[8]) + (float(rawmeta[9]) / 60)) * ((2 * (rawmeta[7] == 'E')) - 1))
    meta.append(latitude)
    meta.append(longitude)
meta.append(float(rawmeta[10]))
meta_dict = dict(zip(columns.split(','), meta))
return meta_dict |
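# Worked example (header text is illustrative): for a TMY2 header line like
#   '23183 PHOENIX AZ -7 N 33 26 W 112 1 339'
# the fields after WBAN/city/state/UTC-offset are latitude as N/S deg min and
# longitude as E/W deg min, so latitude = 33 + 26/60 = +33.43 and
# longitude = -(112 + 1/60) = -112.02, with elevation 339 m. |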
def test_multiand_consistent_apply_classical():
rs = np.random.RandomState(52)
n = 5
all_cvs = rs.choice([0, 1], size=(2, n))
ctrl_strings = rs.choice([0, 1], size=(10, n))
for (cvs, ctrl_string) in itertools.product(all_cvs, ctrl_strings):
bloq = MultiAnd(cvs=cvs)
cbloq = bloq.decompose_bloq()
bloq_classical = bloq.call_classically(ctrl=ctrl_string)
cbloq_classical = cbloq.call_classically(ctrl=ctrl_string)
assert (len(bloq_classical) == len(cbloq_classical))
for i in range(len(bloq_classical)):
np.testing.assert_array_equal(bloq_classical[i], cbloq_classical[i]) |
class Vgg16(torch.nn.Module):
def __init__(self, requires_grad=False):
super(Vgg16, self).__init__()
vgg_pretrained_features = models.vgg16(pretrained=True).features
self.slice1 = torch.nn.Sequential()
self.slice2 = torch.nn.Sequential()
self.slice3 = torch.nn.Sequential()
self.slice4 = torch.nn.Sequential()
for x in range(4):
self.slice1.add_module(str(x), vgg_pretrained_features[x])
for x in range(4, 9):
self.slice2.add_module(str(x), vgg_pretrained_features[x])
for x in range(9, 16):
self.slice3.add_module(str(x), vgg_pretrained_features[x])
for x in range(16, 23):
self.slice4.add_module(str(x), vgg_pretrained_features[x])
if (not requires_grad):
for param in self.parameters():
param.requires_grad = False
def forward(self, X):
h = self.slice1(X)
h_relu1_2 = h
h = self.slice2(h)
h_relu2_2 = h
h = self.slice3(h)
h_relu3_3 = h
h = self.slice4(h)
h_relu4_3 = h
vgg_outputs = namedtuple('VggOutputs', ['relu1_2', 'relu2_2', 'relu3_3', 'relu4_3'])
out = vgg_outputs(h_relu1_2, h_relu2_2, h_relu3_3, h_relu4_3)
return out |
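# Usage sketch for perceptual-loss feature extraction (downloads torchvision's
# pretrained VGG16 weights on first use):
# vgg = Vgg16(requires_grad=False).eval()
# feats = vgg(torch.randn(1, 3, 224, 224))
# feats.relu3_3.shape  # torch.Size([1, 256, 56, 56]) |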
class Diffusion(LightningModule):
def __init__(self, model, channels=3, timesteps=1000, initial_lr=0.0002, training_target='x0', noise_schedule='cosine', auto_sample=False, sample_every_n_steps=1000, sample_size=(32, 32)):
super().__init__()
self.step_counter = 0
self.auto_sample = auto_sample
self.sample_every_n_steps = sample_every_n_steps
self.sample_size = sample_size
self.channels = channels
self.model = model
self.initial_lr = initial_lr
self.training_target = training_target.lower()
assert (self.training_target in ['x0', 'noise'])
assert (noise_schedule in ['linear', 'cosine'])
if (noise_schedule == 'linear'):
betas = linear_noise_schedule(timesteps)
else:
betas = cosine_noise_schedule(timesteps)
self.num_timesteps = int(betas.shape[0])
alphas = (1.0 - betas)
alphas_hat = np.cumprod(alphas, axis=0)
alphas_hat_prev = np.append(1.0, alphas_hat[:(- 1)])
self.register_buffer('betas', to_torch(betas))
self.register_buffer('alphas_hat', to_torch(alphas_hat))
self.register_buffer('alphas_hat_prev', to_torch(alphas_hat_prev))
self.register_buffer('sqrt_alphas_hat', to_torch(np.sqrt(alphas_hat)))
self.register_buffer('sqrt_one_minus_alphas_hat', to_torch(np.sqrt((1.0 - alphas_hat))))
self.register_buffer('log_one_minus_alphas_hat', to_torch(np.log((1.0 - alphas_hat))))
self.register_buffer('sqrt_recip_alphas_hat', to_torch(np.sqrt((1.0 / alphas_hat))))
self.register_buffer('sqrt_recipm1_alphas_hat', to_torch(np.sqrt(((1.0 / alphas_hat) - 1))))
posterior_variance = ((betas * (1.0 - alphas_hat_prev)) / (1.0 - alphas_hat))
self.register_buffer('posterior_variance', to_torch(posterior_variance))
self.register_buffer('posterior_log_variance_clipped', to_torch(np.log(np.maximum(posterior_variance, 1e-20))))
self.register_buffer('posterior_mean_coef1', to_torch(((betas * np.sqrt(alphas_hat_prev)) / (1.0 - alphas_hat))))
self.register_buffer('posterior_mean_coef2', to_torch((((1.0 - alphas_hat_prev) * np.sqrt(alphas)) / (1.0 - alphas_hat))))
def predict_start_from_noise(self, x_t, t, noise):
return ((self.sqrt_recip_alphas_hat[t] * x_t) - (self.sqrt_recipm1_alphas_hat[t] * noise))
def q_posterior(self, x_start, x_t, t):
posterior_mean = ((self.posterior_mean_coef1[t] * x_start) + (self.posterior_mean_coef2[t] * x_t))
posterior_variance = self.posterior_variance[t]
posterior_log_variance_clipped = self.posterior_log_variance_clipped[t]
return (posterior_mean, posterior_variance, posterior_log_variance_clipped)
def p_mean_variance(self, x, t, clip_denoised):
batch_size = x.shape[0]
t_tensor = torch.full((batch_size,), t, dtype=torch.int64, device=self.device)
if (self.training_target == 'x0'):
x_recon = self.model(x, t_tensor)
else:
x_recon = self.predict_start_from_noise(x, t=t, noise=self.model(x, t_tensor))
if clip_denoised:
x_recon.clamp_((- 1.0), 1.0)
(model_mean, posterior_variance, posterior_log_variance) = self.q_posterior(x_start=x_recon, x_t=x, t=t)
return (model_mean, posterior_variance, posterior_log_variance)
    @torch.no_grad()
def p_sample(self, x, t, clip_denoised=True):
(model_mean, _, model_log_variance) = self.p_mean_variance(x=x, t=t, clip_denoised=clip_denoised)
noise = (torch.randn_like(x) if (t > 0) else torch.zeros_like(x))
return (model_mean + (noise * (0.5 * model_log_variance).exp()))
    @torch.no_grad()
def sample(self, image_size=(32, 32), batch_size=16, custom_initial_img=None, custom_timesteps=None):
image_size = two_tuple(image_size)
sample_shape = (batch_size, self.channels, image_size[0], image_size[1])
timesteps = (custom_timesteps or self.num_timesteps)
img = (custom_initial_img if (custom_initial_img is not None) else torch.randn(sample_shape, device=self.device))
for t in reversed(range(0, timesteps)):
img = self.p_sample(img, t)
return img
    @torch.no_grad()
def sample_ddim(self, x_T=None, image_size=(32, 32), batch_size=16, sampling_step_size=100):
seq = range(0, self.num_timesteps, sampling_step_size)
seq_next = ([(- 1)] + list(seq[:(- 1)]))
if (x_T is None):
image_size = two_tuple(image_size)
sample_shape = (batch_size, self.channels, image_size[0], image_size[1])
x_t = torch.randn(sample_shape, device=self.device)
else:
batch_size = (x_T.shape[0] if (len(x_T.shape) == 4) else 1)
x_t = x_T
zipped_reversed_seq = list(zip(reversed(seq), reversed(seq_next)))
for (t, t_next) in zipped_reversed_seq:
t_tensor = torch.full((batch_size,), t, dtype=torch.int64, device=self.device)
e_t = self.model(x_t, t_tensor)
predicted_x0 = ((x_t - (self.sqrt_one_minus_alphas_hat[t] * e_t)) / self.sqrt_alphas_hat[t])
if (t > 0):
direction_to_x_t = (self.sqrt_one_minus_alphas_hat[t_next] * e_t)
x_t = ((self.sqrt_alphas_hat[t_next] * predicted_x0) + direction_to_x_t)
else:
x_t = predicted_x0
return x_t
def q_sample(self, x_start, t, noise=None):
if (noise is None):
noise = torch.randn_like(x_start)
return ((self.sqrt_alphas_hat[t] * x_start) + (self.sqrt_one_minus_alphas_hat[t] * noise))
def forward(self, x, *args, **kwargs):
x = x.get('IMG')
batch_size = x.shape[0]
t = np.random.randint(0, self.num_timesteps)
noise = torch.randn_like(x)
x_noisy = self.q_sample(x_start=x, t=t, noise=noise)
t_tensor = torch.full((batch_size,), t, dtype=torch.int64, device=self.device)
if (self.training_target == 'x0'):
x0_recon = self.model(x_noisy, t_tensor)
return F.mse_loss(x, x0_recon)
else:
noise_recon = self.model(x_noisy, t_tensor)
return F.mse_loss(noise, noise_recon)
def training_step(self, batch, batch_idx):
if (self.auto_sample and ((self.step_counter % self.sample_every_n_steps) == 0)):
sample = self.sample(image_size=self.sample_size, batch_size=1)
save_diffusion_sample(sample, f'{self.logger.log_dir}/sample_{self.step_counter}.png')
loss = self.forward(batch)
self.log('train_loss', loss)
self.step_counter += 1
return loss
def configure_optimizers(self):
optim = torch.optim.Adam(self.parameters(), lr=self.initial_lr)
scheduler = torch.optim.lr_scheduler.MultiStepLR(optim, milestones=[20], gamma=0.1, verbose=True)
return ([optim], [scheduler]) |
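# Sampling sketch (assumes a trained denoiser `model(x, t)` and the schedule
# helpers imported by this module):
# diffusion = Diffusion(model, channels=3, timesteps=1000)
# imgs = diffusion.sample(image_size=(32, 32), batch_size=4)         # ancestral
# imgs = diffusion.sample_ddim(batch_size=4, sampling_step_size=50)  # 20 DDIM steps |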
class ReahlWSGIApplication():
    @classmethod
    def from_directory(cls, directory, strict_checking=True, start_on_first_request=False):
config = StoredConfiguration(directory, strict_checking=strict_checking)
config.configure()
return cls(config, start_on_first_request=start_on_first_request)
def __init__(self, config, start_on_first_request=False):
self.start_on_first_request = start_on_first_request
self.start_lock = threading.Lock()
self.started = False
self.request_lock = threading.Lock()
self.config = config
self.system_control = SystemControl(self.config)
with ExecutionContext(name=('%s.__init__()' % self.__class__.__name__)) as context:
context.config = self.config
context.system_control = self.system_control
self.root_user_interface_factory = UserInterfaceFactory(None, RegexPath('/', '/', {}), IdentityDictionary(), self.config.web.site_root, 'site_root')
self.add_reahl_static_files()
def add_reahl_static_files(self):
static_files = self.config.web.frontend_libraries.packaged_files()
self.define_static_files('/static', static_files)
return static_files
def define_static_files(self, path, files):
ui_name = ('static_%s' % path)
ui_factory = UserInterfaceFactory(None, RegexPath(path, path, {}), IdentityDictionary(), StaticUI, ui_name, files=FileList(files))
self.root_user_interface_factory.predefine_user_interface(ui_factory)
return ui_factory
def start(self):
self.should_disconnect = False
with ExecutionContext(name=('%s.start()' % self.__class__.__name__)) as context:
context.config = self.config
context.system_control = self.system_control
if (not self.system_control.connected):
self.system_control.connect()
self.should_disconnect = True
self.started = True
def stop(self):
with ExecutionContext(name=('%s.stop()' % self.__class__.__name__)) as context:
context.config = self.config
context.system_control = self.system_control
if (self.should_disconnect and self.system_control.connected):
self.system_control.disconnect()
def resource_for(self, request):
root_ui = target_ui = current_view = None
try:
url = Url.get_current_url(request=request).as_locale_relative()
logging.getLogger(__name__).debug(('Finding Resource for URL: %s' % url.path))
try:
root_ui = self.root_user_interface_factory.create(url.path)
except:
root_ui = UserInterface(None, '/', {}, False, '__emergency_error_root_ui')
if (url.path != root_ui.get_bookmark_for_error('', None).href.as_locale_relative().path):
raise
(target_ui, page_factory) = root_ui.get_user_interface_for_full_path(url.path)
logging.getLogger(__name__).debug(('Found UserInterface %s' % target_ui))
current_view = target_ui.get_view_for_full_path(url.path)
logging.getLogger(__name__).debug(('Found View %s' % current_view))
current_view.check_precondition()
current_view.check_rights(request.method)
if current_view.is_dynamic:
page = current_view.create_page(url.path, page_factory)
self.check_scheme(page.is_security_sensitive)
else:
page = None
try:
return current_view.resource_for(url.path, page)
except HTTPNotFound:
if self.is_form_submit(url.path, request):
return MissingForm(current_view, root_ui, target_ui)
else:
raise
except HTTPException:
raise
except Exception as ex:
raise CouldNotConstructResource(current_view, root_ui, target_ui, ex)
def is_form_submit(self, full_path, request):
return (SubResource.is_for_sub_resource(full_path) and (request.method == 'POST') and any((name.endswith('_reahl_database_concurrency_digest') for name in request.POST.keys())))
def check_scheme(self, security_sensitive):
        scheme_needed = self.config.web.default_http_scheme
        if security_sensitive:
            scheme_needed = self.config.web.encrypted_http_scheme
request = ExecutionContext.get_context().request
if (request.scheme.lower() != scheme_needed.lower()):
raise RedirectToScheme(scheme_needed)
def create_context_for_request(self):
return ExecutionContext(name=('%s.create_context_for_request()' % self.__class__.__name__))
    @contextmanager
    def serialise_requests(self):
try:
self.request_lock.acquire()
(yield)
finally:
self.request_lock.release()
    @contextmanager
    def allow_parallel_requests(self):
(yield)
    @property
    def concurrency_manager(self):
if self.config.reahlsystem.serialise_parallel_requests:
return self.serialise_requests()
return self.allow_parallel_requests()
def ensure_started(self):
if (not self.started):
with self.start_lock:
if (not self.started):
self.start()
def __call__(self, environ, start_response):
if self.start_on_first_request:
self.ensure_started()
if ((not self.started) and self.config.strict_checking):
raise ProgrammerError(('%s is not started. Did you mean to set start_on_first_request=True?' % self))
request = Request(environ, charset='utf8')
context = self.create_context_for_request()
context.config = self.config
context.request = request
context.system_control = self.system_control
with context, self.concurrency_manager:
with self.system_control.nested_transaction():
self.config.web.session_class.initialise_web_session_on(context)
context.session.set_last_activity_time()
try:
try:
with self.system_control.nested_transaction() as veto:
veto.should_commit = False
resource = None
try:
resource = self.resource_for(request)
response = resource.handle_request(request)
veto.should_commit = resource.should_commit
except InternalRedirect as e:
if resource:
resource.cleanup_after_transaction()
request.internal_redirect = e
resource = self.resource_for(request)
response = resource.handle_request(request)
veto.should_commit = resource.should_commit
if (not veto.should_commit):
context.config.web.session_class.preserve_session(context.session)
if (not veto.should_commit):
context.config.web.session_class.restore_session(context.session)
if resource:
resource.cleanup_after_transaction()
except HTTPException as e:
response = e
except DisconnectionError as e:
response = HTTPInternalServerError(unicode_body=str(e))
except CouldNotConstructResource as e:
if self.config.reahlsystem.debug:
raise e.__cause__ from None
else:
response = UncaughtError(e.current_view, e.root_ui, e.target_ui, e.__cause__)
except Exception as e:
if self.config.reahlsystem.debug:
raise e
else:
logging.getLogger(__name__).exception(e)
response = UncaughtError(resource.view, resource.view.user_interface.root_ui, resource.view.user_interface, e)
context.session.set_session_key(response)
finally:
self.system_control.finalise_session()
for chunk in response(environ, start_response):
(yield chunk) |
@contextmanager
def Popen23(*args, **kwargs):
if PY3:
(yield Popen(*args, **kwargs))
return
else:
popen2 = Popen(*args, **kwargs)
try:
(yield popen2)
finally:
if popen2.stdout:
popen2.stdout.close()
if popen2.stderr:
popen2.stderr.close()
try:
if popen2.stdin:
popen2.stdin.close()
finally:
popen2.wait() |
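# With @contextmanager restored, Popen23 is a Python-2/3 portable context
# manager (Popen only gained __enter__/__exit__ in Python 3.2):
# with Popen23(['echo', 'hi'], stdout=PIPE) as proc:
#     out, _ = proc.communicate() |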
class ClassyHubInterface():
def __init__(self, task: Optional[ClassyTask]=None, model: Optional[ClassyModel]=None) -> None:
self.task = task
if (task is None):
assert (model is not None), 'Need to specify a model if task is None'
self.model = model
else:
assert (model is None), 'Cannot pass a model if task is not None'
self.model = task.model
@classmethod
def from_task(cls, task: ClassyTask) -> 'ClassyHubInterface':
return cls(task=task)
@classmethod
def from_model(cls, model: Union[(nn.Module, ClassyModel)]) -> 'ClassyHubInterface':
if (not isinstance(model, ClassyModel)):
model = ClassyModel.from_model(model)
return cls(model=model)
def create_image_dataset(self, batchsize_per_replica: int=32, shuffle: bool=True, transform: Optional[Union[(ClassyTransform, Callable)]]=None, num_samples: Optional[int]=None, image_folder: Optional[str]=None, image_files: Optional[List[str]]=None, phase_type: str='train') -> ClassyDataset:
if (transform is None):
if ((self.task is not None) and (phase_type in self.task.datasets)):
dataset = self.task.datasets[phase_type]
transform = dataset.transform
assert (transform is not None), 'Cannot infer transform from the task'
else:
transform = build_field_transform_default_imagenet(config=None, split=phase_type, key_map_transform=None)
return ImagePathDataset(batchsize_per_replica, shuffle, transform, num_samples, image_folder=image_folder, image_files=image_files)
def get_data_iterator(self, dataset: ClassyDataset) -> Iterator[Any]:
return iter(dataset.iterator())
def train(self) -> None:
torch.autograd.set_grad_enabled(True)
self.model.train()
def eval(self) -> None:
torch.autograd.set_grad_enabled(False)
self.model.eval()
def predict(self, sample):
output = self.model(sample['input'])
return output.squeeze()
def extract_features(self, sample):
output = self.model.extract_features(sample['input'])
return output.squeeze() |
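A hedged usage sketch for the hub interface: from_model wraps a plain nn.Module (the Linear layer and shapes are illustrative; assumes classy_vision is importable):

import torch
import torch.nn as nn

hub = ClassyHubInterface.from_model(nn.Linear(8, 2))  # plain module gets wrapped as a ClassyModel
hub.eval()                                            # disables autograd and sets eval mode
logits = hub.predict({'input': torch.randn(4, 8)})    # forward pass -> shape (4, 2)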
class _cupy_convolve_2d_wrapper(object):
def __init__(self, grid, block, kernel):
if isinstance(grid, int):
grid = (grid,)
if isinstance(block, int):
block = (block,)
self.grid = grid
self.block = block
self.kernel = kernel
def __call__(self, d_inp, paddedW, paddedH, d_kernel, S0, S1, out, outW, outH, pick):
kernel_args = (d_inp, paddedW, paddedH, d_kernel, d_kernel.shape[0], d_kernel.shape[1], S0, S1, out, outW, outH, pick)
self.kernel(self.grid, self.block, kernel_args) |
class Prev(ScrimsButton):
def __init__(self, ctx: Context, row: int=None):
super().__init__(emoji='<:double_left:>', row=row)
self.ctx = ctx
async def callback(self, interaction: discord.Interaction):
(await interaction.response.defer())
_ids = [_.pk async for _ in Scrim.filter(guild_id=self.ctx.guild.id).order_by('open_time')]
current = _ids.index(self.view.record.pk)
try:
next_id = _ids[(current - 1)]
except IndexError:
next_id = _ids[(- 1)]
new_scrim = (await Scrim.get(pk=next_id))
if (not (self.view.record == new_scrim)):
self.view.record = new_scrim
(await self.view.refresh_view()) |
class FakeMonitor(object):
def __init__(self, device_to_emit):
(self._event_source, self._event_sink) = os.pipe()
self.device_to_emit = device_to_emit
self.started = False
def trigger_event(self):
os.write(self._event_sink, b'\x01')
def fileno(self):
return self._event_source
def filter_by(self, *args):
pass
def start(self):
self.started = True
def poll(self, timeout=None):
(rlist, _, _) = select([self._event_source], [], [], timeout)
if (self._event_source in rlist):
os.read(self._event_source, 1)
return self.device_to_emit
def close(self):
try:
os.close(self._event_source)
finally:
os.close(self._event_sink) |
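An illustrative round trip with the fake monitor: trigger_event writes one byte into the pipe, so a subsequent poll unblocks and returns the canned device:

monitor = FakeMonitor(device_to_emit='fake-device')
monitor.start()
monitor.trigger_event()                            # queue exactly one event
assert monitor.poll(timeout=1.0) == 'fake-device'  # select() sees the byte, read() drains it
monitor.close()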
@_ignore_inferred
def _infer_assignment(assignment, pymodule):
result = _follow_pyname(assignment, pymodule)
if (result is None):
return None
(pyname, pyobject) = result
pyobject = _follow_evaluations(assignment, pyname, pyobject)
if (pyobject is None):
return None
return _follow_levels(assignment, pyobject) |
class ModuleLoadedBreakpoint():
def __init__(self, target):
breakpoint = target.BreakpointCreateByName('oe_debug_module_loaded_hook')
breakpoint.SetScriptCallbackFunction('lldb_sgx_plugin.ModuleLoadedBreakpoint.onHit')
@staticmethod
def onHit(frame, bp_loc, dict):
library_image_addr = frame.FindValue('rdi', lldb.eValueTypeRegister).signed
library_image = oe_debug_module_t(library_image_addr)
load_enclave_symbol(library_image.path, library_image.base_address)
return False |
def _set_legacy_defaults(args, cls):
if (not hasattr(cls, 'add_args')):
return
import argparse
parser = argparse.ArgumentParser(argument_default=argparse.SUPPRESS, allow_abbrev=False)
cls.add_args(parser)
defaults = argparse.Namespace()
for action in parser._actions:
if (action.dest is not argparse.SUPPRESS):
if (not hasattr(defaults, action.dest)):
if (action.default is not argparse.SUPPRESS):
setattr(defaults, action.dest, action.default)
for (key, default_value) in vars(defaults).items():
if (not hasattr(args, key)):
setattr(args, key, default_value) |
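A small sketch of the backfill behaviour (the class and option names are made up): attributes missing from an old args namespace are filled from the class's declared defaults, while values already present are left alone:

import argparse

class LegacyModel:
    @staticmethod
    def add_args(parser):
        parser.add_argument('--hidden-dim', type=int, default=256)
        parser.add_argument('--dropout', type=float, default=0.1)

old_args = argparse.Namespace(dropout=0.3)  # e.g. restored from an old checkpoint
_set_legacy_defaults(old_args, LegacyModel)
assert old_args.hidden_dim == 256           # backfilled from the declared default
assert old_args.dropout == 0.3              # existing value untouched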
@component
def async_relational_query():
foriegn_child = reactpy_django.hooks.use_query(async_get_foriegn_child_query)
relational_parent = reactpy_django.hooks.use_query(async_get_relational_parent_query)
if ((not relational_parent.data) or (not foriegn_child.data)):
return
mtm = relational_parent.data.many_to_many.all()
oto = relational_parent.data.one_to_one
mto = relational_parent.data.many_to_one.all()
fk = foriegn_child.data.parent
return html.div({'id': 'async-relational-query', 'data-success': (bool(mtm) and bool(oto) and bool(mto) and bool(fk))}, html.p(inspect.currentframe().f_code.co_name), html.div(f'Relational Parent Many To Many: {mtm}'), html.div(f'Relational Parent One To One: {oto}'), html.div(f'Relational Parent Many to One: {mto}'), html.div(f'Relational Child Foreign Key: {fk}')) |
def test_transformer__operations__scope_remarks():
transformer = TransformerGroup(28356, 7856).transformers[0]
assert (transformer.scope is None)
assert ([op.scope for op in transformer.operations] == ['Engineering survey, topographic mapping.', 'Transformation of GDA94 coordinates that have been derived through GNSS CORS.', 'Engineering survey, topographic mapping.'])
assert ([str(op.remarks)[:5].strip() for op in transformer.operations] == ['Grid', 'Scale', 'Grid']) |
class TensorBoardLoggerTest(unittest.TestCase):
def test_log(self: TensorBoardLoggerTest) -> None:
with tempfile.TemporaryDirectory() as log_dir:
logger = TensorBoardLogger(path=log_dir)
for i in range(5):
logger.log('test_log', (float(i) ** 2), i)
logger.close()
acc = EventAccumulator(log_dir)
acc.Reload()
for (i, event) in enumerate(acc.Tensors('test_log')):
self.assertAlmostEqual(event.tensor_proto.float_val[0], (float(i) ** 2))
self.assertEqual(event.step, i)
def test_log_dict(self: TensorBoardLoggerTest) -> None:
with tempfile.TemporaryDirectory() as log_dir:
logger = TensorBoardLogger(path=log_dir)
metric_dict = {f'log_dict_{i}': (float(i) ** 2) for i in range(5)}
logger.log_dict(metric_dict, 1)
logger.close()
acc = EventAccumulator(log_dir)
acc.Reload()
for i in range(5):
tensor_tag = acc.Tensors(f'log_dict_{i}')[0]
self.assertAlmostEqual(tensor_tag.tensor_proto.float_val[0], (float(i) ** 2))
self.assertEqual(tensor_tag.step, 1)
def test_log_text(self: TensorBoardLoggerTest) -> None:
with tempfile.TemporaryDirectory() as log_dir:
logger = TensorBoardLogger(path=log_dir)
for i in range(5):
logger.log_text('test_text', f'iter:{i}', i)
logger.close()
acc = EventAccumulator(log_dir)
acc.Reload()
for (i, test_text_event) in enumerate(acc.Tensors('test_text/text_summary')):
self.assertEqual(test_text_event.tensor_proto.string_val[0].decode('ASCII'), f'iter:{i}')
self.assertEqual(test_text_event.step, i)
def test_log_rank_zero(self: TensorBoardLoggerTest) -> None:
with tempfile.TemporaryDirectory() as log_dir:
with patch.dict('os.environ', {'RANK': '1'}):
logger = TensorBoardLogger(path=log_dir)
self.assertEqual(logger.writer, None)
@staticmethod
def _test_distributed() -> None:
dist.init_process_group('gloo')
rank = dist.get_rank()
with tempfile.TemporaryDirectory() as log_dir:
test_path = 'correct'
invalid_path = 'invalid'
if (rank == 0):
logger = TensorBoardLogger(os.path.join(log_dir, test_path))
else:
logger = TensorBoardLogger(os.path.join(log_dir, invalid_path))
assert (test_path in logger.path)
assert (invalid_path not in logger.path)
@unittest.skipUnless(bool(dist.is_available()), reason='Torch distributed is needed to run')
def test_multiple_workers(self: TensorBoardLoggerTest) -> None:
config = get_pet_launch_config(2)
launcher.elastic_launch(config, entrypoint=self._test_distributed)()
def test_add_scalars_call_is_correctly_passed_to_summary_writer(self: TensorBoardLoggerTest) -> None:
with patch('torchtnt.utils.loggers.tensorboard.SummaryWriter') as mock_summary_writer_class:
mock_summary_writer = Mock()
mock_summary_writer_class.return_value = mock_summary_writer
logger = TensorBoardLogger(path='/tmp')
logger.log_scalars('tnt_metrics', {'x': 0, 'y': 1}, 1, 2)
mock_summary_writer.add_scalars.assert_called_with(main_tag='tnt_metrics', tag_scalar_dict={'x': 0, 'y': 1}, global_step=1, walltime=2) |
@pytest.mark.parametrize('x, full_matrices, compute_uv, exc', [(set_test_value(pt.dmatrix(), (lambda x: x.T.dot(x))(rng.random(size=(3, 3)).astype('float64'))), True, True, None), (set_test_value(pt.dmatrix(), (lambda x: x.T.dot(x))(rng.random(size=(3, 3)).astype('float64'))), False, True, None), (set_test_value(pt.lmatrix(), (lambda x: x.T.dot(x))(rng.integers(1, 10, size=(3, 3)).astype('int64'))), True, True, None), (set_test_value(pt.lmatrix(), (lambda x: x.T.dot(x))(rng.integers(1, 10, size=(3, 3)).astype('int64'))), True, False, None)])
def test_SVD(x, full_matrices, compute_uv, exc):
g = nlinalg.SVD(full_matrices, compute_uv)(x)
if isinstance(g, list):
g_fg = FunctionGraph(outputs=g)
else:
g_fg = FunctionGraph(outputs=[g])
cm = (contextlib.suppress() if (exc is None) else pytest.warns(exc))
with cm:
compare_numba_and_py(g_fg, [i.tag.test_value for i in g_fg.inputs if (not isinstance(i, (SharedVariable, Constant)))]) |
class STM32F4xxRccV3(STM32F4xxRcc):
class Type(ctypes.Structure):
_fields_ = [('CR', ctypes.c_uint32), ('PLLCFGR', ctypes.c_uint32), ('CFGR', ctypes.c_uint32), ('CIR', ctypes.c_uint32), ('AHB1RSTR', ctypes.c_uint32), ('AHB2RSTR', ctypes.c_uint32), ('AHB3RSTR', ctypes.c_uint32), ('RESERVED0', ctypes.c_uint32), ('APB1RSTR', ctypes.c_uint32), ('APB2RSTR', ctypes.c_uint32), ('RESERVED1', (ctypes.c_uint32 * 2)), ('AHB1ENR', ctypes.c_uint32), ('AHB2ENR', ctypes.c_uint32), ('AHB3ENR', ctypes.c_uint32), ('RESERVED2', ctypes.c_uint32), ('APB1ENR', ctypes.c_uint32), ('APB2ENR', ctypes.c_uint32), ('RESERVED3', (ctypes.c_uint32 * 2)), ('AHB1LPENR', ctypes.c_uint32), ('AHB2LPENR', ctypes.c_uint32), ('AHB3LPENR', ctypes.c_uint32), ('RESERVED4', ctypes.c_uint32), ('APB1LPENR', ctypes.c_uint32), ('APB2LPENR', ctypes.c_uint32), ('RESERVED5', (ctypes.c_uint32 * 2)), ('BDCR', ctypes.c_uint32), ('CSR', ctypes.c_uint32), ('RESERVED6', (ctypes.c_uint32 * 2)), ('SSCGR', ctypes.c_uint32), ('PLLI2SCFGR', ctypes.c_uint32), ('PLLSAICFGR', ctypes.c_uint32), ('DCKCFGR', ctypes.c_uint32)] |
class SecuredFunction(FunctionWrapper):
__bound_function_wrapper__ = SecuredMethod
def __init__(self, wrapped, read_check, write_check):
super().__init__(wrapped, self.check_call_wrapped)
self.check_and_setup_check(read_check)
self._self_read_check = self.read_check = read_check
self.check_and_setup_check(write_check)
self._self_write_check = self.write_check = write_check
def check_and_setup_check(self, check):
if isinstance(check, AdaptedMethod):
check.set_full_arg_names(self.get_declared_argument_names())
if ((not isinstance(check, AdaptedMethod)) and isinstance(check, Callable)):
self.check_method_signature(check, self.__wrapped__)
def check_call_wrapped(self, wrapped, instance, args, kwargs):
if (not (self.check_right(self.read_check, instance, *args, **kwargs) and self.check_right(self.write_check, instance, *args, **kwargs))):
raise AccessRestricted()
return wrapped(*args, **kwargs)
def check_right(self, right_to_check, instance, *args, **kwargs):
if right_to_check:
args_to_send = (args if (instance is None) else ((instance,) + args))
return right_to_check(*args_to_send, **kwargs)
else:
return True
def check_method_signature(self, check_method, original_method):
check_signature = inspect.signature(check_method)
expected_signature = inspect.signature(original_method)
if (check_signature != expected_signature):
messages = [(repr(method) + str(signature)) for (signature, method) in [(check_signature, check_method), (expected_signature, original_method)]]
message = ('signature of %s does not match expected signature of %s' % tuple(messages))
raise ProgrammerError(message)
def get_declared_argument_names(self):
arg_spec = inspect.getfullargspec(self.__wrapped__)
positional_args_end = (len(arg_spec.args) - len((arg_spec.defaults or [])))
return arg_spec.args[:positional_args_end] |
def get_operator(mdl: Model, auto_penalty: bool=True, default_penalty: float=100000.0) -> Tuple[(WeightedPauliOperator, float)]:
_validate_input_model(mdl)
if auto_penalty:
penalty = _auto_define_penalty(mdl, default_penalty)
else:
penalty = default_penalty
sign = 1
if mdl.is_maximized():
sign = (- 1)
q_d = {}
index = 0
for i in mdl.iter_variables():
if (i in q_d):
continue
q_d[i] = index
index += 1
num_nodes = len(q_d)
pauli_list = []
shift = 0.0
zero = np.zeros(num_nodes, dtype=bool)
shift += (mdl.get_objective_expr().get_constant() * sign)
l_itr = mdl.get_objective_expr().iter_terms()
for j in l_itr:
z_p = np.zeros(num_nodes, dtype=bool)
index = q_d[j[0]]
weight = ((j[1] * sign) / 2)
z_p[index] = True
pauli_list.append([(- weight), Pauli((z_p, zero))])
shift += weight
q_itr = mdl.get_objective_expr().iter_quads()
for i in q_itr:
index1 = q_d[i[0][0]]
index2 = q_d[i[0][1]]
weight = ((i[1] * sign) / 4)
if (index1 == index2):
shift += weight
else:
z_p = np.zeros(num_nodes, dtype=bool)
z_p[index1] = True
z_p[index2] = True
pauli_list.append([weight, Pauli((z_p, zero))])
z_p = np.zeros(num_nodes, dtype=bool)
z_p[index1] = True
pauli_list.append([(- weight), Pauli((z_p, zero))])
z_p = np.zeros(num_nodes, dtype=bool)
z_p[index2] = True
pauli_list.append([(- weight), Pauli((z_p, zero))])
shift += weight
for constraint in mdl.iter_constraints():
right_cst = constraint.get_right_expr().get_constant()
left_cst = constraint.get_left_expr().get_constant()
constant = float((right_cst - left_cst))
shift += (penalty * (constant ** 2))
for __l in _iter_net_linear_coeffs(constraint):
z_p = np.zeros(num_nodes, dtype=bool)
index = q_d[__l[0]]
weight = __l[1]
z_p[index] = True
pauli_list.append([((penalty * constant) * weight), Pauli((z_p, zero))])
shift += (((- penalty) * constant) * weight)
for __l in _iter_net_linear_coeffs(constraint):
for l_2 in _iter_net_linear_coeffs(constraint):
index1 = q_d[__l[0]]
index2 = q_d[l_2[0]]
weight1 = __l[1]
weight2 = l_2[1]
penalty_weight1_weight2 = (((penalty * weight1) * weight2) / 4)
if (index1 == index2):
shift += penalty_weight1_weight2
else:
z_p = np.zeros(num_nodes, dtype=bool)
z_p[index1] = True
z_p[index2] = True
pauli_list.append([penalty_weight1_weight2, Pauli((z_p, zero))])
z_p = np.zeros(num_nodes, dtype=bool)
z_p[index1] = True
pauli_list.append([(- penalty_weight1_weight2), Pauli((z_p, zero))])
z_p = np.zeros(num_nodes, dtype=bool)
z_p[index2] = True
pauli_list.append([(- penalty_weight1_weight2), Pauli((z_p, zero))])
shift += penalty_weight1_weight2
qubit_op = WeightedPauliOperator(paulis=pauli_list)
return (qubit_op, shift) |
class BaseOptions():
def __init__(self):
self.initialized = False
def initialize(self, parser):
parser.add_argument('--dist_url', type=str, default='tcp://127.0.0.1:10002')
parser.add_argument('--num_gpu', type=int, default=8, help='num of gpus for cluster training')
parser.add_argument('--name', type=str, default='open-edit', help='name of the experiment. It decides where to store samples and models')
parser.add_argument('--checkpoints_dir', type=str, default='./checkpoints', help='models are saved here')
parser.add_argument('--vocab_path', type=str, default='vocab/conceptual_vocab.pkl', help='path to vocabulary')
parser.add_argument('--vse_enc_path', type=str, default='checkpoints/conceptual_model_best.pth.tar', help='path to the pretrained text encoder')
parser.add_argument('--edge_model_path', type=str, default='checkpoints/bdcn_pretrained_on_bsds500.pth', help='path to the pretrained edge extractor')
parser.add_argument('--model', type=str, default='OpenEdit', help='which model to use')
parser.add_argument('--norm_G', type=str, default='spectralsync_batch', help='instance normalization or batch normalization')
parser.add_argument('--norm_D', type=str, default='spectralinstance', help='instance normalization or batch normalization')
parser.add_argument('--phase', type=str, default='train', help='train, val, test, etc')
parser.add_argument('--batchSize', type=int, default=8, help='input batch size')
parser.add_argument('--img_size', type=int, default=224, help='image size')
parser.add_argument('--output_nc', type=int, default=3, help='# of output image channels')
parser.add_argument('--dataroot', type=str, default='./datasets/conceptual/')
parser.add_argument('--dataset_mode', type=str, default='conceptual')
parser.add_argument('--serial_batches', action='store_true', help='if true, takes images in order to make batches, otherwise takes them randomly')
parser.add_argument('--nThreads', default=4, type=int, help='# threads for loading data')
parser.add_argument('--load_from_opt_file', action='store_true', help='load the options from checkpoints and use that as default')
parser.add_argument('--display_winsize', type=int, default=256, help='display window size')
parser.add_argument('--netG', type=str, default='openedit', help='generator model')
parser.add_argument('--ngf', type=int, default=64, help='# of gen filters in first conv layer')
parser.add_argument('--init_type', type=str, default='xavier', help='network initialization [normal|xavier|kaiming|orthogonal]')
parser.add_argument('--init_variance', type=float, default=0.02, help='variance of the initialization distribution')
parser.add_argument('--netE', type=str, default='resnetbdcn')
parser.add_argument('--edge_nc', type=int, default=1)
parser.add_argument('--edge_level', type=int, default=41)
parser.add_argument('--edge_tanh', action='store_true')
parser.add_argument('--reg_weight', type=float, default=0.0001)
parser.add_argument('--perturbation', action='store_true')
parser.add_argument('--manipulation', action='store_true')
parser.add_argument('--img_path', type=str)
parser.add_argument('--ori_cap', type=str)
parser.add_argument('--new_cap', type=str)
parser.add_argument('--global_edit', action='store_true')
parser.add_argument('--alpha', type=int, default=5)
parser.add_argument('--optimize_iter', type=int, default=50)
self.initialized = True
return parser
def gather_options(self):
if (not self.initialized):
parser = argparse.ArgumentParser(formatter_class=argparse.ArgumentDefaultsHelpFormatter)
parser = self.initialize(parser)
(opt, unknown) = parser.parse_known_args()
if opt.load_from_opt_file:
parser = self.update_options_from_file(parser, opt)
opt = parser.parse_args()
self.parser = parser
return opt
def print_options(self, opt):
message = ''
message += '----------------- Options ---------------\n'
for (k, v) in sorted(vars(opt).items()):
comment = ''
default = self.parser.get_default(k)
if (v != default):
comment = ('\t[default: %s]' % str(default))
message += '{:>25}: {:<30}{}\n'.format(str(k), str(v), comment)
message += '----------------- End -------------------'
print(message)
def option_file_path(self, opt, makedir=False):
expr_dir = os.path.join(opt.checkpoints_dir, opt.name)
if makedir:
util.mkdirs(expr_dir)
file_name = os.path.join(expr_dir, 'opt')
return file_name
def save_options(self, opt):
file_name = self.option_file_path(opt, makedir=True)
with open((file_name + '.txt'), 'wt') as opt_file:
for (k, v) in sorted(vars(opt).items()):
comment = ''
default = self.parser.get_default(k)
if (v != default):
comment = ('\t[default: %s]' % str(default))
opt_file.write('{:>25}: {:<30}{}\n'.format(str(k), str(v), comment))
with open((file_name + '.pkl'), 'wb') as opt_file:
pickle.dump(opt, opt_file)
def update_options_from_file(self, parser, opt):
new_opt = self.load_options(opt)
for (k, v) in sorted(vars(opt).items()):
if (hasattr(new_opt, k) and (v != getattr(new_opt, k))):
new_val = getattr(new_opt, k)
parser.set_defaults(**{k: new_val})
return parser
def load_options(self, opt):
file_name = self.option_file_path(opt, makedir=False)
new_opt = pickle.load(open((file_name + '.pkl'), 'rb'))
return new_opt
def parse(self, save=False):
opt = self.gather_options()
opt.isTrain = self.isTrain
self.print_options(opt)
if (opt.isTrain and save):
self.save_options(opt)
self.opt = opt
return self.opt |
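BaseOptions is meant to be subclassed, since parse() reads self.isTrain; a minimal sketch (the subclass name is illustrative):

class TrainOptions(BaseOptions):
    def __init__(self):
        super().__init__()
        self.isTrain = True

opt = TrainOptions().parse(save=True)  # parses sys.argv, prints the options, writes opt.txt/opt.pkl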
def test_unavailable_chats(api, mock_req):
mock_req({'sendMessage': {'ok': True, 'result': {}}, 'forwardMessage': {'ok': False, 'error_code': 123, 'description': 'This is a message!'}, 'sendPhoto': {'ok': False, 'error_code': 403, 'description': 'This is not the message you want!'}, 'sendAudio': {'ok': False, 'error_code': 123, 'description': 'Bot was blocked by the user'}, 'sendDocument': {'ok': False, 'error_code': 403, 'description': 'Bot was blocked by the user'}, 'sendSticker': {'ok': False, 'error_code': 400, 'description': 'Bad request: chat not found'}, 'sendVideo': {'ok': False, 'error_code': 403, 'description': 'Forbidden: bot was kicked from the group chat'}, 'sendLocation': {'ok': False, 'error_code': 400, 'description': 'Bad request: PEER_ID_INVALID'}, 'sendVoice': {'ok': False, 'error_code': 403, 'description': 'Forbidden: user is deactivated'}, 'sendChatAction': {'ok': False, 'error_code': 400, 'description': 'Bad Request: group chat is migrated to a supergroup chat'}, 'getMe': {'ok': False, 'error_code': 403, 'description': 'Bot was blocked by the user'}})
api.call('sendMessage', {'chat_id': 123})
with pytest.raises(botogram.api.APIError) as e:
api.call('forwardMessage', {'chat_id': 123})
assert (e.type != botogram.api.ChatUnavailableError)
with pytest.raises(botogram.api.APIError) as e:
api.call('sendPhoto', {'chat_id': 123})
assert (e.type != botogram.api.ChatUnavailableError)
with pytest.raises(botogram.api.APIError) as e:
api.call('sendAudio', {'chat_id': 123})
assert (e.type != botogram.api.ChatUnavailableError)
with pytest.raises(botogram.api.ChatUnavailableError) as e:
api.call('sendDocument', {'chat_id': 123})
assert (e.value.chat_id == 123)
assert (e.value.reason == 'blocked')
with pytest.raises(botogram.api.ChatUnavailableError) as e:
api.call('sendSticker', {'chat_id': 123})
assert (e.value.chat_id == 123)
assert (e.value.reason == 'not_found')
with pytest.raises(botogram.api.ChatUnavailableError) as e:
api.call('sendVideo', {'chat_id': 123})
assert (e.value.chat_id == 123)
assert (e.value.reason == 'kicked')
with pytest.raises(botogram.api.ChatUnavailableError) as e:
api.call('sendLocation', {'chat_id': 123})
assert (e.value.chat_id == 123)
assert (e.value.reason == 'not_found')
with pytest.raises(botogram.api.ChatUnavailableError) as e:
api.call('sendVoice', {'chat_id': 123})
assert (e.value.chat_id == 123)
assert (e.value.reason == 'account_deleted')
with pytest.raises(botogram.api.APIError) as e:
api.call('getMe', {'chat_id': 123})
assert (e.type != botogram.api.ChatUnavailableError) |
class UnetSkipConnectionBlock(nn.Module):
def __init__(self, outer_nc, inner_nc, act, gpu_ids, input_nc=None, submodule=None, outermost=False, innermost=False, norm_layer=nn.BatchNorm2d, use_dropout=False):
super(UnetSkipConnectionBlock, self).__init__()
self.gpulist = gpu_ids
use_bias = (norm_layer == 'instance')
self.outermost = outermost
if (input_nc is None):
input_nc = outer_nc
downconv = nn.Conv2d(input_nc, inner_nc, kernel_size=4, stride=2, padding=1, bias=use_bias)
downrelu = nn.LeakyReLU(0.2, True)
downnorm = norm_layer(inner_nc)
uprelu = act
upnorm = norm_layer(outer_nc)
if outermost:
upconv = nn.ConvTranspose2d((inner_nc * 2), outer_nc, kernel_size=4, stride=2, padding=1)
down = [downconv]
up = [uprelu, upconv, nn.Tanh()]
model = ((down + [submodule]) + up)
elif innermost:
upconv = nn.ConvTranspose2d(inner_nc, outer_nc, kernel_size=4, stride=2, padding=1, bias=use_bias)
down = [downrelu, downconv]
up = [uprelu, upconv, upnorm]
model = (down + up)
else:
upconv = nn.ConvTranspose2d((inner_nc * 2), outer_nc, kernel_size=4, stride=2, padding=1, bias=use_bias)
down = [downrelu, downconv, downnorm]
up = [uprelu, upconv, upnorm]
if use_dropout:
model = (((down + [submodule]) + up) + [nn.Dropout(0.5)])
else:
model = ((down + [submodule]) + up)
if self.outermost:
self.model0 = nn.Sequential(*down)
self.model0.cuda(self.gpulist[0])
self.model1 = submodule
self.model1.cuda(self.gpulist[1])
self.model2 = nn.Sequential(*up)
self.model2.cuda(self.gpulist[0])
else:
self.model = nn.Sequential(*model)
self.model.cuda(self.gpulist[1])
def forward(self, x):
if self.outermost:
x = x.cuda(self.gpulist[0])
x0 = self.model0(x).cuda(self.gpulist[1])
x1 = self.model1(x0).cuda(self.gpulist[0])
x2 = self.model2(x1)
return x2
else:
return torch.cat([x, self.model(x)], 1) |
@contextmanager
def tracing_v2_enabled(session_name: Optional[str]=None, *, example_id: Optional[Union[(str, UUID)]]=None, tenant_id: Optional[str]=None, session_extra: Optional[Dict[(str, Any)]]=None) -> Generator[(TracerSession, None, None)]:
warnings.warn('The experimental tracing v2 is in development. This is not yet stable and may change in the future.')
if isinstance(example_id, str):
example_id = UUID(example_id)
cb = LangChainTracer(tenant_id=tenant_id, session_name=session_name, example_id=example_id, session_extra=session_extra)
session = cb.ensure_session()
tracing_v2_callback_var.set(cb)
(yield session)
tracing_v2_callback_var.set(None) |
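A usage sketch for the tracing context manager (the session name is a placeholder):

with tracing_v2_enabled(session_name='my-experiment') as session:
    # run chains/agents here; the LangChainTracer installed on the
    # context var records runs under `session` until the block exits
    pass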
def _query_sponsors(client, conference_code):
return client.query('query Sponsors($code: String!) {\n conference(code: $code) {\n sponsorsByLevel {\n level\n sponsors {\n name\n image\n link\n }\n }\n }\n }', variables={'code': conference_code}) |
class _ChildEnv():
def __init__(self, id):
(self._pipe, child_pipe) = mp.Pipe()
self._process = mp.Process(target=_child, args=(id, child_pipe))
self._process.start()
def call(self, method, *args):
self._pipe.send(('call', method, args))
def get(self, attr):
self._pipe.send(('get', attr))
def hasattr(self, attr):
self._pipe.send(('hasattr', attr))
def result(self):
return self._pipe.recv()
def call_sync(self, *args):
self.call(*args)
return self.result()
def get_sync(self, *args):
self.get(*args)
return self.result()
def hasattr_sync(self, *args):
self.hasattr(*args)
return self.result()
def close(self):
self._pipe.close()
self._process.join() |
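The _child worker itself is not shown; a hypothetical counterpart that services the ('call' | 'get' | 'hasattr') pipe protocol could look like this (make_env is an assumed factory):

def _child(env_id, pipe):
    env = make_env(env_id)  # assumption: some environment factory exists
    while True:
        try:
            msg = pipe.recv()
        except EOFError:    # parent closed its end of the pipe
            break
        kind, name = msg[0], msg[1]
        if kind == 'call':
            pipe.send(getattr(env, name)(*msg[2]))
        elif kind == 'get':
            pipe.send(getattr(env, name))
        elif kind == 'hasattr':
            pipe.send(hasattr(env, name))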
class MultipleReducers(BaseReducer):
def __init__(self, reducers, default_reducer=None, **kwargs):
super().__init__(**kwargs)
self.reducers = torch.nn.ModuleDict(reducers)
self.default_reducer = (MeanReducer() if (default_reducer is None) else default_reducer)
def forward(self, loss_dict, embeddings, labels):
self.reset_stats()
sub_losses = torch.zeros(len(loss_dict), dtype=embeddings.dtype, device=embeddings.device)
loss_count = 0
for (loss_name, loss_info) in loss_dict.items():
input_dict = {loss_name: loss_info}
if (loss_name in self.reducers):
loss_val = self.reducers[loss_name](input_dict, embeddings, labels)
else:
loss_val = self.default_reducer(input_dict, embeddings, labels)
sub_losses[loss_count] = loss_val
loss_count += 1
return self.sub_loss_reduction(sub_losses, embeddings, labels)
def sub_loss_reduction(self, sub_losses, embeddings=None, labels=None):
return torch.sum(sub_losses) |
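A construction sketch in the pytorch-metric-learning style this reducer appears to come from: one named sub-loss gets a dedicated reducer, and every other entry in loss_dict falls through to the default MeanReducer (the import and threshold value are assumptions):

from pytorch_metric_learning import reducers

reducer = MultipleReducers(
    {'triplet_loss': reducers.ThresholdReducer(low=0.1)}  # per-loss override
)  # unnamed losses are handled by the default MeanReducer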
class GlobalContextVit(nn.Module):
def __init__(self, in_chans: int=3, num_classes: int=1000, global_pool: str='avg', img_size: Tuple[(int, int)]=224, window_ratio: Tuple[(int, ...)]=(32, 32, 16, 32), window_size: Tuple[(int, ...)]=None, embed_dim: int=64, depths: Tuple[(int, ...)]=(3, 4, 19, 5), num_heads: Tuple[(int, ...)]=(2, 4, 8, 16), mlp_ratio: float=3.0, qkv_bias: bool=True, layer_scale: Optional[float]=None, drop_rate: float=0.0, proj_drop_rate: float=0.0, attn_drop_rate: float=0.0, drop_path_rate: float=0.0, weight_init='', act_layer: str='gelu', norm_layer: str='layernorm2d', norm_layer_cl: str='layernorm', norm_eps: float=1e-05):
super().__init__()
act_layer = get_act_layer(act_layer)
norm_layer = partial(get_norm_layer(norm_layer), eps=norm_eps)
norm_layer_cl = partial(get_norm_layer(norm_layer_cl), eps=norm_eps)
img_size = to_2tuple(img_size)
feat_size = tuple(((d // 4) for d in img_size))
self.global_pool = global_pool
self.num_classes = num_classes
self.drop_rate = drop_rate
num_stages = len(depths)
self.num_features = int((embed_dim * (2 ** (num_stages - 1))))
if (window_size is not None):
window_size = to_ntuple(num_stages)(window_size)
else:
assert (window_ratio is not None)
window_size = tuple([((img_size[0] // r), (img_size[1] // r)) for r in to_ntuple(num_stages)(window_ratio)])
self.stem = Stem(in_chs=in_chans, out_chs=embed_dim, act_layer=act_layer, norm_layer=norm_layer)
dpr = [x.tolist() for x in torch.linspace(0, drop_path_rate, sum(depths)).split(depths)]
stages = []
for i in range(num_stages):
last_stage = (i == (num_stages - 1))
stage_scale = (2 ** max((i - 1), 0))
stages.append(GlobalContextVitStage(dim=(embed_dim * stage_scale), depth=depths[i], num_heads=num_heads[i], feat_size=((feat_size[0] // stage_scale), (feat_size[1] // stage_scale)), window_size=window_size[i], downsample=(i != 0), stage_norm=last_stage, mlp_ratio=mlp_ratio, qkv_bias=qkv_bias, layer_scale=layer_scale, proj_drop=proj_drop_rate, attn_drop=attn_drop_rate, drop_path=dpr[i], act_layer=act_layer, norm_layer=norm_layer, norm_layer_cl=norm_layer_cl))
self.stages = nn.Sequential(*stages)
self.head = ClassifierHead(self.num_features, num_classes, pool_type=global_pool, drop_rate=drop_rate)
if weight_init:
named_apply(partial(self._init_weights, scheme=weight_init), self)
def _init_weights(self, module, name, scheme='vit'):
if (scheme == 'vit'):
if isinstance(module, nn.Linear):
nn.init.xavier_uniform_(module.weight)
if (module.bias is not None):
if ('mlp' in name):
nn.init.normal_(module.bias, std=1e-06)
else:
nn.init.zeros_(module.bias)
elif isinstance(module, nn.Linear):
nn.init.normal_(module.weight, std=0.02)
if (module.bias is not None):
nn.init.zeros_(module.bias)
@torch.jit.ignore
def no_weight_decay(self):
return {k for (k, _) in self.named_parameters() if any(((n in k) for n in ['relative_position_bias_table', 'rel_pos.mlp']))}
@torch.jit.ignore
def group_matcher(self, coarse=False):
matcher = dict(stem='^stem', blocks='^stages\\.(\\d+)')
return matcher
@torch.jit.ignore
def set_grad_checkpointing(self, enable=True):
for s in self.stages:
s.grad_checkpointing = enable
@torch.jit.ignore
def get_classifier(self):
return self.head.fc
def reset_classifier(self, num_classes, global_pool=None):
self.num_classes = num_classes
if (global_pool is None):
global_pool = self.head.global_pool.pool_type
self.head = ClassifierHead(self.num_features, num_classes, pool_type=global_pool, drop_rate=self.drop_rate)
def forward_features(self, x: torch.Tensor) -> torch.Tensor:
x = self.stem(x)
x = self.stages(x)
return x
def forward_head(self, x, pre_logits: bool=False):
return self.head(x, pre_logits=pre_logits)
def forward(self, x: torch.Tensor) -> torch.Tensor:
x = self.forward_features(x)
x = self.forward_head(x)
return x |
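A quick shape check, assuming the timm helpers referenced above (Stem, GlobalContextVitStage, ClassifierHead, get_norm_layer, ...) are importable:

model = GlobalContextVit(num_classes=10)
x = torch.randn(2, 3, 224, 224)   # matches the default img_size of 224
logits = model(x)                 # expected: torch.Size([2, 10])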
def test_laneoffset_rel():
laneoffset = OSC.RelativeLaneOffsetAction(1, 'Ego', OSC.DynamicsShapes.step, 3, False)
prettyprint(laneoffset.get_element(), None)
laneoffset2 = OSC.RelativeLaneOffsetAction(1, 'Ego', OSC.DynamicsShapes.step, 3, False)
laneoffset3 = OSC.RelativeLaneOffsetAction(1, 'Ego', OSC.DynamicsShapes.linear, 3, False)
assert (laneoffset == laneoffset2)
assert (laneoffset != laneoffset3)
laneoffset4 = OSC.RelativeLaneOffsetAction.parse(laneoffset.get_element())
prettyprint(laneoffset4.get_element(), None)
assert (laneoffset4 == laneoffset)
assert (version_validation('PrivateAction', laneoffset, 0) == ValidationResponse.OK)
assert (version_validation('PrivateAction', laneoffset, 1) == ValidationResponse.OK)
assert (version_validation('PrivateAction', laneoffset, 2) == ValidationResponse.OK) |
class DevDataset(Dataset):
def __init__(self, args, raw_datasets, cache_root):
self.raw_datasets = raw_datasets
self.tab_processor = get_default_processor(max_cell_length=100, tokenizer=AutoTokenizer.from_pretrained(args.bert.location, use_fast=False), max_input_length=args.seq2seq.table_truncation_max_length)
cache_path = os.path.join(cache_root, 'kvret_dev.cache')
if (os.path.exists(cache_path) and args.dataset.use_cache):
self.extended_data = torch.load(cache_path)
else:
self.extended_data = []
expansion = (args.seq2seq.expansion if args.seq2seq.expansion else 1)
for expand_id in range(expansion):
for raw_data in tqdm(self.raw_datasets):
for i in range(1, (len(raw_data['dialogue']['driver']) + 1)):
if ((i > min(len(raw_data['dialogue']['driver']), len(raw_data['dialogue']['assistant']))) and (not (len(raw_data['dialogue']['driver']) == len(raw_data['dialogue']['assistant'])))):
continue
extend_data = copy.deepcopy(raw_data)
extend_data['dialogue']['driver'] = extend_data['dialogue']['driver'][:i]
extend_data['dialogue']['assistant'] = extend_data['dialogue']['assistant'][:i]
(history, gold_response) = kvret_get_constructed_history_and_golden_response(usr_utterances=extend_data['dialogue']['driver'], sys_utterances=extend_data['dialogue']['assistant'])
table_context = {'header': extend_data['kb']['header'], 'rows': extend_data['kb']['rows']}
for truncate_func in self.tab_processor.table_truncate_funcs:
truncate_func.truncate_table(table_context, history, [])
linear_table = self.tab_processor.table_linearize_func.process_table(table_context)
extend_data.update({'struct_in': linear_table.lower(), 'text_in': history.lower(), 'seq_out': gold_response.lower()})
self.extended_data.append(extend_data)
if args.dataset.use_cache:
torch.save(self.extended_data, cache_path)
def __getitem__(self, index) -> T_co:
return self.extended_data[index]
def __len__(self):
return len(self.extended_data) |
class AbstractComparisonNodeRecorder(NumpyArrayNodeRecorder):
def __init__(self, model, node, observed, **kwargs):
super(AbstractComparisonNodeRecorder, self).__init__(model, node, **kwargs)
self.observed = observed
self._aligned_observed = None
def setup(self):
super(AbstractComparisonNodeRecorder, self).setup()
from pywr.parameters import align_and_resample_dataframe
self._aligned_observed = align_and_resample_dataframe(self.observed, self.model.timestepper.datetime_index) |
class Command(BaseCommand):
def add_arguments(self, parser):
parser.add_argument('username', type=str)
parser.add_argument('org', type=str)
def handle(self, *args, **options):
try:
user = PytitionUser.objects.get(user__username=options['username'])
except PytitionUser.DoesNotExist:
logger.error('%s user does not exist', options['username'])
return
try:
org = Organization.objects.get(name=options['org'])
except Organization.DoesNotExist:
logger.error('%s org does not exist', options['org'])
return
org.members.add(user)
(perms, created) = Permission.objects.get_or_create(organization=org, user=user)
perms.can_add_members = True
perms.can_remove_members = True
perms.can_create_petitions = True
perms.can_modify_petitions = True
perms.can_delete_petitions = True
perms.can_view_signatures = True
perms.can_modify_signatures = True
perms.can_delete_signatures = True
perms.can_modify_permissions = True
perms.save()
if created:
logger.info('%s user joined %s org', user, org)
else:
logger.info('%s user already joined %s org', user, org) |
def run(video_path: str, detect_labels, video_downscale: float=1.0, architecture: str='ssdlite320', confidence_threshold: float=0.5, tracker_min_iou: float=0.25, show_detections: bool=False, track_text_verbose: int=0, device: str='cpu', viz_wait_ms: int=1):
detector = CocoObjectDetector(class_ids=get_class_ids(detect_labels), confidence_threshold=confidence_threshold, architecture=architecture, device=device)
(cap, cap_fps) = read_video_file(video_path)
tracker = MultiObjectTracker(dt=(1 / cap_fps), tracker_kwargs={'max_staleness': 5}, model_spec={'order_pos': 1, 'dim_pos': 2, 'order_size': 0, 'dim_size': 2, 'q_var_pos': 5000.0, 'r_var_pos': 0.1}, matching_fn_kwargs={'min_iou': tracker_min_iou, 'multi_match_min_iou': 0.93})
while True:
(ret, frame) = cap.read()
if (not ret):
break
frame = cv2.resize(frame, fx=video_downscale, fy=video_downscale, dsize=None, interpolation=cv2.INTER_AREA)
detections = detector.process_image(frame)
_ = tracker.step(detections=detections)
active_tracks = tracker.active_tracks(min_steps_alive=3)
if show_detections:
for det in detections:
draw_detection(frame, det)
for track in active_tracks:
draw_track(frame, track, thickness=2, text_at_bottom=True, text_verbose=track_text_verbose)
cv2.imshow('frame', frame)
c = cv2.waitKey(viz_wait_ms)
if (c == ord('q')):
break |
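An illustrative invocation (the video path is a placeholder): track people with the default detector settings on CPU, drawing raw detections as well as tracks:

run('input.mp4', detect_labels=['person'], video_downscale=0.5,
    confidence_threshold=0.5, show_detections=True, device='cpu')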
def make_fake_hdf_epic(fname):
fid = h5py.File(fname, 'w')
g1 = fid.create_group('Band317nm')
g1.create_dataset('Image', shape=(100, 100), dtype=np.float32, data=b317_data)
g2 = fid.create_group('Band688nm')
g2.create_dataset('Image', shape=(100, 100), dtype=np.float32, data=b688_data)
g3 = g2.create_group('Geolocation')
g4 = g3.create_group('Earth')
g4.create_dataset('SunAngleZenith', shape=(100, 100), dtype=np.float32, data=sza_data)
g4.create_dataset('ViewAngleAzimuth', shape=(100, 100), dtype=np.float32, data=vaa_data)
g4.create_dataset('Mask', shape=(100, 100), dtype=int, data=mas_data)
g4.create_dataset('Latitude', shape=(100, 100), dtype=np.float32, data=lat_data)
g4.create_dataset('Longitude', shape=(100, 100), dtype=np.float32, data=lon_data)
fid.attrs.create('begin_time', '2015-06-13 12:00:37')
fid.attrs.create('end_time', '2015-06-13 12:05:01')
fid.close() |
@_server.route('/services/<service>/keys/<kid>', methods=['DELETE'])
def delete_service_key(service, kid):
jwt_header = request.headers.get(JWT_HEADER_NAME, '')
match = jwtutil.TOKEN_REGEX.match(jwt_header)
if (match is None):
abort(400)
encoded_jwt = match.group(1)
signer_kid = _signer_kid(encoded_jwt)
signer_key = _lookup_service_key(service, signer_kid, approved_only=False)
self_signed = (kid == signer_kid)
approved_key_for_service = (signer_key.approval is not None)
if (self_signed or approved_key_for_service):
_validate_jwt(encoded_jwt, signer_key.jwk, service)
try:
model.delete_service_key(kid)
except ServiceKeyDoesNotExist:
abort(404)
logs_model.log_action('service_key_delete', ip=get_request_ip(), metadata={'kid': kid, 'signer_kid': signer_key.kid, 'service': service, 'name': signer_key.name, 'user_agent': request.headers.get('User-Agent'), 'ip': get_request_ip()})
return make_response('', 204)
abort(403) |
class HashBucketInput(Dict):
@staticmethod
def of(annotated_delta: DeltaAnnotated, primary_keys: List[str], num_hash_buckets: int, num_hash_groups: int, enable_profiler: Optional[bool]=False, metrics_config: Optional[MetricsConfig]=None, read_kwargs_provider: Optional[ReadKwargsProvider]=None, object_store: Optional[IObjectStore]=None, deltacat_storage=unimplemented_deltacat_storage, deltacat_storage_kwargs: Optional[Dict[(str, Any)]]=None) -> HashBucketInput:
result = HashBucketInput()
result['annotated_delta'] = annotated_delta
result['primary_keys'] = primary_keys
result['num_hash_buckets'] = num_hash_buckets
result['num_hash_groups'] = num_hash_groups
result['enable_profiler'] = enable_profiler
result['metrics_config'] = metrics_config
result['read_kwargs_provider'] = read_kwargs_provider
result['object_store'] = object_store
result['deltacat_storage'] = deltacat_storage
result['deltacat_storage_kwargs'] = (deltacat_storage_kwargs or {})
return result
def annotated_delta(self) -> DeltaAnnotated:
return self['annotated_delta']
def primary_keys(self) -> List[str]:
return self['primary_keys']
def num_hash_buckets(self) -> int:
return self['num_hash_buckets']
def num_hash_groups(self) -> int:
return self['num_hash_groups']
def enable_profiler(self) -> Optional[bool]:
return self.get('enable_profiler')
def metrics_config(self) -> Optional[MetricsConfig]:
return self.get('metrics_config')
def read_kwargs_provider(self) -> Optional[ReadKwargsProvider]:
return self.get('read_kwargs_provider')
def object_store(self) -> Optional[IObjectStore]:
return self.get('object_store')
def deltacat_storage(self) -> unimplemented_deltacat_storage:
return self.get('deltacat_storage')
def deltacat_storage_kwargs(self) -> Optional[Dict[(str, Any)]]:
return self.get('deltacat_storage_kwargs') |
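The class follows a typed-dict-with-accessors pattern: of() is the intended constructor, and reads go through the named methods. A sketch, where my_delta stands in for a real DeltaAnnotated:

hb_input = HashBucketInput.of(
    annotated_delta=my_delta,  # assumption: a DeltaAnnotated instance
    primary_keys=['id'],
    num_hash_buckets=8,
    num_hash_groups=2,
)
assert hb_input.num_hash_buckets() == 8
assert hb_input.deltacat_storage_kwargs() == {}  # defaulted by of()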
def main():
global args, best_prec1
args = parser.parse_args()
if args.tensorboard:
configure(('runs/%s' % args.name))
normalize = transforms.Normalize(mean=[(x / 255.0) for x in [125.3, 123.0, 113.9]], std=[(x / 255.0) for x in [63.0, 62.1, 66.7]])
if args.augment:
transform_train = transforms.Compose([transforms.ToTensor(), transforms.Lambda((lambda x: F.pad(x.unsqueeze(0), (4, 4, 4, 4), mode='reflect').squeeze())), transforms.ToPILImage(), transforms.RandomCrop(32), transforms.RandomHorizontalFlip(), transforms.ToTensor(), normalize])
else:
transform_train = transforms.Compose([transforms.ToTensor(), normalize])
transform_test = transforms.Compose([transforms.ToTensor(), normalize])
kwargs = {'num_workers': 1, 'pin_memory': True}
assert ((args.dataset == 'cifar10') or (args.dataset == 'cifar100'))
train_loader = torch.utils.data.DataLoader(datasets.__dict__[args.dataset.upper()]('../data', train=True, download=True, transform=transform_train), batch_size=args.batch_size, shuffle=True, **kwargs)
val_loader = torch.utils.data.DataLoader(datasets.__dict__[args.dataset.upper()]('../data', train=False, transform=transform_test), batch_size=args.batch_size, shuffle=True, **kwargs)
model = WideResNet(args.layers, (((args.dataset == 'cifar10') and 10) or 100), args.widen_factor, dropRate=args.droprate)
print('Number of model parameters: {}'.format(sum([p.data.nelement() for p in model.parameters()])))
model = model.cuda()
if args.resume:
if os.path.isfile(args.resume):
print("=> loading checkpoint '{}'".format(args.resume))
checkpoint = torch.load(args.resume)
args.start_epoch = checkpoint['epoch']
best_prec1 = checkpoint['best_prec1']
model.load_state_dict(checkpoint['state_dict'])
print("=> loaded checkpoint '{}' (epoch {})".format(args.resume, checkpoint['epoch']))
else:
print("=> no checkpoint found at '{}'".format(args.resume))
cudnn.benchmark = True
criterion = nn.CrossEntropyLoss().cuda()
optimizer = torch.optim.SGD(model.parameters(), args.lr, momentum=args.momentum, nesterov=args.nesterov, weight_decay=args.weight_decay)
scheduler = torch.optim.lr_scheduler.CosineAnnealingLR(optimizer, (len(train_loader) * args.epochs))
for epoch in range(args.start_epoch, args.epochs):
train(train_loader, model, criterion, optimizer, scheduler, epoch)
prec1 = validate(val_loader, model, criterion, epoch)
is_best = (prec1 > best_prec1)
best_prec1 = max(prec1, best_prec1)
save_checkpoint({'epoch': (epoch + 1), 'state_dict': model.state_dict(), 'best_prec1': best_prec1}, is_best)
print('Best accuracy: ', best_prec1) |
@pytest.mark.fast
def test_progress_bar(*args, **kwargs):
from time import sleep
from numpy.random import rand
from radis.misc.progress_bar import ProgressBar
print('Testing progress bar')
a = 0
r = list(range(200))
N = len(r)
pb = ProgressBar(N)
for i in r:
pb.update(i, modulo=10)
a += i
sleep((rand() * 0.003))
pb.done()
return True |
class FusedScaleMaskSoftmax(torch.nn.Module):
def __init__(self, input_in_fp16, upper_triang_mask, mask_func, softmax_in_fp32, scale):
super(FusedScaleMaskSoftmax, self).__init__()
self.input_in_fp16 = input_in_fp16
self.upper_triang_mask = upper_triang_mask
self.mask_func = mask_func
self.softmax_in_fp32 = softmax_in_fp32
self.scale = scale
assert ((self.scale is None) or softmax_in_fp32), 'softmax should be in fp32 when scaled'
def forward(self, input, mask):
data_size = input.size()
assert (input.dim() == 4)
if (self.input_in_fp16 and self.upper_triang_mask and (data_size[(- 1)] <= 2048) and (input.size()[2] == input.size()[3])):
input = input.view((- 1), data_size[2], data_size[3])
scale = (self.scale if (self.scale is not None) else 1.0)
probs = ScaledUpperTriangMaskedSoftmax.apply(input, scale)
probs = probs.view(*data_size)
else:
if (self.input_in_fp16 and self.softmax_in_fp32):
input = input.float()
mask_output = self.mask_func(input, mask)
if (self.scale is not None):
mask_output = (mask_output * self.scale)
probs = torch.nn.Softmax(dim=(- 1))(mask_output)
if (self.input_in_fp16 and self.softmax_in_fp32):
probs = probs.half()
return probs |
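A sketch of the unfused fallback path (fp32 input, no scaling), which never touches the fused CUDA kernel; the additive-fill causal mask function here is illustrative:

import torch

def causal_mask_func(scores, mask):
    return scores.masked_fill(mask, -10000.0)

softmax = FusedScaleMaskSoftmax(
    input_in_fp16=False, upper_triang_mask=False,
    mask_func=causal_mask_func, softmax_in_fp32=True, scale=None)
scores = torch.randn(1, 2, 16, 16)  # [batch, heads, seq_q, seq_k]
mask = torch.triu(torch.ones(1, 1, 16, 16, dtype=torch.bool), diagonal=1)
probs = softmax(scores, mask)       # plain torch softmax on this path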
def plot_graph(filename, type_graph, output_filename):
(my_techniques, name, _, _) = load_techniques(filename)
graph_values = []
for t in my_techniques.values():
for item in t[type_graph]:
date = get_latest_date(item)
score = get_latest_score(item)
if (date and (score > 0)):
yyyymmdd = date.strftime('%Y-%m-%d')
graph_values.append({'date': yyyymmdd, 'count': 1})
import pandas as pd
df = pd.DataFrame(graph_values).groupby('date', as_index=False)[['count']].sum()
df['cumcount'] = df['count'].cumsum()
if (not output_filename):
output_filename = ('graph_' + type_graph)
elif output_filename.endswith('.html'):
output_filename = output_filename.replace('.html', '')
output_filename = get_non_existing_filename(('output/' + output_filename), 'html')
import plotly
import plotly.graph_objs as go
plotly.offline.plot({'data': [go.Scatter(x=df['date'], y=df['cumcount'])], 'layout': go.Layout(title=('# of %s items for %s' % (type_graph, name)))}, filename=output_filename, auto_open=False)
print(('File written: ' + output_filename)) |
@dataclass
class TransformerLanguageModelConfig(FairseqDataclass):
activation_fn: ChoiceEnum(utils.get_available_activation_fns()) = field(default='relu', metadata={'help': 'activation function to use'})
dropout: float = field(default=0.1, metadata={'help': 'dropout probability'})
attention_dropout: float = field(default=0.0, metadata={'help': 'dropout probability for attention weights'})
activation_dropout: float = field(default=0.0, metadata={'help': 'dropout probability after activation in FFN.'})
relu_dropout: float = field(default=0.0, metadata={'help': 'dropout probability after activation in FFN.'})
decoder_embed_dim: int = field(default=512, metadata={'help': 'decoder embedding dimension'})
decoder_output_dim: int = field(default=512, metadata={'help': 'decoder output dimension'})
decoder_input_dim: int = field(default=512, metadata={'help': 'decoder input dimension'})
decoder_ffn_embed_dim: int = field(default=2048, metadata={'help': 'decoder embedding dimension for FFN'})
decoder_layers: int = field(default=6, metadata={'help': 'num decoder layers'})
decoder_attention_heads: int = field(default=8, metadata={'help': 'num decoder attention heads'})
decoder_normalize_before: bool = field(default=False, metadata={'help': 'apply layernorm before each decoder block'})
no_decoder_final_norm: bool = field(default=False, metadata={'help': "don't add an extra layernorm after the last decoder block"})
adaptive_softmax_cutoff: Optional[str] = field(default=None, metadata={'help': 'comma separated list of adaptive softmax cutoff points. Must be used with adaptive_loss criterion'})
adaptive_softmax_dropout: float = field(default=0, metadata={'help': 'sets adaptive softmax dropout for the tail projections'})
adaptive_softmax_factor: float = field(default=4, metadata={'help': 'adaptive input factor'})
no_token_positional_embeddings: bool = field(default=False, metadata={'help': 'if set, disables positional embeddings (outside self attention)'})
share_decoder_input_output_embed: bool = field(default=False, metadata={'help': 'share decoder input and output embeddings'})
character_embeddings: bool = field(default=False, metadata={'help': 'if set, uses character embedding convolutions to produce token embeddings'})
character_filters: str = field(default='[(1, 64), (2, 128), (3, 192), (4, 256), (5, 256), (6, 256), (7, 256)]', metadata={'help': 'size of character embeddings'})
character_embedding_dim: int = field(default=4, metadata={'help': 'size of character embeddings'})
char_embedder_highway_layers: int = field(default=2, metadata={'help': 'number of highway layers for character token embeddder'})
adaptive_input: bool = field(default=False, metadata={'help': 'if set, uses adaptive input'})
adaptive_input_factor: float = field(default=4, metadata={'help': 'adaptive input factor'})
adaptive_input_cutoff: Optional[str] = field(default=None, metadata={'help': 'comma separated list of adaptive input cutoff points.'})
tie_adaptive_weights: bool = field(default=False, metadata={'help': 'if set, ties the weights of adaptive softmax and adaptive input'})
tie_adaptive_proj: bool = field(default=False, metadata={'help': 'if set, ties the projection weights of adaptive softmax and adaptive input'})
decoder_learned_pos: bool = field(default=False, metadata={'help': 'use learned positional embeddings in the decoder'})
decoder_layerdrop: float = field(default=0.0, metadata={'help': 'LayerDrop probability for decoder'})
decoder_layers_to_keep: Optional[str] = field(default=None, metadata={'help': 'which layers to *keep* when pruning as a comma-separated list'})
layernorm_embedding: bool = field(default=False, metadata={'help': 'add layernorm to embedding'})
no_scale_embedding: bool = field(default=False, metadata={'help': 'if True, dont scale embeddings'})
quant_noise_pq: float = field(default=0.0, metadata={'help': 'iterative PQ quantization noise at training time'})
quant_noise_pq_block_size: int = field(default=8, metadata={'help': 'block size of quantization noise at training time'})
quant_noise_scalar: float = field(default=0.0, metadata={'help': 'scalar quantization noise and scalar quantization at training time'})
add_bos_token: bool = II('task.add_bos_token')
tokens_per_sample: int = II('task.tokens_per_sample')
max_target_positions: Optional[int] = II('task.max_target_positions')
tpu: bool = II('params.common.tpu') |
class _SSHFormatEd25519():
def get_public(self, data: memoryview) -> tuple[(tuple, memoryview)]:
(point, data) = _get_sshstr(data)
return ((point,), data)
def load_public(self, data: memoryview) -> tuple[(ed25519.Ed25519PublicKey, memoryview)]:
((point,), data) = self.get_public(data)
public_key = ed25519.Ed25519PublicKey.from_public_bytes(point.tobytes())
return (public_key, data)
def load_private(self, data: memoryview, pubfields) -> tuple[(ed25519.Ed25519PrivateKey, memoryview)]:
((point,), data) = self.get_public(data)
(keypair, data) = _get_sshstr(data)
secret = keypair[:32]
point2 = keypair[32:]
if ((point != point2) or ((point,) != pubfields)):
raise ValueError('Corrupt data: ed25519 field mismatch')
private_key = ed25519.Ed25519PrivateKey.from_private_bytes(secret)
return (private_key, data)
def encode_public(self, public_key: ed25519.Ed25519PublicKey, f_pub: _FragList) -> None:
raw_public_key = public_key.public_bytes(Encoding.Raw, PublicFormat.Raw)
f_pub.put_sshstr(raw_public_key)
def encode_private(self, private_key: ed25519.Ed25519PrivateKey, f_priv: _FragList) -> None:
public_key = private_key.public_key()
raw_private_key = private_key.private_bytes(Encoding.Raw, PrivateFormat.Raw, NoEncryption())
raw_public_key = public_key.public_bytes(Encoding.Raw, PublicFormat.Raw)
f_keypair = _FragList([raw_private_key, raw_public_key])
self.encode_public(public_key, f_priv)
f_priv.put_sshstr(f_keypair) |