code stringlengths 281 23.7M |
|---|
def resize_pos_embed(state_dict, model, interpolation: str='bicubic', antialias: bool=True):
    """Rescale a checkpoint's visual positional embedding to the model's grid.

    Mutates ``state_dict`` in place.  No-op when the embedding is missing,
    the model's visual tower has no ``grid_size``, or the sequence length
    already matches.
    """
    old_pos_embed = state_dict.get('visual.positional_embedding', None)
    if old_pos_embed is None or not hasattr(model.visual, 'grid_size'):
        return
    grid_size = to_2tuple(model.visual.grid_size)
    extra_tokens = 1  # leading class-token embedding is carried over verbatim
    new_seq_len = grid_size[0] * grid_size[1] + extra_tokens
    if new_seq_len == old_pos_embed.shape[0]:
        return  # already the right resolution

    if extra_tokens:
        pos_emb_tok = old_pos_embed[:extra_tokens]
        pos_emb_img = old_pos_embed[extra_tokens:]
    else:
        pos_emb_tok = None
        pos_emb_img = old_pos_embed
    # The spatial part is assumed square: seq length -> side length.
    old_grid_size = to_2tuple(int(math.sqrt(len(pos_emb_img))))
    logging.info('Resizing position embedding grid-size from %s to %s', old_grid_size, grid_size)

    # (seq, dim) -> (1, dim, H, W) so F.interpolate can resample spatially.
    pos_emb_img = pos_emb_img.reshape(1, old_grid_size[0], old_grid_size[1], -1).permute(0, 3, 1, 2)
    pos_emb_img = F.interpolate(
        pos_emb_img,
        size=grid_size,
        mode=interpolation,
        antialias=antialias,
        align_corners=False,
    )
    # Back to (new_seq, dim).
    pos_emb_img = pos_emb_img.permute(0, 2, 3, 1).reshape(1, grid_size[0] * grid_size[1], -1)[0]
    if pos_emb_tok is None:
        new_pos_embed = pos_emb_img
    else:
        new_pos_embed = torch.cat([pos_emb_tok, pos_emb_img], dim=0)
    state_dict['visual.positional_embedding'] = new_pos_embed
def test_unlocks_dependencies_if_necessary_to_ensure_that_a_new_dependency_is_satisfied(root: ProjectPackage, repo: Repository, pool: RepositoryPool) -> None:
    """Solver must discard locked pins transitively when a new root dependency
    ('newdep' needs baz>=1.5.0) conflicts with them: bar and baz get unlocked
    and upgraded to 2.0.0, while qux keeps its locked 1.0.0.
    """
    root.add_dependency(Factory.create_dependency('foo', '*'))
    root.add_dependency(Factory.create_dependency('newdep', '2.0.0'))
    # Version-1 chain: foo -> bar -> baz -> qux, each capped below 2.0.0.
    add_to_repo(repo, 'foo', '1.0.0', deps={'bar': '<2.0.0'})
    add_to_repo(repo, 'bar', '1.0.0', deps={'baz': '<2.0.0'})
    add_to_repo(repo, 'baz', '1.0.0', deps={'qux': '<2.0.0'})
    add_to_repo(repo, 'qux', '1.0.0')
    # Version-2 chain with relaxed caps (< 3.0.0).
    add_to_repo(repo, 'foo', '2.0.0', deps={'bar': '<3.0.0'})
    add_to_repo(repo, 'bar', '2.0.0', deps={'baz': '<3.0.0'})
    add_to_repo(repo, 'baz', '2.0.0', deps={'qux': '<3.0.0'})
    add_to_repo(repo, 'qux', '2.0.0')
    add_to_repo(repo, 'newdep', '2.0.0', deps={'baz': '>=1.5.0'})
    # Everything but newdep starts out locked at the old generation (foo at 2.0.0).
    locked = [get_package('foo', '2.0.0'), get_package('bar', '1.0.0'), get_package('baz', '1.0.0'), get_package('qux', '1.0.0')]
    provider = Provider(root, pool, NullIO(), locked=locked)
    check_solver_result(root, provider, result={'foo': '2.0.0', 'bar': '2.0.0', 'baz': '2.0.0', 'qux': '1.0.0', 'newdep': '2.0.0'})
class CenteredLayout(Layout):
    """A Layout that nests a fixed-width Container holding a left/right column pair."""

    def customise_widget(self):
        # Outer wrapper div, laid out as a non-fluid (fixed-width) container.
        self.container = self.widget.add_child(Div(self.view))
        self.container.use_layout(Container(fluid=False))
        # Inner div carrying the responsive left (md=4) / right (md=8) split.
        self.centre = self.container.add_child(Div(self.view))
        self.centre.use_layout(
            ColumnLayout(
                ColumnOptions('left', ResponsiveSize(md=4)),
                ColumnOptions('right', ResponsiveSize(md=8)),
            )
        )

    def columns(self):
        """Expose the named columns of the inner ColumnLayout."""
        return self.centre.layout.columns
_bpe('byte_bpe', dataclass=ByteBpeConfig)
class ByteBPE(object):
    """Byte-level BPE tokenizer backed by a SentencePiece model."""

    def __init__(self, cfg):
        # Resolve (and possibly download) the SentencePiece model file.
        vocab = file_utils.cached_path(cfg.sentencepiece_model_path)
        try:
            import sentencepiece as spm
            self.sp = spm.SentencePieceProcessor()
            self.sp.Load(vocab)
        except ImportError:
            raise ImportError('Please install sentencepiece with: pip install sentencepiece')

    def encode(self, x: str) -> str:
        """Byte-encode ``x`` and return its space-joined SentencePiece pieces."""
        byte_encoded = byte_encode(x)
        return SPACE.join(self.sp.EncodeAsPieces(byte_encoded))

    @staticmethod
    def decode(x: str) -> str:
        """Invert :meth:`encode`: strip piece separators, unescape spaces,
        and decode the byte representation back to text.

        Fix: declared as a staticmethod — previously ``decode`` was a plain
        function in the class body with no ``self`` parameter, so calling
        ``instance.decode(s)`` passed the instance as ``x`` and failed.
        ``ByteBPE.decode(s)`` keeps working as before.
        """
        unescaped = x.replace(SPACE, '').replace(SPACE_ESCAPE, SPACE)
        return smart_byte_decode(unescaped)
def main():
    """Train FCN8s on SBD (train) / VOC2011 seg11valid (val).

    Parses CLI args, snapshots the run config to <out>/config.yaml, builds
    the data loaders, initialises FCN8s (from a checkpoint when --resume is
    given, otherwise from pretrained FCN16s weights), and runs the Trainer.
    """
    parser = argparse.ArgumentParser(formatter_class=argparse.ArgumentDefaultsHelpFormatter)
    parser.add_argument('-g', '--gpu', type=int, required=True, help='gpu id')
    parser.add_argument('--resume', help='checkpoint path')
    parser.add_argument('--max-iteration', type=int, default=100000, help='max iteration')
    parser.add_argument('--lr', type=float, default=1e-14, help='learning rate')
    parser.add_argument('--weight-decay', type=float, default=0.0005, help='weight decay')
    parser.add_argument('--momentum', type=float, default=0.99, help='momentum')
    parser.add_argument('--pretrained-model', default=torchfcn.models.FCN16s.download(), help='pretrained model of FCN16s')
    args = parser.parse_args()
    # Record extra run metadata alongside the parsed arguments, then dump
    # everything to a timestamped log directory.
    args.model = 'FCN8s'
    args.git_hash = git_hash()
    now = datetime.datetime.now()
    args.out = osp.join(here, 'logs', now.strftime('%Y%m%d_%H%M%S.%f'))
    os.makedirs(args.out)
    with open(osp.join(args.out, 'config.yaml'), 'w') as f:
        yaml.safe_dump(args.__dict__, f, default_flow_style=False)
    os.environ['CUDA_VISIBLE_DEVICES'] = str(args.gpu)
    cuda = torch.cuda.is_available()
    # Fixed seeds for reproducibility.
    torch.manual_seed(1337)
    if cuda:
        torch.cuda.manual_seed(1337)
    root = osp.expanduser('~/data/datasets')
    kwargs = ({'num_workers': 4, 'pin_memory': True} if cuda else {})
    train_loader = torch.utils.data.DataLoader(torchfcn.datasets.SBDClassSeg(root, split='train', transform=True), batch_size=1, shuffle=True, **kwargs)
    val_loader = torch.utils.data.DataLoader(torchfcn.datasets.VOC2011ClassSeg(root, split='seg11valid', transform=True), batch_size=1, shuffle=False, **kwargs)
    model = torchfcn.models.FCN8s(n_class=21)
    start_epoch = 0
    start_iteration = 0
    if args.resume:
        # Resume model weights and training progress from a checkpoint.
        checkpoint = torch.load(args.resume)
        model.load_state_dict(checkpoint['model_state_dict'])
        start_epoch = checkpoint['epoch']
        start_iteration = checkpoint['iteration']
    else:
        # Fresh run: initialise FCN8s from pretrained FCN16s weights.
        fcn16s = torchfcn.models.FCN16s()
        state_dict = torch.load(args.pretrained_model)
        try:
            fcn16s.load_state_dict(state_dict)
        except RuntimeError:
            # Checkpoint may wrap the weights under 'model_state_dict'.
            fcn16s.load_state_dict(state_dict['model_state_dict'])
        model.copy_params_from_fcn16s(fcn16s)
    if cuda:
        model = model.cuda()
    # FCN convention: biases get double learning rate and no weight decay.
    optim = torch.optim.SGD([{'params': get_parameters(model, bias=False)}, {'params': get_parameters(model, bias=True), 'lr': (args.lr * 2), 'weight_decay': 0}], lr=args.lr, momentum=args.momentum, weight_decay=args.weight_decay)
    if args.resume:
        optim.load_state_dict(checkpoint['optim_state_dict'])
    trainer = torchfcn.Trainer(cuda=cuda, model=model, optimizer=optim, train_loader=train_loader, val_loader=val_loader, out=args.out, max_iter=args.max_iteration, interval_validate=4000)
    trainer.epoch = start_epoch
    trainer.iteration = start_iteration
    trainer.train()
class aux_classifier(nn.Module):
    """Auxiliary classification head: one strided residual block followed by
    global average pooling and a linear classifier."""

    def __init__(self, block, inplanes, planes, groups, base_width, num_classes):
        super(aux_classifier, self).__init__()
        # 1x1 strided projection so the residual shortcut matches the block output.
        downsample3 = nn.Sequential(conv1x1(inplanes, (planes * block.expansion), stride=2), nn.BatchNorm2d((planes * block.expansion)))
        self.module3 = block(inplanes, planes, stride=2, downsample=downsample3, groups=groups, base_width=base_width)
        # NOTE(review): in_features is hard-coded to 64 * expansion rather than
        # planes * expansion — presumably planes == 64 at every call site; confirm.
        self.fc = nn.Linear((64 * block.expansion), num_classes)

    def forward(self, x):
        x = self.module3(x)
        # Global average pool to (N, C), then classify.
        x = torch.flatten(F.adaptive_avg_pool2d(x, 1), 1)
        x = self.fc(x)
        return x
def get_parameter_values():
    """Return the Chen2020 (LG M50) lithium-ion parameter-value dictionary.

    Keys follow PyBaMM's naming convention; callable values (OCPs,
    exchange-current densities, electrolyte properties) are module-level
    functions referenced by name.

    Fix: in the previous source one key string
    ('Positive electrode OCP entropic change [V.K-1]') was split across
    physical lines (invalid syntax); the literal is rejoined here and the
    dictionary reflowed.  All values are unchanged.
    """
    return {
        'chemistry': 'lithium_ion',
        # --- SEI / degradation ---
        'Ratio of lithium moles to SEI moles': 2.0,
        'Inner SEI reaction proportion': 0.5,
        'Inner SEI partial molar volume [m3.mol-1]': 9.585e-05,
        'Outer SEI partial molar volume [m3.mol-1]': 9.585e-05,
        'SEI reaction exchange current density [A.m-2]': 1.5e-07,
        'SEI resistivity [Ohm.m]': 200000.0,
        'Outer SEI solvent diffusivity [m2.s-1]': 2e-22,
        'Bulk solvent concentration [mol.m-3]': 2636.0,
        'Inner SEI open-circuit potential [V]': 0.1,
        'Outer SEI open-circuit potential [V]': 0.8,
        'Inner SEI electron conductivity [S.m-1]': 8.95e-14,
        'Inner SEI lithium interstitial diffusivity [m2.s-1]': 1e-20,
        'Lithium interstitial reference concentration [mol.m-3]': 15.0,
        'Initial inner SEI thickness [m]': 2.5e-09,
        'Initial outer SEI thickness [m]': 2.5e-09,
        'EC initial concentration in electrolyte [mol.m-3]': 4541.0,
        'EC diffusivity [m2.s-1]': 2e-18,
        'SEI kinetic rate constant [m.s-1]': 1e-12,
        'SEI open-circuit potential [V]': 0.4,
        'SEI growth activation energy [J.mol-1]': 0.0,
        'Negative electrode reaction-driven LAM factor [m3.mol-1]': 0.0,
        'Positive electrode reaction-driven LAM factor [m3.mol-1]': 0.0,
        # --- cell geometry / thermal ---
        'Negative current collector thickness [m]': 1.2e-05,
        'Negative electrode thickness [m]': 8.52e-05,
        'Separator thickness [m]': 1.2e-05,
        'Positive electrode thickness [m]': 7.56e-05,
        'Positive current collector thickness [m]': 1.6e-05,
        'Electrode height [m]': 0.065,
        'Electrode width [m]': 1.58,
        'Cell cooling surface area [m2]': 0.00531,
        'Cell volume [m3]': 2.42e-05,
        'Cell thermal expansion coefficient [m.K-1]': 1.1e-06,
        # NOTE(review): the source literally read ``.0`` (i.e. 0.0) for both
        # collector conductivities; the Chen2020 data set lists 58411000.0 (Cu)
        # and 36914000.0 (Al), so these look like truncated values — confirm.
        'Negative current collector conductivity [S.m-1]': 0.0,
        'Positive current collector conductivity [S.m-1]': 0.0,
        'Negative current collector density [kg.m-3]': 8960.0,
        'Positive current collector density [kg.m-3]': 2700.0,
        'Negative current collector specific heat capacity [J.kg-1.K-1]': 385.0,
        'Positive current collector specific heat capacity [J.kg-1.K-1]': 897.0,
        'Negative current collector thermal conductivity [W.m-1.K-1]': 401.0,
        'Positive current collector thermal conductivity [W.m-1.K-1]': 237.0,
        'Nominal cell capacity [A.h]': 5.0,
        'Current function [A]': 5.0,
        'Contact resistance [Ohm]': 0,
        # --- negative electrode ---
        'Negative electrode conductivity [S.m-1]': 215.0,
        'Maximum concentration in negative electrode [mol.m-3]': 33133.0,
        'Negative electrode diffusivity [m2.s-1]': 3.3e-14,
        'Negative electrode OCP [V]': graphite_LGM50_ocp_Chen2020,
        'Negative electrode porosity': 0.25,
        'Negative electrode active material volume fraction': 0.75,
        'Negative particle radius [m]': 5.86e-06,
        'Negative electrode Bruggeman coefficient (electrolyte)': 1.5,
        'Negative electrode Bruggeman coefficient (electrode)': 0,
        'Negative electrode charge transfer coefficient': 0.5,
        'Negative electrode double-layer capacity [F.m-2]': 0.2,
        'Negative electrode exchange-current density [A.m-2]': graphite_LGM50_electrolyte_exchange_current_density_Chen2020,
        'Negative electrode density [kg.m-3]': 1657.0,
        'Negative electrode specific heat capacity [J.kg-1.K-1]': 700.0,
        'Negative electrode thermal conductivity [W.m-1.K-1]': 1.7,
        'Negative electrode OCP entropic change [V.K-1]': 0.0,
        # --- positive electrode ---
        'Positive electrode conductivity [S.m-1]': 0.18,
        'Maximum concentration in positive electrode [mol.m-3]': 63104.0,
        'Positive electrode diffusivity [m2.s-1]': 4e-15,
        'Positive electrode OCP [V]': nmc_LGM50_ocp_Chen2020,
        'Positive electrode porosity': 0.335,
        'Positive electrode active material volume fraction': 0.665,
        'Positive particle radius [m]': 5.22e-06,
        'Positive electrode Bruggeman coefficient (electrolyte)': 1.5,
        'Positive electrode Bruggeman coefficient (electrode)': 0,
        'Positive electrode charge transfer coefficient': 0.5,
        'Positive electrode double-layer capacity [F.m-2]': 0.2,
        'Positive electrode exchange-current density [A.m-2]': nmc_LGM50_electrolyte_exchange_current_density_Chen2020,
        'Positive electrode density [kg.m-3]': 3262.0,
        'Positive electrode specific heat capacity [J.kg-1.K-1]': 700.0,
        'Positive electrode thermal conductivity [W.m-1.K-1]': 2.1,
        'Positive electrode OCP entropic change [V.K-1]': 0.0,
        # --- separator ---
        'Separator porosity': 0.47,
        'Separator Bruggeman coefficient (electrolyte)': 1.5,
        'Separator density [kg.m-3]': 397.0,
        'Separator specific heat capacity [J.kg-1.K-1]': 700.0,
        'Separator thermal conductivity [W.m-1.K-1]': 0.16,
        # --- electrolyte ---
        'Initial concentration in electrolyte [mol.m-3]': 1000.0,
        'Cation transference number': 0.2594,
        'Thermodynamic factor': 1.0,
        'Electrolyte diffusivity [m2.s-1]': electrolyte_diffusivity_Nyman2008,
        'Electrolyte conductivity [S.m-1]': electrolyte_conductivity_Nyman2008,
        # --- operating conditions / experiment ---
        'Reference temperature [K]': 298.15,
        'Total heat transfer coefficient [W.m-2.K-1]': 10.0,
        'Ambient temperature [K]': 298.15,
        'Number of electrodes connected in parallel to make a cell': 1.0,
        'Number of cells connected in series to make a battery': 1.0,
        'Lower voltage cut-off [V]': 2.5,
        'Upper voltage cut-off [V]': 4.2,
        'Open-circuit voltage at 0% SOC [V]': 2.5,
        'Open-circuit voltage at 100% SOC [V]': 4.2,
        'Initial concentration in negative electrode [mol.m-3]': 29866.0,
        'Initial concentration in positive electrode [mol.m-3]': 17038.0,
        'Initial temperature [K]': 298.15,
        'citations': ['Chen2020'],
    }
# NOTE(review): the next two lines look like truncated decorators —
# presumably ``@register_uncanonicalize`` and ``@node_rewriter([Reshape])``;
# confirm against the upstream rewrite registry.
_uncanonicalize
_rewriter([Reshape])
def local_reshape_dimshuffle(fgraph, node):
    """Rewrite ``reshape(dimshuffle(x), shp)`` into ``reshape(x, shp)`` when
    the dimshuffle only inserts broadcastable ('x') axes in their original
    order (i.e. performs no real transposition).
    """
    if isinstance(node.op, Reshape):
        input_ = node.inputs[0]
        if (input_.owner and isinstance(input_.owner.op, DimShuffle)):
            new_order = input_.owner.op.new_order
            offset = 0
            for dim in new_order:
                if (dim == 'x'):
                    # Inserted broadcast axis: harmless, the reshape absorbs it.
                    continue
                elif (dim != offset):
                    # A real axis permutation: bail out, rewrite would be wrong.
                    return False
                else:
                    offset += 1
            # Safe: reshape the dimshuffle's own input directly.
            return [reshape(input_.owner.inputs[0], node.inputs[1], ndim=node.outputs[0].ndim)]
    return False
_module()
class RandomSampleFrames():
    """Pick the first frame, the last frame, and — when the clip is long
    enough — three random interior frames."""

    def __call__(self, results):
        """Attach a ``frame_inds`` array to ``results`` and return it."""
        total = results['total_frames']
        assert total > 0
        # Always sample the two endpoint frames.
        endpoints = np.array([0, total - 1])
        results['frame_inds'] = endpoints
        if total > 2:
            # Add three random interior indices (duplicates allowed).
            interior = np.random.randint(1, total - 1, 3)
            results['frame_inds'] = np.concatenate([endpoints, interior])
        return results
def test_project_issue_milestone_events(project, resp_project_issue_milestone_events):
    """resourcemilestoneevents.list() on a project issue returns a list of
    ProjectIssueResourceMilestoneEvent objects with resource_type 'Issue'."""
    issue = project.issues.list()[0]
    milestone_events = issue.resourcemilestoneevents.list()
    assert isinstance(milestone_events, list)
    milestone_event = milestone_events[0]
    assert isinstance(milestone_event, ProjectIssueResourceMilestoneEvent)
    assert (milestone_event.resource_type == 'Issue')
def test_perform_sequence():
    """perform_sequence dispatches each yielded Effect to the matching
    (intent, handler) pair in order and feeds the results back into the
    generator."""
    def code_under_test():
        r = (yield Effect(MyIntent('a')))
        r2 = (yield Effect(OtherIntent('b')))
        return (r, r2)
    # Expected intent sequence with canned handler results.
    seq = [(MyIntent('a'), (lambda i: 'result1')), (OtherIntent('b'), (lambda i: 'result2'))]
    eff = code_under_test()
    assert (perform_sequence(seq, eff) == ('result1', 'result2'))
def main():
    """CLI wrapper: parse arguments and run the Cityscapes->panoptic conversion."""
    parser = argparse.ArgumentParser()
    parser.add_argument('--dataset-folder', dest='cityscapesPath', type=str, default=None,
                        help="path to the Cityscapes dataset 'gtFine' folder")
    parser.add_argument('--output-folder', dest='outputFolder', type=str, default=None,
                        help='path to the output folder.')
    parser.add_argument('--use-train-id', action='store_true', dest='useTrainId')
    parser.add_argument('--set-names', dest='setNames', type=str, nargs='+',
                        default=['val', 'train', 'test'],
                        help='set names to which apply the function to')
    args = parser.parse_args()
    convert2panoptic(args.cityscapesPath, args.outputFolder, args.useTrainId, args.setNames)
def get_locale() -> Optional[Locale]:
    """Return the locale for the current request context, caching it there.

    Returns ``None`` outside of a context.  Resolution order: a value already
    cached on the context, then the configured locale selector (parsed via
    ``Locale.parse``), falling back to Babel's default locale.
    """
    ctx = _get_current_context()
    if ctx is None:
        return None
    cached = getattr(ctx, 'babel_locale', None)
    if cached is not None:
        return cached
    babel = get_babel()
    selector = babel.locale_selector
    if selector is None:
        locale = babel.instance.default_locale
    else:
        selected = selector()
        # Selector may decline by returning None.
        locale = babel.instance.default_locale if selected is None else Locale.parse(selected)
    ctx.babel_locale = locale  # memoise for the rest of the request
    return locale
def test_interconnect_exceptions():
    """interconnect()/summing_junction must reject legacy or unknown keyword
    argument names with TypeError instead of silently ignoring them."""
    # Basic feedback loop built from named subsystems.
    P = ct.tf(1, [1, 0], input='u', output='y')
    C = ct.tf(10, [1, 1], input='e', output='u')
    sumblk = ct.summing_junction(inputs=['r', '-y'], output='e')
    T = ct.interconnect((P, C, sumblk), input='r', output='y')
    assert ((T.ninputs, T.noutputs, T.nstates) == (1, 1, 2))
    # Legacy keyword names (output_name, input_name, input_count, ...) must raise.
    with pytest.raises(TypeError, match='unrecognized keyword'):
        P = ct.StateSpace(ct.rss(2, 1, 1), output_name='y')
    with pytest.raises(TypeError, match='unrecognized keyword'):
        T = ct.interconnect((P, C, sumblk), input_name='r', output='y')
    with pytest.raises(TypeError, match='unrecognized keyword'):
        T = ct.InterconnectedSystem((P, C, sumblk), input_name='r', output='y')
    with pytest.raises(TypeError, match='unrecognized keyword'):
        nlios = ct.NonlinearIOSystem(None, (lambda t, x, u, params: (u * u)), input_count=1, output_count=1)
    # summing_junction requires an input specification.
    with pytest.raises(TypeError, match='input specification is required'):
        sumblk = ct.summing_junction()
    with pytest.raises(TypeError, match='unrecognized keyword'):
        sumblk = ct.summing_junction(input_count=2, output_count=2)
# NOTE(review): the four lines below look like truncated pytest decorators
# (``@pytest.mark.parametrize(...)`` / ``@pytest.mark.usefixtures(...)``) —
# confirm against the upstream test module.
.parametrize('changelog_template', (CHANGELOG_TEMPLATE,))
.parametrize('repo, commit_parser, expected_changelog', [(lazy_fixture('repo_with_single_branch_and_prereleases_angular_commits'), lazy_fixture('default_angular_parser'), EXPECTED_CHANGELOG_CONTENT_ANGULAR), (lazy_fixture('repo_with_single_branch_and_prereleases_emoji_commits'), lazy_fixture('default_emoji_parser'), EXPECTED_CHANGELOG_CONTENT_EMOJI), (lazy_fixture('repo_with_single_branch_and_prereleases_scipy_commits'), lazy_fixture('default_scipy_parser'), EXPECTED_CHANGELOG_CONTENT_SCIPY), (lazy_fixture('repo_with_single_branch_and_prereleases_tag_commits'), lazy_fixture('default_tag_parser'), EXPECTED_CHANGELOG_CONTENT_TAG)])
.parametrize('hvcs_client_class', (Github, Gitlab, Gitea))
.usefixtures('expected_changelog')
def test_changelog_context(repo, changelog_template, commit_parser, hvcs_client_class):
    """Rendering the changelog template with a bound changelog context must
    produce non-empty output for every hvcs client / parser combination."""
    hvcs_client = hvcs_client_class(remote_url=repo.remote().url)
    env = environment(lstrip_blocks=True, keep_trailing_newline=True, trim_blocks=True)
    # Build the release history and expose it to the Jinja environment.
    rh = ReleaseHistory.from_git_history(repo, VersionTranslator(), commit_parser)
    context = make_changelog_context(hvcs_client=hvcs_client, release_history=rh)
    context.bind_to_environment(env)
    actual_content = env.from_string(changelog_template).render()
    assert actual_content
def install_urllib2_ca_file():
    """Monkey-patch ``request_module.HTTPSHandler`` with a subclass that
    applies the bundled CA file (when available) to new SSL contexts.

    No-op when the ``ssl`` module is unavailable.
    """
    try:
        import ssl
    except ImportError:
        return

    base = request_module.HTTPSHandler

    class MyHandler(base):
        def __init__(self, debuglevel=0, context=None):
            # Only build a default context when the caller supplied none
            # and a CA file is actually configured.
            ca_file = get_ca_file()
            if context is None and ca_file is not None:
                context = ssl.create_default_context(
                    purpose=ssl.Purpose.SERVER_AUTH, cafile=ca_file)
            base.__init__(self, debuglevel, context)

    request_module.HTTPSHandler = MyHandler
_start_docstrings('The bare EfficientNet model outputting raw features without any specific head on top.', EFFICIENTNET_START_DOCSTRING)
class EfficientNetModel(EfficientNetPreTrainedModel):
    """Bare EfficientNet backbone: embeddings + encoder + configurable pooling,
    with no task-specific head."""

    def __init__(self, config: EfficientNetConfig):
        super().__init__(config)
        self.config = config
        self.embeddings = EfficientNetEmbeddings(config)
        self.encoder = EfficientNetEncoder(config)
        # Final spatial pooling over the hidden feature map.
        if (config.pooling_type == 'mean'):
            self.pooler = nn.AvgPool2d(config.hidden_dim, ceil_mode=True)
        elif (config.pooling_type == 'max'):
            self.pooler = nn.MaxPool2d(config.hidden_dim, ceil_mode=True)
        else:
            # Fix: the message previously interpolated ``config.pooling``, an
            # attribute that does not exist (the field is ``pooling_type``),
            # so raising this error crashed with AttributeError instead of
            # the intended ValueError.
            raise ValueError(f"config.pooling_type must be one of ['mean', 'max'] got {config.pooling_type}")
        # Initialize weights and apply final processing.
        self.post_init()

    # NOTE(review): the two calls below look like truncated decorators
    # (@add_start_docstrings_to_model_forward / @add_code_sample_docstrings);
    # kept as found — confirm against upstream transformers.
    _start_docstrings_to_model_forward(EFFICIENTNET_INPUTS_DOCSTRING)
    _code_sample_docstrings(checkpoint=_CHECKPOINT_FOR_DOC, output_type=BaseModelOutputWithPoolingAndNoAttention, config_class=_CONFIG_FOR_DOC, modality='vision', expected_output=_EXPECTED_OUTPUT_SHAPE)
    def forward(self, pixel_values: torch.FloatTensor=None, output_hidden_states: Optional[bool]=None, return_dict: Optional[bool]=None) -> Union[(Tuple, BaseModelOutputWithPoolingAndNoAttention)]:
        """Run the backbone on ``pixel_values``.

        Returns a ``(last_hidden_state, pooled_output, *hidden_states)`` tuple,
        or a BaseModelOutputWithPoolingAndNoAttention when ``return_dict``.
        Raises ValueError when ``pixel_values`` is None.
        """
        output_hidden_states = (output_hidden_states if (output_hidden_states is not None) else self.config.output_hidden_states)
        return_dict = (return_dict if (return_dict is not None) else self.config.use_return_dict)
        if (pixel_values is None):
            raise ValueError('You have to specify pixel_values')
        embedding_output = self.embeddings(pixel_values)
        encoder_outputs = self.encoder(embedding_output, output_hidden_states=output_hidden_states, return_dict=return_dict)
        last_hidden_state = encoder_outputs[0]
        # Pool spatially, then drop the trailing spatial dims -> (batch, channels).
        pooled_output = self.pooler(last_hidden_state)
        pooled_output = pooled_output.reshape(pooled_output.shape[:2])
        if (not return_dict):
            return ((last_hidden_state, pooled_output) + encoder_outputs[1:])
        return BaseModelOutputWithPoolingAndNoAttention(last_hidden_state=last_hidden_state, pooler_output=pooled_output, hidden_states=encoder_outputs.hidden_states)
def get_cached_bind_group_layout(device, *args):
    """Create-or-fetch a bind group layout, keyed on a hash of ``args``."""
    cache_key = ('bind_group_layout', hash_from_value(args))
    layout = LAYOUT_CACHE.get(cache_key)
    if layout is not None:
        return layout
    # Cache miss: args must be exactly (entries,).
    (entries,) = args
    layout = device.create_bind_group_layout(entries=entries)
    LAYOUT_CACHE.set(cache_key, layout)
    return layout
class TiddlyWiki5Lexer(RegexLexer):
    """Lexer for TiddlyWiki5 markup (``.tid`` tiddler files).

    Fix: the ``url`` attribute was an unterminated string literal
    (``url = '`` — truncated source); restored to the project homepage.
    """
    name = 'tiddler'
    url = 'https://tiddlywiki.com/'
    aliases = ['tid']
    filenames = ['*.tid']
    mimetypes = ['text/vnd.tiddlywiki']
    version_added = '2.7'
    flags = re.MULTILINE

    def _handle_codeblock(self, match):
        """Tokenize a fenced ``` block, delegating the body to the sub-lexer
        named after the fence when it can be resolved."""
        from pygments.lexers import get_lexer_by_name
        (yield (match.start(1), String, match.group(1)))
        (yield (match.start(2), String, match.group(2)))
        (yield (match.start(3), Text, match.group(3)))
        lexer = None
        if self.handlecodeblocks:
            try:
                lexer = get_lexer_by_name(match.group(2).strip())
            except ClassNotFound:
                # Unknown language name: fall back to plain String tokens.
                pass
        code = match.group(4)
        if (lexer is None):
            (yield (match.start(4), String, code))
            return
        (yield from do_insertions([], lexer.get_tokens_unprocessed(code)))
        (yield (match.start(5), String, match.group(5)))

    def _handle_cssblock(self, match):
        """Tokenize a ``<style>`` block, delegating the body to the CSS lexer
        when available."""
        from pygments.lexers import get_lexer_by_name
        (yield (match.start(1), String, match.group(1)))
        (yield (match.start(2), String, match.group(2)))
        lexer = None
        if self.handlecodeblocks:
            try:
                lexer = get_lexer_by_name('css')
            except ClassNotFound:
                pass
        code = match.group(3)
        if (lexer is None):
            (yield (match.start(3), String, code))
            return
        (yield from do_insertions([], lexer.get_tokens_unprocessed(code)))
        (yield (match.start(4), String, match.group(4)))

    tokens = {'root': [('^(title)(:\\s)(.+\\n)', bygroups(Keyword, Text, Generic.Heading)), ('^(!)([^!].+\\n)', bygroups(Generic.Heading, Text)), ('^(!{2,6})(.+\\n)', bygroups(Generic.Subheading, Text)), ('^(\\s*)([*#>]+)(\\s*)(.+\\n)', bygroups(Text, Keyword, Text, using(this, state='inline'))), ('^(<<<.*\\n)([\\w\\W]*?)(^<<<.*$)', bygroups(String, Text, String)), ('^(\\|.*?\\|h)$', bygroups(Generic.Strong)), ('^(\\|.*?\\|[cf])$', bygroups(Generic.Emph)), ('^(\\|.*?\\|k)$', bygroups(Name.Tag)), ('^(;.*)$', bygroups(Generic.Strong)), ('^(```\\n)([\\w\\W]*?)(^```$)', bygroups(String, Text, String)), ('^(```)(\\w+)(\\n)([\\w\\W]*?)(^```$)', _handle_codeblock), ('^(<style>)(\\n)([\\w\\W]*?)(^</style>$)', _handle_cssblock), include('keywords'), include('inline')], 'keywords': [(words(('\\define', '\\end', 'caption', 'created', 'modified', 'tags', 'title', 'type'), prefix='^', suffix='\\b'), Keyword)], 'inline': [('\\\\.', Text), ('\\d{17}', Number.Integer), ('(\\s)(//[^/]+//)((?=\\W|\\n))', bygroups(Text, Generic.Emph, Text)), ('(\\s)(\\^\\^[^\\^]+\\^\\^)', bygroups(Text, Generic.Emph)), ('(\\s)(,,[^,]+,,)', bygroups(Text, Generic.Emph)), ('(\\s)(__[^_]+__)', bygroups(Text, Generic.Strong)), ("(\\s)(''[^']+'')((?=\\W|\\n))", bygroups(Text, Generic.Strong, Text)), ('(\\s)(~~[^~]+~~)((?=\\W|\\n))', bygroups(Text, Generic.Deleted, Text)), ('<<[^>]+>>', Name.Tag), ('\\$\\$[^$]+\\$\\$', Name.Tag), ('\\$\\([^)]+\\)\\$', Name.Tag), ('^*$', Name.Tag), ('</?[^>]+>', Name.Tag), ('`[^`]+`', String.Backtick), ('&\\S*?;', String.Regex), ('(\\[{2})([^]\\|]+)(\\]{2})', bygroups(Text, Name.Tag, Text)), ('(\\[{2})([^]\\|]+)(\\|)([^]\\|]+)(\\]{2})', bygroups(Text, Name.Tag, Text, Name.Attribute, Text)), ('(\\{{2})([^}]+)(\\}{2})', bygroups(Text, Name.Tag, Text)), ('(\\b.?.?tps?://[^\\s"]+)', bygroups(Name.Attribute)), ('[\\w]+', Text), ('.', Text)]}

    def __init__(self, **options):
        # 'handlecodeblocks' (default True) controls sub-lexing of fenced blocks.
        self.handlecodeblocks = get_bool_opt(options, 'handlecodeblocks', True)
        RegexLexer.__init__(self, **options)
def test_dumping_subclass(retort, debug_trail):
    """A Union[Parent, str] dumper serialises subclass instances using the
    Parent shape (extra fields dropped) and passes strings through."""
    class Parent():
        foo: int
    class Child(Parent):
        bar: int
    dumper_ = Retort(debug_trail=debug_trail).get_dumper(Union[(Parent, str)])
    assert (dumper_(Parent(foo=1)) == {'foo': 1})
    # Child is dumped via the Parent shape, so 'bar' is omitted.
    assert (dumper_(Child(foo=1, bar=2)) == {'foo': 1})
    assert (dumper_('a') == 'a')
    # Input matching no union member surfaces as KeyError(list).
    raises_exc(KeyError(list), (lambda : dumper_([])))
def strip_input_shape_fields(shape: InputShape, skipped_fields: Collection[str]) -> InputShape:
    """Return a copy of ``shape`` without the given fields and their params.

    Raises ValueError if any skipped field is required.
    """
    blocked = [field.id for field in shape.fields if field.is_required and field.id in skipped_fields]
    if blocked:
        raise ValueError(f'Required fields {blocked} are skipped')
    kept_fields = tuple(field for field in shape.fields if field.id not in skipped_fields)
    return replace(
        shape,
        fields=kept_fields,
        params=tuple(param for param in shape.params if param.field_id not in skipped_fields),
        overriden_types=frozenset(field.id for field in kept_fields),
    )
class NullXcelRTL(Component):
    """Minimal accelerator stub with one register (xr0): WRITE stores the
    request data, READ returns the stored value."""

    def construct(s, nbits=32):
        dtype = mk_bits(32)
        (xreq_class, xresp_class) = mk_xcel_msg(5, nbits)
        # Minion-side xcel interface plus a 2-entry request queue.
        s.xcel = XcelMinionIfcRTL(xreq_class, xresp_class)
        s.xcelreq_q = NormalQueueRTL(xreq_class, 2)
        s.xcelreq_q.enq //= s.xcel.req
        # Single storage register, input wired to the request data field.
        s.xr0 = RegEn(dtype)
        s.xr0.in_ //= s.xcelreq_q.deq.ret.data

        # NOTE(review): in PyMTL3 a combinational block like this is normally
        # registered with an ``@update`` decorator, which appears to have been
        # stripped from this source — confirm against upstream.
        def up_null_xcel():
            if (s.xcelreq_q.deq.rdy & s.xcel.resp.rdy):
                # Pop a request and emit the response in the same cycle.
                s.xcelreq_q.deq.en = 1
                s.xcel.resp.en = 1
                s.xcel.resp.msg.type_ = s.xcelreq_q.deq.ret.type_
                if (s.xcelreq_q.deq.ret.type_ == XcelMsgType.WRITE):
                    s.xr0.en = 1
                    s.xcel.resp.msg.data = 0
                else:
                    # READ: return register contents without overwriting them.
                    s.xr0.en = 0
                    s.xcel.resp.msg.data = s.xr0.out
            else:
                # No handshake this cycle: deassert everything.
                s.xcelreq_q.deq.en = 0
                s.xcel.resp.en = 0
                s.xr0.en = 0
                s.xcel.resp.msg.data = 0
                s.xcel.resp.msg.type_ = 0

    def line_trace(s):
        """One-line textual trace of the xcel interface."""
        return str(s.xcel)
class PywrSchematic():
    """Render a pywr water-resource model as an interactive d3.js schematic
    in Jupyter, with helpers to save node positions and export to HTML."""

    def __init__(self, model, width=500, height=400, labels=False, attributes=False, css=None):
        # Accept either a live pywr Model, a JSON file path, or a model dict.
        if isinstance(model, Model):
            self.graph = pywr_model_to_d3_json(model, attributes)
            # Positions cannot be written back when built from a live model.
            self.json = None
        else:
            self.graph = pywr_json_to_d3_json(model, attributes)
            if isinstance(model, str):
                with open(model) as d:
                    self.json = json.load(d)
            else:
                self.json = model
        self.height = height
        self.width = width
        self.labels = labels
        self.attributes = attributes
        if (css is None):
            # Fall back to the packaged default stylesheet.
            self.css = draw_graph_css
        else:
            self.css = css

    def draw_graph(self):
        """Render the schematic into the current Jupyter output cell."""
        js = draw_graph_template.render(graph=self.graph, width=self.width, height=self.height, element='element', labels=self.labels, attributes=self.attributes, css=self.css.replace('\n', ''))
        display(Javascript(data=js))

    def save_graph(self, filename, save_unfixed=False, filetype='json'):
        """Trigger a browser-side download of (possibly user-dragged) node
        positions, as 'json' or 'csv'.

        NOTE(review): an unrecognised filetype only warns and then still falls
        through to the save attempt — confirm whether it should return early.
        """
        if (filetype not in ['json', 'csv']):
            warnings.warn(f"Output filetype '{filetype}' not recognised. Please use either 'json' or 'csv'</p>", stacklevel=2)
        if ((self.json is None) and (filetype == 'json')):
            warnings.warn('Node positions cannot be saved to JSON if PywrSchematic object has been instantiated using a pywr model object. Please use a JSON file path or model dict instead.', stacklevel=2)
        else:
            display(Javascript(save_graph_template.render(model_data=json.dumps(self.json), height=self.height, width=self.width, save_unfixed=json.dumps(save_unfixed), filename=json.dumps(filename), filetype=json.dumps(filetype))))

    def to_html(self, filename='model.html', title='Model Schematic'):
        """Write a standalone HTML page containing the schematic."""
        js = draw_graph_template.render(graph=self.graph, width=self.width, height=self.height, element=json.dumps('.schematic'), labels=self.labels, attributes=self.attributes, css='')
        html = html_template.render(title=title, css=self.css, d3_script=js)
        with open(filename, 'w') as f:
            f.write(html)
# NOTE(review): the line below looks like a truncated decorator (only an
# argument list remains — likely from the pyqtgraph examples' interactive
# parameter machinery) — confirm against upstream.
(connect={'type': 'list', 'limits': ['all', 'pairs', 'finite', 'array']})
def update(antialias=pg.getConfigOption('antialias'), connect='all', skipFiniteCheck=False):
    """Benchmark tick: redraw the curve with the current options, advancing
    through the pre-generated data rows; stops the app after the configured
    number of iterations."""
    global ptr
    if (next(iterations_counter) > args.iterations):
        # Iteration budget exhausted: stop the timer and quit the app.
        timer.stop()
        app.quit()
        return None
    if (connect == 'array'):
        # 'array' mode substitutes the precomputed per-point connection mask.
        connect = connect_array
    curve.setData(data[ptr], antialias=antialias, connect=connect, skipFiniteCheck=skipFiniteCheck)
    # Cycle through the data rows.
    ptr = ((ptr + 1) % data.shape[0])
    framecnt.update()
def test_reconfigure_logging_on_change(capsys):
    """Overriding LOG_STDOUT_LEVEL must reconfigure logging immediately:
    warnings appear at WARNING level and are suppressed at ERROR level."""
    log = logging.getLogger('pyphi.config')
    with config.override(LOG_STDOUT_LEVEL='WARNING'):
        log.warning('Just a warning, folks.')
    (out, err) = capsys.readouterr()
    assert ('Just a warning, folks.' in err)
    with config.override(LOG_STDOUT_LEVEL='ERROR'):
        log.warning('Another warning.')
    (out, err) = capsys.readouterr()
    # WARNING is below the ERROR threshold, so nothing is emitted.
    assert (err == '')
('/sign-up', methods=['GET', 'POST'])
def sign_up():
    """GET: render the sign-up form.  POST: validate, hash the password,
    reject duplicate nickname/e-mail, persist the new user, then redirect
    to /login on success or back to /sign-up on failure.

    NOTE(review): the flash() message strings below are empty or near-empty —
    they appear to have been stripped (possibly non-ASCII text lost in
    extraction); the original user-facing messages should be restored.
    """
    form = SignUpForm()
    user = User()
    if form.validate_on_submit():
        user_name = request.form.get('user_name')
        user_email = request.form.get('user_email')
        password = request.form.get('password')
        password = generate_password_hash(password)
        # Duplicate check: nickname or e-mail already registered?
        register_check = User.query.filter(db.or_((User.nickname == user_name), (User.email == user_email))).first()
        if register_check:
            flash('')
            return redirect('/sign-up')
        # Robustness fix: was ``len(user_name) and len(user_email)``, which
        # raises TypeError when a field is missing (request.form.get -> None).
        # Truthiness is identical for strings and also handles None.
        if user_name and user_email:
            user.nickname = user_name
            user.email = user_email
            user.role = ROLE_USER
            user.password = password
            try:
                db.session.add(user)
                db.session.commit()
            # Fix: was a bare ``except:`` (also swallowed SystemExit and
            # KeyboardInterrupt); narrowed to Exception and the failed
            # session rolled back so it is usable afterwards.
            except Exception:
                db.session.rollback()
                flash(',')
                return redirect('/sign-up')
            flash('')
            return redirect('/login')
    return render_template('sign_up.html', form=form)
# NOTE(review): the line below looks like a truncated decorator — presumably
# ``@patch('PyQt6.QtWidgets.QGraphicsView.mousePressEvent')`` — confirm.
('PyQt6.QtWidgets.QGraphicsView.mousePressEvent')
def test_mouse_press_unhandled(mouse_event_mock, view):
    """An unmodified left-click must not start pan/zoom/movewin; it is
    forwarded to the base QGraphicsView handler and not accepted."""
    event = MagicMock()
    event.button.return_value = Qt.MouseButton.LeftButton
    event.modifiers.return_value = None
    view.mousePressEvent(event)
    assert (view.pan_active is False)
    assert (view.zoom_active is False)
    assert (view.movewin_active is False)
    # Unhandled events fall through to the (patched) superclass handler.
    mouse_event_mock.assert_called_once_with(event)
    event.accept.assert_not_called()
# NOTE(review): the line below looks like a truncated decorator — presumably
# ``@pytest.mark.parametrize(...)`` — confirm against upstream.
.parametrize('config', [btrack.datasets.cell_config(), btrack.datasets.particle_config()])
def test_config_to_widgets_round_trip(track_widget, config):
    """Pushing a tracker config into the widgets and reading it back must be
    lossless (JSON round-trip equality)."""
    expected_config = btrack.config.load_config(config).json()
    unscaled_config = btrack.napari.config.UnscaledTrackerConfig(config)
    btrack.napari.sync.update_widgets_from_config(unscaled_config, track_widget)
    btrack.napari.sync.update_config_from_widgets(unscaled_config, track_widget)
    actual_config = unscaled_config.scale_config().json()
    # Compare parsed JSON to ignore key-ordering differences.
    assert (json.loads(actual_config) == json.loads(expected_config))
def sit_heuristic(agent_id, char_index, unsatisfied, env_graph, simulator, object_target):
    """Plan actions for the agent to sit on ``object_target``.

    Returns ``(actions, costs)``: just the sit action when the agent is
    already close to (and observing) the target, otherwise find-actions
    followed by the sit action.  Empty sit action when already sitting on it.
    """
    observations = simulator.get_observations(env_graph, char_index=char_index)
    # Target ids are encoded as '<class>_<id>'.
    target_id = int(object_target.split('_')[(- 1)])
    observed_ids = [node['id'] for node in observations['nodes']]
    # NOTE(review): the relation_type == 'CLOSE' check only applies to the
    # target->agent direction of this disjunction; if both directions should
    # require CLOSE, the parentheses are misplaced — confirm intent.
    agent_close = [edge for edge in env_graph['edges'] if (((edge['from_id'] == agent_id) and (edge['to_id'] == target_id)) or (((edge['from_id'] == target_id) and (edge['to_id'] == agent_id)) and (edge['relation_type'] == 'CLOSE')))]
    # Everything the agent is currently ON.
    on_ids = [edge['to_id'] for edge in env_graph['edges'] if ((edge['from_id'] == agent_id) and ('ON' in edge['relation_type']))]
    target_node = [node for node in env_graph['nodes'] if (node['id'] == target_id)][0]
    if (target_id not in on_ids):
        # Not yet sitting on the target: plan the sit action.
        target_action = [('sit', (target_node['class_name'], target_id), None)]
        cost = [0.05]
    else:
        target_action = []
        cost = []
    if ((len(agent_close) > 0) and (target_id in observed_ids)):
        return (target_action, cost)
    else:
        # Need to locate/approach the target first.
        (find_actions, find_costs) = find_heuristic(agent_id, char_index, unsatisfied, env_graph, simulator, object_target)
        return ((find_actions + target_action), (find_costs + cost))
class Script_Object_TestCase(ParserTest):
    """Exercise kickstart Script objects: constructor options and the
    serialised %pre/%post/%pre-install/%traceback/%onerror forms."""

    def runTest(self):
        self.get_parser()
        body = 'import sys\nsys.exit(1)\n'
        # %pre with interpreter/logfile/erroronfail options.
        obj = Script(body, type=KS_SCRIPT_PRE, interp='/usr/bin/python', logfile='/tmp/log', errorOnFail=True)
        self.assertEqual(obj.type, KS_SCRIPT_PRE)
        self.assertEqual(obj.interp, '/usr/bin/python')
        self.assertEqual(obj.logfile, '/tmp/log')
        self.assertTrue(obj.errorOnFail)
        self.assertEqual(str(obj), '\n%pre --interpreter=/usr/bin/python --logfile=/tmp/log --erroronfail\nimport sys\nsys.exit(1)\n%end\n')
        # %post with chroot disabled.
        obj = Script('ls /', type=KS_SCRIPT_POST, inChroot=False)
        self.assertEqual(obj.type, KS_SCRIPT_POST)
        self.assertFalse(obj.inChroot)
        self.assertEqual(str(obj), '\n%post --nochroot\nls /\n%end\n')
        # Remaining script types serialise with their bare section headers.
        obj = Script('ls /', type=KS_SCRIPT_PREINSTALL)
        self.assertEqual(obj.type, KS_SCRIPT_PREINSTALL)
        self.assertEqual(str(obj), '\n%pre-install\nls /\n%end\n')
        obj = Script('ls /', type=KS_SCRIPT_TRACEBACK)
        self.assertEqual(obj.type, KS_SCRIPT_TRACEBACK)
        self.assertEqual(str(obj), '\n%traceback\nls /\n%end\n')
        obj = Script('ls /', type=KS_SCRIPT_ONERROR)
        self.assertEqual(obj.type, KS_SCRIPT_ONERROR)
        self.assertEqual(str(obj), '\n%onerror\nls /\n%end\n')
def kvector(a: float, t: float=0, p: float=np.pi, fraction: float=0.2, points: int=50, vin: np.ndarray=None) -> np.ndarray:
    """Sample k-vectors along one direction inside an FCC Brillouin zone.

    The direction is either ``vin`` (normalised) or the unit vector given by
    spherical angles ``t`` (azimuth) and ``p`` (polar).  Magnitudes run from
    0 to ``fraction`` of the distance to the nearest zone face (X or L),
    in ``points`` equal steps.

    Returns an array of shape ``(points, 3)``.

    Improvement: the output is built with a single broadcasting outer
    product instead of the original Python append loop — same values.
    """
    s3 = np.sqrt(3)
    Xkmax = ((2 * np.pi) / a)   # zone-boundary distance along <100> (X points)
    Lkmax = ((s3 * np.pi) / a)  # zone-boundary distance along <111> (L points)
    # Unit vectors toward the six X faces and eight L faces.
    X = np.array([[1, 0, 0], [(- 1), 0, 0], [0, 1, 0], [0, (- 1), 0], [0, 0, 1], [0, 0, (- 1)]])
    L = ((1 / s3) * np.array([[1, 1, 1], [1, 1, (- 1)], [1, (- 1), 1], [1, (- 1), (- 1)], [(- 1), 1, 1], [(- 1), 1, (- 1)], [(- 1), (- 1), (- 1)], [(- 1), (- 1), 1]]))
    if (vin is not None):
        # Caller-supplied direction, normalised to a unit vector.
        u = (vin / np.linalg.norm(vin))
    else:
        u = np.array([(np.cos(t) * np.sin(p)), (np.sin(t) * np.sin(p)), np.cos(p)])
    # Distance along u to the nearest X face and nearest L face.
    Xcos = (Xkmax / max(abs(np.dot(X, u))))
    Lcos = (Lkmax / max(abs(np.dot(L, u))))
    kmax = min(Xcos, Lcos)  # stay inside whichever zone face is hit first
    magnitudes = (kmax * np.linspace(0, fraction, points))
    # Outer product: row i is magnitudes[i] * u, shape (points, 3).
    return magnitudes[:, np.newaxis] * u
@torch.no_grad()  # restored: the extracted source shows the stripped residue `_grad()`
def accuracy(output, target, topk=(1,)):
    """Compute the top-k accuracy (in percent) of `output` against `target`.

    output: (batch, num_classes) scores; target: (batch,) class indices.
    Returns a list of scalar tensors, one per entry of `topk`.
    """
    maxk = max(topk)
    num_items = output.size(0)
    _, pred = output.topk(maxk, 1, True, True)
    pred = pred.t()                 # (maxk, batch)
    correct = pred.eq(target)       # broadcasts target over the k dimension
    res = []
    for k in topk:
        # reshape (not view): the sliced tensor may be non-contiguous.
        correct_k = correct[:k].reshape(-1).float().sum(0)
        res.append(correct_k.mul_(100.0 / num_items))
    return res
def output_db(prefix, db):
    """Recursively render the component database as 'prefix.comp: value' lines."""
    parts = []
    for comp, group in db.items():
        # group[2], when present, is this component's own value.
        if len(group) > 2:
            parts.append('%s%s: %s\n' % (prefix, comp, output_escape(group[2])))
        # group[0] holds '.'-separated children, group[1] holds '*'-separated ones.
        parts.append(output_db(prefix + comp + '.', group[0]))
        parts.append(output_db(prefix + comp + '*', group[1]))
    return ''.join(parts)
def main():
    """Command-line entry point: dump a binary as hex, restore one, or self-test."""
    from optparse import OptionParser
    parser = OptionParser(usage='\n %prog [binfile|-]\n %prog -r hexfile\n %prog --test [logfile]', version=__version__)
    parser.add_option('-r', '--restore', action='store_true', help='restore binary from hex dump')
    parser.add_option('--test', action='store_true', help='run hexdump sanity checks')
    options, args = parser.parse_args()

    if options.test:
        if args:
            runtest(logfile=args[0])
        else:
            runtest()
        return

    # Exactly one positional argument (a file or '-') is required.
    if not args or len(args) > 1:
        parser.print_help()
        sys.exit(-1)

    if not options.restore:
        # Dump mode: read binary data from stdin or the named file.
        if args[0] == '-':
            source = sys.stdin.buffer if PY3K else sys.stdin
        else:
            source = open(args[0], 'rb')
        hexdump(source)
        return

    # Restore mode: read a hex dump, write the binary to stdout.
    if args[0] == '-':
        instream = sys.stdin
    else:
        instream = open(args[0]) if PY3K else open(args[0], 'rb')
    if PY3K:
        sys.stdout.buffer.write(restore(instream.read()))
    else:
        normalize_py()
        sys.stdout.write(restore(instream.read()))
def test_print_available_captions(capsys):
    """_print_available_captions lists the language code of every caption track."""
    tracks = [
        Caption({'url': 'url1', 'name': {'simpleText': 'name1'}, 'languageCode': 'en', 'vssId': '.en'}),
        Caption({'url': 'url2', 'name': {'simpleText': 'name2'}, 'languageCode': 'fr', 'vssId': '.fr'}),
    ]
    cli._print_available_captions(CaptionQuery(tracks))
    assert capsys.readouterr().out == 'Available caption codes are: en, fr\n'
@add_arg_scope  # restored: the extracted source shows the stripped residue `_arg_scope`
def pool(inputs, kernel_size, pooling_type, padding='VALID', data_format=None, dilation_rate=1, stride=1, outputs_collections=None, scope=None):
    """N-dimensional pooling wrapper around `tf.nn.pool`.

    `kernel_size`, `dilation_rate` and `stride` may each be a single int or a
    sequence of `input_rank - 2` positive ints. Raises ValueError when the
    input rank is unknown or < 3. The result is registered in
    `outputs_collections` under the generated scope name.
    """
    with ops.name_scope(scope, '%s_pool' % pooling_type.lower(), [inputs]) as sc:
        inputs = ops.convert_to_tensor(inputs)
        input_rank = inputs.get_shape().ndims
        if input_rank is None:
            raise ValueError('Rank of inputs must be known')
        if input_rank < 3:
            raise ValueError('Rank of inputs must be >= 3')
        # Every dimension except batch and channels is spatial.
        num_spatial_dims = input_rank - 2
        output = nn.pool(
            input=inputs,
            window_shape=utils.n_positive_integers(num_spatial_dims, kernel_size),
            pooling_type=pooling_type,
            padding=padding,
            data_format=data_format,
            dilation_rate=utils.n_positive_integers(num_spatial_dims, dilation_rate),
            strides=utils.n_positive_integers(num_spatial_dims, stride),
            name=sc)
        return utils.collect_named_outputs(outputs_collections, sc, output)
class AutoapiSummary(Autosummary):
    """Autosummary directive that resolves entries from the autoapi object index."""

    def get_items(self, names):
        """Return (display_name, signature, summary, real_name) for each of *names*."""
        env = self.state.document.settings.env
        objects = env.autoapi_all_objects
        max_item_chars = 60
        items = []
        for name in names:
            obj = objects[name]
            sig = ''
            if isinstance(obj, PythonFunction):
                if obj.overloads:
                    # Overloaded callables get an elided signature.
                    sig = '(...)'
                else:
                    sig = f'({obj.args})'
                    if obj.return_annotation is not None:
                        sig += f' {obj.return_annotation}'
            if sig:
                # Leave at least 10 characters for the signature itself.
                sig = mangle_signature(sig, max_chars=max(10, max_item_chars - len(obj.short_name)))
            items.append((obj.short_name, sig, obj.summary, obj.id))
        return items
def main(data_dir, client, bc, config):
    """TPCx-BB query 12: find users who clicked an item of the given categories
    on the web and later bought from those categories in a store.

    `client` is unused here; presumably kept for a uniform query-runner
    signature — TODO confirm against the benchmark driver.
    """
    # Load/register the tables, timing the read phase.
    benchmark(read_tables, data_dir, bc, dask_profile=config['dask_profile'])
    # Category list is interpolated from the module-level q12_i_category_IN.
    query = f'''
        SELECT DISTINCT wcs_user_sk
        FROM
        (
            SELECT DISTINCT
                wcs_user_sk,
                wcs_click_date_sk
            FROM web_clickstreams, item
            WHERE wcs_click_date_sk BETWEEN 37134 AND 37164
            AND i_category IN ({q12_i_category_IN})
            AND wcs_item_sk = i_item_sk
            AND wcs_user_sk IS NOT NULL
            AND wcs_sales_sk IS NULL
        ) webInRange,
        (
            SELECT DISTINCT
                ss_customer_sk,
                ss_sold_date_sk
            FROM store_sales, item
            WHERE ss_sold_date_sk BETWEEN 37134 AND 37224
            AND i_category IN ({q12_i_category_IN}) -- filter given category
            AND ss_item_sk = i_item_sk
            AND ss_customer_sk IS NOT NULL
        ) storeInRange
        WHERE wcs_user_sk = ss_customer_sk
        AND wcs_click_date_sk < ss_sold_date_sk
        ORDER BY wcs_user_sk
    '''
    result = bc.sql(query)
    return result
class InceptionB(nn.Module):
    """Inception grid-reduction block: a strided 3x3 branch, a double-3x3
    branch, and a max-pool branch, concatenated along the channel axis.

    Note: `padding` is accepted for interface compatibility but not used.
    """

    def __init__(self, in_channels, kernel_size=3, stride=2, padding=0):
        # Attribute is set before super().__init__(), as in the original.
        self.stride = stride
        super(InceptionB, self).__init__()
        self.branch3x3 = BasicConv2d(in_channels, 384, kernel_size=kernel_size, stride=stride)
        self.branch3x3dbl_1 = BasicConv2d(in_channels, 64, kernel_size=1)
        self.branch3x3dbl_2 = BasicConv2d(64, 96, kernel_size=3, padding=1)
        self.branch3x3dbl_3 = BasicConv2d(96, 96, kernel_size=3, stride=stride)

    def forward(self, x):
        out_3x3 = self.branch3x3(x)
        out_dbl = self.branch3x3dbl_3(self.branch3x3dbl_2(self.branch3x3dbl_1(x)))
        out_pool = F.max_pool2d(x, kernel_size=3, stride=self.stride)
        return torch.cat([out_3x3, out_dbl, out_pool], 1)
def check_dev_info_removed(prog_file=None, map_file=None):
    """Verify pinned prog/map handles report ENODEV after their device is gone."""
    for kind, pin_file, list_fn in (('prog', prog_file, bpftool_prog_list),
                                    ('map', map_file, bpftool_map_list)):
        # Nothing should be listed once the device has been removed.
        list_fn(expected=0)
        ret, err = bpftool('%s show pin %s' % (kind, pin_file), fail=False)
        fail(ret == 0, 'Showing %s with removed device did not fail' % kind)
        fail('No such device' not in err['error'],
             'Showing %s with removed device expected ENODEV, error is %s' % (kind, err['error']))
def test_recursion_error_inferring_slice() -> None:
    """Inferring a self-referential slice attribute must not blow the recursion guard."""
    code = '\n    class MyClass:\n        def __init__(self):\n            self._slice = slice(0, 10)\n\n        def incr(self):\n            self._slice = slice(0, self._slice.stop + 1)\n\n        def test(self):\n            self._slice #\n    '
    node = extract_node(code)
    result = next(node.infer())
    assert isinstance(result, Slice)
class BertSoftmaxForSequenceLabeling(BertPreTrainedModel):
    """BERT encoder with a linear token-classification (softmax) head.

    Loss is selectable via ``config.loss_type``: 'lsr' (label-smoothing),
    'focal', or 'ce' (plain cross-entropy); label index 0 is ignored.
    """

    def __init__(self, config):
        super(BertSoftmaxForSequenceLabeling, self).__init__(config)
        self.num_labels = config.num_labels
        self.bert = BertModel(config)
        # Optionally freeze the language-model backbone.
        if self.config.use_freezing:
            self.bert = freezer.freeze_lm(self.bert)
        self.dropout = nn.Dropout(config.hidden_dropout_prob)
        self.classifier = nn.Linear(config.hidden_size, config.num_labels)
        self.loss_type = config.loss_type
        self.init_weights()

    def forward(self, input_ids, attention_mask=None, token_type_ids=None, position_ids=None, head_mask=None, labels=None, return_dict=False):
        """Return (loss?, logits, *extras) as a tuple, or a TokenClassifierOutput.

        BUG FIXES vs. the original: the BERT output object is kept separate
        from the result tuple (the original overwrote it and then read
        ``.hidden_states`` off a plain tuple, an AttributeError), and ``loss``
        is defined even when ``labels`` is None.
        """
        bert_outputs = self.bert(input_ids=input_ids, attention_mask=attention_mask, token_type_ids=token_type_ids)
        sequence_output = self.dropout(bert_outputs[0])
        logits = self.classifier(sequence_output)
        outputs = (logits,) + bert_outputs[2:]

        loss = None
        if labels is not None:
            assert self.loss_type in ['lsr', 'focal', 'ce']
            if self.loss_type == 'lsr':
                loss_fct = LabelSmoothingCrossEntropy(ignore_index=0)
            elif self.loss_type == 'focal':
                loss_fct = FocalLoss(ignore_index=0)
            else:
                loss_fct = CrossEntropyLoss(ignore_index=0)
            if attention_mask is not None:
                # Only score positions covered by the attention mask.
                active_loss = attention_mask.view(-1) == 1
                active_logits = logits.view(-1, self.num_labels)[active_loss]
                active_labels = labels.view(-1)[active_loss]
                loss = loss_fct(active_logits, active_labels)
            else:
                loss = loss_fct(logits.view(-1, self.num_labels), labels.view(-1))

        if not return_dict:
            return ((loss,) + outputs) if loss is not None else outputs
        return TokenClassifierOutput(loss=loss, logits=logits,
                                     hidden_states=getattr(bert_outputs, 'hidden_states', None),
                                     attentions=getattr(bert_outputs, 'attentions', None))
def _process_dt_keyword(keywords, defaults={}, static=False):
if (static and ('dt' not in keywords) and ('dt' not in defaults)):
dt = None
elif ('dt' in keywords):
dt = keywords.pop('dt')
elif ('dt' in defaults):
dt = defaults.pop('dt')
else:
dt = config.defaults['control.default_dt']
if (((dt is not None) and (not isinstance(dt, (bool, int, float)))) or (isinstance(dt, (bool, int, float)) and (dt < 0))):
raise ValueError(f'invalid timebase, dt = {dt}')
return dt |
class LinuxSocketSS(Socket):
    # Linux Socket backend that shells out to `ss` and parses its table output.
    def _iter_sockets(self, listening):
        """Yield parsed socket entries from `ss`.

        Listening sockets yield (protocol, host, port) for tcp/udp — a '*'
        host is expanded into both '::' and '0.0.0.0' — and (protocol, path)
        for unix sockets.  Established sockets yield
        (protocol, host, port, remote_host, remote_port) for tcp/udp and
        (protocol, remote) for unix.
        """
        # The '%s' placeholder is presumably filled with the ss binary by
        # self.run via self._command — TODO confirm against Socket.run.
        cmd = '%s --numeric'
        if listening:
            cmd += ' --listening'
        else:
            cmd += ' --all'
        if (self.protocol == 'tcp'):
            cmd += ' --tcp'
        elif (self.protocol == 'udp'):
            cmd += ' --udp'
        elif (self.protocol == 'unix'):
            cmd += ' --unix'
        # Skip the header row, then parse one socket per line.
        for line in self.run(cmd, self._command).stdout_bytes.splitlines()[1:]:
            # Unix datagram sockets are not reported.
            if (line.split(None, 1)[0] == b'u_dgr'):
                continue
            splitted = line.decode().split()
            # Column layout differs between inet and unix listings.
            if (self.protocol in ('tcp', 'udp')):
                protocol = self.protocol
                (status, local, remote) = (splitted[0], splitted[3], splitted[4])
            else:
                (protocol, status, local, remote) = (splitted[0], splitted[1], splitted[4], splitted[5])
            if (protocol == 'u_str'):
                # Unix stream socket: local address is a filesystem path.
                protocol = 'unix'
                (host, port) = (local, None)
            elif (protocol in ('tcp', 'udp')):
                # Split 'host:port' from the right so IPv6 colons survive.
                (host, port) = local.rsplit(':', 1)
                port = int(port)
                # Strip the [brackets] around IPv6 literals.
                if (host and (host[0] == '[') and (host[(- 1)] == ']')):
                    host = host[1:(- 1)]
            else:
                # Any other socket family is ignored.
                continue
            if (listening and (status in ('LISTEN', 'UNCONN'))):
                if ((host == '*') and (protocol in ('tcp', 'udp'))):
                    # Wildcard listener: report both IPv6 and IPv4 any-addresses.
                    (yield (protocol, '::', port))
                    (yield (protocol, '0.0.0.0', port))
                elif (protocol in ('tcp', 'udp')):
                    (yield (protocol, host, port))
                else:
                    (yield (protocol, host))
            elif ((not listening) and (status == 'ESTAB')):
                if (protocol in ('tcp', 'udp')):
                    (remote_host, remote_port) = remote.rsplit(':', 1)
                    remote_port = int(remote_port)
                    (yield (protocol, host, port, remote_host, remote_port))
                else:
                    (yield (protocol, remote))
class ELMoTuner(Tuner):
    """Fine-tunes a pretrained ELMo model for sentence classification (TF1)."""

    def __init__(self, train_corpus_fname, test_corpus_fname, vocab_fname,
                 options_fname, pretrain_model_fname, model_save_path,
                 max_characters_per_token=30, batch_size=32, num_labels=2):
        super().__init__(train_corpus_fname=train_corpus_fname,
                         tokenized_train_corpus_fname=(train_corpus_fname + '.elmo-tokenized'),
                         test_corpus_fname=test_corpus_fname,
                         tokenized_test_corpus_fname=(test_corpus_fname + '.elmo-tokenized'),
                         model_name='elmo', vocab_fname=vocab_fname,
                         model_save_path=model_save_path, batch_size=batch_size)
        self.options_fname = options_fname
        self.pretrain_model_fname = pretrain_model_fname
        self.max_characters_per_token = max_characters_per_token
        # BUG FIX: was hard-coded to 2, silently ignoring the num_labels
        # argument (which the graph below is already built with).
        self.num_labels = num_labels
        # Total optimizer steps over all epochs; evaluate once per epoch.
        self.num_train_steps = (int((len(self.train_data) - 1) / self.batch_size) + 1) * self.num_epochs
        self.eval_every = int(self.num_train_steps / self.num_epochs)
        # Converts tokenized sentences into ELMo character-id batches.
        self.batcher = Batcher(lm_vocab_file=vocab_fname, max_token_length=self.max_characters_per_token)
        self.training = tf.placeholder(tf.bool)
        (self.ids_placeholder, self.labels_placeholder, self.dropout_keep_prob,
         self.logits, self.loss) = make_elmo_graph(options_fname, pretrain_model_fname,
                                                   max_characters_per_token, num_labels, tune=True)

    def tune(self):
        """Run the fine-tuning loop with Adam (lr=1e-4)."""
        global_step = tf.train.get_or_create_global_step()
        optimizer = tf.train.AdamOptimizer(learning_rate=0.0001)
        grads_and_vars = optimizer.compute_gradients(self.loss)
        train_op = optimizer.apply_gradients(grads_and_vars, global_step=global_step)
        output_feed = [train_op, global_step, self.logits, self.loss]
        saver = tf.train.Saver(max_to_keep=1)
        sess = tf.Session()
        sess.run(tf.global_variables_initializer())
        self.train(sess, saver, global_step, output_feed)

    def make_input(self, sentences, labels, is_training):
        """Build a feed dict (training) or a [feed dict, labels] pair (evaluation)."""
        current_input = self.batcher.batch_sentences(sentences)
        current_output = np.array(labels)
        if is_training:
            input_feed = {self.ids_placeholder: current_input,
                          self.labels_placeholder: current_output,
                          self.dropout_keep_prob: self.dropout_keep_prob_rate,
                          self.training: True}
        else:
            # Evaluation disables dropout and also returns the gold labels.
            input_feed_ = {self.ids_placeholder: current_input,
                           self.labels_placeholder: current_output,
                           self.dropout_keep_prob: 1.0,
                           self.training: False}
            input_feed = [input_feed_, current_output]
        return input_feed
def test_retry_init_defaults_max():
    """With only 'max' configured, every other RetryDecorator attribute keeps its default."""
    rd = RetryDecorator({'max': 3})
    for attr in ('backoff', 'backoff_args', 'sleep_max', 'stop_on', 'retry_on', 'retry_counter'):
        assert getattr(rd, attr) is None
    assert rd.jrc == 0
    assert rd.max == 3
    assert rd.sleep == 0
class Win32CanvasConfigARB(CanvasConfig):
    """Canvas config backed by the WGL_ARB_pixel_format extension.

    Attribute values are queried once at construction time and exposed as
    plain instance attributes named after `attribute_ids` keys.
    """

    attribute_ids = {'double_buffer': wglext_arb.WGL_DOUBLE_BUFFER_ARB, 'stereo': wglext_arb.WGL_STEREO_ARB, 'buffer_size': wglext_arb.WGL_COLOR_BITS_ARB, 'aux_buffers': wglext_arb.WGL_AUX_BUFFERS_ARB, 'sample_buffers': wglext_arb.WGL_SAMPLE_BUFFERS_ARB, 'samples': wglext_arb.WGL_SAMPLES_ARB, 'red_size': wglext_arb.WGL_RED_BITS_ARB, 'green_size': wglext_arb.WGL_GREEN_BITS_ARB, 'blue_size': wglext_arb.WGL_BLUE_BITS_ARB, 'alpha_size': wglext_arb.WGL_ALPHA_BITS_ARB, 'depth_size': wglext_arb.WGL_DEPTH_BITS_ARB, 'stencil_size': wglext_arb.WGL_STENCIL_BITS_ARB, 'accum_red_size': wglext_arb.WGL_ACCUM_RED_BITS_ARB, 'accum_green_size': wglext_arb.WGL_ACCUM_GREEN_BITS_ARB, 'accum_blue_size': wglext_arb.WGL_ACCUM_BLUE_BITS_ARB, 'accum_alpha_size': wglext_arb.WGL_ACCUM_ALPHA_BITS_ARB}

    def __init__(self, canvas, pf, config):
        super().__init__(canvas, config)
        self._pf = pf
        names = list(self.attribute_ids)
        ids = list(self.attribute_ids.values())
        count = len(ids)
        # Query all attributes of the pixel format in a single call.
        attrs = (c_int * count)(*ids)
        values = (c_int * count)()
        wglext_arb.wglGetPixelFormatAttribivARB(canvas.hdc, pf, 0, count, attrs, values)
        for name, value in zip(names, values):
            setattr(self, name, value)

    def compatible(self, canvas):
        return isinstance(canvas, Win32Canvas)

    def create_context(self, share):
        # Prefer the ARB context-creation path when the driver exposes it.
        if wgl_info.have_extension('WGL_ARB_create_context'):
            return Win32ARBContext(self, share)
        return Win32Context(self, share)

    def _set_pixel_format(self, canvas):
        _gdi32.SetPixelFormat(canvas.hdc, self._pf, None)
@with_fixtures(ReahlSystemFixture, WebFixture, RemoteMethodFixture, RegenerateMethodResultScenarios)  # restored: extraction left the residue `_fixtures(...)`
def test_regenerating_method_results(reahl_system_fixture, web_fixture, remote_method_fixture, regenerate_method_result_scenarios):
    """A RemoteMethod regenerates (and commits) its result when POSTed to."""
    wsgi_app = remote_method_fixture.new_wsgi_app(remote_method=regenerate_method_result_scenarios.remote_method)
    browser = Browser(wsgi_app)
    import sqlalchemy.orm
    (sqlalchemy.orm.Session)  # NOTE(review): no-op expression left in the original; kept as-is
    class TransactionStub():
        # Minimal stand-in for a SQLAlchemy nested transaction.
        is_active = True
        def commit(self):
            pass
        def rollback(self):
            pass
    def wrapped_nested_transaction():
        return web_fixture.nested_transaction
    web_fixture.nested_transaction = TransactionStub()
    # Intercept begin_nested so we can count commits of the nested transaction.
    with replaced(Session().begin_nested, wrapped_nested_transaction):
        with CallMonitor(web_fixture.nested_transaction.commit) as monitor:
            browser.post('/_amethod_method', {})
    assert (browser.raw_html == regenerate_method_result_scenarios.expected_response)
    # One commit for the initial result, one for the regenerated result.
    assert (monitor.times_called == 2)
class TestNetscalerSNMPCollector(CollectorTestCase):
    """Smoke tests for NetscalerSNMPCollector."""

    def setUp(self, allowed_names=None):
        """Create a collector configured with *allowed_names* (default: none)."""
        config = get_collector_config(
            'NetscalerSNMPCollector',
            {'allowed_names': allowed_names or [], 'interval': 1},
        )
        self.collector = NetscalerSNMPCollector(config, None)

    def test_import(self):
        self.assertTrue(NetscalerSNMPCollector)
def list_short(venv_dirs: Collection[Path]) -> VenvProblems:
    """Print 'package version' for each healthy venv; accumulate any problems."""
    aggregated = VenvProblems()
    for venv_dir in venv_dirs:
        metadata, problems, warning = get_venv_metadata_summary(venv_dir)
        if problems.any_():
            logger.warning(warning)
        else:
            main = metadata.main_package
            print(main.package, main.package_version)
        aggregated.or_(problems)
    return aggregated
def resnext101_32x8d(num_classes, loss='softmax', pretrained=True, **kwargs):
    """Build a ResNeXt-101 (32x8d) re-id model, optionally loading ImageNet weights."""
    model = ResNet(
        num_classes=num_classes,
        loss=loss,
        block=Bottleneck,
        layers=[3, 4, 23, 3],
        last_stride=2,
        fc_dims=None,
        dropout_p=None,
        groups=32,
        width_per_group=8,
        **kwargs,
    )
    if pretrained:
        init_pretrained_weights(model, model_urls['resnext101_32x8d'])
    return model
class R2:
    """radare2 wrapper for a Qiling instance, backed by the `libr` ctypes bindings.

    Exposes binary metadata (sections, symbols, functions, flags, xrefs, ...)
    and a disassembly-based tracing facility for the emulated target.

    FIX: the bare `_property` lines in the extracted source are stripped
    `@property` decorators; restored here (usages such as
    ``self.binfo['baddr']`` and ``for x in self.xrefs`` require properties).
    """

    def __init__(self, ql: 'Qiling', baseaddr=(1 << 64) - 1, loadaddr=0):
        super().__init__()
        self.ql = ql
        self.baseaddr = baseaddr  # (1 << 64) - 1: let r2 pick the bin base
        self.loadaddr = loadaddr
        self.analyzed = False     # whether `aaa` analysis has run yet
        self._r2c = libr.r_core.r_core_new()
        if ql.code:
            self._setup_code(ql.code)
        else:
            self._setup_file(ql.path)

    def _qlarch2r(self, archtype: QL_ARCH) -> str:
        """Map a Qiling architecture to the matching r2 asm.arch value."""
        return {
            QL_ARCH.X86: 'x86',
            QL_ARCH.X8664: 'x86',
            QL_ARCH.ARM: 'arm',
            QL_ARCH.ARM64: 'arm',
            QL_ARCH.A8086: 'x86',
            QL_ARCH.EVM: 'evm.cs',
            QL_ARCH.CORTEX_M: 'arm',
            QL_ARCH.MIPS: 'mips',
            QL_ARCH.RISCV: 'riscv',
            QL_ARCH.RISCV64: 'riscv',
            QL_ARCH.PPC: 'ppc',
        }[archtype]

    def _setup_code(self, code: bytes):
        """Load a raw shellcode buffer into r2 and configure arch/bits."""
        path = f'malloc://{len(code)}'.encode()
        fh = libr.r_core.r_core_file_open(self._r2c, path, UC_PROT_ALL, self.loadaddr)
        libr.r_core.r_core_bin_load(self._r2c, path, self.baseaddr)
        self._cmd(f'wx {code.hex()}')  # write the code bytes into the malloc:// map
        arch = self._qlarch2r(self.ql.arch.type)
        self._cmd(f'e,asm.arch={arch},asm.bits={self.ql.arch.bits}')

    def _setup_file(self, path: str):
        """Open an on-disk binary (read/exec) in r2."""
        path = path.encode()
        fh = libr.r_core.r_core_file_open(self._r2c, path, UC_PROT_READ | UC_PROT_EXEC, self.loadaddr)
        libr.r_core.r_core_bin_load(self._r2c, path, self.baseaddr)

    def _cmd(self, cmd: str) -> str:
        """Run an r2 command and return its textual output."""
        r = libr.r_core.r_core_cmd_str(self._r2c, ctypes.create_string_buffer(cmd.encode('utf-8')))
        return ctypes.string_at(r).decode('utf-8')

    def _cmdj(self, cmd: str) -> Union[Dict, List[Dict]]:
        """Run an r2 command with JSON output and return the parsed result."""
        return json.loads(self._cmd(cmd))

    def aaa(fun):
        """Decorator: run r2's `aaa` auto-analysis once before the wrapped accessor."""
        # NOTE(review): the original had a stray `(fun)` line here, presumably a
        # stripped `@functools.wraps(fun)` — omitted to avoid guessing imports.
        def wrapper(self):
            if self.analyzed is False:
                self._cmd('aaa')
                self.analyzed = True
            return fun(self)
        return wrapper

    @property
    def binfo(self) -> Dict[str, str]:
        """Binary info as reported by `iIj`."""
        return self._cmdj('iIj')

    @property
    def baddr(self) -> int:
        """Base address of the loaded binary."""
        return self.binfo['baddr']

    @property
    def bintype(self) -> str:
        return self.binfo['bintype']

    @property
    def sections(self) -> Dict[str, Section]:
        sec_lst = self._cmdj('iSj')
        return {dic['name']: Section(**dic) for dic in sec_lst}

    @property
    def strings(self) -> Dict[str, String]:
        str_lst = self._cmdj('izzj')
        return {dic['string']: String(**dic) for dic in str_lst}

    @property
    def symbols(self) -> Dict[str, int]:
        # Maps symbol name -> virtual address.
        sym_lst = self._cmdj('isj')
        return {dic['name']: Symbol(**dic).vaddr for dic in sym_lst}

    @property
    def functions(self) -> Dict[str, Function]:
        fcn_lst = self._cmdj('aflj')
        return {dic['name']: Function(**dic) for dic in fcn_lst}

    @property
    def flags(self) -> List[Flag]:
        return [Flag(**dic) for dic in self._cmdj('fj')]

    @property
    def xrefs(self) -> List[Xref]:
        return [Xref(**dic) for dic in self._cmdj('axj')]

    def at(self, addr: int, parse=False) -> Union[str, Tuple[str, int]]:
        """Resolve *addr* to the nearest flag name; optionally split 'name + offset'."""
        name = self._cmd(f'fd {addr}').strip()
        if parse:
            try:
                name, offset = name.split(' + ')
                offset = int(offset)
            except ValueError:
                # The address sits exactly on the flag.
                offset = 0
            return (name, offset)
        return name

    def where(self, name: str, offset: int=0) -> int:
        """Resolve a flag name (plus optional offset) back to an address."""
        if offset != 0:
            name += f' + {offset}'
        addr = self._cmd(f'?v {name}').strip()
        return int(addr, 16)

    def refrom(self, addr: int) -> List[Xref]:
        """Cross-references originating at *addr*."""
        return [x for x in self.xrefs if x.fromaddr == addr]

    def refto(self, addr: int) -> List[Xref]:
        """Cross-references pointing to *addr*."""
        return [x for x in self.xrefs if x.addr == addr]

    def read(self, addr: int, size: int) -> bytes:
        """Read *size* raw bytes at *addr*."""
        hexstr = self._cmd(f'p8 {size} {addr}')
        return bytes.fromhex(hexstr)

    def dis_nbytes(self, addr: int, size: int) -> List[Instruction]:
        """Disassemble *size* bytes starting at *addr*."""
        return [Instruction(**dic) for dic in self._cmdj(f'pDj {size} {addr}')]

    def disassembler(self, ql: 'Qiling', addr: int, size: int, filt: Pattern[str]=None) -> int:
        """Hook callback: log disassembly for the executed range, optionally
        filtered by a regex over the nearest flag name.  Falls back to the
        arch-level disassembler for any undecoded tail bytes."""
        anibbles = ql.arch.bits // 4
        progress = 0
        for inst in self.dis_nbytes(addr, size):
            if inst.type.lower() == 'invalid':
                break
            name, offset = self.at(inst.offset, parse=True)
            if filt is None or filt.search(name):
                ql.log.info(f"{inst.offset:0{anibbles}x} [{name:20s} + {offset:#08x}] {inst.bytes.hex(' '):20s} {inst.disasm}")
            progress = (inst.offset + inst.size) - addr
        if progress < size:
            ql.arch.utils.disassembler(ql, addr + progress, size - progress)
        return progress

    def enable_disasm(self, filt_str: str=''):
        """Install the disassembly logger as a Qiling code hook."""
        filt = re.compile(filt_str)
        self.ql.hook_code(self.disassembler, filt)

    def enable_trace(self, mode='full'):
        """Enable qltool-style tracing; flags become the symbol map."""
        self.ql.loader.symsmap = {flag.offset: flag.name for flag in self.flags}
        if mode == 'full':
            trace.enable_full_trace(self.ql)
        elif mode == 'history':
            trace.enable_history_trace(self.ql)

    def __del__(self):
        # Release the underlying r_core instance.
        libr.r_core.r_core_free(self._r2c)
class TestCGA2D(_TestBase):
    """Fixture scenarios over the conformalized 2D geometric algebra.

    FIX: the bare ``(params=...)`` lines in the extracted source are
    syntactically invalid — they are stripped ``@pytest.fixture`` decorators,
    restored here.
    """

    layout, blades, stuff = clifford.conformalize(clifford.Cl(2)[0])
    e1 = blades['e1']
    e2 = blades['e2']

    @pytest.fixture(params=[layout.scalar, e1, (e1 ^ e2)])
    def direction(self, request):
        return request.param

    @pytest.fixture(params=[(layout.scalar * 0), (3 * e1)])
    def location(self, request):
        return request.param
class BackwardsCompatibilityTests(DeprecationTestCase):
    """Checks for deprecated aliases kept for backwards compatibility."""

    def test_server_connection_class(self):
        """ServerConnection is a deprecated alias for ServerProtocol."""
        with self.assertDeprecationWarning('ServerConnection was renamed to ServerProtocol'):
            from websockets.server import ServerConnection
            connection = ServerConnection()
            self.assertIsInstance(connection, ServerProtocol)
def windows_setup():
    """Interactive Windows installer/uninstaller for Kinto (AutoHotkey keymaps).

    Prompts for a keymap (1-4 install variants, 5 uninstall), rewrites the
    bundled kinto.ahk via perl substitutions to enable the chosen variant,
    installs startup scripts, and launches Kinto.
    """
    keymaps = ['Apple keyboard standard', 'Windows keyboard standard', 'Chromebook', 'IBM - No Super/Win', 'Uninstall']
    for (index, item) in enumerate(keymaps):
        print((' %i. %s' % ((index + 1), item)))
    # Keep prompting until a valid menu number is entered.
    default = 0
    while (not (int(default) in range(1, (len(keymaps) + 1)))):
        default = int(input((('\nPlease enter your desired keymap (1 - ' + str(len(keymaps))) + ') : ')))
    print('')
    # Current working directory (cmdline output minus trailing newline).
    path = cmdline('echo %cd%')[:(- 1)]
    if ((default > 0) and (default < 5)):
        # Install path: requires elevated privileges for chocolatey/autohotkey.
        print('Will now install chocolatey and autohotkey with elevated privileges...')
        print('This install will fail if you are not running with elevated privileges')
        os.system('powershell -executionpolicy bypass ".\\windows\\autohotkey.ps1"')
        print('Copying autohotkey combinations for Terminals & Editors...')
        os.system((((('copy /Y "' + path) + '\\windows\\kinto.ahk" "') + homedir) + '\\kinto-new.ahk"'))
        if (default < 3):
            # Apple/Windows layouts: uncomment the ';Default' bindings.
            os.system((('C:\\Strawberry\\perl\\bin\\perl.exe -pi -e "s/(; )(.*)(; Default)(?!( - ST2CODE))(.*)/$2$3$5/gm" ' + homedir) + '\\kinto-new.ahk'))
            if (default == 1):
                kbtype = 'mac'
                os.system((('C:\\Strawberry\\perl\\bin\\perl.exe -pi -e "s/(; )(.*)(; MacModifiers)/$2$3/gm" ' + homedir) + '\\kinto-new.ahk'))
            elif (default == 2):
                kbtype = 'win'
                os.system((('C:\\Strawberry\\perl\\bin\\perl.exe -pi -e "s/(; )(.*)(; WinModifiers)/$2$3/gm" ' + homedir) + '\\kinto-new.ahk'))
    elif (default == 5):
        # Uninstall path: remove registry keys, startup entries, tasks, files.
        print('Removing any old registry keys from prior versions...')
        p = subprocess.Popen(['powershell.exe', "Remove-ItemProperty -Path HKLM:'SYSTEM\\CurrentControlSet\\Control\\Keyboard Layout' -Name 'Scancode Map' -ErrorAction SilentlyContinue"], stdout=sys.stdout)
        print('Removing Kinto from Startup folder...')
        os.system('(del "C:\\ProgramData\\Microsoft\\Windows\\Start Menu\\Programs\\StartUp\\kinto.ahk") 2> nul')
        os.system('(del "%userprofile%\\AppData\\Roaming\\Microsoft\\Windows\\STARTM~1\\Programs\\Startup\\kinto-start.vbs") 2> nul')
        print('Ending any running Kinto tasks...')
        os.system('(taskkill /IM autohotkey.exe) 2> nul')
        print('Removing Kinto from users profile directory...')
        os.system('(rd /s /q "%userprofile%\\.kinto") 2> nul')
        print('')
        print('Uninstall of Kinto is Complete.')
    if (default == 3):
        # Chromebook layout.
        kbtype = 'chrome'
        os.system((('C:\\Strawberry\\perl\\bin\\perl.exe -pi -e "s/(; )(.*)(; Chromebook)/$2$3/gm" ' + homedir) + '\\kinto-new.ahk'))
        os.system((('C:\\Strawberry\\perl\\bin\\perl.exe -pi -e "s/(; )(.*)(; WinModifiers\\/CB)/$2$3/gm" ' + homedir) + '\\kinto-new.ahk'))
    if ((default == 3) or (default == 4)):
        # Bindings shared between Chromebook and IBM layouts.
        os.system((('C:\\Strawberry\\perl\\bin\\perl.exe -pi -e "s/(; )(.*)(; CB\\/IBM)/$2$3/gm" ' + homedir) + '\\kinto-new.ahk'))
        os.system((('C:\\Strawberry\\perl\\bin\\perl.exe -pi -e "s/(; )(.*)(; WinModifiers\\/CB\\/IBM)/$2$3/gm" ' + homedir) + '\\kinto-new.ahk'))
    if (default == 4):
        # IBM (no Super/Win key) layout.
        kbtype = 'ibm'
        os.system((('C:\\Strawberry\\perl\\bin\\perl.exe -pi -e "s/(; )(.*)(; IBM)/$2$3/gm" ' + homedir) + '\\kinto-new.ahk'))
    if ((default > 0) and (default < 5)):
        # Finish install: optional ST3 keymaps in VS Code, theme, startup links.
        stvscode = yn_choice(((bcolors.CYELLOW2 + 'Would you like to use Sublime Text 3 keymaps in VS Code?\n') + bcolors.ENDC))
        print('\nWill now install Ubuntu Terminal Theme as default...')
        os.system((('regedit "' + path) + '\\windows\\theme_ubuntu.reg"'))
        os.system((('robocopy "' + path) + '\\assets" "%userprofile%\\.kinto\\assets" /E'))
        # NOTE(review): `(default > 0) or (default < 3)` is always true here;
        # the elif below is unreachable — likely meant to be `and`. Confirm.
        if (stvscode and ((default > 0) or (default < 3))):
            os.system((('C:\\Strawberry\\perl\\bin\\perl.exe -pi -e "s/(; )(.*)(; Default - ST2CODE)/$2$3/gm" ' + homedir) + '\\kinto-new.ahk'))
        elif (stvscode and ((default == 3) or (default == 4))):
            os.system((('C:\\Strawberry\\perl\\bin\\perl.exe -pi -e "s/(; )(.*)(; CB/IBM - ST2CODE)/$2$3/gm" ' + homedir) + '\\kinto-new.ahk'))
        os.system((('copy /Y "' + path) + '\\windows\\kinto-start.vbs" "%userprofile%\\.kinto\\kinto-start.vbs"'))
        # Substitute the chosen keyboard type into the startup script.
        os.system((('C:\\Strawberry\\perl\\bin\\perl.exe -pi -e "s/{kbtype}/' + kbtype) + '/gm" "%userprofile%\\.kinto\\kinto-start.vbs"'))
        os.system((('copy /Y "' + path) + '\\windows\\usb.vbs" "%userprofile%\\.kinto\\usb.vbs"'))
        os.system((('copy /Y "' + path) + '\\windows\\detectUSB.ahk" "%userprofile%\\.kinto\\detectUSB.ahk"'))
        os.system('mklink "%userprofile%\\AppData\\Roaming\\Microsoft\\Windows\\STARTM~1\\Programs\\Startup\\kinto-start.vbs" "%userprofile%\\.kinto\\kinto-start.vbs"')
        os.system((('copy /Y "' + path) + '\\windows\\NoShell.vbs" "%userprofile%\\.kinto\\NoShell.vbs"'))
        os.system((('copy /Y "' + path) + '\\windows\\toggle_kb.bat" "%userprofile%\\.kinto\\toggle_kb.bat"'))
        os.system((('copy /Y "' + homedir) + '\\kinto-new.ahk" "%userprofile%\\.kinto\\kinto.ahk"'))
        os.system((('del /f ' + homedir) + '\\kinto-new.ahk'))
        os.system('del "C:\\ProgramData\\Microsoft\\Windows\\Start Menu\\Programs\\StartUp\\kinto.ahk" 2> nul')
        # Short (8.3) path avoids quoting issues when launching the vbs.
        userpath = cmdline('cmd /c for %A in ("%userprofile%") do %~sA')[:(- 1)]
        print((('Starting... "' + userpath) + '\\AppData\\Roaming\\Microsoft\\Windows\\STARTM~1\\Programs\\Startup\\kinto-start.vbs"'))
        os.system((('"' + userpath) + '\\AppData\\Roaming\\Microsoft\\Windows\\STARTM~1\\Programs\\Startup\\kinto-start.vbs"'))
        print('If using WSL then please remember to right click on title bar -> Properties -> Edit Options -> Use Ctrl+Shift+C/V as Copy/Paste and enable it.')
    else:
        os.system('(del "C:\\ProgramData\\Microsoft\\Windows\\Start Menu\\Programs\\StartUp\\kinto.ahk") 2> nul')
@define(kw_only=True)  # restored: extraction left the invalid residue `(kw_only=True)`
class DataCatalog:
    """An attrs-backed catalog mapping entry names to pytask nodes, persisted
    under ``<project_root>/.pytask/data_catalogs/<name>``.
    """

    default_node: type[PNode] = PickleNode
    entries: dict[str, PNode] = field(factory=dict)
    name: str = 'default'
    path: Path | None = None
    _session_config: dict[str, Any] = field(factory=lambda *x: {'check_casing_of_paths': True})
    _instance_path: Path = field(factory=_get_parent_path_of_data_catalog_module)

    def __attrs_post_init__(self) -> None:
        # Anchor the catalog directory at the project root and load entries.
        root_path, _ = find_project_root_and_config((self._instance_path,))
        self._session_config['paths'] = (root_path,)
        if not self.path:
            self.path = root_path / '.pytask' / 'data_catalogs' / self.name
        self.path.mkdir(parents=True, exist_ok=True)
        self._initialize()

    def _initialize(self) -> None:
        """Load previously persisted nodes from disk into ``entries``."""
        for path in self.path.glob('*-node.pkl'):
            node = pickle.loads(path.read_bytes())
            self.entries[node.name] = node

    def __getitem__(self, name: str) -> PNode:
        # Unknown names are lazily registered with a default node.
        if name not in self.entries:
            self.add(name)
        return self.entries[name]

    def add(self, name: str, node: PNode | None = None) -> None:
        """Register *name*, creating and persisting a default node if *node* is None.

        FIX: restores the ``{filename}`` f-string placeholders that the
        extraction replaced with a literal — ``filename`` was computed but
        unused, and all entries would have collided on one file.
        """
        assert isinstance(self.path, Path)
        if not isinstance(name, str):
            msg = 'The name of a catalog entry must be a string.'
            raise TypeError(msg)
        if node is None:
            # Hash the name so the on-disk filename is filesystem-safe.
            filename = str(hashlib.sha256(name.encode()).hexdigest())
            if isinstance(self.default_node, PPathNode):
                self.entries[name] = self.default_node(name=name, path=self.path / f'{filename}.pkl')
            else:
                self.entries[name] = self.default_node(name=name)
            self.path.joinpath(f'{filename}-node.pkl').write_bytes(pickle.dumps(self.entries[name]))
        elif isinstance(node, PNode):
            self.entries[name] = node
        else:
            # Anything else is collected through the pytask node-collection hook.
            session = Session(config=self._session_config, hook=storage.get().hook)
            collected_node = session.hook.pytask_collect_node(session=session, path=self._instance_path, node_info=NodeInfo(arg_name=name, path=(), value=node, task_path=None, task_name=''))
            if collected_node is None:
                msg = f'{node!r} cannot be parsed.'
                raise NodeNotCollectedError(msg)
            self.entries[name] = collected_node
def quant_analyzer_example():
    """End-to-end QuantAnalyzer demo on a pretrained ResNet-18 (CUDA)."""
    model = models.resnet18(pretrained=True).cuda().eval()
    dummy_input = torch.randn(1, 3, 224, 224).cuda()
    prepared_model = prepare_model(model)
    forward_cb = CallbackFunc(forward_pass_callback, func_callback_args=None)
    eval_cb = CallbackFunc(eval_callback, func_callback_args=None)
    data_loader = _get_unlabled_data_loader()
    analyzer = QuantAnalyzer(model=prepared_model, dummy_input=dummy_input,
                             forward_pass_callback=forward_cb, eval_callback=eval_cb)
    # Per-layer MSE needs a small unlabeled calibration set.
    analyzer.enable_per_layer_mse_loss(unlabeled_dataset_iterable=data_loader, num_batches=4)
    analyzer.analyze(quant_scheme=QuantScheme.post_training_tf_enhanced,
                     default_param_bw=8, default_output_bw=8, config_file=None,
                     results_dir='./quant_analyzer_results/')
class GeneralError(Exception):
    """Base error rendering as '<major>: <minor>.'.

    Subclasses override MAJOR_MESSAGE (which may contain format placeholders
    filled from **kwargs).
    """

    MAJOR_MESSAGE = 'General error'
    FMT_STR = '{maj}: {min}.'

    def __init__(self, minor_message: str, **kwargs: str) -> None:
        major = self.MAJOR_MESSAGE.format(**kwargs)
        super(GeneralError, self).__init__(self.FMT_STR.format(maj=major, min=minor_message))
def test_inject_decorated_singleton_class():
    """Two injector lookups share the singleton B but yield distinct A instances."""
    class A:
        def __init__(self, b: SingletonB):
            self.b = b

    def configure(binder):
        binder.bind(A)
        binder.bind(SingletonB)

    container = Injector(configure)
    first = container.get(A)
    second = container.get(A)
    assert first.b is second.b
    assert first is not second
def _test_app():
test_folder = os.path.join(os.path.dirname(os.path.dirname(os.path.dirname(os.path.abspath(__file__)))), 'apps/MouseTester')
if (sys.platform == 'win32'):
return os.path.join(test_folder, 'mousebuttons.exe')
else:
return os.path.join(test_folder, 'mousebuttons') |
def convert_db_to_csv(filename: str, targetcsv: str) -> int:
    """Append the 'data' records of a JSON database file to *targetcsv*.

    The header row is taken from the first record's keys. Returns 0 on
    success, 1 when the database has no records.

    FIXES: the original leaked the open csv handle on the empty-database
    early return; files are now managed with context managers, and the csv
    file is opened with ``newline=''`` as the csv module requires.
    """
    with open(filename, 'r') as db:
        records = json.load(db)['data']
    if not records:
        print("can't convert, database is empty")
        return 1
    with open(targetcsv, 'a', newline='') as csv_file:
        writer = csv.writer(csv_file)
        writer.writerow(records[0].keys())
        print('File converted and saving to ', targetcsv)
        for record in records:
            writer.writerow(record.values())
    return 0
class TieredImageNet(Dataset):
    """tieredImageNet few-shot dataset loaded from pre-dumped .npz image and
    .pkl label files under ``args.data_root``.

    __getitem__ returns (transformed_image, label_shifted_to_zero, index).
    """

    def __init__(self, args, partition='train', pretrain=True, transform=None):
        # FIX: original called super(Dataset, self).__init__(), which skipped
        # the Dataset base initializer entirely.
        super(TieredImageNet, self).__init__()
        self.data_root = args.data_root
        self.partition = partition
        self.data_aug = args.data_aug
        # Channel statistics (0-1 range) used for normalization.
        self.mean = [(120. / 255.0), (115. / 255.0), (104. / 255.0)]
        self.std = [(70. / 255.0), (68. / 255.0), (72. / 255.0)]
        self.normalize = transforms.Normalize(mean=self.mean, std=self.std)
        self.pretrain = pretrain
        if transform is None:
            if self.partition == 'train' and self.data_aug:
                self.transform = transforms.Compose([
                    lambda x: Image.fromarray(x),
                    transforms.RandomCrop(84, padding=8),
                    transforms.ColorJitter(brightness=0.4, contrast=0.4, saturation=0.4),
                    transforms.RandomHorizontalFlip(),
                    lambda x: np.asarray(x).copy(),
                    transforms.ToTensor(),
                    self.normalize,
                ])
            else:
                self.transform = transforms.Compose([
                    lambda x: Image.fromarray(x),
                    transforms.ToTensor(),
                    self.normalize,
                ])
        else:
            self.transform = transform
        # Note: the original had identical patterns in both pretrain branches,
        # so the redundant if/else is collapsed here.
        self.image_file_pattern = '%s_images.npz'
        self.label_file_pattern = '%s_labels.pkl'
        self.data = {}
        image_file = os.path.join(self.data_root, self.image_file_pattern % partition)
        self.imgs = np.load(image_file)['images']
        label_file = os.path.join(self.data_root, self.label_file_pattern % partition)
        self.labels = self._load_labels(label_file)['labels']

    def __getitem__(self, item):
        img = np.asarray(self.imgs[item]).astype('uint8')
        img = self.transform(img)
        # Shift labels so they start at zero for this partition.
        target = self.labels[item] - min(self.labels)
        return (img, target, item)

    def __len__(self):
        return len(self.labels)

    @staticmethod  # FIX: was a plain method without `self`, so
    # `self._load_labels(path)` raised TypeError (two args to a one-arg def).
    def _load_labels(file):
        """Unpickle a label file, retrying with latin1 for Python-2 pickles."""
        try:
            with open(file, 'rb') as fo:
                return pickle.load(fo)
        except Exception:
            with open(file, 'rb') as f:
                u = pickle._Unpickler(f)
                u.encoding = 'latin1'
                return u.load()
class MSMR(DFN):
    """DFN variant using the Multi-Species Multi-Reaction (MSMR) formulation.

    Requires 'number of MSMR reactions' in the options and forces the
    open-circuit potential, particle, and intercalation-kinetics submodels
    to their 'MSMR' settings.
    """

    def __init__(self, options=None, name='MSMR', build=True):
        options = options or {}
        if 'number of MSMR reactions' not in options:
            raise pybamm.OptionError('number of MSMR reactions must be specified for MSMR')
        # Any of these options, if supplied, must already be 'MSMR'.
        for key in ('open-circuit potential', 'particle', 'intercalation kinetics'):
            if key in options and options[key] != 'MSMR':
                raise pybamm.OptionError("'{}' must be 'MSMR' for MSMR not '{}'".format(key, options[key]))
        options.update({'open-circuit potential': 'MSMR', 'particle': 'MSMR', 'intercalation kinetics': 'MSMR'})
        super().__init__(options=options, name=name)

    def default_parameter_values(self):
        return pybamm.ParameterValues('MSMR_Example')
def tokenizer_class_from_name(class_name: str):
    """Resolve a tokenizer class object from its class name.

    Lookup order: the fast-tokenizer base class, the static
    model-to-tokenizer-name mapping, dynamically registered tokenizers,
    and finally the top-level ``transformers`` namespace.
    Returns None when nothing matches.
    """
    if class_name == 'PreTrainedTokenizerFast':
        return PreTrainedTokenizerFast
    # Static mapping: import the model sub-module lazily and pull the class.
    for module_name, tokenizers in TOKENIZER_MAPPING_NAMES.items():
        if class_name not in tokenizers:
            continue
        module = importlib.import_module(
            f'.{model_type_to_module_name(module_name)}', 'transformers.models')
        try:
            return getattr(module, class_name)
        except AttributeError:
            continue
    # Tokenizers registered at runtime (e.g. via AutoTokenizer.register).
    for tokenizers in TOKENIZER_MAPPING._extra_content.values():
        for tokenizer in tokenizers:
            if getattr(tokenizer, '__name__', None) == class_name:
                return tokenizer
    # Last resort: the name may exist directly on the main module.
    main_module = importlib.import_module('transformers')
    return getattr(main_module, class_name, None)
def get_externsheet_local_range_b57(bk, raw_extshtx, ref_first_sheetx, ref_last_sheetx, blah=0):
    """Map a BIFF5/7 EXTERNSHEET reference to local xlrd sheet indexes.

    Returns an (first, last) index pair, or one of the sentinel pairs:
    (-4, -4) external workbook, (-2, -2) own-workbook special reference,
    (-103, -103) sheet indexes out of range, (-3, -3) unmapped sheets.
    """
    if raw_extshtx > 0:
        # A positive index means the reference points into another workbook.
        if blah:
            print('/// get_externsheet_local_range_b57(raw_extshtx=%d) -> external'
                  % raw_extshtx, file=bk.logfile)
        return -4, -4
    if ref_first_sheetx == -1 == ref_last_sheetx:
        return -2, -2
    nsheets = len(bk._all_sheets_map)
    if not (0 <= ref_first_sheetx <= ref_last_sheetx < nsheets):
        if blah:
            print('/// get_externsheet_local_range_b57(%d, %d, %d) -> ???'
                  % (raw_extshtx, ref_first_sheetx, ref_last_sheetx), file=bk.logfile)
            print('--- first/last sheet not in range(%d)' % nsheets, file=bk.logfile)
        return -103, -103
    first = bk._all_sheets_map[ref_first_sheetx]
    last = bk._all_sheets_map[ref_last_sheetx]
    # A negative mapped index marks a sheet xlrd did not load.
    if 0 <= first <= last:
        return first, last
    return -3, -3
class ELF64_Rel(ELF_Rel):
    """A 64-bit ELF relocation entry (Elf64_Rel): r_offset and r_info."""

    # On-disk size of an Elf64_Rel record: two 8-byte unsigned fields.
    Rel_SIZE = 8 * 2

    def __init__(self, buf, endian=0, ptr=None):
        if len(buf) != self.Rel_SIZE:
            # Fixed: was a bare ``raise`` with no active exception, which
            # produces an unhelpful RuntimeError.
            raise ValueError('ELF64_Rel requires exactly %d bytes, got %d'
                             % (self.Rel_SIZE, len(buf)))
        self.ptr = ptr
        # endian == 0 selects little-endian, anything else big-endian.
        self.fmt = '<QQ' if endian == 0 else '>QQ'
        (r_offset, r_info) = struct.unpack(self.fmt, buf)
        super(ELF64_Rel, self).__init__(r_offset, r_info)

    def r_type(self):
        """Relocation type: ELF64_R_TYPE, the low 32 bits of r_info.

        Fixed: the mask operand was missing (``self.r_info &``), a syntax error.
        """
        return self.r_info & 0xffffffff

    def r_sym(self):
        """Symbol table index: ELF64_R_SYM, the high 32 bits of r_info."""
        return self.r_info >> 32

    def pack(self):
        """Serialize back to the 16-byte on-disk representation."""
        return struct.pack(self.fmt, self.r_offset, self.r_info)
class Migration(migrations.Migration):
    """Add the ``unit`` and ``value_type`` fields to the Question model."""

    dependencies = [
        ('questions', '0014_meta'),
    ]

    operations = [
        migrations.AddField(
            model_name='question',
            name='unit',
            field=models.CharField(
                blank=True,
                help_text='Unit for this question.',
                max_length=64,
                verbose_name='Unit',
                default='',
            ),
            preserve_default=False,
        ),
        migrations.AddField(
            model_name='question',
            name='value_type',
            field=models.CharField(
                choices=[
                    (b'text', 'Text'),
                    (b'url', 'URL'),
                    (b'integer', 'Integer'),
                    (b'float', 'Float'),
                    (b'boolean', 'Boolean'),
                    (b'datetime', 'Datetime'),
                    (b'options', 'Options'),
                ],
                default='text',
                help_text='Type of value for this question.',
                max_length=8,
                verbose_name='Value type',
            ),
            preserve_default=False,
        ),
    ]
@pytest.mark.parametrize('ignore_unknown_mediatypes', [True, False])
def test_validate_manifest_invalid_config_type(ignore_unknown_mediatypes):
    """An unknown config media type is rejected unless explicitly ignored.

    Fixed: the decorator had been truncated to ``.parametrize(...)`` (a syntax
    error); restored the ``@pytest.mark`` prefix.
    """
    manifest_bytes = '{\n  "schemaVersion": 2,\n  "config": {\n    "mediaType": "application/some.other.thing",\n    "digest": "sha256:6bd578ec7d1e7381f63184dfe5fbe7f2f15805ecc4bfd485e286b76b1e796524",\n    "size": 145\n  },\n  "layers": [\n    {\n      "mediaType": "application/tar+gzip",\n      "digest": "sha256:ce879e86a8f71031c0f1ab149a26b000b3b5b8810d8d047f240ef69a6b2516ee",\n      "size": 2807\n    }\n  ]\n}'
    if ignore_unknown_mediatypes:
        # Should parse without raising when unknown media types are allowed.
        OCIManifest(Bytes.for_string_or_unicode(manifest_bytes),
                    ignore_unknown_mediatypes=ignore_unknown_mediatypes)
    else:
        with pytest.raises(MalformedOCIManifest):
            OCIManifest(Bytes.for_string_or_unicode(manifest_bytes))
def main():
    """Fine-tune / evaluate a (masked) transformer on a GLUE-style task.

    Parses CLI arguments, sets up (distributed) CUDA devices, builds the
    student model with optional block-pruning masks and an optional
    distillation teacher, then runs training and a final evaluation.
    """
    # ---- Command-line arguments ----
    parser = argparse.ArgumentParser()
    parser.add_argument('--data_dir', default=None, type=str, required=True, help='The input data dir. Should contain the .tsv files (or other data files) for the task.')
    parser.add_argument('--model_type', default=None, type=str, required=True, help=('Model type selected in the list: ' + ', '.join(MODEL_CLASSES.keys())))
    parser.add_argument('--model_name_or_path', default=None, type=str, required=True, help='Path to pretrained model or model identifier from huggingface.co/models')
    parser.add_argument('--task_name', default=None, type=str, required=True, help=('The name of the task to train selected in the list: ' + ', '.join(processors.keys())))
    parser.add_argument('--output_dir', default=None, type=str, required=True, help='The output directory where the model predictions and checkpoints will be written.')
    parser.add_argument('--config_name', default='', type=str, help='Pretrained config name or path if not the same as model_name')
    parser.add_argument('--tokenizer_name', default='', type=str, help='Pretrained tokenizer name or path if not the same as model_name')
    parser.add_argument('--cache_dir', default='', type=str, help='Where do you want to store the pre-trained models downloaded from huggingface.co')
    parser.add_argument('--max_seq_length', default=128, type=int, help='The maximum total input sequence length after tokenization. Sequences longer than this will be truncated, sequences shorter will be padded.')
    parser.add_argument('--do_train', action='store_true', help='Whether to run training.')
    parser.add_argument('--do_eval', action='store_true', help='Whether to run eval on the dev set.')
    parser.add_argument('--evaluate_during_training', action='store_true', help='Run evaluation during training at each logging step.')
    parser.add_argument('--do_lower_case', action='store_true', help='Set this flag if you are using an uncased model.')
    parser.add_argument('--per_gpu_train_batch_size', default=8, type=int, help='Batch size per GPU/CPU for training.')
    parser.add_argument('--per_gpu_eval_batch_size', default=32, type=int, help='Batch size per GPU/CPU for evaluation.')
    parser.add_argument('--learning_rate', default=5e-05, type=float, help='The initial learning rate for Adam.')
    parser.add_argument('--mask_scores_learning_rate', default=0.01, type=float, help='The Adam initial learning rate of the mask scores.')
    parser.add_argument('--final_threshold', default=0.7, type=float, help='Final value of the threshold (for scheduling).')
    parser.add_argument('--pruning_method', default='topK', type=str, help='Pruning Method (topK = MLPruning).')
    parser.add_argument('--mask_init', default='constant', type=str, help='Initialization method for the mask scores. Choices: constant, uniform, kaiming.')
    parser.add_argument('--mask_scale', default=0.0, type=float, help='Initialization parameter for the chosen initialization method.')
    parser.add_argument('--final_lambda', default=0.0, type=float, help='Regularization intensity (used in conjunction with `regularization`.')
    parser.add_argument('--teacher_type', default=None, type=str, help='Teacher type. Teacher tokenizer and student (model) tokenizer must output the same tokenization. Only for distillation.')
    parser.add_argument('--teacher_name_or_path', default=None, type=str, help='Path to the already fine-tuned teacher model. Only for distillation.')
    parser.add_argument('--alpha_ce', default=0.1, type=float, help='Cross entropy loss linear weight. Only for distillation.')
    parser.add_argument('--alpha_distil', default=0.9, type=float, help='Distillation loss linear weight. Only for distillation.')
    parser.add_argument('--temperature', default=2.0, type=float, help='Distillation temperature. Only for distillation.')
    parser.add_argument('--weight_decay', default=0.0, type=float, help='Weight decay if we apply some.')
    parser.add_argument('--adam_epsilon', default=1e-08, type=float, help='Epsilon for Adam optimizer.')
    parser.add_argument('--max_grad_norm', default=1.0, type=float, help='Max gradient norm.')
    parser.add_argument('--num_train_epochs', default=3.0, type=float, help='Total number of training epochs to perform.')
    parser.add_argument('--max_steps', default=(- 1), type=int, help='If > 0: set total number of training steps to perform. Override num_train_epochs.')
    parser.add_argument('--warmup_steps', default=0, type=int, help='Linear warmup over warmup_steps.')
    parser.add_argument('--logging_steps', type=int, default=50, help='Log every X updates steps.')
    parser.add_argument('--save_steps', type=int, default=1000, help='Save checkpoint every X updates steps.')
    parser.add_argument('--no_cuda', action='store_true', help='Avoid using CUDA when available')
    parser.add_argument('--overwrite_output_dir', action='store_true', help='Overwrite the content of the output directory')
    parser.add_argument('--overwrite_cache', action='store_true', help='Overwrite the cached training and evaluation sets')
    parser.add_argument('--seed', type=int, default=42, help='random seed for initialization')
    parser.add_argument('--fp16', action='store_true', help='Whether to use 16-bit (mixed) precision (through NVIDIA apex) instead of 32-bit')
    parser.add_argument('--local_rank', type=int, default=(- 1), help='For distributed training: local_rank')
    parser.add_argument('--block_rows', type=int, default=32, help='Number of rows in a block')
    parser.add_argument('--block_cols', type=int, default=32, help='Number of cols in a block')
    args = parser.parse_args()
    # Refuse to clobber an existing non-empty output directory unless asked.
    if (os.path.exists(args.output_dir) and os.listdir(args.output_dir) and args.do_train and (not args.overwrite_output_dir)):
        raise ValueError(f'Output directory ({args.output_dir}) already exists and is not empty. Use --overwrite_output_dir to overcome.')
    # ---- Device / distributed setup ----
    # local_rank == -1 means single-process (possibly multi-GPU via DataParallel);
    # otherwise one process per GPU with NCCL distributed training.
    if ((args.local_rank == (- 1)) or args.no_cuda):
        device = torch.device(('cuda' if (torch.cuda.is_available() and (not args.no_cuda)) else 'cpu'))
        args.n_gpu = (0 if args.no_cuda else torch.cuda.device_count())
    else:
        torch.cuda.set_device(args.local_rank)
        device = torch.device('cuda', args.local_rank)
        torch.distributed.init_process_group(backend='nccl')
        args.n_gpu = 1
    args.device = device
    set_seed(args)
    # ---- Task setup ----
    args.task_name = args.task_name.lower()
    if (args.task_name not in processors):
        raise ValueError(('Task not found: %s' % args.task_name))
    processor = processors[args.task_name]()
    args.output_mode = output_modes[args.task_name]
    label_list = processor.get_labels()
    num_labels = len(label_list)
    # Make non-master processes wait so only rank 0 downloads model/vocab.
    if (args.local_rank not in [(- 1), 0]):
        torch.distributed.barrier()
    # ---- Build student config / tokenizer / model ----
    args.model_type = args.model_type.lower()
    (config_class, model_class, tokenizer_class) = MODEL_CLASSES[args.model_type]
    config = config_class.from_pretrained((args.config_name if args.config_name else args.model_name_or_path), num_labels=num_labels, finetuning_task=args.task_name, cache_dir=(args.cache_dir if args.cache_dir else None), pruning_method=args.pruning_method, mask_init=args.mask_init, mask_scale=args.mask_scale, output_attentions=True, output_hidden_states=True)
    tokenizer = tokenizer_class.from_pretrained((args.tokenizer_name if args.tokenizer_name else args.model_name_or_path), cache_dir=(args.cache_dir if args.cache_dir else None), do_lower_case=args.do_lower_case)
    model = model_class.from_pretrained(args.model_name_or_path, from_tf=bool(('.ckpt' in args.model_name_or_path)), config=config, cache_dir=(args.cache_dir if args.cache_dir else None))
    def make_block_pruning(model):
        # Enable block-structured pruning on every masked linear layer for
        # 'mask'-type models; block shape comes from the CLI arguments.
        if ('mask' in args.model_type):
            model._make_structural_pruning([args.block_rows, args.block_cols])
            for module in model.modules():
                if isinstance(module, MaskedLinear):
                    module.enable_block_pruning([args.block_rows, args.block_cols])
        return model
    model = make_block_pruning(model)
    # ---- Optional distillation teacher ----
    if (args.teacher_type is not None):
        assert (args.teacher_name_or_path is not None)
        assert (args.alpha_distil > 0.0)
        assert ((args.alpha_distil + args.alpha_ce) > 0.0)
        (teacher_config_class, teacher_model_class, _) = MODEL_CLASSES[args.teacher_type]
        teacher_config = teacher_config_class.from_pretrained(args.teacher_name_or_path)
        teacher_config.output_attentions = True
        teacher_config.output_hidden_states = True
        teacher = teacher_model_class.from_pretrained(args.teacher_name_or_path, from_tf=False, config=teacher_config, cache_dir=(args.cache_dir if args.cache_dir else None))
        if ('mask' in args.teacher_type):
            teacher._make_structural_pruning([None, None])
        teacher.to(args.device)
    else:
        teacher = None
    # Rank 0 finished downloading; release the other processes.
    if (args.local_rank == 0):
        torch.distributed.barrier()
    model.to(args.device)
    print(model)
    print(f'Training/evaluation parameters {args}')
    # ---- Train, save, and run a final evaluation ----
    if args.do_train:
        train_dataset = load_and_cache_examples(args, args.task_name, tokenizer, evaluate=False)
        (global_step, tr_loss) = train(args, train_dataset, model, tokenizer, teacher=teacher)
    # Only the master process persists the model/tokenizer/arguments.
    if (args.do_train and ((args.local_rank == (- 1)) or (torch.distributed.get_rank() == 0))):
        model_to_save = (model.module if hasattr(model, 'module') else model)
        model_to_save.save_pretrained(args.output_dir)
        tokenizer.save_pretrained(args.output_dir)
        torch.save(args, os.path.join(args.output_dir, 'training_args.bin'))
    tmp_result = evaluate(args, model, tokenizer, prefix='')
    print(f'Final: {tmp_result}')
def _segmentation_trainer_head(args: SharedArgs, head_name: str, training_set: Dataset, class_weights: np.ndarray) -> SegmentationTrainerHead:
    """Build the segmentation trainer head for *head_name*.

    Uses temperature-scaled per-sample weights when
    ``args.segmentation_weight_temperature`` is set (non-zero), otherwise
    plain class weights.
    """
    if args.segmentation_weight_temperature:
        creator = SegmentationWeightCreator(
            training_set, args.segmentation_weight_temperature, class_weights)
    else:
        creator = IdentityWeightCreator(class_weights)
    predictor = _segmentation_predictor_head(
        args, head_name, training_set.num_classes_from_task[Task.SEGMENTATION])
    return SegmentationTrainerHead(predictor, creator)
def _glibc_version_string_ctypes() -> Optional[str]:
try:
import ctypes
except ImportError:
return None
try:
process_namespace = ctypes.CDLL(None)
except OSError:
return None
try:
gnu_get_libc_version = process_namespace.gnu_get_libc_version
except AttributeError:
return None
gnu_get_libc_version.restype = ctypes.c_char_p
version_str: str = gnu_get_libc_version()
if (not isinstance(version_str, str)):
version_str = version_str.decode('ascii')
return version_str |
class Video():
    """Renders a radio program's timeline into an mp4 video with audio.

    Class-level settings (codec, fps, geometry) are read once from the
    module-level ``config`` mapping.
    """
    fourcc = VideoWriter_fourcc(*'mp4v')
    fps = config['fps']
    width = config['width']
    height = config['height']
    # All output files go under ./output.
    output_dir = os.sep.join(['.', 'output'])
    def __init__(self, *args, **kwargs):
        # No state of its own; defer entirely to the base initializer.
        return super().__init__(*args, **kwargs)
    # NOTE(review): defined without ``self``/``@staticmethod`` — apparently
    # meant to be called as ``Video.create_video(radio)``; calling it on an
    # instance would pass the instance as ``radio``. Confirm call sites.
    def create_video(radio: Radio):
        """Write one frame-run per timeline timestamp, then mux the audio.

        Produces ``<radio_id>_temp.mp4`` (video only) and then the final
        ``<radio_id> <title>.mp4`` with the audio track attached.
        """
        if (not os.path.exists(Video.output_dir)):
            print('Folder', Video.output_dir, 'does not exist. Creating...')
            os.makedirs(Video.output_dir)
        video = VideoWriter((((Video.output_dir + os.sep) + str(radio.radio_id)) + '_temp.mp4'), Video.fourcc, Video.fps, (Video.width, Video.height))
        # One clip per interval between consecutive timestamps.
        clip_count = (len(radio.timestamps) - 1)
        for i in range(clip_count):
            if (radio.timestamps[i] not in radio.timeline.keys()):
                # Missing artwork for this timestamp: fall back to the cover.
                print(radio.timestamps[i], 'has no corresponding image, load cover as backup')
                frame = Frame.create_cover(radio)
            else:
                frame = Frame.create_page(radio.timeline[radio.timestamps[i]], radio)
            # Duplicate the still frame for the interval's duration
            # (timestamps are presumably in seconds — TODO confirm).
            frame_count = ((radio.timestamps[(i + 1)] - radio.timestamps[i]) * Video.fps)
            for j in range(frame_count):
                video.write(frame)
        video.release()
        # Re-open the silent video with moviepy and attach the audio track.
        video_clip = VideoFileClip((((Video.output_dir + os.sep) + str(radio.radio_id)) + '_temp.mp4'))
        print(video_clip.duration)
        audio_clip = AudioFileClip(os.sep.join(['.', 'cache', str(radio.radio_id), 'audio', radio.audio.local_name]))
        video_clip.audio = audio_clip
        if config['test']:
            # Test mode: only render the first ~200 seconds.
            video_clip = video_clip.subclip(0, min(200, video_clip.duration))
        video_clip.write_videofile((((((Video.output_dir + os.sep) + str(radio.radio_id)) + ' ') + radio.title) + '.mp4'), fps=Video.fps)
        print('{} finished!'.format(radio.title))
class CopyrightChecker():
    """Checks (and optionally fixes) IBM copyright-year headers in a repo.

    Walks the tree under ``root_dir``, comparing each file's copyright line
    against the year of its last git commit (or the current year for files
    with uncommitted changes), and strips legacy utf-8 coding headers.
    """
    # Legacy coding header to remove and the expected copyright prefix.
    _UTF_STRING = '# -*- coding: utf-8 -*-'
    _COPYRIGHT_STRING = '# (C) Copyright IBM '
    def __init__(self, root_dir: str, check: bool) -> None:
        # check=True: report problems only; check=False: rewrite files in place.
        self._root_dir = root_dir
        self._check = check
        self._current_year = datetime.datetime.now().year
        self._changed_files = self._get_changed_files()
    # NOTE(review): defined without ``self``/``@staticmethod``; works only when
    # called through the class (``CopyrightChecker._exception_to_string(e)``).
    # Not called anywhere in this view — confirm before relying on it.
    def _exception_to_string(excp: Exception) -> str:
        """Format an exception with a trimmed stack trace for diagnostics."""
        stack = (traceback.extract_stack()[:(- 3)] + traceback.extract_tb(excp.__traceback__))
        pretty = traceback.format_list(stack)
        return (''.join(pretty) + f'''
{excp.__class__} {excp}''')
    # NOTE(review): also missing ``self``/``@staticmethod``; invoked below as
    # ``CopyrightChecker._get_year_from_date(...)`` so it works as written.
    def _get_year_from_date(date) -> int:
        """Extract the 4-digit year from an ISO date string; None if unusable."""
        # May return None despite the -> int annotation.
        if ((not date) or (len(date) < 4)):
            return None
        return int(date[:4])
    def _cmd_execute(self, args: List[str]) -> Tuple[(str, Union[(None, str)])]:
        """Run *args* in the repo root and return (stdout, stderr-or-None)."""
        # Build a minimal, locale-neutral environment so git output is stable.
        env = {}
        for k in ['SYSTEMROOT', 'PATH']:
            v = os.environ.get(k)
            if (v is not None):
                env[k] = v
        env['LANGUAGE'] = 'C'
        env['LANG'] = 'C'
        env['LC_ALL'] = 'C'
        with subprocess.Popen(args, cwd=self._root_dir, env=env, stdin=subprocess.DEVNULL, stdout=subprocess.PIPE, stderr=subprocess.PIPE) as popen:
            (out, err) = popen.communicate()
            popen.wait()
        out_str = out.decode('utf-8').strip()
        err_str = err.decode('utf-8').strip()
        err_str = (err_str if err_str else None)
        return (out_str, err_str)
    def _get_changed_files(self) -> List[str]:
        """Return repo-relative paths with uncommitted changes (git diff)."""
        (out_str, err_str) = self._cmd_execute(['git', 'diff', '--name-only', 'HEAD'])
        if err_str:
            raise builtins.Exception(err_str)
        return out_str.splitlines()
    def _get_file_last_year(self, relative_path: str) -> int:
        """Year of the file's most recent git commit; None if undetermined."""
        last_year = None
        errors = []
        try:
            (out_str, err_str) = self._cmd_execute(['git', 'log', '-1', '--format=%cI', relative_path])
            last_year = CopyrightChecker._get_year_from_date(out_str)
            if err_str:
                errors.append(err_str)
        except Exception as ex:
            errors.append(f"'{relative_path}' Last year: {str(ex)}")
        if errors:
            raise ValueError(' - '.join(errors))
        return last_year
    def check_copyright(self, file_path) -> Tuple[(bool, bool, bool)]:
        """Inspect (and in fix mode rewrite) one file's header.

        Returns ``(had_utf8_header, had_wrong_year, has_copyright_header)``;
        in fix mode the first two flags are cleared again after a successful
        rewrite. Binary files (UnicodeDecodeError) are skipped.
        """
        file_with_utf8 = False
        file_with_invalid_year = False
        file_has_header = False
        try:
            new_line = '# (C) Copyright IBM '
            idx_utf8 = (- 1)
            idx_new_line = (- 1)
            file_lines = None
            with open(file_path, 'rt', encoding='utf8') as file:
                file_lines = file.readlines()
            for (idx, line) in enumerate(file_lines):
                relative_path = os.path.relpath(file_path, self._root_dir)
                if line.startswith(CopyrightChecker._UTF_STRING):
                    if self._check:
                        print(f"File contains utf-8 header: '{relative_path}'")
                    file_with_utf8 = True
                    idx_utf8 = idx
                if (not line.startswith(CopyrightChecker._COPYRIGHT_STRING)):
                    continue
                file_has_header = True
                # Collect the 20xx years mentioned on the copyright line.
                curr_years = []
                for word in line.strip().split():
                    for year in word.strip().split(','):
                        if (year.startswith('20') and (len(year) >= 4)):
                            try:
                                curr_years.append(int(year[0:4]))
                            except ValueError:
                                pass
                header_start_year = None
                header_last_year = None
                if (len(curr_years) > 1):
                    header_start_year = curr_years[0]
                    header_last_year = curr_years[1]
                elif (len(curr_years) == 1):
                    header_start_year = header_last_year = curr_years[0]
                # Files with uncommitted changes must carry the current year;
                # otherwise use the year of the last commit.
                if (relative_path in self._changed_files):
                    self._changed_files.remove(relative_path)
                    last_year = self._current_year
                else:
                    last_year = self._get_file_last_year(relative_path)
                if (last_year and (header_last_year != last_year)):
                    # Build the corrected line: "start, current." or "current."
                    if (header_start_year and (header_start_year != last_year)):
                        new_line += f'{header_start_year}, '
                    new_line += f'''{self._current_year}.
'''
                    if self._check:
                        print(f"Wrong Copyright Year:'{relative_path}': ", f"Current:'{line[:(- 1)]}' Correct:'{new_line[:(- 1)]}'")
                    file_with_invalid_year = True
                    idx_new_line = idx
                break
            # Fix mode: rewrite the file when anything needs changing.
            if ((not self._check) and ((idx_utf8 >= 0) or (idx_new_line >= 0))):
                if (idx_new_line >= 0):
                    file_lines[idx_new_line] = new_line
                if (idx_utf8 >= 0):
                    del file_lines[idx_utf8]
                with open(file_path, 'w', encoding='utf8') as file:
                    file.writelines(file_lines)
                if (idx_new_line >= 0):
                    file_with_invalid_year = False
                    print(f'Fixed copyright year for {relative_path}.')
                if (idx_utf8 >= 0):
                    file_with_utf8 = False
                    print(f'Removed utf-8 header for {relative_path}.')
        except UnicodeDecodeError:
            # Not a text file; report whatever was gathered so far.
            return (file_with_utf8, file_with_invalid_year, file_has_header)
        return (file_with_utf8, file_with_invalid_year, file_has_header)
    def check(self) -> Tuple[(int, int, int)]:
        """Run the recursive check from the configured root directory."""
        return self._check_copyright(self._root_dir)
    def _check_copyright(self, path: str) -> Tuple[(int, int, int)]:
        """Recursively tally (utf8-header, wrong-year, has-header) counts."""
        files_with_utf8 = 0
        files_with_invalid_year = 0
        files_with_header = 0
        for item in os.listdir(path):
            fullpath = os.path.join(path, item)
            if os.path.isdir(fullpath):
                # Skip hidden directories such as .git.
                if (not item.startswith('.')):
                    files = self._check_copyright(fullpath)
                    files_with_utf8 += files[0]
                    files_with_invalid_year += files[1]
                    files_with_header += files[2]
                continue
            if os.path.isfile(fullpath):
                (file_with_utf8, file_with_invalid_year, file_has_header) = self.check_copyright(fullpath)
                if file_with_utf8:
                    files_with_utf8 += 1
                if file_with_invalid_year:
                    files_with_invalid_year += 1
                if file_has_header:
                    files_with_header += 1
        return (files_with_utf8, files_with_invalid_year, files_with_header)
class PyOggFLACSource(PyOggSource):
    """Audio source backed by PyOgg's FLAC decoders."""

    def _load_source(self):
        """Open the FLAC stream and derive duration/seek bookkeeping."""
        if self.file:
            self._stream = MemoryFLACFileStream(self.filename, self.file)
        else:
            self._stream = UnclosedFLACFileStream(self.filename)
        self.sample_size = self._stream.bits_per_sample
        self._duration = (self._stream.total_samples / self._stream.frequency)
        total_samples = self._stream.total_samples
        if (total_samples == 0):
            # Without a sample count we cannot translate time -> frame index.
            if _debug:
                warnings.warn(f'Unknown amount of samples found in {self.filename}. Seeking may be limited.')
            self._duration_per_frame = 0
        else:
            self._duration_per_frame = (self._duration / total_samples)

    def seek(self, timestamp):
        """Seek to *timestamp* seconds, clamped to the stream duration."""
        if (not self._stream.seekable):
            warnings.warn(f'Stream is not seekable for FLAC file: {self.filename}.')
            return
        if self._duration_per_frame:
            clamped = max(0.0, min(timestamp, self._duration))
            position = int(clamped / self._duration_per_frame)
        else:
            # Unknown sample count: best effort, rewind to the start.
            position = 0
        seek_succeeded = pyogg.flac.FLAC__stream_decoder_seek_absolute(self._stream.decoder, position)
        if (seek_succeeded is False):
            warnings.warn(f'Failed to seek FLAC file: {self.filename}')
def get_serializer(serializer):
    """Map a serializer name ('json', 'pickle', 'yaml') to its class.

    Falls back to JSON (with a warning) when YAML is requested but PyYAML
    is not installed. Unrecognized names yield None.
    """
    if (serializer == 'json'):
        return JSONSerializer
    if (serializer == 'pickle'):
        return PickleSerializer
    if (serializer == 'yaml'):
        if (yaml is not None):
            return YAMLSerializer
        logger.warning('You must have PyYAML installed to use YAML as the serializer.Switching to JSON as the serializer.')
        return JSONSerializer
def get_reward_mask_and_value(shape: Tuple[(int, int)], token_offset_mapping: List[List[Tuple[(int, int)]]], reward_offset_mapping: List[Mapping[(Tuple[(int, int)], Union[(float, None)])]]) -> Tuple[(np.ndarray, np.ndarray)]:
    """Align reward spans with token spans, row by row.

    Returns a boolean mask of tokens that carry a reward and the matching
    float32 reward values (NaN where no reward applies, as produced by
    ``is_cur_span_match_reward_span``).
    """
    mask = np.zeros(shape, dtype=np.bool_)
    values = np.zeros(shape, dtype=np.float32)
    for row, token_spans in enumerate(token_offset_mapping):
        # Pending reward spans for this row, consumed left-to-right.
        pending_rewards = deque(reward_offset_mapping[row].items())
        for col, (start, end) in enumerate(token_spans):
            assert (start <= end), f'start {start} must be <= end {end}'
            (mask[(row, col)], values[(row, col)]) = is_cur_span_match_reward_span(start, end, pending_rewards)
    # Sanity check: NaN values and the mask must agree elementwise.
    assert ((~ np.isnan(values)) == mask).all(), 'has_reward_mask is not consistent with reward_val'
    return (mask, values)
def _get_earth_fixed_coords(point, unit_vector_x, unit_vector_y, unit_vector_z):
    """Rotate *point* into the earth-fixed frame spanned by three unit vectors.

    Each output component is the linear combination of the point's
    components with the matching components of the basis vectors
    (a change-of-basis transform).
    """
    bx, by, bz = unit_vector_x, unit_vector_y, unit_vector_z
    x = (bx.x * point.x) + (by.x * point.y) + (bz.x * point.z)
    y = (bx.y * point.x) + (by.y * point.y) + (bz.y * point.z)
    z = (bx.z * point.x) + (by.z * point.y) + (bz.z * point.z)
    return Vector3D(x=x, y=y, z=z)
def test_skip_test_with_unicode(pytester: Pytester) -> None:
    """unittest.SkipTest raised in a plain class still reports as skipped."""
    source = " import unittest\n class TestClass():\n def test_io(self):\n raise unittest.SkipTest('')\n "
    pytester.makepyfile(source)
    outcome = pytester.runpytest()
    outcome.stdout.fnmatch_lines(['* 1 skipped *'])
def create_datetime_index(year=None, month=None, day=None, uts=None):
    """Build a pandas DatetimeIndex from arrays of year/month/day/uts.

    Parameters
    ----------
    year : array-like
        Years (required; must be a non-empty iterable).
    month : array-like or None
        Month numbers; defaults to 1 for every entry.
    day : array-like or None
        Day of month, applied as an offset from the 1st (so values larger
        than the month length roll over). Defaults to 1.
    uts : array-like or None
        Seconds of day; defaults to 0.

    Returns
    -------
    pandas.DatetimeIndex

    Raises
    ------
    ValueError
        If ``year`` is not iterable or is empty.
    """
    if (not hasattr(year, '__iter__')):
        raise ValueError('Must provide an iterable for all inputs.')
    if (len(year) == 0):
        raise ValueError('Length of array must be larger than 0.')
    if (month is None):
        month = np.ones(shape=len(year))
    # Fixed: day0 (the all-ones day column used to build the frame) was only
    # assigned inside the ``month is None`` branch, so supplying a month
    # raised NameError when building the DataFrame below.
    day0 = np.ones(shape=len(year))
    if (day is None):
        day = day0
    if (uts is None):
        uts = np.zeros(shape=len(year))
    # Midnight on the 1st of each (year, month), then offset by the requested
    # day-of-month and seconds-of-day.
    df = pds.DataFrame({'year': year, 'month': month, 'day': day0})
    index = pds.DatetimeIndex(pds.to_datetime(df))
    index += (day - 1).astype('timedelta64[D]')
    # Fixed: seconds were multiplied by 0.0 (the offset was always zero);
    # convert seconds -> nanoseconds instead.
    index += (1e9 * uts).astype('timedelta64[ns]')
    return index
class CmdLineApp(cmd2.Cmd):
    """Example cmd2 application with ``cd`` and ``dir`` commands.

    Fixed: the command decorators had been truncated to bare
    ``.with_argument_list`` / ``.with_argparser`` fragments (syntax errors);
    restored the ``@cmd2.`` prefix.
    """

    def __init__(self):
        super().__init__(include_ipy=True)
        self._set_prompt()
        self.intro = 'Happy Day. Note the full Unicode support: '

    def _set_prompt(self):
        """Refresh the prompt to show the current working directory."""
        self.cwd = os.getcwd()
        self.prompt = ansi.style(f'{self.cwd} $ ', fg='cyan')

    def postcmd(self, stop: bool, line: str) -> bool:
        """Re-render the prompt after every command (cwd may have changed)."""
        self._set_prompt()
        return stop

    @cmd2.with_argument_list
    def do_cd(self, arglist):
        """Change the working directory; expects exactly one argument."""
        if ((not arglist) or (len(arglist) != 1)):
            self.perror('cd requires exactly 1 argument:')
            self.do_help('cd')
            self.last_result = 'Bad arguments'
            return
        path = os.path.abspath(os.path.expanduser(arglist[0]))
        err = None
        data = None
        if (not os.path.isdir(path)):
            err = f'{path} is not a directory'
        elif (not os.access(path, os.R_OK)):
            err = f'You do not have read access to {path}'
        else:
            try:
                os.chdir(path)
            except Exception as ex:
                err = f'{ex}'
            else:
                self.poutput(f'Successfully changed directory to {path}')
                data = path
        if err:
            self.perror(err)
        self.last_result = data

    def complete_cd(self, text, line, begidx, endidx):
        """Tab-complete ``cd`` arguments with directories only."""
        return self.path_complete(text, line, begidx, endidx, path_filter=os.path.isdir)

    dir_parser = cmd2.Cmd2ArgumentParser()
    dir_parser.add_argument('-l', '--long', action='store_true', help='display in long format with one item per line')

    @cmd2.with_argparser(dir_parser, with_unknown_args=True)
    def do_dir(self, args, unknown):
        """List the contents of the current directory."""
        if unknown:
            self.perror('dir does not take any positional arguments:')
            self.do_help('dir')
            self.last_result = 'Bad arguments'
            return
        contents = os.listdir(self.cwd)
        for f in contents:
            self.poutput(f'{f}')
        self.poutput('')
        self.last_result = contents
class GrammarSymbol(object):
    """Base symbol for a Pratt (top-down operator precedence) parser.

    ``lbp`` is the left binding power; ``value``/``payload`` carry token
    data. Subclasses override ``nud`` (null denotation) and ``led`` (left
    denotation); the defaults signal a syntax error.
    """

    def __init__(self, lbp=0, value=None, payload=None):
        self.lbp = lbp          # left binding power
        self.value = value      # token value
        self.payload = payload  # extra data attached to the symbol

    def nud(self, parser):
        """Handle the symbol at the start of an expression (default: error)."""
        raise PysmtSyntaxError("Syntax error at token '%s'." % parser.token)

    def led(self, parser, left):
        """Handle the symbol after subexpression *left* (default: error)."""
        raise PysmtSyntaxError("Syntax error at token '%s' (Read: '%s')." % (parser.token, left))
def multistart_hyperparameter_optimization(log_likelihood_optimizer, num_multistarts, randomness=None, max_num_threads=DEFAULT_MAX_NUM_THREADS, status=None):
    """Optimize GP hyperparameters from multiple random starts via the C++ backend.

    :param log_likelihood_optimizer: optimizer wrapper holding the log
        likelihood objective and the hyperparameter search domain
    :param num_multistarts: number of random restarts (NOTE(review): not used
        directly here — presumably encoded in ``optimizer_parameters``; confirm)
    :param randomness: C++ RandomnessSourceContainer; when omitted a fresh one
        with fixed seeds is created so results are reproducible
    :param max_num_threads: maximum number of threads for the C++ optimizer
    :param status: optional dict the C++ call populates with exit information
    :return: numpy array of the optimized hyperparameters
    """
    if (randomness is None):
        randomness = C_GP.RandomnessSourceContainer(max_num_threads)
        # Deterministic default seeds for reproducible runs.
        randomness.SetRandomizedUniformGeneratorSeed(0)
        randomness.SetRandomizedNormalRNGSeed(0)
    if (status is None):
        status = {}
    # The C++ optimizer works on the hyperparameter domain in log10 space.
    domain_bounds_log10 = numpy.log10(log_likelihood_optimizer.domain._domain_bounds)
    hyperparameters_opt = C_GP.multistart_hyperparameter_optimization(log_likelihood_optimizer.optimizer_parameters, cpp_utils.cppify(domain_bounds_log10), cpp_utils.cppify(log_likelihood_optimizer.objective_function._points_sampled), cpp_utils.cppify(log_likelihood_optimizer.objective_function._points_sampled_value), log_likelihood_optimizer.objective_function.dim, log_likelihood_optimizer.objective_function._num_sampled, cpp_utils.cppify_hyperparameters(log_likelihood_optimizer.objective_function.cov_hyperparameters), cpp_utils.cppify(log_likelihood_optimizer.objective_function.noise_variance), cpp_utils.cppify(log_likelihood_optimizer.objective_function.derivatives), log_likelihood_optimizer.objective_function.num_derivatives, max_num_threads, randomness, status)
    return numpy.array(hyperparameters_opt)
def _evaluate_goal_directed_benchmarks(goal_directed_molecule_generator: GoalDirectedGenerator, benchmarks: List[GoalDirectedBenchmark]) -> List[GoalDirectedBenchmarkResult]:
    """Run every goal-directed benchmark against the generator.

    Logs progress, score, execution time and metadata for each benchmark
    and returns the collected results in order.
    """
    total = len(benchmarks)
    logger.info(f'Number of benchmarks: {total}')
    results = []
    for idx, bench in enumerate(benchmarks, 1):
        logger.info(f'Running benchmark {idx}/{total}: {bench.name}')
        outcome = bench.assess_model(goal_directed_molecule_generator)
        logger.info(f'Results for the benchmark "{outcome.benchmark_name}":')
        logger.info(f' Score: {outcome.score:.6f}')
        logger.info(f' Execution time: {str(datetime.timedelta(seconds=int(outcome.execution_time)))}')
        logger.info(f' Metadata: {outcome.metadata}')
        results.append(outcome)
    logger.info('Finished execution of the benchmarks')
    return results
class Chat(Object):
def __init__(self, *, client: 'pyrogram.Client'=None, id: int, type: 'enums.ChatType', is_verified: bool=None, is_restricted: bool=None, is_creator: bool=None, is_scam: bool=None, is_fake: bool=None, is_support: bool=None, title: str=None, username: str=None, first_name: str=None, last_name: str=None, photo: 'types.ChatPhoto'=None, bio: str=None, description: str=None, dc_id: int=None, has_protected_content: bool=None, invite_link: str=None, pinned_message=None, sticker_set_name: str=None, can_set_sticker_set: bool=None, members_count: int=None, restrictions: List['types.Restriction']=None, permissions: 'types.ChatPermissions'=None, distance: int=None, linked_chat: 'types.Chat'=None, send_as_chat: 'types.Chat'=None, available_reactions: Optional['types.ChatReactions']=None):
super().__init__(client)
self.id = id
self.type = type
self.is_verified = is_verified
self.is_restricted = is_restricted
self.is_creator = is_creator
self.is_scam = is_scam
self.is_fake = is_fake
self.is_support = is_support
self.title = title
self.username = username
self.first_name = first_name
self.last_name = last_name
self.photo = photo
self.bio = bio
self.description = description
self.dc_id = dc_id
self.has_protected_content = has_protected_content
self.invite_link = invite_link
self.pinned_message = pinned_message
self.sticker_set_name = sticker_set_name
self.can_set_sticker_set = can_set_sticker_set
self.members_count = members_count
self.restrictions = restrictions
self.permissions = permissions
self.distance = distance
self.linked_chat = linked_chat
self.send_as_chat = send_as_chat
self.available_reactions = available_reactions
def _parse_user_chat(client, user: raw.types.User) -> 'Chat':
peer_id = user.id
return Chat(id=peer_id, type=(enums.ChatType.BOT if user.bot else enums.ChatType.PRIVATE), is_verified=getattr(user, 'verified', None), is_restricted=getattr(user, 'restricted', None), is_scam=getattr(user, 'scam', None), is_fake=getattr(user, 'fake', None), is_support=getattr(user, 'support', None), username=user.username, first_name=user.first_name, last_name=user.last_name, photo=types.ChatPhoto._parse(client, user.photo, peer_id, user.access_hash), restrictions=(types.List([types.Restriction._parse(r) for r in user.restriction_reason]) or None), dc_id=getattr(getattr(user, 'photo', None), 'dc_id', None), client=client)
def _parse_chat_chat(client, chat: raw.types.Chat) -> 'Chat':
peer_id = (- chat.id)
return Chat(id=peer_id, type=enums.ChatType.GROUP, title=chat.title, is_creator=getattr(chat, 'creator', None), photo=types.ChatPhoto._parse(client, getattr(chat, 'photo', None), peer_id, 0), permissions=types.ChatPermissions._parse(getattr(chat, 'default_banned_rights', None)), members_count=getattr(chat, 'participants_count', None), dc_id=getattr(getattr(chat, 'photo', None), 'dc_id', None), has_protected_content=getattr(chat, 'noforwards', None), client=client)
def _parse_channel_chat(client, channel: raw.types.Channel) -> 'Chat':
peer_id = utils.get_channel_id(channel.id)
restriction_reason = getattr(channel, 'restriction_reason', [])
return Chat(id=peer_id, type=(enums.ChatType.SUPERGROUP if getattr(channel, 'megagroup', None) else enums.ChatType.CHANNEL), is_verified=getattr(channel, 'verified', None), is_restricted=getattr(channel, 'restricted', None), is_creator=getattr(channel, 'creator', None), is_scam=getattr(channel, 'scam', None), is_fake=getattr(channel, 'fake', None), title=channel.title, username=getattr(channel, 'username', None), photo=types.ChatPhoto._parse(client, getattr(channel, 'photo', None), peer_id, getattr(channel, 'access_hash', 0)), restrictions=(types.List([types.Restriction._parse(r) for r in restriction_reason]) or None), permissions=types.ChatPermissions._parse(getattr(channel, 'default_banned_rights', None)), members_count=getattr(channel, 'participants_count', None), dc_id=getattr(getattr(channel, 'photo', None), 'dc_id', None), has_protected_content=getattr(channel, 'noforwards', None), client=client)
def _parse(client, message: Union[raw.types.Message, raw.types.MessageService], users: dict, chats: dict, is_chat: bool) -> 'Chat':
    """Resolve the Chat a message belongs to (or was sent by).

    ``is_chat`` selects whether the destination peer or the sender takes
    precedence when picking the id to look up.
    """
    sender_id = utils.get_raw_peer_id(message.from_id)
    target_id = utils.get_raw_peer_id(message.peer_id)
    chat_id = (target_id or sender_id) if is_chat else (sender_id or target_id)

    peer = message.peer_id
    if isinstance(peer, raw.types.PeerUser):
        return Chat._parse_user_chat(client, users[chat_id])
    if isinstance(peer, raw.types.PeerChat):
        return Chat._parse_chat_chat(client, chats[chat_id])
    return Chat._parse_channel_chat(client, chats[chat_id])
def _parse_dialog(client, peer, users: dict, chats: dict):
    """Resolve a dialog's peer into the matching Chat object."""
    if isinstance(peer, raw.types.PeerUser):
        return Chat._parse_user_chat(client, users[peer.user_id])
    if isinstance(peer, raw.types.PeerChat):
        return Chat._parse_chat_chat(client, chats[peer.chat_id])
    # Anything else is a channel/supergroup peer.
    return Chat._parse_channel_chat(client, chats[peer.channel_id])
async def _parse_full(client, chat_full: Union[(raw.types.messages.ChatFull, raw.types.users.UserFull)]) -> 'Chat':
    """Build a fully-populated Chat from a raw *Full object.

    Also resolves pinned messages (extra RPC via ``client.get_messages``),
    linked chats, the default "send as" identity, invite link and the
    allowed reactions, where the raw object carries them.
    """
    # Index the side-loaded users/chats for O(1) peer resolution below.
    users = {u.id: u for u in chat_full.users}
    chats = {c.id: c for c in chat_full.chats}
    if isinstance(chat_full, raw.types.users.UserFull):
        # Private chat with a single user.
        full_user = chat_full.full_user
        parsed_chat = Chat._parse_user_chat(client, users[full_user.id])
        parsed_chat.bio = full_user.about
        if full_user.pinned_msg_id:
            parsed_chat.pinned_message = (await client.get_messages(parsed_chat.id, message_ids=full_user.pinned_msg_id))
    else:
        full_chat = chat_full.full_chat
        chat_raw = chats[full_chat.id]
        if isinstance(full_chat, raw.types.ChatFull):
            # Basic (legacy) group.
            parsed_chat = Chat._parse_chat_chat(client, chat_raw)
            parsed_chat.description = (full_chat.about or None)
            if isinstance(full_chat.participants, raw.types.ChatParticipants):
                parsed_chat.members_count = len(full_chat.participants.participants)
        else:
            # Channel / supergroup.
            parsed_chat = Chat._parse_channel_chat(client, chat_raw)
            parsed_chat.members_count = full_chat.participants_count
            parsed_chat.description = (full_chat.about or None)
            parsed_chat.can_set_sticker_set = full_chat.can_set_stickers
            parsed_chat.sticker_set_name = getattr(full_chat.stickerset, 'short_name', None)
            linked_chat_raw = chats.get(full_chat.linked_chat_id, None)
            if linked_chat_raw:
                parsed_chat.linked_chat = Chat._parse_channel_chat(client, linked_chat_raw)
            default_send_as = full_chat.default_send_as
            if default_send_as:
                # The "send as" peer may be either a user or a channel identity.
                if isinstance(default_send_as, raw.types.PeerUser):
                    send_as_raw = users[default_send_as.user_id]
                else:
                    send_as_raw = chats[default_send_as.channel_id]
                parsed_chat.send_as_chat = Chat._parse_chat(client, send_as_raw)
        if full_chat.pinned_msg_id:
            parsed_chat.pinned_message = (await client.get_messages(parsed_chat.id, message_ids=full_chat.pinned_msg_id))
        if isinstance(full_chat.exported_invite, raw.types.ChatInviteExported):
            parsed_chat.invite_link = full_chat.exported_invite.link
        parsed_chat.available_reactions = types.ChatReactions._parse(client, full_chat.available_reactions)
    return parsed_chat
def _parse_chat(client, chat: Union[raw.types.Chat, raw.types.User, raw.types.Channel]) -> 'Chat':
    """Dispatch a raw peer object to the matching Chat parser."""
    # The three raw peer classes are mutually exclusive, so check order
    # does not matter; users first, then basic groups, else channels.
    if isinstance(chat, raw.types.User):
        return Chat._parse_user_chat(client, chat)
    if isinstance(chat, raw.types.Chat):
        return Chat._parse_chat_chat(client, chat)
    return Chat._parse_channel_chat(client, chat)
async def archive(self):
    """Archive this chat. Shortcut for ``Client.archive_chats``."""
    return (await self._client.archive_chats(self.id))
async def unarchive(self):
    """Unarchive this chat. Shortcut for ``Client.unarchive_chats``."""
    return (await self._client.unarchive_chats(self.id))
async def set_title(self, title: str) -> bool:
    """Change the chat title. Shortcut for ``Client.set_chat_title``."""
    return (await self._client.set_chat_title(chat_id=self.id, title=title))
async def set_description(self, description: str) -> bool:
    """Change the chat description. Shortcut for ``Client.set_chat_description``."""
    return (await self._client.set_chat_description(chat_id=self.id, description=description))
async def set_photo(self, *, photo: Union[(str, BinaryIO)]=None, video: Union[(str, BinaryIO)]=None, video_start_ts: float=None) -> bool:
    """Set a new chat photo or video. Shortcut for ``Client.set_chat_photo``."""
    return (await self._client.set_chat_photo(chat_id=self.id, photo=photo, video=video, video_start_ts=video_start_ts))
async def ban_member(self, user_id: Union[(int, str)], until_date: datetime=utils.zero_datetime()) -> Union[('types.Message', bool)]:
    """Ban a member, optionally until a given date. Shortcut for ``Client.ban_chat_member``."""
    return (await self._client.ban_chat_member(chat_id=self.id, user_id=user_id, until_date=until_date))
async def unban_member(self, user_id: Union[(int, str)]) -> bool:
    """Lift a ban from a member. Shortcut for ``Client.unban_chat_member``."""
    return (await self._client.unban_chat_member(chat_id=self.id, user_id=user_id))
async def restrict_member(self, user_id: Union[(int, str)], permissions: 'types.ChatPermissions', until_date: datetime=utils.zero_datetime()) -> 'types.Chat':
    """Restrict a member's permissions. Shortcut for ``Client.restrict_chat_member``."""
    return (await self._client.restrict_chat_member(chat_id=self.id, user_id=user_id, permissions=permissions, until_date=until_date))
async def promote_member(self, user_id: Union[(int, str)], privileges: 'types.ChatPrivileges'=None) -> bool:
    """Promote or demote an administrator. Shortcut for ``Client.promote_chat_member``."""
    return (await self._client.promote_chat_member(chat_id=self.id, user_id=user_id, privileges=privileges))
async def join(self):
    """Join this chat (by username when available, else by id)."""
    return (await self._client.join_chat((self.username or self.id)))
async def leave(self):
    """Leave this chat. Shortcut for ``Client.leave_chat``."""
    return (await self._client.leave_chat(self.id))
async def export_invite_link(self):
    """Generate a new primary invite link. Shortcut for ``Client.export_chat_invite_link``."""
    return (await self._client.export_chat_invite_link(self.id))
async def get_member(self, user_id: Union[(int, str)]) -> 'types.ChatMember':
    """Get information about one chat member."""
    return (await self._client.get_chat_member(self.id, user_id=user_id))
def get_members(self, query: str='', limit: int=0, filter: 'enums.ChatMembersFilter'=enums.ChatMembersFilter.SEARCH) -> Optional[AsyncGenerator[('types.ChatMember', None)]]:
    """Iterate over chat members (returns an async generator, not a coroutine)."""
    return self._client.get_chat_members(self.id, query=query, limit=limit, filter=filter)
async def add_members(self, user_ids: Union[(Union[(int, str)], List[Union[(int, str)]])], forward_limit: int=100) -> bool:
    """Add one or more users to this chat."""
    return (await self._client.add_chat_members(self.id, user_ids=user_ids, forward_limit=forward_limit))
async def mark_unread(self) -> bool:
    """Mark this chat as unread."""
    return (await self._client.mark_chat_unread(self.id))
async def set_protected_content(self, enabled: bool) -> bool:
    """Enable or disable content protection (forward/save restrictions)."""
    return (await self._client.set_chat_protected_content(self.id, enabled=enabled))
async def unpin_all_messages(self) -> bool:
    """Unpin every pinned message in this chat."""
    return (await self._client.unpin_all_chat_messages(self.id))
_module()
class TransformerEncoder(BaseModule):
    """Stack of self-attention encoder layers applied over a 2D feature map.

    The (n, c, h, w) feature map is flattened to a sequence of h*w tokens,
    position-encoded, run through the encoder stack, and folded back.
    """

    def __init__(self, n_layers=2, n_head=8, d_model=512, d_inner=2048, dropout=0.1, max_len=8 * 32, init_cfg=None):
        super().__init__(init_cfg=init_cfg)
        assert d_model % n_head == 0, 'd_model must be divisible by n_head'
        self.pos_encoder = PositionalEncoding(d_model, n_position=max_len)
        # Build one template layer and deep-copy it per stack position.
        template = BaseTransformerLayer(
            operation_order=('self_attn', 'norm', 'ffn', 'norm'),
            attn_cfgs=dict(type='MultiheadAttention', embed_dims=d_model, num_heads=n_head, attn_drop=dropout, dropout_layer=dict(type='Dropout', drop_prob=dropout)),
            ffn_cfgs=dict(type='FFN', embed_dims=d_model, feedforward_channels=d_inner, ffn_drop=dropout),
            norm_cfg=dict(type='LN'),
        )
        self.transformer = ModuleList([copy.deepcopy(template) for _ in range(n_layers)])

    def forward(self, feature):
        """Encode a (n, c, h, w) map; returns a tensor of the same shape."""
        n, c, h, w = feature.shape
        x = feature.view(n, c, -1).transpose(1, 2)  # (n, h*w, c)
        x = self.pos_encoder(x)
        x = x.transpose(0, 1)  # (h*w, n, c) — sequence-first for the layers
        for layer in self.transformer:
            x = layer(x)
        return x.permute(1, 2, 0).view(n, c, h, w)
class SendSubmissionInput(BaseSubmissionInput):
    """GraphQL input type for submitting a new conference proposal."""

    # Target conference and the core proposal content.
    conference: ID
    title: MultiLingualInput
    abstract: MultiLingualInput
    languages: list[ID]
    type: ID
    duration: ID
    elevator_pitch: MultiLingualInput
    notes: str
    audience_level: ID
    short_social_summary: str
    # Speaker profile fields.
    speaker_bio: str
    speaker_photo: str
    speaker_website: str
    speaker_level: str
    previous_talk_video: str
    # Social handles / profile links.
    speaker_twitter_handle: str
    speaker_instagram_handle: str
    speaker_linkedin_url: str
    speaker_facebook_url: str
    speaker_mastodon_handle: str
    # Optional classification; defaults keep these fields omittable.
    topic: Optional[ID] = strawberry.field(default=None)
    tags: list[ID] = strawberry.field(default_factory=list)
class LogoutWorker(QThread):
    """Background thread that logs the user out of the cloud drive."""

    # Emitted on successful logout when update_ui is True.
    succeeded = pyqtSignal()
    # (text, timeout_ms) status message for the UI status bar.
    msg = pyqtSignal(str, int)

    def __init__(self, parent=None):
        super(LogoutWorker, self).__init__(parent)
        self._disk = None  # LanZouCloud client, injected via set_disk()
        self.update_ui = True
        self._mutex = QMutex()  # guards _is_work (single-flight flag)
        self._is_work = False

    def set_disk(self, disk):
        """Inject the cloud-drive client used to perform the logout."""
        self._disk = disk

    def set_values(self, update_ui=True):
        """Configure whether to update the UI, then start the thread."""
        self.update_ui = update_ui
        self.start()

    def __del__(self):
        # Block until the thread finishes so it is not destroyed mid-run.
        self.wait()

    def stop(self):
        """Clear the work flag so a queued run is treated as busy/no-op."""
        self._mutex.lock()
        self._is_work = False
        self._mutex.unlock()

    def run(self):
        # Single-flight: refuse to start a second logout while one runs.
        if (not self._is_work):
            self._mutex.lock()
            self._is_work = True
            try:
                res = self._disk.logout()
                if (res == LanZouCloud.SUCCESS):
                    if self.update_ui:
                        self.succeeded.emit()
                    # NOTE(review): the user-facing message texts below look
                    # garbled/lost in this copy of the file — verify upstream.
                    self.msg.emit('!', 4000)
                else:
                    self.msg.emit(',!', 5000)
            except TimeoutError:
                self.msg.emit(',!', 6000)
            except Exception as e:
                logger.error(f'LogoutWorker error: e={e}')
            self._is_work = False
            self._mutex.unlock()
        else:
            self.msg.emit(',!', 3100)
class SophiaLexer(RegexLexer):
    """Pygments lexer for Sophia, the æternity smart-contract language."""

    name = 'Sophia'
    aliases = ['sophia']
    filenames = ['*.aes']
    mimetypes = []
    # FIX: the url string literal was truncated/unterminated in this copy
    # (`url = '`), which is a syntax error. Restored the Sophia docs URL.
    url = 'https://docs.aeternity.com/aesophia'
    version_added = '2.11'
    keywords = ('contract', 'include', 'let', 'switch', 'type', 'record', 'datatype', 'if', 'elif', 'else', 'function', 'stateful', 'payable', 'public', 'entrypoint', 'private', 'indexed', 'namespace', 'interface', 'main', 'using', 'as', 'for', 'hiding')
    builtins = ('state', 'put', 'abort', 'require')
    word_operators = ('mod', 'band', 'bor', 'bxor', 'bnot')
    primitive_types = ('int', 'address', 'bool', 'bits', 'bytes', 'string', 'list', 'option', 'char', 'unit', 'map', 'event', 'hash', 'signature', 'oracle', 'oracle_query')
    tokens = {'escape-sequence': [('\\\\[\\\\"\\\'ntbr]', String.Escape), ('\\\\[0-9]{3}', String.Escape), ('\\\\x[0-9a-fA-F]{2}', String.Escape)], 'root': [('\\s+', Text.Whitespace), ('(true|false)\\b', Keyword.Constant), ("\\b([A-Z][\\w\\']*)(?=\\s*\\.)", Name.Class, 'dotted'), ("\\b([A-Z][\\w\\']*)", Name.Function), ('//.*?\\n', Comment.Single), ('\\/\\*(?!/)', Comment.Multiline, 'comment'), ('0[xX][\\da-fA-F][\\da-fA-F_]*', Number.Hex), ('#[\\da-fA-F][\\da-fA-F_]*', Name.Label), ('\\d[\\d_]*', Number.Integer), (words(keywords, suffix='\\b'), Keyword), (words(builtins, suffix='\\b'), Name.Builtin), (words(word_operators, prefix='\\b', suffix='\\b'), Operator.Word), (words(primitive_types, prefix='\\b', suffix='\\b'), Keyword.Type), ('[=!<>+\\\\*/:&|?~^-]', Operator.Word), ('[.;:{}(),\\[\\]]', Punctuation), ("(ak_|ok_|oq_|ct_)[\\w']*", Name.Label), ("[^\\W\\d][\\w']*", Name), ('\'(?:(\\\\[\\\\\\"\'ntbr ])|(\\\\[0-9]{3})|(\\\\x[0-9a-fA-F]{2}))\'', String.Char), ("'.'", String.Char), ("'[a-z][\\w]*", Name.Variable), ('"', String.Double, 'string')], 'comment': [('[^/*]+', Comment.Multiline), ('\\/\\*', Comment.Multiline, '#push'), ('\\*\\/', Comment.Multiline, '#pop'), ('\\*', Comment.Multiline)], 'string': [('[^\\\\"]+', String.Double), include('escape-sequence'), ('\\\\\\n', String.Double), ('"', String.Double, '#pop')], 'dotted': [('\\s+', Text), ('\\.', Punctuation), ("[A-Z][\\w\\']*(?=\\s*\\.)", Name.Function), ("[A-Z][\\w\\']*", Name.Function, '#pop'), ("[a-z_][\\w\\']*", Name, '#pop'), default('#pop')]}
_config
def test_togroup_toggle(manager):
    """togroup with toggle=True bounces the window back to its last group."""

    def state():
        # (current group name, focus in 'a', focus in 'b')
        groups = manager.c.get_groups()
        return (manager.c.group.info()['name'], groups['a']['focus'], groups['b']['focus'])

    manager.test_window('one')
    assert state() == ('a', 'one', None)

    # Plain togroup with switch_group follows the window to 'b'.
    manager.c.window.togroup('b', switch_group=True)
    assert state() == ('b', None, 'one')

    # Repeating without toggle is a no-op: the window stays in 'b'.
    manager.c.window.togroup('b', switch_group=True)
    assert state() == ('b', None, 'one')

    # With toggle=True the same call sends the window back to 'a'.
    manager.c.window.togroup('b', switch_group=True, toggle=True)
    assert state() == ('a', 'one', None)

    # Two toggles cancel out.
    manager.c.window.togroup('b', switch_group=True, toggle=True)
    manager.c.window.togroup('b', switch_group=True, toggle=True)
    assert state() == ('a', 'one', None)

    # toggle without switch_group moves the window but keeps focus on 'a'.
    manager.c.window.togroup('b', toggle=True)
    assert state() == ('a', None, 'one')
class CO2Corrector(ModifierBase):
    """CO2 correction of the 3.9 um channel using the 10.8 and 13.4 um bands."""

    def __call__(self, projectables, optional_datasets=None, **info):
        """Return the CO2-corrected 3.9 um brightness temperatures."""
        bt039, bt108, bt134 = projectables
        logger.info('Applying CO2 correction')
        # Approximate CO2 absorption from the 10.8/13.4 um split window.
        dt_co2 = (bt108 - bt134) / 4.0
        radiance_correction = bt108 ** 4 - (bt108 - dt_co2) ** 4
        # Apply in radiance space (T^4), clip negatives, convert back.
        corrected = (bt039 ** 4 + radiance_correction).clip(0.0) ** 0.25
        corrected.attrs = bt039.attrs.copy()
        self.apply_modifier_info(bt039, corrected)
        return corrected
class F17_Iscsi(RHEL6_Iscsi):
    """Fedora 17 variant of the iscsi kickstart command."""

    def _getParser(self):
        op = super(F17_Iscsi, self)._getParser()
        # Rebrand the --iface help text from the RHEL6 version string to F17;
        # everything else is inherited unchanged.
        for action in op._actions:
            if ('--iface' in action.option_strings):
                action.help = action.help.replace(versionToLongString(RHEL6), versionToLongString(F17))
        return op
def process_prediction(action, objects, pad, vocab_action, clean_special_tokens, predict_object=True):
    """Trim padding (and optionally stop tokens) from a predicted action
    sequence, decode it to words, and extract per-step object predictions."""

    def truncate_at(tokens, obj_seq, marker):
        # Cut both sequences at the first occurrence of ``marker``, if any.
        if marker in tokens:
            cut = tokens.index(marker)
            return tokens[:cut], obj_seq[:cut]
        return tokens, obj_seq

    action, objects = truncate_at(action, objects, pad)
    if clean_special_tokens:
        action, objects = truncate_at(action, objects, vocab_action.word2index('<<stop>>'))

    words = vocab_action.index2word(action)
    # argmax over the class dimension after adding a batch axis.
    pred_object = objects[None].max(2)[1].cpu().numpy() if predict_object else None
    return {'action': ' '.join(words), 'object': pred_object}
def test_successful_handshake() -> None:
    """A complete client/server WebSocket handshake leaves both sides OPEN."""
    client = H11Handshake(CLIENT)
    server = H11Handshake(SERVER)

    request_bytes = client.send(Request(host='localhost', target='/'))
    server.receive_data(request_bytes)
    assert isinstance(next(server.events()), Request)

    accept_bytes = server.send(AcceptConnection())
    client.receive_data(accept_bytes)
    assert isinstance(next(client.events()), AcceptConnection)

    assert client.state is ConnectionState.OPEN
    assert server.state is ConnectionState.OPEN
    assert repr(client) == 'H11Handshake(client=True, state=ConnectionState.OPEN)'
    assert repr(server) == 'H11Handshake(client=False, state=ConnectionState.OPEN)'
class OpenDataStartTab(QtWidgets.QWidget):
    """Landing tab telling the user how to open data files (click or drag&drop)."""

    def __init__(self, *args, m=None, **kwargs):
        super().__init__(*args, **kwargs)
        self.m = m  # the EOmaps Maps object this tab operates on
        # Inline <img> tag for the "open" toolbar icon, reused in help texts.
        icon = (iconpath / 'open.png')
        self.b_str = f"<img src={icon} height=20 style='display: inline; vertical-align:bottom;'></img>"
        self.t1 = QtWidgets.QLabel()
        self.t1.setText(f'<h3>Click on {self.b_str} or DRAG & DROP to plot data from files!</h3><p>Supported filetypes:<ul><li>NetCDF: <code>[.nc]<code></li><li>GeoTIFF: <code>[.tif, .tiff]<code></li><li>CSV: <code>[.csv]<code></li><li>Shapefile: <code>[.shp]<code></li></ul>')
        layout = QtWidgets.QVBoxLayout()
        layout.addSpacing(10)
        layout.addWidget(self.t1)
        layout.setAlignment((Qt.AlignCenter | Qt.AlignTop))
        self.setLayout(layout)
        # Accept drag&drop so files can be dropped directly onto the tab.
        self.setAcceptDrops(True)

    def enterEvent(self, e):
        # Show the extended tooltip only when the global "help" mode is on.
        if (self.window().showhelp is True):
            QtWidgets.QToolTip.showText(e.globalPos(), f'<h3>Plot Data from Files</h3>Click on {self.b_str} or simply drag-and-drop one of the supported filetypes to get a popup window where you can specify how you want to visualize the data.<p>Supported filetypes:<ul><li>NetCDF: <code>[.nc]<code></li><li>GeoTIFF: <code>[.tif, .tiff]<code></li><li>CSV: <code>[.csv]<code></li><li>Shapefile: <code>[.shp]<code></li></ul><b>NOTE:</b> This capability is primarily intended as an easy way to get a <i>quick-look</i> at some data for comparison. It does not provide access to all plotting features of EOmaps!<p>Some additional notes:<ul><li>Make sure that the projection of the data-coordinates has been identified correctly prior to plotting!</li><li>Be aware that re-projecting large datasets might take quite some time and can require a lot of memory!</li></ul>')
.skipif(PYPY, reason='garbage-collection differences make this flaky')
.filterwarnings('default::pytest.PytestUnraisableExceptionWarning')
def test_unraisable_in_teardown(pytester: Pytester) -> None:
    """An exception raised in __del__ during fixture teardown is reported as a
    PytestUnraisableExceptionWarning instead of failing the run.

    NOTE(review): the embedded test file looks like it lost a ``@pytest.fixture``
    decorator line during extraction — confirm against the upstream source.
    """
    pytester.makepyfile(test_it='\n import pytest\n\n class BrokenDel:\n def __del__(self):\n raise ValueError("del is broken")\n\n \n def broken_del():\n yield\n obj = BrokenDel()\n del obj\n\n def test_it(broken_del): pass\n def test_2(): pass\n ')
    result = pytester.runpytest()
    # Both tests pass; the unraisable error surfaces as exactly one warning.
    assert (result.ret == 0)
    assert (result.parseoutcomes() == {'passed': 2, 'warnings': 1})
    result.stdout.fnmatch_lines(['*= warnings summary =*', 'test_it.py::test_it', ' * PytestUnraisableExceptionWarning: Exception ignored in: <function BrokenDel.__del__ at *>', ' ', ' Traceback (most recent call last):', ' ValueError: del is broken', ' ', ' warnings.warn(pytest.PytestUnraisableExceptionWarning(msg))'])
def table_row_wise(host_index: int) -> ParameterShardingGenerator:
    """Return a sharding generator that places all row-wise shards of a table
    on the ranks of a single host (``host_index``)."""

    def _parameter_sharding_generator(param: nn.Parameter, local_size: int, world_size: int, device_type: str, sharder: ModuleSharder[nn.Module]) -> ParameterSharding:
        size_and_offsets = _get_parameter_size_offsets(param, ShardingType.TABLE_ROW_WISE, local_size, world_size)
        assert len(size_and_offsets) <= local_size
        # Ranks are contiguous per host; this host owns [base, base + local_size).
        base_rank = host_index * local_size
        # zip() with range() deliberately caps the shard count at local_size.
        size_offset_ranks = [
            (size, offset, base_rank + local_rank)
            for ((size, offset), local_rank) in zip(size_and_offsets, range(local_size))
        ]
        return _get_parameter_sharding(param, ShardingType.TABLE_ROW_WISE.value, size_offset_ranks, local_size, device_type, sharder)

    return _parameter_sharding_generator
class SemiSupervisedTinyImagenet(SemiSupervisedDataset):
    """Semi-supervised wrapper around the Tiny-ImageNet dataset."""

    def load_base_dataset(self, train=False, **kwargs):
        """Load the underlying TinyImagenet split ('train' or 'val')."""
        assert (self.base_dataset == 'tiny-imagenet'), 'Only semi-supervised tiny-imagenet is supported. Please use correct dataset!'
        split = 'train' if train else 'val'
        self.dataset = TinyImagenet(split=split, **kwargs)
        self.dataset_size = len(self.dataset)
def list_paths_with_dangerous_command(sub_parsers):
    """Register the ``list-dangerous-usage`` sub-command on ``sub_parsers``."""
    dangerous_parser: ArgumentParser = sub_parsers.add_parser(
        'list-dangerous-usage',
        help='List all connections that needs a resource to be missing.',
        formatter_class=argparse.MetavarTypeHelpFormatter,
    )
    dangerous_parser.add_argument(
        '--print-only-area',
        help='Only print the area names, not each specific path',
        action='store_true',
    )
    # Dispatch to the implementation when this sub-command is selected.
    dangerous_parser.set_defaults(func=list_paths_with_dangerous_logic)
class ParetoRV(ScipyRandomVariable):
    """Pareto random variable backed by ``scipy.stats.pareto``."""

    name = 'pareto'
    ndim_supp = 0           # scalar support
    ndims_params = [0, 0]   # b (shape) and scale are both scalars
    dtype = 'floatX'
    _print_name = ('Pareto', '\\operatorname{Pareto}')

    def __call__(self, b, scale=1.0, size=None, **kwargs):
        """Create a Pareto RV with shape ``b`` and ``scale`` (default 1.0)."""
        return super().__call__(b, scale, size=size, **kwargs)

    # NOTE(review): first parameter is ``cls`` but no @classmethod decorator is
    # visible in this copy — decorators appear stripped elsewhere in the file;
    # confirm against the upstream source before relying on call semantics.
    def rng_fn_scipy(cls, rng, b, scale, size):
        """Draw samples via scipy, seeded with the provided ``rng``."""
        return stats.pareto.rvs(b, scale=scale, size=size, random_state=rng)
class CamRender(Render):
    """Interactive GLUT renderer driven by a Camera, with keyboard navigation."""

    def __init__(self, width=1600, height=1200, name='Cam Renderer', program_files=None, color_size=1):
        # FIX: the default shader list was a mutable default argument
        # (shared across all instances); use a None sentinel instead.
        # Explicit callers are unaffected.
        if program_files is None:
            program_files = ['simple.fs', 'simple.vs']
        Render.__init__(self, width, height, name, program_files, color_size)
        self.camera = None  # attach via set_camera() before rendering
        # Register GLUT callbacks for drawing and keyboard input.
        glutDisplayFunc(self.display)
        glutKeyboardFunc(self.keyboard)

    def set_camera(self, camera):
        """Attach a camera and cache its projection/model-view matrices."""
        self.camera = camera
        (self.projection_matrix, self.model_view_matrix) = camera.get_gl_matrix()

    def keyboard(self, key, x, y):
        """WASD + space/x translate the camera; i/o shift both clip planes."""
        eps = 1  # translation step; clip planes move by 0.1 * eps
        # Forward / backward along the view direction.
        if (key == b'w'):
            self.camera.center += (eps * self.camera.direction)
        elif (key == b's'):
            self.camera.center -= (eps * self.camera.direction)
        # Strafe left / right (separate if-chain so combinations stay possible).
        if (key == b'a'):
            self.camera.center -= (eps * self.camera.right)
        elif (key == b'd'):
            self.camera.center += (eps * self.camera.right)
        # Up / down, or move both near/far clip planes in / out.
        if (key == b' '):
            self.camera.center += (eps * self.camera.up)
        elif (key == b'x'):
            self.camera.center -= (eps * self.camera.up)
        elif (key == b'i'):
            self.camera.near += (0.1 * eps)
            self.camera.far += (0.1 * eps)
        elif (key == b'o'):
            self.camera.near -= (0.1 * eps)
            self.camera.far -= (0.1 * eps)
        # Recompute matrices after any camera change.
        (self.projection_matrix, self.model_view_matrix) = self.camera.get_gl_matrix()

    def show(self):
        """Enter the GLUT main loop (blocks until the window closes)."""
        glutMainLoop()
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.