class NotEinhornWorkerTests(unittest.TestCase):
def test_is_not_worker(self):
self.assertFalse(einhorn.is_worker())
def test_get_socket_count(self):
with self.assertRaises(einhorn.NotEinhornWorker):
einhorn.get_socket_count()
def test_get_socket(self):
with self.assertRaises(einhorn.NotEinhornWorker):
einhorn.get_socket()
def test_send_ack(self):
with self.assertRaises(einhorn.NotEinhornWorker):
einhorn.ack_startup() |
class IXIBrainInferDataset(Dataset):
def __init__(self, data_path, atlas_path, transforms):
self.atlas_path = atlas_path
self.paths = data_path
self.transforms = transforms
def one_hot(self, img, C):
out = np.zeros((C, img.shape[1], img.shape[2], img.shape[3]))
for i in range(C):
out[(i, ...)] = (img == i)
return out
def __getitem__(self, index):
path = self.paths[index]
(x, x_seg) = pkload(self.atlas_path)
(y, y_seg) = pkload(path)
(x, y) = (x[(None, ...)], y[(None, ...)])
(x_seg, y_seg) = (x_seg[(None, ...)], y_seg[(None, ...)])
(x, x_seg) = self.transforms([x, x_seg])
(y, y_seg) = self.transforms([y, y_seg])
x = np.ascontiguousarray(x)
y = np.ascontiguousarray(y)
x_seg = np.ascontiguousarray(x_seg)
y_seg = np.ascontiguousarray(y_seg)
(x, y, x_seg, y_seg) = (torch.from_numpy(x), torch.from_numpy(y), torch.from_numpy(x_seg), torch.from_numpy(y_seg))
return (x, y, x_seg, y_seg)
def __len__(self):
return len(self.paths) |
class ChineseCLIPFeatureExtractor(ChineseCLIPImageProcessor):
def __init__(self, *args, **kwargs) -> None:
warnings.warn('The class ChineseCLIPFeatureExtractor is deprecated and will be removed in version 5 of Transformers. Please use ChineseCLIPImageProcessor instead.', FutureWarning)
super().__init__(*args, **kwargs) |
def get_all_tilted_square_lattice_executables(*, n_instances=10, n_repetitions=1000, min_side_length=2, max_side_length=8, side_length_step=2, macrocycle_depths=None, seed=52, twoq_gate_name='sqrt_iswap') -> QuantumExecutableGroup:
rs = np.random.RandomState(seed)
specs = get_all_tilted_square_lattice_specs(n_instances=n_instances, n_repetitions=n_repetitions, min_side_length=min_side_length, max_side_length=max_side_length, side_length_step=side_length_step, macrocycle_depths=macrocycle_depths, twoq_gate_name=twoq_gate_name)
return QuantumExecutableGroup([tilted_square_lattice_spec_to_exe(spec, rs=rs) for spec in specs]) |
class CmdMail(default_cmds.MuxAccountCommand):
key = '@mail'
aliases = ['mail']
lock = 'cmd:all()'
help_category = 'General'
def parse(self):
super().parse()
self.caller_is_account = bool(inherits_from(self.caller, 'evennia.accounts.accounts.DefaultAccount'))
def search_targets(self, namelist):
nameregex = '|'.join((('^%s$' % re.escape(name)) for name in make_iter(namelist)))
if self.caller_is_account:
matches = AccountDB.objects.filter(username__iregex=nameregex)
else:
matches = ObjectDB.objects.filter(db_key__iregex=nameregex)
return matches
def get_all_mail(self):
if self.caller_is_account:
return Msg.objects.get_by_tag(category='mail').filter(db_receivers_accounts=self.caller)
else:
return Msg.objects.get_by_tag(category='mail').filter(db_receivers_objects=self.caller)
def send_mail(self, recipients, subject, message, caller):
for recipient in recipients:
recipient.msg(('You have received a new @mail from %s' % caller))
new_message = create.create_message(self.caller, message, receivers=recipient, header=subject)
new_message.tags.add('new', category='mail')
if recipients:
caller.msg('You sent your message.')
return
else:
caller.msg('No valid target(s) found. Cannot send message.')
return
def func(self):
subject = ''
body = ''
if (self.switches or self.args):
if (('delete' in self.switches) or ('del' in self.switches)):
try:
if (not self.lhs):
self.caller.msg('No Message ID given. Unable to delete.')
return
else:
all_mail = self.get_all_mail()
mind_max = max(0, (all_mail.count() - 1))
mind = max(0, min(mind_max, (int(self.lhs) - 1)))
if all_mail[mind]:
mail = all_mail[mind]
question = 'Delete message {} ({}) [Y]/N?'.format((mind + 1), mail.header)
ret = (yield question)
if ((not ret) or (ret.strip().upper() not in ('N', 'NO'))):
all_mail[mind].delete()
self.caller.msg(('Message %s deleted' % ((mind + 1),)))
else:
self.caller.msg('Message not deleted.')
else:
raise IndexError
except IndexError:
self.caller.msg('That message does not exist.')
except ValueError:
self.caller.msg('Usage: /delete <message ID>')
elif (('forward' in self.switches) or ('fwd' in self.switches)):
try:
if (not self.rhs):
self.caller.msg('Cannot forward a message without a target list. Please try again.')
return
elif (not self.lhs):
self.caller.msg('You must define a message to forward.')
return
else:
all_mail = self.get_all_mail()
mind_max = max(0, (all_mail.count() - 1))
if ('/' in self.rhs):
(message_number, message) = self.rhs.split('/', 1)
mind = max(0, min(mind_max, (int(message_number) - 1)))
if all_mail[mind]:
old_message = all_mail[mind]
self.send_mail(self.search_targets(self.lhslist), ('FWD: ' + old_message.header), ((message + '\n---- Original Message ----\n') + old_message.message), self.caller)
self.caller.msg('Message forwarded.')
else:
raise IndexError
else:
mind = max(0, min(mind_max, (int(self.rhs) - 1)))
if all_mail[mind]:
old_message = all_mail[mind]
self.send_mail(self.search_targets(self.lhslist), ('FWD: ' + old_message.header), ('\n---- Original Message ----\n' + old_message.message), self.caller)
self.caller.msg('Message forwarded.')
old_message.tags.remove('new', category='mail')
old_message.tags.add('fwd', category='mail')
else:
raise IndexError
except IndexError:
self.caller.msg('Message does not exist.')
except ValueError:
self.caller.msg('Usage: /forward <account list>=<#>[/<Message>]')
elif (('reply' in self.switches) or ('rep' in self.switches)):
try:
if (not self.rhs):
self.caller.msg('You must define a message to reply to.')
return
elif (not self.lhs):
self.caller.msg('You must supply a reply message')
return
else:
all_mail = self.get_all_mail()
mind_max = max(0, (all_mail.count() - 1))
mind = max(0, min(mind_max, (int(self.lhs) - 1)))
if all_mail[mind]:
old_message = all_mail[mind]
self.send_mail(old_message.senders, ('RE: ' + old_message.header), ((self.rhs + '\n---- Original Message ----\n') + old_message.message), self.caller)
old_message.tags.remove('new', category='mail')
old_message.tags.add('-', category='mail')
return
else:
raise IndexError
except IndexError:
self.caller.msg('Message does not exist.')
except ValueError:
self.caller.msg('Usage: /reply <#>=<message>')
elif self.rhs:
if ('/' in self.rhs):
(subject, body) = self.rhs.split('/', 1)
else:
body = self.rhs
self.send_mail(self.search_targets(self.lhslist), subject, body, self.caller)
else:
all_mail = self.get_all_mail()
mind_max = max(0, (all_mail.count() - 1))
try:
mind = max(0, min(mind_max, (int(self.lhs) - 1)))
message = all_mail[mind]
except (ValueError, IndexError):
self.caller.msg(("'%s' is not a valid mail id." % self.lhs))
return
messageForm = []
if message:
messageForm.append((_HEAD_CHAR * _WIDTH))
messageForm.append(('|wFrom:|n %s' % message.senders[0].get_display_name(self.caller)))
day = message.db_date_created.day
messageForm.append(('|wSent:|n %s' % message.db_date_created.strftime(f'%b {day}, %Y - %H:%M:%S')))
messageForm.append(('|wSubject:|n %s' % message.header))
messageForm.append((_SUB_HEAD_CHAR * _WIDTH))
messageForm.append(message.message)
messageForm.append((_HEAD_CHAR * _WIDTH))
self.caller.msg('\n'.join(messageForm))
message.tags.remove('new', category='mail')
message.tags.add('-', category='mail')
else:
messages = self.get_all_mail()
if messages:
table = evtable.EvTable('|wID|n', '|wFrom|n', '|wSubject|n', '|wArrived|n', '', table=None, border='header', header_line_char=_SUB_HEAD_CHAR, width=_WIDTH)
index = 1
for message in messages:
status = str(message.db_tags.last().db_key.upper())
if (status == 'NEW'):
status = '|gNEW|n'
table.add_row(index, message.senders[0].get_display_name(self.caller), message.header, datetime_format(message.db_date_created), status)
index += 1
table.reformat_column(0, width=6)
table.reformat_column(1, width=18)
table.reformat_column(2, width=34)
table.reformat_column(3, width=13)
table.reformat_column(4, width=7)
self.caller.msg((_HEAD_CHAR * _WIDTH))
self.caller.msg(str(table))
self.caller.msg((_HEAD_CHAR * _WIDTH))
else:
self.caller.msg('There are no messages in your inbox.') |
class Effect6771(BaseEffect):
type = 'passive'
@staticmethod
def handler(fit, src, context, projectionRange, **kwargs):
lvl = src.level
fit.modules.filteredItemBoost((lambda mod: mod.item.requiresSkill('Shield Command')), 'buffDuration', (src.getModifiedItemAttr('durationBonus') * lvl), **kwargs) |
def noisyDataTraining(qnnArch, initialUnitaries, trainingData, noisyData, lda, ep, trainingRounds, numData, stepSize, alertP=0):
noisyDataPlot = [[], []]
i = 0
while (i <= numData):
if (alertP > 0):
print((('Currently at ' + str((i / numData))) + '% noisy data.'))
testData1 = sample(trainingData, (numData - i))
testData2 = sample(noisyData, i)
if (i == 0):
testData = testData1
elif (i == numData):
testData = testData2
else:
testData = (testData1 + testData2)
learnedUnitaries = qnnTraining(qnnArch, initialUnitaries, testData, lda, ep, trainingRounds)[1]
storedStates = feedforward(qnnArch, learnedUnitaries, trainingData)
outputStates = []
for k in range(len(storedStates)):
outputStates.append(storedStates[k][(- 1)])
noisyDataPlot[0].append(i)
noisyDataPlot[1].append(costFunction(trainingData, outputStates))
i += stepSize
return noisyDataPlot |
class STSEval(object):
def loadFile(self, fpath):
self.data = {}
self.samples = []
for dataset in self.datasets:
(sent1, sent2) = zip(*[l.split('\t') for l in io.open((fpath + ('/STS.input.%s.txt' % dataset)), encoding='utf8').read().splitlines()])
raw_scores = np.array([x for x in io.open((fpath + ('/STS.gs.%s.txt' % dataset)), encoding='utf8').read().splitlines()])
not_empty_idx = (raw_scores != '')
gs_scores = [float(x) for x in raw_scores[not_empty_idx]]
sent1 = np.array([s.split() for s in sent1], dtype=object)[not_empty_idx]
sent2 = np.array([s.split() for s in sent2], dtype=object)[not_empty_idx]
sorted_data = sorted(zip(sent1, sent2, gs_scores), key=(lambda z: (len(z[0]), len(z[1]), z[2])))
(sent1, sent2, gs_scores) = map(list, zip(*sorted_data))
self.data[dataset] = (sent1, sent2, gs_scores)
self.samples += (sent1 + sent2)
def do_prepare(self, params, prepare):
if ('similarity' in params):
self.similarity = params.similarity
else:
self.similarity = (lambda s1, s2: np.nan_to_num(cosine(np.nan_to_num(s1), np.nan_to_num(s2))))
return prepare(params, self.samples)
def run(self, params, batcher):
results = {}
all_sys_scores = []
all_gs_scores = []
for dataset in self.datasets:
sys_scores = []
(input1, input2, gs_scores) = self.data[dataset]
for ii in range(0, len(gs_scores), params.batch_size):
batch1 = input1[ii:(ii + params.batch_size)]
batch2 = input2[ii:(ii + params.batch_size)]
if ((len(batch1) == len(batch2)) and (len(batch1) > 0)):
enc1 = batcher(params, batch1)
enc2 = batcher(params, batch2)
for kk in range(enc2.shape[0]):
sys_score = self.similarity(enc1[kk], enc2[kk])
sys_scores.append(sys_score)
all_sys_scores.extend(sys_scores)
all_gs_scores.extend(gs_scores)
results[dataset] = {'pearson': pearsonr(sys_scores, gs_scores), 'spearman': spearmanr(sys_scores, gs_scores), 'nsamples': len(sys_scores)}
logging.debug(('%s : pearson = %.4f, spearman = %.4f' % (dataset, results[dataset]['pearson'][0], results[dataset]['spearman'][0])))
weights = [results[dset]['nsamples'] for dset in results.keys()]
list_prs = np.array([results[dset]['pearson'][0] for dset in results.keys()])
list_spr = np.array([results[dset]['spearman'][0] for dset in results.keys()])
avg_pearson = np.average(list_prs)
avg_spearman = np.average(list_spr)
wavg_pearson = np.average(list_prs, weights=weights)
wavg_spearman = np.average(list_spr, weights=weights)
all_pearson = pearsonr(all_sys_scores, all_gs_scores)
all_spearman = spearmanr(all_sys_scores, all_gs_scores)
results['all'] = {'pearson': {'all': all_pearson[0], 'mean': avg_pearson, 'wmean': wavg_pearson}, 'spearman': {'all': all_spearman[0], 'mean': avg_spearman, 'wmean': wavg_spearman}}
logging.debug(('ALL : Pearson = %.4f, Spearman = %.4f' % (all_pearson[0], all_spearman[0])))
logging.debug(('ALL (weighted average) : Pearson = %.4f, Spearman = %.4f' % (wavg_pearson, wavg_spearman)))
logging.debug(('ALL (average) : Pearson = %.4f, Spearman = %.4f\n' % (avg_pearson, avg_spearman)))
return results |
class SynapseDataset(Dataset):
def __init__(self, keys, args, mode='train'):
super().__init__()
self.patch_size = (args.img_size, args.img_size)
self.files = []
self.mode = mode
for key in keys:
key = key.split('.')[0]
slices = subfiles(join(args.data_dir, key))
for sl in slices:
self.files.append(sl)
print(f'dataset length: {len(self.files)}')
def __len__(self):
return len(self.files)
def __getitem__(self, index):
img = Image.open(self.files[index])
label = Image.open(self.files[index].replace('imgs/', 'annotations/'))
label = np.asarray(label)
(img, label) = self.transform(img, label)
return (img, label, self.files[index])
def transform(self, img, label):
img = np.asarray(img).astype(np.float32).transpose([2, 0, 1])
img = ((img - img.min()) / (img.max() - img.min()))
data_dict = {'data': img[None], 'seg': label[(None, None)]}
if (self.mode == 'train'):
aug_list = [BrightnessTransform(mu=1, sigma=1, p_per_sample=0.5), GammaTransform(p_per_sample=0.5), GaussianNoiseTransform(p_per_sample=0.5), ResizeTransform(target_size=self.patch_size, order=1), MirrorTransform(axes=(1,)), SpatialTransform(patch_size=self.patch_size, random_crop=False, patch_center_dist_from_border=(self.patch_size[0] // 2), do_elastic_deform=True, alpha=(100.0, 350.0), sigma=(40.0, 60.0), do_rotation=True, p_rot_per_sample=0.5, angle_x=((- 0.1), 0.1), angle_y=(0, 1e-08), angle_z=(0, 1e-08), scale=(0.5, 1.9), p_scale_per_sample=0.5, border_mode_data='nearest', border_mode_seg='nearest'), NumpyToTensor()]
aug = Compose(aug_list)
else:
aug_list = [ResizeTransform(target_size=self.patch_size, order=1), NumpyToTensor()]
aug = Compose(aug_list)
data_dict = aug(**data_dict)
img = data_dict.get('data')[0]
label = data_dict.get('seg')[0]
return (img, label) |
class SizeProjectMetadataFilter(FilterMetadataPlugin, AllowListProject):
name = 'size_project_metadata'
initialized = False
max_package_size: int = 0
allowlist_package_names: list[str] = []
def initialize_plugin(self) -> None:
if (not self.initialized):
try:
human_package_size = self.configuration['size_project_metadata']['max_package_size']
except KeyError:
logger.warning(f'Unable to initialise {self.name} plugin; must create max_package_size in configuration.')
return
try:
self.max_package_size = parse_size(human_package_size, binary=True)
except InvalidSize:
logger.warning(f'Unable to initialise {self.name} plugin; max_package_size of "{human_package_size}" is not valid.')
return
if (self.max_package_size > 0):
if (not self.allowlist_package_names):
self.allowlist_package_names = self._determine_unfiltered_package_names()
log_msg = (f'Initialized metadata plugin {self.name} to block projects ' + f'> {self.max_package_size} bytes')
if self.allowlist_package_names:
log_msg += ('; except packages in the allowlist: ' + f'{self.allowlist_package_names}')
logger.info(log_msg)
self.initialized = True
def filter(self, metadata: dict) -> bool:
if (self.max_package_size <= 0):
return True
if (self.allowlist_package_names and (not self.check_match(name=metadata['info']['name']))):
return True
total_size = 0
for release in metadata['releases'].values():
for file in release:
total_size += file['size']
return (total_size <= self.max_package_size) |
class Effect2302(BaseEffect):
type = 'passive'
@staticmethod
def handler(fit, module, context, projectionRange, **kwargs):
for (layer, attrPrefix) in (('shield', 'shield'), ('armor', 'armor'), ('hull', '')):
for damageType in ('Kinetic', 'Thermal', 'Explosive', 'Em'):
bonus = ('%s%sDamageResonance' % (attrPrefix, damageType))
bonus = ('%s%s' % (bonus[0].lower(), bonus[1:]))
booster = ('%s%sDamageResonance' % (layer, damageType))
fit.ship.multiplyItemAttr(bonus, module.getModifiedItemAttr(booster), stackingPenalties=True, penaltyGroup='preMul', **kwargs) |
class SetChannel(discord.ui.Button):
def __init__(self, ctx: Context):
super().__init__(emoji=kd(1))
self.ctx = ctx
async def callback(self, interaction: discord.Interaction):
(await interaction.response.defer())
_m = (await self.ctx.simple('Mention the channel you want to use for ssverification.'))
channel = (await inputs.channel_input(self.ctx, delete_after=True))
(await self.ctx.safe_delete(_m))
if (await SSVerify.filter(channel_id=channel.id).exists()):
return (await self.ctx.error(f'{channel.mention} is already a ssverification channel.', 3))
if (not channel.permissions_for(self.ctx.guild.me).embed_links):
return (await self.ctx.error(f'I need `embed_links` permission in {channel.mention}', 3))
self.view.record.channel_id = channel.id
self.ctx.bot.cache.ssverify_channels.add(channel.id)
(await self.view.refresh_view()) |
def sequence_assigned_stmts(self: (nodes.Tuple | nodes.List), node: node_classes.AssignedStmtsPossibleNode=None, context: (InferenceContext | None)=None, assign_path: (list[int] | None)=None) -> Any:
if (assign_path is None):
assign_path = []
try:
index = self.elts.index(node)
except ValueError as exc:
raise InferenceError('Tried to retrieve a node {node!r} which does not exist', node=self, assign_path=assign_path, context=context) from exc
assign_path.insert(0, index)
return self.parent.assigned_stmts(node=self, context=context, assign_path=assign_path) |
def make_loader(split, dst_cls=DatasetAllTasks, repeat=None, is_training=True, unlabeled=False, task='', transforms_tr=None):
if is_training:
dst = dst_cls(split=split, repeat=repeat, unlabeled=unlabeled, transform=transforms_tr, task=task, num_cls=config.num_cls)
return DataLoader(dst, batch_size=config.batch_size, shuffle=True, num_workers=args.num_workers, pin_memory=True, worker_init_fn=seed_worker, drop_last=True)
else:
dst = dst_cls(split=split, is_val=True, task=task, num_cls=config.num_cls, transform=transforms.Compose([CenterCrop(config.patch_size), ToTensor()]))
return DataLoader(dst, pin_memory=True) |
class SWOCTRL(IntEnum):
CH0OC = (1 << 0)
CH1OC = (1 << 1)
CH2OC = (1 << 2)
CH3OC = (1 << 3)
CH4OC = (1 << 4)
CH5OC = (1 << 5)
CH6OC = (1 << 6)
CH7OC = (1 << 7)
CH0OCV = (1 << 8)
CH1OCV = (1 << 9)
CH2OCV = (1 << 10)
CH3OCV = (1 << 11)
CH4OCV = (1 << 12)
CH5OCV = (1 << 13)
CH6OCV = (1 << 14)
CH7OCV = (1 << 15) |
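# Minimal usage sketch, assuming the flag layout above: each SWOCTRL member is a
# single-bit mask, so control words compose and decompose with bitwise operators.
ctrl = SWOCTRL.CH0OC | SWOCTRL.CH3OC | SWOCTRL.CH3OCV
assert ctrl & SWOCTRL.CH3OC
assert not (ctrl & SWOCTRL.CH1OC) |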
@dataclass
class VariableBatchAll2AllPooledInfo(object):
batch_size_per_rank_per_feature: List[List[int]]
batch_size_per_feature_pre_a2a: List[int]
emb_dim_per_rank_per_feature: List[List[int]]
codecs: Optional[QuantizedCommCodecs] = None
input_splits: Optional[List[int]] = None
output_splits: Optional[List[int]] = None |
@login_required
def reviewer_comments_dashboard(request, conference_slug):
conference = get_object_or_404(Conference, slug=conference_slug)
if (not is_conference_moderator(user=request.user, conference=conference)):
raise PermissionDenied
conference_reviewers = ConferenceProposalReviewer.objects.filter(conference=conference, active=True)
proposals_qs = Proposal.objects.filter(conference=conference, status=ProposalStatus.PUBLIC)
by_conference = {}
by_section = {}
for reviewers in conference_reviewers:
id = reviewers.reviewer.id
by_conference.setdefault(id, [reviewers.reviewer, 0])
by_conference[id][1] = ProposalComment.objects.filter(commenter=reviewers.reviewer, deleted=False, private=True, proposal__status=ProposalStatus.PUBLIC, proposal__conference=conference).order_by('proposal').distinct('proposal').count()
by_section.setdefault(id, {'reviewer': reviewers.reviewer, 'interaction': []})
reviewers_section = ProposalSectionReviewer.objects.filter(conference_reviewer=reviewers)
for section in reviewers_section:
proposal_qs = proposals_qs.filter(proposal_section=section.proposal_section)
commented = 0
uncommented = 0
for proposal in proposal_qs:
private_comment_count = ProposalComment.objects.filter(proposal=proposal, deleted=False, private=True, commenter=reviewers.reviewer).count()
if private_comment_count:
commented = (commented + 1)
else:
uncommented = (uncommented + 1)
by_section[id]['interaction'].append([proposal_qs.count(), commented, uncommented, section.proposal_section.name])
ctx = {'conference': conference, 'conference_reviewers': conference_reviewers, 'by_conference': by_conference, 'by_section': by_section, 'is_proposal_reviewer': permissions.is_proposal_reviewer(user=request.user, conference=conference)}
return render(request, 'proposals/reviewers_dashboard.html', ctx) |
class TestFindUcs2Symbols():
def test_elf_find_ucs2_symbols(self):
elf = Mock()
asunicode = MockSymbol('PyUnicodeUCS2_AsUnicode', st_shndx='SHN_UNDEF', st_info=dict(type='STT_FUNC'))
symbols = (asunicode, Mock())
symbols[1].name = 'foobar'
elf.get_section_by_name.return_value.iter_symbols.return_value = symbols
symbols = list(elf_find_ucs2_symbols(elf))
assert (len(symbols) == 1)
assert (symbols[0] == 'PyUnicodeUCS2_AsUnicode')
def test_elf_find_ucs2_symbols_no_symbol(self):
elf = Mock()
symbols = (MockSymbol('FooSymbol'),)
elf.get_section_by_name.return_value.iter_symbols.return_value = symbols
symbols = list(elf_find_ucs2_symbols(elf))
assert (len(symbols) == 0) |
def get_residual_integral(s1: Spectrum, s2: Spectrum, var, ignore_nan=False, wunit='default', Iunit='default') -> float:
(var, wunit, Iunit) = get_default_units(s1, s2, var=var, wunit=wunit, Iunit=Iunit)
(w1, I1) = s1.get(var, wunit=wunit, Iunit=Iunit)
(wdiff, dI) = get_diff(s1, s2, var, wunit=wunit, Iunit=Iunit, resample=True)
if ignore_nan:
b = np.isnan(dI)
(wdiff, dI) = (wdiff[(~ b)], dI[(~ b)])
b = np.isnan(I1)
(w1, I1) = (w1[(~ b)], I1[(~ b)])
if (var in ['transmittance', 'transmittance_noslit']):
norm = (1 - np.trapz(I1, w1))
else:
norm = np.trapz(I1, w1)
return (np.abs(np.trapz(dI, wdiff)) / norm) |
@_jcustomizer.JConversion('com.conveyal.r5.analyst.cluster.AnalysisWorkerTask', exact=RegionalTask)
@_jcustomizer.JConversion('com.conveyal.r5.profile.ProfileRequest', exact=RegionalTask)
@_jcustomizer.JConversion('com.conveyal.r5.analyst.cluster.RegionalTask', exact=RegionalTask)
def _cast_RegionalTask(java_class, object_):
return object_._regional_task.clone() |
class ConvGRU(nn.Module):
def __init__(self, hidden_dim=128, input_dim=(192 + 128)):
super(ConvGRU, self).__init__()
self.convz = nn.Conv2d((hidden_dim + input_dim), hidden_dim, 3, padding=1)
self.convr = nn.Conv2d((hidden_dim + input_dim), hidden_dim, 3, padding=1)
self.convq = nn.Conv2d((hidden_dim + input_dim), hidden_dim, 3, padding=1)
def forward(self, h, x):
hx = torch.cat([h, x], dim=1)
z = torch.sigmoid(self.convz(hx))
r = torch.sigmoid(self.convr(hx))
q = torch.tanh(self.convq(torch.cat([(r * h), x], dim=1)))
h = (((1 - z) * h) + (z * q))
return h |
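# Minimal usage sketch, assuming PyTorch and the default dimensions above
# (hidden_dim=128, input_dim=192+128=320); batch and spatial sizes are illustrative.
import torch
gru = ConvGRU()
h = torch.zeros(2, 128, 32, 32)   # hidden state
x = torch.randn(2, 320, 32, 32)   # concatenated motion/context features
h = gru(h, x)                     # gated update, output shape (2, 128, 32, 32) |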
def rand_augment_ops(magnitude: Union[(int, float)]=10, prob: float=0.5, hparams: Optional[Dict]=None, transforms: Optional[Union[(Dict, List)]]=None):
hparams = (hparams or _HPARAMS_DEFAULT)
transforms = (transforms or _RAND_TRANSFORMS)
return [AugmentOp(name, prob=prob, magnitude=magnitude, hparams=hparams) for name in transforms] |
@pytest.mark.parametrize('content_type, expected', [('application/rss', True), ('application/rss; charset=UTF-8', True), ('application/atom', True), ('application/xml', True), ('text/html', False)])
def test_is_feed_content_type(content_type, expected):
assert (helpers.is_feed_content_type(content_type) is expected) |
@patch('PyQt6.QtWidgets.QGraphicsView.mousePressEvent')
def test_mouse_press_pan_middle_drag(mouse_event_mock, view):
event = MagicMock()
event.position.return_value = QtCore.QPointF(10.0, 20.0)
event.button.return_value = Qt.MouseButton.MiddleButton
event.modifiers.return_value = None
view.mousePressEvent(event)
assert (view.pan_active is True)
assert (view.zoom_active is False)
assert (view.movewin_active is False)
assert (view.event_start == QtCore.QPointF(10.0, 20.0))
mouse_event_mock.assert_not_called()
assert (view.cursor() == Qt.CursorShape.ClosedHandCursor)
event.accept.assert_called_once_with() |
class MPISolver(Solver):
CHECK_SYNC_ITERS = 1000
def __init__(self, sess, optimizer, vars):
super().__init__(vars)
self.sess = sess
self.optimizer = optimizer
self._build_grad_feed(vars)
self._update = optimizer.apply_gradients(zip(self._grad_tf_list, self.vars))
self._set_flat_vars = TFUtil.SetFromFlat(sess, self.vars)
self._get_flat_vars = TFUtil.GetFlat(sess, self.vars)
self.iter = 0
grad_dim = self._calc_grad_dim()
self._flat_grad = np.zeros(grad_dim, dtype=np.float32)
self._global_flat_grad = np.zeros(grad_dim, dtype=np.float32)
return
def get_stepsize(self):
return self.optimizer._learning_rate_tensor.eval()
def update(self, grads=None, grad_scale=1.0):
if (grads is not None):
self._flat_grad = MathUtil.flatten(grads)
else:
self._flat_grad.fill(0)
return self.update_flatgrad(self._flat_grad, grad_scale)
def update_flatgrad(self, flat_grad, grad_scale=1.0):
if ((self.iter % self.CHECK_SYNC_ITERS) == 0):
assert self.check_synced(), Logger.print('Network parameters desynchronized')
if (grad_scale != 1.0):
flat_grad *= grad_scale
MPI.COMM_WORLD.Allreduce(flat_grad, self._global_flat_grad, op=MPI.SUM)
self._global_flat_grad /= MPIUtil.get_num_procs()
self._load_flat_grad(self._global_flat_grad)
self.sess.run([self._update], self._grad_feed)
self.iter += 1
return
def sync(self):
vars = self._get_flat_vars()
MPIUtil.bcast(vars)
self._set_flat_vars(vars)
return
def check_synced(self):
synced = True
if self._is_root():
vars = self._get_flat_vars()
MPIUtil.bcast(vars)
else:
vars_local = self._get_flat_vars()
vars_root = np.empty_like(vars_local)
MPIUtil.bcast(vars_root)
synced = (vars_local == vars_root).all()
return synced
def _is_root(self):
return MPIUtil.is_root_proc()
def _build_grad_feed(self, vars):
self._grad_tf_list = []
self._grad_buffers = []
for v in self.vars:
shape = v.get_shape()
grad = np.zeros(shape)
grad_tf = tf.placeholder(tf.float32, shape=shape)
self._grad_buffers.append(grad)
self._grad_tf_list.append(grad_tf)
self._grad_feed = dict({g_tf: g for (g_tf, g) in zip(self._grad_tf_list, self._grad_buffers)})
return
def _calc_grad_dim(self):
grad_dim = 0
for grad in self._grad_buffers:
grad_dim += grad.size
return grad_dim
def _load_flat_grad(self, flat_grad):
start = 0
for g in self._grad_buffers:
size = g.size
np.copyto(g, np.reshape(flat_grad[start:(start + size)], g.shape))
start += size
return |
class NodeScenariosTest(unittest.TestCase):
def setUp(self):
vsphere_env_vars = ['VSPHERE_IP', 'VSPHERE_USERNAME', 'VSPHERE_PASSWORD']
self.credentials_present = all(((env_var in os.environ) for env_var in vsphere_env_vars))
def test_serialization(self):
plugin.test_object_serialization(vmware_plugin.NodeScenarioConfig(name='test', skip_openshift_checks=True), self.fail)
plugin.test_object_serialization(vmware_plugin.NodeScenarioSuccessOutput(nodes={}, action=Actions.START), self.fail)
plugin.test_object_serialization(vmware_plugin.NodeScenarioErrorOutput(error='Hello World', action=Actions.START), self.fail)
def test_node_start(self):
if (not self.credentials_present):
self.skipTest("Check if the environmental variables 'VSPHERE_IP', 'VSPHERE_USERNAME', 'VSPHERE_PASSWORD' are set")
vsphere = vmware_plugin.vSphere(verify=False)
(vm_id, vm_name) = vsphere.create_default_vm()
if (vm_id is None):
self.fail('Could not create test VM')
(output_id, output_data) = vmware_plugin.node_start(vmware_plugin.NodeScenarioConfig(name=vm_name, skip_openshift_checks=True, verify_session=False))
if (output_id == 'error'):
logging.error(output_data.error)
self.fail('The VMware VM did not start because an error occurred')
vsphere.release_instances(vm_name)
def test_node_stop(self):
if (not self.credentials_present):
self.skipTest("Check if the environmental variables 'VSPHERE_IP', 'VSPHERE_USERNAME', 'VSPHERE_PASSWORD' are set")
vsphere = vmware_plugin.vSphere(verify=False)
(vm_id, vm_name) = vsphere.create_default_vm()
if (vm_id is None):
self.fail('Could not create test VM')
vsphere.start_instances(vm_name)
(output_id, output_data) = vmware_plugin.node_stop(vmware_plugin.NodeScenarioConfig(name=vm_name, skip_openshift_checks=True, verify_session=False))
if (output_id == 'error'):
logging.error(output_data.error)
self.fail('The VMware VM did not stop because an error occurred')
vsphere.release_instances(vm_name)
def test_node_reboot(self):
if (not self.credentials_present):
self.skipTest("Check if the environmental variables 'VSPHERE_IP', 'VSPHERE_USERNAME', 'VSPHERE_PASSWORD' are set")
vsphere = vmware_plugin.vSphere(verify=False)
(vm_id, vm_name) = vsphere.create_default_vm()
if (vm_id is None):
self.fail('Could not create test VM')
vsphere.start_instances(vm_name)
(output_id, output_data) = vmware_plugin.node_reboot(vmware_plugin.NodeScenarioConfig(name=vm_name, skip_openshift_checks=True, verify_session=False))
if (output_id == 'error'):
logging.error(output_data.error)
self.fail('The VMware VM did not reboot because an error occurred')
vsphere.release_instances(vm_name)
def test_node_terminate(self):
if (not self.credentials_present):
self.skipTest("Check if the environmental variables 'VSPHERE_IP', 'VSPHERE_USERNAME', 'VSPHERE_PASSWORD' are set")
vsphere = vmware_plugin.vSphere(verify=False)
(vm_id, vm_name) = vsphere.create_default_vm()
if (vm_id is None):
self.fail('Could not create test VM')
vsphere.start_instances(vm_name)
(output_id, output_data) = vmware_plugin.node_terminate(vmware_plugin.NodeScenarioConfig(name=vm_name, skip_openshift_checks=True, verify_session=False))
if (output_id == 'error'):
logging.error(output_data.error)
self.fail('The VMware VM did not terminate because an error occurred')
def conv_block(input_tensor, kernel_size, filters, stage, block, strides=(2, 2)):
(filters1, filters2, filters3) = filters
if (K.image_data_format() == 'channels_last'):
bn_axis = 3
else:
bn_axis = 1
conv_name_base = ((('res' + str(stage)) + block) + '_branch')
bn_name_base = ((('bn' + str(stage)) + block) + '_branch')
x = Conv2D(filters1, (1, 1), strides=strides, name=(conv_name_base + '2a'))(input_tensor)
x = BatchNormalization(axis=bn_axis, name=(bn_name_base + '2a'))(x)
x = Activation('relu')(x)
x = Conv2D(filters2, kernel_size, padding='same', name=(conv_name_base + '2b'))(x)
x = BatchNormalization(axis=bn_axis, name=(bn_name_base + '2b'))(x)
x = Activation('relu')(x)
x = Conv2D(filters3, (1, 1), name=(conv_name_base + '2c'))(x)
x = BatchNormalization(axis=bn_axis, name=(bn_name_base + '2c'))(x)
shortcut = Conv2D(filters3, (1, 1), strides=strides, name=(conv_name_base + '1'))(input_tensor)
shortcut = BatchNormalization(axis=bn_axis, name=(bn_name_base + '1'))(shortcut)
x = layers.add([x, shortcut])
x = Activation('relu')(x)
return x |
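# Minimal usage sketch, assuming the tensorflow.keras names the block relies on
# (Conv2D, BatchNormalization, Activation, layers, K) are in scope; the input
# shape is illustrative.
from tensorflow.keras.layers import Input
inputs = Input(shape=(56, 56, 64))  # channels_last feature map
# strides=(2, 2) by default, so both the main path and the projection shortcut
# downsample: output shape (28, 28, 256)
out = conv_block(inputs, 3, [64, 64, 256], stage=2, block='a') |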
def test_git_getdate(wd: WorkDir) -> None:
today = datetime.now(timezone.utc).date()
def parse_date() -> date:
parsed = git.parse(os.fspath(wd.cwd), Configuration())
assert (parsed is not None)
assert (parsed.node_date is not None)
return parsed.node_date
git_wd = git.GitWorkdir(wd.cwd)
assert (git_wd.get_head_date() is None)
assert (parse_date() == today)
wd.commit_testfile()
assert (git_wd.get_head_date() == today)
assert (parse_date() == today) |
def test_offroadcondition():
cond = OSC.OffroadCondition(20)
prettyprint(cond.get_element())
cond2 = OSC.OffroadCondition(20)
cond3 = OSC.OffroadCondition(23)
assert (cond == cond2)
assert (cond != cond3)
cond4 = OSC.OffroadCondition.parse(cond.get_element())
assert (cond == cond4)
assert (version_validation('EntityCondition', cond, 0) == ValidationResponse.OK)
assert (version_validation('EntityCondition', cond, 1) == ValidationResponse.OK)
assert (version_validation('EntityCondition', cond, 2) == ValidationResponse.OK) |
class PyProjectSource(DependencySource):
def __init__(self, filename: Path, index_url: (str | None)=None, extra_index_urls: list[str]=[], state: AuditState=AuditState()) -> None:
self.filename = filename
self.state = state
def collect(self) -> Iterator[Dependency]:
with self.filename.open('r') as f:
pyproject_data = toml.load(f)
project = pyproject_data.get('project')
if (project is None):
raise PyProjectSourceError(f'pyproject file {self.filename} does not contain `project` section')
deps = project.get('dependencies')
if (deps is None):
logger.warning(f'pyproject file {self.filename} does not contain `dependencies` list')
return
with TemporaryDirectory() as ve_dir, NamedTemporaryFile(dir=ve_dir, delete=False) as req_file:
req_file.write(os.linesep.join(deps).encode())
req_file.flush()
ve = VirtualEnv(install_args=['-r', req_file.name], state=self.state)
try:
ve.create(ve_dir)
except VirtualEnvError as exc:
raise PyProjectSourceError(str(exc)) from exc
for (name, version) in ve.installed_packages:
(yield ResolvedDependency(name=name, version=version))
def fix(self, fix_version: ResolvedFixVersion) -> None:
with self.filename.open('r+') as f, NamedTemporaryFile(mode='r+', delete=False) as tmp:
pyproject_data = toml.load(f)
project = pyproject_data.get('project')
if (project is None):
raise PyProjectFixError(f'pyproject file {self.filename} does not contain `project` section')
deps = project.get('dependencies')
if (deps is None):
logger.warning(f'pyproject file {self.filename} does not contain `dependencies` list')
return
reqs = [Requirement(dep) for dep in deps]
for i in range(len(reqs)):
req = reqs[i]
if ((req.name == fix_version.dep.name) and req.specifier.contains(fix_version.dep.version) and (not req.specifier.contains(fix_version.version))):
req.specifier = SpecifierSet(f'=={fix_version.version}')
deps[i] = str(req)
assert ((req.marker is None) or req.marker.evaluate())
toml.dump(pyproject_data, tmp)
os.replace(tmp.name, self.filename) |
def test_ChunkedReader() -> None:
t_body_reader(ChunkedReader, b'0\r\n\r\n', [EndOfMessage()])
t_body_reader(ChunkedReader, b'0\r\nSome: header\r\n\r\n', [EndOfMessage(headers=[('Some', 'header')])])
t_body_reader(ChunkedReader, (((b'5\r\n01234\r\n' + b'10\r\nabcdef\r\n') + b'0\r\n') + b'Some: header\r\n\r\n'), [Data(data=b'abcdef'), EndOfMessage(headers=[('Some', 'header')])])
t_body_reader(ChunkedReader, ((b'5\r\n01234\r\n' + b'10\r\nabcdef\r\n') + b'0\r\n\r\n'), [Data(data=b'abcdef'), EndOfMessage()])
t_body_reader(ChunkedReader, (((b'aA\r\n' + (b'x' * 170)) + b'\r\n') + b'0\r\n\r\n'), [Data(data=(b'x' * 170)), EndOfMessage()])
with pytest.raises(LocalProtocolError):
t_body_reader(ChunkedReader, ((b'9' * 100) + b'\r\nxxx'), [Data(data=b'xxx')])
with pytest.raises(LocalProtocolError):
t_body_reader(ChunkedReader, b'10\x00\r\nxxx', None)
t_body_reader(ChunkedReader, (((b'5; hello=there\r\n' + b'xxxxx') + b'\r\n') + b'0; random="junk"; some=more; canbe=lonnnnngg\r\n\r\n'), [Data(data=b'xxxxx'), EndOfMessage()])
t_body_reader(ChunkedReader, (b'5 \t \r\n01234\r\n' + b'0\r\n\r\n'), [Data(data=b'01234'), EndOfMessage()]) |
def _iterable_if_range(node: nodes.NodeNG) -> Optional[str]:
if ((not isinstance(node, nodes.Call)) or (not isinstance(node.func, nodes.Name)) or (not (node.func.name == 'range'))):
return None
if (len(node.args) > 1):
arg1 = node.args[0]
if ((not isinstance(arg1, nodes.Const)) or (arg1.value != 0)):
return None
if ((len(node.args) == 3) and ((not isinstance(node.args[2], nodes.Const)) or (node.args[2].value != 1))):
return None
if (len(node.args) == 1):
stop_arg = node.args[0]
else:
stop_arg = node.args[1]
if (isinstance(stop_arg, nodes.Call) and isinstance(stop_arg.func, nodes.Name) and (stop_arg.func.name == 'len') and (len(stop_arg.args) == 1) and isinstance(stop_arg.args[0], nodes.Name)):
return stop_arg.args[0].name |
class TestAssertIsNotNone(TestCase):
def test_you(self):
assert (abc is not None)
def test_me(self):
assert ((xxx + y) is not None)
assert ((aaa and bbb) is not None)
assert ((ccc or ddd) is not None)
assert ((True if You else False) is not None)
def test_everybody(self):
assert ('def' is not None)
def test_message(self):
assert ((123 + z) is not None), error_message
assert ((xxx + z) is not None), 'This is wrong!'
def test_generator(self):
assert ((x for x in range(1)) is not None)
assert ((x for x in range(1)) is not None)
assert ((x for x in range(1)) is not None), 'This is wrong' |
class AconC(nn.Module):
def __init__(self, c1):
super().__init__()
self.p1 = nn.Parameter(torch.randn(1, c1, 1, 1))
self.p2 = nn.Parameter(torch.randn(1, c1, 1, 1))
self.beta = nn.Parameter(torch.ones(1, c1, 1, 1))
def forward(self, x):
dpx = ((self.p1 - self.p2) * x)
return ((dpx * torch.sigmoid((self.beta * dpx))) + (self.p2 * x)) |
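# Minimal usage sketch, assuming PyTorch; channel count and input size are
# illustrative. AconC is a learnable activation, so the output shape matches the input.
import torch
act = AconC(c1=64)
y = act(torch.randn(8, 64, 16, 16))  # -> (8, 64, 16, 16) |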
class MyHardSingleTripletSelector():
def __init__(self, nbrs_num, rand_num, nbr_indices):
self.x = None
self.y = None
self.nbrs_num = nbrs_num
self.rand_num = rand_num
self.nbr_indices = nbr_indices
def get_triplets(self, anom_idx, x, y, normal_label=0):
self.x = x.cpu().data.numpy()
self.y = y.cpu().data.numpy()
noml_idx = np.where((self.y == normal_label))[0]
nbr_indices = self.nbr_indices
rand_num = self.rand_num
rand_canddt = np.setdiff1d(noml_idx, nbr_indices)
rand_indices = np.random.choice(rand_canddt, rand_num, replace=False)
triplets = [[anchor, positive, anom_idx] for anchor in rand_indices for positive in nbr_indices]
return torch.LongTensor(np.array(triplets)) |
class AnyStage(nn.Module):
def __init__(self, w_in, w_out, stride, d, block_class, norm, activation_class, params):
super().__init__()
for i in range(d):
block = block_class(w_in, w_out, stride, norm, activation_class, params)
self.add_module('b{}'.format((i + 1)), block)
(stride, w_in) = (1, w_out)
def forward(self, x):
for block in self.children():
x = block(x)
return x |
def allreduce_grads(model, coalesce=True, bucket_size_mb=(- 1)):
grads = [param.grad.data for param in model.parameters() if (param.requires_grad and (param.grad is not None))]
world_size = dist.get_world_size()
if coalesce:
_allreduce_coalesced(grads, world_size, bucket_size_mb)
else:
for tensor in grads:
dist.all_reduce(tensor.div_(world_size)) |
class MSMROpenCircuitPotential(BaseOpenCircuitPotential):
def get_coupled_variables(self, variables):
(domain, Domain) = self.domain_Domain
phase_name = self.phase_name
if (self.reaction == 'lithium-ion main'):
T = variables[f'{Domain} electrode temperature [K]']
domain_options = getattr(self.options, domain)
if (domain_options['particle size'] == 'distribution'):
sto_surf = variables[f'{Domain} {phase_name}particle surface stoichiometry distribution']
ocp_surf = variables[f'{Domain} {phase_name}particle surface potential distribution [V]']
if (isinstance(sto_surf, pybamm.Broadcast) and isinstance(ocp_surf, pybamm.Broadcast) and isinstance(T, pybamm.Broadcast)):
sto_surf = sto_surf.orphans[0]
ocp_surf = ocp_surf.orphans[0]
T = T.orphans[0]
T = pybamm.PrimaryBroadcast(T, [f'{domain} particle size'])
else:
sto_surf = variables[f'{Domain} {phase_name}particle surface stoichiometry']
ocp_surf = variables[f'{Domain} {phase_name}particle surface potential [V]']
if (isinstance(sto_surf, pybamm.Broadcast) and isinstance(ocp_surf, pybamm.Broadcast) and isinstance(T, pybamm.Broadcast)):
sto_surf = sto_surf.orphans[0]
ocp_surf = ocp_surf.orphans[0]
T = T.orphans[0]
ocp_bulk = variables[f'Average {domain} {phase_name}particle potential [V]']
dUdT = self.phase_param.dUdT(sto_surf)
variables.update(self._get_standard_ocp_variables(ocp_surf, ocp_bulk, dUdT))
return variables |
class TestQueryBestSize(EndianTest):
def setUp(self):
self.req_args_0 = {'drawable': , 'height': 64528, 'item_class': 1, 'width': 8620}
self.req_bin_0 = b'a\x01\x00\x03u\xb4\x8a5!\xac\xfc\x10'
self.reply_args_0 = {'height': 2023, 'sequence_number': 41036, 'width': 35260}
self.reply_bin_0 = b'\x01\x00\xa0L\x00\x00\x00\x00\x89\xbc\x07\xe7\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00'
def testPackRequest0(self):
bin = request.QueryBestSize._request.to_binary(*(), **self.req_args_0)
self.assertBinaryEqual(bin, self.req_bin_0)
def testUnpackRequest0(self):
(args, remain) = request.QueryBestSize._request.parse_binary(self.req_bin_0, dummy_display, 1)
self.assertBinaryEmpty(remain)
self.assertEqual(args, self.req_args_0)
def testPackReply0(self):
bin = request.QueryBestSize._reply.to_binary(*(), **self.reply_args_0)
self.assertBinaryEqual(bin, self.reply_bin_0)
def testUnpackReply0(self):
(args, remain) = request.QueryBestSize._reply.parse_binary(self.reply_bin_0, dummy_display, 1)
self.assertBinaryEmpty(remain)
self.assertEqual(args, self.reply_args_0) |
class TransformerDecoderLayer(nn.Module):
def __init__(self, d_model, nhead, dim_feedforward=2048, dropout=0.1, activation='relu', normalize_before=False):
super().__init__()
self.self_attn = nn.MultiheadAttention(d_model, nhead, dropout=dropout)
self.multihead_attn = nn.MultiheadAttention(d_model, nhead, dropout=dropout)
self.linear1 = nn.Linear(d_model, dim_feedforward)
self.dropout = nn.Dropout(dropout)
self.linear2 = nn.Linear(dim_feedforward, d_model)
self.norm1 = nn.LayerNorm(d_model)
self.norm2 = nn.LayerNorm(d_model)
self.norm3 = nn.LayerNorm(d_model)
self.dropout1 = nn.Dropout(dropout)
self.dropout2 = nn.Dropout(dropout)
self.dropout3 = nn.Dropout(dropout)
self.activation = _get_activation_fn(activation)
self.normalize_before = normalize_before
def with_pos_embed(self, tensor, pos: Optional[Tensor]):
return (tensor if (pos is None) else (tensor + pos))
def forward_post(self, tgt, memory, tgt_mask: Optional[Tensor]=None, memory_mask: Optional[Tensor]=None, tgt_key_padding_mask: Optional[Tensor]=None, memory_key_padding_mask: Optional[Tensor]=None, pos: Optional[Tensor]=None, query_pos: Optional[Tensor]=None):
q = k = self.with_pos_embed(tgt, query_pos)
tgt2 = self.self_attn(q, k, value=tgt, attn_mask=tgt_mask, key_padding_mask=tgt_key_padding_mask)[0]
tgt = (tgt + self.dropout1(tgt2))
tgt = self.norm1(tgt)
tgt2 = self.multihead_attn(query=self.with_pos_embed(tgt, query_pos), key=self.with_pos_embed(memory, pos), value=memory, attn_mask=memory_mask, key_padding_mask=memory_key_padding_mask)[0]
tgt = (tgt + self.dropout2(tgt2))
tgt = self.norm2(tgt)
tgt2 = self.linear2(self.dropout(self.activation(self.linear1(tgt))))
tgt = (tgt + self.dropout3(tgt2))
tgt = self.norm3(tgt)
return tgt
def forward_pre(self, tgt, memory, tgt_mask: Optional[Tensor]=None, memory_mask: Optional[Tensor]=None, tgt_key_padding_mask: Optional[Tensor]=None, memory_key_padding_mask: Optional[Tensor]=None, pos: Optional[Tensor]=None, query_pos: Optional[Tensor]=None):
tgt2 = self.norm1(tgt)
q = k = self.with_pos_embed(tgt2, query_pos)
tgt2 = self.self_attn(q, k, value=tgt2, attn_mask=tgt_mask, key_padding_mask=tgt_key_padding_mask)[0]
tgt = (tgt + self.dropout1(tgt2))
tgt2 = self.norm2(tgt)
tgt2 = self.multihead_attn(query=self.with_pos_embed(tgt2, query_pos), key=self.with_pos_embed(memory, pos), value=memory, attn_mask=memory_mask, key_padding_mask=memory_key_padding_mask)[0]
tgt = (tgt + self.dropout2(tgt2))
tgt2 = self.norm3(tgt)
tgt2 = self.linear2(self.dropout(self.activation(self.linear1(tgt2))))
tgt = (tgt + self.dropout3(tgt2))
return tgt
def forward(self, tgt, memory, tgt_mask: Optional[Tensor]=None, memory_mask: Optional[Tensor]=None, tgt_key_padding_mask: Optional[Tensor]=None, memory_key_padding_mask: Optional[Tensor]=None, pos: Optional[Tensor]=None, query_pos: Optional[Tensor]=None):
if self.normalize_before:
return self.forward_pre(tgt, memory, tgt_mask, memory_mask, tgt_key_padding_mask, memory_key_padding_mask, pos, query_pos)
return self.forward_post(tgt, memory, tgt_mask, memory_mask, tgt_key_padding_mask, memory_key_padding_mask, pos, query_pos) |
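# Minimal usage sketch, assuming PyTorch and the _get_activation_fn helper used
# above; shapes follow nn.MultiheadAttention's default (seq_len, batch, d_model)
# layout and are illustrative.
import torch
layer = TransformerDecoderLayer(d_model=256, nhead=8)
tgt = torch.randn(100, 2, 256)     # decoder queries
memory = torch.randn(50, 2, 256)   # encoder output
out = layer(tgt, memory)           # -> (100, 2, 256) |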
class Generator():
def __init__(self, cnt_round, cnt_gen, dic_path, dic_exp_conf, dic_agent_conf, dic_traffic_env_conf, best_round=None):
self.cnt_round = cnt_round
self.cnt_gen = cnt_gen
self.dic_exp_conf = dic_exp_conf
self.dic_path = dic_path
self.dic_agent_conf = copy.deepcopy(dic_agent_conf)
self.dic_traffic_env_conf = dic_traffic_env_conf
self.agents = ([None] * dic_traffic_env_conf['NUM_AGENTS'])
if self.dic_exp_conf['PRETRAIN']:
self.path_to_log = os.path.join(self.dic_path['PATH_TO_PRETRAIN_WORK_DIRECTORY'], 'train_round', ('round_' + str(self.cnt_round)), ('generator_' + str(self.cnt_gen)))
else:
self.path_to_log = os.path.join(self.dic_path['PATH_TO_WORK_DIRECTORY'], 'train_round', ('round_' + str(self.cnt_round)), ('generator_' + str(self.cnt_gen)))
if (not os.path.exists(self.path_to_log)):
os.makedirs(self.path_to_log)
self.env = DIC_ENVS[dic_traffic_env_conf['SIMULATOR_TYPE']](path_to_log=self.path_to_log, path_to_work_directory=self.dic_path['PATH_TO_WORK_DIRECTORY'], dic_traffic_env_conf=self.dic_traffic_env_conf)
self.env.reset()
if self.dic_exp_conf['PRETRAIN']:
self.agent_name = self.dic_exp_conf['PRETRAIN_MODEL_NAME']
self.agent = DIC_AGENTS[self.agent_name](dic_agent_conf=self.dic_agent_conf, dic_traffic_env_conf=self.dic_traffic_env_conf, dic_path=self.dic_path, cnt_round=self.cnt_round, best_round=best_round)
else:
start_time = time.time()
for i in range(dic_traffic_env_conf['NUM_AGENTS']):
agent_name = self.dic_exp_conf['MODEL_NAME']
if (agent_name == 'CoLight_Signal'):
agent = DIC_AGENTS[agent_name](dic_agent_conf=self.dic_agent_conf, dic_traffic_env_conf=self.dic_traffic_env_conf, dic_path=self.dic_path, cnt_round=self.cnt_round, best_round=best_round, inter_info=self.env.list_intersection, intersection_id=str(i))
else:
agent = DIC_AGENTS[agent_name](dic_agent_conf=self.dic_agent_conf, dic_traffic_env_conf=self.dic_traffic_env_conf, dic_path=self.dic_path, cnt_round=self.cnt_round, best_round=best_round, intersection_id=str(i))
self.agents[i] = agent
print('Create intersection agent time: ', (time.time() - start_time))
def generate(self):
reset_env_start_time = time.time()
done = False
state = self.env.reset()
step_num = 0
reset_env_time = (time.time() - reset_env_start_time)
running_start_time = time.time()
while ((not done) and (step_num < int((self.dic_exp_conf['RUN_COUNTS'] / self.dic_traffic_env_conf['MIN_ACTION_TIME'])))):
action_list = []
step_start_time = time.time()
for i in range(self.dic_traffic_env_conf['NUM_AGENTS']):
if (self.dic_exp_conf['MODEL_NAME'] in ['CoLight', 'GCN', 'SimpleDQNOne']):
one_state = state
if (self.dic_exp_conf['MODEL_NAME'] == 'CoLight'):
(action, _) = self.agents[i].choose_action(step_num, one_state)
elif (self.dic_exp_conf['MODEL_NAME'] == 'GCN'):
action = self.agents[i].choose_action(step_num, one_state)
elif True:
action = self.agents[i].choose_action(step_num, one_state)
else:
action = self.agents[i].choose_action_separate(step_num, one_state)
action_list = action
else:
one_state = state[i]
action = self.agents[i].choose_action(step_num, one_state)
action_list.append(action)
(next_state, reward, done, _) = self.env.step(action_list)
print('time: {0}, running_time: {1}'.format((self.env.get_current_time() - self.dic_traffic_env_conf['MIN_ACTION_TIME']), (time.time() - step_start_time)))
state = next_state
step_num += 1
running_time = (time.time() - running_start_time)
log_start_time = time.time()
print('start logging')
self.env.bulk_log_multi_process()
log_time = (time.time() - log_start_time)
self.env.end_sumo()
print('reset_env_time: ', reset_env_time)
print('running_time: ', running_time)
print('log_time: ', log_time) |
class DIAYN(SAC):
def __init__(self, base_kwargs, env, policy, discriminator, qf, vf, pool, plotter=None, lr=0.003, scale_entropy=1, discount=0.99, tau=0.01, num_skills=20, save_full_state=False, find_best_skill_interval=10, best_skill_n_rollouts=10, learn_p_z=False, include_actions=False, add_p_z=True):
Serializable.quick_init(self, locals())
super(SAC, self).__init__(**base_kwargs)
self._env = env
self._policy = policy
self._discriminator = discriminator
self._qf = qf
self._vf = vf
self._pool = pool
self._plotter = plotter
self._policy_lr = lr
self._discriminator_lr = lr
self._qf_lr = lr
self._vf_lr = lr
self._scale_entropy = scale_entropy
self._discount = discount
self._tau = tau
self._num_skills = num_skills
self._p_z = np.full(num_skills, (1.0 / num_skills))
self._find_best_skill_interval = find_best_skill_interval
self._best_skill_n_rollouts = best_skill_n_rollouts
self._learn_p_z = learn_p_z
self._save_full_state = save_full_state
self._include_actions = include_actions
self._add_p_z = add_p_z
self._Da = self._env.action_space.flat_dim
self._Do = self._env.observation_space.flat_dim
self._training_ops = list()
self._init_placeholders()
self._init_actor_update()
self._init_critic_update()
self._init_discriminator_update()
self._init_target_ops()
self._sess.run(tf.global_variables_initializer())
def _init_placeholders(self):
self._obs_pl = tf.placeholder(tf.float32, shape=[None, (self._Do + self._num_skills)], name='observation')
self._obs_next_pl = tf.placeholder(tf.float32, shape=[None, (self._Do + self._num_skills)], name='next_observation')
self._action_pl = tf.placeholder(tf.float32, shape=[None, self._Da], name='actions')
self._terminal_pl = tf.placeholder(tf.float32, shape=[None], name='terminals')
self._p_z_pl = tf.placeholder(tf.float32, shape=[self._num_skills], name='p_z')
def _sample_z(self):
return np.random.choice(self._num_skills, p=self._p_z)
def _split_obs(self):
return tf.split(self._obs_pl, [self._Do, self._num_skills], 1)
def _init_critic_update(self):
self._qf_t = self._qf.get_output_for(self._obs_pl, self._action_pl, reuse=True)
(obs, z_one_hot) = self._split_obs()
if self._include_actions:
logits = self._discriminator.get_output_for(obs, self._action_pl, reuse=True)
else:
logits = self._discriminator.get_output_for(obs, reuse=True)
reward_pl = ((- 1) * tf.nn.softmax_cross_entropy_with_logits(labels=z_one_hot, logits=logits))
reward_pl = tf.check_numerics(reward_pl, 'Check numerics (1): reward_pl')
p_z = tf.reduce_sum((self._p_z_pl * z_one_hot), axis=1)
log_p_z = tf.log((p_z + EPS))
self._log_p_z = log_p_z
if self._add_p_z:
reward_pl -= log_p_z
reward_pl = tf.check_numerics(reward_pl, 'Check numerics: reward_pl')
self._reward_pl = reward_pl
with tf.variable_scope('target'):
vf_next_target_t = self._vf.get_output_for(self._obs_next_pl)
self._vf_target_params = self._vf.get_params_internal()
ys = tf.stop_gradient((reward_pl + (((1 - self._terminal_pl) * self._discount) * vf_next_target_t)))
self._td_loss_t = (0.5 * tf.reduce_mean(((ys - self._qf_t) ** 2)))
qf_train_op = tf.train.AdamOptimizer(self._qf_lr).minimize(loss=self._td_loss_t, var_list=self._qf.get_params_internal())
self._training_ops.append(qf_train_op)
def _init_actor_update(self):
self._policy_dist = self._policy.get_distribution_for(self._obs_pl, reuse=True)
log_pi_t = self._policy_dist.log_p_t
self._vf_t = self._vf.get_output_for(self._obs_pl, reuse=True)
self._vf_params = self._vf.get_params_internal()
log_target_t = self._qf.get_output_for(self._obs_pl, tf.tanh(self._policy_dist.x_t), reuse=True)
corr = self._squash_correction(self._policy_dist.x_t)
corr = tf.check_numerics(corr, 'Check numerics: corr')
scaled_log_pi = (self._scale_entropy * (log_pi_t - corr))
self._kl_surrogate_loss_t = tf.reduce_mean((log_pi_t * tf.stop_gradient(((scaled_log_pi - log_target_t) + self._vf_t))))
self._vf_loss_t = (0.5 * tf.reduce_mean(((self._vf_t - tf.stop_gradient((log_target_t - scaled_log_pi))) ** 2)))
policy_train_op = tf.train.AdamOptimizer(self._policy_lr).minimize(loss=(self._kl_surrogate_loss_t + self._policy_dist.reg_loss_t), var_list=self._policy.get_params_internal())
vf_train_op = tf.train.AdamOptimizer(self._vf_lr).minimize(loss=self._vf_loss_t, var_list=self._vf_params)
self._training_ops.append(policy_train_op)
self._training_ops.append(vf_train_op)
def _init_discriminator_update(self):
(obs, z_one_hot) = self._split_obs()
if self._include_actions:
logits = self._discriminator.get_output_for(obs, self._action_pl, reuse=True)
else:
logits = self._discriminator.get_output_for(obs, reuse=True)
self._discriminator_loss = tf.reduce_mean(tf.nn.softmax_cross_entropy_with_logits(labels=z_one_hot, logits=logits))
optimizer = tf.train.AdamOptimizer(self._discriminator_lr)
discriminator_train_op = optimizer.minimize(loss=self._discriminator_loss, var_list=self._discriminator.get_params_internal())
self._training_ops.append(discriminator_train_op)
def _get_feed_dict(self, batch):
feed_dict = {self._obs_pl: batch['observations'], self._action_pl: batch['actions'], self._obs_next_pl: batch['next_observations'], self._terminal_pl: batch['terminals'], self._p_z_pl: self._p_z}
return feed_dict
def _get_best_single_option_policy(self):
best_returns = float('-inf')
best_z = None
for z in range(self._num_skills):
fixed_z_policy = FixedOptionPolicy(self._policy, self._num_skills, z)
paths = rollouts(self._eval_env, fixed_z_policy, self._max_path_length, self._best_skill_n_rollouts, render=False)
total_returns = np.mean([path['rewards'].sum() for path in paths])
if (total_returns > best_returns):
best_returns = total_returns
best_z = z
return FixedOptionPolicy(self._policy, self._num_skills, best_z)
def _save_traces(self, filename):
utils._make_dir(filename)
obs_vec = []
for z in range(self._num_skills):
fixed_z_policy = FixedOptionPolicy(self._policy, self._num_skills, z)
paths = rollouts(self._eval_env, fixed_z_policy, self._max_path_length, n_paths=3, render=False)
obs_vec.append([path['observations'].tolist() for path in paths])
with open(filename, 'w') as f:
json.dump(obs_vec, f)
def _evaluate(self, epoch):
if (self._eval_n_episodes < 1):
return
if ((epoch % self._find_best_skill_interval) == 0):
self._single_option_policy = self._get_best_single_option_policy()
for (policy, policy_name) in [(self._single_option_policy, 'best_single_option_policy')]:
with logger.tabular_prefix((policy_name + '/')), logger.prefix((policy_name + '/')):
with self._policy.deterministic(self._eval_deterministic):
if self._eval_render:
paths = rollouts(self._eval_env, policy, self._max_path_length, self._eval_n_episodes, render=True, render_mode='rgb_array')
else:
paths = rollouts(self._eval_env, policy, self._max_path_length, self._eval_n_episodes)
total_returns = [path['rewards'].sum() for path in paths]
episode_lengths = [len(p['rewards']) for p in paths]
logger.record_tabular('return-average', np.mean(total_returns))
logger.record_tabular('return-min', np.min(total_returns))
logger.record_tabular('return-max', np.max(total_returns))
logger.record_tabular('return-std', np.std(total_returns))
logger.record_tabular('episode-length-avg', np.mean(episode_lengths))
logger.record_tabular('episode-length-min', np.min(episode_lengths))
logger.record_tabular('episode-length-max', np.max(episode_lengths))
logger.record_tabular('episode-length-std', np.std(episode_lengths))
self._eval_env.log_diagnostics(paths)
batch = self._pool.random_batch(self._batch_size)
self.log_diagnostics(batch)
def _train(self, env, policy, pool):
self._init_training(env, policy, pool)
with self._sess.as_default():
observation = env.reset()
policy.reset()
log_p_z_episode = []
path_length = 0
path_return = 0
last_path_return = 0
max_path_return = (- np.inf)
n_episodes = 0
if self._learn_p_z:
log_p_z_list = [deque(maxlen=self._max_path_length) for _ in range(self._num_skills)]
gt.rename_root('RLAlgorithm')
gt.reset()
gt.set_def_unique(False)
for epoch in gt.timed_for(range((self._n_epochs + 1)), save_itrs=True):
logger.push_prefix(('Epoch #%d | ' % epoch))
path_length_list = []
z = self._sample_z()
aug_obs = utils.concat_obs_z(observation, z, self._num_skills)
for t in range(self._epoch_length):
iteration = (t + (epoch * self._epoch_length))
(action, _) = policy.get_action(aug_obs)
if self._learn_p_z:
(obs, _) = utils.split_aug_obs(aug_obs, self._num_skills)
feed_dict = {self._discriminator._obs_pl: obs[None], self._discriminator._action_pl: action[None]}
logits = tf_utils.get_default_session().run(self._discriminator._output_t, feed_dict)[0]
log_p_z = np.log(utils._softmax(logits)[z])
if self._learn_p_z:
log_p_z_list[z].append(log_p_z)
(next_ob, reward, terminal, info) = env.step(action)
aug_next_ob = utils.concat_obs_z(next_ob, z, self._num_skills)
path_length += 1
path_return += reward
self._pool.add_sample(aug_obs, action, reward, terminal, aug_next_ob)
if (terminal or (path_length >= self._max_path_length)):
path_length_list.append(path_length)
observation = env.reset()
policy.reset()
log_p_z_episode = []
path_length = 0
max_path_return = max(max_path_return, path_return)
last_path_return = path_return
path_return = 0
n_episodes += 1
else:
aug_obs = aug_next_ob
gt.stamp('sample')
if (self._pool.size >= self._min_pool_size):
for i in range(self._n_train_repeat):
batch = self._pool.random_batch(self._batch_size)
self._do_training(iteration, batch)
gt.stamp('train')
if self._learn_p_z:
print('learning p(z)')
for z in range(self._num_skills):
if log_p_z_list[z]:
print(('\t skill = %d, min=%.2f, max=%.2f, mean=%.2f, len=%d' % (z, np.min(log_p_z_list[z]), np.max(log_p_z_list[z]), np.mean(log_p_z_list[z]), len(log_p_z_list[z]))))
log_p_z = [(np.mean(log_p_z) if log_p_z else np.log((1.0 / self._num_skills))) for log_p_z in log_p_z_list]
print(('log_p_z: %s' % log_p_z))
self._p_z = utils._softmax(log_p_z)
self._evaluate(epoch)
params = self.get_snapshot(epoch)
logger.save_itr_params(epoch, params)
times_itrs = gt.get_times().stamps.itrs
eval_time = (times_itrs['eval'][(- 1)] if (epoch > 1) else 0)
total_time = gt.get_times().total
logger.record_tabular('time-train', times_itrs['train'][(- 1)])
logger.record_tabular('time-eval', eval_time)
logger.record_tabular('time-sample', times_itrs['sample'][(- 1)])
logger.record_tabular('time-total', total_time)
logger.record_tabular('epoch', epoch)
logger.record_tabular('episodes', n_episodes)
logger.record_tabular('max-path-return', max_path_return)
logger.record_tabular('last-path-return', last_path_return)
logger.record_tabular('pool-size', self._pool.size)
logger.record_tabular('path-length', np.mean(path_length_list))
logger.dump_tabular(with_prefix=False)
logger.pop_prefix()
gt.stamp('eval')
env.terminate()
def log_diagnostics(self, batch):
feed_dict = self._get_feed_dict(batch)
log_pairs = [('qf', self._qf_t), ('vf', self._vf_t), ('bellman-error', self._td_loss_t), ('discriminator-loss', self._discriminator_loss), ('vf-loss', self._vf_loss_t), ('kl-surrogate-loss', self._kl_surrogate_loss_t), ('policy-reg-loss', self._policy_dist.reg_loss_t), ('discriminator_reward', self._reward_pl), ('log_p_z', self._log_p_z)]
log_ops = [op for (name, op) in log_pairs]
log_names = [name for (name, op) in log_pairs]
log_vals = self._sess.run(log_ops, feed_dict)
for (name, val) in zip(log_names, log_vals):
if np.isscalar(val):
logger.record_tabular(name, val)
else:
logger.record_tabular(('%s-avg' % name), np.mean(val))
logger.record_tabular(('%s-min' % name), np.min(val))
logger.record_tabular(('%s-max' % name), np.max(val))
logger.record_tabular(('%s-std' % name), np.std(val))
logger.record_tabular('z-entropy', scipy.stats.entropy(self._p_z))
self._policy.log_diagnostics(batch)
if self._plotter:
self._plotter.draw()
def get_snapshot(self, epoch):
if self._save_full_state:
return dict(epoch=epoch, algo=self)
else:
return dict(epoch=epoch, policy=self._policy, qf=self._qf, vf=self._vf, env=self._env, discriminator=self._discriminator)
def __getstate__(self):
d = Serializable.__getstate__(self)
d.update({'qf-params': self._qf.get_param_values(), 'vf-params': self._vf.get_param_values(), 'discriminator-params': self._discriminator.get_param_values(), 'policy-params': self._policy.get_param_values(), 'pool': self._pool.__getstate__(), 'env': self._env.__getstate__()})
return d
def __setstate__(self, d):
Serializable.__setstate__(self, d)
self._qf.set_param_values(d['qf-params'])
self._vf.set_param_values(d['vf-params'])
self._discriminator.set_param_values(d['discriminator-params'])
self._policy.set_param_values(d['policy-params'])
self._pool.__setstate__(d['pool'])
self._env.__setstate__(d['env']) |
@pytest.mark.requires_internet
def test_unknown_dynamic_feature(hatch, helpers, temp_dir, config_file):
config_file.model.template.plugins['default']['tests'] = False
config_file.save()
project_name = 'My.App'
with temp_dir.as_cwd():
result = hatch('new', project_name)
assert (result.exit_code == 0), result.output
with temp_dir.as_cwd():
result = hatch('new', f'{project_name}1')
assert (result.exit_code == 0), result.output
project_path = (temp_dir / 'my-app')
data_path = (temp_dir / 'data')
data_path.mkdir()
project = Project(project_path)
config = dict(project.raw_config)
config['build-system']['requires'].append(f'my-app1 @ {path_to_uri(project_path).lower()}/../my-app1')
config['project']['dynamic'].append('optional-dependencies')
config['tool']['hatch']['metadata'] = {'hooks': {'custom': {}}}
project.save_config(config)
helpers.update_project_environment(project, 'default', {'features': ['foo'], 'post-install-commands': ['python -c "with open(\'test.txt\', \'w\') as f: f.write(\'content\')"'], **project.config.envs['default']})
helpers.update_project_environment(project, 'test', {})
build_script = (project_path / DEFAULT_BUILD_SCRIPT)
build_script.write_text(helpers.dedent("\n from hatchling.metadata.plugin.interface import MetadataHookInterface\n\n class CustomHook(MetadataHookInterface):\n def update(self, metadata):\n metadata['optional-dependencies'] = {'bar': ['binary']}\n "))
with project_path.as_cwd(env_vars={ConfigEnvVars.DATA: str(data_path)}), pytest.raises(ValueError, match='Feature `foo` of field `tool.hatch.envs.test.features` is not defined in the dynamic field `project.optional-dependencies`'):
hatch('env', 'create', 'test') |
def simplify_hex(s):
if ((s[1] == s[2]) and (s[3] == s[4]) and (s[5] == s[6]) and ((len(s) == 7) or (s[7] == s[8]))):
s = ((((s[0] + s[1]) + s[3]) + s[5]) + (s[7] if (len(s) == 9) else ''))
if ((len(s) == 9) and (s[(- 2):].lower() == 'ff')):
s = s[:7]
elif ((len(s) == 5) and (s[(- 1):].lower() == 'f')):
s = s[:4]
return s |
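A minimal usage sketch for simplify_hex above; the inputs and expected outputs are illustrative, derived from the pair-collapsing and alpha-stripping logic rather than quoted from any original project.

print(simplify_hex('#aabbccff'))  # '#abc'    - pairs collapse to short form, then the trailing 'f' alpha is dropped
print(simplify_hex('#aabbcc'))    # '#abc'    - pairs collapse, no alpha channel present
print(simplify_hex('#a1b2c3'))    # '#a1b2c3' - unchanged because the channel pairs differ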
def loadThemes():
def loadThemesFromDir(dname, isBuiltin=False):
if (not os.path.isdir(dname)):
return
for fname in [fname for fname in os.listdir(dname) if fname.endswith('.theme')]:
try:
theme = ssdf.load(os.path.join(dname, fname))
assert (theme.name.lower() == fname.lower().split('.')[0]), 'Theme name does not match filename'
theme.data = {key.replace('_', '.'): val for (key, val) in theme.data.items()}
theme['builtin'] = isBuiltin
pyzo.themes[theme.name.lower()] = theme
print(('Loaded theme %r' % theme.name))
except Exception as ex:
print(('Warning ! Error while reading %s: %s' % (fname, ex)))
loadThemesFromDir(os.path.join(pyzo.pyzoDir, 'resources', 'themes'), True)
loadThemesFromDir(os.path.join(pyzo.appDataDir, 'themes')) |
@pytest.mark.skipif((not HAVE_DEPS_FOR_RESOURCE_ESTIMATES), reason='pyscf and/or jax not installed.')
def test_reiher_sf_lambda():
RANK = 200
NAME = path.join(path.dirname(__file__), '../integrals/eri_reiher.h5')
(_, reiher_mf) = load_casfile_to_pyscf(NAME, num_alpha=27, num_beta=27)
(eri_rr, sf_factors) = sf.factorize(reiher_mf._eri, RANK)
lambda_tot = sf.compute_lambda(reiher_mf, sf_factors)
assert ((eri_rr.shape[0] * 2) == 108)
assert np.isclose(lambda_tot, 4258.0) |
def run_clang_format(src, dst, exe, verbose, inplace):
dstdir = os.path.dirname(dst)
if (not os.path.exists(dstdir)):
os.makedirs(dstdir)
if (src == dst):
cmd = ('%s -i %s' % (exe, src))
else:
cmd = ('%s %s > %s' % (exe, src, dst))
try:
subprocess.check_call(cmd, shell=True)
except subprocess.CalledProcessError:
print('Failed to run clang-format! Maybe your env is not proper?')
raise
if inplace:
cmd = ('diff -q %s %s >/dev/null' % (src, dst))
else:
cmd = ('diff %s %s' % (src, dst))
try:
subprocess.check_call(cmd, shell=True)
if verbose:
print(('%s passed' % os.path.basename(src)))
except subprocess.CalledProcessError:
print(("%s failed! 'diff %s %s' will show formatting violations!" % (os.path.basename(src), src, dst)))
return False
return True |
def get_j_bot(x, w, l, s, d, alpha, bs, V, minority, T):
harg = ((x - w) / l)
cosh_harg = np.cosh(harg)
sinh_harg = np.sinh(harg)
lsod = ((l * s) / d)
j_bottom_light = (((((q * bs) * alpha) * l) / (((alpha ** 2) * (l ** 2)) - 1)) * ((l * alpha) - ((((lsod * cosh_harg) + sinh_harg) - ((lsod - (l * alpha)) * np.exp(((- alpha) * (x - w))))) / (cosh_harg + (lsod * sinh_harg)))))
J_bottom_dark = (((((q * d) * minority) / l) * (np.exp((((q * V) / kb) / T)) - 1)) * (((lsod * cosh_harg) + sinh_harg) / ((lsod * sinh_harg) + cosh_harg)))
return (j_bottom_light, J_bottom_dark) |
def main():
parser = argparse.ArgumentParser()
parser.add_argument('files', nargs='+')
parser.add_argument('-g', '--github-mode', help='Produce output as a GitHub comment', action='store_true')
opts = parser.parse_args()
missing_entries = list(itertools.chain.from_iterable(map(find_missing_entries, opts.files)))
if (not missing_entries):
print(f"All entries translated, horray!{(' :tada:' if opts.github_mode else '')}")
else:
missing_entries.sort(key=(lambda entry: (entry.file, entry.line)))
print('Entries missing translation, details follow:\n')
print(tabulate.tabulate(missing_entries, headers=['Reason', 'File', 'Line'], tablefmt='github')) |
def generate_methods_table(cl: ClassIR, name: str, emitter: Emitter) -> None:
emitter.emit_line(f'static PyMethodDef {name}[] = {{')
for fn in cl.methods.values():
if (fn.decl.is_prop_setter or fn.decl.is_prop_getter):
continue
emitter.emit_line(f'{{"{fn.name}",')
emitter.emit_line(f' (PyCFunction){PREFIX}{fn.cname(emitter.names)},')
flags = ['METH_FASTCALL', 'METH_KEYWORDS']
if (fn.decl.kind == FUNC_STATICMETHOD):
flags.append('METH_STATIC')
elif (fn.decl.kind == FUNC_CLASSMETHOD):
flags.append('METH_CLASS')
emitter.emit_line(' {}, NULL}},'.format(' | '.join(flags)))
if ((not cl.has_method('__setstate__')) and (not cl.has_method('__getstate__'))):
emitter.emit_lines('{"__setstate__", (PyCFunction)CPyPickle_SetState, METH_O, NULL},', '{"__getstate__", (PyCFunction)CPyPickle_GetState, METH_NOARGS, NULL},')
emitter.emit_line('{NULL} /* Sentinel */')
emitter.emit_line('};') |
class MemcacheRateLimitBackend(RateLimitBackend):
def __init__(self, memcache: MonitoredMemcacheConnection, prefix: str='rl:'):
self.memcache = memcache
self.prefix = prefix
def consume(self, key: str, amount: int, allowance: int, interval: int) -> bool:
current_bucket = _get_current_bucket(interval)
key = ((self.prefix + key) + current_bucket)
ttl = (interval * 2)
self.memcache.add(key, 0, expire=ttl)
count = (self.memcache.incr(key, amount) or amount)
return (count <= allowance) |
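A hedged usage sketch for the backend above; `memcache_connection` stands in for a MonitoredMemcacheConnection supplied by the host application, and the limits are made up for illustration.

backend = MemcacheRateLimitBackend(memcache_connection, prefix='rl:')
# Allow at most 100 units per 60-second window for this key; consume() returns False once exceeded.
if not backend.consume('user:1234', amount=1, allowance=100, interval=60):
    raise RuntimeError('rate limit exceeded for user:1234')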
def test_call_with_keyboard_interrupt(tmp_path: Path, tmp_venv: VirtualEnv, mocker: MockerFixture) -> None:
mocker.patch('subprocess.check_call', side_effect=KeyboardInterrupt())
kwargs = {'call': True}
with pytest.raises(KeyboardInterrupt):
tmp_venv.run('python', '-', **kwargs)
subprocess.check_call.assert_called_once() |
@pytest.mark.django_db
def test_add_custom_item(conference_factory, day_factory, slot_factory, room, admin_graphql_client):
conference = conference_factory(start=datetime(2020, 4, 2, tzinfo=pytz.UTC), end=datetime(2020, 4, 2, tzinfo=pytz.UTC))
day = day_factory(conference=conference, day=date(2020, 4, 2))
slot = slot_factory(day=day, hour=time(8, 45), duration=60)
resp = admin_graphql_client.query('\n mutation($input: UpdateOrCreateSlotItemInput!) {\n updateOrCreateSlotItem(input: $input) {\n ... on UpdateOrCreateSlotItemResult {\n updatedSlots {\n items {\n type\n title\n rooms {\n id\n }\n }\n }\n }\n }\n }\n ', variables={'input': {'slotId': slot.id, 'title': 'Custom slot', 'rooms': [room.id]}})
assert ('errors' not in resp)
assert (resp['data']['updateOrCreateSlotItem']['updatedSlots'][0]['items'] == [{'title': 'Custom slot', 'type': 'custom', 'rooms': [{'id': str(room.id)}]}]) |
class MPM(SPM):
def __init__(self, options=None, name='Many-Particle Model', build=True):
options = (options or {})
if (('particle size' in options) and (options['particle size'] != 'distribution')):
raise pybamm.OptionError("particle size must be 'distribution' for MPM not '{}'".format(options['particle size']))
elif (('surface form' in options) and (options['surface form'] == 'false')):
raise pybamm.OptionError("surface form must be 'algebraic' or 'differential' for MPM not 'false'")
else:
surface_form = options.get('surface form', 'algebraic')
options.update({'particle size': 'distribution', 'surface form': surface_form})
super().__init__(options, name, build)
pybamm.citations.register('Kirk2020')
pybamm.citations.register('Kirk2021')
def default_parameter_values(self):
default_params = super().default_parameter_values
default_params = pybamm.get_size_distribution_parameters(default_params, working_electrode=self.options['working electrode'])
return default_params |
class SmilesFeaturizer():
def __init__(self, atm_featurizer: AtmFeaturizer):
self.atm_featurizer = atm_featurizer
self.bond_featurizer = BondFeaturizer()
def smi_to_feats(self, smi: str):
mol = Chem.MolFromSmiles(smi)
atm_feats = torch.stack([self.atm_featurizer.atom_to_feat(atm, mol, i) for (i, atm) in enumerate(mol.GetAtoms())])
(bonds, bond_features) = zip(*[self.bond_featurizer.bond_to_feat(bnd) for bnd in mol.GetBonds()])
bonds = torch.stack(bonds, dim=1)
bond_features = torch.stack(bond_features)
return (atm_feats, bonds, bond_features) |
@st.composite
def version(draw, min_digits=1, max_digits=None, min_version=None, max_version=None):
min_version_digits = (None if (min_version is None) else len(min_version.split('.')))
max_version_digits = (None if (max_version is None) else len(max_version.split('.')))
if (min_digits < 1):
raise ValueError('Minimum digits must be >= 1')
if (max_digits is None):
max_digits = max(filter(None, [(min_digits + 10), min_version_digits, max_version_digits]))
if (min_digits > max_digits):
raise ValueError('Maximum digits must be greater than the minimum digits.')
if ((min_version_digits is not None) and (min_version_digits > max_digits)):
raise ValueError('Cannot have a minimum version with more digits than the maximum number of digits.')
if ((max_version_digits is not None) and (max_version_digits > max_digits)):
raise ValueError('Cannot have a maximum version with more digits than the maximum number of digits.')
num_digits = draw(st.integers(min_value=min_digits, max_value=max_digits))
if (min_version is not None):
min_version = [int(i) for i in min_version.split('.')]
else:
min_version = [0]
min_version += [0 for _ in range((num_digits - len(min_version)))]
if (max_version is not None):
max_version = [int(i) for i in max_version.split('.')]
max_version += [0 for _ in range((num_digits - len(max_version)))]
else:
max_version = ([INF] * num_digits)
if (min_version > max_version):
raise ValueError('The minimum version *MUST* be less than the maximum version.')
version_strategies = [st.tuples(*[st.just(i) for i in min_version])]
while min_version:
incrementing_part = min_version.pop()
if ((len(min_version) + 1) > num_digits):
continue
if ((min_version + [(incrementing_part + 1)]) > max_version[:(len(min_version) + 1)]):
break
max_incrementing_part = max_version[len(min_version)]
parts = [st.just(i) for i in min_version]
if ((min_version + [(incrementing_part + 1)]) < max_version[:(len(min_version) + 1)]):
if ((max_incrementing_part is INF) or (min_version != max_version[:len(min_version)])):
max_incr_value = None
else:
max_incr_value = (max_incrementing_part - 1)
subparts = [st.integers(min_value=(incrementing_part + 1), max_value=max_incr_value)]
subparts += [st.integers(min_value=0) for _ in range(((num_digits - len(parts)) - 1))]
version_strategies.append(st.tuples(*(parts + subparts)))
if ((max_incrementing_part is not INF) and (min_version == max_version[:len(min_version)])):
parts += [st.just(max_incrementing_part)]
parts += [st.integers(min_value=0, max_value=_none_for_inf(max_version[i])) for i in range(len(parts), num_digits)]
version_strategies.append(st.tuples(*parts))
version = draw(st.one_of(version_strategies))
return '.'.join((str(i) for i in version)) |
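A short sketch of how a composite strategy such as version() is typically consumed with Hypothesis, assuming the decorated definition above is importable; the bounds are illustrative only.

from hypothesis import given

@given(version(min_digits=2, max_digits=4, min_version='1.0', max_version='3.0'))
def test_generated_versions_are_dotted_numbers(ver):
    parts = ver.split('.')
    assert 2 <= len(parts) <= 4
    assert all(part.isdigit() for part in parts)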
class STDataArguments():
train_file: str = dataclasses.field(metadata={'help': 'A csv or a json file containing the training data.'})
infer_file: str = dataclasses.field(metadata={'help': 'A csv or a json file containing the data to predict on.'})
eval_file: Optional[str] = dataclasses.field(default=None, metadata={'help': 'A csv or a json file containing the validation data.'})
task_name: Optional[str] = dataclasses.field(default=None, metadata={'help': 'The name of the task to train on.'})
label_list: Optional[List[str]] = dataclasses.field(default=None, metadata={'help': 'The list of labels for the task.'}) |
def test_mros() -> None:
failed_messages = []
for (module_name, module_value) in inspect.getmembers(gitlab.v4.objects):
if (not inspect.ismodule(module_value)):
continue
for (class_name, class_value) in inspect.getmembers(module_value):
if (not inspect.isclass(class_value)):
continue
if (class_value.__module__ == 'gitlab.base'):
continue
mro = class_value.mro()
has_base = False
for (count, obj) in enumerate(mro, start=1):
if (obj.__module__ == 'gitlab.base'):
has_base = True
base_classname = obj.__name__
if has_base:
filename = inspect.getfile(class_value)
if (mro[(- 2)].__module__ != 'gitlab.base'):
failed_messages.append(f'class definition for {class_name!r} in file {filename!r} must have {base_classname!r} as the last class in the class definition')
failed_msg = '\n'.join(failed_messages)
assert (not failed_messages), failed_msg |
class BUCCBitextMining(AbsTaskBitextMining, CrosslingualTask):
def description(self):
return {'name': 'BUCC', 'hf_hub_name': 'mteb/bucc-bitext-mining', 'description': 'BUCC bitext mining dataset', 'reference': '', 'type': 'BitextMining', 'category': 's2s', 'eval_splits': ['test'], 'eval_langs': _LANGUAGES, 'main_score': 'f1', 'revision': 'df32196a32af33b075a01d0e7c51e252'} |
class DatasetIterater(Dataset):
def __init__(self, src, tgt, attention_mask):
self.src = src
self.tgt = tgt
self.attention_mask = attention_mask
def __getitem__(self, index):
return (self.src[index], self.attention_mask[index], self.tgt[index])
def __len__(self):
return len(self.src) |
class HybridModel(torch.nn.Module):
def __init__(self, remote_emb_module, device):
super(HybridModel, self).__init__()
self.remote_emb_module = remote_emb_module
self.fc = DDP(torch.nn.Linear(16, 8).cuda(device), device_ids=[device])
self.device = device
def forward(self, indices, offsets):
emb_lookup = self.remote_emb_module.forward(indices, offsets)
return self.fc(emb_lookup.cuda(self.device)) |
class FormsDict(MultiDict):
input_encoding = 'utf8'
recode_unicode = True
def _fix(self, s, encoding=None):
if (isinstance(s, unicode) and self.recode_unicode):
return s.encode('latin1').decode((encoding or self.input_encoding))
elif isinstance(s, bytes):
return s.decode((encoding or self.input_encoding))
else:
return s
def decode(self, encoding=None):
copy = FormsDict()
enc = copy.input_encoding = (encoding or self.input_encoding)
copy.recode_unicode = False
for (key, value) in self.allitems():
copy.append(self._fix(key, enc), self._fix(value, enc))
return copy
def getunicode(self, name, default=None, encoding=None):
try:
return self._fix(self[name], encoding)
except (UnicodeError, KeyError):
return default
def __getattr__(self, name, default=unicode()):
if (name.startswith('__') and name.endswith('__')):
return super(FormsDict, self).__getattr__(name)
return self.getunicode(name, default=default) |
class TSynchronizedTextSpec(TestCase):
def test_write(self):
s = SynchronizedTextSpec('name')
f = Frame()
values = [(u'A', 100), (u'axy', 0), (u'', 42), (u'', 0)]
f.encoding = 1
self.assertEqual(s.read(None, f, s.write(None, f, values)), (values, b''))
data = s.write(None, f, [(u'A', 100)])
self.assertEquals(data, b'\xff\xfeA\x00\x00\x00\x00\x00\x00d')
f.encoding = 2
self.assertEqual(s.read(None, f, s.write(None, f, values)), (values, b''))
self.assertEquals(s.write(None, f, [(u'A', 100)]), b'\x00A\x00\x00\x00\x00\x00d')
f.encoding = 3
self.assertEqual(s.read(None, f, s.write(None, f, values)), (values, b''))
self.assertEquals(s.write(None, f, [(u'A', 100)]), b'A\x00\x00\x00\x00d') |
def just_class_with_type_takes_self(tup):
nested_cl = tup[1][0]
default = attr.Factory((lambda _: nested_cl()), takes_self=True)
combined_attrs = list(tup[0])
combined_attrs.append((attr.ib(default=default, type=nested_cl), st.just(nested_cl())))
return _create_hyp_class(combined_attrs) |
class PolicyNet(nn.Module):
def __init__(self):
super(PolicyNet, self).__init__()
self.fc1 = nn.Linear(4, 24)
self.fc2 = nn.Linear(24, 36)
self.fc3 = nn.Linear(36, 1)
def forward(self, x):
x = F.relu(self.fc1(x))
x = F.relu(self.fc2(x))
x = F.sigmoid(self.fc3(x))
return x |
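A hedged usage sketch for PolicyNet above: it maps a 4-dimensional state (CartPole-style) to a single action probability via the sigmoid output.

import torch

policy = PolicyNet()
state = torch.randn(1, 4)                           # one observation with 4 features
action_prob = policy(state)                         # shape (1, 1), values in (0, 1)
action = int(torch.bernoulli(action_prob).item())   # sample a binary action from that probability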
def test_single_marker_union_with_multi_union_is_union_of_single_markers() -> None:
m = parse_marker('python_version >= "3.6"')
union = m.union(parse_marker('python_version < "3.6" and sys_platform == "win32" or python_version < "3.6" and sys_platform == "linux"'))
assert (str(union) == 'sys_platform == "win32" or sys_platform == "linux" or python_version >= "3.6"') |
def _exit_cleanup():
for cache in USED_CACHES:
target = (cache.processes / os.path.basename(cache.process_pool.path))
try:
cache.lock.__enter__()
except Exception:
continue
else:
try:
if os.path.exists(target):
shutil.rmtree(target)
cache.garbage_collection()
finally:
cache.lock.__exit__() |
class ChatEventFilter(Object):
def __init__(self, *, new_restrictions: bool=False, new_privileges: bool=False, new_members: bool=False, chat_info: bool=False, chat_settings: bool=False, invite_links: bool=False, deleted_messages: bool=False, edited_messages: bool=False, pinned_messages: bool=False, leaving_members: bool=False, video_chats: bool=False):
super().__init__()
self.new_restrictions = new_restrictions
self.new_privileges = new_privileges
self.new_members = new_members
self.chat_info = chat_info
self.chat_settings = chat_settings
self.invite_links = invite_links
self.deleted_messages = deleted_messages
self.edited_messages = edited_messages
self.pinned_messages = pinned_messages
self.leaving_members = leaving_members
self.video_chats = video_chats
def write(self) -> 'raw.base.ChannelAdminLogEventsFilter':
join = False
leave = False
invite = False
ban = False
unban = False
kick = False
unkick = False
promote = False
demote = False
info = False
settings = False
pinned = False
edit = False
delete = False
group_call = False
invites = False
if self.new_restrictions:
ban = True
unban = True
kick = True
unkick = True
if self.new_privileges:
promote = True
demote = True
if self.new_members:
join = True
invite = True
if self.chat_info:
info = True
if self.chat_settings:
settings = True
if self.invite_links:
invites = True
if self.deleted_messages:
delete = True
if self.edited_messages:
edit = True
if self.pinned_messages:
pinned = True
if self.leaving_members:
leave = True
if self.video_chats:
group_call = True
return raw.types.ChannelAdminLogEventsFilter(join=join, leave=leave, invite=invite, ban=ban, unban=unban, kick=kick, unkick=unkick, promote=promote, demote=demote, info=info, settings=settings, pinned=pinned, edit=edit, delete=delete, group_call=group_call, invites=invites) |
def test_pattern_commonconc_suffix() -> None:
assert (str(parse('a|bc')._commonconc(suffix=True)) == '')
assert (str(parse('aa|bca')._commonconc(suffix=True)) == 'a')
assert (str(parse('xyza|abca|a')._commonconc(suffix=True)) == 'a')
assert (str(parse('f{2,3}c|fc')._commonconc(suffix=True)) == 'fc')
assert (str(parse('aa')._commonconc(suffix=True)) == 'aa') |
def prepare_datasets(config):
data = {}
if (config['data_type'] == 'network'):
(adj, features, labels, idx_train, idx_val, idx_test) = network_data_utils.load_data(config['data_dir'], config['dataset_name'], knn_size=config.get('input_graph_knn_size', None), epsilon=config.get('input_graph_epsilon', None), knn_metric=config.get('knn_metric', 'cosine'), prob_del_edge=config.get('prob_del_edge', None), prob_add_edge=config.get('prob_add_edge', None), seed=config.get('data_seed', config['seed']), sparse_init_adj=config.get('sparse_init_adj', False))
device = config['device']
data = {'adj': (adj.to(device) if device else adj), 'features': (features.to(device) if device else features), 'labels': (labels.to(device) if device else labels), 'idx_train': (idx_train.to(device) if device else idx_train), 'idx_val': (idx_val.to(device) if device else idx_val), 'idx_test': (idx_test.to(device) if device else idx_test)}
elif (config['data_type'] == 'dgl'):
(adj, features, labels, idx_train, idx_val, idx_test) = network_data_utils.load_dgl_graph(config['dgl_graph'], knn_size=config.get('input_graph_knn_size', None), epsilon=config.get('input_graph_epsilon', None), knn_metric=config.get('knn_metric', 'cosine'), prob_del_edge=config.get('prob_del_edge', None), prob_add_edge=config.get('prob_add_edge', None), seed=config.get('data_seed', config['seed']), sparse_init_adj=config.get('sparse_init_adj', False))
device = config['device']
data = {'adj': (adj.to(device) if device else adj), 'features': (features.to(device) if device else features), 'labels': (labels.to(device) if device else labels), 'idx_train': (idx_train.to(device) if device else idx_train), 'idx_val': (idx_val.to(device) if device else idx_val), 'idx_test': (idx_test.to(device) if device else idx_test)}
elif (config['data_type'] == 'uci'):
data_conf = uci_data_utils.UCI(seed=config.get('data_seed', config['seed']), dataset_name=config['dataset_name'], n_train=config['n_train'], n_val=config['n_val'])
(adj, features, labels, idx_train, idx_val, idx_test) = data_conf.load(data_dir=config.get('data_dir', None), knn_size=config['input_graph_knn_size'], epsilon=config.get('input_graph_epsilon', None), knn_metric=config.get('knn_metric', 'cosine'))
device = config['device']
data = {'adj': (adj.to(device) if (device and (adj is not None)) else adj), 'features': (features.to(device) if device else features), 'labels': (labels.to(device) if device else labels), 'idx_train': (idx_train.to(device) if device else idx_train), 'idx_val': (idx_val.to(device) if device else idx_val), 'idx_test': (idx_test.to(device) if device else idx_test)}
elif (config['data_type'] == 'text'):
(train_set, dev_set, test_set) = text_data_utils.load_data(config)
print('# of training examples: {}'.format(len(train_set)))
print('# of dev examples: {}'.format(len(dev_set)))
print('# of testing examples: {}'.format(len(test_set)))
data = {'train': train_set, 'dev': dev_set, 'test': test_set}
else:
raise ValueError('Unknown data_type: {}'.format(config['data_type']))
return data |
def convert(pronunc, source, dest):
assert (type(pronunc) in [bytes, unicode, list]), type(pronunc)
if (source == dest):
return pronunc
if (type(pronunc) == list):
return [convert(p, source, dest) for p in pronunc]
func = checkSetting(source, 'cvtOut_func')
if func:
pronunc = func(pronunc)
for (s, r) in checkSetting(source, 'cvtOut_regexps'):
pronunc = re.sub(maybe_bytes(s, pronunc), maybe_bytes(r, pronunc), pronunc)
ret = []
toAddAfter = None
dictionary = make_dictionary(source, dest)
maxLen = max((len(l) for l in dictionary.keys()))
debugInfo = ''
separator = checkSetting(dest, 'phoneme_separator', ' ')
safe_to_drop = checkSetting(source, 'safe_to_drop_characters')
while pronunc:
for lettersToTry in range(maxLen, (- 1), (- 1)):
if (not lettersToTry):
if (safe_to_drop == True):
pass
elif ((not safe_to_drop) or ((not (pronunc[:1] in maybe_bytes(safe_to_drop, pronunc))) and (not ((pronunc[:1], debugInfo) in warnedAlready)))):
warnedAlready.add((pronunc[:1], debugInfo))
pronunc = pronunc[1:]
elif (pronunc[:lettersToTry] in dictionary):
debugInfo = (' after ' + as_printable(pronunc[:lettersToTry]))
toAdd = dictionary[pronunc[:lettersToTry]]
assert (type(toAdd) in [bytes, unicode]), type(toAdd)
isStressMark = (toAdd and (toAdd in [maybe_bytes(lexFormats[dest].get(primary_stress, ''), toAdd), maybe_bytes(lexFormats[dest].get(secondary_stress, ''), toAdd)]))
if (toAdd == maybe_bytes(lexFormats[dest].get(syllable_separator, ''), toAdd)):
pass
elif (isStressMark and (not checkSetting(dest, 'stress_comes_before_vowel'))):
if checkSetting(source, 'stress_comes_before_vowel'):
(toAdd, toAddAfter) = (maybe_bytes('', toAdd), toAdd)
else:
r = (len(ret) - 1)
while ((ret[r] in dest_consonants) or ret[r].endswith(maybe_bytes('*added', ret[r]))):
r -= 1
ret.insert((r + 1), toAdd)
toAdd = maybe_bytes('', toAdd)
elif (isStressMark and (not checkSetting(source, 'stress_comes_before_vowel'))):
i = len(ret)
while (i and ((ret[(i - 1)] in dest_consonants) or ret[(i - 1)].endswith(maybe_bytes('*added', ret[(i - 1)])))):
i -= 1
if i:
i -= 1
ret.insert(i, toAdd)
if dest_syllable_sep:
ret.append(maybe_bytes(dest_syllable_sep, toAdd))
toAdd = maybe_bytes('', toAdd)
elif (implicit_vowel_before_NL and ret and ret[(- 1)] and (toAdd in [maybe_bytes('n', toAdd), maybe_bytes('l', toAdd)]) and (ret[(- 1)] in dest_consonants)):
ret.append((maybe_bytes(implicit_vowel_before_NL, toAdd) + maybe_bytes('*added', toAdd)))
elif ((len(ret) > 2) and ret[(- 2)].endswith(maybe_bytes('*added', ret[(- 2)])) and toAdd and (not (toAdd in dest_consonants)) and (not (toAdd == dest_syllable_sep))):
del ret[(- 2)]
if toAdd:
if separator:
toAddList = toAdd.split(separator)
else:
toAddList = [toAdd]
ret.append(toAddList[0])
if (toAddAfter and (not (toAddList[0] in dest_consonants))):
ret.append(toAddAfter)
toAddAfter = None
ret += toAddList[1:]
pronunc = pronunc[lettersToTry:]
break
if toAddAfter:
ret.append(toAddAfter)
if (ret and (ret[(- 1)] == dest_syllable_sep)):
del ret[(- 1)]
if (not ret):
ret = ''
else:
ret = maybe_bytes(separator, ret[0]).join(ret).replace(maybe_bytes('*added', ret[0]), maybe_bytes('', ret[0]))
for (s, r) in checkSetting(dest, 'cleanup_regexps'):
ret = re.sub(maybe_bytes(s, ret), maybe_bytes(r, ret), ret)
func = checkSetting(dest, 'cleanup_func')
if func:
return func(ret)
else:
return ret |
def test_unicode_conversion():
assert (m.good_utf8_string() == u'Say utf8 A')
assert (m.good_utf16_string() == u'bAz')
assert (m.good_utf32_string() == u'aAz')
assert (m.good_wchar_string() == u'aAz')
with pytest.raises(UnicodeDecodeError):
m.bad_utf8_string()
with pytest.raises(UnicodeDecodeError):
m.bad_utf16_string()
if hasattr(m, 'bad_utf32_string'):
with pytest.raises(UnicodeDecodeError):
m.bad_utf32_string()
if hasattr(m, 'bad_wchar_string'):
with pytest.raises(UnicodeDecodeError):
m.bad_wchar_string()
assert (m.u8_Z() == 'Z')
assert (m.u8_eacute() == u'é')
assert (m.u16_ibang() == u'‽')
assert (m.u32_mathbfA() == u'𝐀')
assert (m.wchar_heart() == u'♥')
class kmod_info_t(ctypes.Structure):
_pack_ = 8
_fields_ = (('next', POINTER64), ('info_version', ctypes.c_int32), ('id', ctypes.c_uint32), ('name', (ctypes.c_char * 64)), ('version', (ctypes.c_char * 64)), ('reference_count', ctypes.c_int32), ('reference_list', POINTER64), ('address', POINTER64), ('size', ctypes.c_uint64), ('hdr_size', ctypes.c_uint64), ('start', POINTER64), ('stop', POINTER64))
def __init__(self, ql, base):
self.ql = ql
self.base = base
def updateToMem(self):
self.ql.mem.write(self.base, bytes(self))
def loadFromMem(self):
data = self.ql.mem.read(self.base, ctypes.sizeof(self))
newObj = type(self).from_buffer(data)
newObj.ql = self.ql
newObj.base = self.base
return newObj |
class aggregation(nn.Module):
def __init__(self, channel):
super(aggregation, self).__init__()
self.relu = nn.ReLU(True)
self.upsample = nn.Upsample(scale_factor=2, mode='bilinear', align_corners=True)
self.conv_upsample1 = nn.Conv2d(channel, channel, 3, padding=1)
self.conv_upsample2 = nn.Conv2d(channel, channel, 3, padding=1)
self.conv_upsample3 = nn.Conv2d(channel, channel, 3, padding=1)
self.conv_upsample4 = nn.Conv2d(channel, channel, 3, padding=1)
self.conv_upsample5 = nn.Conv2d((2 * channel), (2 * channel), 3, padding=1)
self.conv_concat2 = nn.Conv2d((2 * channel), (2 * channel), 3, padding=1)
self.conv_concat3 = nn.Conv2d((3 * channel), (3 * channel), 3, padding=1)
self.conv4 = nn.Conv2d((3 * channel), (3 * channel), 3, padding=1)
self.conv5 = nn.Conv2d((3 * channel), 1, 1)
for m in self.modules():
if isinstance(m, nn.Conv2d):
m.weight.data.normal_(std=0.01)
m.bias.data.fill_(0)
def forward(self, x1, x2, x3):
x1_1 = x1
x2_1 = (self.conv_upsample1(self.upsample(x1)) * x2)
x3_1 = ((self.conv_upsample2(self.upsample(self.upsample(x1))) * self.conv_upsample3(self.upsample(x2))) * x3)
x2_2 = torch.cat((x2_1, self.conv_upsample4(self.upsample(x1_1))), 1)
x2_2 = self.conv_concat2(x2_2)
x3_2 = torch.cat((x3_1, self.conv_upsample5(self.upsample(x2_2))), 1)
x3_2 = self.conv_concat3(x3_2)
x = self.conv4(x3_2)
x = self.conv5(x)
return x |
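A hedged shape check for the aggregation module above; it assumes the usual feature-pyramid layout where x2 and x3 sit at 2x and 4x the spatial resolution of x1, and the channel count and sizes below are invented for illustration.

import torch

agg = aggregation(channel=32)
x1 = torch.randn(1, 32, 11, 11)   # coarsest feature map
x2 = torch.randn(1, 32, 22, 22)   # 2x resolution of x1
x3 = torch.randn(1, 32, 44, 44)   # 4x resolution of x1
out = agg(x1, x2, x3)
print(out.shape)                  # torch.Size([1, 1, 44, 44]) - single-channel map at the finest resolution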
class PythonQuery(QueryPlugin):
PLUGIN_ID = 'python_query'
PLUGIN_NAME = _('Python Query')
PLUGIN_DESC = _('Use Python expressions in queries.')
key = 'python'
query_syntax = _('(python: expression)')
query_description = _('The variable <tt>s</tt> (or <tt>a</tt>) is the song / album being matched.\n\n<tt>_ts</tt> is a (real number) timestamp at start of query.\n\nModules <tt>time</tt> and <tt>random</tt> are also available, and the class <tt>Random</tt> (==<tt>random.Random</tt>) too.')
usage = ((markup_for_syntax(query_syntax) + '\n\n') + query_description)
def __init__(self):
print_d('Initialising')
self._globals = {'random': random, 'Random': random.Random, 'time': time}
self._reported = set()
self._raw_body = None
def search(self, data, body):
try:
self._globals['s'] = data
self._globals['a'] = data
ret = eval(body, dict(self._globals))
return ret
except Exception as e:
key = str(e)
if (key not in self._reported):
self._reported.add(key)
print_w(f'{type(e).__name__}({key}) in expression {self._raw_body!r}. Example failing data: {self._globals}')
return False
def parse_body(self, body):
if (body is None):
raise QueryPluginError
self._raw_body = body.strip()
self._reported.clear()
try:
self._globals.update(_ts=time.time())
return compile(body.strip(), 'query', 'eval')
except SyntaxError as e:
print_w(("Couldn't compile query (%s)" % e))
raise QueryPluginError from e |
def _make_prepare_uv():
from qualtran.bloqs.chemistry.pbc.first_quantization.prepare_uv import PrepareUVFirstQuantization
num_bits_p = 5
eta = 10
num_atoms = 10
lambda_zeta = 10
m_param = (2 ** 8)
num_bits_nuc_pos = 16
prep = PrepareUVFirstQuantization(num_bits_p=num_bits_p, eta=eta, num_atoms=num_atoms, m_param=m_param, lambda_zeta=lambda_zeta, num_bits_nuc_pos=num_bits_nuc_pos)
return prep |
def test_load_backoff_callable_absolute():
f = backoffcache.load_backoff_callable('tests.arbpack.arbcallables.ArbCallable')
callable_ref = f('ctor in')
assert (callable_ref('arg in 1') == 'from callable: ctor in arg in 1')
assert (callable_ref('arg in 2') == 'from callable: ctor in arg in 2') |
def get_port_for_service(app: AppDef) -> str:
port = '29500'
for (role_idx, role) in enumerate(app.roles):
if (role.port_map is None):
continue
for value in role.port_map.values():
port = str(value)
if (not (0 < int(port) <= 65535)):
msg = 'Warning: port_map set to invalid port number. Value must be between 1-65535, with torchx default = 29500. Setting port to default = 29500'
port = '29500'
warnings.warn(msg)
return port |
@pytest.mark.parametrize('username,password,email', site_managers)
def test_is_site_manager_returns_false_when_role_doesnotexist_(db, client, username, password, email):
client.login(username=username, password=password)
Role.objects.all().delete()
user = get_user_model().objects.get(username=username, email=email)
assert (is_site_manager(user) is False) |
def human_readable_time(timestamp):
date = datetime.fromtimestamp(timestamp)
datediff = (datetime.now().date() - date.date())
if (datediff.days >= 365):
return date.strftime('%-d %b %Y')
elif (datediff.days >= 7):
return date.strftime('%-d %b')
elif (datediff.days >= 1):
return date.strftime('%a')
return date.strftime('%H:%M') |
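A small illustrative call for human_readable_time above; the exact strings depend on the current date, and the '%-d' directive requires a strftime implementation (e.g. Linux/macOS) that supports it.

import time

now = time.time()
print(human_readable_time(now))                # e.g. '14:05'      (today)
print(human_readable_time(now - 3 * 86400))    # e.g. 'Mon'        (within the last week)
print(human_readable_time(now - 30 * 86400))   # e.g. '4 Sep'      (within the last year)
print(human_readable_time(now - 400 * 86400))  # e.g. '4 Sep 2023' (older than a year)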
def main():
parser = argparse.ArgumentParser()
parser.add_argument('--profile', type=str, action='store', help='The credentials.response profile to use.')
parser.add_argument('--prefix', type=str, action='store', help='Output filename prefix.')
s = parser.add_mutually_exclusive_group(required=False)
s.add_argument('--group-id', type=int, action='store', help='Target sensor group based on numeric ID.')
s.add_argument('--hostname', type=str, action='store', help='Target sensor matching hostname.')
s.add_argument('--ip', type=str, action='store', help='Target sensor matching IP address (dotted quad).')
parser.add_argument('--process-count', action='store_true', help='Count processes associated with this sensor.')
parser.add_argument('--tamper-count', action='store_true', help='Count tamper events associated with this sensor.')
parser.add_argument('--checkin-ip', action='store_true', help='Return the latest public IP associated with the sensor.')
args = parser.parse_args()
if args.prefix:
output_filename = ('%s-sensors.csv' % args.prefix)
else:
output_filename = 'sensors.csv'
if args.profile:
cb = CbEnterpriseResponseAPI(profile=args.profile)
else:
cb = CbEnterpriseResponseAPI()
output_file = open(output_filename, 'w')
writer = csv.writer(output_file, quoting=csv.QUOTE_ALL)
header_row = ['computer_name', 'computer_dns_name', 'sensor_group_id', 'os', 'os_type', 'computer_sid', 'last_checkin_time', 'registration_time', 'network_adapters', 'id', 'group_id', 'group_name', 'num_eventlog_mb', 'num_storefiles_mb', 'systemvolume_free_size', 'systemvolume_total_size', 'health', 'commit_charge_mb', 'build_version_string', 'process_count', 'tamper_count', 'clock_delta', 'checkin_ip']
writer.writerow(header_row)
query_base = None
if args.group_id:
query_base = 'groupid:{0}'.format(args.group_id)
elif args.hostname:
query_base = 'hostname:{0}'.format(args.hostname)
elif args.ip:
query_base = 'ip:{0}'.format(args.ip)
if (query_base is None):
sensors = cb.select(Sensor)
else:
sensors = cb.select(Sensor).where(query_base)
num_sensors = len(sensors)
log_info('Found {0} sensors'.format(num_sensors))
counter = 1
for sensor in sensors:
if ((counter % 10) == 0):
print('{0} of {1}'.format(counter, num_sensors))
if (len(sensor.resource_status) > 0):
commit_charge = '{0:.2f}'.format(((float(sensor.resource_status[0]['commit_charge']) / 1024) / 1024))
else:
commit_charge = ''
num_eventlog_mb = '{0:.2f}'.format(((float(sensor.num_eventlog_bytes) / 1024) / 1024))
num_storefiles_mb = '{0:.2f}'.format(((float(sensor.num_storefiles_bytes) / 1024) / 1024))
systemvolume_free_size = '{0:.2f}'.format(((float(sensor.systemvolume_free_size) / 1024) / 1024))
systemvolume_total_size = '{0:.2f}'.format(((float(sensor.systemvolume_total_size) / 1024) / 1024))
if (args.process_count == True):
process_count = len(cb.select(Process).where('sensor_id:{0}'.format(sensor.id)))
else:
process_count = ''
if (args.checkin_ip == True):
try:
checkin_ip = cb.select(Process).where('sensor_id:{0}'.format(sensor.id)).first().comms_ip
except AttributeError:
checkin_ip = ''
else:
checkin_ip = ''
if (args.tamper_count == True):
tamper_count = len(cb.select(Process).where('tampered:true AND sensor_id:{0}'.format(sensor.id)))
else:
tamper_count = ''
output_fields = [sensor.computer_name.lower(), sensor.computer_dns_name.lower(), sensor.group_id, sensor.os, sensor.os_type, sensor.computer_sid, sensor.last_checkin_time, sensor.registration_time, sensor.network_adapters, sensor.id, sensor.group_id, sensor.group.name, num_eventlog_mb, num_storefiles_mb, systemvolume_free_size, systemvolume_total_size, sensor.sensor_health_message, commit_charge, sensor.build_version_string, process_count, tamper_count, sensor.clock_delta, checkin_ip]
if (_python3 == False):
row = [(col.encode('utf8') if isinstance(col, unicode) else col) for col in output_fields]
else:
row = output_fields
writer.writerow(row)
counter += 1
output_file.close() |
def import_objects(manage_dict):
auto_import = {}
auto_scripts = []
import_dict = manage_dict.get('shell', {}).get('auto_import', {})
object_list = import_dict.get('objects', [])
if isinstance(object_list, dict):
for (name, spec) in object_list.items():
_obj = import_module(name)
if spec:
if ('init' in spec):
init = spec['init']
if isinstance(init, dict):
method_name = list(init.keys())[0]
args = (init[method_name] or {}).get('args', [])
kwargs = (init[method_name] or {}).get('kwargs', {})
else:
method_name = init
args = []
kwargs = {}
getattr(_obj, method_name)(*args, **kwargs)
spec_as = spec.get('as', get_name(_obj, name))
if (not isinstance(spec_as, list)):
spec_as = [spec_as]
for as_name in spec_as:
auto_import[as_name] = _obj
if ('init_script' in spec):
auto_scripts.append(spec['init_script'])
if ('submodules' in spec):
submodules = spec['submodules']
if isinstance(submodules, list):
import_submodules(name, submodules)
else:
import_submodules(name)
else:
auto_import[get_name(_obj, name)] = _obj
else:
for name in object_list:
_obj = import_module(name)
auto_import[getattr(_obj, '__name__', name)] = _obj
for script in auto_scripts:
exec(script, auto_import)
return auto_import |
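A hedged sketch of the configuration shape that import_objects() walks; the module names, aliases, and keys below are illustrative and not taken from the original project (import_module and get_name are assumed to be the helpers the function already relies on).

manage_dict = {
    'shell': {
        'auto_import': {
            'objects': {
                'json': None,                     # imported and exposed under its own name
                'collections': {'as': ['cols']},  # imported and exposed under the alias 'cols'
            }
        }
    }
}

namespace = import_objects(manage_dict)
# namespace now maps the chosen names to the imported module objects, ready to seed a shell.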
class _Arguments():
modules: list[str]
concise: bool
ignore_missing_stub: bool
ignore_positional_only: bool
allowlist: list[str]
generate_allowlist: bool
ignore_unused_allowlist: bool
mypy_config_file: str
custom_typeshed_dir: str
check_typeshed: bool
version: str |
def test(epoch, criterion_cls, criterion_div):
net.eval()
global best_acc
test_loss_cls = 0.0
test_loss_div = 0.0
correct = ([0] * (args.num_branches + 1))
total = ([0] * (args.num_branches + 1))
with torch.no_grad():
for (batch_idx, (inputs, target)) in enumerate(testloader):
(inputs, target) = (inputs.cuda(), target.cuda())
(logits, embedding) = net(inputs)
loss_cls = 0.0
loss_div = 0.0
ensemble_logits = 0.0
for i in range(len(logits)):
loss_cls = (loss_cls + criterion_cls(logits[i], target))
for i in range(len(logits)):
ensemble_logits = (ensemble_logits + logits[i])
ensemble_logits = (ensemble_logits / len(logits))
ensemble_logits = ensemble_logits.detach()
loss_div = (loss_div + criterion_div(logits[(- 1)], ensemble_logits))
test_loss_cls += (loss_cls.item() / len(testloader))
test_loss_div += (loss_div.item() / len(testloader))
for i in range((args.num_branches + 1)):
if (i == args.num_branches):
(_, predicted) = ensemble_logits.max(1)
else:
(_, predicted) = logits[i].max(1)
correct[i] += predicted.eq(target).sum().item()
total[i] += target.size(0)
acc = []
for i in range((args.num_branches + 1)):
acc.append((correct[i] / total[i]))
with open(((('result/' + str(os.path.basename(__file__).split('.')[0])) + args.arch) + '.txt'), 'a+') as f:
f.write('test epoch:{0}\t test_loss_cls:{1:.5f}\t test_loss_div:{2:.5f}\t accuracy:{3}\n'.format(epoch, test_loss_cls, test_loss_div, str(acc)))
print('test epoch:{0}\t accuracy:{1}\n'.format(epoch, str(acc)))
return max(acc) |
def visualize_solution(xc, yc, x, C, n, K, title_str):
plt.figure()
plt.scatter(xc, yc, s=200)
for i in range(len(xc)):
plt.annotate(i, ((xc[i] + 0.15), yc[i]), size=16, color='r')
plt.plot(xc[0], yc[0], 'r*', ms=20)
plt.grid()
for ii in range(0, (n ** 2)):
if (x[ii] > 0):
ix = (ii // n)
iy = (ii % n)
plt.arrow(xc[ix], yc[ix], (xc[iy] - xc[ix]), (yc[iy] - yc[ix]), length_includes_head=True, head_width=0.25)
plt.title(((title_str + ' cost = ') + str((int((C * 100)) / 100.0))))
plt.show() |
class Tokens(object):
TEXT = 0
TEXT_WS = 1
SPAN = 2
POS = 3
LEMMA = 4
NER = 5
def __init__(self, data, annotators, opts=None):
self.data = data
self.annotators = annotators
self.opts = (opts or {})
def __len__(self):
return len(self.data)
def slice(self, i=None, j=None):
new_tokens = copy.copy(self)
new_tokens.data = self.data[i:j]
return new_tokens
def untokenize(self):
return ''.join([t[self.TEXT_WS] for t in self.data]).strip()
def words(self, uncased=False):
if uncased:
return [t[self.TEXT].lower() for t in self.data]
else:
return [t[self.TEXT] for t in self.data]
def offsets(self):
return [t[self.SPAN] for t in self.data]
def pos(self):
if ('pos' not in self.annotators):
return None
return [t[self.POS] for t in self.data]
def lemmas(self):
if ('lemma' not in self.annotators):
return None
return [t[self.LEMMA] for t in self.data]
def entities(self):
if ('ner' not in self.annotators):
return None
return [t[self.NER] for t in self.data]
def ngrams(self, n=1, uncased=False, filter_fn=None, as_strings=True):
def _skip(gram):
if (not filter_fn):
return False
return filter_fn(gram)
words = self.words(uncased)
ngrams = [(s, (e + 1)) for s in range(len(words)) for e in range(s, min((s + n), len(words))) if (not _skip(words[s:(e + 1)]))]
if as_strings:
ngrams = ['{}'.format(' '.join(words[s:e])) for (s, e) in ngrams]
return ngrams
def entity_groups(self):
entities = self.entities()
if (not entities):
return None
non_ent = self.opts.get('non_ent', 'O')
groups = []
idx = 0
while (idx < len(entities)):
ner_tag = entities[idx]
if (ner_tag != non_ent):
start = idx
while ((idx < len(entities)) and (entities[idx] == ner_tag)):
idx += 1
groups.append((self.slice(start, idx).untokenize(), ner_tag))
else:
idx += 1
return groups |
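A minimal hedged example of feeding the Tokens container above; each tuple follows the (TEXT, TEXT_WS, SPAN, POS, LEMMA, NER) layout implied by the class constants, and the values are invented.

data = [
    ('Barack', 'Barack ', (0, 6), 'NNP', 'Barack', 'PERSON'),
    ('Obama', 'Obama ', (7, 12), 'NNP', 'Obama', 'PERSON'),
    ('spoke', 'spoke', (13, 18), 'VBD', 'speak', 'O'),
]
toks = Tokens(data, annotators={'pos', 'lemma', 'ner'})
print(toks.words())          # ['Barack', 'Obama', 'spoke']
print(toks.ngrams(n=2))      # unigrams plus bigrams, e.g. 'Barack Obama'
print(toks.entity_groups())  # [('Barack Obama', 'PERSON')]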
class ReduceScatterBase_Req(Function):
def forward(ctx, pg: dist.ProcessGroup, myreq: Request[Tensor], rsi: ReduceScatterBaseInfo, inputs: Tensor) -> Tensor:
my_size = dist.get_world_size(pg)
assert ((inputs.size(0) % my_size) == 0)
if (rsi.codecs is not None):
inputs = rsi.codecs.forward.encode(inputs)
output = inputs.new_empty(((inputs.size(0) // my_size), inputs.size(1)))
with record_function('## reduce_scatter_base ##'):
req = dist._reduce_scatter_base(output, inputs, group=pg, async_op=True)
myreq.req = req
myreq.tensor = output
myreq.wait_function = ReduceScatterBase_Wait
myreq.rsi = rsi
myreq.tensor = output
ctx.myreq = myreq
ctx.pg = pg
return myreq.dummy_tensor
def backward(ctx, *unused: Tensor) -> Tuple[(Optional[Tensor], ...)]:
myreq = ctx.myreq
myreq.req.wait()
myreq.req = None
grad_inputs = myreq.tensor
rsi = myreq.rsi
if (rsi.codecs is not None):
grad_inputs = rsi.codecs.backward.decode(grad_inputs)
if GRADIENT_DIVISION:
grad_inputs.div_(dist.get_world_size(ctx.pg))
myreq.tensor = None
myreq.dummy_tensor = None
return (None, None, None, grad_inputs) |
class CustomHandler(Handler):
def __init__(self, uuid='', logs='', custom_filter=None, config=None, drop=False):
self.db = {'db_postgres': None, 'db_sqlite': None}
self.logs = logs
self.uuid = uuid
self.custom_filter = custom_filter
if (config and (config != '') and ('db_postgres' in self.logs)):
parsed = urlparse(config['postgres'])
self.db['db_postgres'] = postgres_class(host=parsed.hostname, port=parsed.port, username=parsed.username, password=parsed.password, db=parsed.path[1:], uuid=self.uuid, drop=drop)
if (config and (config != '') and ('db_sqlite' in self.logs)):
self.db['db_sqlite'] = sqlite_class(file=config['sqlite_file'], drop=drop, uuid=self.uuid)
Handler.__init__(self)
def emit(self, record):
try:
if ('db_postgres' in self.logs):
if self.db['db_postgres']:
if isinstance(record.msg, list):
if ((record.msg[0] == 'sniffer') or (record.msg[0] == 'errors')):
self.db['db_postgres'].insert_into_data_safe(record.msg[0], dumps(serialize_object(record.msg[1]), cls=ComplexEncoder))
elif isinstance(record.msg, Mapping):
if ('server' in record.msg):
self.db['db_postgres'].insert_into_data_safe('servers', dumps(serialize_object(record.msg), cls=ComplexEncoder))
if ('db_sqlite' in self.logs):
_record = parse_record(record, self.custom_filter, 'db_sqlite')
if _record:
self.db['db_sqlite'].insert_into_data_safe(_record.msg)
if ('terminal' in self.logs):
_record = parse_record(record, self.custom_filter, 'terminal')
if _record:
stdout.write((_record.msg + '\n'))
if ('syslog' in self.logs):
_record = parse_record(record, self.custom_filter, 'terminal')
if _record:
stdout.write((_record.msg + '\n'))
except Exception as e:
if (self.custom_filter is not None):
if ('honeypots' in self.custom_filter):
if ('remove_errors' in self.custom_filter['honeypots']['options']):
return None
stdout.write((dumps({'error': repr(e), 'logger': repr(record)}, sort_keys=True, cls=ComplexEncoder) + '\n'))
stdout.flush() |
def contrastive_cross_entropy(logits, target, margin=0.0):
logp = F.log_softmax(logits, dim=(- 1))
target_one_hot = F.one_hot(target, num_classes=logp.shape[(- 1)])
logp_target = (logp * target_one_hot.to(logits.dtype)).sum((- 1))
logp_others = torch.where(target_one_hot.bool(), torch.full_like(logp, (- float('inf'))), logp)
return F.relu(((margin + logp_others.max(dim=(- 1))[0]) - logp_target)).mean() |
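A hedged sanity check for contrastive_cross_entropy above, using made-up logits: the loss is zero once the target logit beats every other class by the margin, and otherwise grows with the shortfall.

import torch

logits = torch.tensor([[4.0, 0.0, 0.0],   # target class wins by a wide gap
                       [0.5, 0.0, 0.0]])  # target class wins only narrowly
target = torch.tensor([0, 0])

print(contrastive_cross_entropy(logits, target, margin=0.0).item())  # ~0.0  - both rows already satisfy a zero margin
print(contrastive_cross_entropy(logits, target, margin=1.0).item())  # ~0.25 - only the narrow row falls short of the margin (1.0 - 0.5, averaged over 2 rows)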
class HfTrainerDeepSpeedConfig(HfDeepSpeedConfig):
def __init__(self, config_file_or_dict):
super().__init__(config_file_or_dict)
self._dtype = None
self.mismatches = []
def dtype(self):
if (self._dtype is None):
raise ValueError("trainer_config_process() wasn't called yet to tell dtype")
return self._dtype
def is_auto(self, ds_key_long):
val = self.get_value(ds_key_long)
if (val is None):
return False
else:
return (val == 'auto')
def fill_match(self, ds_key_long, hf_val, hf_key=None, must_match=True):
(config, ds_key) = self.find_config_node(ds_key_long)
if (config is None):
return
if (config.get(ds_key) == 'auto'):
config[ds_key] = hf_val
return
if (not must_match):
return
ds_val = config.get(ds_key)
if ((ds_val is not None) and (ds_val != hf_val)):
self.mismatches.append(f'- ds {ds_key_long}={ds_val} vs hf {hf_key}={hf_val}')
fill_only = partialmethod(fill_match, must_match=False)
def trainer_config_process(self, args):
train_batch_size = ((args.world_size * args.per_device_train_batch_size) * args.gradient_accumulation_steps)
self.fill_match('train_micro_batch_size_per_gpu', args.per_device_train_batch_size, 'per_device_train_batch_size')
self.fill_match('gradient_accumulation_steps', args.gradient_accumulation_steps, 'gradient_accumulation_steps')
self.fill_match('train_batch_size', train_batch_size, 'train_batch_size (calculated)')
self.fill_match('gradient_clipping', args.max_grad_norm, 'max_grad_norm')
self.fill_match('optimizer.params.lr', args.learning_rate, 'learning_rate')
self.fill_match('optimizer.params.betas', [args.adam_beta1, args.adam_beta2], 'adam_beta1+adam_beta2')
self.fill_match('optimizer.params.eps', args.adam_epsilon, 'adam_epsilon')
self.fill_match('optimizer.params.weight_decay', args.weight_decay, 'weight_decay')
self.fill_only('scheduler.params.warmup_min_lr', 0)
self.fill_match('scheduler.params.warmup_max_lr', args.learning_rate, 'learning_rate')
if (args.fp16 or args.fp16_full_eval):
fp16_backend = ('apex' if (args.fp16_backend == 'apex') else 'amp')
else:
fp16_backend = None
if args.save_on_each_node:
self.config['checkpoint'] = self.config.get('checkpoint', {})
self.config['checkpoint']['use_node_local_storage'] = args.save_on_each_node
self.fill_match('fp16.enabled', ((args.fp16 or args.fp16_full_eval) and (fp16_backend == 'amp')), 'fp16|fp16_full_eval+fp16_backend(amp)')
self.fill_match('amp.enabled', (fp16_backend == 'apex'), 'fp16+fp16_backend(apex)')
self.fill_match('amp.opt_level', args.fp16_opt_level, 'fp16_opt_level')
self.fill_match('bf16.enabled', (args.bf16 or args.bf16_full_eval), 'bf16|bf16_full_eval')
if self.is_true('bf16.enabled'):
self._dtype = torch.bfloat16
elif self.is_false('fp16.enabled'):
self._dtype = torch.float32
else:
self._dtype = torch.float16
def trainer_config_finalize(self, args, model, num_training_steps):
hidden_size_based_keys = ['zero_optimization.reduce_bucket_size', 'zero_optimization.stage3_prefetch_bucket_size', 'zero_optimization.stage3_param_persistence_threshold']
hidden_size_auto_keys = [x for x in hidden_size_based_keys if self.is_auto(x)]
if (len(hidden_size_auto_keys) > 0):
if hasattr(model.config, 'hidden_size'):
hidden_size = model.config.hidden_size
elif hasattr(model.config, 'hidden_sizes'):
hidden_size = max(model.config.hidden_sizes)
else:
raise ValueError(f"The model's config file has neither `hidden_size` nor `hidden_sizes` entry, therefore it's not possible to automatically fill out the following `auto` entries in the DeepSpeed config file: {hidden_size_auto_keys}. You can fix that by replacing `auto` values for these keys with an integer value of your choice.")
self.fill_only('zero_optimization.reduce_bucket_size', (hidden_size * hidden_size))
if self.is_zero3():
self.fill_only('zero_optimization.stage3_prefetch_bucket_size', ((0.9 * hidden_size) * hidden_size))
self.fill_only('zero_optimization.stage3_param_persistence_threshold', (10 * hidden_size))
self.fill_match('scheduler.params.total_num_steps', num_training_steps, 'num_training_steps (calculated)')
self.fill_match('scheduler.params.warmup_num_steps', args.get_warmup_steps(num_training_steps), 'warmup_steps')
if (len(self.mismatches) > 0):
mismatches = '\n'.join(self.mismatches)
raise ValueError(f'''Please correct the following DeepSpeed config values that mismatch TrainingArguments values:
{mismatches}
The easiest method is to set these DeepSpeed config values to 'auto'.''') |
@pytest.mark.unit()
@pytest.mark.parametrize(('arg_name', 'arg_value', 'i', 'id_func', 'expected'), [('arg', 1, 0, None, '1'), ('arg', True, 0, None, 'True'), ('arg', False, 0, None, 'False'), ('arg', 1.0, 0, None, '1.0'), ('arg', None, 0, None, 'arg0'), ('arg', (1,), 0, None, 'arg0'), ('arg', [1], 0, None, 'arg0'), ('arg', {1, 2}, 0, None, 'arg0'), ('arg', 1, 0, bool, 'True'), ('arg', 1, 1, (lambda x: None), '1'), ('arg', [1], 2, (lambda x: None), 'arg2')])
def test_arg_value_to_id_component(arg_name, arg_value, i, id_func, expected):
result = _arg_value_to_id_component(arg_name, arg_value, i, id_func)
assert (result == expected) |
def test_jax_FunctionGraph_once():
from pytensor.link.jax.dispatch import jax_funcify
x = vector('x')
y = vector('y')
class TestOp(Op):
def __init__(self):
self.called = 0
def make_node(self, *args):
return Apply(self, list(args), [x.type() for x in args])
def perform(self, inputs, outputs):
for (i, inp) in enumerate(inputs):
outputs[i][0] = inp[0]
@jax_funcify.register(TestOp)
def jax_funcify_TestOp(op, **kwargs):
def func(*args, op=op):
op.called += 1
return list(args)
return func
op1 = TestOp()
op2 = TestOp()
(q, r) = op1(x, y)
outs = op2((q + r), (q + r))
out_fg = FunctionGraph([x, y], outs, clone=False)
assert (len(out_fg.outputs) == 2)
out_jx = jax_funcify(out_fg)
x_val = np.r_[(1, 2)].astype(config.floatX)
y_val = np.r_[(2, 3)].astype(config.floatX)
res = out_jx(x_val, y_val)
assert (len(res) == 2)
assert (op1.called == 1)
assert (op2.called == 1)
res = out_jx(x_val, y_val)
assert (len(res) == 2)
assert (op1.called == 2)
assert (op2.called == 2) |
class struct__EFI_IFR_VARSTORE(ctypes.Structure):
_pack_ = True
_fields_ = [('Header', EFI_IFR_OP_HEADER), ('PADDING_0', (ctypes.c_ubyte * 2)), ('Guid', EFI_GUID), ('VarStoreId', ctypes.c_uint16), ('Size', ctypes.c_uint16), ('Name', (ctypes.c_ubyte * 1)), ('PADDING_1', (ctypes.c_ubyte * 3))] |
class LithiumIonParameters(BaseParameters):
def __init__(self, options=None):
self.options = options
self.geo = pybamm.GeometricParameters(options)
self.elec = pybamm.electrical_parameters
self.therm = pybamm.thermal_parameters
self.n = DomainLithiumIonParameters('negative', self)
self.s = DomainLithiumIonParameters('separator', self)
self.p = DomainLithiumIonParameters('positive', self)
self.domain_params = {'negative': self.n, 'separator': self.s, 'positive': self.p}
self._set_parameters()
def _set_parameters(self):
self.R = pybamm.Parameter('Ideal gas constant [J.K-1.mol-1]')
self.F = pybamm.Parameter('Faraday constant [C.mol-1]')
self.k_b = pybamm.Parameter('Boltzmann constant [J.K-1]')
self.q_e = pybamm.Parameter('Elementary charge [C]')
self.T_ref = self.therm.T_ref
self.T_init = self.therm.T_init
self.T_amb = self.therm.T_amb
self.T_amb_av = self.therm.T_amb_av
self.h_edge = self.therm.h_edge
self.h_total = self.therm.h_total
self.rho_c_p_eff = self.therm.rho_c_p_eff
self.lambda_eff = self.therm.lambda_eff
self.L_x = self.geo.L_x
self.L = self.geo.L
self.L_y = self.geo.L_y
self.L_z = self.geo.L_z
self.r_inner = self.geo.r_inner
self.r_outer = self.geo.r_outer
self.A_cc = self.geo.A_cc
self.A_cooling = self.geo.A_cooling
self.V_cell = self.geo.V_cell
self.current_with_time = self.elec.current_with_time
self.current_density_with_time = self.elec.current_density_with_time
self.Q = self.elec.Q
self.R_contact = self.elec.R_contact
self.n_electrodes_parallel = self.elec.n_electrodes_parallel
self.n_cells = self.elec.n_cells
self.voltage_low_cut = self.elec.voltage_low_cut
self.voltage_high_cut = self.elec.voltage_high_cut
self.ocp_soc_0_dimensional = self.elec.ocp_soc_0_dimensional
self.ocp_soc_100_dimensional = self.elec.ocp_soc_100_dimensional
for domain in self.domain_params.values():
domain._set_parameters()
self.epsilon_init = pybamm.concatenation(*[self.domain_params[domain.split()[0]].epsilon_init for domain in self.options.whole_cell_domains])
self.V_bar_Li = pybamm.Parameter('Lithium metal partial molar volume [m3.mol-1]')
self.c_Li_typ = pybamm.Parameter('Typical plated lithium concentration [mol.m-3]')
self.c_plated_Li_0 = pybamm.Parameter('Initial plated lithium concentration [mol.m-3]')
self.alpha_plating = pybamm.Parameter('Lithium plating transfer coefficient')
self.alpha_stripping = (1 - self.alpha_plating)
self.c_e_init = pybamm.Parameter('Initial concentration in electrolyte [mol.m-3]')
self.c_e_init_av = pybamm.xyz_average(self.c_e_init)
self.c_e_init_av.print_name = 'c_e_init'
self.alpha_T_cell = pybamm.Parameter('Cell thermal expansion coefficient [m.K-1]')
c_e_av_init = (pybamm.xyz_average(self.epsilon_init) * self.c_e_init)
self.n_Li_e_init = ((c_e_av_init * self.L_x) * self.A_cc)
self.n_Li_particles_init = (self.n.n_Li_init + self.p.n_Li_init)
self.n_Li_init = (self.n_Li_particles_init + self.n_Li_e_init)
self.Q_Li_particles_init = ((self.n_Li_particles_init * self.F) / 3600)
self.Q_Li_init = ((self.n_Li_init * self.F) / 3600)
self.ocv_init = (self.p.prim.U_init - self.n.prim.U_init)
self.thermal_voltage = ((self.R * self.T_ref) / self.F)
self.I_typ = (self.Q / (self.A_cc * self.n_electrodes_parallel))
self.a_j_scale = (self.I_typ / self.L_x)
def chi(self, c_e, T):
return ((2 * (1 - self.t_plus(c_e, T))) * self.thermodynamic_factor(c_e, T))
def chiRT_over_Fc(self, c_e, T):
tol = pybamm.settings.tolerances['chi__c_e']
c_e = pybamm.maximum(c_e, tol)
return ((((self.R * T) / self.F) * self.chi(c_e, T)) / c_e)
def t_plus(self, c_e, T):
inputs = {'Electrolyte concentration [mol.m-3]': c_e, 'Temperature [K]': T}
return pybamm.FunctionParameter('Cation transference number', inputs)
def thermodynamic_factor(self, c_e, T):
inputs = {'Electrolyte concentration [mol.m-3]': c_e, 'Temperature [K]': T}
return pybamm.FunctionParameter('Thermodynamic factor', inputs)
def D_e(self, c_e, T):
tol = pybamm.settings.tolerances['D_e__c_e']
c_e = pybamm.maximum(c_e, tol)
inputs = {'Electrolyte concentration [mol.m-3]': c_e, 'Temperature [K]': T}
return pybamm.FunctionParameter('Electrolyte diffusivity [m2.s-1]', inputs)
def kappa_e(self, c_e, T):
tol = pybamm.settings.tolerances['kappa_e__c_e']
c_e = pybamm.maximum(c_e, tol)
inputs = {'Electrolyte concentration [mol.m-3]': c_e, 'Temperature [K]': T}
return pybamm.FunctionParameter('Electrolyte conductivity [S.m-1]', inputs)
def j0_Li_metal(self, c_e, c_Li, T):
inputs = {'Electrolyte concentration [mol.m-3]': c_e, 'Lithium metal concentration [mol.m-3]': c_Li, 'Temperature [K]': T}
return pybamm.FunctionParameter('Exchange-current density for lithium metal electrode [A.m-2]', inputs)
def j0_stripping(self, c_e, c_Li, T):
inputs = {'Electrolyte concentration [mol.m-3]': c_e, 'Plated lithium concentration [mol.m-3]': c_Li, 'Temperature [K]': T}
return pybamm.FunctionParameter('Exchange-current density for stripping [A.m-2]', inputs)
def j0_plating(self, c_e, c_Li, T):
inputs = {'Electrolyte concentration [mol.m-3]': c_e, 'Plated lithium concentration [mol.m-3]': c_Li, 'Temperature [K]': T}
return pybamm.FunctionParameter('Exchange-current density for plating [A.m-2]', inputs)
def dead_lithium_decay_rate(self, L_sei):
inputs = {'Total SEI thickness [m]': L_sei}
return pybamm.FunctionParameter('Dead lithium decay rate [s-1]', inputs) |
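A minimal usage sketch for the parameter container above, assuming pybamm is installed and that a built-in parameter set such as 'Chen2020' (an assumed name, not part of the snippet) supplies values for the symbols it references:

import pybamm

# Build the symbolic parameter container and evaluate one of its
# FunctionParameters against a concrete parameter set.
param = pybamm.LithiumIonParameters()
values = pybamm.ParameterValues('Chen2020')
# Cation transference number at c_e = 1000 mol.m-3 and T = 298.15 K.
t_plus = values.evaluate(param.t_plus(pybamm.Scalar(1000), pybamm.Scalar(298.15)))
print(t_plus)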
def test_cohorts_to_array__indexes():
with pytest.raises(ValueError, match='Cohort tuples must all be the same length'):
_cohorts_to_array([(0, 1), (0, 1, 2)])
np.testing.assert_equal(_cohorts_to_array([]), np.array([]))
np.testing.assert_equal(_cohorts_to_array([0, 1]), np.array([[0], [1]]))
np.testing.assert_equal(_cohorts_to_array([(0, 1), (2, 1)]), np.array([[0, 1], [2, 1]]))
np.testing.assert_equal(_cohorts_to_array([(0, 1, 2), (3, 1, 2)]), np.array([[0, 1, 2], [3, 1, 2]])) |
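The helper under test is not shown; a possible implementation consistent with these assertions (a sketch, not necessarily the library's actual _cohorts_to_array) is:

import numpy as np

def _cohorts_to_array(cohorts):
    # Consistent with the tests above: ints become length-1 rows, tuples become
    # rows, and mixed tuple lengths raise ValueError.
    if len(cohorts) == 0:
        return np.array([])
    rows = [c if isinstance(c, tuple) else (c,) for c in cohorts]
    if len({len(r) for r in rows}) != 1:
        raise ValueError('Cohort tuples must all be the same length')
    return np.array(rows)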
def update_db(dest, src):
for (comp, group) in src.items():
if (comp in dest):
update_db(dest[comp][0], group[0])
update_db(dest[comp][1], group[1])
if (len(group) > 2):
dest[comp] = (dest[comp][:2] + group[2:])
else:
dest[comp] = copy_group(group) |
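A quick illustration of the merge behaviour on nested component trees; copy_group is not defined above, so the deep copy used here is an assumed stand-in:

import copy

def copy_group(group):
    # Assumed stand-in for the undefined helper: deep-copy a group tuple.
    return copy.deepcopy(group)

dest = {'a': ({}, {}, 'v1')}
src = {'a': ({'x': ({}, {}, 'leaf')}, {}, 'v2'), 'b': ({}, {}, 'new')}
update_db(dest, src)
# 'a' has its two nested dicts merged recursively and takes src's trailing data;
# 'b' is absent from dest, so it is copied in wholesale.
print(dest)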
@require_torch
@require_sentencepiece
@require_tokenizers
@require_flax
class MT5IntegrationTest(unittest.TestCase):
def test_small_integration_test(self):
model = FlaxMT5ForConditionalGeneration.from_pretrained('google/mt5-small')
tokenizer = AutoTokenizer.from_pretrained('google/mt5-small')
input_ids = tokenizer('Hello there', return_tensors='np').input_ids
labels = tokenizer('Hi I am', return_tensors='np').input_ids
decoder_input_ids = shift_tokens_right(labels, model.config.pad_token_id, model.config.decoder_start_token_id)
logits = model(input_ids, decoder_input_ids=decoder_input_ids).logits
loss = optax.softmax_cross_entropy(logits, onehot(labels, logits.shape[(- 1)])).mean()
mtf_score = (- (labels.shape[(- 1)] * loss.item()))
EXPECTED_SCORE = (- 84.9127)
self.assertTrue((abs((mtf_score - EXPECTED_SCORE)) < 0.0001)) |
@master_only
def init_wandb_logger(opt):
import wandb
logger = get_root_logger()
project = opt['logger']['wandb']['project']
resume_id = opt['logger']['wandb'].get('resume_id')
if resume_id:
wandb_id = resume_id
resume = 'allow'
logger.warning(f'Resume wandb logger with id={wandb_id}.')
else:
wandb_id = wandb.util.generate_id()
resume = 'never'
wandb.init(id=wandb_id, resume=resume, name=opt['name'], config=opt, project=project, sync_tensorboard=True)
logger.info(f'Use wandb logger with id={wandb_id}; project={project}.') |
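The function only reads a few keys from opt; a minimal options dict that satisfies it could look like this (project and experiment names are placeholders):

opt = {
    'name': 'example_experiment',          # placeholder run name
    'logger': {
        'wandb': {
            'project': 'example_project',  # placeholder W&B project
            'resume_id': None,             # or a previous run id to resume
        },
    },
}
# init_wandb_logger(opt)  # requires a working wandb installation/login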
def apply_seq_mse(model: torch.nn.Module, sim: QuantizationSimModel, data_loader: DataLoader, params: SeqMseParams, modules_to_exclude: Optional[List[torch.nn.Module]]=None, module_classes_to_exclude: Optional[List[torch.nn.Module]]=None, checkpoints_config: Optional[str]=None):
assert (sim._quant_scheme == QuantScheme.post_training_tf), 'Use TF quant-scheme with sequential MSE.'
quantizers = get_quantizers_to_be_disabled(sim, modules_to_exclude, module_classes_to_exclude)
enable_disable_quantizers(quantizers, enabled=False)
compute_all_param_encodings(sim)
with tempfile.TemporaryDirectory() as tempdir:
cached_dataset = CachedDataset(data_loader, params.num_batches, os.path.join(tempdir, 'cached_dataset'))
if checkpoints_config:
apply_seq_mse_using_opt_sampling(checkpoints_config, model, sim, cached_dataset, params, tempdir)
else:
dummy_input = change_tensor_device_placement(next(iter(data_loader)), get_device(model))
fp32_modules = get_ordered_list_of_modules(model, dummy_input)
fp32_modules = [(name, module) for (name, module) in fp32_modules if isinstance(module, SUPPORTED_MODULES)]
run_seq_mse(fp32_modules, model, sim.model, params, params.forward_fn, cached_dataset, None)
enable_disable_quantizers(quantizers, enabled=True) |
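A hedged call-site sketch for the routine above; the import paths, the QuantizationSimModel and SeqMseParams signatures, and the default forward function's handling of plain-tensor batches are assumptions about the AIMET API rather than facts taken from the snippet:

import torch
from torch.utils.data import DataLoader
from aimet_common.defs import QuantScheme                    # assumed import path
from aimet_torch.quantsim import QuantizationSimModel        # assumed import path
from aimet_torch.seq_mse import SeqMseParams, apply_seq_mse  # assumed import path

# Toy float model and calibration data; batch_size=None makes the loader yield
# each tensor as-is, which the default forward function is assumed to accept.
fp32_model = torch.nn.Sequential(torch.nn.Linear(8, 16), torch.nn.ReLU(), torch.nn.Linear(16, 4))
dummy_input = torch.randn(1, 8)
calib_loader = DataLoader([torch.randn(8, 8) for _ in range(4)], batch_size=None)

sim = QuantizationSimModel(fp32_model, dummy_input, quant_scheme=QuantScheme.post_training_tf)
params = SeqMseParams(num_batches=4)  # other fields left at their assumed defaults
apply_seq_mse(fp32_model, sim, calib_loader, params)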
def _long_description(dist: 'Distribution', val: _DictOrStr, root_dir: _Path):
from setuptools.config import expand
if isinstance(val, str):
file: Union[(str, list)] = val
text = expand.read_files(file, root_dir)
ctype = _guess_content_type(val)
else:
file = (val.get('file') or [])
text = (val.get('text') or expand.read_files(file, root_dir))
ctype = val['content-type']
_set_config(dist, 'long_description', text)
if ctype:
_set_config(dist, 'long_description_content_type', ctype)
if file:
dist._referenced_files.add(cast(str, file)) |
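The two val shapes handled above mirror PEP 621's readme field; as a small illustration (file names are placeholders):

# Branch 1: a bare path, with the content type guessed from the suffix.
val_as_str = 'README.rst'
# Branch 2: a table with an explicit content type (and either 'file' or 'text').
val_as_dict = {'file': 'README.md', 'content-type': 'text/markdown'}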