code stringlengths 281 23.7M |
|---|
def define_G(model_opt):
    """Build the SR3 U-Net generator described by ``model_opt``."""
    from .sr3_modules import unet

    # Default to 32 normalization groups when the option is absent or unset.
    if model_opt.get('norm_groups') is None:
        model_opt['norm_groups'] = 32
    return unet.UNet(
        in_channel=model_opt['in_channel'],
        out_channel=model_opt['out_channel'],
        norm_groups=model_opt['norm_groups'],
        inner_channel=model_opt['inner_channel'],
        channel_mults=model_opt['channel_multiplier'],
        attn_res=model_opt['attn_res'],
        res_blocks=model_opt['res_blocks'],
        dropout=model_opt['dropout'],
        image_size=256,
    )
class TPaneEntry(TestCase):
    """Behaviour checks for the browser pane entry classes."""

    def test_all_have(self):
        entry = SongsEntry('foo', 'foo', SONGS)
        assert not entry.all_have('artist', 'one')
        assert not entry.all_have('~#mtime', 4)
        assert entry.all_have('foo', 'bar')

    def test_all(self):
        entry = AllEntry()
        config = PaneConfig('title:artist')
        self.assertFalse(entry.get_count_markup(config))
        entry.get_markup(config)
        assert list(entry.songs) == []
        assert not entry.contains_text('')
        repr(entry)

    def test_unknown(self):
        entry = UnknownEntry(SONGS)
        config = PaneConfig('title:artist')
        assert entry.songs == set(SONGS)
        assert entry.key == ''
        assert not entry.contains_text('')
        self.assertIn(util.escape(SONGS[0]('artist')), entry.get_count_markup(config))
        entry.get_markup(config)
        repr(entry)

    def test_songs(self):
        entry = SongsEntry('key', 'key', SONGS)
        assert entry.key == 'key'
        config = PaneConfig('title:artist')
        self.assertIn('boris', entry.get_count_markup(config))
        self.assertEqual(entry.get_markup(config), 'key')
        self.assertTrue(entry.contains_text('key'))
        repr(entry)

    def test_songs_markup(self):
        entry = SongsEntry('key', 'key', SONGS)
        config = PaneConfig('<title>')
        self.assertEqual(entry.get_markup(config), 'key')
class FeatureExtractionPipeline(Pipeline):
    """Pipeline that returns the raw model outputs as nested Python lists."""

    def _sanitize_parameters(self, truncation=None, **kwargs):
        # Only preprocessing takes a parameter; forward/postprocess get none.
        preprocess_params = {} if truncation is None else {'truncation': truncation}
        return preprocess_params, {}, {}

    def preprocess(self, inputs, truncation=None) -> Dict[str, GenericTensor]:
        """Tokenize *inputs* into tensors matching the active framework."""
        tokenizer_kwargs = {} if truncation is None else {'truncation': truncation}
        return self.tokenizer(inputs, return_tensors=self.framework, **tokenizer_kwargs)

    def _forward(self, model_inputs):
        return self.model(**model_inputs)

    def postprocess(self, model_outputs):
        # The first output tensor holds the extracted features.
        if self.framework == 'pt':
            return model_outputs[0].tolist()
        if self.framework == 'tf':
            return model_outputs[0].numpy().tolist()

    def __call__(self, *args, **kwargs):
        return super().__call__(*args, **kwargs)
def get_template_files(template_name, project_name, **kwargs):
    """Render the named project template with metadata defaults filled in."""
    normalized = project_name.lower().replace('.', '-')
    kwargs['project_name'] = project_name
    kwargs['project_name_normalized'] = normalized
    kwargs['package_name'] = normalized.replace('-', '_')
    # Defaults come from the root config; caller-supplied values win.
    config = RootConfig({})
    defaults = {
        'author': config.template.name,
        'email': config.template.email,
        'year': str(datetime.now(timezone.utc).year),
    }
    for key, value in defaults.items():
        kwargs.setdefault(key, value)
    return __load_template_module(template_name)(**kwargs)
class Solution(object):
    def isIsomorphic(self, s, t):
        """Return True if *s* and *t* are isomorphic strings.

        Two strings are isomorphic when the characters of *s* can be mapped
        one-to-one onto the characters of *t*, preserving order.  Uses two
        dicts instead of the original fixed-size 127-entry ASCII tables,
        which indexed out of range for non-ASCII characters and conflated
        NUL (ord 0) with "unmapped".

        Time O(len(s)), space O(distinct characters).
        """
        if len(s) != len(t):
            return False
        map_s_to_t = {}
        map_t_to_s = {}
        for cs, ct in zip(s, t):
            if cs not in map_s_to_t and ct not in map_t_to_s:
                # Fresh pair on both sides: record the bijection.
                map_s_to_t[cs] = ct
                map_t_to_s[ct] = cs
            elif map_t_to_s.get(ct) != cs or map_s_to_t.get(cs) != ct:
                # Either side already maps elsewhere (or is unmapped).
                return False
        return True
def test_conftest_symlink(pytester: Pytester) -> None:
    """Collection through symlinks: conftest resolution differs per entry point."""
    real = pytester.mkdir('real')
    realtests = real.joinpath('app/tests')
    realtests.mkdir(parents=True)
    # Two links into the tree: one straight at the tests dir, one at the root.
    symlink_or_skip(realtests, pytester.path.joinpath('symlinktests'))
    symlink_or_skip(real, pytester.path.joinpath('symlink'))
    files = {
        'real/app/tests/test_foo.py': 'def test1(fixture): pass',
        'real/conftest.py': textwrap.dedent('\n import pytest\n\n print("conftest_loaded")\n\n \n def fixture():\n print("fixture_used")\n '),
    }
    pytester.makepyfile(**files)
    # Entered via the tests-dir symlink, the 'fixture' fixture is not found
    # and the run fails.
    run = pytester.runpytest('-vs', 'symlinktests')
    run.stdout.fnmatch_lines(["*fixture 'fixture' not found*"])
    assert run.ret == ExitCode.TESTS_FAILED
    # Entered via the root symlink, the run passes.
    run = pytester.runpytest('-vs', 'symlink')
    assert run.ret == ExitCode.OK
def test_multitensor_offsetmap():
    """Offsets into the flattened MultiTensor follow cumulative flat sizes."""
    arrays = {
        'a': np.random.random((5, 5, 5, 5)),
        'b': np.random.random((4, 4, 4)),
        'c': np.random.random((3, 3)),
    }
    tensors = [Tensor(tensor=arr, name=name) for name, arr in arrays.items()]
    mt = MultiTensor(tensors)
    expected = {'a': 0, 'b': 5 ** 4, 'c': 5 ** 4 + 4 ** 3}
    assert mt.off_set_map == expected
class MultiloadCz(BaseDecrypter):
    """Decrypter plugin for multiload.cz folder and link-manager pages."""

    __name__ = 'MultiloadCz'
    __type__ = 'decrypter'
    __version__ = '0.46'
    __status__ = 'testing'
    # NOTE(review): the pattern literal below is truncated in this copy of
    # the file (unterminated string) — restore it from the upstream plugin.
    __pattern__ = '
    __config__ = [('enabled', 'bool', 'Activated', True), ('use_premium', 'bool', 'Use premium account if available', True), ('use_subfolder', 'bool', 'Save package to subfolder', True), ('subfolder_per_package', 'bool', 'Create a subfolder for each package', True), ('usedHoster', 'str', 'Prefered hoster list (bar-separated)', ''), ('ignoredHoster', 'str', 'Ignored hoster list (bar-separated)', '')]
    __description__ = 'Multiload.cz decrypter plugin'
    __license__ = 'GPLv3'
    __authors__ = [('zoidberg', '')]

    # Folder pages list all links inside a <textarea>.
    FOLDER_PATTERN = '<form action="" method="get"><textarea.*?>([^>]*)</textarea></form>'
    # Link-manager pages pair a hoster name with its download URL.
    LINK_PATTERN = '<p class="manager-server"><strong>(.+?)</strong></p><p class="manager-linky"><a href="(.+?)">'

    def decrypt(self, pyfile):
        # Collect target links either from a folder ("slozka") page or from
        # the per-hoster list, honouring usedHoster/ignoredHoster options.
        self.data = self.load(pyfile.url)
        if (re.match(self.__pattern__, pyfile.url).group(1) == 'slozka'):
            m = re.search(self.FOLDER_PATTERN, self.data)
            if (m is not None):
                self.links.extend(m.group(1).split())
        else:
            m = re.findall(self.LINK_PATTERN, self.data)
            # NOTE(review): re.findall always returns a list, never None, so
            # this guard is vacuous; an empty match list simply yields no
            # links and falls through to the ignoredHoster fallback.
            if (m is not None):
                # First pass: only links on explicitly preferred hosters.
                prefered_set = set(self.config.get('usedHoster').split('|'))
                self.links.extend((x[1] for x in m if (x[0] in prefered_set)))
                if (not self.links):
                    # Fallback: everything except explicitly ignored hosters.
                    ignored_set = set(self.config.get('ignoredHoster').split('|'))
                    self.links.extend((x[1] for x in m if (x[0] not in ignored_set)))
class ViewProviderAsmConstraint(ViewProviderAsmGroup):
    """View provider for an assembly constraint object."""

    def setupContextMenu(self, vobj, menu):
        # Offer an enable/disable toggle whose label reflects the current
        # Disabled state, then let the Constraint proxy add its own entries.
        obj = vobj.Object
        action = QtGui.QAction(QtGui.QIcon(), ('Enable constraint' if obj.Disabled else 'Disable constraint'), menu)
        QtCore.QObject.connect(action, QtCore.SIGNAL('triggered()'), self.toggleDisable)
        menu.addAction(action)
        Constraint.setupContextMenu(obj, menu)

    def toggleDisable(self):
        # Flip Disabled inside a FreeCAD transaction; abort it on failure.
        obj = self.ViewObject.Object
        FreeCAD.setActiveTransaction('Toggle constraint')
        try:
            obj.Disabled = (not obj.Disabled)
            FreeCAD.closeActiveTransaction()
        except Exception:
            FreeCAD.closeActiveTransaction(True)
            raise

    def attach(self, vobj):
        super(ViewProviderAsmConstraint, self).attach(vobj)
        vobj.OnTopWhenSelected = 2
        try:
            # overrideSwitch is not available in all FreeCAD versions;
            # silently skip if the attribute does not exist.
            vobj.SwitchNode.overrideSwitch = 'OverrideVisible'
        except Exception:
            pass

    def getIcon(self):
        return Constraint.getIcon(self.ViewObject.Object)

    def _getSelection(self, owner, subname, elements):
        # Translate a drop target (owner/subname/elements) into an
        # AsmConstraint selection rooted at the owning assembly.
        # Raises RuntimeError when the drop does not come from the same
        # assembly.
        if (not owner):
            raise RuntimeError('no owner')
        parent = getattr(owner.Proxy, 'parent', None)
        if isinstance(parent, AsmConstraintGroup):
            # Dropped onto a sibling constraint: hop up to its group first.
            subname = ((owner.Name + '.') + subname)
            owner = parent.Object
            parent = parent.parent
        if (not isinstance(parent, Assembly)):
            raise RuntimeError('not from the same assembly {},{}'.format(objName(owner), parent))
        subname = ((owner.Name + '.') + subname)
        obj = self.ViewObject.Object
        # Subname path of this constraint itself, appended to the selection.
        mysub = (((parent.getConstraintGroup().Name + '.') + obj.Name) + '.')
        sel = []
        if (not elements):
            elements = ['']
        elements = [(subname + element) for element in elements]
        elements.append(mysub)
        sel = [Selection(Object=parent.Object, SubElementNames=elements)]
        typeid = Constraint.getTypeID(obj)
        return AsmConstraint.getSelection(typeid, sel)

    def canDropObjectEx(self, _obj, owner, subname, elements):
        # A drop is allowed iff _getSelection succeeds; failures are logged
        # by logger.catchTrace rather than raised.
        cstr = self.ViewObject.Object
        if logger.catchTrace('Cannot drop to AsmConstraint {}'.format(cstr), self._getSelection, owner, subname, elements):
            return True
        return False

    def dropObjectEx(self, _vobj, _obj, owner, subname, elements):
        sel = self._getSelection(owner, subname, elements)
        cstr = self.ViewObject.Object
        typeid = Constraint.getTypeID(cstr)
        # Rebuild the selection around this constraint and add the elements.
        sel = AsmConstraint.Selection(SelObject=None, SelSubname=None, Assembly=sel.Assembly, Constraint=cstr, Elements=sel.Elements)
        AsmConstraint.make(typeid, sel, undo=False)
        return '.'

    def canDelete(self, _obj):
        return True
class AttrVI_ATTR_4882_COMPLIANT(BooleanAttribute):
    """VI_ATTR_4882_COMPLIANT: whether the device claims IEEE 488.2 compliance."""

    # Session types this attribute is defined for.
    resources = [(constants.InterfaceType.usb, 'INSTR'), (constants.InterfaceType.vxi, 'INSTR')]
    # Name exposed on the Python resource object.
    py_name = 'is_4882_compliant'
    visa_name = 'VI_ATTR_4882_COMPLIANT'
    visa_type = 'ViBoolean'
    default = NotAvailable
    # Readable, not writable, not settable locally.
    (read, write, local) = (True, False, False)
class PluginEnabledFilterCombo(Gtk.ComboBox):
    """Combo box for filtering the plugin list by enabled state or tag."""

    def __init__(self):
        store = Gtk.ListStore(str, int)
        super().__init__(model=store)
        renderer = Gtk.CellRendererText()
        renderer.props.ellipsize = Pango.EllipsizeMode.END
        self.pack_start(renderer, True)
        self.add_attribute(renderer, 'text', 0)

        def is_separator(model, iter_, data):
            # Rows tagged EnabledType.SEP render as separator lines.
            return model[iter_][1] == EnabledType.SEP

        self.set_row_separator_func(is_separator, None)

    def refill(self, tags, no_tags):
        """Rebuild the rows from *tags*, keeping the current selection index."""
        active = max(self.get_active(), 0)
        store = self.get_model()
        store.clear()
        rows = [
            (_('Any state'), EnabledType.ALL),
            ('', EnabledType.SEP),
            (_('Enabled'), EnabledType.EN),
            (_('Disabled'), EnabledType.DIS),
        ]
        if tags:
            rows.append(('', EnabledType.SEP))
            rows.extend((tag, EnabledType.TAG) for tag in sorted(tags))
        if no_tags:
            rows.append((_('No category'), EnabledType.NO))
        for label, kind in rows:
            store.append([label, kind])
        self.set_active(active)

    def get_active_row(self):
        """Return the active row as [text, type], or None if nothing is active."""
        iter_ = self.get_active_iter()
        if iter_:
            return list(self.get_model()[iter_])
class CTOCFlagsSpec(ByteSpec):
    """Byte spec whose values are wrapped in the CTOCFlags enum."""

    def read(self, header, frame, data):
        # Read the raw byte, then interpret it as flag bits.
        raw, rest = ByteSpec.read(self, header, frame, data)
        return CTOCFlags(raw), rest

    def validate(self, frame, value):
        validated = ByteSpec.validate(self, frame, value)
        if validated is None:
            return validated
        return CTOCFlags(validated)
def test_transform_bounds_densify_out_of_bounds():
    """A negative densify_pts must raise a ProjError."""
    laea = (
        '+proj=laea +lat_0=45 +lon_0=-100 +x_0=0 +y_0=0 '
        '+a=6370997 +b=6370997 +units=m +no_defs'
    )
    transformer = Transformer.from_crs('EPSG:4326', laea, always_xy=True)
    with pytest.raises(ProjError):
        transformer.transform_bounds(-120, 40, -80, 64, densify_pts=-1)
class LRSchedulerStep(object):
    """Piecewise learning-rate / momentum scheduler driven by the global step.

    ``lr_phases`` and ``mom_phases`` are sequences of ``(start_fraction,
    func)`` pairs: ``start_fraction`` is the fraction of ``total_step`` at
    which the phase begins, and ``func`` maps phase progress in ``[0, 1)``
    to a value.  The first phase must start at fraction 0 and the start
    fractions must be strictly increasing.
    """

    def __init__(self, fai_optimizer: OptimWrapper, total_step, lr_phases, mom_phases):
        self.optimizer = fai_optimizer
        self.total_step = total_step
        # The lr and mom phase lists were previously built by two identical
        # copies of this logic; share one helper instead.
        self.lr_phases = self._build_phases(lr_phases, total_step)
        self.mom_phases = self._build_phases(mom_phases, total_step)

    @staticmethod
    def _build_phases(phases, total_step):
        """Convert ``(fraction, func)`` pairs into ``(start, end, func)`` steps.

        Bug fix: the original asserted ``previous_start_step < current_fraction``,
        mixing step counts with fractions, which failed spuriously as soon as
        three or more phases were configured.  We compare fractions instead.
        """
        built = []
        prev_fraction = None
        for i, (start, lambda_func) in enumerate(phases):
            if prev_fraction is not None:
                # Phase start fractions must be strictly increasing.
                assert prev_fraction < start
            prev_fraction = start
            if isinstance(lambda_func, str):
                # Phase functions may arrive as source strings from config
                # files; eval of trusted config only.
                lambda_func = eval(lambda_func)
            begin = int(start * total_step)
            if i < len(phases) - 1:
                end = int(phases[i + 1][0] * total_step)
            else:
                end = total_step
            built.append((begin, end, lambda_func))
        if built:
            # The schedule must cover the run from step 0.
            assert built[0][0] == 0
        return built

    def step(self, step):
        """Set optimizer lr/momentum for global *step* (last matching phase wins)."""
        for begin, end, func in self.lr_phases:
            if step >= begin:
                self.optimizer.lr = func((step - begin) / (end - begin))
        for begin, end, func in self.mom_phases:
            if step >= begin:
                self.optimizer.mom = func((step - begin) / (end - begin))
class Condition(models.Model):
    """A condition comparing the value(s) of a source attribute to a target.

    Conditions are evaluated via :meth:`resolve` against a collection of
    value objects and support equality, containment, numeric comparison and
    emptiness relations.
    """

    RELATION_EQUAL = 'eq'
    RELATION_NOT_EQUAL = 'neq'
    RELATION_CONTAINS = 'contains'
    RELATION_GREATER_THAN = 'gt'
    RELATION_GREATER_THAN_EQUAL = 'gte'
    RELATION_LESSER_THAN = 'lt'
    RELATION_LESSER_THAN_EQUAL = 'lte'
    RELATION_EMPTY = 'empty'
    RELATION_NOT_EMPTY = 'notempty'
    RELATION_CHOICES = ((RELATION_EQUAL, 'is equal to (==)'), (RELATION_NOT_EQUAL, 'is not equal to (!=)'), (RELATION_CONTAINS, 'contains'), (RELATION_GREATER_THAN, 'is greater than (>)'), (RELATION_GREATER_THAN_EQUAL, 'is greater than or equal (>=)'), (RELATION_LESSER_THAN, 'is lesser than (<)'), (RELATION_LESSER_THAN_EQUAL, 'is lesser than or equal (<=)'), (RELATION_EMPTY, 'is empty'), (RELATION_NOT_EMPTY, 'is not empty'))

    uri = models.URLField(max_length=800, blank=True, verbose_name=_('URI'), help_text=_('The Uniform Resource Identifier of this condition (auto-generated).'))
    uri_prefix = models.URLField(max_length=256, verbose_name=_('URI Prefix'), help_text=_('The prefix for the URI of this condition.'))
    uri_path = models.SlugField(max_length=512, blank=True, verbose_name=_('URI Path'), help_text=_('The path for the URI of this condition.'))
    comment = models.TextField(blank=True, verbose_name=_('Comment'), help_text=_('Additional internal information about this condition.'))
    locked = models.BooleanField(default=False, verbose_name=_('Locked'), help_text=_('Designates whether this condition can be changed.'))
    editors = models.ManyToManyField(Site, related_name='conditions_as_editor', blank=True, verbose_name=_('Editors'), help_text=_('The sites that can edit this condition (in a multi site setup).'))
    source = models.ForeignKey(Attribute, blank=True, null=True, on_delete=models.SET_NULL, related_name='conditions', db_constraint=False, verbose_name=_('Source'), help_text=_('The attribute of the value for this condition.'))
    relation = models.CharField(max_length=8, choices=RELATION_CHOICES, verbose_name=_('Relation'), help_text=_('The relation this condition is using.'))
    target_text = models.CharField(max_length=256, blank=True, verbose_name=_('Target (Text)'), help_text=_('If using a regular value, the text value this condition is checking against (for boolean values use 1 and 0).'))
    target_option = models.ForeignKey('options.Option', blank=True, null=True, on_delete=models.SET_NULL, related_name='conditions', db_constraint=False, verbose_name=_('Target (Option)'), help_text=_('If using a value pointing to an option, the option this condition is checking against.'))

    class Meta:
        ordering = ('uri',)
        verbose_name = _('Condition')
        verbose_name_plural = _('Conditions')

    def __str__(self):
        return self.uri

    def save(self, *args, **kwargs):
        # The URI is derived from prefix and path on every save.
        self.uri = self.build_uri(self.uri_prefix, self.uri_path)
        super().save(*args, **kwargs)

    # NOTE(review): source_label/relation_label/target_label/is_locked look
    # like they carried @property decorators upstream — confirm before
    # changing call sites.
    def source_label(self):
        return self.source.uri

    def relation_label(self):
        return self.get_relation_display()

    def target_label(self):
        # Prefer the option label over the free-text target.
        if self.target_option:
            return self.target_option.label
        else:
            return self.target_text

    def is_locked(self):
        return self.locked

    def resolve(self, values, set_prefix=None, set_index=None):
        """Evaluate this condition against *values*.

        The values are narrowed to those of the source attribute and, when
        given, the matching set_prefix/set_index.  If nothing matches and a
        set_prefix is present, the lookup retries one set level up.
        """
        source_values = filter(lambda value: value.attribute == self.source, values)
        if set_prefix is not None:
            source_values = filter(lambda value: value.set_prefix == set_prefix, source_values)
        if set_index is not None:
            # Values that are not part of a collection apply to every index.
            source_values = filter(lambda value: (value.set_index == int(set_index)) or (value.set_collection is False), source_values)
        source_values = list(source_values)
        if not source_values:
            if set_prefix:
                # Walk one level up the set hierarchy and retry.
                rpartition = set_prefix.rpartition('|')
                set_prefix, set_index = rpartition[0], int(rpartition[2])
                return self.resolve(values, set_prefix, set_index)
        if self.relation == self.RELATION_EQUAL:
            return self._resolve_equal(source_values)
        elif self.relation == self.RELATION_NOT_EQUAL:
            return not self._resolve_equal(source_values)
        elif self.relation == self.RELATION_CONTAINS:
            return self._resolve_contains(source_values)
        elif self.relation == self.RELATION_GREATER_THAN:
            return self._resolve_greater_than(source_values)
        elif self.relation == self.RELATION_GREATER_THAN_EQUAL:
            return self._resolve_greater_than_equal(source_values)
        elif self.relation == self.RELATION_LESSER_THAN:
            return self._resolve_lesser_than(source_values)
        elif self.relation == self.RELATION_LESSER_THAN_EQUAL:
            return self._resolve_lesser_than_equal(source_values)
        elif self.relation == self.RELATION_EMPTY:
            return not self._resolve_not_empty(source_values)
        elif self.relation == self.RELATION_NOT_EMPTY:
            return self._resolve_not_empty(source_values)
        else:
            # Unknown relation: fail closed.
            return False

    def _resolve_equal(self, values):
        """True if any value equals the target option/text."""
        if self.target_option:
            return any(value.option == self.target_option for value in values)
        return any(value.text == self.target_text for value in values)

    def _resolve_contains(self, values):
        """True if any value's text contains the target text."""
        return any(self.target_text in value.text for value in values)

    def _resolve_numeric(self, values, compare):
        """True if any value compares numerically to the target via *compare*.

        Non-numeric values (either side) are skipped, matching the original
        per-value ValueError handling of the four comparator methods this
        helper replaces.
        """
        for value in values:
            try:
                if compare(float(value.text), float(self.target_text)):
                    return True
            except ValueError:
                pass
        return False

    def _resolve_greater_than(self, values):
        return self._resolve_numeric(values, lambda a, b: a > b)

    def _resolve_greater_than_equal(self, values):
        return self._resolve_numeric(values, lambda a, b: a >= b)

    def _resolve_lesser_than(self, values):
        return self._resolve_numeric(values, lambda a, b: a < b)

    def _resolve_lesser_than_equal(self, values):
        return self._resolve_numeric(values, lambda a, b: a <= b)

    def _resolve_not_empty(self, values):
        """True if any value carries text or an option."""
        for value in values:
            if bool(value.text) or bool(value.option):
                return True
        return False

    @classmethod
    def build_uri(cls, uri_prefix, uri_path):
        """Join prefix and path into the condition URI.

        Declared with ``cls`` but previously missing the @classmethod
        decorator (apparently stripped); restored — instance calls such as
        ``self.build_uri(...)`` keep working.
        """
        if not uri_path:
            raise RuntimeError('uri_path is missing')
        return join_url((uri_prefix or settings.DEFAULT_URI_PREFIX), '/conditions/', uri_path)
class GroupPushRulesManager(GetWithoutIdMixin, CreateMixin, UpdateMixin, DeleteMixin, RESTManager):
    """Manager for the single push-rules object attached to a group."""

    _path = '/groups/{group_id}/push_rule'
    _obj_cls = GroupPushRules
    _from_parent_attrs = {'group_id': 'id'}
    # Create and update accept the same optional attribute set.
    _create_attrs = RequiredOptional(optional=('deny_delete_tag', 'member_check', 'prevent_secrets', 'commit_message_regex', 'commit_message_negative_regex', 'branch_name_regex', 'author_email_regex', 'file_name_regex', 'max_file_size', 'commit_committer_check', 'reject_unsigned_commits'))
    _update_attrs = RequiredOptional(optional=('deny_delete_tag', 'member_check', 'prevent_secrets', 'commit_message_regex', 'commit_message_negative_regex', 'branch_name_regex', 'author_email_regex', 'file_name_regex', 'max_file_size', 'commit_committer_check', 'reject_unsigned_commits'))

    def get(self, **kwargs: Any) -> GroupPushRules:
        """Retrieve the group's push rules (narrowed return type for typing)."""
        return cast(GroupPushRules, super().get(**kwargs))
class OpVisitor(Generic[T]):
    """Generic visitor over IR operations.

    Subclasses implement one ``visit_*`` method per op kind and return a
    value of type ``T``.  Every method here raises NotImplementedError so
    that a subclass missing a case fails loudly rather than silently.
    """

    def visit_goto(self, op: Goto) -> T:
        raise NotImplementedError

    def visit_branch(self, op: Branch) -> T:
        raise NotImplementedError

    def visit_return(self, op: Return) -> T:
        raise NotImplementedError

    def visit_unreachable(self, op: Unreachable) -> T:
        raise NotImplementedError

    def visit_assign(self, op: Assign) -> T:
        raise NotImplementedError

    def visit_assign_multi(self, op: AssignMulti) -> T:
        raise NotImplementedError

    def visit_load_error_value(self, op: LoadErrorValue) -> T:
        raise NotImplementedError

    def visit_load_literal(self, op: LoadLiteral) -> T:
        raise NotImplementedError

    def visit_get_attr(self, op: GetAttr) -> T:
        raise NotImplementedError

    def visit_set_attr(self, op: SetAttr) -> T:
        raise NotImplementedError

    def visit_load_static(self, op: LoadStatic) -> T:
        raise NotImplementedError

    def visit_init_static(self, op: InitStatic) -> T:
        raise NotImplementedError

    def visit_tuple_get(self, op: TupleGet) -> T:
        raise NotImplementedError

    def visit_tuple_set(self, op: TupleSet) -> T:
        raise NotImplementedError

    def visit_inc_ref(self, op: IncRef) -> T:
        raise NotImplementedError

    def visit_dec_ref(self, op: DecRef) -> T:
        raise NotImplementedError

    def visit_call(self, op: Call) -> T:
        raise NotImplementedError

    def visit_method_call(self, op: MethodCall) -> T:
        raise NotImplementedError

    def visit_cast(self, op: Cast) -> T:
        raise NotImplementedError

    def visit_box(self, op: Box) -> T:
        raise NotImplementedError

    def visit_unbox(self, op: Unbox) -> T:
        raise NotImplementedError

    def visit_raise_standard_error(self, op: RaiseStandardError) -> T:
        raise NotImplementedError

    def visit_call_c(self, op: CallC) -> T:
        raise NotImplementedError

    def visit_truncate(self, op: Truncate) -> T:
        raise NotImplementedError

    def visit_extend(self, op: Extend) -> T:
        raise NotImplementedError

    def visit_load_global(self, op: LoadGlobal) -> T:
        raise NotImplementedError

    def visit_int_op(self, op: IntOp) -> T:
        raise NotImplementedError

    def visit_comparison_op(self, op: ComparisonOp) -> T:
        raise NotImplementedError

    def visit_float_op(self, op: FloatOp) -> T:
        raise NotImplementedError

    def visit_float_neg(self, op: FloatNeg) -> T:
        raise NotImplementedError

    def visit_float_comparison_op(self, op: FloatComparisonOp) -> T:
        raise NotImplementedError

    def visit_load_mem(self, op: LoadMem) -> T:
        raise NotImplementedError

    def visit_set_mem(self, op: SetMem) -> T:
        raise NotImplementedError

    def visit_get_element_ptr(self, op: GetElementPtr) -> T:
        raise NotImplementedError

    def visit_load_address(self, op: LoadAddress) -> T:
        raise NotImplementedError

    def visit_keep_alive(self, op: KeepAlive) -> T:
        raise NotImplementedError

    def visit_unborrow(self, op: Unborrow) -> T:
        raise NotImplementedError
class Block(nn.Module):
    """Residual block: two 3x3 conv + BN + PReLU stages with an identity skip."""

    def __init__(self, channels):
        super(Block, self).__init__()
        # Both convolutions preserve spatial size (kernel 3, stride 1, pad 1).
        self.conv1 = nn.Conv2d(channels, channels, 3, 1, 1, bias=False)
        self.bn1 = nn.BatchNorm2d(channels)
        self.prelu1 = nn.PReLU(channels)
        self.conv2 = nn.Conv2d(channels, channels, 3, 1, 1, bias=False)
        self.bn2 = nn.BatchNorm2d(channels)
        self.prelu2 = nn.PReLU(channels)

    def forward(self, x):
        out = self.prelu1(self.bn1(self.conv1(x)))
        out = self.prelu2(self.bn2(self.conv2(out)))
        # Identity shortcut around both stages.
        return out + x
def test_bng_printer():
    """BNG printer output for constants, logic operators and functions."""
    assert _bng_print(sympy.pi) == '_pi'
    assert _bng_print(sympy.E) == '_e'
    x, y = sympy.symbols('x y')
    assert _bng_print(sympy.sympify('x & y')) == 'x && y'
    assert _bng_print(sympy.sympify('x | y')) == 'x || y'
    # One-argument functions that map straight to the same BNG name.
    for func, name in [
        (sympy.sin, 'sin'), (sympy.cos, 'cos'), (sympy.tan, 'tan'),
        (sympy.asin, 'asin'), (sympy.acos, 'acos'), (sympy.atan, 'atan'),
        (sympy.sinh, 'sinh'), (sympy.cosh, 'cosh'), (sympy.tanh, 'tanh'),
        (sympy.asinh, 'asinh'), (sympy.acosh, 'acosh'), (sympy.atanh, 'atanh'),
        (sympy.exp, 'exp'), (sympy.sqrt, 'sqrt'),
    ]:
        assert _bng_print(func(x)) == '%s(x)' % name
    # Renamed or emulated functions.
    assert _bng_print(sympy.log(x)) == 'ln(x)'
    assert _bng_print(sympy.Abs(x)) == 'abs(x)'
    assert _bng_print(sympy.floor(x)) == 'rint(x - 0.5)'
    assert _bng_print(sympy.ceiling(x)) == '(rint(x + 1) - 1)'
    assert _bng_print(sympy.Min(x, y)) == 'min(x, y)'
    assert _bng_print(sympy.Max(x, y)) == 'max(x, y)'
    # Relational operators.
    assert _bng_print(sympy.Eq(x, y)) == 'x == y'
    assert _bng_print(sympy.Ne(x, y)) == 'x != y'
    assert _bng_print(x < y) == 'x < y'
    assert _bng_print(x <= y) == 'x <= y'
    assert _bng_print(x > y) == 'x > y'
    assert _bng_print(x >= y) == 'x >= y'
@pytest.fixture(params=[_PROJECT_TASK, _PROJECT_TASK_NEW_INTERFACE])
def project(request, tmp_path):
    """Materialize a sample task project for both task-interface variants.

    The decorator line had been corrupted to a bare ``(params=...)``
    expression (a SyntaxError); restored as @pytest.fixture, which the use
    of ``request.param`` requires.
    """
    tmp_path.joinpath('task_module.py').write_text(textwrap.dedent(request.param))
    tmp_path.joinpath('in.txt').touch()
    # Files and folders the task under test is expected to delete.
    tmp_path.joinpath('to_be_deleted_file_1.txt').touch()
    folder = tmp_path.joinpath('to_be_deleted_folder_1')
    folder.mkdir()
    folder.joinpath('to_be_deleted_file_2.txt').touch()
    return tmp_path
class UploaderBase(object):
    """Common plumbing for uploaders: token creation, regions and up hosts."""

    __metaclass__ = abc.ABCMeta

    def __init__(self, bucket_name, **kwargs):
        # Optional kwargs: auth credentials, an explicit region list and a
        # directory used to cache resolved upload hosts.
        self.bucket_name = bucket_name
        self.auth = kwargs.get('auth', None)
        regions = kwargs.get('regions', [])
        self.regions = regions
        hosts_cache_dir = kwargs.get('hosts_cache_dir', None)
        self.hosts_cache_dir = hosts_cache_dir

    def get_up_token(self, **kwargs):
        """Build an upload token via the configured auth object.

        Accepts an optional bucket_name override plus the token options
        key/expires/policy/strict_policy.  Raises ValueError when no auth
        object was provided.
        """
        if (not self.auth):
            raise ValueError('can not get up_token by auth not provided')
        bucket_name = kwargs.get('bucket_name', self.bucket_name)
        # Forward only the token-related options that were actually given.
        kwargs_for_up_token = {k: kwargs[k] for k in ['key', 'expires', 'policy', 'strict_policy'] if (k in kwargs)}
        up_token = self.auth.upload_token(bucket=bucket_name, **kwargs_for_up_token)
        return up_token

    def _get_regions(self):
        # Lazily fall back to the globally configured default zone and cache it.
        if self.regions:
            return self.regions
        default_region = config.get_default('default_zone')
        if default_region:
            self.regions = [default_region]
        return self.regions

    def _get_up_hosts(self, access_key=None):
        """Resolve the upload host list for the first available region."""
        if ((not self.auth) and (not access_key)):
            raise ValueError('Must provide access_key if auth is unavailable.')
        if (not access_key):
            access_key = self.auth.get_access_key()
        regions = self._get_regions()
        if (not regions):
            raise ValueError('No region available.')
        # Prefer hosts pinned on the region object; otherwise query them.
        if (regions[0].up_host and regions[0].up_host_backup):
            return [regions[0].up_host, regions[0].up_host_backup]
        return regions[0].get_up_host(ak=access_key, bucket=self.bucket_name, home_dir=self.hosts_cache_dir)
def upload(self, key, file_path, data, data_size, modify_time, part_size, mime_type, metadata, file_name, custom_vars, **kwargs): |
def get_train_dataloader(data_pth, max_seq_length, train_batch_size):
    """Build the training DataLoader for the PGN model.

    Returns a ``(dataloader, features, vocab)`` tuple.

    NOTE(review): reads the module-level ``args`` for ``sum_mode`` and
    ``context_mode`` — confirm ``args`` is initialized before this is called.
    """
    print('processing training data')
    data = get_data(data_pth)
    # get_vocab=True makes convert_example_to_feature also return the vocab.
    (features, vocab) = convert_example_to_feature(data, max_seq_length, None, sum_mode=args.sum_mode, context_mode=args.context_mode, get_vocab=True)
    train_data = PGNDataset(features)
    sampler = RandomSampler(train_data)
    train_dataloader = DataLoader(train_data, sampler=sampler, batch_size=train_batch_size, collate_fn=batchify_data)
    return (train_dataloader, features, vocab)
def process_squad_data(data, morphological_analyzer):
    """Tokenize titles, contexts, questions and answers of SQuAD articles in place.

    Each paragraph context is expected to look like ``<title> [SEP] <context>``.
    Answer spans are re-anchored in the tokenized text; answers that cannot be
    located are reported on stderr and left untouched.  Returns the mutated
    *data* for convenience.
    """
    for article in data:
        tokenized_title = morphological_analyzer.get_tokenized_string(article['title'])
        for paragraph in article['paragraphs']:
            # Drop the leading title copy; only the raw context is re-tokenized.
            _, context = paragraph['context'].split(' [SEP] ')
            tokenized_context = morphological_analyzer.get_tokenized_string(context)
            for qa in paragraph['qas']:
                tokenized_question = morphological_analyzer.get_tokenized_string(qa['question'])
                for answer in qa['answers']:
                    tokenized_answer = morphological_analyzer.get_tokenized_string(answer['text'])
                    try:
                        tokenized_answer, answer_start = get_answer_start(
                            tokenized_answer, (tokenized_title + ' [SEP] ') + tokenized_context)
                        answer['text'] = tokenized_answer
                        answer['answer_start'] = answer_start
                    except Exception:
                        # Was a bare `except:`; keep the best-effort behaviour
                        # but stop swallowing KeyboardInterrupt/SystemExit.
                        print(f'not found {answer} in {context}', file=sys.stderr)
                        continue
                qa['question'] = tokenized_question
            paragraph['context'] = f'{tokenized_title} [SEP] {tokenized_context}'
        article['title'] = tokenized_title
    return data
class FakeTestCase(unittest.TestCase):
    """Minimal TestCase stand-in that mimics the subTest bookkeeping."""

    def runTest(self):
        pass

    def subTest(self, *args, **kwargs):
        # NOTE(review): this is a generator that installs unittest's private
        # _SubTest around the yield; it was presumably decorated with
        # @contextlib.contextmanager in the original source — confirm.
        try:
            self._subtest = unittest.case._SubTest(self, object(), {})
            (yield)
        finally:
            self._subtest = None

    def __call__(self, result):
        # Deliberately a no-op: this fake is never actually executed as a test.
        pass
def load_parent(**kwargs: Any) -> CompletedProcess:
    """Run the workflow file found above the calling file's directory."""
    current = inspect.currentframe()
    if not current:
        raise Exception('workflow: load_parent() called from unknown frame')
    caller = current.f_back
    if not caller:
        raise Exception('workflow: load_parent() called from unknown caller')
    caller_file = inspect.getframeinfo(caller).filename
    # Search starts one directory above the caller's file.
    parent_directory = Path(caller_file).parent.parent
    path = util.find_workflow_path(cwd=parent_directory)
    if not path:
        raise log.bad(f'workflow: load_parent() called in {caller_file} but pretf.workflow.py not found in parent directories')
    return custom(path, context=kwargs)
def generate_class_type_decl(cl: ClassIR, c_emitter: Emitter, external_emitter: Emitter, emitter: Emitter) -> None:
    """Emit declarations for a class's type object, object struct and ctor."""
    context = c_emitter.context
    name = emitter.type_struct_name(cl)
    context.declarations[name] = HeaderDeclaration(
        f'PyTypeObject *{name};', needs_export=True)
    # Non-extension classes need no object struct or native constructor.
    if not cl.is_ext_class:
        return
    generate_object_struct(cl, external_emitter)
    # Traits and classes with a builtin base get no native constructor.
    if not cl.is_trait and not cl.builtin_base:
        ctor_name = emitter.native_function_name(cl.ctor)
        context.declarations[ctor_name] = HeaderDeclaration(
            f'{native_function_header(cl.ctor, emitter)};', needs_export=True)
class TestLoadNetCDF2DPandas(TestLoadNetCDF):
    """Run the inherited NetCDF load tests against the 2D pandas test instrument."""

    def setup_method(self):
        """Point pysat at a temp data dir and build the 2D test instrument."""
        self.tempdir = tempfile.TemporaryDirectory()
        # Remember the original setting so teardown can restore it.
        self.saved_path = pysat.params['data_dirs']
        pysat.params['data_dirs'] = self.tempdir.name
        self.testInst = pysat.Instrument(platform='pysat', name='testing2d', update_files=True, num_samples=100, use_header=True)
        self.stime = pysat.instruments.pysat_testing2d._test_dates['']['']
        self.epoch_name = 'time'
        self.loaded_inst = None
        return

    def teardown_method(self):
        """Restore the data-dirs setting and drop all per-test state."""
        pysat.params['data_dirs'] = self.saved_path
        del self.loaded_inst, self.testInst, self.stime, self.epoch_name
        self.tempdir.cleanup()
        del self.tempdir, self.saved_path
        return
def check_other_isdataclass_overloads(x: type, y: object) -> None:
    """Static-typing check for the dataclasses stubs' is_dataclass narrowing.

    This function exists for a type checker, not for runtime: the
    assert_type calls verify the declared overloads of fields/asdict/
    astuple/replace before and after is_dataclass() narrowing.
    """
    # Unnarrowed calls on plain type/object arguments.
    dc.fields(y)
    dc.asdict(x)
    dc.asdict(y)
    dc.astuple(x)
    dc.astuple(y)
    dc.replace(x)
    dc.replace(y)
    # is_dataclass on a type narrows to Type[DataclassInstance].
    if dc.is_dataclass(x):
        assert_type(x, Type['DataclassInstance'])
        assert_type(dc.fields(x), Tuple[(dc.Field[Any], ...)])
    # is_dataclass on an object narrows to instance-or-class union.
    if dc.is_dataclass(y):
        assert_type(y, Union[('DataclassInstance', Type['DataclassInstance'])])
        assert_type(dc.fields(y), Tuple[(dc.Field[Any], ...)])
    # Excluding the class case leaves the instance type only.
    if (dc.is_dataclass(y) and (not isinstance(y, type))):
        assert_type(y, 'DataclassInstance')
        assert_type(dc.fields(y), Tuple[(dc.Field[Any], ...)])
        assert_type(dc.asdict(y), Dict[(str, Any)])
        assert_type(dc.astuple(y), Tuple[(Any, ...)])
        dc.replace(y)
def get_args_tuple(args, kwargs, arg_names, kwargs_defaults):
    """Normalize a call's args/kwargs into one positional tuple.

    Missing named arguments are filled from *kwargs* (or from
    *kwargs_defaults* when declared there); leftover keyword arguments are
    appended as sorted ``(name, value)`` pairs.  Raises TypeError when a
    required argument is absent.
    """
    result = list(args)
    try:
        # Fill the named parameters not covered by positional args.
        for name in arg_names[len(args):]:
            if name in kwargs_defaults:
                result.append(kwargs.get(name, kwargs_defaults[name]))
            else:
                result.append(kwargs[name])
        # Extra keyword arguments travel as sorted (name, value) pairs.
        extras = sorted(k for k in kwargs if k not in arg_names)
        result.extend((k, kwargs[k]) for k in extras)
    except KeyError as e:
        raise TypeError(('Missing argument %r' % (e.args[0],)))
    return tuple(result)
def test_mlpg_gradcheck():
    """MLPG autograd op passes gradcheck for unit and random variances."""
    static_dim = 2
    T = 10
    for windows in _get_windows_set():
        torch.manual_seed(1234)
        feat_dim = static_dim * len(windows)
        means = torch.rand(T, feat_dim, requires_grad=True)
        # Factories keep the RNG draw order identical to the original
        # (ones draws nothing; rand is drawn just before the second check).
        for make_variances in (
            lambda: torch.ones(feat_dim).expand(T, feat_dim),
            lambda: torch.rand(feat_dim).expand(T, feat_dim),
        ):
            variances = make_variances()
            inputs = (means, variances, windows)
            assert gradcheck(MLPG.apply, inputs, eps=0.001, atol=0.001)
def handle_long_project_survey_participants_request(**kwargs) -> Any:
    """Mock endpoint: return survey participants for matching requests.

    Responds with two participant records only when the request's
    ``instrument`` contains ``'test'`` and its ``event`` contains ``'raw'``;
    otherwise the body is JSON ``null``.  Returns ``(status, headers, body)``.
    """
    data = kwargs['data']
    headers = kwargs['headers']
    participant = {
        'email': '', 'email_occurrence': 1, 'identifier': '', 'record': '',
        'invitation_sent_status': 0, 'invitation_send_time': '',
        'response_status': 2, 'survey_access_code': '', 'survey_link': '',
    }
    resp = None
    # Default to '' so a request missing these fields yields the null
    # response instead of raising TypeError ("in None"), as the original did.
    if ('test' in data.get('instrument', '')) and ('raw' in data.get('event', '')):
        resp = [dict(participant), dict(participant)]
    return (201, headers, json.dumps(resp))
def test_class_interact():
    """Interactor.decorate() applied to bound and class-level methods."""
    parent = Parameter.create(name='parent', type='group')
    interactor = Interactor(parent=parent, nest=False)

    def outside_class_deco(func):
        # NOTE(review): the bare "(func)" below looks like the remnant of a
        # stripped decorator (e.g. @functools.wraps(func)) — confirm against
        # the original source.
        (func)
        def wrapper(*args, **kwargs):
            return func(*args, **kwargs)
        return wrapper

    class A():
        def a(self, x=5):
            return x

        # NOTE(review): b takes `cls`; a @classmethod decorator appears to
        # have been stripped here — confirm.
        def b(cls, y=5):
            return y

        # NOTE(review): the bare `_class_deco` below looks like a stripped
        # decorator reference for c — confirm.
        _class_deco
        def c(self, z=5):
            return z

    a = A()
    # Decorated callables must behave like the originals.
    ai = interactor.decorate()(a.a)
    assert (ai() == a.a())
    bi = interactor.decorate()(A.b)
    assert (bi() == A.b())
    ci = interactor.decorate()(a.c)
    assert (ci() == a.c())
@_optionals.HAS_PYSCF.require_in_instance
class PySCFDriver(ElectronicStructureDriver):
    """Electronic-structure driver backed by PySCF.

    NOTE(review): this block's ``@property``/``@<name>.setter``/``@staticmethod``
    decorators were stripped by corruption (duplicate ``def atom`` pairs and
    stray lines like ``_tol.setter``); they are restored here with the method
    bodies left unchanged.
    """

    def __init__(self, atom: (str | list[str])='H 0.0 0.0 0.0; H 0.0 0.0 0.735', *, unit: DistanceUnit=DistanceUnit.ANGSTROM, charge: int=0, spin: int=0, basis: str='sto3g', method: MethodType=MethodType.RHF, xc_functional: str='lda,vwn', xcf_library: str='libxc', conv_tol: float=1e-09, max_cycle: int=50, init_guess: InitialGuess=InitialGuess.MINAO, max_memory: (int | None)=None, chkfile: (str | None)=None) -> None:
        """Configure the driver; no PySCF computation happens until :meth:`run`."""
        super().__init__()
        # Deferred import: PySCF availability is enforced by the class decorator.
        from pyscf import gto, scf
        PySCFDriver.check_method_supported(method)
        if isinstance(atom, list):
            atom = ';'.join(atom)
        elif isinstance(atom, str):
            atom = atom.replace('\n', ';')
        else:
            raise QiskitNatureError(f'`atom` must be either a `str` or `list[str]`, but you passed {atom}')
        validate_min('max_cycle', max_cycle, 1)
        # Goes through the `atom` property setter below.
        self.atom = atom
        self._unit = unit
        self._charge = charge
        self._spin = spin
        self._basis = basis
        self._method = method
        self._xc_functional = xc_functional
        # Goes through the validating `xcf_library` setter below.
        self.xcf_library = xcf_library
        self._conv_tol = conv_tol
        self._max_cycle = max_cycle
        self._init_guess = init_guess.value
        self._max_memory = max_memory
        self._chkfile = chkfile
        self._mol: gto.Mole = None
        self._calc: scf.HF = None

    @property
    def atom(self) -> str:
        """The atomic geometry as a semicolon-separated string."""
        return self._atom

    @atom.setter
    def atom(self, atom: (str | list[str])) -> None:
        if isinstance(atom, list):
            atom = ';'.join(atom)
        self._atom = atom.replace('\n', ';')

    @property
    def unit(self) -> DistanceUnit:
        """The distance unit of the geometry."""
        return self._unit

    @unit.setter
    def unit(self, unit: DistanceUnit) -> None:
        self._unit = unit

    @property
    def charge(self) -> int:
        """The total molecular charge."""
        return self._charge

    @charge.setter
    def charge(self, charge: int) -> None:
        self._charge = charge

    @property
    def spin(self) -> int:
        """The spin (2S, i.e. number of unpaired electrons)."""
        return self._spin

    @spin.setter
    def spin(self, spin: int) -> None:
        self._spin = spin

    @property
    def basis(self) -> str:
        """The basis-set name."""
        return self._basis

    @basis.setter
    def basis(self, value: str) -> None:
        self._basis = value

    @property
    def method(self) -> MethodType:
        """The SCF method."""
        return self._method

    @method.setter
    def method(self, value: MethodType) -> None:
        self._method = value

    @property
    def xc_functional(self) -> str:
        """The exchange-correlation functional (used by KS methods only)."""
        return self._xc_functional

    @xc_functional.setter
    def xc_functional(self, xc_functional: str) -> None:
        self._xc_functional = xc_functional

    @property
    def xcf_library(self) -> str:
        """The XC-functional library: 'libxc' or 'xcfun'."""
        return self._xcf_library

    @xcf_library.setter
    def xcf_library(self, xcf_library: str) -> None:
        if (xcf_library not in ('libxc', 'xcfun')):
            raise QiskitNatureError(f"Invalid XCF library. It can be either 'libxc' or 'xcfun', not '{xcf_library}'")
        self._xcf_library = xcf_library

    @property
    def conv_tol(self) -> float:
        """The SCF convergence tolerance."""
        return self._conv_tol

    @conv_tol.setter
    def conv_tol(self, conv_tol: float) -> None:
        self._conv_tol = conv_tol

    @property
    def max_cycle(self) -> int:
        """The maximum number of SCF iterations."""
        return self._max_cycle

    @max_cycle.setter
    def max_cycle(self, max_cycle: int) -> None:
        self._max_cycle = max_cycle

    @property
    def init_guess(self) -> str:
        """The initial-guess method name."""
        return self._init_guess

    @init_guess.setter
    def init_guess(self, init_guess: str) -> None:
        self._init_guess = init_guess

    @property
    def max_memory(self) -> int:
        """The maximum memory (MB) PySCF may use."""
        return self._max_memory

    @max_memory.setter
    def max_memory(self, max_memory: int) -> None:
        self._max_memory = max_memory

    @property
    def chkfile(self) -> str:
        """Path to a PySCF checkpoint file to restore a previous SCF."""
        return self._chkfile

    @chkfile.setter
    def chkfile(self, chkfile: str) -> None:
        self._chkfile = chkfile

    @staticmethod
    def from_molecule(molecule: MoleculeInfo, *, basis: str='sto3g', method: MethodType=MethodType.RHF, driver_kwargs: (dict[(str, Any)] | None)=None) -> 'PySCFDriver':
        """Build a driver from a :class:`MoleculeInfo`, forwarding any
        recognized ``driver_kwargs`` to the constructor."""
        PySCFDriver.check_method_supported(method)
        kwargs = {}
        if driver_kwargs:
            args = inspect.signature(PySCFDriver.__init__).parameters.keys()
            for (key, value) in driver_kwargs.items():
                if ((key not in ['self']) and (key in args)):
                    kwargs[key] = value
        kwargs['atom'] = [' '.join(map(str, (name, *coord))) for (name, coord) in zip(molecule.symbols, molecule.coords)]
        kwargs['charge'] = molecule.charge
        kwargs['spin'] = (molecule.multiplicity - 1)
        kwargs['unit'] = molecule.units
        kwargs['basis'] = PySCFDriver.to_driver_basis(basis)
        kwargs['method'] = method
        return PySCFDriver(**kwargs)

    @staticmethod
    def to_driver_basis(basis: str) -> str:
        """Map a generic basis name to a PySCF one (identity here)."""
        return basis

    @staticmethod
    def check_method_supported(method: MethodType) -> None:
        """Raise if ``method`` is unsupported; all methods are accepted here."""
        pass

    def run(self) -> ElectronicStructureProblem:
        """Run the SCF calculation and return the resulting problem."""
        self.run_pyscf()
        return self.to_problem()

    def _build_molecule(self) -> None:
        """Construct the internal ``gto.Mole`` from the stored settings."""
        from pyscf import gto
        from pyscf.lib import logger as pylogger
        from pyscf.lib import param
        atom = self._check_molecule_format(self.atom)
        if (self._max_memory is None):
            self._max_memory = param.MAX_MEMORY
        try:
            verbose = pylogger.QUIET
            output = None
            # Capture PySCF's own log into a temp file when debugging.
            if logger.isEnabledFor(logging.DEBUG):
                verbose = pylogger.INFO
                (file, output) = tempfile.mkstemp(suffix='.log')
                os.close(file)
            self._mol = gto.Mole(atom=atom, unit=self._unit.value, basis=self._basis, max_memory=self._max_memory, verbose=verbose, output=output)
            self._mol.symmetry = False
            self._mol.charge = self._charge
            self._mol.spin = self._spin
            self._mol.build(parse_arg=False)
            if (output is not None):
                self._process_pyscf_log(output)
                try:
                    os.remove(output)
                except Exception:
                    pass
        except Exception as exc:
            raise QiskitNatureError('Failed to build the PySCF Molecule object.') from exc

    @staticmethod
    def _check_molecule_format(val: str) -> (str | list[str]):
        """Accept an XYZ-style string, or convert a Z-matrix via PySCF."""
        from pyscf import gto
        atoms = [x.strip() for x in val.split(';')]
        if ((atoms is None) or (len(atoms) < 1)):
            raise QiskitNatureError(('Molecule format error: ' + val))
        parts = [x.strip() for x in atoms[0].split()]
        if (len(parts) != 4):
            # Not "symbol x y z" -> assume Z-matrix; drop dummy 'X' atoms.
            try:
                newval = []
                for entry in gto.mole.from_zmatrix(val):
                    if (entry[0].upper() != 'X'):
                        newval.append(entry)
                return newval
            except Exception as exc:
                raise QiskitNatureError(('Failed to convert atom string: ' + val)) from exc
        return val

    def run_pyscf(self) -> None:
        """Build the molecule and run (or restore from chkfile) the SCF."""
        self._build_molecule()
        from pyscf import dft, scf
        from pyscf.lib import chkfile as lib_chkfile
        method_name = None
        method_cls = None
        try:
            method_name = self.method.value.upper()
            method_cls = getattr(scf, method_name)
        except AttributeError as exc:
            raise QiskitNatureError(f'Failed to load {method_name} HF object.') from exc
        self._calc = method_cls(self._mol)
        if (method_name in ('RKS', 'ROKS', 'UKS')):
            self._calc._numint.libxc = getattr(dft, self.xcf_library)
            self._calc.xc = self.xc_functional
        if ((self._chkfile is not None) and os.path.exists(self._chkfile)):
            self._calc.__dict__.update(lib_chkfile.load(self._chkfile, 'scf'))
            logger.info('PySCF loaded from chkfile e(hf): %s', self._calc.e_tot)
        else:
            self._calc.conv_tol = self._conv_tol
            self._calc.max_cycle = self._max_cycle
            self._calc.init_guess = self._init_guess
            self._calc.kernel()
            logger.info('PySCF kernel() converged: %s, e(hf): %s', self._calc.converged, self._calc.e_tot)

    def to_qcschema(self, *, include_dipole: bool=True) -> QCSchema:
        """Export the converged calculation as a :class:`QCSchema` instance."""
        from pyscf import __version__ as pyscf_version
        from pyscf import ao2mo, gto
        from pyscf.tools import dump_mat
        (einsum_func, _) = get_einsum()
        data = _QCSchemaData()
        data.overlap = self._calc.get_ovlp()
        (data.mo_coeff, data.mo_coeff_b) = self._expand_mo_object(self._calc.mo_coeff, array_dimension=3)
        (data.mo_energy, data.mo_energy_b) = self._expand_mo_object(self._calc.mo_energy)
        (data.mo_occ, data.mo_occ_b) = self._expand_mo_object(self._calc.mo_occ)
        if logger.isEnabledFor(logging.DEBUG):
            self._mol.stdout.write('\n')
            self._calc.analyze()
            self._mol.stdout.write('\n\n--- Alpha Molecular Orbitals ---\n\n')
            dump_mat.dump_mo(self._mol, data.mo_coeff, digits=7, start=1)
            if (data.mo_coeff_b is not None):
                self._mol.stdout.write('\n--- Beta Molecular Orbitals ---\n\n')
                dump_mat.dump_mo(self._mol, data.mo_coeff_b, digits=7, start=1)
            self._mol.stdout.flush()
        data.hij = self._calc.get_hcore()
        data.hij_mo = np.dot(np.dot(data.mo_coeff.T, data.hij), data.mo_coeff)
        if (data.mo_coeff_b is not None):
            data.hij_mo_b = np.dot(np.dot(data.mo_coeff_b.T, data.hij), data.mo_coeff_b)
        data.eri = self._mol.intor('int2e', aosym=8)
        data.eri_mo = fold(ao2mo.full(self._mol, data.mo_coeff, aosym=4))
        if (data.mo_coeff_b is not None):
            data.eri_mo_bb = fold(ao2mo.full(self._mol, data.mo_coeff_b, aosym=4))
            data.eri_mo_ba = fold(ao2mo.general(self._mol, [data.mo_coeff_b, data.mo_coeff_b, data.mo_coeff, data.mo_coeff], aosym=4))
        data.e_nuc = gto.mole.energy_nuc(self._mol)
        data.e_ref = self._calc.e_tot
        data.symbols = [self._mol.atom_pure_symbol(i) for i in range(self._mol.natm)]
        data.coords = self._mol.atom_coords(unit='Bohr').ravel().tolist()
        data.multiplicity = (self._spin + 1)
        data.charge = self._charge
        data.masses = list(self._mol.atom_mass_list())
        data.method = self._method.value.upper()
        data.basis = self._basis
        data.creator = 'PySCF'
        data.version = pyscf_version
        data.nbasis = self._mol.nbas
        data.nmo = self._mol.nao
        data.nalpha = self._mol.nelec[0]
        data.nbeta = self._mol.nelec[1]
        if include_dipole:
            self._mol.set_common_orig((0, 0, 0))
            ao_dip = self._mol.intor_symmetric('int1e_r', comp=3)
            d_m = self._calc.make_rdm1(self._calc.mo_coeff, self._calc.mo_occ)
            # Unrestricted calculations return (alpha, beta) density matrices.
            if (not (isinstance(d_m, np.ndarray) and (d_m.ndim == 2))):
                d_m = (d_m[0] + d_m[1])
            elec_dip = np.negative(einsum_func('xij,ji->x', ao_dip, d_m).real)
            elec_dip = np.round(elec_dip, decimals=8)
            nucl_dip = einsum_func('i,ix->x', self._mol.atom_charges(), self._mol.atom_coords())
            nucl_dip = np.round(nucl_dip, decimals=8)
            ref_dip = (nucl_dip + elec_dip)
            logger.info('HF Electronic dipole moment: %s', elec_dip)
            logger.info('Nuclear dipole moment: %s', nucl_dip)
            logger.info('Total dipole moment: %s', ref_dip)
            data.dip_nuc = nucl_dip
            data.dip_ref = ref_dip
            data.dip_x = ao_dip[0]
            data.dip_y = ao_dip[1]
            data.dip_z = ao_dip[2]
            data.dip_mo_x_a = np.dot(np.dot(data.mo_coeff.T, data.dip_x), data.mo_coeff)
            data.dip_mo_y_a = np.dot(np.dot(data.mo_coeff.T, data.dip_y), data.mo_coeff)
            data.dip_mo_z_a = np.dot(np.dot(data.mo_coeff.T, data.dip_z), data.mo_coeff)
            if (data.mo_coeff_b is not None):
                data.dip_mo_x_b = np.dot(np.dot(data.mo_coeff_b.T, data.dip_x), data.mo_coeff_b)
                data.dip_mo_y_b = np.dot(np.dot(data.mo_coeff_b.T, data.dip_y), data.mo_coeff_b)
                data.dip_mo_z_b = np.dot(np.dot(data.mo_coeff_b.T, data.dip_z), data.mo_coeff_b)
        return self._to_qcschema(data, include_dipole=include_dipole)

    def to_problem(self, *, basis: ElectronicBasis=ElectronicBasis.MO, include_dipole: bool=True) -> ElectronicStructureProblem:
        """Convert the QCSchema output into an :class:`ElectronicStructureProblem`."""
        qcschema = self.to_qcschema(include_dipole=include_dipole)
        problem = qcschema_to_problem(qcschema, basis=basis, include_dipole=include_dipole)
        if (include_dipole and (problem.properties.electronic_dipole_moment is not None)):
            problem.properties.electronic_dipole_moment.reverse_dipole_sign = True
        return problem

    def _expand_mo_object(self, mo_object: (tuple[((np.ndarray | None), (np.ndarray | None))] | np.ndarray), array_dimension: int=2) -> tuple[(np.ndarray, np.ndarray)]:
        """Split a possibly spin-resolved MO quantity into (alpha, beta)."""
        if isinstance(mo_object, tuple):
            return mo_object
        if (len(mo_object.shape) == array_dimension):
            return (mo_object[0], mo_object[1])
        return (mo_object, None)

    def _process_pyscf_log(self, logfile: str) -> None:
        """Forward the captured PySCF log (from 'System:' onward) to our logger."""
        with open(logfile, 'r', encoding='utf8') as file:
            contents = file.readlines()
        for (i, content) in enumerate(contents):
            if content.startswith('System:'):
                contents = contents[i:]
                break
        logger.debug('PySCF processing messages log:\n%s', ''.join(contents))
class TestParameterValuesWithModel(TestCase):
    # Smoke test: every bundled parameter set must be processable by a DFN
    # model configured with the options that parameter set requires.
    def test_parameter_values_with_model(self):
        # Maps parameter-set name -> DFN model with the matching option flags
        # (e.g. half-cell sets need 'working electrode': 'positive').
        param_to_model = {'Ai2020': pybamm.lithium_ion.DFN({'particle mechanics': 'swelling and cracking'}), 'Chen2020': pybamm.lithium_ion.DFN(), 'Chen2020_composite': pybamm.lithium_ion.DFN({'particle phases': ('2', '1'), 'open-circuit potential': (('single', 'current sigmoid'), 'single')}), 'Ecker2015': pybamm.lithium_ion.DFN(), 'Ecker2015_graphite_halfcell': pybamm.lithium_ion.DFN({'working electrode': 'positive'}), 'Mohtat2020': pybamm.lithium_ion.DFN(), 'NCA_Kim2011': pybamm.lithium_ion.DFN(), 'OKane2022': pybamm.lithium_ion.DFN({'SEI': 'solvent-diffusion limited', 'lithium plating': 'partially reversible'}), 'OKane2022_graphite_SiOx_halfcell': pybamm.lithium_ion.DFN({'working electrode': 'positive', 'SEI': 'solvent-diffusion limited', 'lithium plating': 'partially reversible'}), 'ORegan2022': pybamm.lithium_ion.DFN(), 'Prada2013': pybamm.lithium_ion.DFN(), 'Ramadass2004': pybamm.lithium_ion.DFN(), 'Xu2019': pybamm.lithium_ion.DFN({'working electrode': 'positive'})}
        for (param, model) in param_to_model.items():
            # subTest so one failing parameter set doesn't hide the others.
            with self.subTest(param=param):
                parameter_values = pybamm.ParameterValues(param)
                parameter_values.process_model(model)
def properties_of_geometric_objects():
    """Demonstrate extracting geometric properties (line direction, circle
    plane) from blades in the conformal model of 3D Euclidean space.

    NOTE(review): relies on module-level helpers ``Print_Function`` and ``F``
    (presumably the conformal-point embedding) defined elsewhere in the file.
    """
    Print_Function()
    # n and nbar (the null basis vectors at infinity/origin) are published as
    # globals, presumably for use by F and by subsequent demo functions.
    global n, nbar
    # Metric: Euclidean 3D block plus the two extra conformal dimensions with
    # off-diagonal entries 2 ('#' entries are left symbolic).
    g = (((('# # # 0 0,' + '# # # 0 0,') + '# # # 0 0,') + '0 0 0 0 2,') + '0 0 0 2 0')
    c3d = Ga('p1 p2 p3 n nbar', g=g)
    (p1, p2, p3, n, nbar) = c3d.mv()
    print('g_{ij} =\n', c3d.g)
    # Conformal representations of the three points.
    P1 = F(p1)
    P2 = F(p2)
    P3 = F(p3)
    print('Extracting direction of line from L = P1^P2^n')
    L = ((P1 ^ P2) ^ n)
    delta = ((L | n) | nbar)
    print('(L|n)|nbar =', delta)
    print('Extracting plane of circle from C = P1^P2^P3')
    C = ((P1 ^ P2) ^ P3)
    delta = (((C ^ n) | n) | nbar)
    print('((C^n)|n)|nbar =', delta)
    # Cross-check: the circle's plane bivector matches the Euclidean one.
    print('(p2-p1)^(p3-p1) =', ((p2 - p1) ^ (p3 - p1)))
def add_default_codecs():
    """Register pyglet's built-in image codecs with the global registry.

    Codecs are tried in priority order; each one is optional, so an
    ImportError (missing native library, wrong platform) is silently skipped.
    """
    def _register(module):
        # Every codec module registers both directions.
        registry.add_encoders(module)
        registry.add_decoders(module)

    # DDS is pure-python and platform independent.
    try:
        from pyglet.image.codecs import dds
        _register(dds)
    except ImportError:
        pass

    # Platform-native codecs.
    if (compat_platform == 'darwin'):
        try:
            from pyglet.image.codecs import quartz
            _register(quartz)
        except ImportError:
            pass
    if (compat_platform in ('win32', 'cygwin')):
        from pyglet.libs.win32.constants import WINDOWS_7_OR_GREATER
        if WINDOWS_7_OR_GREATER:
            try:
                from pyglet.image.codecs import wic
                _register(wic)
            except ImportError:
                pass
    if (compat_platform in ('win32', 'cygwin')):
        try:
            from pyglet.image.codecs import gdiplus
            _register(gdiplus)
        except ImportError:
            pass
    if compat_platform.startswith('linux'):
        try:
            from pyglet.image.codecs import gdkpixbuf2
            _register(gdkpixbuf2)
        except ImportError:
            pass

    # Cross-platform fallbacks.
    try:
        from pyglet.image.codecs import pil
        _register(pil)
    except ImportError:
        pass
    try:
        from pyglet.image.codecs import png
        _register(png)
    except ImportError:
        pass
    try:
        from pyglet.image.codecs import bmp
        _register(bmp)
    except ImportError:
        pass
def test_many2one_match_ic13():
    """Input validation and basic matching behavior of many2one_match_ic13."""
    det_id = 0
    recall_mat = np.array([[1, 0], [0, 0]])
    precision_mat = np.array([[1, 0], [0, 0]])
    recall_thr = 0.5
    precision_thr = 0.5
    gt_match_flag = [0, 0]
    det_match_flag = [0, 0]
    gt_dont_care_index = []
    base_args = [det_id, recall_mat, precision_mat, recall_thr, precision_thr,
                 gt_match_flag, det_match_flag, gt_dont_care_index]
    # (position, invalid value): each substitution must trigger an assertion.
    invalid_cases = [
        (0, 1.0),                    # det_id must be an int
        (1, [[1, 0], [0, 0]]),       # recall_mat must be an ndarray
        (2, [[1, 0], [0, 0]]),       # precision_mat must be an ndarray
        (3, 1.1),                    # recall_thr out of [0, 1]
        (4, 1.1),                    # precision_thr out of [0, 1]
        (5, np.array([0, 1])),       # gt_match_flag must be a list
        (6, np.array([0, 1])),       # det_match_flag must be a list
        (7, np.array([0, 1])),       # gt_dont_care_index must be a list
    ]
    for position, bad_value in invalid_cases:
        args = list(base_args)
        args[position] = bad_value
        with pytest.raises(AssertionError):
            utils.many2one_match_ic13(*args)
    # Valid inputs: detection 0 matches ground truth 0.
    result = utils.many2one_match_ic13(*base_args)
    assert result[0]
    assert (result[1] == [0])
    # Marking gt 0 as don't-care removes the match.
    base_args[7] = [0]
    result = utils.many2one_match_ic13(*base_args)
    assert (not result[0])
    assert (result[1] == [])
def get_resnext_cifar(num_classes, blocks, cardinality, bottleneck_width, model_name=None, pretrained=False, root=os.path.join('~', '.torch', 'models'), **kwargs):
    """Construct a CIFAR ResNeXt model.

    ``blocks`` is the total depth and must satisfy (blocks - 2) % 9 == 0;
    the remainder is split evenly across the three stages. Optionally loads
    pretrained weights identified by ``model_name`` from ``root``.
    """
    assert ((blocks - 2) % 9) == 0
    units_per_stage = (blocks - 2) // 9
    layers = [units_per_stage] * 3
    stage_widths = [256, 512, 1024]
    init_block_channels = 64
    # Per-stage list of per-unit output channel counts.
    channels = [[width] * count for (width, count) in zip(stage_widths, layers)]
    net = CIFARResNeXt(channels=channels, init_block_channels=init_block_channels, cardinality=cardinality, bottleneck_width=bottleneck_width, num_classes=num_classes, **kwargs)
    if pretrained:
        if not model_name:
            raise ValueError('Parameter `model_name` should be properly initialized for loading pretrained model.')
        from .model_store import download_model
        download_model(net=net, model_name=model_name, local_model_store_dir_path=root)
    return net
# NOTE(review): the original decorator line was corrupted to a bare
# `.parametrize(...)` (a syntax error); restored as @pytest.mark.parametrize.
@pytest.mark.parametrize('marker1, marker2, expected', [('python_version >= "3" and sys_platform == "win32"', 'python_version >= "3" and sys_platform != "win32" and sys_platform != "linux"', 'python_version >= "3" and sys_platform != "linux"'), ('python_version >= "3.8" and python_version < "4.0" and sys_platform == "win32"', 'python_version >= "3.8" and python_version < "4.0"', 'python_version >= "3.8" and python_version < "4.0"')])
def test_multi_marker_union_multi_is_multi(marker1: str, marker2: str, expected: str) -> None:
    """The union of two multi-markers simplifies to ``expected`` regardless
    of operand order."""
    m1 = parse_marker(marker1)
    m2 = parse_marker(marker2)
    assert (str(m1.union(m2)) == expected)
    assert (str(m2.union(m1)) == expected)
def test_spa_python_numpy_physical_dst(expected_solpos, golden):
    """spa_python (numpy backend) must reproduce the reference solar position
    for the Golden, CO benchmark instant while DST is in effect.

    ``expected_solpos`` and ``golden`` are pytest fixtures defined elsewhere.
    """
    times = pd.date_range(datetime.datetime(2003, 10, 17, 13, 30, 30), periods=1, freq='D', tz=golden.tz)
    # Physical conditions and delta_t match the NREL SPA reference case.
    ephem_data = solarposition.spa_python(times, golden.latitude, golden.longitude, pressure=82000, temperature=11, delta_t=67, atmos_refract=0.5667, how='numpy')
    expected_solpos.index = times
    assert_frame_equal(expected_solpos, ephem_data[expected_solpos.columns])
class DeepMimicEnv(Env):
    """Python facade over the native DeepMimicCore simulation.

    Every method delegates to the SWIG-wrapped C++ core; list-valued results
    are converted to ``np.ndarray`` where numeric vectors are expected.
    ``agent_id`` selects one of the simulated agents in multi-agent scenes.
    """

    def __init__(self, args, enable_draw):
        super().__init__(args, enable_draw)
        self._core = DeepMimicCore.cDeepMimicCore(enable_draw)
        # Seed the core RNG from NumPy so runs are reproducible iff NumPy is seeded.
        rand_seed = np.random.randint(np.iinfo(np.int32).max)
        self._core.SeedRand(rand_seed)
        self._core.ParseArgs(args)
        self._core.Init()
        return

    # --- simulation stepping and bookkeeping ---
    def update(self, timestep):
        self._core.Update(timestep)
    def reset(self):
        self._core.Reset()
    def get_time(self):
        return self._core.GetTime()
    def get_name(self):
        return self._core.GetName()

    # --- rendering and UI callbacks ---
    def draw(self):
        self._core.Draw()
    def keyboard(self, key, x, y):
        self._core.Keyboard(key, x, y)
    def mouse_click(self, button, state, x, y):
        self._core.MouseClick(button, state, x, y)
    def mouse_move(self, x, y):
        self._core.MouseMove(x, y)
    def reshape(self, w, h):
        self._core.Reshape(w, h)
    def shutdown(self):
        self._core.Shutdown()
    def is_done(self):
        return self._core.IsDone()
    def set_playback_speed(self, speed):
        self._core.SetPlaybackSpeed(speed)
    def set_updates_per_sec(self, updates_per_sec):
        self._core.SetUpdatesPerSec(updates_per_sec)
    def get_win_width(self):
        return self._core.GetWinWidth()
    def get_win_height(self):
        return self._core.GetWinHeight()
    def get_num_update_substeps(self):
        return self._core.GetNumUpdateSubsteps()

    # --- RL agent interface ---
    def is_rl_scene(self):
        return self._core.IsRLScene()
    def get_num_agents(self):
        return self._core.GetNumAgents()
    def need_new_action(self, agent_id):
        return self._core.NeedNewAction(agent_id)
    def record_state(self, agent_id):
        return np.array(self._core.RecordState(agent_id))
    def record_goal(self, agent_id):
        return np.array(self._core.RecordGoal(agent_id))
    def get_action_space(self, agent_id):
        return ActionSpace(self._core.GetActionSpace(agent_id))
    def set_action(self, agent_id, action):
        return self._core.SetAction(agent_id, action.tolist())
    def get_state_size(self, agent_id):
        return self._core.GetStateSize(agent_id)
    def get_goal_size(self, agent_id):
        return self._core.GetGoalSize(agent_id)
    def get_action_size(self, agent_id):
        return self._core.GetActionSize(agent_id)
    def get_num_actions(self, agent_id):
        return self._core.GetNumActions(agent_id)

    # --- normalization offsets/scales for states, goals, and actions ---
    def build_state_offset(self, agent_id):
        return np.array(self._core.BuildStateOffset(agent_id))
    def build_state_scale(self, agent_id):
        return np.array(self._core.BuildStateScale(agent_id))
    def build_goal_offset(self, agent_id):
        return np.array(self._core.BuildGoalOffset(agent_id))
    def build_goal_scale(self, agent_id):
        return np.array(self._core.BuildGoalScale(agent_id))
    def build_action_offset(self, agent_id):
        return np.array(self._core.BuildActionOffset(agent_id))
    def build_action_scale(self, agent_id):
        return np.array(self._core.BuildActionScale(agent_id))
    def build_action_bound_min(self, agent_id):
        return np.array(self._core.BuildActionBoundMin(agent_id))
    def build_action_bound_max(self, agent_id):
        return np.array(self._core.BuildActionBoundMax(agent_id))
    def build_state_norm_groups(self, agent_id):
        return np.array(self._core.BuildStateNormGroups(agent_id))
    def build_goal_norm_groups(self, agent_id):
        return np.array(self._core.BuildGoalNormGroups(agent_id))

    # --- reward queries ---
    def calc_reward(self, agent_id):
        return self._core.CalcReward(agent_id)
    def get_reward_min(self, agent_id):
        return self._core.GetRewardMin(agent_id)
    def get_reward_max(self, agent_id):
        return self._core.GetRewardMax(agent_id)
    def get_reward_fail(self, agent_id):
        return self._core.GetRewardFail(agent_id)
    def get_reward_succ(self, agent_id):
        return self._core.GetRewardSucc(agent_id)

    # --- AMP (adversarial motion prior) observations ---
    def enable_amp_task_reward(self):
        return self._core.EnableAMPTaskReward()
    def get_amp_obs_size(self):
        return self._core.GetAMPObsSize()
    def get_amp_obs_offset(self):
        return np.array(self._core.GetAMPObsOffset())
    def get_amp_obs_scale(self):
        return np.array(self._core.GetAMPObsScale())
    def get_amp_obs_norm_group(self):
        return np.array(self._core.GetAMPObsNormGroup())
    def record_amp_obs_expert(self, agent_id):
        return np.array(self._core.RecordAMPObsExpert(agent_id))
    def record_amp_obs_agent(self, agent_id):
        return np.array(self._core.RecordAMPObsAgent(agent_id))

    # --- episode control and logging ---
    def is_episode_end(self):
        return self._core.IsEpisodeEnd()
    def check_terminate(self, agent_id):
        return Env.Terminate(self._core.CheckTerminate(agent_id))
    def check_valid_episode(self):
        return self._core.CheckValidEpisode()
    def log_val(self, agent_id, val):
        self._core.LogVal(agent_id, float(val))
        return
    def set_sample_count(self, count):
        self._core.SetSampleCount(count)
        return
    def set_mode(self, mode):
        # mode is presumably an enum; the core takes the raw value — TODO confirm.
        self._core.SetMode(mode.value)
        return
class TDBase(rhf.TDBase):
    """Periodic-boundary-condition TDSCF base class.

    Extends the molecular ``rhf.TDBase`` with a ``cell`` attribute; the
    molecular transition-moment/analysis helpers are explicitly invalidated
    because they are not defined for periodic systems.
    """
    _keys = {'cell'}

    def __init__(self, mf):
        rhf.TDBase.__init__(self, mf)
        self.cell = mf.cell

    def get_ab(self, mf=None):
        # A/B response matrices: not implemented for PBC in this base class.
        raise NotImplementedError

    def nuc_grad_method(self):
        raise NotImplementedError

    # Natural transition orbitals are still well-defined for PBC.
    get_nto = rhf.TDBase.get_nto
    # The following molecular methods have no PBC counterpart; calling them
    # raises via lib.invalid_method.
    analyze = lib.invalid_method('analyze')
    oscillator_strength = lib.invalid_method('oscillator_strength')
    transition_dipole = lib.invalid_method('transition_dipole')
    transition_quadrupole = lib.invalid_method('transition_quadrupole')
    transition_octupole = lib.invalid_method('transition_octupole')
    transition_velocity_dipole = lib.invalid_method('transition_velocity_dipole')
    transition_velocity_quadrupole = lib.invalid_method('transition_velocity_quadrupole')
    transition_velocity_octupole = lib.invalid_method('transition_velocity_octupole')
    transition_magnetic_dipole = lib.invalid_method('transition_magnetic_dipole')
    transition_magnetic_quadrupole = lib.invalid_method('transition_magnetic_quadrupole')
def plot(t_plot, z_plot, t_slices, var_name, units, comsol_var_fun, dfn_var_fun, dfncc_var_fun, param, cmap='viridis'):
    """Compare a COMSOL reference variable against 1+1D DFN and DFNCC results.

    Panels: (a) COMSOL space-time colormap, (b) slices at ``t_slices``
    (COMSOL markers, 1+1D solid, DFNCC dotted), (c) time-averaged absolute
    error vs z, (d) space-averaged absolute error vs t.
    ``param`` is accepted for interface compatibility but unused here.
    """
    (fig, ax) = plt.subplots(2, 2, figsize=(13, 7))
    fig.subplots_adjust(left=0.15, bottom=0.1, right=0.95, top=0.95, wspace=0.4, hspace=0.8)
    comsol_var = comsol_var_fun(t=t_plot, z=z_plot)
    comsol_var_plot = ax[(0, 0)].pcolormesh((z_plot * 1000.0), t_plot, np.transpose(comsol_var), shading='gouraud', cmap=cmap)
    # Scientific notation for concentration variables. Renamed from `format`
    # to avoid shadowing the builtin.
    if (('cn' in var_name) or ('cp' in var_name)):
        cbar_format = '%.0e'
    else:
        cbar_format = None
    fig.colorbar(comsol_var_plot, ax=ax, format=cbar_format, location='top', shrink=0.42, aspect=20, anchor=(0.0, 0.0))
    ccmap = plt.get_cmap('inferno')
    for (ind, t) in enumerate(t_slices):
        color = ccmap((float(ind) / len(t_slices)))
        comsol_var_slice = comsol_var_fun(t=t, z=z_plot)
        dfn_var_slice = dfn_var_fun(t=t, z=z_plot)
        dfncc_var_slice = dfncc_var_fun(t=np.array([t]), z=z_plot)
        ax[(0, 1)].plot((z_plot * 1000.0), comsol_var_slice, 'o', fillstyle='none', color=color)
        ax[(0, 1)].plot((z_plot * 1000.0), dfn_var_slice, '-', color=color, label=f'{t_slices[ind]:.0f} s')
        ax[(0, 1)].plot((z_plot * 1000.0), dfncc_var_slice, ':', color=color)
    # Invisible proxy artists used to build the style legend.
    (comsol_p,) = ax[(0, 1)].plot(np.nan, np.nan, 'ko', fillstyle='none')
    (pybamm_p,) = ax[(0, 1)].plot(np.nan, np.nan, 'k-', fillstyle='none')
    (dfncc_p,) = ax[(0, 1)].plot(np.nan, np.nan, 'k:', fillstyle='none')
    dfn_var = dfn_var_fun(t=t_plot, z=z_plot)
    dfncc_var = dfncc_var_fun(t=t_plot, z=z_plot)
    error = np.abs((comsol_var - dfn_var))
    error_bar = np.abs((comsol_var - dfncc_var))
    ax[(1, 0)].plot((z_plot * 1000.0), np.nanmean(error, axis=1), 'k-', label='$1+1$D')
    ax[(1, 0)].plot((z_plot * 1000.0), np.nanmean(error_bar, axis=1), 'k:', label='DFNCC')
    ax[(1, 1)].plot(t_plot, np.nanmean(error, axis=0), 'k-', label='$1+1$D')
    ax[(1, 1)].plot(t_plot, np.nanmean(error_bar, axis=0), 'k:', label='DFNCC')
    ax[(0, 0)].tick_params(which='both')
    ax[(0, 1)].tick_params(which='both')
    ax[(1, 0)].tick_params(which='both')
    if (var_name in ['$\\mathcal{I}^*$']):
        ax[(1, 0)].set_yscale('log')
        # BUG FIX: was `ax[(1, 0)].set_yticks = [...]`, which overwrote the
        # bound method with a list instead of setting ticks; call it instead
        # (duplicate tick values removed).
        ax[(1, 0)].set_yticks([1e-05, 0.0001, 0.001, 0.01, 0.1, 1])
    else:
        ax[(1, 0)].ticklabel_format(style='sci', scilimits=((- 2), 2), axis='y')
    ax[(1, 1)].tick_params(which='both')
    if (var_name in ['$\\phi^*_{\\mathrm{s,cn}}$', '$\\phi^*_{\\mathrm{s,cp}} - V^*$']):
        ax[(1, 0)].ticklabel_format(style='sci', scilimits=((- 2), 2), axis='y')
    else:
        ax[(1, 1)].set_yscale('log')
        # BUG FIX: same method-overwrite bug as above, on panel (d).
        ax[(1, 1)].set_yticks([1e-05, 0.0001, 0.001, 0.01, 0.1, 1])
    ax[(0, 0)].set_xlabel('$z^*$ [mm]')
    ax[(0, 0)].set_ylabel('$t^*$ [s]')
    ax[(0, 0)].set_title(f'{var_name} {units}', y=1.5)
    ax[(0, 1)].set_xlabel('$z^*$ [mm]')
    ax[(0, 1)].set_ylabel(f'{var_name}')
    ax[(1, 0)].set_xlabel('$z^*$ [mm]')
    ax[(1, 0)].set_ylabel((('Time-averaged' + '\n') + f'absolute error {units}'))
    ax[(1, 1)].set_xlabel('$t^*$ [s]')
    ax[(1, 1)].set_ylabel((('Space-averaged' + '\n') + f'absolute error {units}'))
    # Panel labels.
    ax[(0, 0)].text((- 0.1), 1.6, '(a)', transform=ax[(0, 0)].transAxes)
    ax[(0, 1)].text((- 0.1), 1.6, '(b)', transform=ax[(0, 1)].transAxes)
    ax[(1, 0)].text((- 0.1), 1.2, '(c)', transform=ax[(1, 0)].transAxes)
    ax[(1, 1)].text((- 0.1), 1.2, '(d)', transform=ax[(1, 1)].transAxes)
    # Two stacked legends on panel (b): time slices plus line-style key.
    leg1 = ax[(0, 1)].legend(bbox_to_anchor=(0, 1.1, 1.0, 0.102), loc='lower left', borderaxespad=0.0, ncol=3, mode='expand')
    ax[(0, 1)].legend([comsol_p, pybamm_p, dfncc_p], ['COMSOL', '$1+1$D', 'DFNCC'], bbox_to_anchor=(0, 1.5, 1.0, 0.102), loc='lower left', borderaxespad=0.0, ncol=3, mode='expand')
    ax[(0, 1)].add_artist(leg1)
    ax[(1, 0)].legend(bbox_to_anchor=(0.0, 1.1, 1.0, 0.102), loc='lower right', borderaxespad=0.0, ncol=3)
    ax[(1, 1)].legend(bbox_to_anchor=(0.0, 1.1, 1.0, 0.102), loc='lower right', borderaxespad=0.0, ncol=3)
class TestLDIFParser(unittest.TestCase):
    """Round-trip helpers for LDIF parsing tests.

    Subclasses are expected to set ``self.record_type`` to either 'entry' or
    'change' — TODO confirm against the concrete test subclasses.
    """

    def _parse_records(self, ldif_string, ignored_attr_types=None, max_entries=0):
        """Parse ``ldif_string`` and return the records for this record type."""
        ldif_file = StringIO(ldif_string)
        ldif_parser = ldif.LDIFRecordList(ldif_file, ignored_attr_types=ignored_attr_types, max_entries=max_entries)
        # Dispatch to parse_entry_records / parse_change_records.
        parser_method = getattr(ldif_parser, ('parse_%s_records' % self.record_type))
        parser_method()
        if (self.record_type == 'entry'):
            return ldif_parser.all_records
        elif (self.record_type == 'change'):
            return ldif_parser.all_modify_changes

    def _unparse_records(self, records):
        """Serialize ``records`` back to an LDIF string."""
        ldif_file = StringIO()
        ldif_writer = ldif.LDIFWriter(ldif_file)
        if (self.record_type == 'entry'):
            for (dn, entry) in records:
                ldif_writer.unparse(dn, entry)
        elif (self.record_type == 'change'):
            # controls are not round-tripped by LDIFWriter.unparse.
            for (dn, modops, controls) in records:
                ldif_writer.unparse(dn, modops)
        return ldif_file.getvalue()

    def check_records(self, ldif_string, records, ignored_attr_types=None, max_entries=0):
        """Assert parse(ldif) == records and that unparse/parse round-trips."""
        ldif_string = textwrap.dedent(ldif_string).lstrip()
        parsed_records = self._parse_records(ldif_string, ignored_attr_types=ignored_attr_types, max_entries=max_entries)
        generated_ldif = self._unparse_records(records)
        parsed_records2 = self._parse_records(generated_ldif, ignored_attr_types=ignored_attr_types, max_entries=max_entries)
        self.assertEqual(records, parsed_records)
        self.assertEqual(records, parsed_records2)
def evaluation(myNet, test_loader, args):
    """Return the top-1 accuracy (percent) of ``myNet`` over ``test_loader``.

    ``args`` is accepted for interface compatibility but not used.
    """
    myNet.eval()
    correct = 0.0
    total = 0
    with torch.no_grad():
        for inputs, targets in test_loader:
            if torch.cuda.is_available():
                inputs, targets = inputs.cuda(), targets.cuda()
            logits = myNet(inputs)
            predictions = logits.argmax(dim=-1)
            correct += float((predictions == targets).int().sum().item())
            total += len(predictions)
    return (100.0 * correct) / float(total)
def create_motion_model_widgets() -> dict[(str, tuple[(str, QtWidgets.QWidget)])]:
    """Build the motion-model configuration widgets.

    Returns a mapping of config key -> (display label, widget), consisting of
    the sigma widgets from ``_create_sigma_widgets`` plus an 'accuracy'
    spin box controlling the probability-integration limits.
    """
    widgets = _create_sigma_widgets()
    accuracy = QtWidgets.QDoubleSpinBox()
    accuracy.setRange(0.1, 10)
    # Adaptive steps: step size scales with the current value.
    accuracy.setStepType(QtWidgets.QAbstractSpinBox.AdaptiveDecimalStepType)
    accuracy.setToolTip('Integration limits for calculating probabilities')
    widgets['accuracy'] = ('accuracy', accuracy)
    return widgets
class Ensemble(nn.Module):
    """Combine several models by averaging in the probability domain.

    ``forward`` log-normalizes each model's output over the last dimension
    and returns the log of the summed probabilities (log-sum, not log-mean).
    """

    def __init__(self, models, name=None):
        super(Ensemble, self).__init__()
        # Fall back to "<first model name>_ensemble" when no name is given.
        self.name = name if name is not None else ('%s_ensemble' % models[0].name)
        self.models = nn.ModuleList(models)

    def forward(self, x):
        stacked = torch.stack([member(x) for member in self.models])
        log_probs = stacked - torch.logsumexp(stacked, dim=-1, keepdim=True)
        return torch.logsumexp(log_probs, dim=0)
def makeMetaChild(name, cfgDict):
    """Build a collapsed 'group' Parameter named ``name`` from ``cfgDict``.

    Dict-valued entries become child Parameters with those options; scalar
    entries become a child whose type equals its key and whose value is the
    scalar. Every encountered child type is recorded in ``_encounteredTypes``.
    """
    children = []
    for childName, childOpts in cfgDict.items():
        if isinstance(childOpts, dict):
            child = Parameter.create(name=childName, **childOpts)
        else:
            child = Parameter.create(name=childName, type=childName, value=childOpts)
        _encounteredTypes.add(child.type())
        children.append(child)
    group = Parameter.create(name=name, type='group', children=children)
    group.setOpts(expanded=False)
    return group
class _march_rays(Function):
    """Autograd wrapper for the CUDA compacted ray-marching kernel.

    Forward-only: samples positions/directions/step sizes along the still-alive
    rays into freshly allocated buffers via the native ``_backend`` kernel.
    """

    @staticmethod
    # NOTE(review): the original decorator line was corrupted to a bare
    # `_fwd(cast_inputs=torch.float32)`; restored as the AMP custom_fwd cast
    # (and @staticmethod, which autograd.Function.forward requires here).
    @torch.cuda.amp.custom_fwd(cast_inputs=torch.float32)
    def forward(ctx, n_alive, n_step, rays_alive, rays_t, rays_o, rays_d, bound, density_bitfield, C, H, near, far, align=(- 1), perturb=False, dt_gamma=0, max_steps=1024):
        if (not rays_o.is_cuda):
            rays_o = rays_o.cuda()
        if (not rays_d.is_cuda):
            rays_d = rays_d.cuda()
        rays_o = rays_o.contiguous().view((- 1), 3)
        rays_d = rays_d.contiguous().view((- 1), 3)
        # Total sample budget, padded up to the requested alignment.
        M = (n_alive * n_step)
        if (align > 0):
            M += (align - (M % align))
        xyzs = torch.zeros(M, 3, dtype=rays_o.dtype, device=rays_o.device)
        dirs = torch.zeros(M, 3, dtype=rays_o.dtype, device=rays_o.device)
        deltas = torch.zeros(M, 2, dtype=rays_o.dtype, device=rays_o.device)
        # Per-ray jitter of the first step when perturb is requested.
        if perturb:
            noises = torch.rand(n_alive, dtype=rays_o.dtype, device=rays_o.device)
        else:
            noises = torch.zeros(n_alive, dtype=rays_o.dtype, device=rays_o.device)
        _backend.march_rays(n_alive, n_step, rays_alive, rays_t, rays_o, rays_d, bound, dt_gamma, max_steps, C, H, density_bitfield, near, far, xyzs, dirs, deltas, noises)
        return (xyzs, dirs, deltas)
def _migrate_v17(preset: dict) -> dict:
if (preset['game'] == 'prime1'):
preset['configuration']['elevators']['excluded_teleporters'].append({'world_name': 'Impact Crater', 'area_name': 'Metroid Prime Lair', 'node_name': 'Teleporter to Credits'})
preset['configuration']['elevators']['excluded_teleporters'].append({'world_name': 'Frigate Orpheon', 'area_name': 'Exterior Docking Hangar', 'node_name': 'Teleport to Landing Site'})
return preset |
class Keynote():
    """GraphQL keynote type with localized title/description/slug resolvers.

    NOTE(review): presumably decorated with ``@strawberry.type`` at the
    definition site (decorator not visible in this chunk) — confirm upstream.
    """
    id: ID
    title: str = strawberry.field(resolver=make_localized_resolver('title'))
    description: str = strawberry.field(resolver=make_localized_resolver('description'))
    slug: str = strawberry.field(resolver=make_localized_resolver('slug'))
    topic: Optional[Topic]
    speakers: List[ScheduleItemUser]
    start: Optional[datetime]
    end: Optional[datetime]
    rooms: List[Room]
    youtube_video_id: Optional[str]

    def __init__(self, id: ID, title: str, description: str, slug: str, topic: Optional[Topic], speakers: List[ScheduleItemUser], start: Optional[datetime], end: Optional[datetime], rooms: List[Room], youtube_video_id: Optional[str]):
        self.id = id
        self.title = title
        self.description = description
        self.slug = slug
        self.topic = topic
        self.speakers = speakers
        self.start = start
        self.end = end
        self.rooms = rooms
        self.youtube_video_id = youtube_video_id

    # BUG FIX: the method takes `cls` and calls `cls(...)` but was missing the
    # @classmethod decorator (stripped), so it could only be called with an
    # explicit class argument.
    @classmethod
    def from_django_model(cls, instance):
        """Build a Keynote from the Django model, pulling schedule data
        (start/end/rooms/video) from the related schedule item when present."""
        schedule_item = instance.schedule_item
        return cls(id=instance.id, title=instance.title, description=instance.description, slug=instance.slug, topic=(Topic.from_django_model(instance.topic) if instance.topic else None), speakers=[ScheduleItemUser(id=speaker.user_id, fullname=speaker.user.full_name, full_name=speaker.user.full_name, conference_code=instance.conference.code) for speaker in instance.speakers.all()], start=(schedule_item.start if schedule_item else None), end=(schedule_item.end if schedule_item else None), rooms=(schedule_item.rooms.all() if schedule_item else []), youtube_video_id=(schedule_item.youtube_video_id if schedule_item else None))
def test_multiple_constraints_on_root(package: ProjectPackage, solver: Solver, repo: Repository) -> None:
    """The same root dependency declared twice with disjoint python markers
    must resolve to one installed package per constraint."""
    package.add_dependency(Factory.create_dependency('foo', {'version': '^1.0', 'python': '^2.7'}))
    package.add_dependency(Factory.create_dependency('foo', {'version': '^2.0', 'python': '^3.7'}))
    foo_old = get_package('foo', '1.5.0')
    foo_new = get_package('foo', '2.5.0')
    for candidate in (foo_old, foo_new):
        repo.add_package(candidate)
    transaction = solver.solve()
    check_solver_result(
        transaction,
        [
            {'job': 'install', 'package': foo_old},
            {'job': 'install', 'package': foo_new},
        ],
    )
class TestPyfakefsTestCase(unittest.TestCase):
    """Sanity checks on the type hierarchy of pyfakefs TestCase subclasses."""

    def setUp(self):
        # Build a minimal concrete pyfakefs test case so its type can be
        # inspected without running any actual test logic.
        class TestTestCase(fake_filesystem_unittest.TestCase):
            def runTest(self):
                pass

        self.test_case = TestTestCase('runTest')

    def test_test_case_type(self):
        # Must remain a plain unittest TestCase and also carry the pyfakefs mixin.
        for expected_base in (unittest.TestCase, fake_filesystem_unittest.TestCaseMixin):
            self.assertIsInstance(self.test_case, expected_base)
class Parameterized(object):
    """Mixin exposing trainable TF1 parameters with per-tag caching.

    Parameter lists, dtypes, shapes and assignment ops are cached keyed by
    the (sorted) tag set so repeated get/set calls avoid rebuilding graph
    ops. Subclasses must implement :meth:`get_params_internal`.
    """

    def __init__(self):
        # Caches keyed by a sorted tuple of (tag, value) pairs.
        self._cached_params = {}
        self._cached_param_dtypes = {}
        self._cached_param_shapes = {}
        # Assign op / placeholder caches are keyed per parameter variable so
        # set_param_values does not grow the graph on every call.
        self._cached_assign_ops = {}
        self._cached_assign_placeholders = {}

    def get_params_internal(self, **tags):
        """Subclass hook: return the list of parameter variables for `tags`."""
        raise NotImplementedError

    def get_params(self, **tags):
        """Return (and cache) the parameter variables matching `tags`."""
        tag_tuple = tuple(sorted(list(tags.items()), key=(lambda x: x[0])))
        if (tag_tuple not in self._cached_params):
            self._cached_params[tag_tuple] = self.get_params_internal(**tags)
        return self._cached_params[tag_tuple]

    def get_param_dtypes(self, **tags):
        """Return (and cache) the numpy dtypes of the parameters for `tags`."""
        tag_tuple = tuple(sorted(list(tags.items()), key=(lambda x: x[0])))
        if (tag_tuple not in self._cached_param_dtypes):
            params = self.get_params(**tags)
            # Run the variables to obtain concrete numpy values.
            param_values = tf.get_default_session().run(params)
            self._cached_param_dtypes[tag_tuple] = [val.dtype for val in param_values]
        return self._cached_param_dtypes[tag_tuple]

    def get_param_shapes(self, **tags):
        """Return (and cache) the shapes of the parameters for `tags`."""
        tag_tuple = tuple(sorted(list(tags.items()), key=(lambda x: x[0])))
        if (tag_tuple not in self._cached_param_shapes):
            params = self.get_params(**tags)
            param_values = tf.get_default_session().run(params)
            self._cached_param_shapes[tag_tuple] = [val.shape for val in param_values]
        return self._cached_param_shapes[tag_tuple]

    def get_param_values(self, **tags):
        """Return all matching parameter values flattened into one 1-D array."""
        params = self.get_params(**tags)
        param_values = tf.get_default_session().run(params)
        return flatten_tensors(param_values)

    def set_param_values(self, flattened_params, **tags):
        """Assign a flat value vector back into the parameter variables.

        Assignment ops and their placeholders are created once per variable
        and reused, then executed in a single session run.
        """
        debug = tags.pop('debug', False)
        param_values = unflatten_tensors(flattened_params, self.get_param_shapes(**tags))
        ops = []
        feed_dict = dict()
        for (param, dtype, value) in zip(self.get_params(**tags), self.get_param_dtypes(**tags), param_values):
            if (param not in self._cached_assign_ops):
                assign_placeholder = tf.placeholder(dtype=param.dtype.base_dtype)
                assign_op = tf.assign(param, assign_placeholder)
                self._cached_assign_ops[param] = assign_op
                self._cached_assign_placeholders[param] = assign_placeholder
            ops.append(self._cached_assign_ops[param])
            # Cast to the variable's recorded dtype before feeding.
            feed_dict[self._cached_assign_placeholders[param]] = value.astype(dtype)
            if debug:
                print(('setting value of %s' % param.name))
        tf.get_default_session().run(ops, feed_dict=feed_dict)

    def flat_to_params(self, flattened_params, **tags):
        """Split a flat vector into per-parameter arrays (no assignment)."""
        return unflatten_tensors(flattened_params, self.get_param_shapes(**tags))

    def __getstate__(self):
        # Persist parameter values alongside Serializable state when the
        # module-level `load_params` flag is enabled.
        d = Serializable.__getstate__(self)
        global load_params
        if load_params:
            d['params'] = self.get_param_values()
        return d

    def __setstate__(self, d):
        Serializable.__setstate__(self, d)
        global load_params
        if load_params:
            # Variables must be initialized before values can be assigned.
            tf.get_default_session().run(tf.variables_initializer(self.get_params()))
            self.set_param_values(d['params'])
class TestVersionSourceName():
    """Behavior of ``HatchMetadata.version.source_name`` resolution."""

    def test_empty(self, isolation):
        message = 'The `source` option under the `tool.hatch.version` table must not be empty if defined'
        with pytest.raises(ValueError, match=message):
            _ = HatchMetadata(isolation, {'version': {'source': ''}}, None).version.source_name

    def test_not_table(self, isolation):
        message = 'Field `tool.hatch.version.source` must be a string'
        with pytest.raises(TypeError, match=message):
            _ = HatchMetadata(isolation, {'version': {'source': 9000}}, None).version.source_name

    def test_correct(self, isolation):
        metadata = HatchMetadata(isolation, {'version': {'source': 'foo'}}, None)
        # Accessed twice: the second lookup must return the same cached value.
        assert metadata.version.source_name == metadata.version.source_name == 'foo'

    def test_default(self, isolation):
        metadata = HatchMetadata(isolation, {'version': {}}, None)
        assert metadata.version.source_name == metadata.version.source_name == 'regex'
class TrezorKeyStore(Hardware_KeyStore):
    """Keystore backed by a Trezor hardware wallet."""
    hw_type = 'trezor'
    device = TREZOR_PRODUCT_KEY
    plugin: 'TrezorPlugin'

    def get_client(self, force_pair=True):
        """Return the device client via the plugin (pairing if requested)."""
        return self.plugin.get_client(self, force_pair)

    def decrypt_message(self, sequence, message, password):
        # Message decryption is deliberately unsupported for this device.
        raise UserFacingException(_('Encryption and decryption are not implemented by {}').format(self.device))

    def sign_message(self, sequence, message, password, *, script_type=None):
        """Sign `message` with the key at derivation `sequence` on-device.

        Returns the raw signature bytes produced by the device.
        """
        client = self.get_client()
        # Full derivation path = keystore prefix + /change/index.
        address_path = (self.get_derivation_prefix() + ('/%d/%d' % sequence))
        script_type = self.plugin.get_trezor_input_script_type(script_type)
        msg_sig = client.sign_message(address_path, message, script_type=script_type)
        return msg_sig.signature

    def sign_transaction(self, tx, password):
        """Have the device sign `tx` in place; no-op if already complete.

        Raises UserFacingException when any input is missing its previous
        transaction, which the device requires.
        """
        if tx.is_complete():
            return
        # Collect previous transactions keyed by txid for the plugin.
        prev_tx = {}
        for txin in tx.inputs():
            tx_hash = txin.prevout.txid.hex()
            if (txin.utxo is None):
                raise UserFacingException(_('Missing previous tx.'))
            prev_tx[tx_hash] = txin.utxo
        self.plugin.sign_transaction(self, tx, prev_tx)
_collator('multicrop_collator')
def multicrop_collator(batch):
    """Collate multi-crop samples by regrouping per-sample crop lists into
    per-crop batched tensors.

    Each sample holds ``num_duplicates`` crops; for crop position ``pos``
    the output stacks that crop across all samples. Labels/valid-flags/ids
    are flattened crop-major to match the stacked data ordering.
    """
    assert ('data' in batch[0]), 'data not found in sample'
    assert ('label' in batch[0]), 'label not found in sample'
    data = [sample['data'] for sample in batch]
    labels = [torch.tensor(sample['label']) for sample in batch]
    data_valid = [torch.tensor(sample['data_valid']) for sample in batch]
    data_idx = [torch.tensor(sample['data_idx']) for sample in batch]
    num_duplicates, num_images = len(data[0]), len(data)
    output_data, output_label, output_data_valid, output_data_idx = [], [], [], []
    for pos in range(num_duplicates):
        crop_group = []
        for image in range(num_images):
            crop_group.append(data[image][pos])
            output_label.append(labels[image][pos])
            output_data_valid.append(data_valid[image][pos])
            output_data_idx.append(data_idx[image][pos])
        output_data.append(torch.stack(crop_group))
    return {
        'data': [output_data],
        'label': [torch.stack(output_label)],
        'data_valid': [torch.stack(output_data_valid)],
        'data_idx': [torch.stack(output_data_idx)],
    }
def test_convert_variable_mixed_specificity():
    """Converting between partially-known static shapes must combine the
    known dimensions from both types into the most specific shape."""
    partial_a = TensorType(config.floatX, shape=(1, None, 3))
    partial_b = TensorType(config.floatX, shape=(None, 5, 3))
    combined = TensorType(config.floatX, shape=(1, 5, 3))
    var_a = partial_a()
    var_b = partial_b()
    assert partial_a.convert_variable(var_b).type == combined
    assert partial_b.convert_variable(var_a).type == combined
def all_gather(data):
    """Gather an arbitrary picklable object from every distributed rank.

    Serializes `data` to a CUDA byte tensor, pads all payloads to the max
    length across ranks (all_gather requires equal sizes), exchanges them,
    then unpickles each. Returns a list with one entry per rank; the
    single-process case short-circuits without any communication.
    """
    world_size = get_world_size()
    if (world_size == 1):
        return [data]
    # Serialize the payload into a CUDA uint8 tensor.
    buffer = pickle.dumps(data)
    storage = torch.ByteStorage.from_buffer(buffer)
    tensor = torch.ByteTensor(storage).to('cuda')
    # First exchange payload sizes so every rank can size its receive buffers.
    local_size = torch.tensor([tensor.numel()], device='cuda')
    size_list = [torch.tensor([0], device='cuda') for _ in range(world_size)]
    dist.all_gather(size_list, local_size)
    size_list = [int(size.item()) for size in size_list]
    max_size = max(size_list)
    # all_gather needs identically sized tensors: pad local payload to max_size.
    tensor_list = []
    for _ in size_list:
        tensor_list.append(torch.empty((max_size,), dtype=torch.uint8, device='cuda'))
    if (local_size != max_size):
        padding = torch.empty(size=((max_size - local_size),), dtype=torch.uint8, device='cuda')
        tensor = torch.cat((tensor, padding), dim=0)
    dist.all_gather(tensor_list, tensor)
    # Strip each rank's padding (using the exchanged true sizes) and unpickle.
    data_list = []
    for (size, tensor) in zip(size_list, tensor_list):
        buffer = tensor.cpu().numpy().tobytes()[:size]
        data_list.append(pickle.loads(buffer))
    return data_list
class TestNodePosition():
    """Checks of the `position` (keyword span) attribute on AST nodes.

    NOTE(review): the test methods below take no `self` parameter --
    presumably they carried @staticmethod decorators in the original
    source that are not visible in this chunk; confirm upstream.
    """

    def test_position_class() -> None:
        """Class definitions: position spans the `class NAME` keywords only."""
        code = textwrap.dedent("\n class A: #\n ...\n\n class B(A): #\n ...\n\n class C: #\n '''Docstring'''\n\n class D: #\n ...\n\n class E: #\n def f():\n ...\n\n \n class F: #\n ...\n ").strip()
        ast_nodes: list[nodes.NodeNG] = builder.extract_node(code)
        a = ast_nodes[0]
        assert isinstance(a, nodes.ClassDef)
        assert (a.position == (1, 0, 1, 7))
        b = ast_nodes[1]
        assert isinstance(b, nodes.ClassDef)
        assert (b.position == (4, 0, 4, 7))
        c = ast_nodes[2]
        assert isinstance(c, nodes.ClassDef)
        assert (c.position == (7, 0, 7, 7))
        # D is nested, hence the column-4 offset.
        d = ast_nodes[3]
        assert isinstance(d, nodes.ClassDef)
        assert (d.position == (10, 4, 10, 11))
        e = ast_nodes[4]
        assert isinstance(e, nodes.ClassDef)
        assert (e.position == (13, 0, 13, 7))
        f = ast_nodes[5]
        assert isinstance(f, nodes.ClassDef)
        assert (f.position == (18, 0, 18, 7))

    def test_position_function() -> None:
        """Function definitions: position spans `def NAME`."""
        code = textwrap.dedent("\n def a(): #\n ...\n\n def b(): #\n '''Docstring'''\n\n def c( #\n var: int = 42\n ):\n def d(): #\n ...\n\n \n def e(): #\n ...\n ").strip()
        ast_nodes: list[nodes.NodeNG] = builder.extract_node(code)
        a = ast_nodes[0]
        assert isinstance(a, nodes.FunctionDef)
        assert (a.position == (1, 0, 1, 5))
        b = ast_nodes[1]
        assert isinstance(b, nodes.FunctionDef)
        assert (b.position == (4, 0, 4, 5))
        c = ast_nodes[2]
        assert isinstance(c, nodes.FunctionDef)
        assert (c.position == (7, 0, 7, 5))
        # d is nested inside c, hence the column-4 offset.
        d = ast_nodes[3]
        assert isinstance(d, nodes.FunctionDef)
        assert (d.position == (10, 4, 10, 9))
        e = ast_nodes[4]
        assert isinstance(e, nodes.FunctionDef)
        assert (e.position == (14, 0, 14, 5))

    def test_position_async_function() -> None:
        """Async functions: position includes the `async` keyword."""
        code = textwrap.dedent("\n async def a(): #\n ...\n\n async def b(): #\n '''Docstring'''\n\n async def c( #\n var: int = 42\n ):\n async def d(): #\n ...\n\n \n async def e(): #\n ...\n ").strip()
        ast_nodes: list[nodes.NodeNG] = builder.extract_node(code)
        a = ast_nodes[0]
        assert isinstance(a, nodes.FunctionDef)
        assert (a.position == (1, 0, 1, 11))
        b = ast_nodes[1]
        assert isinstance(b, nodes.FunctionDef)
        assert (b.position == (4, 0, 4, 11))
        c = ast_nodes[2]
        assert isinstance(c, nodes.FunctionDef)
        assert (c.position == (7, 0, 7, 11))
        d = ast_nodes[3]
        assert isinstance(d, nodes.FunctionDef)
        assert (d.position == (10, 4, 10, 15))
        e = ast_nodes[4]
        assert isinstance(e, nodes.FunctionDef)
        assert (e.position == (14, 0, 14, 11))
()
def mocked_stream_df() -> Mock:
    """Return a Mock mimicking a streaming DataFrame whose writeStream
    builder methods all chain back to the mock itself."""
    stream_mock = Mock()
    stream_mock.isStreaming = True
    stream_mock.writeStream = stream_mock
    # Builder-style methods return the mock itself so calls can be chained.
    for chained_method in (stream_mock.trigger, stream_mock.outputMode, stream_mock.option, stream_mock.foreachBatch):
        chained_method.return_value = stream_mock
    stream_mock.start.return_value = Mock(spec=StreamingQuery)
    return stream_mock
class CrossEntropyLIDLoss(_Loss):
    """Label-smoothed cross-entropy over per-timestep language-ID logits.

    Positions whose mask entry equals 1 are treated as padding and excluded;
    the remaining positions contribute a summed, optionally label-smoothed,
    negative log-likelihood. Smoothing is only applied in training mode.
    """

    def __init__(self, output_size, label_smoothing):
        """
        :param output_size: number of language-ID classes.
        :param label_smoothing: smoothing mass in [0, 1); 0 disables smoothing.
        """
        super().__init__()
        self.output_size = output_size
        self.padding_idx = (- 1)
        self.smoothing_value = label_smoothing
        self.confidence = (1.0 - label_smoothing)
        self.label_smoothing = label_smoothing
        # Optional fused CUDA softmax-xentropy kernel; fall back to the pure
        # PyTorch path when the extension is unavailable.
        # (Fixed: the old code assigned fast_xentropy twice, once through a
        # useless local variable.)
        self.fast_xentropy = False
        try:
            import xentropy_cuda
            from onmt.modules.optimized.softmax_xentropy import SoftmaxCrossEntropyLoss
            self.softmax_xentropy = SoftmaxCrossEntropyLoss.apply
            self.fast_xentropy = True
        except (ModuleNotFoundError, AttributeError):
            self.softmax_xentropy = None
            self.fast_xentropy = False

    def forward(self, lid_logits, labels, mask):
        """
        :param lid_logits: (len_t, bsz, n_classes) logits.
        :param labels: scalar (1,), per-sentence (bsz,) or full (len_t, bsz)
            language-ID targets; 1-D inputs are broadcast to the full grid.
        :param mask: (bsz, len_t) padding mask; entries equal to 1 are skipped.
        :return: summed (label-smoothed) loss over non-padded positions.
        """
        (len_t, bsz) = (lid_logits.size(0), lid_logits.size(1))
        # Broadcast labels to a full (len_t, bsz) grid.
        if ((labels.ndim == 1) and (labels.size(0) == 1)):
            labels = labels.unsqueeze(0).repeat(len_t, bsz)
        elif ((labels.ndim == 1) and (labels.size(0) == bsz)):
            # BUG FIX: repeat() requires one factor per dimension of the
            # unsqueezed (1, bsz) tensor; the old code passed only len_t,
            # which raises a RuntimeError.
            labels = labels.unsqueeze(0).repeat(len_t, 1)
        elif (labels.ndim == 2):
            # BUG FIX: the old single assert used a tuple as its *message*,
            # so the second condition was never actually checked.
            assert (labels.size(0) == len_t)
            assert (labels.size(1) == bsz)
        else:
            raise NotImplementedError
        mask = mask.transpose(0, 1)
        logits = lid_logits.view((- 1), lid_logits.size((- 1)))
        gtruth = labels.view((- 1))
        # Keep only positions whose mask entry differs from 1 (non-padding).
        padding_mask = mask.contiguous().long()
        non_pad_indices = torch.nonzero(padding_mask.view((- 1)).ne(1)).squeeze(1)
        logits = logits.index_select(0, non_pad_indices)
        gtruth = gtruth.index_select(0, non_pad_indices)
        # Smoothing is disabled outside training mode.
        label_smoothing = (self.label_smoothing if self.training else 0.0)
        eps_i = (self.smoothing_value if self.training else 0.0)
        lprobs = F.log_softmax(logits, dim=(- 1), dtype=torch.float32)
        nll_loss = (- lprobs.gather(1, gtruth.unsqueeze(1)))
        smooth_loss = (- lprobs.sum(dim=(- 1), keepdim=True))
        nll_loss = nll_loss.sum()
        smooth_loss = smooth_loss.sum()
        loss = (((1.0 - label_smoothing) * nll_loss) + (eps_i * smooth_loss))
        return loss
class ChamferLossMetric(TensorMetric):
    """Metric wrapper that evaluates the Chamfer loss of a predicted flow."""

    def __init__(self, chamfer_loss_params, name: str, reduce_op: Optional[Any]=None):
        super(ChamferLossMetric, self).__init__(name=name, reduce_op=reduce_op)
        # Underlying loss module, configured from the supplied parameter dict.
        self.loss = ChamferLoss(**chamfer_loss_params)

    def forward(self, pc_source: torch.Tensor, pc_target: torch.Tensor, pred_flow: torch.Tensor, gt_flow: torch.Tensor) -> torch.Tensor:
        # gt_flow is accepted for interface parity but not used by this loss.
        return self.loss(pc_source, pc_target, pred_flow)
def get_leaf_jaw_positions_for_type(beam_limiting_device_position_sequences, rt_beam_limiting_device_type):
    """Extract LeafJawPositions for one collimator type from every control
    point's beam-limiting-device position sequence.

    :raises ValueError: if any sequence does not contain exactly one item
        of the requested RTBeamLimitingDeviceType.
    """
    positions = []
    for control_point_sequence in beam_limiting_device_position_sequences:
        matches = [
            item
            for item in control_point_sequence
            if item.RTBeamLimitingDeviceType == rt_beam_limiting_device_type
        ]
        if len(matches) != 1:
            raise ValueError('Expected exactly one item per control point for a given collimator')
        positions.append(matches[0].LeafJawPositions)
    return positions
def test_show_nested_fixtures(pytester: Pytester, mode) -> None:
    """--setup-* output must show both same-named fixtures (session-scoped
    from conftest, function-scoped override in the test module) with the
    correct SETUP/TEARDOWN nesting order.

    NOTE(review): the bare `(scope=...)` lines inside the generated file
    bodies look like stripped `@pytest.fixture` decorators -- confirm
    against the upstream source.
    """
    pytester.makeconftest('\n import pytest\n (scope=\'session\')\n def arg_same():\n """session scoped fixture"""\n ')
    p = pytester.makepyfile('\n import pytest\n (scope=\'function\')\n def arg_same(arg_same):\n """function scoped fixture"""\n def test_arg1(arg_same):\n pass\n ')
    result = pytester.runpytest(mode, p)
    assert (result.ret == 0)
    # Session fixture wraps the function fixture which wraps the test.
    result.stdout.fnmatch_lines(['SETUP S arg_same*', '*SETUP F arg_same (fixtures used: arg_same)*', '*test_arg1 (fixtures used: arg_same)*', '*TEARDOWN F arg_same*', 'TEARDOWN S arg_same*'])
def main(args):
    """Run segmentation inference on Cityscapes and save label-ID images.

    Loads the network weights (tolerating missing keys), iterates the
    chosen dataset subset, converts each prediction from train IDs to
    Cityscapes label IDs, and writes results under ./save_results/.
    """
    modelpath = (args.loadDir + args.loadModel)
    weightspath = (args.loadDir + args.loadWeights)
    print(('Loading model: ' + modelpath))
    print(('Loading weights: ' + weightspath))
    model = Net(NUM_CLASSES)
    # DataParallel wrapping is applied unconditionally so checkpoint keys
    # with the 'module.' prefix match.
    model = torch.nn.DataParallel(model)
    if (not args.cpu):
        model = model.cuda()

    def load_my_state_dict(model, state_dict):
        """Copy matching parameters only, silently skipping unknown keys."""
        own_state = model.state_dict()
        for (name, param) in state_dict.items():
            if (name not in own_state):
                continue
            own_state[name].copy_(param)
        return model
    model = load_my_state_dict(model, torch.load(weightspath))
    print('Model and weights LOADED successfully')
    model.eval()
    # NOTE(review): a missing datadir only prints an error and continues;
    # the DataLoader below will then fail -- confirm this is intentional.
    if (not os.path.exists(args.datadir)):
        print('Error: datadir could not be loaded')
    loader = DataLoader(cityscapes(args.datadir, input_transform_cityscapes, target_transform_cityscapes, subset=args.subset), num_workers=args.num_workers, batch_size=args.batch_size, shuffle=False)
    for (step, (images, labels, filename, filenameGt)) in enumerate(loader):
        if (not args.cpu):
            images = images.cuda()
        inputs = Variable(images)
        with torch.no_grad():
            outputs = model(inputs)
        # Argmax over classes for the first image in the batch.
        label = outputs[0].max(0)[1].byte().cpu().data
        label_cityscapes = cityscapes_trainIds2labelIds(label.unsqueeze(0))
        # Mirror the input directory layout below ./save_results/.
        filenameSave = ('./save_results/' + filename[0].split('leftImg8bit/')[1])
        os.makedirs(os.path.dirname(filenameSave), exist_ok=True)
        label_cityscapes.save(filenameSave)
        print(step, filenameSave)
class COCOClsDatasetMS(Dataset):
    """Multi-scale COCO classification dataset.

    Each item yields a list of transformed crops at every requested scale,
    each followed by its horizontal flip, plus the multi-label target.
    """

    def __init__(self, img_name_list_path, coco_root, label_file_path, scales, train=True, transform=None, gen_attn=False, unit=1):
        # train/gen_attn both read the train2014 split id list.
        img_name_list_path = os.path.join(img_name_list_path, f"{('train' if (train or gen_attn) else 'val')}_id.txt")
        self.img_name_list = load_img_name_list(img_name_list_path)
        self.label_list = load_image_label_list_from_npy(self.img_name_list, label_file_path)
        self.coco_root = coco_root
        self.transform = transform
        self.train = train
        # Image sides are rounded to multiples of `unit` before scaling.
        self.unit = unit
        self.scales = scales
        self.gen_attn = gen_attn

    def __getitem__(self, idx):
        name = self.img_name_list[idx]
        if (self.train or self.gen_attn):
            img = PIL.Image.open(os.path.join(self.coco_root, 'train2014', (name + '.jpg'))).convert('RGB')
        else:
            img = PIL.Image.open(os.path.join(self.coco_root, 'val2014', (name + '.jpg'))).convert('RGB')
        label = torch.from_numpy(self.label_list[idx])
        # Round each side to the nearest multiple of self.unit.
        rounded_size = (int((round((img.size[0] / self.unit)) * self.unit)), int((round((img.size[1] / self.unit)) * self.unit)))
        ms_img_list = []
        for s in self.scales:
            target_size = (round((rounded_size[0] * s)), round((rounded_size[1] * s)))
            # NOTE(review): PIL.Image.CUBIC is deprecated in newer Pillow
            # (use Image.Resampling.BICUBIC) -- confirm Pillow version pin.
            s_img = img.resize(target_size, resample=PIL.Image.CUBIC)
            ms_img_list.append(s_img)
        if self.transform:
            for i in range(len(ms_img_list)):
                ms_img_list[i] = self.transform(ms_img_list[i])
        # Interleave each scale with its horizontal flip (flip on width axis).
        msf_img_list = []
        for i in range(len(ms_img_list)):
            msf_img_list.append(ms_img_list[i])
            msf_img_list.append(torch.flip(ms_img_list[i], [(- 1)]))
        return (msf_img_list, label)

    def __len__(self):
        return len(self.img_name_list)
class PartialFC(torch.nn.Module):
    """Model-parallel softmax classification layer with partial FC sampling.

    The class weight matrix is sharded across ranks; when sample_rate < 1
    only a sampled subset of the local classes ("activated" weights) is
    used per step, with the full weights and momentum kept in buffers and
    synced back in :meth:`update`.

    NOTE(review): the bare `_grad()` statements before sample()/update()
    look like stripped `@torch.no_grad()` decorators -- confirm upstream.
    """
    _version = 1

    def __init__(self, margin_loss: Callable, embedding_size: int, num_classes: int, sample_rate: float=1.0, fp16: bool=False):
        super(PartialFC, self).__init__()
        assert distributed.is_initialized(), 'must initialize distributed before create this'
        self.rank = distributed.get_rank()
        self.world_size = distributed.get_world_size()
        self.dist_cross_entropy = DistCrossEntropy()
        self.embedding_size = embedding_size
        self.sample_rate: float = sample_rate
        self.fp16 = fp16
        # Shard num_classes across ranks; the first (num_classes % world_size)
        # ranks each hold one extra class.
        self.num_local: int = ((num_classes // self.world_size) + int((self.rank < (num_classes % self.world_size))))
        self.class_start: int = (((num_classes // self.world_size) * self.rank) + min(self.rank, (num_classes % self.world_size)))
        self.num_sample: int = int((self.sample_rate * self.num_local))
        self.last_batch_size: int = 0
        self.weight: torch.Tensor
        self.weight_mom: torch.Tensor
        self.weight_activated: torch.nn.Parameter
        self.weight_activated_mom: torch.Tensor
        self.is_updated: bool = True
        self.init_weight_update: bool = True
        if (self.sample_rate < 1):
            # Full shard lives in buffers; only the sampled subset becomes a
            # Parameter, swapped into the optimizer each step.
            self.register_buffer('weight', tensor=torch.normal(0, 0.01, (self.num_local, embedding_size)))
            self.register_buffer('weight_mom', tensor=torch.zeros_like(self.weight))
            self.register_parameter('weight_activated', param=torch.nn.Parameter(torch.empty(0, 0)))
            self.register_buffer('weight_activated_mom', tensor=torch.empty(0, 0))
            self.register_buffer('weight_index', tensor=torch.empty(0, 0))
        else:
            self.weight_activated = torch.nn.Parameter(torch.normal(0, 0.01, (self.num_local, embedding_size)))
        if isinstance(margin_loss, Callable):
            self.margin_softmax = margin_loss
        else:
            raise
    _grad()
    def sample(self, labels: torch.Tensor, index_positive: torch.Tensor, optimizer: torch.optim.Optimizer):
        """Sample num_sample local classes (always including positives),
        remap labels into the sampled index space, and swap the sampled
        weights/momentum into the SGD optimizer's parameter slot."""
        positive = torch.unique(labels[index_positive], sorted=True).cuda()
        if ((self.num_sample - positive.size(0)) >= 0):
            # Random scores; positives forced above the [0,1) range so topk
            # always keeps them.
            perm = torch.rand(size=[self.num_local]).cuda()
            perm[positive] = 2.0
            index = torch.topk(perm, k=self.num_sample)[1].cuda()
            index = index.sort()[0].cuda()
        else:
            index = positive
        self.weight_index = index
        # Remap labels onto positions within the sorted sampled index.
        labels[index_positive] = torch.searchsorted(index, labels[index_positive])
        self.weight_activated = torch.nn.Parameter(self.weight[self.weight_index])
        self.weight_activated_mom = self.weight_mom[self.weight_index]
        if isinstance(optimizer, torch.optim.SGD):
            # Replace the optimizer's tracked parameter and its momentum buffer.
            optimizer.state.pop(optimizer.param_groups[(- 1)]['params'][0], None)
            optimizer.param_groups[(- 1)]['params'][0] = self.weight_activated
            optimizer.state[self.weight_activated]['momentum_buffer'] = self.weight_activated_mom
        else:
            raise
    _grad()
    def update(self):
        """Write the previous step's activated weights/momentum back into
        the full shard buffers (skipped on the very first call)."""
        if self.init_weight_update:
            self.init_weight_update = False
            return
        if (self.sample_rate < 1):
            self.weight[self.weight_index] = self.weight_activated
            self.weight_mom[self.weight_index] = self.weight_activated_mom

    def forward(self, local_embeddings: torch.Tensor, local_labels: torch.Tensor, optimizer: torch.optim.Optimizer):
        """Gather embeddings/labels from all ranks, compute margin-softmax
        logits against the (sampled) local class shard, and return the
        distributed cross-entropy loss."""
        local_labels.squeeze_()
        local_labels = local_labels.long()
        self.update()
        batch_size = local_embeddings.size(0)
        if (self.last_batch_size == 0):
            self.last_batch_size = batch_size
        assert (self.last_batch_size == batch_size), 'last batch size do not equal current batch size: {} vs {}'.format(self.last_batch_size, batch_size)
        _gather_embeddings = [torch.zeros((batch_size, self.embedding_size)).cuda() for _ in range(self.world_size)]
        _gather_labels = [torch.zeros(batch_size).long().cuda() for _ in range(self.world_size)]
        _list_embeddings = AllGather(local_embeddings, *_gather_embeddings)
        distributed.all_gather(_gather_labels, local_labels)
        embeddings = torch.cat(_list_embeddings)
        labels = torch.cat(_gather_labels)
        labels = labels.view((- 1), 1)
        # Mark labels outside this rank's class range as negatives (-1) and
        # shift the rest into local coordinates.
        index_positive = ((self.class_start <= labels) & (labels < (self.class_start + self.num_local)))
        labels[(~ index_positive)] = (- 1)
        labels[index_positive] -= self.class_start
        if (self.sample_rate < 1):
            self.sample(labels, index_positive, optimizer)
        with torch.cuda.amp.autocast(self.fp16):
            norm_embeddings = normalize(embeddings)
            norm_weight_activated = normalize(self.weight_activated)
            logits = linear(norm_embeddings, norm_weight_activated)
        if self.fp16:
            logits = logits.float()
        # Clamp cosine similarities against fp rounding outside [-1, 1].
        logits = logits.clamp((- 1), 1)
        logits = self.margin_softmax(logits, labels)
        loss = self.dist_cross_entropy(logits, labels)
        return loss

    def state_dict(self, destination=None, prefix='', keep_vars=False):
        """Serialize submodules plus a single 'weight' entry holding the
        full shard (sampled mode) or the activated parameter."""
        if (destination is None):
            destination = collections.OrderedDict()
            destination._metadata = collections.OrderedDict()
        for (name, module) in self._modules.items():
            if (module is not None):
                module.state_dict(destination, ((prefix + name) + '.'), keep_vars=keep_vars)
        if (self.sample_rate < 1):
            destination['weight'] = self.weight.detach()
        else:
            destination['weight'] = self.weight_activated.data.detach()
        return destination

    def load_state_dict(self, state_dict, strict: bool=True):
        """Restore the weight shard; sampled-mode auxiliary buffers are reset."""
        if (self.sample_rate < 1):
            self.weight = state_dict['weight'].to(self.weight.device)
            self.weight_mom.zero_()
            self.weight_activated.data.zero_()
            self.weight_activated_mom.zero_()
            self.weight_index.zero_()
        else:
            self.weight_activated.data = state_dict['weight'].to(self.weight_activated.data.device)
class MetaRLScreener_pro(torch.nn.Module):
    """RL edge-screening policy over graph explanations.

    Scores each edge of a graph by how much adding it would close the gap
    between the full-graph representation and the currently selected
    subgraph's representation, returning a softmax distribution over the
    not-yet-selected edges.
    """

    def __init__(self, model, epochs=100, lr=0.01, log=True):
        super(MetaRLScreener_pro, self).__init__()
        self.model = model
        self.model.to(device)
        self.epochs = epochs
        self.lr = lr
        self.log = log
        # Softmax temperature for edge-selection probabilities.
        self.temperature = 0.5
        # Edge action representation: concat of both endpoint reps + edge rep.
        self.edge_action_rep_generator = Sequential(Linear((32 * 3), 32), ELU())
        self.edge_action_prob_generator = Sequential(Linear(32, 32), ReLU(), Linear(32, 1))
        self.edge_action_rep_generator.to(device)
        self.edge_action_prob_generator.to(device)

    def forward(self, graph, selection):
        """Return edge-selection probabilities given the boolean `selection`
        mask of already-chosen edges."""
        subgraph = relabel_graph(graph, selection)
        node_reps = self.model.get_node_reps(graph.x, graph.edge_index, graph.edge_attr, graph.batch)
        edge_reps = self.model.edge_emb(graph.edge_attr)
        graph_rep = self.model.get_graph_rep(graph.x, graph.edge_index, graph.edge_attr, graph.batch)
        # Empty selection: treat the subgraph representation as zero.
        if (len(torch.where((selection == True))[0]) == 0):
            subgraph_rep = 0.0
        else:
            subgraph_rep = self.model.get_graph_rep(subgraph.x, subgraph.edge_index, subgraph.edge_attr, subgraph.batch)
        tmp_vec = torch.cat([node_reps[graph.edge_index[0]], node_reps[graph.edge_index[1]], edge_reps], dim=1)
        edge_action_reps = self.edge_action_rep_generator(tmp_vec.to(device))
        action_prob = self.estimate_edge_selection_prob(graph_rep, subgraph_rep, edge_action_reps, selection)
        return action_prob

    def estimate_edge_selection_prob(self, graph_rep, subgraph_rep, edge_action_reps, selection):
        """Score edges by inner product with (graph - subgraph) rep, mask out
        already-selected edges via NegINF, then temperature-softmax."""
        graph_diff_rep = (graph_rep - subgraph_rep)
        graph_diff_rep = graph_diff_rep.reshape((- 1), 1)
        action_prob = torch.matmul(edge_action_reps, graph_diff_rep)
        action_prob = action_prob.reshape((- 1))
        action_prob[selection] = NegINF
        # NOTE(review): F.softmax without an explicit dim is deprecated;
        # the tensor is 1-D here so behavior is unambiguous.
        action_prob = F.softmax((action_prob / self.temperature))
        return action_prob

    def estimate_edge_selection_prob_2(self, graph_rep, subgraph_rep, edge_action_reps, selection):
        """Alternative scorer: elementwise product fed through an MLP head
        instead of a plain inner product."""
        graph_diff_rep = (graph_rep - subgraph_rep)
        action_prob = self.edge_action_prob_generator((graph_diff_rep * edge_action_reps))
        action_prob = action_prob.reshape((- 1))
        action_prob[selection] = NegINF
        action_prob = F.softmax((action_prob / self.temperature))
        return action_prob
class ZoneUpdateHandler(threading.Thread):
    """Daemon thread that serializes zone updates.

    Zones are queued through :meth:`add_zones`; the worker loop pops one
    zone at a time and hands it to ``handler``. A set (guarded by a lock)
    prevents queueing the same zone twice while it is still pending.
    """

    def __init__(self, queue, handler):
        super(ZoneUpdateHandler, self).__init__()
        self.event = threading.Event()   # set to request the loop to stop
        self.lock = threading.Lock()     # protects zones_to_update
        self.zones_to_update = set()     # zones currently pending
        self.zones_queues = Queue()      # FIFO of pending zones
        self.queue_name = queue
        self.daemon = True
        self.handler = handler           # callable invoked once per zone

    def run(self):
        log.info('ZoneUpdateHandler thread start.')
        while (not self.event.wait(0.1)):
            zone = self.zones_queues.get()
            with self.lock:
                self.zones_to_update.remove(zone)
            self.handler(zone)
        log.error('ZoneUpdateHandler thread end.')

    def add_zones(self, zones):
        """Queue each zone for processing, skipping zones already pending."""
        for zone in zones:
            with self.lock:
                if (zone not in self.zones_to_update):
                    self.zones_to_update.add(zone)
                    self.zones_queues.put(zone)
        # BUG FIX: Thread.isAlive() was removed in Python 3.9; the modern
        # spelling is is_alive() (available since Python 2.6).
        if (not self.is_alive()):
            log.error('ZoneUpdateHandler thread is stopped by accident.')
            raise Exception
def test_losses_models_ext_def(pvwatts_dc_pvwatts_ac_system, location, weather, mocker):
    """An externally defined losses function must be called exactly once
    and its factor reflected in the model-chain results."""
    spy = mocker.spy(sys.modules[__name__], 'constant_losses')
    mc = ModelChain(
        pvwatts_dc_pvwatts_ac_system,
        location,
        dc_model='pvwatts',
        aoi_model='no_loss',
        spectral_model='no_loss',
        losses_model=constant_losses,
    )
    mc.run_model(weather)
    assert spy.call_count == 1
    assert mc.results.losses == 0.9
    assert isinstance(mc.results.ac, (pd.Series, pd.DataFrame))
    assert not mc.results.ac.empty
def test_mismatch_private_key():
    """Validating a certificate against a private key generated for a
    different certificate must raise KeyInvalidException."""
    public_key_data, _ = generate_test_cert()
    _, other_private_key_data = generate_test_cert()
    key_file = NamedTemporaryFile(delete=True)
    key_file.write(other_private_key_data)
    key_file.seek(0)
    cert = load_certificate(public_key_data)
    with pytest.raises(KeyInvalidException):
        cert.validate_private_key(key_file.name)
def retrieve_cdn_ip_networks(retrieve_new_data=False):
    """Return a sorted list of known CDN IP networks (CloudFront + Cloudflare).

    :param retrieve_new_data: forwarded to the per-provider retrievers to
        force a refresh instead of using cached data.
    """
    networks = []
    for provider_data in (
        retrieve_amazon_cloudfront_ip_ranges(retrieve_new_data),
        retrieve_cloudflare_ip_networks(retrieve_new_data),
    ):
        networks.extend(provider_data['list_of_ipaddress_objects'])
    # Mixed IPv4/IPv6 objects need the mixed-type sort key to be comparable.
    return sorted(networks, key=(lambda obj: ipaddress.get_mixed_type_key(obj)))
class DKKKU2(FinTS3Segment):
    """FinTS segment: request credit-card transactions (v2).

    Field order matters -- the FinTS3Segment metaclass serializes fields in
    declaration order. The `_d` descriptions are kept in German to match
    the FinTS specification wording.
    """
    account = DataElementGroupField(type=Account2, _d='Kontoverbindung Auftraggeber')
    credit_card_number = DataElementField(type='an', _d='Kreditkartennummer')
    subaccount = DataElementField(type='an', required=False, _d='Subaccount?')
    date_start = DataElementField(type='dat', required=False, _d='Von Datum')
    date_end = DataElementField(type='dat', required=False, _d='Bis Datum')
    max_number_responses = DataElementField(type='num', max_length=4, required=False, _d='Maximale Anzahl Eintrage')
    touchdown_point = DataElementField(type='an', max_length=35, required=False, _d='Aufsetzpunkt')
def test_format_variety():
    """extract_format must recognize type, width, precision, fill/align
    and the zero flag across a spread of format specifications."""

    def check(fmt, expected):
        parsed = parse.extract_format(fmt, {'spam': 'spam'})
        for key, value in expected.items():
            assert parsed.get(key) == value

    # Bare and width-prefixed single-letter types.
    for type_char in '%obxegfdDwWsS':
        check(type_char, {'type': type_char})
        check('10' + type_char, {'type': type_char, 'width': '10'})
    check('05d', {'type': 'd', 'width': '5', 'zero': True})
    # Alignment markers, with and without an explicit fill character.
    for align_char in '<>^':
        check(align_char, {'align': align_char})
        check('.' + align_char, {'align': align_char, 'fill': '.'})
    check('x=d', {'type': 'd', 'align': '=', 'fill': 'x'})
    check('d', {'type': 'd'})
    check('ti', {'type': 'ti'})
    check('spam', {'type': 'spam'})
    check('.^010d', {'type': 'd', 'width': '10', 'align': '^', 'fill': '.', 'zero': True})
    check('.2f', {'type': 'f', 'precision': '2'})
    check('10.2f', {'type': 'f', 'width': '10', 'precision': '2'})
def project_onto_sector(operator, qubits, sectors):
    """Project a QubitOperator onto fixed Z-eigenvalue sectors of `qubits`.

    Terms acting with X or Y on any projected qubit are dropped (they map
    out of the sector). Z factors on projected qubits are replaced by their
    eigenvalue (+1 for sector 0, -1 for sector 1), and remaining qubit
    indices are shifted down to fill the removed positions.

    :raises TypeError: if operator/qubits/sectors have the wrong type.
    :raises ValueError: on length mismatch or sector values outside {0, 1}.
    """
    if (not isinstance(operator, QubitOperator)):
        raise TypeError('Input operator must be a QubitOperator.')
    if (not isinstance(qubits, (list, numpy.ndarray))):
        raise TypeError('Qubit input must be an array-like.')
    if (not isinstance(sectors, (list, numpy.ndarray))):
        raise TypeError('Sector input must be an array-like.')
    if (len(qubits) != len(sectors)):
        raise ValueError('Number of qubits and sectors must be equal.')
    for i in sectors:
        if (i not in [0, 1]):
            raise ValueError('Sectors must be 0 or 1.')
    projected_operator = QubitOperator()
    for (term, factor) in operator.terms.items():
        # Drop terms with X/Y on a projected qubit.
        if [t for t in term if ((t[0] in qubits) and (t[1] in ['X', 'Y']))]:
            continue
        # Remove projected qubits from the term and shift surviving indices
        # down by the number of projected qubits below them.
        new_term = tuple((((t[0] - len([q for q in qubits if (q < t[0])])), t[1]) for t in term if (t[0] not in qubits)))
        # Each Z on a projected qubit contributes its sector eigenvalue
        # (+1 or -1) to the coefficient.
        new_factor = (factor * ((- 1) ** sum([sectors[qubits.index(t[0])] for t in term if (t[0] in qubits)])))
        projected_operator += QubitOperator(new_term, new_factor)
    return projected_operator
def test_get_observation_histogram(requests_mock):
    """A monthly histogram response is parsed into datetime-keyed counts."""
    requests_mock.get(
        f'{API_V1}/observations/histogram',
        json=SAMPLE_DATA['get_observation_histogram_month'],
        status_code=200,
    )
    results = get_observation_histogram(interval='month', place_id=24, d1='2020-01-01', d2='2020-12-31')
    assert len(results) == 12
    assert results[datetime(2020, 1, 1, 0, 0)] == 272
    assert all(isinstance(key, datetime) for key in results.keys())
class TestOptimizerStateShardingIntegration(unittest.TestCase, TestOptimizer):
    """Runs the shared optimizer test-suite against ZeRO state sharding,
    (re)initializing a single-process torch.distributed group per test."""

    # BUG FIX: this helper was declared without `self` but invoked as an
    # instance method, which raised TypeError; it takes no instance state,
    # so it is correctly a staticmethod.
    @staticmethod
    def _maybe_destro_dist():
        if dist.is_initialized():
            logging.debug('Destroy previous torch dist process group')
            dist.destroy_process_group()

    def setUp(self):
        # Ensure a clean process group before each test.
        self._maybe_destro_dist()
        dist_init(0, 1)

    def tearDown(self):
        self._maybe_destro_dist()

    def _get_config(self):
        # ZeRO wrapping a plain SGD base optimizer.
        return {'name': 'zero', 'base_optimizer': {'name': 'sgd'}, 'num_epochs': 3}

    def _instance_to_test(self):
        return ZeRO
def test_colorinterp_like_all(runner, path_4band_no_colorinterp, path_rgba_byte_tif, tmpdir):
    """`edit-info --like ... --all` must copy RGBA color interpretation
    onto a 4-band file that has none."""
    target = str(tmpdir.join('test_colorinterp_like_all.tif'))
    rasterio.shutil.copy(path_4band_no_colorinterp, target)
    result = runner.invoke(main_group, ['edit-info', target, '--like', path_rgba_byte_tif, '--all'])
    assert result.exit_code == 0
    expected = (ColorInterp.red, ColorInterp.green, ColorInterp.blue, ColorInterp.alpha)
    with rasterio.open(target) as src:
        assert src.colorinterp == expected
class Train_config():
    """Plain namespace of training hyper-parameters.

    NOTE(review): these are *class* attributes shared by every reference to
    Train_config; `lr_decay` in particular is a mutable list, so in-place
    mutation would be visible globally -- confirm that is intended.
    """
    world_size = 0            # number of distributed workers
    mini_batch_size = 0       # samples per iteration per worker
    iter_per_epoch = 0
    total_epoch = 0
    warm_iter = 0             # warm-up iterations, presumably before full LR -- confirm
    learning_rate = 0
    momentum = 0
    weight_decay = 0
    lr_decay = []             # presumably LR decay milestones -- confirm
    log_dump_interval = 0     # iterations between log dumps
    resume_weights = None     # checkpoint path to resume from, if any
    init_weights = None       # initial/pretrained weights path, if any
    model_dir = ''
    log_path = ''
def _populate():
    """Alias torch._dynamo submodules and attributes under the legacy
    `torchdynamo` namespace so old imports keep working.

    Submodules missing from the installed torch version are skipped
    silently.
    """
    for name in ('allowed_functions', 'bytecode_analysis', 'bytecode_transformation', 'codegen', 'config', 'convert_frame', 'debug_utils', 'eval_frame', 'exc', 'guards', 'logging', 'mutation_guard', 'optimizations', 'output_graph', 'profiler', 'replay_record', 'resume_execution', 'side_effects', 'skipfiles', 'source', 'symbolic_convert', 'test_case', 'testing', 'utils', 'variables'):
        try:
            # Register both as a module attribute here and in sys.modules so
            # `import torchdynamo.<name>` resolves.
            globals()[name] = sys.modules[f'torchdynamo.{name}'] = importlib.import_module(f'torch._dynamo.{name}')
        except ImportError:
            pass
    # Re-export the public top-level attributes as well.
    for (name, val) in torch._dynamo.__dict__.items():
        if (not name.startswith('_')):
            globals()[name] = val
class Snapshot(Model):
    """Frozen copy of a project's values at a point in time.

    Saving a new snapshot copies the project's current (snapshot=None)
    values; :meth:`rollback` restores them and removes newer snapshots.
    """
    objects = SnapshotManager()
    project = models.ForeignKey('Project', related_name='snapshots', on_delete=models.CASCADE, null=True, verbose_name=_('Project'), help_text=_('The project this snapshot belongs to.'))
    title = models.CharField(max_length=256, verbose_name=_('Title'), help_text=_('The title for this snapshot.'))
    description = models.TextField(blank=True, verbose_name=_('Description'), help_text=_('A description for this snapshot (optional).'))

    class Meta():
        ordering = ('project', '-created')
        verbose_name = _('Snapshot')
        verbose_name_plural = _('Snapshots')

    def __str__(self):
        return f'{self.project.title} / {self.title}'

    def get_absolute_url(self):
        return reverse('project', kwargs={'pk': self.project.pk})

    def save(self, *args, **kwargs):
        """Save the snapshot; on first save, copy current project values.

        NOTE(review): `super().save()` is called without forwarding
        *args/**kwargs (e.g. using=..., update_fields=...) -- confirm this
        is intentional.
        """
        # Copy values only when creating, unless the caller overrides.
        copy_values = kwargs.pop('copy_values', (self.pk is None))
        super().save()
        if copy_values:
            for value in self.project.values.filter(snapshot=None):
                # Clearing pk makes save() insert a duplicate row bound to
                # this snapshot.
                value.pk = None
                value.snapshot = self
                value.save()
                if value.file:
                    value.copy_file(value.file_name, value.file)

    def rollback(self):
        """Restore this snapshot's values as the project's current values.

        Deletes the current values, detaches this snapshot's values
        (snapshot=None), and removes all snapshots created at or after this
        one (including this snapshot itself).
        """
        self.project.values.filter(snapshot=None).delete()
        for value in self.values.all():
            value.snapshot = None
            value.save()
            if value.file:
                value.file.save(value.file_name, value.file)
        for snapshot in self.project.snapshots.filter(created__gte=self.created):
            snapshot.delete()
def loadUtilitySpectra():
    """Load the SPCTRAL reference spectra into module-level globals.

    Reads five unpacked columns from ``SPCTRAL_si_units.txt`` next to this
    module; the water and ozone columns are divided by 100 (presumably a
    percent-to-fraction conversion — confirm against the data file).
    """
    global am_zero_wavelength, am_zero_irradiance, waterspectra, ozonespectra, uniformgasspectra
    columns = numpy.loadtxt(os.path.join(this_dir, 'SPCTRAL_si_units.txt'), unpack=True)
    am_zero_wavelength, am_zero_irradiance, water, ozone, uniformgasspectra = columns
    waterspectra = water / 100
    ozonespectra = ozone / 100
class LoginForm(Form):
    """Form that lets a user log in with an email address and password."""

    def __init__(self, view):
        super().__init__(view, 'login')
        self.use_layout(FormLayout())
        interface = AccountManagementInterface.for_current_session()
        # Surface any domain error (e.g. bad credentials) above the inputs.
        if self.exception:
            self.layout.add_alert_for_domain_exception(self.exception)
        self.layout.add_input(TextInput(self, interface.fields.email))
        self.layout.add_input(PasswordInput(self, interface.fields.password))
        # Wire the login event to a primary-styled submit button.
        self.define_event_handler(interface.events.login_event)
        self.add_child(Button(self, interface.events.login_event, style='primary'))
def main():
    """Convert recorded runtime samples into a runtime-distribution pickle.

    Reads the timing pickle named by ``opts.time_info`` and writes an
    empirical runtime distribution to ``opts.out_path``. Raises ValueError
    for unknown method types.
    """
    opts = parse_args()
    # Skip work when the output exists and overwriting is disabled.
    if not opts.overwrite and isfile(opts.out_path):
        return
    mkdir2(dirname(opts.out_path))
    # Bug fix: use context managers — the original passed bare open() calls
    # to pickle, leaking both file handles.
    with open(opts.time_info, 'rb') as f:
        time_info = pickle.load(f)
    if opts.method_type == 'det':
        rt_samples = time_info['runtime_all']
        rt_dist = {'type': 'empirical', 'samples': rt_samples}
    else:
        raise ValueError(f'Unknown method type "{opts.method_type}"')
    with open(opts.out_path, 'wb') as f:
        pickle.dump(rt_dist, f)
class Element(BaseElement):
    """Pythonic wrapper around a browser DOM node exposed as ``self._js``.

    NOTE(review): the repeated definitions of ``html``, ``content``, ``id``,
    ``value`` and ``selected`` below read like ``@property``/``@<name>.setter``
    pairs whose decorators were lost in this copy; as written, each later
    ``def`` simply shadows the earlier one — confirm against the original
    source before relying on this file.
    """
    def children(self):
        # Wrap every direct DOM child in this same (sub)class.
        return [self.__class__(el) for el in self._js.children]
    def append(self, child):
        # Accepts a raw JsProxy, an Element, or an ElementCollection.
        if isinstance(child, JsProxy):
            # Wrap the raw JS node and re-dispatch into the Element branch.
            return self.append(Element(child))
        elif isinstance(child, Element):
            self._js.appendChild(child._js)
            return child
        elif isinstance(child, ElementCollection):
            # Note: this branch returns None (no aggregate return value).
            for el in child:
                self.append(el)
    def html(self):
        # (presumed property getter) raw innerHTML of the node.
        return self._js.innerHTML
    def html(self, value):
        # (presumed setter half of the pair above)
        self._js.innerHTML = value
    def content(self):
        # (presumed getter) innerHTML, except for <template> elements.
        if (self._js.tagName == 'TEMPLATE'):
            warnings.warn('Content attribute not supported for template elements.', stacklevel=2)
            return None
        return self._js.innerHTML
    def content(self, value):
        # (presumed setter) renders `value` into this element via display().
        if (self._js.tagName == 'TEMPLATE'):
            warnings.warn('Content attribute not supported for template elements.', stacklevel=2)
            return
        display(value, target=self.id)
    def id(self):
        # (presumed getter) the DOM id attribute.
        return self._js.id
    def id(self, value):
        # (presumed setter)
        self._js.id = value
    def options(self):
        # Lazily build and cache an OptionsProxy; only select-like
        # elements support it.
        if ('options' in self._proxies):
            return self._proxies['options']
        if (not (self._js.tagName.lower() in {'select', 'datalist', 'optgroup'})):
            raise AttributeError(f'Element {self._js.tagName} has no options attribute.')
        self._proxies['options'] = OptionsProxy(self)
        return self._proxies['options']
    def value(self):
        # (presumed getter) the DOM value attribute.
        return self._js.value
    def value(self, value):
        # (presumed setter) guard against elements without a JS `value`.
        if (not hasattr(self._js, 'value')):
            raise AttributeError(f'Element {self._js.tagName} has no value attribute. If you want to force a value attribute, set it directly using the `_js.value = <value>` javascript API attribute instead.')
        self._js.value = value
    def selected(self):
        # (presumed getter) the DOM selected attribute.
        return self._js.selected
    def selected(self, value):
        # (presumed setter) guard against elements without `selected`.
        if (not hasattr(self._js, 'selected')):
            raise AttributeError(f'Element {self._js.tagName} has no value attribute. If you want to force a value attribute, set it directly using the `_js.value = <value>` javascript API attribute instead.')
        self._js.selected = value
    def clone(self, new_id=None):
        # Deep-clone this node; the clone's id is set to `new_id`.
        clone = Element(self._js.cloneNode(True))
        clone.id = new_id
        return clone
    def remove_class(self, classname):
        # Remove one class name or a list of names; returns self for chaining.
        classList = self._js.classList
        if isinstance(classname, list):
            classList.remove(*classname)
        else:
            classList.remove(classname)
        return self
    def add_class(self, classname):
        # Add one class name or a list of names; returns self for chaining.
        classList = self._js.classList
        if isinstance(classname, list):
            classList.add(*classname)
        else:
            self._js.classList.add(classname)
        return self
    def classes(self):
        # Materialize the classList into a plain Python list.
        classes = self._js.classList.values()
        return [x for x in classes]
    def show_me(self):
        # Scroll this element into the visible viewport.
        self._js.scrollIntoView()
class FusedAdamV1(torch.optim.Optimizer):
    """Adam optimizer backed by the ``fused_adam_cuda`` extension kernel.

    Adam with optional global-norm gradient clipping folded into the fused
    CUDA update. AMSGrad is explicitly unsupported.

    NOTE(review): ``supports_memory_efficient_fp16`` reads like a stripped
    ``@property`` — confirm against the original source.
    """
    def __init__(self, params, lr=0.001, bias_correction=True, betas=(0.9, 0.999), eps=1e-08, eps_inside_sqrt=False, weight_decay=0.0, max_grad_norm=0.0, amsgrad=False):
        # Import the CUDA extension lazily so this module can be imported on
        # machines without it; the handle is stored in a module-level global.
        global fused_adam_cuda
        import importlib
        fused_adam_cuda = importlib.import_module('fused_adam_cuda')
        if amsgrad:
            raise RuntimeError('FusedAdam does not support the AMSGrad variant.')
        defaults = {'lr': lr, 'bias_correction': bias_correction, 'betas': betas, 'eps': eps, 'weight_decay': weight_decay, 'max_grad_norm': max_grad_norm}
        super().__init__(params, defaults)
        # eps_mode is passed straight to the kernel: 0 when eps_inside_sqrt,
        # else 1 (exact kernel semantics not visible here).
        self.eps_mode = (0 if eps_inside_sqrt else 1)
    def supports_memory_efficient_fp16(self):
        return True
    def step(self, closure=None, grads=None, scale=1.0, grad_norms=None):
        """Perform one optimization step.

        Args:
            closure: optional callable re-evaluating the model; its return
                value is returned as ``loss``.
            grads: optional gradients used instead of ``p.grad``; may be a
                flat list (single group) or a list of lists (one per group).
            scale: divisor the fused kernel applies to gradients (used with
                loss-scaled fp16 training).
            grad_norms: optional per-group gradient norms used for clipping.
        """
        loss = None
        if (closure is not None):
            loss = closure()
        # Normalize `grads` into one entry per parameter group.
        if (grads is None):
            grads_group = ([None] * len(self.param_groups))
        elif isinstance(grads, types.GeneratorType):
            grads_group = [grads]
        elif (type(grads[0]) != list):
            grads_group = [grads]
        else:
            grads_group = grads
        if (grad_norms is None):
            grad_norms = ([None] * len(self.param_groups))
        for (group, grads_this_group, grad_norm) in zip(self.param_groups, grads_group, grad_norms):
            if (grads_this_group is None):
                grads_this_group = ([None] * len(group['params']))
            # Fold norm-based clipping into the kernel's scale factor: when
            # the (unscaled) norm exceeds max_grad_norm, grow the divisor.
            combined_scale = scale
            if (group['max_grad_norm'] > 0):
                clip = (((grad_norm / scale) + 1e-06) / group['max_grad_norm'])
                if (clip > 1):
                    combined_scale = (clip * scale)
            bias_correction = (1 if group['bias_correction'] else 0)
            for (p, grad) in zip(group['params'], grads_this_group):
                # Skip parameters that have no gradient from either source.
                if ((p.grad is None) and (grad is None)):
                    continue
                if (grad is None):
                    grad = p.grad.data
                if grad.is_sparse:
                    raise RuntimeError('FusedAdam does not support sparse gradients, please consider SparseAdam instead')
                # Optimizer state is kept in fp32 regardless of param dtype.
                p_data_fp32 = p.data.float()
                state = self.state[p]
                if (len(state) == 0):
                    # First update for this parameter: initialize moments.
                    state['step'] = 0
                    state['exp_avg'] = torch.zeros_like(p_data_fp32)
                    state['exp_avg_sq'] = torch.zeros_like(p_data_fp32)
                else:
                    state['exp_avg'] = state['exp_avg'].type_as(p_data_fp32)
                    state['exp_avg_sq'] = state['exp_avg_sq'].type_as(p_data_fp32)
                exp_avg = state['exp_avg']
                exp_avg_sq = state['exp_avg_sq']
                (beta1, beta2) = group['betas']
                state['step'] += 1
                # The kernel writes the updated parameter back into out_p
                # (p.data), in the parameter's original dtype.
                out_p = p.data
                fused_adam_cuda.adam(p_data_fp32, out_p, exp_avg, exp_avg_sq, grad, group['lr'], beta1, beta2, group['eps'], combined_scale, state['step'], self.eps_mode, bias_correction, group['weight_decay'])
        return loss
def convert(comp_type: str, src_file: Path, dest_dir: Path):
    """Convert a DPR checkpoint into HF format and round-trip it.

    Loads the component given by ``comp_type`` from ``src_file``, saves it
    under ``dest_dir``, then immediately reloads it as a sanity check that
    the saved files are loadable.
    """
    dest_dir = Path(dest_dir)
    dest_dir.mkdir(exist_ok=True)
    state = DPRState.from_type(comp_type, src_file=src_file)
    dpr_model = state.load_dpr_model()
    dpr_model.save_pretrained(dest_dir)
    dpr_model.from_pretrained(dest_dir)
class TestExitCode(unittest.TestCase):
    """Tests for mprof's --exit-code handling of the profiled script."""

    def setUp(self):
        sys.path.append('.')
        from mprof import run_action
        self.run_action = run_action

    def _run_script(self, source, argv_prefix):
        """Write *source* to a temporary .py file, set sys.argv to
        *argv_prefix* plus the file name, and invoke mprof's run_action.

        Extracted from the three tests below, which previously duplicated
        this whole body.
        """
        tmpfile = tempfile.NamedTemporaryFile('w', suffix='.py')
        with tmpfile as ofile:
            ofile.write(source)
            ofile.flush()
            sys.argv = argv_prefix + [tmpfile.name]
            self.run_action()

    def test_exit_code_success(self):
        # A clean script under --exit-code should SystemExit with code 0.
        self.assertRaisesRegex(
            SystemExit, '0',
            self._run_script, '1+1', ['<ignored>', '--exit-code'])

    def test_exit_code_fail(self):
        # A raising script under --exit-code should SystemExit with code 1.
        self.assertRaisesRegex(
            SystemExit, '1',
            self._run_script,
            "raise RuntimeError('I am not working nicely')",
            ['<ignored>', '--exit-code'])

    def test_no_exit_code_success(self):
        # Without --exit-code, a raising script must not make mprof exit.
        self._run_script("raise RuntimeError('I am not working nicely')",
                         ['<ignored>'])
def decode_onion_error(error_packet: bytes, payment_path_pubkeys: Sequence[bytes], session_key: bytes) -> (OnionRoutingFailureMessage, int):
    """Decrypt an onion error packet and parse the failure it carries.

    Returns the parsed failure message together with the index of the
    erring hop along the payment path.

    NOTE(review): the return annotation is a tuple literal rather than
    ``Tuple[...]``; it evaluates at runtime but is not a valid type hint —
    left as-is to preserve the interface.
    """
    decrypted, erring_index = _decode_onion_error(error_packet, payment_path_pubkeys, session_key)
    return get_failure_msg_from_onion_error(decrypted), erring_index
def handle_style(book, data):
    """Parse a BIFF STYLE record and register it in ``book.style_name_map``.

    Maps the style name to ``(built_in, xf_index)``. Does nothing unless the
    workbook was opened with ``formatting_info``.
    """
    if (not book.formatting_info):
        return
    # Verbose-debug flag for the diagnostic prints below.
    blah = (DEBUG or (book.verbosity >= 2))
    bv = book.biff_version
    (flag_and_xfx, built_in_id, level) = unpack('<HBB', data[:4])
    # Low 12 bits of the first word carry the XF index; bit 15 (0x8000)
    # flags a built-in style.
    xf_index = (flag_and_xfx & 4095)
    if ((data == b'\x00\x00\x00\x00') and ('Normal' not in book.style_name_map)):
        # All-zero payload: synthesize the built-in 'Normal' style if it has
        # not been defined yet (presumably a workaround for malformed files
        # — confirm against the originating library's history).
        built_in = 1
        built_in_id = 0
        xf_index = 0
        name = 'Normal'
        level = 255
    elif (flag_and_xfx & 32768):
        # Built-in style: name comes from the fixed lookup table; ids 1 and 2
        # get the outline level (1-based) appended to the name.
        built_in = 1
        name = built_in_style_names[built_in_id]
        if (1 <= built_in_id <= 2):
            name += str((level + 1))
    else:
        # User-defined style: the name is stored in the record itself.
        built_in = 0
        built_in_id = 0
        level = 0
        if (bv >= 80):
            # BIFF8+: unicode string with a 2-byte length prefix.
            try:
                name = unpack_unicode(data, 2, lenlen=2)
            except UnicodeDecodeError:
                # Dump context to the log before re-raising so the bad
                # record can be diagnosed.
                print(('STYLE: built_in=%d xf_index=%d built_in_id=%d level=%d' % (built_in, xf_index, built_in_id, level)), file=book.logfile)
                print('raw bytes:', repr(data[2:]), file=book.logfile)
                raise
        else:
            # Older BIFF: codepage-encoded string with a 1-byte length.
            name = unpack_string(data, 2, book.encoding, lenlen=1)
    if (blah and (not name)):
        print('WARNING *** A user-defined style has a zero-length name', file=book.logfile)
    book.style_name_map[name] = (built_in, xf_index)
    if blah:
        fprintf(book.logfile, 'STYLE: built_in=%d xf_index=%d built_in_id=%d level=%d name=%r\n', built_in, xf_index, built_in_id, level, name)
class TestCDPSession(BaseTestCase):
    """Integration tests for raw CDP sessions created from a page target."""

    async def test_create_session(self):
        # A fresh session can run Runtime commands whose effects are visible
        # to the page.
        client = await self.page.target.createCDPSession()
        await client.send('Runtime.enable')
        await client.send('Runtime.evaluate', {'expression': 'window.foo = "bar"'})
        foo = await self.page.evaluate('window.foo')
        self.assertEqual(foo, 'bar')

    async def test_send_event(self):
        # Enabling the Network domain delivers request events to listeners.
        client = await self.page.target.createCDPSession()
        await client.send('Network.enable')
        events = []
        client.on('Network.requestWillBeSent', lambda e: events.append(e))
        await self.page.goto(self.url + 'empty')
        self.assertEqual(len(events), 1)

    async def test_enable_disable_domain(self):
        # Coverage uses the same domains; enabling them first must not break it.
        client = await self.page.target.createCDPSession()
        await client.send('Runtime.enable')
        await client.send('Debugger.enable')
        await self.page.coverage.startJSCoverage()
        await self.page.coverage.stopJSCoverage()

    async def test_detach(self):
        # After detach, further sends raise NetworkError.
        client = await self.page.target.createCDPSession()
        await client.send('Runtime.enable')
        evalResponse = await client.send('Runtime.evaluate', {'expression': '1 + 2', 'returnByValue': True})
        self.assertEqual(evalResponse['result']['value'], 3)
        await client.detach()
        with self.assertRaises(NetworkError):
            await client.send('Runtime.evaluate', {'expression': '1 + 3', 'returnByValue': True})
# NOTE(review): the bare expression below looks like the argument of a
# stripped hypothesis decorator for the following test (probably
# ``@given(st.sets(text))``) — confirm against the original source.
(st.sets(text))
def test_map(tmpdir_factory, keys):
    """A trie saved to disk can be mapped back and still contains every key."""
    trie = marisa_trie.Trie(keys)
    dirname = f'{str(uuid4())}_'
    path = str(tmpdir_factory.mktemp(dirname).join('trie.bin'))
    trie.save(path)
    # Bug fix: read via a context manager — the original used
    # open(path, 'rb').read(), leaking the file handle.
    with open(path, 'rb') as f:
        data = f.read()
    trie2 = marisa_trie.Trie()
    trie2.map(data)
    for key in keys:
        assert key in trie2
class MedrxivClusteringP2P(AbsTaskClustering):
    """MTEB clustering task over medRxiv titles+abstracts (10 sets, grouped by main category)."""

    def description(self):
        # NOTE(review): the 'reference' value was garbled in this copy
        # (`' 'type':` — the URL string was lost, leaving a syntax error);
        # restored to the medRxiv homepage URL used by the upstream task.
        return {
            'name': 'MedrxivClusteringP2P',
            'hf_hub_name': 'mteb/medrxiv-clustering-p2p',
            'description': 'Clustering of titles+abstract from medrxiv. Clustering of 10 sets, based on the main category.',
            'reference': 'https://www.medrxiv.org/',
            'type': 'Clustering',
            'category': 'p2p',
            'eval_splits': ['test'],
            'eval_langs': ['en'],
            'main_score': 'v_measure',
            'revision': 'e7a26af6f3ae46b30dde8737f02c07b1505bcc73',
        }
def test_in_solver():
    """Solver options set at construction, item-wise, or by wholesale
    reassignment must all be reflected in solver.options."""
    initial = {'method': 'adams', 'store_states': True, 'atol': 1}
    solver = qutip.SESolver(qutip.qeye(1), options=initial)
    adams = qutip.integrator.IntegratorScipyAdams
    lsoda = qutip.integrator.IntegratorScipylsoda
    bdf = qutip.integrator.IntegratorScipyBDF
    # Constructor options are honoured; integrator defaults fill the gaps.
    assert solver.options['store_states'] is True
    assert solver.options['method'] == 'adams'
    assert solver.options['atol'] == 1
    assert solver.options['order'] == adams.integrator_options['order']
    # Switching method falls back to the new integrator's defaults while
    # keeping solver-level options.
    solver.options['method'] = 'bdf'
    assert solver.options['store_states'] is True
    assert solver.options['method'] == 'bdf'
    assert solver.options['atol'] == bdf.integrator_options['atol']
    assert solver.options['order'] == bdf.integrator_options['order']
    # Wholesale reassignment keeps previously-set solver options too.
    solver.options = {'method': 'vern7', 'store_final_state': True, 'atol': 0.01}
    assert solver.options['store_states'] is True
    assert solver.options['store_final_state'] is True
    assert solver.options['method'] == 'vern7'
    assert solver.options['atol'] == 0.01
    assert 'order' not in solver.options
    assert 'interpolate' in solver.options
def test_rebuild_system_tpm(s):
    """rebuild_system_tpm reassembles per-node TPMs into the system TPM."""
    tpm_a = ExplicitTPM(np.array([[0, 1], [0, 0]]))
    tpm_b = ExplicitTPM(np.array([[0, 1]]))
    expected = ExplicitTPM(np.array([[[0, 0], [1, 1]], [[0, 0], [0, 1]]]))
    assert macro.rebuild_system_tpm([tpm_a, tpm_b]).array_equal(expected)
    # Round trip: rebuilding from the fixture's own node TPMs recovers s.tpm.
    on_tpms = [node.tpm_on for node in s.nodes]
    assert macro.rebuild_system_tpm(on_tpms).array_equal(s.tpm)
class Optimizer(object):
    """Controller wrapping a torch optimizer with learning-rate decay,
    gradient clipping and optional fp16 (apex/legacy) handling.

    NOTE(review): ``from_opt`` takes ``cls`` and reads like a stripped
    ``@classmethod``; ``training_step`` and ``learning_rate`` read like
    stripped ``@property`` definitions — confirm against the original.
    """
    def __init__(self, optimizer, learning_rate, learning_rate_decay_fn=None, max_grad_norm=None):
        # optimizer: the wrapped torch optimizer instance.
        # learning_rate: base LR, scaled by the decay schedule each step.
        # learning_rate_decay_fn: maps decay_step -> scale, or None for a
        #   constant learning rate.
        # max_grad_norm: clipping threshold; falsy disables clipping.
        self._optimizer = optimizer
        self._learning_rate = learning_rate
        self._learning_rate_decay_fn = learning_rate_decay_fn
        self._max_grad_norm = (max_grad_norm or 0)
        self._training_step = 1
        self._decay_step = 1
        # One of None / 'amp' / 'legacy'; selects the fp16 backward path.
        self._fp16 = None
    def from_opt(cls, model, opt, checkpoint=None):
        """Build an Optimizer from options, optionally restoring checkpoint state.

        ``opt.reset_optim`` controls reuse of the checkpointed optimizer:
        'none' keeps options and state, 'all' keeps nothing, 'states' keeps
        options but drops the inner optimizer state, 'keep_states' keeps
        state under the new options.
        """
        optim_opt = opt
        optim_state_dict = None
        if (opt.train_from and (checkpoint is not None)):
            optim = checkpoint['optim']
            ckpt_opt = checkpoint['opt']
            ckpt_state_dict = {}
            if isinstance(optim, Optimizer):
                # Backward compatibility with checkpoints that pickled the
                # wrapper object itself rather than a state dict.
                ckpt_state_dict['training_step'] = (optim._step + 1)
                ckpt_state_dict['decay_step'] = (optim._step + 1)
                ckpt_state_dict['optimizer'] = optim.optimizer.state_dict()
            else:
                ckpt_state_dict = optim
            if (opt.reset_optim == 'none'):
                optim_opt = ckpt_opt
                optim_state_dict = ckpt_state_dict
            elif (opt.reset_optim == 'all'):
                pass
            elif (opt.reset_optim == 'states'):
                optim_opt = ckpt_opt
                optim_state_dict = ckpt_state_dict
                del optim_state_dict['optimizer']
            elif (opt.reset_optim == 'keep_states'):
                optim_state_dict = ckpt_state_dict
        optimizer = cls(build_torch_optimizer(model, optim_opt), optim_opt.learning_rate, learning_rate_decay_fn=make_learning_rate_decay_fn(optim_opt), max_grad_norm=optim_opt.max_grad_norm)
        if (opt.model_dtype == 'fp16'):
            # fusedadam handles fp16 via its own master-weights path;
            # everything else goes through apex amp.
            if (opt.optim == 'fusedadam'):
                optimizer._fp16 = 'legacy'
            else:
                optimizer._fp16 = 'amp'
        if optim_state_dict:
            optimizer.load_state_dict(optim_state_dict)
        return optimizer
    def training_step(self):
        # Number of optimization steps performed so far (1-based).
        return self._training_step
    def learning_rate(self):
        """Current learning rate after applying the decay schedule."""
        if (self._learning_rate_decay_fn is None):
            return self._learning_rate
        scale = self._learning_rate_decay_fn(self._decay_step)
        return (scale * self._learning_rate)
    def state_dict(self):
        # Serializable snapshot: step counters plus the inner optimizer state.
        return {'training_step': self._training_step, 'decay_step': self._decay_step, 'optimizer': self._optimizer.state_dict()}
    def load_state_dict(self, state_dict):
        # decay_step/optimizer are optional for partially-reset checkpoints.
        self._training_step = state_dict['training_step']
        if ('decay_step' in state_dict):
            self._decay_step = state_dict['decay_step']
        if ('optimizer' in state_dict):
            self._optimizer.load_state_dict(state_dict['optimizer'])
    def zero_grad(self):
        self._optimizer.zero_grad()
    def backward(self, loss):
        """Backpropagate, routing through the configured fp16 path if any."""
        if (self._fp16 == 'amp'):
            import apex
            with apex.amp.scale_loss(loss, self._optimizer) as scaled_loss:
                scaled_loss.backward()
        elif (self._fp16 == 'legacy'):
            kwargs = {}
            # Older fp16 optimizers accept update_master_grads in backward().
            if ('update_master_grads' in fn_args(self._optimizer.backward)):
                kwargs['update_master_grads'] = True
            self._optimizer.backward(loss, **kwargs)
        else:
            loss.backward()
    def step(self):
        """Apply the current learning rate, clip gradients, and step.

        Increments both the decay and training step counters.
        """
        learning_rate = self.learning_rate()
        if (self._fp16 == 'legacy'):
            if hasattr(self._optimizer, 'update_master_grads'):
                self._optimizer.update_master_grads()
            if (hasattr(self._optimizer, 'clip_master_grads') and (self._max_grad_norm > 0)):
                self._optimizer.clip_master_grads(self._max_grad_norm)
        for group in self._optimizer.param_groups:
            group['lr'] = learning_rate
            # fp32 path clips per parameter group here; fp16 paths clipped
            # master grads above.
            if ((self._fp16 is None) and (self._max_grad_norm > 0)):
                clip_grad_norm_(group['params'], self._max_grad_norm)
        self._optimizer.step()
        self._decay_step += 1
        self._training_step += 1
def get_valid_dataloader(musdb_root, is_wav, filed_mode, n_fft, hop_length, num_frame, batch_size=4, num_workers=1, pin_memory=True, target_names=None, cache_mode=True, dev_mode=False) -> DataLoader:
    """Build the MUSDB validation DataLoader.

    In ``filed_mode`` the validation set is read directly from files;
    otherwise it is built from a MusdbLoader instance.
    """
    spec_kwargs = dict(n_fft=n_fft, hop_length=hop_length, num_frame=num_frame,
                       target_names=target_names, cache_mode=cache_mode, dev_mode=dev_mode)
    if filed_mode:
        valid_set = FiledMusdbValidSet(musdb_root, is_wav, **spec_kwargs)
    else:
        loader = MusdbLoader(musdb_root, is_wav)
        valid_set = MusdbValidSet(loader.musdb_valid, **spec_kwargs)
    return DataLoader(valid_set, batch_size=batch_size, num_workers=num_workers, pin_memory=pin_memory)
class MockReadline:
    """Minimal stand-in for a readline object, used in tests."""

    def __init__(self):
        # Start with an empty editing buffer and a fresh history store.
        self.l_buffer = lineobj.ReadLineTextBuffer('')
        self._history = history.LineHistory()

    def add_history(self, line):
        """Record *line* in the history store."""
        self._history.add_history(lineobj.TextLine(line))

    def _print_prompt(self):
        # Prompt rendering is a no-op under test.
        pass

    def _bell(self):
        # Audible bell is a no-op under test.
        pass

    def insert_text(self, string):
        """Insert *string* into the editing buffer at the cursor."""
        self.l_buffer.insert_text(string)
class TestGenericReporting():
    """End-to-end checks of pytest terminal reporting: collect errors,
    --maxfail, --tb modes, report headers, and --show-capture filtering."""
    def test_collect_fail(self, pytester: Pytester, option) -> None:
        # A module that fails to import is reported as a collect error.
        pytester.makepyfile('import xyz\n')
        result = pytester.runpytest(*option.args)
        result.stdout.fnmatch_lines(['ImportError while importing*', '*No module named *xyz*', '*1 error*'])
    def test_maxfailures(self, pytester: Pytester, option) -> None:
        # --maxfail=2 stops the session after the second failure.
        pytester.makepyfile('\n def test_1():\n assert 0\n def test_2():\n assert 0\n def test_3():\n assert 0\n ')
        result = pytester.runpytest('--maxfail=2', *option.args)
        result.stdout.fnmatch_lines(['*def test_1():*', '*def test_2():*', '*! stopping after 2 failures !*', '*2 failed*'])
    def test_maxfailures_with_interrupted(self, pytester: Pytester) -> None:
        # When a test also sets session.shouldstop, both the maxfail notice
        # and the interruption reason are reported.
        pytester.makepyfile('\n def test(request):\n request.session.shouldstop = "session_interrupted"\n assert 0\n ')
        result = pytester.runpytest('--maxfail=1', '-ra')
        result.stdout.fnmatch_lines(['*= short test summary info =*', 'FAILED *', '*! stopping after 1 failures !*', '*! session_interrupted !*', '*= 1 failed in*'])
    def test_tb_option(self, pytester: Pytester, option) -> None:
        # --tb=long shows source lines, --tb=short hides them but keeps the
        # traceback, --tb=no hides the failure section entirely.
        pytester.makepyfile('\n import pytest\n def g():\n raise IndexError\n def test_func():\n print(6*7)\n g() # --calling--\n ')
        for tbopt in ['long', 'short', 'no']:
            print(('testing --tb=%s...' % tbopt))
            result = pytester.runpytest('-rN', ('--tb=%s' % tbopt))
            s = result.stdout.str()
            if (tbopt == 'long'):
                assert ('print(6*7)' in s)
            else:
                assert ('print(6*7)' not in s)
            if (tbopt != 'no'):
                assert ('--calling--' in s)
                assert ('IndexError' in s)
            else:
                assert ('FAILURES' not in s)
                assert ('--calling--' not in s)
                assert ('IndexError' not in s)
    def test_tb_crashline(self, pytester: Pytester, option) -> None:
        # --tb=line reduces each failure to a single file:line message.
        p = pytester.makepyfile('\n import pytest\n def g():\n raise IndexError\n def test_func1():\n print(6*7)\n g() # --calling--\n def test_func2():\n assert 0, "hello"\n ')
        result = pytester.runpytest('--tb=line')
        bn = p.name
        result.stdout.fnmatch_lines([('*%s:3: IndexError*' % bn), ('*%s:8: AssertionError: hello*' % bn)])
        s = result.stdout.str()
        assert ('def test_func2' not in s)
    def test_tb_crashline_pytrace_false(self, pytester: Pytester, option) -> None:
        # pytest.fail(pytrace=False) still yields a one-line entry with --tb=line.
        p = pytester.makepyfile("\n import pytest\n def test_func1():\n pytest.fail('test_func1', pytrace=False)\n ")
        result = pytester.runpytest('--tb=line')
        result.stdout.str()
        bn = p.name
        result.stdout.fnmatch_lines([('*%s:3: Failed: test_func1' % bn)])
    def test_pytest_report_header(self, pytester: Pytester, option) -> None:
        # pytest_report_header hooks from both the root and a nested
        # conftest contribute lines to the header.
        pytester.makeconftest('\n def pytest_sessionstart(session):\n session.config._somevalue = 42\n def pytest_report_header(config):\n return "hello: %s" % config._somevalue\n ')
        pytester.mkdir('a').joinpath('conftest.py').write_text('\ndef pytest_report_header(config, start_path):\n return ["line1", str(start_path)]\n', encoding='utf-8')
        result = pytester.runpytest('a')
        result.stdout.fnmatch_lines(['*hello: 42*', 'line1', str(pytester.path)])
    def test_show_capture(self, pytester: Pytester) -> None:
        # --show-capture filters which captured streams (stdout/stderr/log)
        # appear in the failure report.
        pytester.makepyfile("\n import sys\n import logging\n def test_one():\n sys.stdout.write('!This is stdout!')\n sys.stderr.write('!This is stderr!')\n logging.warning('!This is a warning log msg!')\n assert False, 'Something failed'\n ")
        result = pytester.runpytest('--tb=short')
        result.stdout.fnmatch_lines(['!This is stdout!', '!This is stderr!', '*WARNING*!This is a warning log msg!'])
        result = pytester.runpytest('--show-capture=all', '--tb=short')
        result.stdout.fnmatch_lines(['!This is stdout!', '!This is stderr!', '*WARNING*!This is a warning log msg!'])
        stdout = pytester.runpytest('--show-capture=stdout', '--tb=short').stdout.str()
        assert ('!This is stderr!' not in stdout)
        assert ('!This is stdout!' in stdout)
        assert ('!This is a warning log msg!' not in stdout)
        stdout = pytester.runpytest('--show-capture=stderr', '--tb=short').stdout.str()
        assert ('!This is stdout!' not in stdout)
        assert ('!This is stderr!' in stdout)
        assert ('!This is a warning log msg!' not in stdout)
        stdout = pytester.runpytest('--show-capture=log', '--tb=short').stdout.str()
        assert ('!This is stdout!' not in stdout)
        assert ('!This is stderr!' not in stdout)
        assert ('!This is a warning log msg!' in stdout)
        stdout = pytester.runpytest('--show-capture=no', '--tb=short').stdout.str()
        assert ('!This is stdout!' not in stdout)
        assert ('!This is stderr!' not in stdout)
        assert ('!This is a warning log msg!' not in stdout)
    def test_show_capture_with_teardown_logs(self, pytester: Pytester) -> None:
        # Same filtering must apply to output produced during teardown.
        # NOTE(review): the embedded fixture source contains a bare
        # '(scope="function", autouse="True")' — this looks like a
        # '@pytest.fixture' decorator whose name was stripped in this copy;
        # confirm against the original file.
        pytester.makepyfile('\n import logging\n import sys\n import pytest\n\n (scope="function", autouse="True")\n def hook_each_test(request):\n yield\n sys.stdout.write("!stdout!")\n sys.stderr.write("!stderr!")\n logging.warning("!log!")\n\n def test_func():\n assert False\n ')
        result = pytester.runpytest('--show-capture=stdout', '--tb=short').stdout.str()
        assert ('!stdout!' in result)
        assert ('!stderr!' not in result)
        assert ('!log!' not in result)
        result = pytester.runpytest('--show-capture=stderr', '--tb=short').stdout.str()
        assert ('!stdout!' not in result)
        assert ('!stderr!' in result)
        assert ('!log!' not in result)
        result = pytester.runpytest('--show-capture=log', '--tb=short').stdout.str()
        assert ('!stdout!' not in result)
        assert ('!stderr!' not in result)
        assert ('!log!' in result)
        result = pytester.runpytest('--show-capture=no', '--tb=short').stdout.str()
        assert ('!stdout!' not in result)
        assert ('!stderr!' not in result)
        assert ('!log!' not in result)
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.