code stringlengths 281 23.7M |
|---|
class InlineQueryResultVoice(InlineQueryResult):
    """Inline-query result describing a voice recording hosted at a URL.

    Serialized for Telegram as a raw ``InputBotInlineResult`` whose content
    is an ``audio/mpeg`` web document (see :meth:`write`).
    """

    def __init__(self, voice_url: str, title: str, id: str=None, voice_duration: int=0, caption: str='', parse_mode: Optional['enums.ParseMode']=None, caption_entities: List['types.MessageEntity']=None, reply_markup: 'types.InlineKeyboardMarkup'=None, input_message_content: 'types.InputMessageContent'=None):
        # 'voice' is the Telegram result type; id, input_message_content and
        # reply_markup are stored by the base class.
        super().__init__('voice', id, input_message_content, reply_markup)
        self.voice_url = voice_url
        self.title = title
        self.voice_duration = voice_duration
        self.caption = caption
        self.parse_mode = parse_mode
        self.caption_entities = caption_entities

    async def write(self, client: 'pyrogram.Client'):
        """Build the raw Telegram API object for this inline result."""
        # size=0: remote file size is unknown to the client at this point.
        audio = raw.types.InputWebDocument(url=self.voice_url, size=0, mime_type='audio/mpeg', attributes=[raw.types.DocumentAttributeAudio(duration=self.voice_duration, title=self.title)])
        # parse_text_entities returns a dict; unpacking .values() relies on its
        # insertion order being (message, entities) -- NOTE(review): confirm.
        (message, entities) = (await utils.parse_text_entities(client, self.caption, self.parse_mode, self.caption_entities)).values()
        # If no explicit input_message_content was given, send the media itself.
        return raw.types.InputBotInlineResult(id=self.id, type=self.type, title=self.title, content=audio, send_message=((await self.input_message_content.write(client, self.reply_markup)) if self.input_message_content else raw.types.InputBotInlineMessageMediaAuto(reply_markup=((await self.reply_markup.write(client)) if self.reply_markup else None), message=message, entities=entities)))
# FIX(review): the decorator was stripped to a bare ``.parametrize(...)`` in the
# source; restored as ``@pytest.mark.parametrize``.  Several non-ASCII inputs
# appear to have been stripped to '' -- confirm against upstream.
@pytest.mark.parametrize('input_username, expected_output', [('jake', 'jake'), ('frank', 'frank'), ('fra-nk', 'fra-nk'), ('Jake', 'jake'), ('FranK', 'frank'), ('ja__ke', 'ja_ke'), ('ja___ke', 'ja_ke'), ('ja__', 'ja'), ('jake__', 'jake'), ('_jake', 'jake'), ('a', 'a0'), ('ab', 'ab'), ('abc', 'abc'), ('abcdefghijklmnopqrstuvwxyz', 'abcdefghijklmnopqrstuvwxyz'), (('c' * 256), ('c' * 255)), ('neid', 'aeneid'), ('etude', 'etude'), ('', 'bei_jing'), ('', 'shanana'), ('', 'taliqua'), ('', 'ptu_i'), ('', 'abhijiit'), ('', 'abhijiit'), ('', 'abhijiit'), ('', 'mlyaalm'), ('\ue000', '00'), ('', '00'), ('', 'ml_ml'), ('', 'mlml'), (b'kenny', 'kenny'), ((b'c' * 256), ('c' * 255)), (b'\\u0d2e\\u0d32\\u0d2e\\u0d32', 'u0d2e_u0d32_u0d2e_u0d32')])
def test_generate_valid_usernames(input_username, expected_output):
    """The first candidate yielded for each raw username must match the expected slug."""
    name_gen = generate_valid_usernames(input_username)
    # Only the first generated candidate is checked.
    generated_output = list(islice(name_gen, 1))[0]
    assert (generated_output == expected_output)
def _create_and_upload_workflows(workflow: str, workflow_range: (int, int), file: Optional[str]=None, workers: int=WORKERS_DEFAULT_COUNT) -> None:
    """Create and upload one workflow per index in *workflow_range* (inclusive ends), in parallel."""
    logger.info(f'Creating and uploading {workflow_range} workflows...')
    first, last = workflow_range
    with concurrent.futures.ThreadPoolExecutor(max_workers=workers) as executor:
        pending = [
            executor.submit(_create_and_upload_single_workflow,
                            build_extended_workflow_name(workflow, index), file)
            for index in range(first, last + 1)
        ]
        # Drain completions so any worker exception is re-raised here.
        for done in concurrent.futures.as_completed(pending):
            done.result()
class Model(TrainableModel):
    """Metric-learning model: MobileNetV3 encoder, identity head, and either a
    contrastive or a triplet loss (selected by ``loss_fn``)."""

    def __init__(self, embedding_size: int, lr: float, loss_fn: str, mining: str):
        self._embedding_size = embedding_size
        self._lr = lr
        self._loss_fn = loss_fn
        self._mining = mining
        super().__init__()

    def configure_encoders(self) -> Union[(Encoder, Dict[(str, Encoder)])]:
        return MobilenetV3Encoder(self._embedding_size)

    def configure_head(self, input_embedding_size) -> EncoderHead:
        # Identity head: embeddings are used exactly as the encoder produces them.
        return EmptyHead(input_embedding_size)

    def configure_loss(self) -> SimilarityLoss:
        if self._loss_fn == 'contrastive':
            return OnlineContrastiveLoss(mining=self._mining)
        return TripletLoss(mining=self._mining)

    def configure_optimizers(self):
        return torch.optim.Adam(self.model.parameters(), self._lr)
class RemovedCommand(KickstartCommand):
    """Abstract base for kickstart commands that have been removed.

    Subclasses represent commands that no longer exist: they contribute
    nothing to output and raise a parse error when encountered in input.
    """

    def __init__(self, writePriority=None, *args, **kwargs):
        # Direct instantiation is forbidden; only concrete subclasses may be built.
        if type(self) is RemovedCommand:
            raise TypeError('RemovedCommand is an abstract class.')
        KickstartCommand.__init__(self, writePriority, *args, **kwargs)

    def dataList(self):
        # Removed commands carry no data objects.
        return None

    def dataClass(self):
        return None

    def __str__(self):
        # Removed commands emit nothing into the generated kickstart file.
        return ''

    def parse(self, args):
        # Any occurrence of the command in input is a hard parse error.
        raise KickstartParseError(_('%s has been removed.') % self.currentCmd, lineno=self.lineno)
# FIX(review): the decorator was stripped to a bare ``.parametrize(...)`` in the
# source; restored as ``@pytest.mark.parametrize``.
@pytest.mark.parametrize(['func', 'kind'], [pytest.param((lambda t, A: (A - 4)), (lambda : qutip.MCSolver.ExpectFeedback(qutip.num(10))), id='expect'), pytest.param((lambda t, A: ((len(A) < 3) * 1.0)), (lambda : qutip.MCSolver.CollapseFeedback()), id='collapse')])
def test_feedback(func, kind):
    """A feedback-coupled collapse operator must keep <n> above 4 on every trajectory."""
    tol = 1e-06
    psi0 = qutip.basis(10, 7)
    a = qutip.destroy(10)
    H = qutip.QobjEvo(qutip.num(10))
    # The collapse rate depends on solver state through the feedback argument 'A'.
    solver = qutip.MCSolver(H, c_ops=[qutip.QobjEvo([a, func], args={'A': kind()})], options={'map': 'serial'})
    result = solver.run(psi0, np.linspace(0, 3, 31), e_ops=[qutip.num(10)], ntraj=10)
    assert np.all(result.expect[0] > (4.0 - tol))
def _encode_object(obj):
is_nonbool_int = (isinstance(obj, int) and (not isinstance(obj, (bool, np.bool_))))
is_encode_type = isinstance(obj, (float, str, np.integer, np.floating))
if (is_nonbool_int or is_encode_type):
return obj
elif isinstance(obj, np.ndarray):
return _encode_numpy_array(obj)
raise ValueError('Unable to encode') |
class TestCheckOverflow():
    """Tests for cmdutils.check_overflow 32-bit int range validation."""

    def test_good(self):
        # A small value fits into a 32-bit int and must pass silently.
        cmdutils.check_overflow(1, 'int')

    def test_bad(self):
        # One past INT32_MAX must be rejected with a CommandError.
        too_big = 2 ** 31
        with pytest.raises(cmdutils.CommandError, match='Numeric argument is too large for internal int representation.'):
            cmdutils.check_overflow(too_big, 'int')
class ViewProviderAsmElement(ViewProviderAsmOnTop):
    """View provider for assembly Element objects.

    Draws the element on top of the 3D view, optionally showing a small
    coordinate-system (CS) axis cross at the element's placement, and
    provides the element context menu (move/detach/fix/offset/flip/sync).

    FIX(review): several decorators were stripped in the source.  ``getAxis``
    is invoked as ``ViewProviderAsmElement.getAxis()`` with no argument (see
    setupAxis), so it must be a classmethod; the ``setupMenu`` /
    ``setupSyncNameMenu`` / ``doResetOffset`` / ``doFlip`` helpers take no
    ``self`` and are called via the class, so they are restored as static
    methods (behavior-compatible for class-level calls).
    """

    _iconName = 'Assembly_Assembly_Element.svg'
    _iconDisabledName = 'Assembly_Assembly_ElementDetached.svg'

    def __init__(self, vobj):
        # 'ShowCS' lets the user force-show the coordinate cross per element.
        vobj.addProperty('App::PropertyBool', 'ShowCS', '', 'Show coordinate cross')
        vobj.ShapeColor = self.getDefaultColor()
        vobj.PointColor = self.getDefaultColor()
        vobj.LineColor = self.getDefaultColor()
        vobj.Transparency = 50
        vobj.LineWidth = 4
        vobj.PointSize = 4
        # Coin scenegraph nodes for the axis cross; created lazily in setupAxis().
        self.axisNode = None
        self.transNode = None
        super(ViewProviderAsmElement, self).__init__(vobj)

    def attach(self, vobj):
        super(ViewProviderAsmElement, self).attach(vobj)
        vobj.OnTopWhenSelected = 2
        self.setupAxis()

    def getDefaultColor(self):
        # Light cyan.
        return ((60.0 / 255.0), 1.0, 1.0)

    def canDropObjectEx(self, _obj, owner, subname, elements):
        """Accept drops only of element geometry from this assembly's part group."""
        if not owner:
            return False
        if (not elements) and (not utils.isElement((owner, subname))):
            return False
        proxy = self.ViewObject.Object.Proxy
        return proxy.getAssembly().getPartGroup() == owner

    def dropObjectEx(self, vobj, _obj, owner, subname, elements):
        """Create a new AsmElement for each dropped sub-element (undoable)."""
        if not elements:
            elements = ['']
        for element in elements:
            AsmElement.make(AsmElement.Selection(SelObj=None, SelSubname=None, Element=vobj.Object, Group=owner, Subname=(subname + element)), undo=True)
        return '.'

    def doubleClicked(self, _vobj=None):
        # Double click starts moving the owner part with this element as reference.
        from . import mover
        return mover.movePart(element=self.ViewObject.Object, moveElement=False)

    def getIcon(self):
        # Detached elements show the disabled icon variant.
        return utils.getIcon(self.__class__, getattr(self.ViewObject.Object, 'Detach', False))

    def updateData(self, _obj, prop):
        vobj = getattr(self, 'ViewObject', None)
        if (not vobj) or FreeCAD.isRestoring():
            return
        if prop == 'Detach':
            vobj.signalChangeIcon()
        elif prop in ('Placement', 'Shape', 'Radius'):
            # Geometry changed: refresh the coordinate cross.
            self.setupAxis()

    # Shared (class-level) coin node for the axis cross, created lazily.
    _AxisOrigin = None

    def showCS(self):
        """Return True when the coordinate cross should be shown for this element."""
        vobj = getattr(self, 'ViewObject', None)
        if (not vobj) or hasProperty(vobj.Object, 'Radius'):
            return
        if getattr(vobj, 'ShowCS', False) or gui.AsmCmdManager.ShowElementCS or (not hasattr(vobj.Object, 'Shape')):
            return True
        return utils.isInfinite(vobj.Object.Shape)

    def getElementPicked(self, pp):
        """Prefer picks on the axis cross (when visible) over the object itself."""
        vobj = self.ViewObject
        if self.showCS():
            axis = self._AxisOrigin
            if axis:
                sub = axis.getElementPicked(pp)
                if sub:
                    return sub
        return vobj.getElementPicked(pp)

    def getDetailPath(self, subname, path, append):
        vobj = self.ViewObject
        if subname in ('X', 'Y', 'Z'):
            # Axis labels are not real sub-elements of the object.
            subname = ''
        return vobj.getDetailPath(subname, path, append)

    @classmethod
    def getAxis(cls):
        """Return the shared coin node of the axis cross, creating it on first use."""
        axis = cls._AxisOrigin
        if not axis:
            axis = FreeCADGui.AxisOrigin()
            # Hide the default X/Y/Z text labels.
            axis.Labels = {'X': '', 'Y': '', 'Z': ''}
            cls._AxisOrigin = axis
        return axis.Node

    def setupAxis(self):
        """Create, show, hide or reposition the coordinate cross for this element."""
        vobj = getattr(self, 'ViewObject', None)
        if not vobj:
            return
        switch = getattr(self, 'axisNode', None)
        if not self.showCS():
            # Hide the cross if it exists.
            if switch:
                switch.whichChild = -1
            return
        if not switch:
            parentSwitch = vobj.SwitchNode
            if not parentSwitch.getNumChildren():
                return
            from pivy import coin
            switch = coin.SoSwitch()
            node = coin.SoType.fromName('SoFCSelectionRoot').createInstance()
            switch.addChild(node)
            trans = coin.SoTransform()
            node.addChild(trans)
            node.addChild(ViewProviderAsmElement.getAxis())
            self.axisNode = switch
            self.transNode = trans
            # Attach the cross to the available display-mode groups.
            if parentSwitch.getNumChildren() > 1:
                parentSwitch.getChild(1).addChild(switch)
            if parentSwitch.getNumChildren() > 2:
                parentSwitch.getChild(2).addChild(switch)
        switch.whichChild = 0
        # Position the cross at the element's placement in object-local space.
        pla = vobj.Object.Placement.inverse().multiply(utils.getElementPlacement(vobj.Object.Shape))
        self.transNode.translation.setValue(pla.Base)
        self.transNode.rotation.setValue(pla.Rotation.Q)

    def onChanged(self, _vobj, prop):
        if prop == 'ShowCS':
            self.setupAxis()

    @staticmethod
    def setupMenu(menu, vobj, vobj2):
        """Populate *menu* with element actions.

        *vobj* provides detach/fix/sync handlers; *vobj2* provides the
        move/offset/flip handlers (both may be the same view object).
        """
        obj = vobj.Object
        action = QtGui.QAction(QtGui.QIcon(), 'Move part', menu)
        action.setToolTip('Move the owner part using this element as reference coordinate')
        QtCore.QObject.connect(action, QtCore.SIGNAL('triggered()'), vobj2.Proxy.doubleClicked)
        menu.addAction(action)
        action = QtGui.QAction(QtGui.QIcon(), ('Attach element' if obj.Detach else 'Detach element'), menu)
        if obj.Detach:
            action.setToolTip('Attach this element to its linked geometry,\nso that it will auto update on change.')
        else:
            action.setToolTip('Detach this element so that it stays the same\non change of the linked geometry.')
        QtCore.QObject.connect(action, QtCore.SIGNAL('triggered()'), vobj.Proxy.toggleDetach)
        menu.addAction(action)
        if obj.Proxy.isBroken():
            action = QtGui.QAction(QtGui.QIcon(), 'Fix element', menu)
            action.setToolTip('Auto fix broken element')
            QtCore.QObject.connect(action, QtCore.SIGNAL('triggered()'), vobj.Proxy.fix)
            menu.addAction(action)
        action = QtGui.QAction(QtGui.QIcon(), 'Offset element', menu)
        action.setToolTip('Activate dragger to offset this element')
        menu.addAction(action)
        QtCore.QObject.connect(action, QtCore.SIGNAL('triggered()'), vobj2.Proxy.offset)
        if vobj2.Object.Offset != FreeCAD.Placement():
            action = QtGui.QAction(QtGui.QIcon(), 'Reset offset', menu)
            action.setToolTip('Clear offset of this element')
            menu.addAction(action)
            QtCore.QObject.connect(action, QtCore.SIGNAL('triggered()'), vobj2.Proxy.resetOffset)
        action = QtGui.QAction(QtGui.QIcon(), 'Flip element', menu)
        action.setToolTip('Flip this element\' Z normal by rotating 180 degree\nalong the X axis (or Y axis by holding the CTRL key).\n\nNote that depending on the type of constraint and the\norder of the selected element, flipping element may not\nbe effective. You can try "Flip part" instead.')
        menu.addAction(action)
        QtCore.QObject.connect(action, QtCore.SIGNAL('triggered()'), vobj2.Proxy.flip)
        action = QtGui.QAction(QtGui.QIcon(), 'Flip part', menu)
        action.setToolTip('Flip the owner part using this element Z normal as\nreference, which is done by rotating 180 degree along\nthe element\'s X axis (or Y axis by holding the CTRL key).\n\nNote that depending on the type of constraint and the\norder of the selected element, flipping part may not\nbe effective. You can try "Flip element" instead.')
        menu.addAction(action)
        QtCore.QObject.connect(action, QtCore.SIGNAL('triggered()'), vobj2.Proxy.flipPart)
        ViewProviderAsmElement.setupSyncNameMenu('Sync element name', menu, vobj)

    @staticmethod
    def setupSyncNameMenu(name, menu, vobj):
        # Only offer the action when a sync is actually needed (check=True).
        if vobj.Proxy.syncElementName(True):
            action = QtGui.QAction(QtGui.QIcon(), name, menu)
            action.setToolTip('Adjust element name to match with any referenced\nassembly labels')
            menu.addAction(action)
            QtCore.QObject.connect(action, QtCore.SIGNAL('triggered()'), vobj.Proxy.syncElementName)

    def setupContextMenu(self, vobj, menu):
        ViewProviderAsmElement.setupMenu(menu, vobj, vobj)
        return True

    def syncElementName(self, check=False, transaction=True):
        """Rename the element label to its auto-generated form.

        With ``check=True`` only reports (bool) whether a rename is needed.
        """
        obj = self.ViewObject.Object
        label = obj.Proxy.autoLabel()
        if obj.Label == label:
            return False
        if check:
            return True
        if not transaction:
            obj.Label = label
            return
        FreeCAD.setActiveTransaction('Sync element name')
        try:
            obj.Label = label
            FreeCAD.ActiveDocument.recompute()
            FreeCAD.closeActiveTransaction()
        except Exception:
            # Abort the transaction on failure.
            FreeCAD.closeActiveTransaction(True)

    def fix(self):
        """Try to auto-repair a broken element inside an undo transaction."""
        obj = self.ViewObject.Object
        FreeCAD.setActiveTransaction('Fix element')
        try:
            obj.Proxy.fix()
            obj.recompute()
            FreeCAD.closeActiveTransaction()
        except Exception:
            FreeCAD.closeActiveTransaction(True)
            logger.error(traceback.format_exc())
            raise RuntimeError('Broken element.\n\nYou can manually fix it by select a new geometry,\ndrag and drop it to the broken element object')

    def toggleDetach(self):
        obj = self.ViewObject.Object
        FreeCAD.setActiveTransaction(('Attach element' if obj.Detach else 'Detach element'))
        try:
            obj.Detach = (not obj.Detach)
            FreeCAD.closeActiveTransaction()
        except Exception:
            FreeCAD.closeActiveTransaction(True)
            raise

    def offset(self):
        # Activate the dragger to offset this element.
        from . import mover
        return mover.movePart(element=self.ViewObject.Object, moveElement=True)

    @staticmethod
    def doResetOffset(obj):
        """Clear the element's offset placement inside an undo transaction."""
        FreeCAD.setActiveTransaction('Reset offset')
        # FIX: abort the transaction on failure (consistent with the other
        # transactional helpers); previously an exception left it open.
        try:
            obj.Offset = FreeCAD.Placement()
            obj.recompute(True)
            FreeCAD.closeActiveTransaction()
        except Exception:
            FreeCAD.closeActiveTransaction(True)
            raise

    def resetOffset(self):
        obj = self.ViewObject.Object
        ViewProviderAsmElement.doResetOffset(obj)

    @staticmethod
    def doFlip(obj, info, flipElement):
        """Rotate the element offset (flipElement=True) or the owner part
        (flipElement=False) by 180 degrees about the element's X axis, or the
        Y axis when CTRL is held."""
        if QtGui.QApplication.keyboardModifiers() == QtCore.Qt.ControlModifier:
            rot = FreeCAD.Rotation(FreeCAD.Vector(0, 1, 0), 180)
        else:
            rot = FreeCAD.Rotation(FreeCAD.Vector(1, 0, 0), 180)
        rot = FreeCAD.Placement(FreeCAD.Vector(), rot)
        title = ('Flip element' if flipElement else 'Flip part')
        FreeCAD.setActiveTransaction(title)
        try:
            if flipElement:
                obj.Offset = rot.multiply(obj.Offset)
            else:
                if hasProperty(obj, 'Count'):
                    # Array element: flip relative to the first expanded instance.
                    info = obj.Proxy.getInfo(expand=True)[0]
                    shape = Part.getShape(obj, ('%d.' % info.Part[1]), transform=False)
                    offset = utils.getElementPlacement(shape)
                else:
                    offset = utils.getElementPlacement(obj.getSubObject(''))
                # Conjugate the rotation into the part's coordinate system.
                offset = offset.multiply(rot).multiply(offset.inverse())
                pla = offset.multiply(info.Placement)
                setPlacement(info.Part, pla)
            obj.recompute(True)
            FreeCAD.closeActiveTransaction()
        except Exception:
            QtGui.QMessageBox.critical(None, 'Flip', (title + ' failed'))
            FreeCAD.closeActiveTransaction(True)
            raise

    def flip(self):
        obj = self.ViewObject.Object
        ViewProviderAsmElement.doFlip(obj, obj.Proxy.getInfo(), True)

    def flipPart(self):
        obj = self.ViewObject.Object
        ViewProviderAsmElement.doFlip(obj, obj.Proxy.getInfo(), False)

    def getLinkedViewProvider(self, recursive):
        """Return (view provider, subname) of the geometry this element links to,
        or just the view provider when there is no sub-path; None if unresolvable."""
        obj = self.ViewObject.Object
        try:
            sub = obj.Proxy.getElementSubname(recursive)
        except Exception:
            return
        linked = obj.Proxy.getAssembly().getPartGroup().getSubObject(sub, retType=1)
        if not linked:
            return
        subs = Part.splitSubname(sub)
        if subs[1] or subs[2]:
            return (linked.ViewObject, Part.joinSubname('', subs[1], subs[2]))
        return linked.ViewObject
class AutoUpdatePeekLayerDropdown(QtWidgets.QComboBox):
    """Combo box listing the map's layers for the 'peek' overlay, refreshing
    its entries automatically on mouse press.

    Parameters
    ----------
    m : map object providing the layer list (via ``m._get_layers``)
    layers : explicit list of layers to show (overrides auto-detection)
    exclude : layers to exclude from auto-detection
    use_active : if True, keep the currently active background layer selected
    empty_ok : if True, prepend an empty entry meaning "no peek layer"
    """

    def __init__(self, *args, m=None, layers=None, exclude=None, use_active=False, empty_ok=True, **kwargs):
        super().__init__(*args, **kwargs)
        self.m = m
        self._layers = layers
        self._exclude = exclude
        self._use_active = use_active
        self._empty_ok = empty_ok
        self.last_layers = []
        self._last_active = None
        self.update_layers()
        self.setSizeAdjustPolicy(self.AdjustToMinimumContentsLengthWithIcon)
        # Remember the user's explicit choice so it can be restored after refreshes.
        self.activated.connect(self.set_last_active)

    def enterEvent(self, e):
        # Show the contextual help tooltip when the window's help mode is on.
        if self.window().showhelp is True:
            QtWidgets.QToolTip.showText(e.globalPos(), '<h3>Peek Layer</h3>Select a layer to peek on.<p>An overlay of the selected layer will be printed on top of the currently visible layer. The controls on the side can be used to select the peek-method as well as the transparency of the overlay.')

    def set_last_active(self):
        self._last_active = self.currentText()

    def mousePressEvent(self, event):
        # Refresh the layer list on any click so the dropdown is always current.
        if event.button() == Qt.RightButton:
            self.update_layers()
        elif event.button() == Qt.LeftButton:
            self.update_layers()
        super().mousePressEvent(event)

    @property
    def layers(self):
        # FIX(review): restored as a property -- update_layers() reads
        # ``self.layers`` without calling it, so the stripped decorator left
        # the bare method object in use.  Confirm against upstream.
        if self._layers is not None:
            return self._layers
        # Auto-detect, hiding private ('_'-prefixed) layers.
        return [i for i in self.m._get_layers(exclude=self._exclude) if not str(i).startswith('_')]

    def update_layers(self):
        """Rebuild the item list if the set of available layers changed."""
        layers = self.layers
        if set(layers) == set(self.last_layers):
            return
        self.last_layers = layers
        self.clear()
        if self._empty_ok:
            self.addItem('')
        view = self.view()
        view.setTextElideMode(Qt.ElideNone)
        for key in layers:
            # 'all' is a reserved meta-layer, never offered for peeking.
            if key == 'all':
                continue
            self.addItem(str(key))
        view.setFixedWidth(view.sizeHintForColumn(0) + 10)
        if self._use_active:
            currindex = self.findText(str(self.m.BM.bg_layer))
            self.setCurrentIndex(currindex)
        elif self._last_active is not None:
            # Restore the user's previous selection if it still exists.
            idx = self.findText(self._last_active)
            if idx != -1:
                self.setCurrentIndex(idx)
def test_jax_shape_ops():
    """Shape and Shape_i ops must transpile to JAX and agree with the Python backend."""
    base = np.zeros((20, 3))
    full_shape = Shape()(pt.as_tensor_variable(base))
    compare_jax_and_py(FunctionGraph([], [full_shape]), [], must_be_device_array=False)
    second_dim = Shape_i(1)(pt.as_tensor_variable(base))
    compare_jax_and_py(FunctionGraph([], [second_dim]), [], must_be_device_array=False)
class FC(nn.Sequential):
    """Fully connected layer with optional batch-norm and activation.

    Module order is ``[bn] -> [activation] -> fc`` when ``preact`` is True
    and ``fc -> [bn] -> [activation]`` otherwise.  With ``bn`` enabled the
    linear layer has no bias (the batch-norm shift replaces it).

    NOTE: the shared default ``activation`` module instance is part of the
    existing public interface and is kept for backward compatibility.
    """

    def __init__(self, in_size: int, out_size: int, *, activation=nn.ReLU(inplace=True), bn: bool=False, init=None, preact: bool=False, name: str=''):
        super().__init__()
        fc = nn.Linear(in_size, out_size, bias=(not bn))
        if init is not None:
            init(fc.weight)
        if not bn:
            # FIX: use the in-place initializer; ``nn.init.constant`` is the
            # deprecated (and since removed) alias of ``nn.init.constant_``.
            nn.init.constant_(fc.bias, 0)
        if preact:
            if bn:
                self.add_module(name + 'bn', BatchNorm1d(in_size))
            if activation is not None:
                self.add_module(name + 'activation', activation)
        self.add_module(name + 'fc', fc)
        if not preact:
            if bn:
                self.add_module(name + 'bn', BatchNorm1d(out_size))
            if activation is not None:
                self.add_module(name + 'activation', activation)
def test_maximum_builds(app):
    """A second queued build beyond the user's quota of one must raise."""
    user = model.user.create_user('foobar', 'password', '')
    user.maximum_queued_builds_count = 1
    user.save()
    repo = model.repository.create_repository('foobar', 'somerepo', user)
    build = PreparedBuild()
    build.build_name = 'foo'
    build.is_manual = True
    build.dockerfile_id = 'foobar'
    build.archive_url = 'someurl'
    build.tags = ['latest']
    build.subdirectory = '/'
    build.context = '/'
    build.metadata = {}
    # The first build fits inside the quota...
    start_build(repo, build)
    # ...the second attempt must be rejected.
    with pytest.raises(MaximumBuildsQueuedException):
        start_build(repo, build)
def pq_compute_multi_core(matched_annotations_list, gt_folder, pred_folder, categories):
    """Compute panoptic-quality statistics in parallel, one worker per CPU core.

    The annotation list is split evenly across processes; per-core results
    are accumulated into a single PQStat which is returned.
    """
    cpu_num = multiprocessing.cpu_count()
    annotations_split = np.array_split(matched_annotations_list, cpu_num)
    print('Number of cores: {}, images per core: {}'.format(cpu_num, len(annotations_split[0])))
    pq_stat = PQStat()
    # FIX: manage the pool with a context manager so worker processes are
    # reliably shut down (the pool previously leaked).
    with multiprocessing.Pool(processes=cpu_num) as workers:
        processes = [workers.apply_async(pq_compute_single_core, (proc_id, annotation_set, gt_folder, pred_folder, categories)) for (proc_id, annotation_set) in enumerate(annotations_split)]
        for p in processes:
            pq_stat += p.get()
    return pq_stat
class RemoveCmdPrefix(_QtileMigrator):
    # Migration that strips the deprecated ``cmd_`` prefix from qtile command
    # API calls and from user-defined command methods.
    ID = 'RemoveCmdPrefix'
    SUMMARY = 'Removes ``cmd_`` prefix from method calls and definitions.'
    # NOTE(review): the ``_command`` fragments below look like a mangled
    # ``@expose_command`` decorator from data extraction -- confirm upstream.
    HELP = '\n    The ``cmd_`` prefix was used to identify methods that should be exposed to\n    qtile\'s command API. This has been deprecated and so calls no longer require\n    the prefix.\n\n    For example:\n\n    .. code:: python\n\n        qtile.cmd_spawn("vlc")\n\n    would be replaced with:\n\n    .. code:: python\n\n        qtile.spawn("vlc")\n\n    Where users have created their own widgets with methods using this prefix,\n    the syntax has also changed:\n\n    For example:\n\n    .. code:: python\n\n        class MyWidget(libqtile.widget.base._Widget):\n            def cmd_my_command(self):\n                pass\n\n    Should be updated as follows:\n\n    .. code:: python\n\n        from libqtile.command.base import expose_command\n\n        class MyWidget(libqtile.widget.base._Widget):\n            _command\n            def my_command(self):\n                pass\n    '
    AFTER_VERSION = '0.22.1'
    TESTS = [Change('qtile.cmd_spawn("alacritty")', 'qtile.spawn("alacritty")'), Change('qtile.cmd_groups()', 'qtile.get_groups()'), Change('qtile.cmd_screens()', 'qtile.get_screens()'), Change('qtile.current_window.cmd_hints()', 'qtile.current_window.get_hints()'), Change('qtile.current_window.cmd_opacity(0.5)', 'qtile.current_window.set_opacity(0.5)'), Change('\n                class MyWidget(widget.Clock):\n                    def cmd_my_command(self):\n                        pass\n                ', '\n                from libqtile.command.base import expose_command\n\n                class MyWidget(widget.Clock):\n                    _command\n                    def my_command(self):\n                        pass\n                '), NoChange('\n                def cmd_some_other_func():\n                    pass\n                '), Check('\n                from libqtile import qtile, widget\n\n                class MyClock(widget.Clock):\n                    def cmd_my_exposed_command(self):\n                        pass\n\n                def my_func(qtile):\n                    qtile.cmd_spawn("rickroll")\n                    hints = qtile.current_window.cmd_hints()\n                    groups = qtile.cmd_groups()\n                    screens = qtile.cmd_screens()\n                    qtile.current_window.cmd_opacity(0.5)\n\n                def cmd_some_other_func():\n                    pass\n                ', '\n                from libqtile import qtile, widget\n                from libqtile.command.base import expose_command\n\n                class MyClock(widget.Clock):\n                    _command\n                    def my_exposed_command(self):\n                        pass\n\n                def my_func(qtile):\n                    qtile.spawn("rickroll")\n                    hints = qtile.current_window.get_hints()\n                    groups = qtile.get_groups()\n                    screens = qtile.get_screens()\n                    qtile.current_window.set_opacity(0.5)\n\n                def cmd_some_other_func():\n                    pass\n                ')]

    def run(self, original):
        """Apply the cmd_-prefix removal to *original* and return
        ``(original, updated)`` as required by the migrator interface."""
        transformer = CmdPrefixTransformer()
        updated = original.visit(transformer)
        self.update_lint(transformer)
        # Methods converted to use @expose_command need the import added.
        if transformer.needs_import:
            context = codemod.CodemodContext()
            AddImportsVisitor.add_needed_import(context, 'libqtile.command.base', 'expose_command')
            visitor = AddImportsVisitor(context)
            updated = updated.visit(visitor)
        return (original, updated)
class TestSerialLoopback():
    """Loopback tests for SerialAdapter against a paired serial device.

    ``connected_device_address`` is a comma-separated pair of device paths:
    "adapter_device,loopback_device".

    FIX(review): the fixture/parametrize decorators were stripped to bare
    ``()`` / ``.parametrize(...)`` in the source; restored as pytest
    decorators -- confirm against upstream.
    """

    ADAPTER_TIMEOUT = 1.0

    @pytest.fixture
    def adapter(self, connected_device_address):
        device = connected_device_address.split(',')[0]
        return SerialAdapter(device, baudrate=19200, timeout=self.ADAPTER_TIMEOUT, read_termination=chr(15))

    @pytest.fixture
    def loopback(self, connected_device_address):
        device = connected_device_address.split(',')[1]
        return Serial(device, baudrate=19200)

    def test_read(self, adapter, loopback):
        loopback.write(b'abc')
        result = adapter.read_bytes(3)
        assert len(result) == 3

    @pytest.mark.parametrize('data', [b'a', (b'aaaaabbbbbccccceeeee' * 50), (b'a' * 256), (b'aaaaabbbbb\nccccceeeee' * 50)])
    def test_read_all(self, adapter, loopback, data):
        loopback.write(data)
        start = time()
        result = adapter.read_bytes(-1)
        elapsed = time() - start
        assert result == data
        # read_bytes(-1) reads until timeout, so the call should take about
        # ADAPTER_TIMEOUT seconds.
        assert self.ADAPTER_TIMEOUT == pytest.approx(elapsed, abs=0.1)

    @pytest.mark.parametrize('chunk', [1, 256, 10000])
    def test_read_varied_chunk_size(self, adapter, loopback, chunk):
        data = b'abcde' * 10
        loopback.write(data)
        result = adapter.read_bytes(-1)
        assert result == data
def arguments_mock():
    """Build a SimpleNamespace mimicking the parsed command-line arguments,
    with safe defaults for tests (no TLD download, insert mode on)."""
    defaults = {
        'url': '',
        'dmenu_invocation': 'rofi -dmenu',
        'insert_mode': True,
        'io_encoding': 'UTF-8',
        'merge_candidates': False,
        'password_only': False,
        'username_only': False,
        'no_tld_download': True,
    }
    return SimpleNamespace(**defaults)
def transform_for_stmt(builder: IRBuilder, s: ForStmt) -> None:
    """Lower a (possibly async) for statement, including its optional else block."""
    def gen_body() -> None:
        builder.accept(s.body)

    def gen_else() -> None:
        assert s.else_body is not None
        builder.accept(s.else_body)

    # Only pass an else generator when the statement actually has an else block.
    else_gen = gen_else if s.else_body else None
    for_loop_helper(builder, s.index, s.expr, gen_body, else_gen, s.is_async, s.line)
class CsObject():
    """Base record for an annotation object: type, label, lifecycle flags and
    a human-readable modification date."""

    # Python-2 style abstract marker; kept for backward compatibility
    # (it has no effect on Python 3).
    __metaclass__ = ABCMeta

    # Candidate locales tried in order so the date renders with English
    # month abbreviations.
    _DATE_LOCALES = ('en_US.utf8', 'en_US', 'us_us.utf8', 'us_us')

    def __init__(self, objType):
        self.objectType = objType
        self.label = ''
        # 0/1 flags (not bools) for JSON compatibility.
        self.deleted = 0
        self.verified = 0
        self.date = ''
        self.user = ''
        self.draw = True

    def __str__(self):
        pass

    def fromJsonText(self, jsonText, objId=(-1)):
        pass

    def toJsonText(self):
        pass

    def updateDate(self):
        """Record the current time in self.date, preferring an English locale.

        FIX: the original chained several ``except locale.Error`` clauses on a
        single try, so only the first fallback could ever run (the rest were
        dead); each candidate locale is now attempted in turn.
        """
        for loc in self._DATE_LOCALES:
            try:
                locale.setlocale(locale.LC_ALL, loc)
                break
            except locale.Error:
                continue
        # Even if no candidate locale could be set, still record the date.
        self.date = datetime.datetime.now().strftime('%d-%b-%Y %H:%M:%S')

    def delete(self):
        # Soft delete: mark and stop drawing; the record itself is retained.
        self.deleted = 1
        self.draw = False
def get_env_info():
    """Return a banner string with the BasicSR logo plus the versions of
    BasicSR, PyTorch and TorchVision."""
    # Imported lazily so that importing this module alone does not require
    # torch / torchvision to be installed.
    import torch
    import torchvision
    from basicsr.version import __version__
    # ASCII-art banner (escaped backslashes keep the art intact in the literal).
    msg = '\n ____ _ _____ ____\n / __ ) ____ _ _____ (_)_____/ ___/ / __ \\\n / __ |/ __ `// ___// // ___/\\__ \\ / /_/ /\n / /_/ // /_/ /(__ )/ // /__ ___/ // _, _/\n /_____/ \\__,_//____//_/ \\___//____//_/ |_|\n ______ __ __ __ __\n / ____/____ ____ ____/ / / / __ __ _____ / /__ / /\n / / __ / __ \\ / __ \\ / __ / / / / / / // ___// //_/ / /\n / /_/ // /_/ // /_/ // /_/ / / /___/ /_/ // /__ / /< /_/\n \\____/ \\____/ \\____/ \\____/ /_____/\\____/ \\___//_/|_| (_)\n '
    msg += f'''
Version Information:
BasicSR: {__version__}
PyTorch: {torch.__version__}
TorchVision: {torchvision.__version__}'''
    return msg
def get_project_dependencies(project_requires: list[Dependency], locked_packages: list[Package], root_package_name: NormalizedName) -> Iterable[tuple[(Package, Dependency)]]:
    """Walk the project's locked dependency graph.

    Groups *locked_packages* by name (newest version first) and delegates the
    graph walk to ``walk_dependencies``; returns its (package, dependency)
    pairs.
    """
    packages_by_name: dict[(str, list[Package])] = {}
    for pkg in locked_packages:
        # Idiomatic grouping: setdefault replaces the manual membership check.
        packages_by_name.setdefault(pkg.name, []).append(pkg)
    # Prefer the newest version of each package first.
    for packages in packages_by_name.values():
        packages.sort(key=(lambda package: package.version), reverse=True)
    nested_dependencies = walk_dependencies(dependencies=project_requires, packages_by_name=packages_by_name, root_package_name=root_package_name)
    return nested_dependencies.items()
class SerializableOptimizer(Configurable):
    """Pickle-friendly wrapper around a TensorFlow optimizer.

    The learning rate is materialized as a TF variable (`lr_op`) so it can be
    decayed and reassigned; the op itself is dropped on pickling (see
    __getstate__) and rebuilt on the next call to get().
    """

    def __init__(self, opt_name, params=None):
        # FIX: guard against params=None (previously ``'learning_rate' not in
        # None`` raised TypeError instead of the intended ValueError).
        if params is None or 'learning_rate' not in params:
            raise ValueError('Must include learning rate')
        self.params = params
        self.opt_name = opt_name
        self.lr_op = None

    def get_params(self):
        return dict(opt_name=self.opt_name, params=self.params)

    def get(self, name=None, lr_decay=None, global_step=None):
        """Build and return the configured TF optimizer instance.

        When *lr_decay* is given, the learning rate variable is assigned the
        decayed schedule; otherwise the stored constant rate is used.
        """
        params = {} if self.params is None else self.params.copy()
        with tf.variable_scope('opt'):
            lr_tensor = tf.get_variable('lr', dtype=tf.float32, initializer=tf.constant(params['learning_rate']), trainable=False)
            if lr_decay is not None:
                params['learning_rate'] = lr_decay(learning_rate=params['learning_rate'], global_step=global_step, name='lr_decay')
            self.lr_op = lr_tensor if lr_decay is None else lr_tensor.assign(params['learning_rate'])
            params['learning_rate'] = self.lr_op
            # Dispatch table instead of the if/elif chain.
            optimizers = {'Adam': AdamOptimizer, 'Adadelta': AdadeltaOptimizer, 'RMSprop': RMSPropOptimizer, 'Momentum': MomentumOptimizer}
            opt_cls = optimizers.get(self.opt_name)
            if opt_cls is None:
                # FIX: ``raise NotImplemented()`` tried to *call* the
                # NotImplemented singleton, which is itself a TypeError.
                raise NotImplementedError('Unknown optimizer: %s' % self.opt_name)
            if name is None:
                return opt_cls(**params)
            return opt_cls(name=name, **params)

    def __getstate__(self):
        # TF ops are not picklable; drop lr_op from the serialized state.
        state = self.__dict__.copy()
        state['lr_op'] = None
        return state

    def __setstate__(self, state):
        # Tolerate older pickles produced before lr_op existed.
        if 'lr_op' not in state:
            state['lr_op'] = None
        self.__dict__ = state
def test_to_pep_508_caret() -> None:
    """Caret constraints must translate to the equivalent PEP 508 version range."""
    cases = [
        ('^1.2.3', 'foo (>=1.2.3,<2.0.0)'),
        ('^1.2', 'foo (>=1.2,<2.0)'),
        # For 0.x versions the caret only allows patch-level changes.
        ('^0.2.3', 'foo (>=0.2.3,<0.3.0)'),
        ('^0.2', 'foo (>=0.2,<0.3)'),
    ]
    for constraint, expected in cases:
        assert Dependency('foo', constraint).to_pep_508() == expected
def _get_labels_and_probs(y_pred: np.ndarray, task_type: TaskType, prediction_type: Optional[PredictionType]) -> tuple[(np.ndarray, Optional[np.ndarray])]:
    """Convert raw classifier output into hard labels plus (optionally) probabilities.

    With no prediction type the input is assumed to already contain labels.
    """
    assert task_type in (TaskType.BINCLASS, TaskType.MULTICLASS)
    if prediction_type is None:
        return (y_pred, None)
    is_binary = task_type == TaskType.BINCLASS
    if prediction_type == PredictionType.LOGITS:
        # sigmoid for binary, per-row softmax for multiclass
        probs = scipy.special.expit(y_pred) if is_binary else scipy.special.softmax(y_pred, axis=1)
    elif prediction_type == PredictionType.PROBS:
        probs = y_pred
    else:
        util.raise_unknown('prediction_type', prediction_type)
    assert probs is not None
    # Threshold at 0.5 for binary; argmax over classes otherwise.
    labels = np.round(probs) if is_binary else probs.argmax(axis=1)
    return (labels.astype('int64'), probs)
def test_vertical_perspective():
    """CF export of a Vertical Perspective CRS must round-trip all parameters."""
    crs = ProjectedCRS(conversion=VerticalPerspectiveConversion(50, 0, 1, 0, 2, 3))
    ellipsoid_cf = {'semi_major_axis': 6378137.0, 'semi_minor_axis': crs.ellipsoid.semi_minor_metre, 'inverse_flattening': crs.ellipsoid.inverse_flattening, 'reference_ellipsoid_name': 'WGS 84', 'longitude_of_prime_meridian': 0.0, 'prime_meridian_name': 'Greenwich', 'horizontal_datum_name': 'World Geodetic System 1984 ensemble'}
    projection_cf = {'grid_mapping_name': 'vertical_perspective', 'perspective_point_height': 50.0, 'latitude_of_projection_origin': 0.0, 'longitude_of_projection_origin': 1.0, 'false_easting': 2.0, 'false_northing': 3.0, 'geographic_crs_name': 'undefined', 'projected_crs_name': 'undefined'}
    expected_cf = {**ellipsoid_cf, **projection_cf}
    cf_dict = crs.to_cf()
    # The WKT entry is only prefix-checked (its full text is version-dependent).
    assert cf_dict.pop('crs_wkt').startswith('PROJCRS[')
    assert cf_dict == expected_cf
    _test_roundtrip(expected_cf, 'PROJCRS[')
    x_axis = {'axis': 'X', 'long_name': 'Easting', 'standard_name': 'projection_x_coordinate', 'units': 'metre'}
    y_axis = {'axis': 'Y', 'long_name': 'Northing', 'standard_name': 'projection_y_coordinate', 'units': 'metre'}
    assert crs.cs_to_cf() == [x_axis, y_axis]
def main():
    """CLI entry point: preprocess the selected VQA dataset variant."""
    parser = argparse.ArgumentParser('Dataset preprocessing')
    parser.add_argument('dataset', choices=['cp_v2', 'v2', 'cp_v1'])
    args = parser.parse_args()
    # Dispatch table instead of an if/elif chain; argparse guarantees the
    # chosen dataset is one of the keys.
    loaders = {'v2': load_v2, 'cp_v1': load_cp_v1, 'cp_v2': load_cp}
    loaders[args.dataset]()
def find_closest_msssim(target, img, fmt='jpeg'):
    """Binary-search codec quality (0..100) for the encoding of *img* whose
    MS-SSIM against the original is closest to *target*.

    Returns ``(reconstruction, bpp, msssim)`` for the last quality probed.
    At most 10 bisection steps are taken; the loop also stops once the
    integer quality no longer changes between iterations.
    """
    lower = 0
    upper = 100
    prev_mid = upper

    def _mssim(a, b):
        # Convert PIL images to 1xCxHxW float tensors in [0, 255] for ms_ssim.
        a = torch.from_numpy(np.asarray(a).astype(np.float32)).permute(2, 0, 1).unsqueeze(0)
        b = torch.from_numpy(np.asarray(b).astype(np.float32)).permute(2, 0, 1).unsqueeze(0)
        return ms_ssim(a, b, data_range=255.0).item()

    for i in range(10):
        # Float midpoint; quality is truncated to int only when encoding.
        mid = (((upper - lower) / 2) + lower)
        # Converged: the integer quality did not change since last step.
        if (int(mid) == int(prev_mid)):
            break
        prev_mid = mid
        (rec, bpp) = pillow_encode(img, fmt=fmt, quality=int(mid))
        msssim_val = _mssim(rec, img)
        # Higher quality -> higher MS-SSIM, so shrink the interval accordingly.
        if (msssim_val > target):
            upper = (mid - 1)
        else:
            lower = mid
    return (rec, bpp, msssim_val)
def train(args):
    """Train a SepConvLSTM violence-detection model on the selected dataset.

    Builds the fusion model chosen by ``args.fusionType`` (C/A/M), prepares
    (optionally preprocesses) the dataset, then runs model.fit with
    checkpointing and LR scheduling.  Most behavior is driven by the parsed
    CLI ``args``.
    """
    mode = args.mode
    # Only the Concat (C) fusion supports single-stream modes / 3dconvblock.
    if (args.fusionType != 'C'):
        if (args.mode != 'both'):
            print("Only Concat fusion supports one stream versions. Changing mode to /'both/'...")
            mode = 'both'
        if (args.lstmType == '3dconvblock'):
            raise Exception('3dconvblock instead of lstm is only available for fusionType C ! aborting execution...')
    # Select the model-builder function for the requested fusion type.
    if (args.fusionType == 'C'):
        model_function = models.getProposedModelC
    elif (args.fusionType == 'A'):
        model_function = models.getProposedModelA
    elif (args.fusionType == 'M'):
        model_function = models.getProposedModelM
    dataset = args.dataset
    dataset_videos = {'hockey': 'raw_videos/HockeyFights', 'movies': 'raw_videos/movies'}
    # Per-dataset initial learning rates.
    if (dataset == 'rwf2000'):
        initial_learning_rate = 0.0004
    elif (dataset == 'hockey'):
        initial_learning_rate = 1e-06
    elif (dataset == 'movies'):
        initial_learning_rate = 1e-05
    batch_size = args.batchSize
    vid_len = args.vidLen
    # Stored frame size differs per dataset; the network input is always 224.
    if (dataset == 'rwf2000'):
        dataset_frame_size = 320
    else:
        dataset_frame_size = 224
    frame_diff_interval = 1
    input_frame_size = 224
    lstm_type = args.lstmType
    # Per-dataset (x, y) crop offsets to remove dark borders.
    crop_dark = {'hockey': (16, 45), 'movies': (18, 48), 'rwf2000': (0, 0)}
    epochs = args.numEpochs
    preprocess_data = args.preprocessData
    create_new_model = (not args.resume)
    save_path = args.savePath
    resume_path = args.resumePath
    background_suppress = args.noBackgroundSuppression
    # Resolve checkpoint paths.
    if (resume_path == 'NOT_SET'):
        currentModelPath = os.path.join(save_path, (str(dataset) + '_currentModel'))
    else:
        currentModelPath = resume_path
    bestValPath = os.path.join(save_path, (str(dataset) + '_best_val_acc_Model'))
    rwfPretrainedPath = args.rwfPretrainedPath
    if (rwfPretrainedPath == 'NOT_SET'):
        if (lstm_type == 'sepconv'):
            rwfPretrainedPath = './trained_models/rwf2000_model/sepconvlstm-M/model/rwf2000_model'
        else:
            pass
    resume_learning_rate = 5e-05
    cnn_trainable = True
    one_hot = False
    loss = 'binary_crossentropy'
    # Optional preprocessing: convert raw videos to .npy clips.
    if preprocess_data:
        if (dataset == 'rwf2000'):
            os.mkdir(os.path.join(dataset, 'processed'))
            convert_dataset_to_npy(src='{}/RWF-2000'.format(dataset), dest='{}/processed'.format(dataset), crop_x_y=None, target_frames=vid_len, frame_size=dataset_frame_size)
        else:
            # hockey/movies need a fresh train/test split first.
            if os.path.exists('{}'.format(dataset)):
                shutil.rmtree('{}'.format(dataset))
            split = train_test_split(dataset_name=dataset, source=dataset_videos[dataset])
            os.mkdir(dataset)
            os.mkdir(os.path.join(dataset, 'videos'))
            move_train_test(dest='{}/videos'.format(dataset), data=split)
            os.mkdir(os.path.join(dataset, 'processed'))
            convert_dataset_to_npy(src='{}/videos'.format(dataset), dest='{}/processed'.format(dataset), crop_x_y=crop_dark[dataset], target_frames=vid_len, frame_size=dataset_frame_size)
    train_generator = DataGenerator(directory='{}/processed/train'.format(dataset), batch_size=batch_size, data_augmentation=True, shuffle=True, one_hot=one_hot, sample=False, resize=input_frame_size, background_suppress=background_suppress, target_frames=vid_len, dataset=dataset, mode=mode)
    test_generator = DataGenerator(directory='{}/processed/test'.format(dataset), batch_size=batch_size, data_augmentation=False, shuffle=False, one_hot=one_hot, sample=False, resize=input_frame_size, background_suppress=background_suppress, target_frames=vid_len, dataset=dataset, mode=mode)
    print('> cnn_trainable : ', cnn_trainable)
    # Build a fresh model or resume from a checkpoint.
    if create_new_model:
        print('> creating new model...')
        model = model_function(size=input_frame_size, seq_len=vid_len, cnn_trainable=cnn_trainable, frame_diff_interval=frame_diff_interval, mode=mode, lstm_type=lstm_type)
        if ((dataset == 'hockey') or (dataset == 'movies')):
            # Small datasets start from rwf2000-pretrained weights.
            print('> loading weights pretrained on rwf dataset from', rwfPretrainedPath)
            model.load_weights(rwfPretrainedPath)
        optimizer = Adam(lr=initial_learning_rate, amsgrad=True)
        model.compile(optimizer=optimizer, loss=loss, metrics=['acc'])
        print('> new model created')
    else:
        print('> getting the model from...', currentModelPath)
        if (dataset == 'rwf2000'):
            model = model_function(size=input_frame_size, seq_len=vid_len, cnn_trainable=cnn_trainable, frame_diff_interval=frame_diff_interval, mode=mode, lstm_type=lstm_type)
            optimizer = Adam(lr=resume_learning_rate, amsgrad=True)
            model.compile(optimizer=optimizer, loss=loss, metrics=['acc'])
            model.load_weights(f'{currentModelPath}')
        elif ((dataset == 'hockey') or (dataset == 'movies')):
            model = model_function(size=input_frame_size, seq_len=vid_len, cnn_trainable=cnn_trainable, frame_diff_interval=frame_diff_interval, mode=mode, lstm_type=lstm_type)
            optimizer = Adam(lr=initial_learning_rate, amsgrad=True)
            model.compile(optimizer=optimizer, loss=loss, metrics=['acc'])
            model.load_weights(f'{currentModelPath}')
    print('> Summary of the model : ')
    model.summary(line_length=140)
    print('> Optimizer : ', model.optimizer.get_config())
    dot_img_file = 'model_architecture.png'
    print('> plotting the model architecture and saving at ', dot_img_file)
    plot_model(model, to_file=dot_img_file, show_shapes=True)
    # Checkpoints: every epoch (current) + best validation accuracy.
    modelcheckpoint = ModelCheckpoint(currentModelPath, monitor='loss', verbose=0, save_best_only=False, save_weights_only=True, mode='auto', save_freq='epoch')
    modelcheckpointVal = ModelCheckpoint(bestValPath, monitor='val_acc', verbose=0, save_best_only=True, save_weights_only=True, mode='auto', save_freq='epoch')
    historySavePath = os.path.join(save_path, 'results', str(dataset))
    save_training_history = SaveTrainingCurves(save_path=historySavePath)
    callback_list = [modelcheckpoint, modelcheckpointVal, save_training_history]
    callback_list.append(LearningRateScheduler(lr_scheduler, verbose=0))
    model.fit(steps_per_epoch=len(train_generator), x=train_generator, epochs=epochs, validation_data=test_generator, validation_steps=len(test_generator), verbose=1, workers=8, max_queue_size=8, use_multiprocessing=False, callbacks=callback_list)
def make_labels(n=1000, n_classes=3, one_hot=False, seed=99999):
    """Generate a reproducible pair of random label arrays for scoring tests.

    Parameters
    ----------
    n : int
        Number of samples in each array.
    n_classes : int
        Labels are drawn uniformly from ``[0, n_classes)``.
    one_hot : bool
        If True, return boolean one-hot matrices (one column per class value
        actually present) instead of 1-D integer arrays.
    seed : int
        Seed for a private ``RandomState`` so the output is deterministic.

    Returns
    -------
    tuple of ndarray
        ``(ref_labels, sys_labels)`` — reference and system labels.
    """
    rng = np.random.RandomState(seed)

    def _encode(labels):
        # Column-stack one boolean column per distinct class value; `idx` are
        # the inverse indices returned by np.unique.
        classes, idx = np.unique(labels, return_inverse=True)
        return np.column_stack([idx == c for c in classes])

    ref = rng.randint(n_classes, size=n)
    sys_ = rng.randint(n_classes, size=n)
    if one_hot:
        return _encode(ref), _encode(sys_)
    return ref, sys_
def test_base_variables():
    """Check that ``_base_``-style variable references resolve across config
    formats (.py/.json/.yaml) and across one and two levels of inheritance."""

    def _assert_common_items(cfg, cfg_file):
        # Items that every config flavour under test must expose identically.
        assert isinstance(cfg, Config)
        assert cfg.filename == cfg_file
        assert cfg.item1 == [1, 2]
        assert cfg.item2.a == 0
        assert cfg.item3 is False
        assert cfg.item4 == 'test'
        assert cfg.item5 == dict(a=0, b=1)
        assert cfg.item6 == [dict(a=0), dict(b=1)]
        assert cfg.item7 == dict(a=[0, 1, 2], b=dict(c=[3.1, 4.2, 5.3]))
        assert cfg.item9 == dict(a=0)
        assert cfg.item10 == [3.1, 4.2, 5.3]

    # Direct base configs: item8 echoes the file's own name.
    for file in ['t.py', 't.json', 't.yaml']:
        cfg_file = osp.join(data_path, f'config/{file}')
        cfg = Config.fromfile(cfg_file)
        _assert_common_items(cfg, cfg_file)
        assert cfg.item8 == file
    # One level of inheritance: items resolve to the base ('t.py') values.
    for file in ['u.py', 'u.json', 'u.yaml']:
        cfg_file = osp.join(data_path, f'config/{file}')
        cfg = Config.fromfile(cfg_file)
        _assert_common_items(cfg, cfg_file)
        assert cfg.base == '_base_.item8'
        assert cfg.item8 == 't.py'
        assert cfg.item11 == 't.py'
        assert cfg.item12 == dict(a=0)
        assert cfg.item13 == [3.1, 4.2, 5.3]
        assert cfg.item14 == [1, 2]
        assert cfg.item15 == dict(a=dict(b=dict(a=0)), b=[False], c=['test'],
                                  d=[[{'e': 0}], [{'a': 0}, {'b': 1}]], e=[1, 2])
    # Two levels of inheritance.
    cfg_file = osp.join(data_path, 'config/v.py')
    cfg = Config.fromfile(cfg_file)
    assert isinstance(cfg, Config)
    assert cfg.filename == cfg_file
    assert cfg.item21 == 't.py'
    assert cfg.item22 == 't.py'
    assert cfg.item23 == [3.1, 4.2, 5.3]
    assert cfg.item24 == [3.1, 4.2, 5.3]
    assert cfg.item25 == dict(a=dict(b=[3.1, 4.2, 5.3]), b=[[3.1, 4.2, 5.3]],
                              c=[[{'e': 't.py'}], [{'a': 0}, {'b': 1}]], e='t.py')
# NOTE(review): the line below looks like an extraction-truncated decorator,
# presumably ``@pytest.mark.parametrize('username,password', users)`` --
# confirm against the original file.
.parametrize('username,password', users)
def test_update_m2m_multisite(db, client, username, password):
    """PUT an OptionSet update carrying truncated M2M lists and verify it.

    For each OptionSet, submit only the first related option and condition;
    the response status must match the user's object permissions, and on a
    200 the M2M relations must have been replaced with exactly the submitted
    one-element subsets.
    """
    client.login(username=username, password=password)
    instances = OptionSet.objects.all()
    for instance in instances:
        # Deliberately send only the first option/condition ([:1]) so a
        # successful update is expected to shrink each M2M set to one entry.
        optionset_options = [{'option': optionset_option.option.id, 'order': optionset_option.order} for optionset_option in instance.optionset_options.all()[:1]]
        conditions = [condition.pk for condition in instance.conditions.all()[:1]]
        url = reverse(urlnames['detail'], args=[instance.pk])
        data = {'uri_prefix': instance.uri_prefix, 'uri_path': instance.uri_path, 'comment': instance.comment, 'order': instance.order, 'options': optionset_options, 'conditions': conditions}
        response = client.put(url, data, content_type='application/json')
        # On failure the response body is attached to the assertion message.
        assert (response.status_code == get_obj_perms_status_code(instance, username, 'update')), response.json()
        if (response.status_code == 200):
            instance.refresh_from_db()
            assert (optionset_options == [{'option': optionset_option.option.id, 'order': optionset_option.order} for optionset_option in instance.optionset_options.all()])
            assert (conditions == [condition.pk for condition in instance.conditions.all()])
def parse_args():
    """Build and parse the command-line arguments for the FCGEC Switch module.

    Options are organized via the project's ``ArgumentGroup`` helper into
    base / dataset / pretrained / convertor / search / model / train groups.

    Returns:
        argparse.Namespace: the parsed arguments.
    """
    parser = argparse.ArgumentParser(description='FCGEC Switch Module Params')
    # Base experiment settings.
    base_args = ArgumentGroup(parser, 'base', 'Base Settings')
    base_args.add_arg('mode', str, 'train', 'Experiment Mode')
    base_args.add_arg('cuda', bool, True, 'device : True - CUDA, False - CPU (Force)')
    # Fix: "defaut" -> "default" in the help text.
    base_args.add_arg('gpu_id', int, 0, 'GPU Device ID, default->cuda:0')
    base_args.add_arg('seed', int, 2022, 'Experiment Seed')
    base_args.add_arg('checkpoints', str, 'checkpoints/', 'Checkpoint Path Dir')
    base_args.add_arg('checkp', str, 'switch_module/', 'Checkpoint Sub Dir')
    # Dataset location.
    data_args = ArgumentGroup(parser, 'dataset', 'Dataset Settings')
    data_args.add_arg('data_base_dir', str, 'dataset/', 'Base Dir Of Dataset')
    # Pretrained language-model settings.
    pretrained_args = ArgumentGroup(parser, 'pretrained', 'Pretrained Model Settings')
    pretrained_args.add_arg('use_lm', bool, True, 'Whether Model Use Language Models')
    pretrained_args.add_arg('lm_path', str, '../pretrained_models/roberta-base-chinese/', 'Bert Pretrained Model Path')
    pretrained_args.add_arg('lm_hidden_size', int, 768, 'HiddenSize of PLM')
    pretrained_args.add_arg('output_hidden_states', bool, True, 'Output PLM Hidden States')
    pretrained_args.add_arg('finetune', bool, True, 'Finetune Or Freeze')
    # Label-conversion settings.
    convertor_args = ArgumentGroup(parser, 'convertor', 'Convertor Settings')
    convertor_args.add_arg('p2next', bool, True, 'Convert Base Point Labels To Next version')
    # Decode-search settings.
    search_params = ArgumentGroup(parser, 'search', 'Search Settings')
    search_params.add_arg('sw_mode', str, 'rsgs', 'Switch Decode Search Mode')
    search_params.add_arg('beam_width', int, 10, 'Beam Width')
    # Model hyper-parameters.
    model_args = ArgumentGroup(parser, 'model', 'Model Settings')
    model_args.add_arg('num_classes', int, 2, 'Number of classes')
    model_args.add_arg('padding_size', int, 150, 'Padding Size Of PLM Model')
    model_args.add_arg('padding_val', int, 0, 'Padding Value Of LM Model')
    model_args.add_arg('ignore_val', int, -1, 'Padding Value Of ignore (switch) index')
    model_args.add_arg('dropout', float, 0.1, 'Dropout')
    model_args.add_arg('scale_attn', bool, True, 'Scale Attention Scores for Pointer Network')
    model_args.add_arg('layers_num', int, 12, 'Number Of LM Layers')
    model_args.add_arg('layer_init_w', float, 0.1, 'Initial Layer Weights')
    # Training loop settings.
    train_args = ArgumentGroup(parser, 'train', 'Training Settings')
    train_args.add_arg('batch_size', int, 64, 'Batch Size')
    train_args.add_arg('shuffle', bool, True, 'DataLoader Shuffle Params')
    train_args.add_arg('droplast', bool, False, 'Drop Rest Data')
    train_args.add_arg('optimizer', str, 'adamW', 'Optimizer Selection, Can Choose [AdamW]')
    train_args.add_arg('lr', float, 1e-05, 'Learning Rate')
    train_args.add_arg('wd', float, 0.01, 'Weight Decay')
    train_args.add_arg('warmup_steps', int, 10, 'Warm Up Steps Phase')
    train_args.add_arg('epoch', int, 50, 'Epochs')
    train_args.add_arg('criterion', str, 'CE', 'Criterion Selection, Can Choose [CE]')
    train_args.add_arg('print_step', int, 10, 'Training Print Steps')
    train_args.add_arg('eval_step', int, 100, 'Evaluating Steps')
    return parser.parse_args()
def init_model(args, device, n_gpu, local_rank):
    """Build a CLIP4Clip model from pretrained weights and move it to *device*.

    If ``args.init_model`` is set, its weights are loaded on CPU and passed
    as the initial state dict; otherwise the pretrained defaults are used.
    ``n_gpu`` and ``local_rank`` are accepted for interface compatibility but
    are not referenced in this function body.
    """
    state_dict = torch.load(args.init_model, map_location='cpu') if args.init_model else None
    if args.cache_dir:
        cache_dir = args.cache_dir
    else:
        # Default cache location under the pretrained-BERT cache root.
        cache_dir = os.path.join(str(PYTORCH_PRETRAINED_BERT_CACHE), 'distributed')
    model = CLIP4Clip.from_pretrained(args.cross_model, cache_dir=cache_dir,
                                      state_dict=state_dict, task_config=args)
    model.to(device)
    return model
def test_dsl_async_cmd_serial_multiple_expanded_syntax_save():
    """Serial groups inside async cmds run in order and save their output.

    Two cmd entries are configured: the first mixes single commands (A, C)
    with a serial sub-list (B.1, B.2); the second holds only a serial
    sub-list (D.1, D.2). With ``save: True`` the captured stdout/stderr for
    each command must appear in ``cmdOut`` in the same nested shape.
    """
    echo = get_cmd('tests/testfiles/cmds/echo-out-and-err.sh', 'tests\\testfiles\\cmds\\echo-out-and-err.bat')
    context = Context({'cmds': [
        {'run': [f'{echo} A', [f'{echo} B.1', f'{echo} B.2'], f'{echo} C'], 'save': True},
        {'run': [[f'{echo} D.1', f'{echo} D.2']], 'save': True}]})
    AsyncCmdStep('blah', context).run_step()
    out = context['cmdOut']
    assert len(out) == 4

    def check(result, label):
        # Each command echoes its label to both stdout and stderr.
        assert result.stdout == f'stdout {label}'
        assert result.stderr == f'stderr {label}'

    check(out[0], 'A')
    assert len(out[1]) == 2
    check(out[1][0], 'B.1')
    check(out[1][1], 'B.2')
    check(out[2], 'C')
    assert len(out[3]) == 2
    check(out[3][0], 'D.1')
    check(out[3][1], 'D.2')
class PlaylistLibrary(Library[(str, Playlist)]):
    """A library of playlists mirrored from a playlist directory.

    Watches a song ``Library`` and keeps playlists in ``pl_dir`` in sync
    with it: songs removed from or changed in the song library are
    propagated into every playlist that contains them, and the playlist
    files on disk are rewritten accordingly.
    """

    def __init__(self, library: Library, pl_dir: _fsnative=_DEFAULT_PLAYLIST_DIR):
        # NOTE(review): librarian is assigned before super().__init__() --
        # presumably the base class setup touches it; confirm.
        self.librarian = None
        super().__init__(f'{type(self).__name__} for {library._name}')
        print_d(f'Initializing Playlist Library {self} to watch {library._name!r}')
        self.pl_dir = pl_dir
        # NOTE(review): this None check is dead in practice -- library._name
        # was already dereferenced above, so a None library raises
        # AttributeError before reaching here.
        if (library is None):
            raise ValueError('Need a library to listen to')
        self._library = library
        self._read_playlists(library)
        # Keep the signal handler ids so destroy() can disconnect them.
        self._rsig = library.connect('removed', self.__songs_removed)
        self._csig = library.connect('changed', self.__songs_changed)

    def _read_playlists(self, library) -> None:
        """Load every playlist file in ``self.pl_dir``, converting legacy
        (non-XSPF) playlists to XSPF format where possible."""
        print_d(f'Reading playlist directory {self.pl_dir} (library: {library})')
        try:
            fns = os.listdir(self.pl_dir)
        except FileNotFoundError as e:
            # First run: create the playlist directory and start empty.
            print_w(f'No playlist dir found in {self.pl_dir!r}, creating. ({e})')
            os.mkdir(self.pl_dir)
            fns = []
        failed = []
        for fn in fns:
            full_path = os.path.join(self.pl_dir, fn)
            if os.path.isdir(full_path):
                continue
            if HIDDEN_RE.match(fsn2text(fn)):
                print_d(f'Ignoring hidden file {fn!r}')
                continue
            try:
                # Constructed for its side effect: registers itself with this
                # library (pl_lib=self).
                XSPFBackedPlaylist(self.pl_dir, fn, songs_lib=library, pl_lib=self)
            except TypeError as e:
                # Not valid XSPF -- try to read it as a legacy playlist
                # (detached: pl_lib=None) and convert it.
                legacy = FileBackedPlaylist(self.pl_dir, fn, songs_lib=library, pl_lib=None)
                if (not len(legacy)):
                    try:
                        size = os.stat(legacy._last_fn).st_size
                        if (size >= _MIN_NON_EMPTY_PL_BYTES):
                            # Non-trivial file but no songs matched the
                            # library -- probably a stale/relocated root.
                            data = {'filename': fn, 'size': (size / 1024)}
                            print_w((((_('No library songs found in legacy playlist %(filename)r (of size %(size).1f kB).') % data) + ' ') + _('Have you changed library root dir(s), but not this playlist?')))
                            continue
                    except OSError:
                        print_e(f'Problem reading {legacy._last_fn!r}')
                        continue
                    finally:
                        # NOTE(review): this finally runs on *every* path
                        # through the empty-legacy branch, so even an empty
                        # playlist that goes on to convert successfully below
                        # is counted in `failed` -- confirm this is intended.
                        failed.append(fn)
                print_w(f'Converting {fn!r} to XSPF format ({e})')
                XSPFBackedPlaylist.from_playlist(legacy, songs_lib=library, pl_lib=self)
            except OSError:
                print_w(f'Invalid Playlist {fn!r}')
                failed.append(fn)
        if failed:
            total = len(failed)
            # total and len(failed) are the same value here; total selects the
            # plural form, len(failed) fills the placeholder.
            print_e((ngettext('%d playlist failed to convert', '%d playlists failed to convert', total) % len(failed)))

    def create(self, name_base: (str | None)=None) -> Playlist:
        """Create (and register) a new empty XSPF playlist, optionally naming
        it after ``name_base``."""
        if name_base:
            return XSPFBackedPlaylist.new(self.pl_dir, name_base, songs_lib=self._library, pl_lib=self)
        return XSPFBackedPlaylist.new(self.pl_dir, songs_lib=self._library, pl_lib=self)

    def create_from_songs(self, songs: Iterable[AudioFile], title=None) -> Playlist:
        """Create (and register) a new XSPF playlist containing ``songs``."""
        return XSPFBackedPlaylist.from_songs(self.pl_dir, songs, title=title, songs_lib=self._library, pl_lib=self)

    def destroy(self):
        # Disconnect the handlers installed in __init__ from the song library.
        for sig in [self._rsig, self._csig]:
            self._library.disconnect(sig)

    def playlists_featuring(self, song: AudioFile) -> Generator[(Playlist, None, None)]:
        """Yield every playlist that contains ``song``."""
        return (pl for pl in self if (song in pl._list))

    def __songs_removed(self, library, songs):
        """Handler for the song library's 'removed' signal: drop the songs
        from every playlist, persist, and emit 'changed' for those touched."""
        print_d(f'Removing {len(songs)} song(s) across {len(self)} playlist(s) in {self}')
        changed = {pl for pl in self if pl.remove_songs(songs)}
        if changed:
            for pl in changed:
                pl.write()
            self.changed(changed)

    def __songs_changed(self, library, songs) -> None:
        """Handler for the song library's 'changed' signal: re-finalize and
        rewrite every playlist containing at least one changed song."""
        changed = set()
        for playlist in self:
            for song in songs:
                if (song in playlist.songs):
                    changed.add(playlist)
                    break
        if changed:
            for pl in changed:
                pl.finalize()
                pl.write()
            self.changed(changed)

    def recreate(self, playlist: Playlist, songs: Iterable[AudioFile]):
        """Replace ``playlist``'s contents with ``songs`` in-place, persist,
        and notify listeners."""
        playlist._list.clear()
        playlist._list.extend(songs)
        playlist.finalize()
        playlist.write()
        self.changed([playlist])
.sphinx(srcdir=srcdir, confoverrides={'hoverxref_ignore_refs': ['section i']})
def test_ignore_refs(app, status, warning):
app.build()
path = (app.outdir / 'index.html')
assert (path.exists() is True)
content = open(path).read()
chunks = ['<a class="reference internal" href="chapter-i.html#chapter-i"><span class="std std-ref">This a :ref: to Chapter I</span></a>', '<a class="reference internal" href="chapter-i.html#section-i"><span class="std std-ref">This a :hoverxref: to Chapter I, Section I</span></a>']
for chunk in chunks:
assert (chunk in content)
ignored_chunks = ['<a class="hxr-hoverxref reference internal" href="chapter-i.html#section-i"><span class="std std-ref">This a :hoverxref: to Chapter I, Section I</span></a>']
for chunk in ignored_chunks:
assert (chunk not in content) |
def weights_init_kaiming(m):
    """Initialize module *m* in place, keyed on its class name.

    Conv/ConvTranspose and Linear layers get Kaiming-normal weights
    (fan-in, ReLU gain) with zeroed biases; BatchNorm2d layers get
    N(1.0, 0.02) weights and zero biases. Other modules are untouched.
    Intended for use with ``model.apply(weights_init_kaiming)``.
    """
    name = type(m).__name__
    if 'Conv' in name or 'ConvTranspose' in name:
        # 'Conv' already matches 'ConvTranspose...'; kept for parity with the
        # original condition.
        init.kaiming_normal_(m.weight.data, a=0, mode='fan_in', nonlinearity='relu')
        if m.bias is not None:
            m.bias.data.zero_()
    elif 'Linear' in name:
        init.kaiming_normal_(m.weight.data, a=0, mode='fan_in', nonlinearity='relu')
        if m.bias is not None:
            m.bias.data.zero_()
    elif 'BatchNorm2d' in name:
        init.normal_(m.weight.data, 1.0, 0.02)
        init.constant_(m.bias.data, 0.0)
# NOTE(review): the two lines below look like extraction-truncated
# ``@patch(...)`` decorators (mock arguments are injected bottom-up, which
# matches the parameter order) -- confirm against the original file.
('randovania.games.prime2.patcher.claris_randomizer.validate_game_files_path', autospec=True)
('randovania.games.prime2.patcher.claris_randomizer.get_data_path', autospec=True)
def test_base_args(mock_get_data_path: MagicMock, mock_validate_game_files_path: MagicMock):
    """_base_args builds the Randomizer.exe invocation and validates the
    game files directory exactly once."""
    mock_get_data_path.return_value = Path('data')
    game_root = Path('root')
    results = claris_randomizer._base_args(game_root)
    # Expected: executable path, game root, and the -data: argument pointing
    # at the per-user custom randomizer data file.
    expected_results = [Path('data', 'ClarisPrimeRandomizer', 'Randomizer.exe'), Path('root'), ('-data:' + str(persistence.local_data_dir().joinpath('CustomEchoesRandomizerData.json')))]
    assert (results == expected_results)
    mock_validate_game_files_path.assert_called_once_with(Path('root', 'files'))
class TestLoggingForTestGenerator():
    """Verify that ProtocolAdapter logs every write/read at DEBUG level
    using lazy %-formatting (template in ``msg``, payload in ``args``)."""

    # Payload exchanged in every test below.
    message = b'some written message'

    def adapter(self, caplog):
        """Provide a fresh ProtocolAdapter with DEBUG log capture enabled."""
        proto = ProtocolAdapter()
        caplog.set_level(logging.DEBUG)
        return proto

    def _assert_record(self, record, template, payload):
        # Log records must keep the template and the payload separate.
        assert record.msg == template
        assert record.args == (payload,)

    def test_write(self, adapter, caplog):
        adapter.comm_pairs = [(self.message, None)]
        text = self.message.decode()
        adapter.write(text)
        self._assert_record(caplog.records[0], 'WRITE:%s', text)

    def test_write_bytes(self, adapter, caplog):
        adapter.comm_pairs = [(self.message, None)]
        adapter.write(self.message)
        self._assert_record(caplog.records[0], 'WRITE:%s', self.message)

    def test_read(self, adapter, caplog):
        adapter.comm_pairs = [(None, self.message)]
        result = adapter.read()
        assert result == self.message.decode()
        self._assert_record(caplog.records[0], 'READ:%s', result)

    def test_read_bytes(self, adapter, caplog):
        adapter.comm_pairs = [(None, self.message)]
        result = adapter.read_bytes(-1)
        assert result == self.message
        self._assert_record(caplog.records[0], 'READ:%s', result)
def test_export_logs_failure(initialized_db):
    """Exercise the export-logs worker's failure path.

    A storage sentinel ('except_upload') forces the upload to raise; the
    worker must surface an IOError, and the HTTP callback must still be
    invoked with a 'failed' status payload.

    NOTE(review): the bare ``(netloc='testcallback')`` line below looks like
    a truncated ``@urlmatch(...)`` decorator, and the 'callback_url' value in
    the _process_queue_item payload appears to have lost its URL string
    during extraction (the literal is unterminated) -- confirm both against
    the original file.
    """
    # Sentinel that makes the storage layer raise during upload.
    test_storage.put_content('local_us', 'except_upload', b'true')
    repo = model.repository.get_repository('devtable', 'simple')
    user = model.user.get_user('devtable')
    worker = ExportActionLogsWorker(None)
    # Mutable cell captured by the callback handler below.
    called = [{}]
    (netloc='testcallback')
    def handle_request(url, request):
        called[0] = json.loads(request.body)
        return {'status_code': 200, 'content': '{}'}
    def format_date(datetime):
        return datetime.strftime('%m/%d/%Y')
    now = datetime.now()
    with HTTMock(handle_request):
        with pytest.raises(IOError):
            worker._process_queue_item({'export_id': 'someid', 'repository_id': repo.id, 'namespace_id': repo.namespace_user.id, 'namespace_name': 'devtable', 'repository_name': 'simple', 'start_time': format_date((now + timedelta(days=(- 10)))), 'end_time': format_date((now + timedelta(days=10))), 'callback_url': ' 'callback_email': None}, test_storage)
    # Clean up the sentinel so later tests see normal storage behavior.
    test_storage.remove('local_us', 'except_upload')
    # The callback must have reported the failure.
    assert called[0]
    assert (called[0]['export_id'] == 'someid')
    assert (called[0]['status'] == 'failed')
class scoped_configure(object):
    """Context manager that temporarily installs a freshly configured logger.

    On entry the current global ``Logger.CURRENT`` is remembered and
    ``configure()`` installs a new one; on exit the new logger is closed and
    the previous logger is restored.
    """

    def __init__(self, dir=None, format_strs=None):
        # Output directory for the temporary logger (None -> library default).
        self.dir = dir
        # Output format names forwarded to configure().
        self.format_strs = format_strs
        # Logger that was current before __enter__; restored by __exit__.
        self.prevlogger = None

    def __enter__(self):
        self.prevlogger = Logger.CURRENT
        # configure() replaces Logger.CURRENT as a side effect.
        configure(dir=self.dir, format_strs=self.format_strs)

    def __exit__(self, *args):
        # Close the logger installed by configure(), then restore the old one.
        Logger.CURRENT.close()
        Logger.CURRENT = self.prevlogger
# NOTE(review): the line below looks like an extraction-truncated Flask route
# decorator, presumably ``@app.route(DETECTION_URL, methods=['POST'])`` --
# confirm against the original file.
(DETECTION_URL, methods=['POST'])
def predict():
    """Run the detection model on an uploaded image.

    Expects a POST with an 'image' file field; returns the detections as a
    JSON list of records (one per detected box). Non-POST requests and
    requests without an image return None (empty response).
    """
    if (not (request.method == 'POST')):
        return
    if request.files.get('image'):
        image_file = request.files['image']
        image_bytes = image_file.read()
        img = Image.open(io.BytesIO(image_bytes))
        # size=640: inference resolution passed to the model.
        results = model(img, size=640)
        # xyxy[0]: boxes for the first (only) image in the batch.
        return results.pandas().xyxy[0].to_json(orient='records')
class SmithyLexer(RegexLexer):
    """Pygments lexer for the Smithy interface-definition language."""
    name = 'Smithy'
    # NOTE(review): the url literal below is truncated (unterminated string),
    # most likely lost during extraction -- restore the original URL.
    url = '
    filenames = ['*.smithy']
    aliases = ['smithy']
    version_added = '2.10'
    # NOTE(review): `unquoted` and `identifier` are currently identical
    # patterns -- possibly intentional aliases; confirm.
    unquoted = '[A-Za-z0-9_\\.#$-]+'
    identifier = '[A-Za-z0-9_\\.#$-]+'
    # Primitive shape keywords (matched with a trailing identifier).
    simple_shapes = ('use', 'byte', 'short', 'integer', 'long', 'float', 'document', 'double', 'bigInteger', 'bigDecimal', 'boolean', 'blob', 'string', 'timestamp')
    # Composite/aggregate shape keywords.
    aggregate_shapes = ('apply', 'list', 'map', 'set', 'structure', 'union', 'resource', 'operation', 'service', 'trait')
    # Single root state: comments, version/namespace/metadata declarations,
    # shape declarations, literals, and bracket push/pop for nesting.
    tokens = {'root': [('///.*$', Comment.Multiline), ('//.*$', Comment), ('[0-9a-zA-Z\\.#-]*', Name.Decorator), ('(=)', Name.Decorator), ('^(\\$version)(:)(.+)', bygroups(Keyword.Declaration, Name.Decorator, Name.Class)), ((('^(namespace)(\\s+' + identifier) + ')\\b'), bygroups(Keyword.Declaration, Name.Class)), (words(simple_shapes, prefix='^', suffix=(('(\\s+' + identifier) + ')\\b')), bygroups(Keyword.Declaration, Name.Class)), (words(aggregate_shapes, prefix='^', suffix=(('(\\s+' + identifier) + ')')), bygroups(Keyword.Declaration, Name.Class)), ('^(metadata)(\\s+)((?:\\S+)|(?:\\"[^"]+\\"))(\\s*)(=)', bygroups(Keyword.Declaration, Whitespace, Name.Class, Whitespace, Name.Decorator)), ('(true|false|null)', Keyword.Constant), ('(-?(?:0|[1-9]\\d*)(?:\\.\\d+)?(?:[eE][+-]?\\d+)?)', Number), ((identifier + ':'), Name.Label), (identifier, Name.Variable.Class), ('\\[', Text, '#push'), ('\\]', Text, '#pop'), ('\\(', Text, '#push'), ('\\)', Text, '#pop'), ('\\{', Text, '#push'), ('\\}', Text, '#pop'), ('"{3}(|\\n|\\\\")*"{3}', String.Doc), ('"(|\\n|\\\\"|[^"])*"', String.Double), ("'(|\\n|\\\\'|[^'])*'", String.Single), ('[:,]+', Punctuation), ('\\s+', Whitespace)]}
class PairChallengeCommand(PairCommandBase):
    """Command that completes device pairing by answering the PIN challenge."""

    def __init__(self, device_id: str, challenge_type: Union[(int, str)], pairing_token: Union[(int, str)], pin: str, device_type: str) -> None:
        super().__init__(device_id, device_type, 'FINISH_PAIR')
        # Payload fields, coerced to the wire types the endpoint expects.
        self.CHALLENGE_TYPE = int(challenge_type)
        self.PAIRING_REQ_TOKEN = int(pairing_token)
        self.RESPONSE_VALUE = str(pin)

    def process_response(self, json_obj: Dict[(str, Any)]) -> PairChallengeResponse:
        """Pull the auth token out of the response ITEM payload."""
        item = dict_get_case_insensitive(json_obj, ResponseKey.ITEM)
        auth_token = dict_get_case_insensitive(item, PairingResponseKey.AUTH_TOKEN)
        return PairChallengeResponse(auth_token)
def get_dataset(config, load_only_val=False, use_gt_inssem=False):
    """Instantiate the train/val dataset pair selected by the configuration.

    Args:
        config: experiment configuration; ``dataset_class`` chooses between
            'panopli' and 'mos', the remaining fields parameterize the
            dataset constructors.
        load_only_val: when True, skip building the training split and
            return None in its place.
        use_gt_inssem: when True, read ground-truth instance/semantic labels
            instead of the machine-generated ones.

    Returns:
        Tuple ``(train_set, val_set)``.

    Raises:
        NotImplementedError: for an unknown ``config.dataset_class``.
    """
    dims = (config.image_dim[0], config.image_dim[1])
    if config.dataset_class == 'panopli':
        if use_gt_inssem:
            instance_dir, semantics_dir, ins2sem_key = 'rs_instance', 'rs_semantics', 'rs_instance_to_semantic'
        else:
            instance_dir, semantics_dir, ins2sem_key = 'm2f_instance', 'm2f_semantics', 'm2f_instance_to_semantic'
        train_set = None
        if not load_only_val:
            # Distilled features are only loaded for the training split.
            train_set = PanopLiDataset(
                Path(config.dataset_root), 'train', dims, config.max_depth,
                overfit=config.overfit, semantics_dir=semantics_dir,
                load_feat=(config.use_distilled_features_semantic or config.use_distilled_features_instance),
                feature_type=config.feature_type, instance_dir=instance_dir,
                instance_to_semantic_key=ins2sem_key,
                create_seg_data_func=create_segmentation_data_panopli,
                subsample_frames=config.subsample_frames)
        val_set = PanopLiDataset(
            Path(config.dataset_root), 'val', dims, config.max_depth,
            overfit=config.overfit, semantics_dir=semantics_dir,
            instance_dir=instance_dir, instance_to_semantic_key=ins2sem_key,
            create_seg_data_func=create_segmentation_data_panopli,
            subsample_frames=config.subsample_frames)
        return (train_set, val_set)
    if config.dataset_class == 'mos':
        if use_gt_inssem:
            instance_dir, semantics_dir = 'instance', 'semantic'
        else:
            instance_dir, semantics_dir = 'detic_instance', 'detic_semantic'
        train_set = None
        if not load_only_val:
            train_set = MOSDataset(
                Path(config.dataset_root), 'train', dims, config.max_depth,
                overfit=config.overfit, semantics_dir=semantics_dir,
                load_feat=False, feature_type=None, instance_dir=instance_dir,
                instance_to_semantic_key=None, create_seg_data_func=None,
                subsample_frames=config.subsample_frames)
        val_set = MOSDataset(
            Path(config.dataset_root), 'val', dims, config.max_depth,
            overfit=config.overfit, semantics_dir=semantics_dir,
            instance_dir=instance_dir, instance_to_semantic_key=None,
            create_seg_data_func=None, subsample_frames=config.subsample_frames)
        return (train_set, val_set)
    raise NotImplementedError
class MaskShadowGANOptions(BaseOptions):
    """Command-line options for Mask-ShadowGAN.

    Extends BaseOptions with the dataset paths (train vs. test mode), cycle
    and identity loss weights, network widths, and buffer/queue sizes.
    """

    def __init__(self, training):
        BaseOptions.__init__(self)
        add = self.parser.add_argument
        # Dataset locations differ between training and testing.
        if training:
            add('--dirA', type=str, required=True, help='Path to training shadow dataset')
            add('--dirB', type=str, required=True, help='Path to training shadow free dataset')
        else:
            add('--dir', type=str, required=True, help='Path to test shadow dataset')
        # Loss weights.
        add('--lamA', type=float, default=10.0, help='weight for forward cycle loss (A->B->A)')
        add('--lamB', type=float, default=10.0, help='weight for backward cycle loss (B->A->B)')
        add('--lambda_ident', type=float, default=0.0, help='weight for identity loss')
        # Network widths.
        add('--ngf', type=int, default=64, help='# of filters in first conv. layer of generator')
        add('--ndf', type=int, default=64, help='# of filters in first conv. layer of discriminator')
        # Replay buffers.
        add('--pool_size', type=int, default=50, help='the size of image buffer that stores previously generated images')
        add('--queue_size', type=int, default=100, help='the size of mask queue that stores previously generated shadow masks')

    def parse(self):
        """Parse and return the command-line arguments."""
        return self.parser.parse_args()
def test_byhand_awav2vel():
    """Convert an air-wavelength (AWAV) spectral WCS axis to optical velocity
    by hand and check it against ``convert_spectral_axis``."""
    # Air-wavelength axis: reference value/step defined in Angstrom, stored
    # in meters (the WCS canonical unit).
    CRVAL3A = (6560 * u.AA).to(u.m).value
    CDELT3A = (1.0 * u.AA).to(u.m).value
    CUNIT3A = 'm'
    CRPIX3A = 1.0
    # Rest wavelength of the line (H-alpha), converted from air to vacuum.
    restwl = air_to_vac((6562.81 * u.AA))
    RESTWAV = restwl.to(u.m).value
    # Hand-computed velocity-axis reference value and step (optical
    # convention); CDELT includes the air->vac derivative correction.
    CRVAL3V = (CRVAL3A * u.m).to((u.m / u.s), u.doppler_optical(restwl)).value
    CDELT3V = ((((CDELT3A * u.m) * air_to_vac_deriv((CRVAL3A * u.m))) / restwl) * constants.c)
    CUNIT3V = 'm/s'
    # Build the 1-axis AWAV WCS under test.
    mywcs = wcs.WCS(naxis=1)
    mywcs.wcs.ctype[0] = 'AWAV'
    mywcs.wcs.crval[0] = CRVAL3A
    mywcs.wcs.crpix[0] = CRPIX3A
    mywcs.wcs.cunit[0] = CUNIT3A
    mywcs.wcs.cdelt[0] = CDELT3A
    mywcs.wcs.restwav = RESTWAV
    mywcs.wcs.set()
    newwcs = convert_spectral_axis(mywcs, (u.km / u.s), determine_ctype_from_vconv(mywcs.wcs.ctype[0], (u.km / u.s), 'optical'))
    newwcs.wcs.set()
    assert (newwcs.wcs.cunit[0] == 'm / s')
    # Converted CRVAL/CDELT must match the hand computation.
    np.testing.assert_almost_equal(newwcs.wcs.crval, air_to_vac((CRVAL3A * u.m)).to((u.m / u.s), u.doppler_optical(restwl)).value)
    np.testing.assert_almost_equal(newwcs.wcs.cdelt, CDELT3V.to((u.m / u.s)).value)
    # The rest wavelength (zero velocity) sits at pixel 2.81 in the original
    # frame (6562.81 AA with CRVAL at 6560 AA, 1 AA per pixel).
    np.testing.assert_almost_equal(newwcs.wcs_pix2world((2.81,), 0), 0.0, decimal=3)
    # Round-trip check: a 100 km/s line must land on the same pixel in both
    # the wavelength WCS (via the air wavelength) and the velocity WCS.
    vline = ((100 * u.km) / u.s)
    wave_line_vac = vline.to(u.AA, u.doppler_optical(restwl))
    wave_line_air = vac_to_air(wave_line_vac)
    pix_line_input = mywcs.wcs_world2pix((wave_line_air.to(u.m).value,), 0)
    pix_line_output = newwcs.wcs_world2pix((vline.to((u.m / u.s)).value,), 0)
    np.testing.assert_almost_equal(pix_line_output, pix_line_input, decimal=4)
def parse_args():
    """Build and parse the command-line arguments for knowledge-graph link
    prediction (ConvE-style model).

    Returns:
        argparse.Namespace: the parsed arguments.
    """
    parser = argparse.ArgumentParser(description='Link prediction for knowledge graphs')
    # Optimization settings.
    parser.add_argument('--lr', type=float, default=0.003, help='learning rate (default: 0.003)')
    parser.add_argument('--l2', type=float, default=0.0, help='Weight decay value to use in the optimizer. Default: 0.0')
    parser.add_argument('--lr-decay', type=float, default=0.995, help='Decay the learning rate by this factor every epoch. Default: 0.995')
    # Regularization.
    parser.add_argument('--hidden-drop', type=float, default=0.3, help='Dropout for the hidden layer. Default: 0.3.')
    parser.add_argument('--input-drop', type=float, default=0.2, help='Dropout for the input embeddings. Default: 0.2.')
    parser.add_argument('--feat-drop', type=float, default=0.2, help='Dropout for the convolutional features. Default: 0.2.')
    parser.add_argument('--label-smoothing', type=float, default=0.1, help='Label smoothing value to use. Default: 0.1')
    parser.add_argument('--use-bias', action='store_true', help='Use a bias in the convolutional layer. Default: True')
    # Training loop.
    parser.add_argument('--seed', type=int, default=17, metavar='S', help='random seed (default: 17)')
    parser.add_argument('--epochs', type=int, default=1000, help='number of epochs to train (default: 1000)')
    parser.add_argument('--batch-size', type=int, default=128, help='input batch size for training (default: 128)')
    parser.add_argument('--log-interval', type=int, default=1000000, help='how many batches to wait before logging training status')
    parser.add_argument('--loader-threads', type=int, default=4, help='How many loader threads to use for the batch loaders. Default: 4')
    # Model dimensions. (Typo fix: "infered" -> "inferred".)
    parser.add_argument('--embedding-dim', type=int, default=200, help='The embedding dimension (1D). Default: 200')
    parser.add_argument('--embedding-shape1', type=int, default=20, help='The first dimension of the reshaped 2D embedding. The second dimension is inferred. Default: 20')
    parser.add_argument('--hidden-size', type=int, default=9728, help='The side of the hidden layer. The required size changes with the size of the embeddings. Default: 9728 (embedding size 200).')
    # Data paths.
    parser.add_argument('--preprocess', type=int, default=0, help='preprocess')
    parser.add_argument('--data', type=str, help='Targeting dataset')
    parser.add_argument('--emb-path', type=str, help='Path to output file')
    parser.add_argument('--train-path', type=str, help='Path to preprocessed training file')
    return parser.parse_args()
def test_targetstrategy_steering(tmpdir, multiproc_backend):
    """With the 'target:/init' strategy only the init node should run to
    SUCCESS; every other node in the DAG stays DEFINED."""
    workdir = os.path.join(str(tmpdir), 'workdir')
    ys = YadageSteering.create(dataarg='local:' + workdir,
                               workflow='workflow.yml',
                               toplevel='tests/testspecs/nestedmapreduce',
                               initdata={'input': [1, 2, 3]})
    ys.adage_argument(default_trackers=False)
    ys.adage_argument(**get_strategy('target:/init', {}))
    ys.run_adage(multiproc_backend)
    dag = ys.controller.adageobj.dag
    for node_id in dag.nodes():
        node = dag.getNode(node_id)
        expected = nodestate.SUCCESS if node.name == 'init' else nodestate.DEFINED
        assert node.state == expected
def newtype_attrs_typed_attrs(draw: DrawFn, defaults=None, kw_only=None):
    """Hypothesis strategy: build an attrs field typed as a NewType over a
    generated class, plus a strategy producing values of that class.

    NOTE(review): this function takes a ``draw`` argument and so is
    presumably decorated with ``@composite`` (decorator not visible here);
    likewise the inner class is instantiated with a positional argument,
    which suggests a stripped ``@attr.s``/``@define`` decorator on it --
    confirm both against the original file.
    """
    # Sentinel: field has no default unless one is drawn below.
    default = NOTHING
    class NewTypeAttrs():
        a: int
    # Give the field a default either when forced (defaults=True) or, when
    # unspecified, by a coin flip.
    if ((defaults is True) or ((defaults is None) and draw(booleans()))):
        default = NewTypeAttrs(draw(integers()))
    NewAttrs = NewType('NewAttrs', NewTypeAttrs)
    # Returns (attrs field, strategy of matching values); kw_only is drawn
    # randomly when not pinned by the caller.
    return (field(type=NewAttrs, default=default, kw_only=(draw(booleans()) if (kw_only is None) else kw_only)), integers().map(NewTypeAttrs))
class TestKeywordSelection():
    """Tests for pytest's ``-k`` keyword-based test selection.

    NOTE(review): several decorator lines below appear truncated by
    extraction (``.parametrize``, ``.xfail``) -- presumably
    ``@pytest.mark.parametrize`` / ``@pytest.mark.xfail``; the conftest
    source string also contains a bare ``(wrapper=True)`` line that looks
    like a truncated hook decorator. Confirm against the original file.
    """

    def test_select_simple(self, pytester: Pytester) -> None:
        """-k selects by (partial) test name or class-and-name expression."""
        file_test = pytester.makepyfile('\n def test_one():\n assert 0\n class TestClass(object):\n def test_method_one(self):\n assert 42 == 43\n ')
        def check(keyword, name):
            # Expect exactly one failing test with the given id and exactly
            # one deselection report.
            reprec = pytester.inline_run('-s', '-k', keyword, file_test)
            (passed, skipped, failed) = reprec.listoutcomes()
            assert (len(failed) == 1)
            assert (failed[0].nodeid.split('::')[(- 1)] == name)
            assert (len(reprec.getcalls('pytest_deselected')) == 1)
        for keyword in ['test_one', 'est_on']:
            check(keyword, 'test_one')
        check('TestClass and test', 'test_method_one')

    # NOTE(review): truncated decorator -- presumably @pytest.mark.parametrize.
    .parametrize('keyword', ['xxx', 'xxx and test_2', 'TestClass', 'xxx and not test_1', 'TestClass and test_2', 'xxx and TestClass and test_2'])
    def test_select_extra_keywords(self, pytester: Pytester, keyword) -> None:
        """Keywords added via extra_keyword_matches participate in -k."""
        p = pytester.makepyfile(test_select='\n def test_1():\n pass\n class TestClass(object):\n def test_2(self):\n pass\n ')
        pytester.makepyfile(conftest='\n import pytest\n (wrapper=True)\n def pytest_pycollect_makeitem(name):\n item = yield\n if name == "TestClass":\n item.extra_keyword_matches.add("xxx")\n return item\n ')
        reprec = pytester.inline_run(p.parent, '-s', '-k', keyword)
        print('keyword', repr(keyword))
        (passed, skipped, failed) = reprec.listoutcomes()
        assert (len(passed) == 1)
        assert passed[0].nodeid.endswith('test_2')
        dlist = reprec.getcalls('pytest_deselected')
        assert (len(dlist) == 1)
        assert (dlist[0].items[0].name == 'test_1')

    def test_keyword_extra(self, pytester: Pytester) -> None:
        """A custom attribute set on a test function is matchable via -k."""
        p = pytester.makepyfile('\n def test_one():\n assert 0\n test_one.mykeyword = True\n ')
        reprec = pytester.inline_run('-k', 'mykeyword', p)
        (passed, skipped, failed) = reprec.countoutcomes()
        assert (failed == 1)

    # NOTE(review): truncated decorator -- presumably @pytest.mark.xfail.
    .xfail
    def test_keyword_extra_dash(self, pytester: Pytester) -> None:
        """Leading-dash keyword exclusion (known-failing form)."""
        p = pytester.makepyfile('\n def test_one():\n assert 0\n test_one.mykeyword = True\n ')
        reprec = pytester.inline_run('-k', '-mykeyword', p)
        (passed, skipped, failed) = reprec.countoutcomes()
        assert (((passed + skipped) + failed) == 0)

    # NOTE(review): truncated decorator -- presumably @pytest.mark.parametrize.
    .parametrize('keyword', ['__', '+', '..'])
    def test_no_magic_values(self, pytester: Pytester, keyword: str) -> None:
        """Punctuation-only keywords match nothing and deselect everything."""
        p = pytester.makepyfile('\n def test_one(): assert 1\n ')
        reprec = pytester.inline_run('-k', keyword, p)
        (passed, skipped, failed) = reprec.countoutcomes()
        dlist = reprec.getcalls('pytest_deselected')
        assert (((passed + skipped) + failed) == 0)
        deselected_tests = dlist[0].items
        assert (len(deselected_tests) == 1)

    def test_no_match_directories_outside_the_suite(self, pytester: Pytester, monkeypatch: pytest.MonkeyPatch) -> None:
        """Directory names above the rootdir must not count as -k keywords."""
        pytester.makefile(**{'suite/pytest': '[pytest]'}, ext='.ini')
        pytester.makepyfile(**{'suite/ddd/tests/__init__.py': '', 'suite/ddd/tests/test_foo.py': '\n def test_aaa(): pass\n def test_ddd(): pass\n '})
        monkeypatch.chdir((pytester.path / 'suite'))
        def get_collected_names(*args: str) -> List[str]:
            (_, rec) = pytester.inline_genitems(*args)
            calls = rec.getcalls('pytest_collection_finish')
            assert (len(calls) == 1)
            return [x.name for x in calls[0].session.items]
        assert (get_collected_names() == ['test_aaa', 'test_ddd'])
        # The tmp-dir name (outside the suite rootdir) must match nothing.
        assert (get_collected_names('-k', pytester._name) == [])
def split_complex(comp_num_str):
    """Split a complex-number literal such as ``'3.5-2.5j'`` into its real
    and imaginary parts.

    The trailing character (the ``'j'`` suffix) is dropped from the second
    component, e.g. ``'1+2j'`` -> ``('1', '+2')``. Components may be signed
    and may contain ``.`` or ``/`` (fractional forms).

    Parameters
    ----------
    comp_num_str : str
        Textual complex number ending in ``'j'``.

    Returns
    -------
    tuple of str
        ``(real_part, imag_part)``.

    Raises
    ------
    ValueError
        If the string does not contain exactly two numeric components.
        (Previously a bare ``Exception``; ``ValueError`` is more precise and
        is still caught by any existing ``except Exception`` handler. The
        message typo "Somthing" is also fixed.)
    """
    split_indices = [m.start() for m in re.finditer(r'[+-]?(\d+[/.])?\d+', comp_num_str)]
    if len(split_indices) != 2:
        raise ValueError(
            "Something must be wrong with the regex, can't seem to handle "
            "this complex number : %s" % comp_num_str)
    return (comp_num_str[split_indices[0]:split_indices[1]],
            comp_num_str[split_indices[1]:-1])
class TestSPoint(unittest.TestCase):
    """Unit tests for the SPoint spherical-point class."""

    def test_latitude_validity(self):
        # Out-of-range and non-finite latitudes must both be rejected.
        for bad_lat in (np.pi, np.inf):
            with pytest.raises(ValueError):
                SPoint(0, bad_lat)

    def test_longitude_validity(self):
        # Non-finite longitude must be rejected.
        with pytest.raises(ValueError):
            SPoint(np.inf, 0)

    def test_creation_from_degrees(self):
        lon_deg, lat_deg = 0, 20
        from_degrees = SPoint.from_degrees(lon_deg, lat_deg)
        from_radians = SPoint(np.deg2rad(lon_deg), np.deg2rad(lat_deg))
        assert from_degrees == from_radians

    def test_vertices(self):
        point = SPoint(0, np.pi / 2)
        expected = np.array([[0.0, 1.]])
        assert np.allclose(point.vertices, expected)

    def test_vertices_in_degrees(self):
        point = SPoint(0, np.pi / 2)
        expected = np.array([[0.0, 90.0]])
        assert np.allclose(point.vertices_in_degrees, expected)

    def test_raise_error_if_multi_point(self):
        # SPoint represents a single point; array inputs are invalid.
        lon_arr = np.array([0, np.pi])
        lat_arr = np.array([(- np.pi) / 2, np.pi / 2])
        with pytest.raises(ValueError):
            SPoint(lon_arr, lat_arr)

    def test_str(self):
        point = SPoint(1.0, 0.5)
        self.assertEqual(str(point), '(1.0, 0.5)')

    def test_repr(self):
        point = SPoint(1.0, 0.5)
        self.assertEqual(repr(point), '(1.0, 0.5)')

    def test_to_shapely(self):
        from shapely.geometry import Point
        spherical_point = SPoint(0.0, np.pi / 2)
        # to_shapely() converts to degrees.
        expected = Point(0.0, 90.0)
        assert expected.equals_exact(spherical_point.to_shapely(), tolerance=1e-10)
class TPlaylistPlugins(TestCase):
    """Tests for discovery, enabling and invocation of playlist plugins.

    Plugin modules are generated on the fly as real .py files inside a temp
    directory that the PluginManager scans.
    """

    class MockBrowser(Browser):
        """Minimal Browser stand-in that records whether it was activated."""

        def __init__(self):
            super().__init__()
            self.activated = False

        def activate(self):
            self.activated = True

        def get_toplevel(self):
            return self

        def is_toplevel(self):
            return True

    def _confirmer(self, *args):
        # Injected in place of the GUI confirmation dialog; just records the call.
        self.confirmed = True

    def setUp(self):
        self.tempdir = mkdtemp()
        self.pm = PluginManager(folders=[self.tempdir])
        self.confirmed = False
        self.mock_browser = self.MockBrowser()
        self.handler = PlaylistPluginHandler(self._confirmer)
        self.pm.register_handler(self.handler)
        self.pm.rescan()
        self.assertEqual(self.pm.plugins, [])
        self.library = SongLibrary('foo')

    def tearDown(self):
        self.library.destroy()
        self.pm.quit()
        shutil.rmtree(self.tempdir)

    def create_plugin(self, id='', name='', desc='', icon='', funcs=None, mod=False):
        """Write a synthetic plugin module into the scanned temp dir.

        With mod=True the attributes are written at module level, otherwise
        onto a generated PlaylistPlugin subclass.

        NOTE(review): `id` is accepted but never written — PLUGIN_ID is
        populated from `name` below; confirm this is intentional.
        """
        (fd, fn) = mkstemp(suffix='.py', text=True, dir=self.tempdir)
        file = os.fdopen(fd, 'w')
        if mod:
            indent = ''
        else:
            file.write('from quodlibet.plugins.playlist import PlaylistPlugin\n')
            file.write('class %s(PlaylistPlugin):\n' % name)
            indent = '    '
            file.write('%spass\n' % indent)
        if name:
            file.write(f'''{indent}PLUGIN_ID = {name!r}
''')
        if name:
            file.write(f'''{indent}PLUGIN_NAME = {name!r}
''')
        if desc:
            file.write(f'''{indent}PLUGIN_DESC = {desc!r}
''')
        if icon:
            file.write(f'''{indent}PLUGIN_ICON = {icon!r}
''')
        for f in (funcs or []):
            if f in ['__init__']:
                # A plugin whose constructor raises, to exercise error handling.
                file.write(f'''{indent}def {f}(self, *args): super().__init__(*args); raise Exception("as expected.")
''')
            else:
                file.write(f'''{indent}def {f}(*args): return args
''')
        file.flush()
        file.close()

    def test_empty_has_no_plugins(self):
        self.pm.rescan()
        self.assertEqual(self.pm.plugins, [])

    def test_name_and_desc_plus_func_is_one(self):
        self.create_plugin(name='Name', desc='Desc', funcs=['plugin_playlist'])
        self.pm.rescan()
        self.assertEqual(len(self.pm.plugins), 1)

    def test_additional_functions_still_only_one(self):
        self.create_plugin(name='Name', desc='Desc', funcs=['plugin_playlist', 'plugin_playlists'])
        self.pm.rescan()
        self.assertEqual(len(self.pm.plugins), 1)

    def test_two_plugins_are_two(self):
        self.create_plugin(name='Name', desc='Desc', funcs=['plugin_playlist'])
        self.create_plugin(name='Name2', desc='Desc2', funcs=['plugin_albums'])
        self.pm.rescan()
        self.assertEqual(len(self.pm.plugins), 2)

    def test_disables_plugin(self):
        # Freshly discovered plugins start out disabled.
        self.create_plugin(name='Name', desc='Desc', funcs=['plugin_playlist'])
        self.pm.rescan()
        self.assertFalse(self.pm.enabled(self.pm.plugins[0]))

    def test_enabledisable_plugin(self):
        self.create_plugin(name='Name', desc='Desc', funcs=['plugin_playlist'])
        self.pm.rescan()
        plug = self.pm.plugins[0]
        self.pm.enable(plug, True)
        self.assertTrue(self.pm.enabled(plug))
        self.pm.enable(plug, False)
        self.assertFalse(self.pm.enabled(plug))

    def test_ignores_broken_plugin(self):
        self.create_plugin(name='Broken', desc='Desc', funcs=['__init__', 'plugin_playlist'])
        self.pm.rescan()
        plug = self.pm.plugins[0]
        self.pm.enable(plug, True)
        menu = Gtk.Menu()
        with capture_output():
            self.handler.populate_menu(menu, None, self.mock_browser, [TEST_PLAYLIST])
        self.assertEqual(len(menu.get_children()), 0, msg="Shouldn't have enabled a broken plugin")

    def test_populate_menu(self):
        plugin = Plugin(FakePlaylistPlugin)
        self.handler.plugin_enable(plugin)
        menu = Gtk.Menu()
        self.handler.populate_menu(menu, None, self.mock_browser, [TEST_PLAYLIST])
        # Subtract one for the separator item the handler always adds.
        num = len(menu.get_children()) - 1
        self.assertEqual(num, 1, msg='Need 1 plugin not %d' % num)

    def test_handling_playlists_without_confirmation(self):
        plugin = Plugin(FakePlaylistPlugin)
        self.handler.plugin_enable(plugin)
        playlists = generate_playlists(MAX_PLAYLISTS)
        self.handler.handle(plugin.id, self.library, self.mock_browser, playlists)
        # Bug fix: the two arguments were swapped — assertTrue(msg, cond)
        # always passed because the non-empty message string is truthy.
        self.assertTrue(FakePlaylistPlugin.total > 0, "Didn't execute plugin")
        self.assertFalse(self.confirmed, "Wasn't expecting a confirmation for %d invocations" % len(playlists))

    def test_handling_lots_of_songs_with_confirmation(self):
        plugin = Plugin(FakePlaylistPlugin)
        self.handler.plugin_enable(plugin)
        playlists = generate_playlists(MAX_PLAYLISTS + 1)
        self.handler.handle(plugin.id, self.library, self.mock_browser, playlists)
        self.assertTrue(self.confirmed, 'Should have confirmed %d invocations (Max=%d).' % (len(playlists), MAX_PLAYLISTS))
def test_mouseInteraction():
    """Exercise hover and drag behaviour of movable/immovable InfiniteLines.

    Drives the Qt scene with synthetic mouse events and checks that:
    * hovering highlights only the line under the cursor,
    * dragging a movable line moves it to the drop position (within a pixel),
    * dragging a non-movable line leaves its value untouched.
    """
    # Disable mouse-event rate limiting so synthetic events are not dropped.
    pg.setConfigOption('mouseRateLimit', (- 1))
    plt = pg.PlotWidget()
    plt.show()
    # No minimum press-to-drag delay; the test drags immediately.
    plt.scene().minDragTime = 0
    vline = plt.addLine(x=0, movable=True)
    hline = plt.addLine(y=0, movable=True)
    hline2 = plt.addLine(y=(- 1), movable=False)
    plt.setXRange((- 10), 10)
    plt.setYRange((- 10), 10)
    # --- drag the vertical line ---
    pos = plt.plotItem.vb.mapViewToScene(pg.Point(0, 5))
    pos2 = (pos - QtCore.QPointF(200, 200))
    mouseMove(plt, pos)
    assert ((vline.mouseHovering is True) and (hline.mouseHovering is False))
    mouseDrag(plt, pos, pos2, QtCore.Qt.MouseButton.LeftButton)
    # Allow up to one pixel of slack when comparing scene vs. view coords.
    px = vline.pixelLength(pg.Point(1, 0), ortho=True)
    assert (abs((vline.value() - plt.plotItem.vb.mapSceneToView(pos2).x())) <= px)
    # --- drag starting slightly away from any line: nothing should move ---
    pos = plt.plotItem.vb.mapViewToScene(pg.Point(5, 0))
    pos = (pos + QtCore.QPointF(0, 6))
    pos2 = (pos + QtCore.QPointF((- 20), (- 20)))
    mouseMove(plt, pos)
    assert ((vline.mouseHovering is False) and (hline.mouseHovering is False))
    mouseDrag(plt, pos, pos2, QtCore.Qt.MouseButton.LeftButton)
    assert (hline.value() == 0)
    # --- drag the horizontal line ---
    pos = plt.plotItem.vb.mapViewToScene(pg.Point(5, 0))
    pos2 = (pos - QtCore.QPointF(50, 50))
    mouseMove(plt, pos)
    assert ((vline.mouseHovering is False) and (hline.mouseHovering is True))
    mouseDrag(plt, pos, pos2, QtCore.Qt.MouseButton.LeftButton)
    px = hline.pixelLength(pg.Point(1, 0), ortho=True)
    assert (abs((hline.value() - plt.plotItem.vb.mapSceneToView(pos2).y())) <= px)
    # --- a non-movable line must ignore both hover and drag ---
    pos = plt.plotItem.vb.mapViewToScene(pg.Point(5, (- 1)))
    pos2 = (pos - QtCore.QPointF(50, 50))
    mouseMove(plt, pos)
    assert (hline2.mouseHovering == False)
    mouseDrag(plt, pos, pos2, QtCore.Qt.MouseButton.LeftButton)
    assert (hline2.value() == (- 1))
    plt.close()
def test_relevant_connections():
    """relevant_connections must mark exactly the from->to node pairs."""
    cases = [
        ((2, (0, 1), (1,)), [[0, 1], [0, 1]]),
        ((3, (0, 1), (0, 2)), [[1, 0, 1], [1, 0, 1], [0, 0, 0]]),
    ]
    for args, expected in cases:
        cm = connectivity.relevant_connections(*args)
        assert np.array_equal(cm, np.array(expected))
class Grammar():
    """Immutable, order-insensitive container for a set of grammar rules.

    Two grammars are equal iff they hold the same set of rules; the string
    form lists the rules' reprs sorted, one per line, framed by newlines.
    """

    def __init__(self, rules):
        # Freeze so equality/hashing are order-insensitive and rules can't mutate.
        self.rules = frozenset(rules)

    def __eq__(self, other):
        # Fix: previously raised AttributeError when compared against a
        # non-Grammar object; NotImplemented defers to the other operand.
        if not isinstance(other, Grammar):
            return NotImplemented
        return self.rules == other.rules

    def __hash__(self):
        # Fix: defining __eq__ alone made Grammar unhashable; hash the
        # frozenset so equal grammars hash equal.
        return hash(self.rules)

    def __str__(self):
        return '\n' + '\n'.join(sorted(repr(x) for x in self.rules)) + '\n'

    def __repr__(self):
        return str(self)
def BuildObservations(Trajectory, MaxOrder):
    """Count (source, target) sub-sequence observations up to MaxOrder.

    Walks every trajectory record, extracts sub-sequences of length
    2 .. MaxOrder+1 and feeds each (prefix, last-element) pair into the
    global counters via IncreaseCounter. Progress is logged every 1000
    records through VPrint.
    """
    VPrint('building observations')
    for count, record in enumerate(Trajectory, start=1):
        if count % 1000 == 0:
            VPrint(count)
        path = record[1]
        for order in range(2, MaxOrder + 2):
            for sequence in ExtractSubSequences(path, order):
                # Prefix is the source context, last element the target.
                IncreaseCounter(sequence[:-1], sequence[-1])
def graphite_electrolyte_exchange_current_density_Ramadass2004(c_e, c_s_surf, c_s_max, T):
    """Exchange-current density at the graphite/electrolyte interface.

    Butler-Volmer style sqrt dependence on electrolyte concentration,
    surface concentration and remaining capacity, with an Arrhenius
    temperature correction relative to 298.15 K. Parameter values follow
    the Ramadass 2004 parameterisation (per the function name).
    """
    m_ref = 4.854 * (10 ** (- 6))
    # Activation energy [J.mol-1] for the Arrhenius factor.
    activation_energy = 37480
    arrhenius = np.exp((activation_energy / pybamm.constants.R) * ((1 / 298.15) - (1 / T)))
    return (
        m_ref
        * arrhenius
        * (c_e ** 0.5)
        * (c_s_surf ** 0.5)
        * ((c_s_max - c_s_surf) ** 0.5)
    )
class PerDollar(EquityCommissionModel):
    """Commission model charging a fixed rate per dollar of notional traded."""

    def __init__(self, cost=DEFAULT_PER_DOLLAR_COST):
        # Coerce so integer/Decimal inputs behave uniformly downstream.
        self.cost_per_dollar = float(cost)

    def __repr__(self):
        return '{class_name}(cost_per_dollar={cost})'.format(
            class_name=self.__class__.__name__,
            cost=self.cost_per_dollar,
        )

    def calculate(self, order, transaction):
        # Per-share fee scales with price; total is sign-independent in amount.
        per_share_fee = transaction.price * self.cost_per_dollar
        return abs(transaction.amount) * per_share_fee
def patch_cfg_for_new_paths(nested_dict, patch):
    """Recursively overwrite leaf values of a (possibly nested) config.

    Any key present in `patch` has its value replaced, descending through
    nested dicts/DictConfigs and lists/ListConfigs in place. Returns the
    (mutated) input; non-mapping inputs are returned untouched.
    """
    if patch is None:
        print('Nothing to patch')
        return nested_dict
    if not isinstance(nested_dict, (dict, DictConfig)):
        # Leaf reached via recursion on a list element that isn't a mapping.
        return nested_dict
    for key, value in nested_dict.items():
        if isinstance(value, (dict, DictConfig)):
            nested_dict[key] = patch_cfg_for_new_paths(value, patch)
        elif isinstance(value, (list, ListConfig)):
            for idx, element in enumerate(value):
                value[idx] = patch_cfg_for_new_paths(element, patch)
        elif key in patch:
            print(f'Patched {key}: {nested_dict[key]} --> {patch[key]}')
            nested_dict[key] = patch.get(key, nested_dict[key])
    return nested_dict
def rebuild_col_unit_col(valid_col_units, col_unit, kmap):
    """Rewrite the column id of a col_unit through the foreign-key map.

    col_unit is an (agg_id, col_id, distinct) triple or None; the column is
    remapped only when it is both in kmap and among the valid columns. When
    the global DISABLE_DISTINCT flag is set the distinct marker is cleared.
    """
    if col_unit is None:
        return col_unit
    aggregation, column, is_distinct = col_unit
    if column in kmap and column in valid_col_units:
        column = kmap[column]
    if DISABLE_DISTINCT:
        is_distinct = None
    return (aggregation, column, is_distinct)
def process_remaining_strings(remaining_strings: Union[(str, List[str])]):
    """Parse leftover CLI tokens into a {key: value} dict.

    Accepts '--key value', '--key=value' and bare '--flag' (implicit True);
    values are interpreted with yaml.safe_load so '1', 'true', '[a, b]'
    etc. become typed Python objects. A single string or a list of strings
    is accepted.
    """

    def parse_string(s: str):
        # NOTE(review): replace('--', '') strips '--' anywhere in the token,
        # not only a leading prefix — confirm values never contain '--'.
        s = s.strip().replace('--', '')
        if ' ' in s:
            # Fix: maxsplit=1 so values containing spaces no longer crash
            # the tuple unpacking.
            (k, v) = s.split(' ', 1)
        elif '=' in s:
            # Fix: maxsplit=1 so values containing '=' are kept intact.
            (k, v) = s.split('=', 1)
        else:
            (k, v) = (s, 'True')
        return {k: yaml.safe_load(v)}

    if isinstance(remaining_strings, str):
        return parse_string(remaining_strings)
    # Replaced the side-effect list comprehension with a plain loop.
    remaining_strings_dict = {}
    for rs in remaining_strings:
        remaining_strings_dict.update(parse_string(rs))
    return remaining_strings_dict
class NonSynthTextColumn(WideTextColumn):
    """A WideTextColumn whose cells can be edited in place.

    Edits are written back under this column's tag on the row's underlying
    entry (see __row_edited).
    """

    can_edit = True

    def __row_edited(self, render, path, new: str, model: Gtk.TreeModel) -> None:
        # Gtk 'edited' signal callback: persist the new text for this row.
        print_d(f'Trying to edit {self.header_name} to {new!r}')
        model[path][0][self.header_name] = new
        model.path_changed(path)

    def __init__(self, model, tag):
        super().__init__(tag)
        # Hook the cell renderer's edit-commit signal, passing the model along.
        self._render.connect('edited', self.__row_edited, model)

    def _fetch_value(self, model, iter_):
        # Missing tags render as the empty string.
        return model.get_value(iter_).get(self.header_name, '')

    def _apply_value(self, model, iter_, cell, value):
        # Flatten multi-line values for single-line cell display.
        cell.set_property('text', value.replace('\n', ', '))
def returndatacopy(computation: BaseComputation) -> None:
    """RETURNDATACOPY opcode: copy a slice of the last call's return data to memory.

    Stack arguments (top first): mem_start_position, returndata_start_position,
    size. Raises OutOfBoundsRead when the requested slice extends past the end
    of the available return data.
    """
    (mem_start_position, returndata_start_position, size) = computation.stack_pop_ints(3)
    # Bounds check happens before memory extension or gas is charged for the copy.
    if ((returndata_start_position + size) > len(computation.return_data)):
        raise OutOfBoundsRead(f'Return data length is not sufficient to satisfy request. Asked for data from index {returndata_start_position} to {(returndata_start_position + size)}. Return data is {len(computation.return_data)} bytes in length.')
    computation.extend_memory(mem_start_position, size)
    # Copy cost is charged per 32-byte word, rounded up.
    word_count = (ceil32(size) // 32)
    copy_gas_cost = (word_count * constants.GAS_COPY)
    computation.consume_gas(copy_gas_cost, reason='RETURNDATACOPY fee')
    value = computation.return_data[returndata_start_position:(returndata_start_position + size)]
    computation.memory_write(mem_start_position, size, value)
def has_no_keywords(example):
    """Flag whether the example's 'content' lacks common Python keywords.

    Scans every line (case-insensitively) for 'def ', 'class ', 'for ' and
    'while ' and returns {'has_no_keywords': bool}.
    """
    keywords = ['def ', 'class ', 'for ', 'while ']
    found = any(
        keyword in line.lower()
        for line in example['content'].splitlines()
        for keyword in keywords
    )
    return {'has_no_keywords': not found}
def test_update_page_error_section(db):
    """Attaching a page from a locked section must fail validation."""
    page = Page.objects.first()
    # Lock the page's section so the page becomes un-attachable.
    locked_section = page.sections.first()
    locked_section.locked = True
    locked_section.save()
    questionset = QuestionSet.objects.exclude(pages=page).first()
    payload = {'pages': [page], 'locked': False}
    with pytest.raises(ValidationError):
        QuestionSetLockedValidator(questionset)(payload)
# NOTE(review): the bare lines below (and the two before `def post`) look like
# decorators whose '@route'/'@show'/... prefixes were stripped — confirm
# against the original module.
('/v1/superusers/users/<username>/sendrecovery')
_only
_if(features.SUPER_USERS)
_if(features.MAILING)
class SuperUserSendRecoveryEmail(ApiResource):
    """API resource that e-mails a password-recovery code to a user."""

    _fresh_login
    _not_prod

    ('sendInstallUserRecoveryEmail')
    _scope(scopes.SUPERUSER)
    def post(self, username):
        # Recovery codes only exist for database-backed authentication.
        if (app.config['AUTHENTICATION_TYPE'] != 'Database'):
            raise InvalidRequest('Cannot send a recovery e-mail for non-database auth')
        if SuperUserPermission().can():
            # Robot accounts are excluded by the lookup itself.
            user = pre_oci_model.get_nonrobot_user(username)
            if (user is None):
                raise NotFound()
            # Superuser accounts must not be recoverable through this path.
            if usermanager.is_superuser(username):
                raise InvalidRequest('Cannot send a recovery email for a superuser')
            code = pre_oci_model.create_reset_password_email_code(user.email)
            send_recovery_email(user.email, code)
            return {'email': user.email}
        raise Unauthorized()
def test_mypy_config_file(testdir, xdist_args):
    """Passing --mypy-config-file with stricter settings must flip results.

    The untyped function passes under default settings, then fails once
    disallow_untyped_defs is enabled via an explicit config file.
    """
    testdir.makepyfile('\n def pyfunc(x):\n return x * 2\n ')
    # One item per checked file plus the overall mypy status item.
    file_checks = 1
    status_checks = 1
    total_checks = file_checks + status_checks
    first_run = testdir.runpytest_subprocess('--mypy', *xdist_args)
    first_run.assert_outcomes(passed=total_checks)
    assert first_run.ret == 0
    config_path = testdir.makeini('\n [mypy]\n disallow_untyped_defs = True\n ')
    second_run = testdir.runpytest_subprocess('--mypy-config-file', config_path, *xdist_args)
    second_run.assert_outcomes(failed=total_checks)
def configure_shot(net, logger, args):
    """Prepare a network for SHOT-style test-time adaptation.

    Splits `net` into a frozen classifier head and a feature extractor, and
    builds an SGD optimizer over the extractor's parameters.

    Returns:
        (optimizer, classifier, ext) tuple.
    """
    logger.debug('---- Configuring SHOT ----')
    if (args.arch == 'tanet'):
        # TANet: the final fc is the classifier; swapping in Identity leaves
        # the rest of the network as the feature extractor.
        classifier = net.module.new_fc
        ext = net
        ext.module.new_fc = nn.Identity()
        for (k, v) in classifier.named_parameters():
            v.requires_grad = False
    else:
        # Freeze the logits parameters, take them as the classifier, and
        # rebuild the extractor from the remaining children reordered
        # ([3:] then [:2]; child index 2 is dropped — presumably the logits
        # module itself, TODO confirm).
        for (k, v) in net.named_parameters():
            if ('logits' in k):
                v.requires_grad = False
        classifier = nn.Sequential(*list(net.module.logits.children()))
        ext = (list(net.module.children())[3:] + list(net.module.children())[:2])
        ext = nn.Sequential(*ext)
    # NOTE(review): lr is read from module-level `args_shot`, not the `args`
    # parameter passed in — confirm this is intentional and not a typo.
    optimizer = optim.SGD(ext.parameters(), lr=args_shot.lr, momentum=0.9)
    return (optimizer, classifier, ext)
def read_cameras_text(path):
    """Parse a COLMAP-style cameras text file.

    Each non-comment line is 'CAMERA_ID MODEL WIDTH HEIGHT PARAMS...'.
    Lines that are empty or start with '#' are skipped.

    Returns:
        dict mapping camera_id (int) -> Camera record.
    """
    cameras = {}
    with open(path, 'r') as stream:
        for raw_line in stream:
            stripped = raw_line.strip()
            # Skip blank lines and '#' comments.
            if not stripped or stripped[0] == '#':
                continue
            fields = stripped.split()
            cam_id = int(fields[0])
            cameras[cam_id] = Camera(
                id=cam_id,
                model=fields[1],
                width=int(fields[2]),
                height=int(fields[3]),
                params=np.array(tuple(map(float, fields[4:]))),
            )
    return cameras
def get_omitted_any(disallow_any: bool, fail: MsgCallback, note: MsgCallback, orig_type: Type, options: Options, fullname: (str | None)=None, unexpanded_type: (Type | None)=None) -> AnyType:
    """Return the AnyType substituted for an omitted generic type argument.

    When disallow_any is set, report an error about the bare generic (with a
    dedicated message for non-generic builtins such as `list`) and return an
    Any flagged as from_error; otherwise return an Any flagged as
    from_omitted_generics.
    """
    if disallow_any:
        nongen_builtins = get_nongen_builtins(options.python_version)
        if (fullname in nongen_builtins):
            typ = orig_type
            # Suggest the typing-module alternative (e.g. list -> List).
            alternative = nongen_builtins[fullname]
            fail(message_registry.IMPLICIT_GENERIC_ANY_BUILTIN.format(alternative), typ, code=codes.TYPE_ARG)
        else:
            typ = (unexpanded_type or orig_type)
            type_str = (typ.name if isinstance(typ, UnboundType) else format_type_bare(typ, options))
            fail(message_registry.BARE_GENERIC.format(quote_type_string(type_str)), typ, code=codes.TYPE_ARG)
        base_type = get_proper_type(orig_type)
        base_fullname = (base_type.type.fullname if isinstance(base_type, Instance) else fullname)
        if ((options.python_version < (3, 9)) and (base_fullname in GENERIC_STUB_NOT_AT_RUNTIME_TYPES)):
            # Bug fix: this note's string literal was truncated and left
            # unterminated (a syntax error); restored the full message
            # including the documentation URL.
            note(
                'Subscripting classes that are not generic at runtime may require escaping, see https://mypy.readthedocs.io/en/stable/runtime_troubles.html#not-generic-runtime',
                typ,
                code=codes.TYPE_ARG,
            )
        any_type = AnyType(TypeOfAny.from_error, line=typ.line, column=typ.column)
    else:
        any_type = AnyType(TypeOfAny.from_omitted_generics, line=orig_type.line, column=orig_type.column)
    return any_type
def _get_beat_token(beat, strength, i_beat, n_beat):
    """Build a single beat token row for the event vocabulary.

    Returns a one-element list containing an N_DIMENSION-wide vector whose
    beat/strength/position slots are filled; all other dimensions stay 0.
    """
    row = [0] * N_DIMENSION
    row[DIMENSION['beat']] = preset_event2word['beat']['Beat_%d' % beat]
    row[DIMENSION['strength']] = strength
    row[DIMENSION['i_beat']] = i_beat
    row[DIMENSION['n_beat']] = n_beat
    # 1-based percentage position of this beat within the sequence.
    row[DIMENSION['p_beat']] = round(float(i_beat) / n_beat * 100) + 1
    return [row]
class GridAlgorithmicEnv(AlgorithmicEnv):
    """Algorithmic env whose input is a 2-D grid read with a 4-way head.

    The read head moves left/right/up/down over a grid of base-`base`
    digits; positions off the grid observe the sentinel value `self.base`.
    """

    # Action index -> direction name for the read head.
    MOVEMENTS = ['left', 'right', 'up', 'down']
    READ_HEAD_START = (0, 0)

    def __init__(self, rows, *args, **kwargs):
        # Number of rows per input column; columns come from the base env.
        self.rows = rows
        AlgorithmicEnv.__init__(self, *args, **kwargs)

    def _move(self, movement):
        # Translate a movement index into an (x, y) head displacement.
        named = self.MOVEMENTS[movement]
        (x, y) = self.read_head_position
        if (named == 'left'):
            x -= 1
        elif (named == 'right'):
            x += 1
        elif (named == 'up'):
            y -= 1
        elif (named == 'down'):
            y += 1
        else:
            raise ValueError('Unrecognized direction: {}'.format(named))
        self.read_head_position = (x, y)

    def generate_input_data(self, size):
        # `size` columns, each holding `rows` random digits in [0, base).
        return [[self.np_random.randint(self.base) for _ in range(self.rows)] for __ in range(size)]

    def _get_obs(self, pos=None):
        # Observation at `pos` (defaults to the head); off-grid -> sentinel.
        if (pos is None):
            pos = self.read_head_position
        (x, y) = pos
        if any(((idx < 0) for idx in pos)):
            return self.base
        try:
            return self.input_data[x][y]
        except IndexError:
            return self.base

    def _render_observation(self):
        # ASCII rendering of the grid with the head's cell highlighted green.
        x = self.read_head_position
        label = 'Observation Grid : '
        x_str = ''
        for j in range((- 1), (self.rows + 1)):
            # All rows after the first are padded to align under the label.
            if (j != (- 1)):
                x_str += (' ' * len(label))
            for i in range((- 2), (self.input_width + 2)):
                if ((i == x[0]) and (j == x[1])):
                    x_str += colorize(self._get_str_obs((i, j)), 'green', highlight=True)
                else:
                    x_str += self._get_str_obs((i, j))
            x_str += '\n'
        x_str = (label + x_str)
        return x_str
def get_features_user(mode='train'):
    """Build a TF1 input pipeline over the user-embedding TFRecord files.

    Returns an initializable iterator that yields parsed batches of 100000
    records. NOTE(review): `mode` is currently unused — confirm whether
    train/eval were meant to select different files.
    """
    with tf.name_scope('input'):
        files = tf.data.Dataset.list_files(user_embedding)
        # Read up to 8 TFRecord shards concurrently.
        ds = files.apply(tf.contrib.data.parallel_interleave(tf.data.TFRecordDataset, cycle_length=8))
        ds = ds.map(_parse_record, num_parallel_calls=8).batch(100000).prefetch(1)
        iterator = ds.make_initializable_iterator()
        return iterator
def test_deprecated_alias(recwarn_always):
    """A deprecated alias must still work, warn, and carry a doc notice."""
    assert old_hotness() == 'new hotness'
    warning = recwarn_always.pop(TrioAsyncioDeprecationWarning)
    warning_text = warning.message.args[0]
    for fragment in (
        'test_deprecate.old_hotness is deprecated',
        '1.23',
        'test_deprecate.new_hotness instead',
        'issues/1',
    ):
        assert fragment in warning_text
    # The docstring is rewritten to carry the deprecation notice too.
    for fragment in (
        '.. deprecated:: 1.23',
        'test_deprecate.new_hotness instead',
        'issues/1>`__',
    ):
        assert fragment in old_hotness.__doc__
class get_loss(nn.Module):
    """Combined Lovasz + cross-entropy segmentation loss.

    Optional inverse-frequency class weights (cube-root smoothed) are read
    from config['dataset_params']['seg_labelweights']; the configured
    ignore_label is excluded from both terms.
    """

    def __init__(self, config):
        super().__init__()
        self.config = config
        class_weights = None
        if 'seg_labelweights' in config['dataset_params']:
            counts = config['dataset_params']['seg_labelweights']
            frequencies = counts / np.sum(counts)
            # Inverse-frequency reweighting, smoothed by a cube root.
            class_weights = torch.Tensor(np.power(np.amax(frequencies) / frequencies, 1 / 3.0))
        ignore = config['dataset_params']['ignore_label']
        self.ce_loss = nn.CrossEntropyLoss(weight=class_weights, ignore_index=ignore)
        self.lovasz_loss = Lovasz_loss(ignore=ignore)

    def forward(self, data_dict):
        logits = data_dict['sparse_logits']
        labels = data_dict['sparse_label']
        lovasz = self.lovasz_loss(F.softmax(logits, dim=1), labels)
        ce = self.ce_loss(logits, labels)
        total = lovasz + ce
        # Expose total and per-term losses for logging.
        data_dict['loss'] = total
        data_dict['loss_sparse'] = total
        data_dict['loss_main_ce'] = ce
        data_dict['loss_main_lovasz'] = lovasz
        return data_dict
def get_ipc_path(pipe=None):
    """Locate the Discord IPC socket/pipe on this machine.

    Scans the platform's temp directory (and, on Linux/macOS, the sandboxed
    snap/flatpak subdirectories) for an entry named 'discord-ipc-*',
    optionally narrowed by a specific pipe number/suffix.

    Returns:
        The entry's path string, or None when nothing matches.
    """
    prefix = 'discord-ipc-'
    if pipe:
        prefix = f'{prefix}{pipe}'
    if sys.platform in ('linux', 'darwin'):
        tempdir = os.environ.get('XDG_RUNTIME_DIR') or tempfile.gettempdir()
        subdirs = ['.', 'snap.discord', 'app/com.discordapp.Discord', 'app/com.discordapp.DiscordCanary']
    elif sys.platform == 'win32':
        tempdir = '\\\\?\\pipe'
        subdirs = ['.']
    else:
        # Unsupported platform.
        return
    for sub in subdirs:
        base = os.path.abspath(os.path.join(tempdir, sub))
        # The named-pipe namespace on Windows is not a real directory.
        if sys.platform != 'win32' and not os.path.isdir(base):
            continue
        for entry in os.scandir(base):
            if entry.name.startswith(prefix) and os.path.exists(entry):
                return entry.path
class Effect6144(BaseEffect):
    """Overheat effect: boosts missile-related attributes while overloaded."""

    # Effect category tag — presumably consumed by the effect framework's
    # dispatch; confirm against BaseEffect.
    type = 'overheat'

    def handler(fit, module, context, projectionRange, **kwargs):
        # NOTE(review): no `self`/@staticmethod — the framework appears to
        # call this as an unbound function; confirm against BaseEffect.
        # Apply the module's overload strength bonus to each missile attribute.
        for tgtAttr in ('aoeCloudSizeBonus', 'explosionDelayBonus', 'missileVelocityBonus', 'maxVelocityModifier', 'aoeVelocityBonus'):
            module.boostItemAttr(tgtAttr, module.getModifiedItemAttr('overloadTrackingModuleStrengthBonus'), **kwargs)
class Segmentation2Face(object):
    """Thin wrapper around FaceGAN that renders faces from segmentation maps."""

    def __init__(self, base_dir='./', in_size=1024, out_size=None, model=None, channel_multiplier=2, narrow=1, key=None, is_norm=True, device='cuda'):
        self.facegan = FaceGAN(base_dir, in_size, out_size, model, channel_multiplier, narrow, key, is_norm, device=device)

    def process(self, segf, aligned=True):
        """Run FaceGAN on one segmentation map.

        Returns (output, [input], [output]) — the list wrappers mirror the
        multi-face API of sibling processors. NOTE(review): `aligned` is
        accepted for interface parity but unused here; confirm.
        """
        restored = self.facegan.process(segf)
        return (restored, [segf], [restored])
class InternationalEquityTestCase(WithInternationalPricingPipelineEngine, zf.ZiplineTestCase):
    """Pipeline tests across US (XNYS), CA (XTSE) and GB (XLON) equities.

    NOTE(review): the bare `_space(...)` line and the bare list literals
    before the currency tests look like decorators whose
    '@parameter_space'/'@parameterized' prefix was stripped, and
    make_equity_info/make_exchanges_info read like stripped @classmethods —
    confirm against the original module.
    """

    START_DATE = T('2014-01-02')
    END_DATE = T('2014-02-06')
    # One exchange per country under test.
    EXCHANGE_INFO = pd.DataFrame.from_records([{'exchange': 'XNYS', 'country_code': 'US'}, {'exchange': 'XTSE', 'country_code': 'CA'}, {'exchange': 'XLON', 'country_code': 'GB'}])

    def make_equity_info(cls):
        # 20 assets per exchange with staggered 5-session lifetimes.
        out = pd.concat([make_rotating_equity_info(num_assets=20, first_start=cls.START_DATE, frequency=get_calendar(exchange).day, periods_between_starts=1, asset_lifetime=5, exchange=exchange) for exchange in cls.EXCHANGE_INFO.exchange], ignore_index=True)
        assert_equal(out.end_date.max(), cls.END_DATE)
        return out

    def make_exchanges_info(cls, equities, futures, root_symbols):
        return cls.EXCHANGE_INFO

    _space(domain=[CA_EQUITIES, US_EQUITIES, GB_EQUITIES])
    def test_generic_pipeline_with_explicit_domain(self, domain):
        # A generic OHLCV pipeline run on an explicit domain must produce
        # exactly the assets alive in the window, on that calendar's sessions.
        calendar = domain.calendar
        pipe = Pipeline({'open': EquityPricing.open.latest, 'high': EquityPricing.high.latest, 'low': EquityPricing.low.latest, 'close': EquityPricing.close.latest, 'volume': EquityPricing.volume.latest}, domain=domain)
        sessions = self.daily_bar_sessions[calendar.name]
        (start, end) = sessions[[(- 17), (- 10)]]
        result = self.run_pipeline(pipe, start, end)
        all_assets = self.assets_by_calendar[calendar]
        expected_assets = [a for a in all_assets if alive_in_range(a, start, end, include_asset_start_date=False)]
        expected_dates = sessions[(- 17):(- 9)]
        for col in pipe.columns:
            result_data = result[col].unstack()
            assert_equal(pd.Index(expected_assets), result_data.columns)
            assert_equal(expected_dates, result_data.index)
            for asset in expected_assets:
                for date in expected_dates:
                    value = result_data.at[(date, asset)]
                    self.check_expected_latest_value(calendar, col, date, asset, value)

    ([('US', US_EQUITIES, 'XNYS'), ('CA', CA_EQUITIES, 'XTSE'), ('GB', GB_EQUITIES, 'XLON')])
    def test_currency_convert_prices(self, name, domain, calendar_name):
        # fx()-converted closes must equal raw closes times the mid rates
        # as of the prior session.
        pipe = Pipeline({'close': EquityPricing.close.latest, 'close_USD': EquityPricing.close.fx('USD').latest, 'close_CAD': EquityPricing.close.fx('CAD').latest, 'close_EUR': EquityPricing.close.fx('EUR').latest, 'close_GBP': EquityPricing.close.fx('GBP').latest}, domain=domain)
        sessions = self.daily_bar_sessions[calendar_name]
        execution_sessions = sessions[(- 17):(- 9)]
        (start, end) = execution_sessions[[0, (- 1)]]
        result = self.run_pipeline(pipe, start, end)
        closes_2d = result['close'].unstack(fill_value=np.nan)
        all_currency_codes = self.daily_bar_currency_codes[calendar_name]
        currency_codes = all_currency_codes.loc[[a.sid for a in closes_2d.columns]]
        fx_reader = self.in_memory_fx_rate_reader
        for target in self.FX_RATES_CURRENCIES:
            result_2d = result[('close_' + target)].unstack(fill_value=np.nan)
            # Rates are looked up one session earlier than the output dates.
            expected_rates = fx_reader.get_rates(rate='mid', quote=target, bases=np.array(currency_codes, dtype=object), dts=sessions[(- 18):(- 10)])
            expected_result_2d = (closes_2d * expected_rates)
            assert_equal(result_2d, expected_result_2d)

    ([('US', US_EQUITIES, 'XNYS'), ('CA', CA_EQUITIES, 'XTSE'), ('GB', GB_EQUITIES, 'XLON')])
    def test_only_currency_converted_data(self, name, domain, calendar_name):
        # A pipeline with ONLY fx-converted columns must still load prices.
        pipe = Pipeline({'close_USD': EquityPricing.close.fx('USD').latest, 'close_EUR': EquityPricing.close.fx('EUR').latest}, domain=domain)
        (start, end) = self.daily_bar_sessions[calendar_name][(- 2):]
        result = self.run_pipeline(pipe, start, end)
        calendar = get_calendar(calendar_name)
        daily_bars = self.daily_bar_data[calendar_name]
        currency_codes = self.daily_bar_currency_codes[calendar_name]
        for ((dt, asset), row) in result.iterrows():
            # Prices (and rates) come from the session before the output date.
            price_date = (dt - calendar.day)
            expected_close = daily_bars[asset].loc[(price_date, 'close')]
            expected_base = currency_codes.loc[asset]
            expected_rate_USD = self.in_memory_fx_rate_reader.get_rate_scalar(rate='mid', quote='USD', base=expected_base, dt=price_date.asm8)
            expected_price = (expected_close * expected_rate_USD)
            assert_equal(row.close_USD, expected_price)
            expected_rate_EUR = self.in_memory_fx_rate_reader.get_rate_scalar(rate='mid', quote='EUR', base=expected_base, dt=price_date.asm8)
            expected_price = (expected_close * expected_rate_EUR)
            assert_equal(row.close_EUR, expected_price)

    def test_explicit_specialization_matches_implicit(self):
        # EquityPricing specialized to US_EQUITIES must equal USEquityPricing.
        pipeline_specialized = Pipeline({'open': EquityPricing.open.latest, 'high': EquityPricing.high.latest, 'low': EquityPricing.low.latest, 'close': EquityPricing.close.latest, 'volume': EquityPricing.volume.latest}, domain=US_EQUITIES)
        dataset_specialized = Pipeline({'open': USEquityPricing.open.latest, 'high': USEquityPricing.high.latest, 'low': USEquityPricing.low.latest, 'close': USEquityPricing.close.latest, 'volume': USEquityPricing.volume.latest})
        sessions = self.daily_bar_sessions['XNYS']
        self.assert_identical_results(pipeline_specialized, dataset_specialized, sessions[1], sessions[(- 1)])

    def test_cannot_convert_volume_data(self):
        # Volume is share counts, not currency — fx() must be rejected.
        with self.assertRaises(TypeError) as exc:
            EquityPricing.volume.fx('EUR')
        assert_equal(str(exc.exception), 'The .fx() method cannot be called on EquityPricing.volume because it does not produce currency-denominated data.')

    def check_expected_latest_value(self, calendar, col, date, asset, value):
        # NaN only outside the asset's [start_date, end_date] window;
        # otherwise the value must match the prior session's bar.
        if np.isnan(value):
            self.assertTrue(((date <= asset.start_date) or (date > asset.end_date)))
        else:
            self.assertTrue((asset.start_date < date <= asset.end_date))
            bars = self.daily_bar_data[calendar.name]
            expected_value = bars[asset.sid].loc[((date - calendar.day), col)]
            assert_equal(value, expected_value)

    def assert_identical_results(self, left, right, start_date, end_date):
        # Run both pipelines over the same window and require equal frames.
        left_result = self.run_pipeline(left, start_date, end_date)
        right_result = self.run_pipeline(right, start_date, end_date)
        assert_equal(left_result, right_result)
def run_in_process_group(filename: str, calls: List[Dict[(str, Any)]]):
    """Run one worker process per call entry and collect their results.

    Each call dict supplies world_size/function/inputs; workers report back
    through a shared queue and are released by an event once every result
    has been drained. Any process group initialized in this (parent)
    process is torn down first.
    """
    if torch.distributed.is_initialized():
        torch.distributed.destroy_process_group()
    result_queue = Queue()
    release_event = Event()
    workers = []
    for rank, call in enumerate(calls):
        worker = Process(
            target=init_and_run_process,
            args=(rank, call['world_size'], filename, call['function'], call['inputs'], result_queue, release_event),
        )
        worker.start()
        workers.append(worker)
    # Drain the queue BEFORE joining: a child blocked on a full queue
    # would otherwise deadlock join().
    results = [result_queue.get() for _ in workers]
    release_event.set()
    for worker in workers:
        worker.join()
    return results
class Tishidden(TestCase):
    """Tests for is_hidden() and unix-like dot-file conventions.

    NOTE(review): the bare tuple line below looks like a stripped
    @skipIf(is_win, 'unix-like hidden') decorator — confirm.
    """

    (is_win, 'unix-like hidden')
    def test_leading_dot(self):
        assert is_hidden(fsnative('.'))
        assert is_hidden(fsnative('foo/.bar'))

    def test_normal_names_not_hidden(self):
        assert (not is_hidden(fsnative('foo')))
        # Only the final path component decides hidden-ness.
        assert (not is_hidden(fsnative('.foo/bar')))

    def test_multiple_dots(self):
        # Interior dots don't make a name hidden.
        assert (not is_hidden(fsnative('...and Justice For All.flac')))
def upgrade(op, tables, tester):
    """Migration: add optional profile columns to `user` and seed prompts.

    Adds nullable company/family_name/given_name columns, inserts the two
    new user-prompt kinds, then populates the new columns with test data.
    """
    profile_columns = ('company', 'family_name', 'given_name')
    for column_name in profile_columns:
        op.add_column('user', sa.Column(column_name, UTF8CharField(length=255), nullable=True))
    # New prompt kinds that drive the profile-completion UI.
    op.bulk_insert(tables.userpromptkind, [{'name': 'enter_name'}, {'name': 'enter_company'}])
    for column_name in profile_columns:
        tester.populate_column('user', column_name, tester.TestDataType.UTF8Char)
def check_client_headers(expected_headers: dict[(str, str)], environ: dict[(str, str)]):
    """Verify a WSGI request carries the expected client headers.

    Header names are translated to their WSGI 'HTTP_*' environ keys.
    Returns None when everything matches, otherwise a multi-line
    human-readable message describing every mismatch.
    """
    def _environ_value(header_name):
        return environ.get('HTTP_{}'.format(header_name.upper().replace('-', '_')))

    wrong_headers = {
        name: _environ_value(name)
        for name, expected in expected_headers.items()
        if _environ_value(name) != expected
    }
    if not wrong_headers:
        return None
    message = '\n'.join((f"Expected '{expected_headers[name]}' for '{name}', got '{value}'." for (name, value) in wrong_headers.items()))
    return f'''Incompatible client:
{message}
Server is version {randovania.VERSION}, please confirm you're updated.'''
# NOTE(review): the next line appears to be a '@pytest.mark'-style decorator
# whose prefix was stripped — confirm against the original test module.
.parametrize(('variable', 'quote', 'prefix'), DEFAULT_PATTERN_PRODUCTS)
def test_set_default_pattern(temp_dir, helpers, variable, quote, prefix):
    """Setting a version through the regex source must rewrite only the
    matched version assignment in the target file, for every combination of
    variable name, quote style and version prefix."""
    source = RegexSource(str(temp_dir), {'path': 'a/b'})
    file_path = ((temp_dir / 'a') / 'b')
    file_path.ensure_parent_dir_exists()
    file_path.write_text(helpers.dedent(f'''
__all__ = [{quote}{variable}{quote}, {quote}foo{quote}]
{variable} = {quote}{prefix}0.0.1{quote}
def foo():
    return {quote}bar{quote}
'''))
    with temp_dir.as_cwd():
        # Write the new version, then read it back through the same source.
        source.set_version('foo', source.get_version_data())
    assert (source.get_version_data()['version'] == 'foo')
class All2AllVInfo(object):
    """Metadata bundle for a variable-sized all-to-all collective.

    NOTE(review): the fields use dataclasses.field(...) defaults but no
    @dataclass decorator is visible here — it may have been stripped;
    without it these annotations are plain class attributes.
    """

    # Presumably the summed embedding dims owned by each rank — confirm.
    dims_sum_per_rank: List[int]
    # Global batch size and this rank's local batch size(s).
    B_global: int
    B_local: int
    B_local_list: List[int]
    # Per-feature local embedding dims — presumably; confirm against callers.
    D_local_list: List[int]
    # Split sizes for the all-to-all input/output tensors.
    input_split_sizes: List[int] = field(default_factory=list)
    output_split_sizes: List[int] = field(default_factory=list)
    # Optional codecs for quantized communication.
    codecs: Optional[QuantizedCommCodecs] = None
class SawyerPlateSlideEnv(SawyerXYZEnv):
    """Sawyer 'plate slide' task: push a puck across the table into a goal.

    NOTE(review): `model_name` reads like a stripped @property, and the bare
    `_assert_task_is_set` line before step() like a stripped decorator —
    confirm against the original source.
    """

    def __init__(self):
        # Workspace / object / goal bounds (world frame).
        goal_low = ((- 0.1), 0.85, 0.02)
        goal_high = (0.1, 0.9, 0.02)
        hand_low = ((- 0.5), 0.4, 0.05)
        hand_high = (0.5, 1, 0.5)
        obj_low = (0.0, 0.6, 0.015)
        obj_high = (0.0, 0.6, 0.015)
        super().__init__(self.model_name, hand_low=hand_low, hand_high=hand_high)
        self.init_config = {'obj_init_angle': 0.3, 'obj_init_pos': np.array([0.0, 0.6, 0.015], dtype=np.float32), 'hand_init_pos': np.array((0, 0.6, 0.2), dtype=np.float32)}
        self.goal = np.array([0.0, 0.85, 0.02])
        self.obj_init_pos = self.init_config['obj_init_pos']
        self.obj_init_angle = self.init_config['obj_init_angle']
        self.hand_init_pos = self.init_config['hand_init_pos']
        # Joint object+goal sampling space used when random_init is on.
        self._random_reset_space = Box(np.hstack((obj_low, goal_low)), np.hstack((obj_high, goal_high)))
        self.goal_space = Box(np.array(goal_low), np.array(goal_high))

    def model_name(self):
        return full_v1_path_for('sawyer_xyz/sawyer_plate_slide.xml')

    _assert_task_is_set
    def step(self, action):
        ob = super().step(action)
        (reward, reachDist, pullDist) = self.compute_reward(action, ob)
        # Success when the puck is within 0.08 of the goal (XY distance).
        info = {'reachDist': reachDist, 'goalDist': pullDist, 'epRew': reward, 'pickRew': None, 'success': float((pullDist <= 0.08))}
        return (ob, reward, False, info)

    def _get_pos_objects(self):
        return self.data.get_geom_xpos('objGeom')

    def _set_obj_xyz(self, pos):
        # Write the puck's 2-DOF position directly into qpos slots 9:11.
        qpos = self.data.qpos.flat.copy()
        qvel = self.data.qvel.flat.copy()
        qpos[9:11] = pos
        self.set_state(qpos, qvel)

    def reset_model(self):
        self._reset_hand()
        self._target_pos = self.goal.copy()
        self.obj_init_pos = self.init_config['obj_init_pos']
        self.objHeight = self.data.get_geom_xpos('objGeom')[2]
        if self.random_init:
            # First 3 entries are the object position, the rest the goal.
            obj_pos = self._get_state_rand_vec()
            self.obj_init_pos = obj_pos[:3]
            goal_pos = obj_pos[3:]
            self._target_pos = goal_pos
        self.sim.model.body_pos[self.model.body_name2id('cabinet')] = self._target_pos
        self._set_obj_xyz(np.zeros(2))
        # XY distance from start to goal; scales the pull reward below.
        self.maxDist = np.linalg.norm((self.obj_init_pos[:(- 1)] - self._target_pos[:(- 1)]))
        self.target_reward = ((1000 * self.maxDist) + (1000 * 2))
        return self._get_obs()

    def _reset_hand(self):
        super()._reset_hand(10)
        (rightFinger, leftFinger) = (self._get_site_pos('rightEndEffector'), self._get_site_pos('leftEndEffector'))
        self.init_fingerCOM = ((rightFinger + leftFinger) / 2)

    def compute_reward(self, actions, obs):
        """Return [reward, reachDist, pullDist] for the current observation."""
        del actions
        objPos = obs[3:6]
        (rightFinger, leftFinger) = (self._get_site_pos('rightEndEffector'), self._get_site_pos('leftEndEffector'))
        fingerCOM = ((rightFinger + leftFinger) / 2)
        pullGoal = self._target_pos
        reachDist = np.linalg.norm((objPos - fingerCOM))
        # XY-plane distance only; height is ignored.
        pullDist = np.linalg.norm((objPos[:(- 1)] - pullGoal[:(- 1)]))
        c1 = 1000
        c2 = 0.01
        c3 = 0.001
        if (reachDist < 0.05):
            # Dense shaped pull reward once the gripper is near the puck.
            pullRew = ((1000 * (self.maxDist - pullDist)) + (c1 * (np.exp(((- (pullDist ** 2)) / c2)) + np.exp(((- (pullDist ** 2)) / c3)))))
            pullRew = max(pullRew, 0)
        else:
            pullRew = 0
        reward = ((- reachDist) + pullRew)
        return [reward, reachDist, pullDist]
class GaPrinter(StrPrinter):
    """StrPrinter subclass adding ANSI colouring for functions, basis
    vectors and derivatives, plus an optional multi-line dict mode."""

    # Extra settings layered over StrPrinter's defaults without copying them.
    _default_settings = ChainMap({'dict_mode': False, 'derivative_color': None, 'function_color': None, 'basis_vector_color': None}, StrPrinter._default_settings)

    # Well-known function names printed plainly (uncoloured) with their args.
    function_names = ('acos', 'acosh', 'acot', 'acoth', 'arg', 'asin', 'asinh', 'atan', 'atan2', 'atanh', 'ceiling', 'conjugate', 'cos', 'cosh', 'cot', 'coth', 'exp', 'floor', 'im', 'log', 're', 'root', 'sin', 'sinh', 'sqrt', 'sign', 'tan', 'tanh', 'Abs')

    def _print_Function(self, expr):
        # Known fixed-arity functions print as name(args); anything else is
        # printed as its bare name in the configured function colour.
        name = expr.func.__name__
        if (expr.func.nargs is not None):
            if (name in GaPrinter.function_names):
                return (expr.func.__name__ + ('(%s)' % self.stringify(expr.args, ', ')))
        return _apply_ansi_color(self._settings['function_color'], ('%s' % (name,)))

    def _print_BasisVectorSymbol(self, expr):
        return _apply_ansi_color(self._settings['basis_vector_color'], self._print_Symbol(expr))

    def _print_Derivative(self, expr):
        # Render derivatives as D{x}[^n]{y}... followed by the function.
        function = expr.args[0]
        diff_args = expr.args[1:]
        xi = []
        ni = []
        for (x, n) in diff_args:
            # NOTE(review): `xi` stores already-printed strings, yet this
            # membership test compares the raw symbol `x` against them, so
            # the order-merging branch may never fire — confirm intended.
            if (x in xi):
                i = xi.index(x)
                ni[i] += n
            else:
                xi.append(self._print(x))
                ni.append(n)
        s = 'D'
        for (x, n) in zip(xi, ni):
            s += (('{' + str(x)) + '}')
            if (n > 1):
                s += ('^' + str(n))
        s += str(self._print(function))
        return _apply_ansi_color(self._settings['derivative_color'], s)

    def _print_dict(self, expr):
        # dict_mode prints one 'key -> value' pair per line.
        if (not self._settings['dict_mode']):
            return super()._print_dict(expr)
        return '\n'.join(('{} -> {}'.format(self._print(k), self._print(v)) for (k, v) in expr.items()))
_canonicalize
_specialize
_rewriter([Subtensor])
def local_useless_subtensor(fgraph, node):
    """Remove a `Subtensor` that provably takes its whole input.

    Returns ``[node.inputs[0]]`` (replace the node with its input) when
    every index entry is a full slice ``[:]`` over its dimension; returns
    ``False``/``None`` when the rewrite does not apply.
    """
    # An empty index list slices nothing: x[()] is x.
    if (not node.op.idx_list):
        return [node.inputs[0]]
    # Inferred shapes are required to prove a slice covers a full dimension.
    if (not hasattr(fgraph, 'shape_feature')):
        return
    shape_of = fgraph.shape_feature.shape_of
    cdata = get_constant_idx(node.op.idx_list, node.inputs, allow_partial=True, only_process_constants=True)
    for (pos, idx) in enumerate(cdata):
        if (not isinstance(idx, slice)):
            # Integer indexing drops a dimension; never a no-op.
            return False
        # Any explicit non-zero start or non-unit step means a real slice.
        if ((idx.start is not None) and (idx.start != 0)):
            return False
        if ((idx.step is not None) and (idx.step != 1)):
            return False
        length_pos = shape_of[node.inputs[0]][pos]
        if isinstance(idx.stop, (int, np.integer)):
            # Constant stop: the slice is a no-op only if stop provably
            # reaches (or exceeds -- slicing clamps) the dimension length.
            # Default to sys.maxsize so an unknown length rejects below.
            length_pos_data = sys.maxsize
            try:
                length_pos_data = get_underlying_scalar_constant_value(length_pos, only_process_constants=True)
            except NotScalarConstantError:
                pass
            if (idx.stop < length_pos_data):
                return False
        elif isinstance(idx.stop, Variable):
            length_pos_shape_i = idx.stop
            # Strip a scalar<->tensor conversion from whichever side has
            # one so both sides can be compared directly.
            if (length_pos_shape_i.owner and isinstance(length_pos_shape_i.owner.op, ScalarFromTensor)):
                length_pos_shape_i = length_pos_shape_i.owner.inputs[0]
            elif (length_pos.owner and isinstance(length_pos.owner.op, TensorFromScalar)):
                length_pos = length_pos.owner.inputs[0]
            else:
                return False
            assert (str(length_pos.type.dtype) == 'int64')
            assert (str(length_pos_shape_i.type.dtype) in ['int8', 'int16', 'int32', 'int64'])
            # Symbolic stop is a no-op only if it is literally this
            # dimension's length variable.
            if (length_pos_shape_i != length_pos):
                return False
        elif (idx.stop is None):
            # Open-ended slice always reaches the end of the dimension.
            continue
        else:
            return False
    # Every indexed dimension is fully covered: drop the Subtensor.
    return [node.inputs[0]]
class _SwapNetworkToZZSWAP(cirq.PointOptimizer):
    """Point optimizer that lowers each ``SwapNetworkProblemUnitary`` to an
    equivalent ZZ-SWAP operation sequence, leaving other operations alone."""

    def optimization_at(self, circuit: 'cirq.Circuit', index: int, op: 'cirq.Operation') -> Optional[cirq.PointOptimizationSummary]:
        """Return a rewrite summary for `op`, or None to leave it unchanged."""
        gate = op.gate
        if not isinstance(gate, SwapNetworkProblemUnitary):
            # Not a swap-network problem unitary: nothing to rewrite.
            return None
        replacement = compile_problem_unitary_to_zzswap(gate.problem_graph, gate.gamma, op.qubits)
        return cirq.PointOptimizationSummary(clear_span=1, clear_qubits=op.qubits, new_operations=replacement)
def test_parameterassignment():
    """Equality, XML round-trip and version validation of ParameterAssignment."""
    original = OSC.ParameterAssignment('param1', 1)
    prettyprint(original.get_element())
    duplicate = OSC.ParameterAssignment('param1', 1)
    different = OSC.ParameterAssignment('param1', 2)
    assert original == duplicate
    assert original != different
    # Parsing the serialized element must reproduce an equal object.
    roundtrip = OSC.ParameterAssignment.parse(original.get_element())
    assert roundtrip == original
    # Valid in every supported OpenSCENARIO minor version.
    for minor in (0, 1, 2):
        assert version_validation('ParameterAssignment', original, minor) == ValidationResponse.OK
.functions
.parametrize('df, column_name, dtype, ignore_exception, expected', [(pd.DataFrame({'a': [1, 2], 'b': [3, 4]}), ['a', 'b'], str, False, pd.DataFrame({'a': ['1', '2'], 'b': ['3', '4']})), (pd.DataFrame({'a': [1, 2], 'b': [3, 4]}), ['b', 'a'], str, False, pd.DataFrame({'a': ['1', '2'], 'b': ['3', '4']})), (pd.DataFrame({'a': [1, 2], 'b': [3, 4]}), ['a'], str, False, pd.DataFrame({'a': ['1', '2'], 'b': [3, 4]})), (pd.DataFrame({'a': [1, 2], 'b': [3, 4]}), pd.Index(['a', 'b']), str, False, pd.DataFrame({'a': ['1', '2'], 'b': ['3', '4']})), (pd.DataFrame({'a': [1, 2], 'b': [3, 4]}), ['a', 'b'], str, 'keep_values', pd.DataFrame({'a': ['1', '2'], 'b': ['3', '4']})), (pd.DataFrame({'a': [1, 2], 'b': [3, 4]}), ['a', 'b'], str, 'fillna', pd.DataFrame({'a': ['1', '2'], 'b': ['3', '4']})), (pd.DataFrame({'a': ['a', 1], 'b': ['b', 2]}), ['a', 'b'], int, 'fillna', pd.DataFrame({'a': [None, 1], 'b': [None, 2]}))])
def test_multiple_columns(df, column_name, dtype, ignore_exception, expected):
    """change_type over several columns must match the expected frame."""
    assert_frame_equal(
        df.change_type(column_name, dtype=dtype, ignore_exception=ignore_exception),
        expected,
    )
class FakeOsModuleLowLevelFileOpTest(FakeOsModuleTestBase):
    """Tests for the fake ``os`` module's low-level file-descriptor API:
    ``os.open``/``read``/``write``/``close``/``sendfile`` and the access-mode
    and creation-mode semantics on POSIX vs. Windows."""

    def setUp(self):
        # 18 == 0o022: pin the umask so mode assertions are deterministic.
        os.umask(18)
        super(FakeOsModuleLowLevelFileOpTest, self).setUp()

    def test_open_read_only(self):
        file_path = self.make_path('file1')
        self.create_file(file_path, contents=b'contents')
        file_des = self.os.open(file_path, os.O_RDONLY)
        self.assertEqual(b'contents', self.os.read(file_des, 8))
        # Writing through a read-only descriptor must fail with EBADF.
        self.assert_raises_os_error(errno.EBADF, self.os.write, file_des, b'test')
        self.os.close(file_des)

    def test_open_read_only_write_zero_bytes_posix(self):
        self.check_posix_only()
        file_path = self.make_path('file1')
        self.create_file(file_path, contents=b'contents')
        file_des = self.os.open(file_path, os.O_RDONLY)
        self.assert_raises_os_error(errno.EBADF, self.os.write, file_des, b'test')
        self.os.close(file_des)

    def test_open_read_only_write_zero_bytes_windows(self):
        # Windows: writing b'' to a read-only fd succeeds with 0 bytes.
        self.check_windows_only()
        file_path = self.make_path('file1')
        self.create_file(file_path, contents=b'contents')
        file_des = self.os.open(file_path, os.O_RDONLY)
        self.assertEqual(0, self.os.write(file_des, b''))
        self.os.close(file_des)

    def test_open_write_only(self):
        file_path = self.make_path('file1')
        self.create_file(file_path, contents=b'contents')
        file_des = self.os.open(file_path, os.O_WRONLY)
        self.assertEqual(4, self.os.write(file_des, b'test'))
        # Write without O_TRUNC overwrites in place from offset 0.
        self.check_contents(file_path, b'testents')
        self.os.close(file_des)

    def test_open_write_only_raises_on_read(self):
        # Reading must fail with EBADF for every write-only flag combination.
        file_path = self.make_path('file1')
        self.create_file(file_path, contents=b'contents')
        file_des = self.os.open(file_path, os.O_WRONLY)
        self.assert_raises_os_error(errno.EBADF, self.os.read, file_des, 5)
        self.os.close(file_des)
        file_des = self.os.open(file_path, (os.O_WRONLY | os.O_TRUNC))
        self.assert_raises_os_error(errno.EBADF, self.os.read, file_des, 5)
        self.os.close(file_des)
        file_path2 = self.make_path('file2')
        file_des = self.os.open(file_path2, (os.O_CREAT | os.O_WRONLY))
        self.assert_raises_os_error(errno.EBADF, self.os.read, file_des, 5)
        self.os.close(file_des)
        file_des = self.os.open(file_path2, ((os.O_CREAT | os.O_WRONLY) | os.O_TRUNC))
        self.assert_raises_os_error(errno.EBADF, self.os.read, file_des, 5)
        self.os.close(file_des)

    def test_open_write_only_read_zero_bytes_posix(self):
        self.check_posix_only()
        file_path = self.make_path('file1')
        file_des = self.os.open(file_path, (os.O_CREAT | os.O_WRONLY))
        self.assert_raises_os_error(errno.EBADF, self.os.read, file_des, 0)
        self.os.close(file_des)

    def test_open_write_only_read_zero_bytes_windows(self):
        # Windows: a zero-byte read succeeds even on a write-only fd.
        self.check_windows_only()
        file_path = self.make_path('file1')
        file_des = self.os.open(file_path, (os.O_CREAT | os.O_WRONLY))
        self.assertEqual(b'', self.os.read(file_des, 0))
        self.os.close(file_des)

    def test_open_read_write(self):
        file_path = self.make_path('file1')
        self.create_file(file_path, contents=b'contents')
        file_des = self.os.open(file_path, os.O_RDWR)
        self.assertEqual(4, self.os.write(file_des, b'test'))
        self.check_contents(file_path, b'testents')
        self.os.close(file_des)

    def test_open_create_is_read_only(self):
        # Bare O_CREAT implies O_RDONLY access.
        file_path = self.make_path('file1')
        file_des = self.os.open(file_path, os.O_CREAT)
        self.assertEqual(b'', self.os.read(file_des, 1))
        self.assert_raises_os_error(errno.EBADF, self.os.write, file_des, b'foo')
        self.os.close(file_des)

    def test_open_create_truncate_is_read_only(self):
        file_path = self.make_path('file1')
        file_des = self.os.open(file_path, (os.O_CREAT | os.O_TRUNC))
        self.assertEqual(b'', self.os.read(file_des, 1))
        self.assert_raises_os_error(errno.EBADF, self.os.write, file_des, b'foo')
        self.os.close(file_des)

    def test_open_raises_if_does_not_exist(self):
        file_path = self.make_path('file1')
        self.assert_raises_os_error(errno.ENOENT, self.os.open, file_path, os.O_RDONLY)
        self.assert_raises_os_error(errno.ENOENT, self.os.open, file_path, os.O_WRONLY)
        self.assert_raises_os_error(errno.ENOENT, self.os.open, file_path, os.O_RDWR)

    def test_exclusive_open_raises_without_create_mode(self):
        # Fake-fs specific behavior: O_EXCL without O_CREAT is unsupported.
        self.skip_real_fs()
        file_path = self.make_path('file1')
        self.assertRaises(NotImplementedError, self.os.open, file_path, os.O_EXCL)
        self.assertRaises(NotImplementedError, self.os.open, file_path, (os.O_EXCL | os.O_WRONLY))
        self.assertRaises(NotImplementedError, self.os.open, file_path, (os.O_EXCL | os.O_RDWR))
        self.assertRaises(NotImplementedError, self.os.open, file_path, ((os.O_EXCL | os.O_TRUNC) | os.O_APPEND))

    def test_open_raises_if_parent_does_not_exist(self):
        path = self.make_path('alpha', 'alpha')
        self.assert_raises_os_error(errno.ENOENT, self.os.open, path, ((os.O_CREAT | os.O_WRONLY) | os.O_TRUNC))

    def test_open_truncate(self):
        file_path = self.make_path('file1')
        self.create_file(file_path, contents=b'contents')
        file_des = self.os.open(file_path, (os.O_RDWR | os.O_TRUNC))
        # O_TRUNC discards the previous contents on open.
        self.assertEqual(b'', self.os.read(file_des, 8))
        self.assertEqual(4, self.os.write(file_des, b'test'))
        self.check_contents(file_path, b'test')
        self.os.close(file_des)

    # NOTE(review): this bare tuple looks like a @unittest.skipIf(...)
    # decorator whose call prefix was lost in extraction -- confirm upstream.
    ((not TestCase.is_windows), 'O_TEMPORARY only present in Windows')
    def test_temp_file(self):
        # O_TEMPORARY (Windows): the file disappears when the fd is closed.
        file_path = self.make_path('file1')
        fd = self.os.open(file_path, ((os.O_CREAT | os.O_RDWR) | os.O_TEMPORARY))
        self.assertTrue(self.os.path.exists(file_path))
        self.os.close(fd)
        self.assertFalse(self.os.path.exists(file_path))

    def test_open_append(self):
        file_path = self.make_path('file1')
        self.create_file(file_path, contents=b'contents')
        file_des = self.os.open(file_path, (os.O_WRONLY | os.O_APPEND))
        self.assertEqual(4, self.os.write(file_des, b'test'))
        self.check_contents(file_path, b'contentstest')
        self.os.close(file_des)

    def test_open_create(self):
        file_path = self.make_path('file1')
        file_des = self.os.open(file_path, (os.O_RDWR | os.O_CREAT))
        self.assertTrue(self.os.path.exists(file_path))
        self.assertEqual(4, self.os.write(file_des, b'test'))
        # check_contents with a str here -- contents compare as text.
        self.check_contents(file_path, 'test')
        self.os.close(file_des)

    def test_can_read_after_create_exclusive(self):
        self.check_posix_only()
        path1 = self.make_path('alpha')
        file_des = self.os.open(path1, (os.O_CREAT | os.O_EXCL))
        # O_CREAT|O_EXCL without access flags yields a read-only fd.
        self.assertEqual(b'', self.os.read(file_des, 0))
        self.assert_raises_os_error(errno.EBADF, self.os.write, file_des, b'')
        self.os.close(file_des)

    def test_open_create_mode_posix(self):
        self.check_posix_only()
        file_path = self.make_path('file1')
        # 448 == 0o700.
        file_des = self.os.open(file_path, (os.O_WRONLY | os.O_CREAT), 448)
        self.assertTrue(self.os.path.exists(file_path))
        self.assert_raises_os_error(errno.EBADF, self.os.read, file_des, 5)
        self.assertEqual(4, self.os.write(file_des, b'test'))
        self.assert_mode_equal(448, self.os.stat(file_path).st_mode)
        self.os.close(file_des)

    def test_open_create_mode_windows(self):
        self.check_windows_only()
        file_path = self.make_path('file1')
        # Windows ignores most mode bits: 448 (0o700) becomes 438 (0o666).
        file_des = self.os.open(file_path, (os.O_WRONLY | os.O_CREAT), 448)
        self.assertTrue(self.os.path.exists(file_path))
        self.assert_raises_os_error(errno.EBADF, self.os.read, file_des, 5)
        self.assertEqual(4, self.os.write(file_des, b'test'))
        self.assert_mode_equal(438, self.os.stat(file_path).st_mode)
        self.os.close(file_des)

    def testOpenCreateMode444Windows(self):
        self.check_windows_only()
        file_path = self.make_path('file1')
        # 290 == 0o442 -> reported as 292 (0o444, read-only) on Windows.
        file_des = self.os.open(file_path, (os.O_WRONLY | os.O_CREAT), 290)
        self.assert_mode_equal(292, self.os.stat(file_path).st_mode)
        self.os.close(file_des)
        self.os.chmod(file_path, 438)

    def testOpenCreateMode666Windows(self):
        self.check_windows_only()
        file_path = self.make_path('file1')
        # 148 == 0o224 -> reported as 438 (0o666) on Windows.
        file_des = self.os.open(file_path, (os.O_WRONLY | os.O_CREAT), 148)
        self.assert_mode_equal(438, self.os.stat(file_path).st_mode)
        self.os.close(file_des)

    def test_open_exclusive(self):
        file_path = self.make_path('file1')
        file_des = self.os.open(file_path, ((os.O_RDWR | os.O_EXCL) | os.O_CREAT))
        self.assertTrue(self.os.path.exists(file_path))
        self.os.close(file_des)

    def test_open_exclusive_raises_if_file_exists(self):
        file_path = self.make_path('file1')
        self.create_file(file_path, contents=b'contents')
        self.assert_raises_os_error(errno.EEXIST, self.os.open, file_path, ((os.O_RDWR | os.O_EXCL) | os.O_CREAT))
        self.assert_raises_os_error(errno.EEXIST, self.os.open, file_path, ((os.O_RDWR | os.O_EXCL) | os.O_CREAT))

    def test_open_exclusive_raises_if_symlink_exists_in_posix(self):
        # POSIX: O_EXCL fails even for a dangling symlink.
        self.check_posix_only()
        link_path = self.make_path('link')
        link_target = self.make_path('link_target')
        self.os.symlink(link_target, link_path)
        self.assert_raises_os_error(errno.EEXIST, self.os.open, link_path, (((os.O_CREAT | os.O_WRONLY) | os.O_TRUNC) | os.O_EXCL))

    def test_open_exclusive_if_symlink_exists_works_in_windows(self):
        self.check_windows_only()
        self.skip_if_symlink_not_supported()
        link_path = self.make_path('link')
        link_target = self.make_path('link_target')
        self.os.symlink(link_target, link_path)
        fd = self.os.open(link_path, (((os.O_CREAT | os.O_WRONLY) | os.O_TRUNC) | os.O_EXCL))
        self.os.close(fd)

    def test_open_directory_raises_under_windows(self):
        # Windows rejects opening directories with EACCES for any mode.
        self.check_windows_only()
        dir_path = self.make_path('dir')
        self.create_dir(dir_path)
        self.assert_raises_os_error(errno.EACCES, self.os.open, dir_path, os.O_RDONLY)
        self.assert_raises_os_error(errno.EACCES, self.os.open, dir_path, os.O_WRONLY)
        self.assert_raises_os_error(errno.EACCES, self.os.open, dir_path, os.O_RDWR)

    def test_open_directory_for_writing_raises_under_posix(self):
        # POSIX allows read-only opens of directories, but not writable ones.
        self.check_posix_only()
        dir_path = self.make_path('dir')
        self.create_dir(dir_path)
        self.assert_raises_os_error(errno.EISDIR, self.os.open, dir_path, os.O_WRONLY)
        self.assert_raises_os_error(errno.EISDIR, self.os.open, dir_path, os.O_RDWR)

    def test_open_directory_read_only_under_posix(self):
        self.check_posix_only()
        self.skip_real_fs()
        dir_path = self.make_path('dir')
        self.create_dir(dir_path)
        file_des = self.os.open(dir_path, os.O_RDONLY)
        # Fake fs allocates fds sequentially; 0-2 are reserved, so 3 here.
        self.assertEqual(3, file_des)
        self.os.close(file_des)

    def test_opening_existing_directory_in_creation_mode(self):
        self.check_linux_only()
        dir_path = self.make_path('alpha')
        self.os.mkdir(dir_path)
        self.assert_raises_os_error(errno.EISDIR, self.os.open, dir_path, os.O_CREAT)

    def test_writing_to_existing_directory(self):
        # macOS: O_CREAT on a directory succeeds; writing then fails.
        self.check_macos_only()
        dir_path = self.make_path('alpha')
        self.os.mkdir(dir_path)
        fd = self.os.open(dir_path, os.O_CREAT)
        self.assert_raises_os_error(errno.EBADF, self.os.write, fd, b'')

    def test_opening_existing_directory_in_write_mode(self):
        self.check_posix_only()
        dir_path = self.make_path('alpha')
        self.os.mkdir(dir_path)
        self.assert_raises_os_error(errno.EISDIR, self.os.open, dir_path, os.O_WRONLY)

    def test_open_mode_posix(self):
        self.check_posix_only()
        self.skip_real_fs()
        file_path = self.make_path('baz')
        file_des = self.os.open(file_path, ((os.O_CREAT | os.O_WRONLY) | os.O_TRUNC))
        stat0 = self.os.fstat(file_des)
        # 33279 == 0o100777 (regular file, rwxrwxrwx), masked by the umask.
        self.assertEqual((33279 & (~ self.os._umask())), stat0.st_mode)
        self.os.close(file_des)

    def test_open_mode_windows(self):
        self.check_windows_only()
        file_path = self.make_path('baz')
        file_des = self.os.open(file_path, ((os.O_CREAT | os.O_WRONLY) | os.O_TRUNC))
        stat0 = self.os.fstat(file_des)
        # 33206 == 0o100666 (regular file, rw-rw-rw-).
        self.assertEqual(33206, stat0.st_mode)
        self.os.close(file_des)

    def test_write_read(self):
        # Mix os-level read/write with fds obtained from file objects.
        file_path = self.make_path('file1')
        self.create_file(file_path, contents=b'orig contents')
        new_contents = b'abcdef'
        with self.open(file_path, 'wb') as fh:
            fileno = fh.fileno()
            self.assertEqual(len(new_contents), self.os.write(fileno, new_contents))
            self.check_contents(file_path, new_contents)
        with self.open(file_path, 'rb') as fh:
            fileno = fh.fileno()
            self.assertEqual(b'', self.os.read(fileno, 0))
            self.assertEqual(new_contents[0:2], self.os.read(fileno, 2))
            self.assertEqual(new_contents[2:10], self.os.read(fileno, 8))
            self.assertEqual(new_contents[10:], self.os.read(fileno, 100))
            self.assertEqual(b'', self.os.read(fileno, 10))
        # After the context managers exit, the fds are closed: EBADF.
        self.assert_raises_os_error(errno.EBADF, self.os.write, fileno, new_contents)
        self.assert_raises_os_error(errno.EBADF, self.os.read, fileno, 10)

    def test_write_from_different_f_ds(self):
        # Each descriptor keeps its own file offset.
        file_path = self.make_path('baz')
        fd0 = self.os.open(file_path, ((os.O_CREAT | os.O_WRONLY) | os.O_TRUNC))
        fd1 = self.os.open(file_path, ((os.O_CREAT | os.O_WRONLY) | os.O_TRUNC))
        self.os.write(fd0, b'aaaa')
        self.os.write(fd1, b'bb')
        self.assertEqual(4, self.os.path.getsize(file_path))
        self.check_contents(file_path, b'bbaa')
        self.os.close(fd1)
        self.os.close(fd0)

    def test_write_from_different_fds_with_append(self):
        file_path = self.make_path('baz')
        fd0 = self.os.open(file_path, ((os.O_CREAT | os.O_WRONLY) | os.O_TRUNC))
        fd1 = self.os.open(file_path, (os.O_WRONLY | os.O_APPEND))
        self.os.write(fd0, b'aaa')
        # The append descriptor always writes at end-of-file.
        self.os.write(fd1, b'bbb')
        self.assertEqual(6, self.os.path.getsize(file_path))
        self.check_contents(file_path, b'aaabbb')
        self.os.close(fd1)
        self.os.close(fd0)

    def test_read_only_read_after_write(self):
        self.check_posix_only()
        file_path = self.make_path('foo', 'bar', 'baz')
        self.create_file(file_path, contents=b'test')
        fd0 = self.os.open(file_path, os.O_CREAT)
        fd1 = self.os.open(file_path, ((os.O_CREAT | os.O_WRONLY) | os.O_TRUNC))
        self.assertEqual(b'', self.os.read(fd0, 0))
        self.os.close(fd1)
        self.os.close(fd0)

    def test_read_after_closing_write_descriptor(self):
        file_path = self.make_path('baz')
        fd0 = self.os.open(file_path, ((os.O_CREAT | os.O_WRONLY) | os.O_TRUNC))
        fd1 = self.os.open(file_path, ((os.O_CREAT | os.O_WRONLY) | os.O_TRUNC))
        fd2 = self.os.open(file_path, os.O_CREAT)
        self.os.write(fd1, b'abc')
        self.os.close(fd0)
        # Data written through fd1 stays visible after closing fd0.
        self.assertEqual(b'abc', self.os.read(fd2, 3))
        self.os.close(fd2)
        self.os.close(fd1)

    def test_writing_behind_end_of_file(self):
        file_path = self.make_path('baz')
        fd1 = self.os.open(file_path, os.O_CREAT)
        fd2 = self.os.open(file_path, os.O_RDWR)
        self.os.write(fd2, b'm')
        fd3 = self.os.open(file_path, ((os.O_CREAT | os.O_WRONLY) | os.O_TRUNC))
        self.assertEqual(b'', self.os.read(fd2, 1))
        # fd2's offset is past EOF after the truncate: the gap fills with NUL.
        self.os.write(fd2, b'm')
        self.assertEqual(b'\x00m', self.os.read(fd1, 2))
        self.os.close(fd1)
        self.os.close(fd2)
        self.os.close(fd3)

    def test_devnull_posix(self):
        self.check_posix_only()
        self.setup_fake_fs()
        self.assertTrue(self.os.path.exists(self.os.devnull))

    def test_devnull_windows(self):
        # os.devnull ('nul') only behaves like a real path from Python 3.8 on.
        self.check_windows_only()
        self.setup_fake_fs()
        if (sys.version_info < (3, 8)):
            self.assertFalse(self.os.path.exists(self.os.devnull))
        else:
            self.assertTrue(self.os.path.exists(self.os.devnull))

    def test_write_devnull(self):
        # Writes to devnull report success but nothing can be read back.
        fd = self.os.open(self.os.devnull, os.O_RDWR)
        self.assertEqual(4, self.os.write(fd, b'test'))
        self.assertEqual(b'', self.os.read(fd, 4))
        self.os.close(fd)
        fd = self.os.open(self.os.devnull, os.O_RDONLY)
        self.assertEqual(b'', self.os.read(fd, 4))
        self.os.close(fd)

    def test_sendfile_with_invalid_fd(self):
        self.check_linux_only()
        self.assert_raises_os_error(errno.EBADF, self.os.sendfile, 100, 101, 0, 100)
        src_file_path = self.make_path('foo')
        dst_file_path = self.make_path('bar')
        self.create_file(src_file_path, 'testcontent')
        self.create_file(dst_file_path)
        fd1 = self.os.open(src_file_path, os.O_RDONLY)
        # Destination is read-only: sendfile must fail with EBADF.
        fd2 = self.os.open(dst_file_path, os.O_RDONLY)
        self.assert_raises_os_error(errno.EBADF, self.os.sendfile, fd2, fd1, 0, 4)

    def test_sendfile_no_offset(self):
        self.check_linux_only()
        src_file_path = self.make_path('foo')
        dst_file_path = self.make_path('bar')
        self.create_file(src_file_path, 'testcontent')
        self.create_file(dst_file_path)
        fd1 = self.os.open(src_file_path, os.O_RDONLY)
        fd2 = self.os.open(dst_file_path, os.O_RDWR)
        self.os.sendfile(fd2, fd1, 0, 3)
        self.os.close(fd2)
        self.os.close(fd1)
        with self.open(dst_file_path) as f:
            self.assertEqual('tes', f.read())

    def test_sendfile_with_offset(self):
        self.check_linux_only()
        src_file_path = self.make_path('foo')
        dst_file_path = self.make_path('bar')
        self.create_file(src_file_path, 'testcontent')
        self.create_file(dst_file_path)
        fd1 = self.os.open(src_file_path, os.O_RDONLY)
        fd2 = self.os.open(dst_file_path, os.O_RDWR)
        self.os.sendfile(fd2, fd1, 4, 4)
        self.os.close(fd2)
        self.os.close(fd1)
        with self.open(dst_file_path) as f:
            self.assertEqual('cont', f.read())

    def test_sendfile_twice(self):
        self.check_linux_only()
        src_file_path = self.make_path('foo')
        dst_file_path = self.make_path('bar')
        self.create_file(src_file_path, 'testcontent')
        self.create_file(dst_file_path)
        fd1 = self.os.open(src_file_path, os.O_RDONLY)
        fd2 = self.os.open(dst_file_path, os.O_RDWR)
        # With an explicit offset the source offset is not advanced,
        # so the same range is copied twice.
        self.os.sendfile(fd2, fd1, 4, 4)
        self.os.sendfile(fd2, fd1, 4, 4)
        self.os.close(fd2)
        self.os.close(fd1)
        with self.open(dst_file_path) as f:
            self.assertEqual('contcont', f.read())

    def test_sendfile_offset_none(self):
        self.check_linux_only()
        src_file_path = self.make_path('foo')
        dst_file_path = self.make_path('bar')
        self.create_file(src_file_path, 'testcontent')
        self.create_file(dst_file_path)
        fd1 = self.os.open(src_file_path, os.O_RDONLY)
        fd2 = self.os.open(dst_file_path, os.O_RDWR)
        # offset=None uses and advances the source file offset.
        self.os.sendfile(fd2, fd1, None, 4)
        self.os.sendfile(fd2, fd1, None, 3)
        self.os.close(fd2)
        self.os.close(fd1)
        with self.open(dst_file_path) as f:
            self.assertEqual('testcon', f.read())

    # NOTE(review): this bare tuple looks like a @unittest.skipIf(...)
    # decorator whose call prefix was lost in extraction -- confirm upstream.
    ((not TestCase.is_macos), 'Testing MacOs only behavior')
    def test_no_sendfile_to_regular_file_under_macos(self):
        # macOS sendfile only supports sockets as destination.
        src_file_path = self.make_path('foo')
        dst_file_path = self.make_path('bar')
        self.create_file(src_file_path, 'testcontent')
        self.create_file(dst_file_path)
        fd1 = self.os.open(src_file_path, os.O_RDONLY)
        fd2 = self.os.open(dst_file_path, os.O_RDWR)
        self.assertRaises(OSError, self.os.sendfile, fd2, fd1, 0, 3)
        self.os.close(fd2)
        self.os.close(fd1)
class BaseCorrMM(OpenMPOp, _NoPythonOp):
check_broadcast = False
__props__ = ('border_mode', 'subsample', 'filter_dilation', 'num_groups', 'unshared')
_direction: Optional[str] = None
params_type = ParamsType(direction=EnumList(('DIRECTION_FORWARD', 'forward'), ('DIRECTION_BACKPROP_WEIGHTS', 'backprop weights'), ('DIRECTION_BACKPROP_INPUTS', 'backprop inputs')), dH=int64, dW=int64, dilH=int64, dilW=int64, padH_l=int64, padH_r=int64, padW_l=int64, padW_r=int64, num_groups=int64, unshared=int8)
def __init__(self, border_mode='valid', subsample=(1, 1), filter_dilation=(1, 1), num_groups=1, unshared=False, openmp=None):
    """Validate and store the correlation configuration.

    Parameters
    ----------
    border_mode : str, int or tuple
        'valid', 'full', 'half', a single non-negative int (same symmetric
        padding on both axes), or a 2-tuple whose entries are non-negative
        ints or (left, right) pairs of non-negative ints.
    subsample : tuple of 2 ints
        Strides (dH, dW).
    filter_dilation : tuple of 2 ints
        Dilations (dilH, dilW).
    num_groups : int
        Number of groups for grouped correlation; must be >= 1.
    unshared : bool
        Whether to use unshared (locally connected) correlation.
    openmp : bool or None
        Forwarded to the OpenMPOp base class.

    Raises
    ------
    ValueError
        On any malformed argument, or if the subclass did not set a
        valid ``_direction``.
    """
    super().__init__(openmp=openmp)
    # Normalize border_mode to either a keyword string or a pair of
    # (left, right) padding tuples: ((padH_l, padH_r), (padW_l, padW_r)).
    if isinstance(border_mode, int):
        if (border_mode < 0):
            raise ValueError('invalid border_mode {}, which must be a non-negative integer'.format(border_mode))
        border_mode = (((border_mode, border_mode),) * 2)
    elif isinstance(border_mode, tuple):
        if (len(border_mode) != 2):
            raise ValueError('invalid border_mode {} which must be a tuple of length 2'.format(border_mode))
        border = ()
        for mode in border_mode:
            if (isinstance(mode, tuple) and (len(mode) == 2) and (min(mode) >= 0)):
                border += ((int(mode[0]), int(mode[1])),)
            elif (mode >= 0):
                # Single int per axis: symmetric padding on that axis.
                border += ((int(mode), int(mode)),)
            else:
                raise ValueError('invalid border mode {}. The tuple can only contain integers or tuples of length 2'.format(border_mode))
        border_mode = border
    elif (border_mode not in ('valid', 'full', 'half')):
        raise ValueError('invalid border_mode {}, which must be either "valid", "full", "half", an integer or a tuple of two integers or a pair of integers'.format(border_mode))
    self.border_mode = border_mode
    if (len(subsample) != 2):
        raise ValueError('subsample must have two elements')
    if (len(filter_dilation) != 2):
        raise ValueError('filter_dilation must have two elements')
    self.subsample = tuple(subsample)
    self.filter_dilation = tuple(filter_dilation)
    self.unshared = unshared
    # Record which BLAS implementation the link flags point at, so the
    # C support code can emit matching threading helpers.
    if (not config.blas__ldflags):
        self.blas_type = ''
    elif ('openblas' in config.blas__ldflags):
        self.blas_type = 'openblas'
    elif ('mkl' in config.blas__ldflags):
        self.blas_type = 'mkl'
    else:
        self.blas_type = ''
    # _direction is a class attribute set by the concrete subclass.
    if (self._direction not in ('forward', 'backprop weights', 'backprop inputs')):
        raise ValueError("_direction must be one of 'forward', 'backprop weights', 'backprop inputs'")
    if (num_groups < 1):
        raise ValueError('Number of groups should be greater than 0')
    self.num_groups = num_groups
@property
def pad(self):
    """Return padding as ``((padH_l, padH_r), (padW_l, padW_r))``.

    Keyword border modes are encoded as sentinels resolved later by the
    C code: ``(-1, -1)`` per axis for 'half' and ``(-2, -2)`` for 'full';
    'valid' means no padding.

    Fix: declared as a ``property`` because every consumer subscripts it
    (e.g. ``self.pad[0][0]`` in the ``padH_l``/``padW_r`` accessors),
    which would fail on a bound method -- the decorator was evidently
    lost from the original source.
    """
    if (self.border_mode == 'half'):
        return ((((- 1), (- 1)),) * 2)
    elif (self.border_mode == 'full'):
        return ((((- 2), (- 2)),) * 2)
    elif isinstance(self.border_mode, tuple):
        # Already normalized to explicit (left, right) pairs in __init__.
        return self.border_mode
    else:
        assert (self.border_mode == 'valid')
        return (((0, 0),) * 2)
# Scalar accessors exposing the Op configuration to ParamsType/C code.
# `direction` maps the subclass's string alias to the enum value.
direction = property((lambda self: self.params_type.enum_from_alias(self._direction)))
dH = property((lambda self: self.subsample[0]))  # vertical stride
dW = property((lambda self: self.subsample[1]))  # horizontal stride
dilH = property((lambda self: self.filter_dilation[0]))  # vertical dilation
dilW = property((lambda self: self.filter_dilation[1]))  # horizontal dilation
# NOTE(review): the four pad accessors subscript ``self.pad``; that only
# works if ``pad`` is a property (its ``@property`` decorator appears to
# have been lost on the method above) -- confirm upstream.
padH_l = property((lambda self: self.pad[0][0]))
padH_r = property((lambda self: self.pad[0][1]))
padW_l = property((lambda self: self.pad[1][0]))
padW_r = property((lambda self: self.pad[1][1]))
def __str__(self):
    """Render as ``ClassName{border_mode, subsample, dilation, groups unshared}``."""
    parts = (
        self.__class__.__name__,
        self.border_mode,
        str(self.subsample),
        str(self.filter_dilation),
        str(self.num_groups),
        str(self.unshared),
    )
    return '%s{%s, %s, %s, %s %s}' % parts
def as_common_dtype(in1, in2):
    """Cast both inputs to their common upcast dtype and return the pair.

    NOTE(review): defined without ``self`` -- it acts as a static helper
    even though it is not marked ``@staticmethod``; confirm call sites
    before changing.
    """
    dtype = pytensor.scalar.upcast(in1.dtype, in2.dtype)
    return (in1.astype(dtype), in2.astype(dtype))
def __setstate__(self, d):
    """Restore pickled state; default ``num_groups`` to 1 for pickles
    created before that attribute existed."""
    self.__dict__.update(d)
    if (not hasattr(self, 'num_groups')):
        self.num_groups = 1
def c_support_code(self, **kwargs):
    """Return C support code: BLAS headers plus threading helpers that
    match the BLAS implementation detected in ``__init__``."""
    ccodes = blas_headers.blas_header_text()
    if (self.blas_type == 'openblas'):
        ccodes += blas_headers.openblas_threads_text()
    elif (self.blas_type == 'mkl'):
        ccodes += blas_headers.mkl_threads_text()
    return ccodes
def c_libraries(self, **kwargs):
    """Return the libraries to link against (BLAS, via ``ldflags``)."""
    return ldflags()
def c_compile_args(self, **kwargs):
    """Return compiler flags: BLAS flags first, then the parent's
    (e.g. OpenMP) flags."""
    compile_args = ldflags(libs=False, flags=True)
    compile_args += super().c_compile_args(**kwargs)
    return compile_args
def c_lib_dirs(self, **kwargs):
    """Return the library search directories from the BLAS link flags."""
    return ldflags(libs=False, libs_dir=True)
def c_header_dirs(self, **kwargs):
    """Return the include directories from the BLAS link flags."""
    return ldflags(libs=False, include_dir=True)
def c_headers(self, **kwargs):
    """Return the C headers needed by the generated code: stdio plus
    whatever the parent Op requires."""
    return ['<stdio.h>'] + super().c_headers(**kwargs)
def c_code_cache_version(self):
    """Cache key for the generated C code; the leading number is bumped
    whenever the emitted C changes incompatibly."""
    return (10, self.openmp, blas_header_version())
def c_support_code_apply(self, node, nodename):
    """Return the per-apply C support code: ``corr_gemm.c`` specialized by
    substituting dtype-, OpenMP- and BLAS-specific tokens.

    Only float32/float64 inputs are supported.
    """
    sub = {}
    # NOTE(review): accesses node.__dict__['inputs'] directly; presumably
    # equivalent to node.inputs -- confirm before simplifying.
    dtype = str(node.__dict__['inputs'][0].dtype)
    assert (dtype in ('float32', 'float64'))
    if (dtype == 'float32'):
        # Single-precision BLAS symbols and NumPy type names.
        sub['gemm'] = 'sgemm_'
        sub['gemv'] = 'sgemv_'
        sub['float_type'] = 'npy_float'
        sub['float_typenum'] = 'NPY_FLOAT'
        sub['n_bytes'] = 4
        sub['c_float_type'] = 'float'
    else:
        # Double-precision BLAS symbols and NumPy type names.
        sub['gemm'] = 'dgemm_'
        sub['gemv'] = 'dgemv_'
        sub['float_type'] = 'npy_double'
        sub['float_typenum'] = 'NPY_DOUBLE'
        sub['n_bytes'] = 8
        sub['c_float_type'] = 'double'
    if self.openmp:
        sub['omp_flags'] = '#pragma omp parallel for schedule(static)'
        sub['omp_get_max_threads'] = 'omp_get_max_threads()'
        sub['omp_get_thread_num'] = 'omp_get_thread_num()'
        # Thread-count control differs per BLAS implementation.
        if (self.blas_type == 'openblas'):
            sub['blas_set_num_threads'] = 'openblas_set_num_threads'
            sub['blas_get_num_threads'] = 'openblas_get_num_threads()'
        elif (self.blas_type == 'mkl'):
            sub['blas_set_num_threads'] = 'mkl_set_num_threads'
            sub['blas_get_num_threads'] = 'mkl_get_max_threads()'
        else:
            sub['blas_set_num_threads'] = ''
            sub['blas_get_num_threads'] = '0'
    else:
        # No OpenMP: neutral placeholders so the template still compiles.
        sub['omp_flags'] = ''
        sub['omp_get_max_threads'] = '1'
        sub['omp_get_thread_num'] = '0'
        sub['blas_set_num_threads'] = ''
        sub['blas_get_num_threads'] = '0'
    final_code = ''
    # The C implementation lives next to this module in c_code/corr_gemm.c.
    with open(os.path.join(os.path.split(__file__)[0], os.path.join('c_code', 'corr_gemm.c'))) as f:
        code = f.read()
        final_code += code
    return (final_code % sub)
def c_code_helper(self, bottom, weights, top, sub, height=None, width=None):
if height:
height = f'(*(npy_int64 *)(PyArray_DATA({height})))'
else:
if (((self.direction != 0) and (self.dH != 1)) or ((self.direction == 1) and ((self.padH_l == (- 1)) or (self.padH_r == (- 1))))):
raise ValueError("height must be given for backprop with vertical sampling or border_mode='half'")
height = '-1'
if width:
width = f'(*(npy_int64 *)(PyArray_DATA({width})))'
else:
if (((self.direction != 0) and (self.dW != 1)) or ((self.direction == 1) and ((self.padW_l == (- 1)) or (self.padW_r == (- 1))))):
raise ValueError("width must be given for backprop with horizontal sampling or border_mode='half'")
width = '-1'
return ('\n // Mandatory args\n int direction = %(params)s->direction; // forward, bprop weights, bprop inputs\n\n // Optional args\n int dH = %(params)s->dH;\n int dW = %(params)s->dW;\n int dilH = %(params)s->dilH;\n int dilW = %(params)s->dilW;\n int padH_l = %(params)s->padH_l;\n int padH_r = %(params)s->padH_r;\n int padW_l = %(params)s->padW_l;\n int padW_r = %(params)s->padW_r;\n int numgroups = %(params)s->num_groups;\n int unshared = %(params)s->unshared;\n\n PyArrayObject * bottom = %(bottom)s;\n PyArrayObject * weights = %(weights)s;\n PyArrayObject * top = %(top)s;\n PyArrayObject * out2 = NULL;\n PyArrayObject **out = NULL;\n\n switch(%(params)s->direction) {\n case DIRECTION_FORWARD:\n out = &%(top)s;\n break;\n case DIRECTION_BACKPROP_WEIGHTS:\n out = &%(weights)s;\n break;\n case DIRECTION_BACKPROP_INPUTS:\n out = &%(bottom)s;\n break;\n default:\n PyErr_SetString(PyExc_ValueError, "CPU CorrMM: Invalid direction.");\n {%(fail)s}\n break;\n }\n\n int wdim, odim;\n wdim = unshared ? 
6 : 4;\n odim = 4; //Can be set to 6 later for unshared backprop wrt weights\n\n // Obtain or infer kernel width and height\n // (we need to know it early to be able to handle auto-padding)\n int kH, kW, dil_kH, dil_kW;\n if (direction != 1) {\n // weight is an input variable, we can just read its shape\n kH = PyArray_DIMS(weights)[wdim-2];\n kW = PyArray_DIMS(weights)[wdim-1];\n }\n else {\n if (%(height)s != -1) {\n // kernel height is specified (perhaps vertical subsampling or half padding)\n kH = %(height)s;\n }\n else if (padH_l == -2 || padH_r == -2) {\n // vertical full padding, we can infer the kernel height\n kH = (2 - PyArray_DIMS(bottom)[2] + (PyArray_DIMS(top)[2] - 1) * dH - 1)/ dilH + 1;\n }\n else {\n // explicit padding, we can infer the kernel height\n kH = (PyArray_DIMS(bottom)[2] + padH_l + padH_r - (PyArray_DIMS(top)[2] - 1) * dH - 1) / dilH +1;\n }\n if (%(width)s != -1) {\n // kernel width is specified (perhaps horizontal subsampling or half padding)\n kW = %(width)s;\n }\n else if (padW_l == -2 || padW_r == -2) {\n kW = (2 - PyArray_DIMS(bottom)[3] + (PyArray_DIMS(top)[3] - 1) * dW - 1) / dilW + 1;\n }\n else {\n kW = (PyArray_DIMS(bottom)[3] + padW_l + padW_r - (PyArray_DIMS(top)[3] - 1) * dW - 1) / dilW + 1;\n }\n }\n\n // Implicit dilated kernel size\n dil_kH = (kH - 1) * dilH + 1;\n dil_kW = (kW - 1) * dilW + 1;\n\n // Auto-padding if requested\n if (padH_l == -1 || padH_r == -1) { // vertical half padding\n padH_l = padH_r = dil_kH / 2;\n }\n else if (padH_l == -2 || padH_r == -2) { // vertical full padding\n padH_l = padH_r = dil_kH - 1;\n }\n else if (padH_l < -2 || padH_r < -2) {\n PyErr_SetString(PyExc_ValueError, "BaseCorrMM: padH_l and padH_r must be >= -2");\n %(fail)s\n }\n if (padW_l == -1 || padW_r == -1) { // horizontal half padding\n padW_l = padW_r = dil_kW / 2;\n }\n else if (padW_l == -2 || padW_r == -2) { // horizontal full padding\n padW_l = padW_r = dil_kW - 1;\n }\n else if (padW_l < -2 || padW_r < -2) {\n 
PyErr_SetString(PyExc_ValueError, "BaseCorrMM: padW_l and padW_r must be >= -2");\n %(fail)s\n }\n\n // Infer output shape\n npy_intp out_dim[6];\n out_dim[4] = out_dim[5] = 0; //Only used for unshared backprop wrt weights\n switch(direction) {\n case 0: // forward pass\n // output is top: (batchsize, num_filters, height, width)\n // height and width: top = (bottom + pad_l + pad_r - ((weight-1)*dil + 1)) / sample + 1\n out_dim[0] = (npy_intp)PyArray_DIMS(bottom)[0];\n out_dim[1] = (npy_intp)PyArray_DIMS(weights)[0];\n out_dim[2] = (npy_intp)((PyArray_DIMS(bottom)[2] + padH_l + padH_r - ((PyArray_DIMS(weights)[wdim-2]-1)*dilH + 1)) / dH + 1);\n out_dim[3] = (npy_intp)((PyArray_DIMS(bottom)[3] + padW_l + padW_r - ((PyArray_DIMS(weights)[wdim-1]-1)*dilW + 1)) / dW + 1);\n if (out_dim[0] < 0 || out_dim[1] < 0 || out_dim[2] <= 0 || out_dim[3] <= 0)\n {\n if (unshared) {\n PyErr_Format(PyExc_ValueError,\n "CorrMM: impossible output shape\\n"\n " bottom shape: %%ld x %%ld x %%ld x %%ld\\n"\n " weights shape: %%ld x %%ld x %%ld x %%ld x %%ld x %%ld\\n"\n " top shape: %%ld x %%ld x %%ld x %%ld\\n",\n (long int)PyArray_DIMS(bottom)[0], (long int)PyArray_DIMS(bottom)[1],\n (long int)PyArray_DIMS(bottom)[2], (long int)PyArray_DIMS(bottom)[3],\n (long int)PyArray_DIMS(weights)[0], (long int)PyArray_DIMS(weights)[1],\n (long int)PyArray_DIMS(weights)[2], (long int)PyArray_DIMS(weights)[3],\n (long int)PyArray_DIMS(weights)[4], (long int)PyArray_DIMS(weights)[5],\n (long int)out_dim[0], (long int)out_dim[1], (long int)out_dim[2],\n (long int)out_dim[3]);\n }\n else {\n PyErr_Format(PyExc_ValueError,\n "CorrMM: impossible output shape\\n"\n " bottom shape: %%ld x %%ld x %%ld x %%ld\\n"\n " weights shape: %%ld x %%ld x %%ld x %%ld\\n"\n " top shape: %%ld x %%ld x %%ld x %%ld\\n",\n (long int)PyArray_DIMS(bottom)[0], (long int)PyArray_DIMS(bottom)[1],\n (long int)PyArray_DIMS(bottom)[2], (long int)PyArray_DIMS(bottom)[3],\n (long int)PyArray_DIMS(weights)[0], (long 
int)PyArray_DIMS(weights)[1],\n (long int)PyArray_DIMS(weights)[2], (long int)PyArray_DIMS(weights)[3],\n (long int)out_dim[0], (long int)out_dim[1], (long int)out_dim[2],\n (long int)out_dim[3]);\n }\n %(fail)s\n }\n break;\n case 1: // backprop wrt. weights\n // output is weights: (num_filters, num_channels, height, width)\n // height and width: weights = (bottom + pad_l + pad_r - (top - 1) * sample - 1) / dil + 1\n out_dim[0] = (npy_intp)PyArray_DIMS(top)[1];\n if (unshared){\n odim = 6;\n out_dim[1] = (npy_intp)PyArray_DIMS(top)[2];\n out_dim[2] = (npy_intp)PyArray_DIMS(top)[3];\n }\n out_dim[wdim-3] = (npy_intp)PyArray_DIMS(bottom)[1] / numgroups;\n out_dim[wdim-2] = (npy_intp)kH; // already inferred further above\n out_dim[wdim-1] = (npy_intp)kW; // how convenient\n if (unshared) {\n if (out_dim[0] < 0 || out_dim[1] <= 0 || out_dim[2] <= 0 || out_dim[3] < 0\n || out_dim[4] <= 0 || out_dim[5] <= 0){\n PyErr_Format(PyExc_ValueError,\n "CorrMM backprop wrt. weights: impossible output shape\\n"\n " bottom shape: %%ld x %%ld x %%ld x %%ld\\n"\n " weights shape: %%ld x %%ld x %%ld x %%ld x %%ld x %%ld\\n"\n " top shape: %%ld x %%ld x %%ld x %%ld\\n",\n (long int)PyArray_DIMS(bottom)[0], (long int)PyArray_DIMS(bottom)[1],\n (long int)PyArray_DIMS(bottom)[2], (long int)PyArray_DIMS(bottom)[3],\n (long int)out_dim[0], (long int)out_dim[1], (long int)out_dim[2],\n (long int)out_dim[3], (long int)out_dim[4], (long int)out_dim[5],\n (long int)PyArray_DIMS(top)[0], (long int)PyArray_DIMS(top)[1],\n (long int)PyArray_DIMS(top)[2], (long int)PyArray_DIMS(top)[3]);\n %(fail)s\n }\n }\n else {\n if (out_dim[0] < 0 || out_dim[1] < 0 || out_dim[2] <= 0 || out_dim[3] <= 0)\n {\n PyErr_Format(PyExc_ValueError,\n "CorrMM backprop wrt. 
weights: impossible output shape\\n"\n " bottom shape: %%ld x %%ld x %%ld x %%ld\\n"\n " weights shape: %%ld x %%ld x %%ld x %%ld\\n"\n " top shape: %%ld x %%ld x %%ld x %%ld\\n",\n (long int)PyArray_DIMS(bottom)[0], (long int)PyArray_DIMS(bottom)[1],\n (long int)PyArray_DIMS(bottom)[2], (long int)PyArray_DIMS(bottom)[3],\n (long int)out_dim[0], (long int)out_dim[1], (long int)out_dim[2],\n (long int)out_dim[3],\n (long int)PyArray_DIMS(top)[0], (long int)PyArray_DIMS(top)[1],\n (long int)PyArray_DIMS(top)[2], (long int)PyArray_DIMS(top)[3]);\n %(fail)s\n }\n }\n break;\n case 2: // backprop wrt. inputs\n // output is bottom: (batchsize, num_channels, height, width)\n // height and width: bottom = (top - 1) * sample + (weights-1)*dil + 1 - 2*pad\n out_dim[0] = (npy_intp)PyArray_DIMS(top)[0];\n out_dim[1] = (npy_intp)PyArray_DIMS(weights)[wdim-3] * numgroups;\n out_dim[2] = (npy_intp)((%(height)s != -1) ? %(height)s : (PyArray_DIMS(top)[2] - 1) * dH + (PyArray_DIMS(weights)[wdim-2]-1)*dilH + 1 - padH_l - padH_r);\n out_dim[3] = (npy_intp)((%(width)s != -1) ? %(width)s : (PyArray_DIMS(top)[3] - 1) * dW + (PyArray_DIMS(weights)[wdim-1]-1)*dilW + 1 - padW_l - padW_r);\n if (unshared) {\n if (out_dim[0] < 0 || out_dim[1] < 0 || out_dim[2] <= 0 || out_dim[3] <= 0)\n {\n PyErr_Format(PyExc_ValueError,\n "CorrMM backprop wrt. 
inputs: impossible output shape\\n"\n " bottom shape: %%ld x %%ld x %%ld x %%ld\\n"\n " weights shape: %%ld x %%ld x %%ld x %%ld x %%ld x %%ld\\n"\n " top shape: %%ld x %%ld x %%ld x %%ld\\n",\n (long int)out_dim[0], (long int)out_dim[1], (long int)out_dim[2],\n (long int)out_dim[3],\n (long int)PyArray_DIMS(weights)[0], (long int)PyArray_DIMS(weights)[1],\n (long int)PyArray_DIMS(weights)[2], (long int)PyArray_DIMS(weights)[3],\n (long int)PyArray_DIMS(weights)[4], (long int)PyArray_DIMS(weights)[5],\n (long int)PyArray_DIMS(top)[0], (long int)PyArray_DIMS(top)[1],\n (long int)PyArray_DIMS(top)[2], (long int)PyArray_DIMS(top)[3]);\n %(fail)s\n }\n }\n else {\n if (out_dim[0] < 0 || out_dim[1] < 0 || out_dim[2] <= 0 || out_dim[3] <= 0)\n {\n PyErr_Format(PyExc_ValueError,\n "CorrMM backprop wrt. inputs: impossible output shape\\n"\n " bottom shape: %%ld x %%ld x %%ld x %%ld\\n"\n " weights shape: %%ld x %%ld x %%ld x %%ld\\n"\n " top shape: %%ld x %%ld x %%ld x %%ld\\n",\n (long int)out_dim[0], (long int)out_dim[1], (long int)out_dim[2],\n (long int)out_dim[3],\n (long int)PyArray_DIMS(weights)[0], (long int)PyArray_DIMS(weights)[1],\n (long int)PyArray_DIMS(weights)[2], (long int)PyArray_DIMS(weights)[3],\n (long int)PyArray_DIMS(top)[0], (long int)PyArray_DIMS(top)[1],\n (long int)PyArray_DIMS(top)[2], (long int)PyArray_DIMS(top)[3]);\n %(fail)s\n }\n }\n break;\n default:\n PyErr_SetString(PyExc_ValueError, "BaseCorrMM: direction must be 0, 1, or 2\\n");\n %(fail)s\n }\n\n // Prepare output array\n int typenum;\n int failure;\n failure = !(*out\n && PyArray_NDIM(*out)==odim\n && PyArray_IS_C_CONTIGUOUS(*out)\n && PyArray_DIMS(*out)[0]==out_dim[0]\n && PyArray_DIMS(*out)[1]==out_dim[1]\n && PyArray_DIMS(*out)[2]==out_dim[2]\n && PyArray_DIMS(*out)[3]==out_dim[3]);\n if (odim == 6){\n failure = failure || !(PyArray_DIMS(*out)[4]==out_dim[4]\n && PyArray_DIMS(*out)[5]==out_dim[5]);\n }\n if ( failure )\n {\n Py_XDECREF(*out);\n if (direction != 1) {\n typenum = 
PyArray_TYPE(weights);\n }\n else {\n typenum = PyArray_TYPE(bottom);\n }\n //Change to PyArray_ZEROS which is faster than PyArray_EMPTY.\n *out = (PyArrayObject*)PyArray_ZEROS(odim,\n out_dim,\n typenum,\n 0);\n if (NULL == *out)\n {\n if (odim == 4) {\n PyErr_Format(PyExc_RuntimeError,\n "BaseCorrMM: Failed to allocate output of %%lld x %%lld x %%lld x %%lld",\n (long long)out_dim[0], (long long)out_dim[1], (long long)out_dim[2], (long long)out_dim[3]);\n }\n if (odim == 6) {\n PyErr_Format(PyExc_RuntimeError,\n "BaseCorrMM: Failed to allocate output of %%lld x %%lld x %%lld x %%lld %%lld %%lld",\n (long long)out_dim[0], (long long)out_dim[1], (long long)out_dim[2], (long long)out_dim[3],\n (long long)out_dim[4], (long long)out_dim[5]);\n }\n %(fail)s\n }\n }\n\n // Call corrMM code\n out2 = corrMM(%(bottom)s, %(weights)s, %(top)s, direction, dH, dW, dilH, dilW,\n padH_l, padH_r, padW_l, padW_r, numgroups, unshared);\n if (out2==NULL){\n %(fail)s\n }\n assert (out2 == *out);\n\n' % dict(bottom=bottom, weights=weights, top=top, height=height, width=width, fail=sub['fail'], params=sub['params'])) |
def main():
    """Interactive chat REPL: read a line from the user, print the bot's
    reply, and speak it with the platform's TTS tool (``festival`` on Linux,
    ``say`` on macOS).

    Relies on module-level ``load_vocabulary``, ``build_model``, ``BotAgent``,
    ``checkpoint_epoch`` and ``BOT_NAME`` defined elsewhere in this file.
    Runs until interrupted (Ctrl-C / EOF).
    """
    import shlex  # local import: only needed for safe shell quoting below

    vocab = load_vocabulary()
    model = build_model(len(vocab.word2index), load_checkpoint=True, checkpoint_epoch=checkpoint_epoch)
    bot = BotAgent(model, vocab)
    while True:
        user_input = input('me: ')
        if user_input.strip() == '':
            continue
        response = bot.response(user_input)
        print('%s: %s' % (BOT_NAME, response))
        curr_sys = platform.system()
        # Quote the model-generated text before handing it to a shell:
        # without quoting, characters such as ", `, $ or ; in `response`
        # would be interpreted by the shell (broken command / injection).
        if curr_sys == 'Linux':
            os.system('echo %s | festival --tts' % shlex.quote(response))
        elif curr_sys == 'Darwin':
            os.system('say %s' % shlex.quote(response))
def predict_entry_point():
    """CLI entry point for nnU-Net inference (``nnUNetv2_predict``).

    Parses command-line arguments, resolves the trained-model folder from the
    dataset / trainer / plans / configuration identifiers, builds an
    ``nnUNetPredictor`` and runs prediction over every case in the input
    folder, writing segmentations to the output folder.
    """
    import argparse
    parser = argparse.ArgumentParser(description='Use this to run inference with nnU-Net. This function is used when you want to manually specify a folder containing a trained nnU-Net my_models. This is useful when the nnunet environment variables (nnUNet_results) are not set.')
    # --- I/O and model-selection arguments -------------------------------
    parser.add_argument('-i', type=str, required=True, help='input folder. Remember to use the correct channel numberings for your files (_0000 etc). File endings must be the same as the training dataset!')
    parser.add_argument('-o', type=str, required=True, help='Output folder. If it does not exist it will be created. Predicted segmentations will have the same name as their source images.')
    parser.add_argument('-d', type=str, required=True, help='Dataset with which you would like to predict. You can specify either dataset name or id')
    parser.add_argument('-p', type=str, required=False, default='nnUNetPlans', help='Plans identifier. Specify the plans in which the desired configuration is located. Default: nnUNetPlans')
    parser.add_argument('-tr', type=str, required=False, default='nnUNetTrainer', help='What nnU-Net trainer class was used for training? Default: nnUNetTrainer')
    parser.add_argument('-c', type=str, required=True, help='nnU-Net configuration that should be used for prediction. Config must be located in the plans specified with -p')
    parser.add_argument('-f', nargs='+', type=str, required=False, default=(0, 1, 2, 3, 4), help='Specify the folds of the trained my_models that should be used for prediction. Default: (0, 1, 2, 3, 4)')
    # --- inference-behavior arguments ------------------------------------
    parser.add_argument('-step_size', type=float, required=False, default=0.5, help='Step size for sliding window prediction. The larger it is the faster but less accurate the prediction. Default: 0.5. Cannot be larger than 1. We recommend the default.')
    parser.add_argument('--disable_tta', action='store_true', required=False, default=False, help='Set this flag to disable test time data augmentation in the form of mirroring. Faster, but less accurate inference. Not recommended.')
    parser.add_argument('--verbose', action='store_true', help='Set this if you like being talked to. You will have to be a good listener/reader.')
    parser.add_argument('--save_probabilities', action='store_true', help='Set this to export predicted class "probabilities". Required if you want to ensemble multiple configurations.')
    parser.add_argument('--continue_prediction', action='store_true', help='Continue an aborted previous prediction (will not overwrite existing files)')
    parser.add_argument('-chk', type=str, required=False, default='checkpoint_final.pth', help='Name of the checkpoint you want to use. Default: checkpoint_final.pth')
    parser.add_argument('-npp', type=int, required=False, default=3, help='Number of processes used for preprocessing. More is not always better. Beware of out-of-RAM issues. Default: 3')
    parser.add_argument('-nps', type=int, required=False, default=3, help='Number of processes used for segmentation export. More is not always better. Beware of out-of-RAM issues. Default: 3')
    parser.add_argument('-prev_stage_predictions', type=str, required=False, default=None, help='Folder containing the predictions of the previous stage. Required for cascaded my_models.')
    # --- multi-GPU sharding arguments ------------------------------------
    parser.add_argument('-num_parts', type=int, required=False, default=1, help='Number of separate nnUNetv2_predict call that you will be making. Default: 1 (= this one call predicts everything)')
    parser.add_argument('-part_id', type=int, required=False, default=0, help='If multiple nnUNetv2_predict exist, which one is this? IDs start with 0 can end with num_parts - 1. So when you submit 5 nnUNetv2_predict calls you need to set -num_parts 5 and use -part_id 0, 1, 2, 3 and 4. Simple, right? Note: You are yourself responsible to make these run on separate GPUs! Use CUDA_VISIBLE_DEVICES (google, yo!)')
    parser.add_argument('-device', type=str, default='cuda', required=False, help="Use this to set the device the inference should run with. Available options are 'cuda' (GPU), 'cpu' (CPU) and 'mps' (Apple M1/M2). Do NOT use this to set which GPU ID! Use CUDA_VISIBLE_DEVICES=X nnUNetv2_predict [...] instead!")
    print('\n\nPlease cite the following paper when using nnU-Net:\nIsensee, F., Jaeger, P. F., Kohl, S. A., Petersen, J., & Maier-Hein, K. H. (2021). nnU-Net: a self-configuring method for deep learning-based biomedical image segmentation. Nature methods, 18(2), 203-211.\n\n')
    args = parser.parse_args()
    # Fold specifiers may be integers or the literal string 'all'.
    args.f = [(i if (i == 'all') else int(i)) for i in args.f]
    model_folder = get_output_folder(args.d, args.tr, args.p, args.c)
    if (not isdir(args.o)):
        maybe_mkdir_p(args.o)
    # NOTE(review): plain asserts are stripped under `python -O`; explicit
    # raises would be more robust for CLI argument validation.
    assert (args.part_id < args.num_parts), 'Do you even read the documentation? See nnUNetv2_predict -h.'
    assert (args.device in ['cpu', 'cuda', 'mps']), f'-device must be either cpu, mps or cuda. Other devices are not tested/supported. Got: {args.device}.'
    # Configure torch threading per device: use all cores on CPU, but keep
    # threading minimal when the GPU does the heavy lifting.
    if (args.device == 'cpu'):
        import multiprocessing
        torch.set_num_threads(multiprocessing.cpu_count())
        device = torch.device('cpu')
    elif (args.device == 'cuda'):
        torch.set_num_threads(1)
        torch.set_num_interop_threads(1)
        device = torch.device('cuda')
    else:
        device = torch.device('mps')
    # Build the predictor, load the trained weights, and run inference.
    predictor = nnUNetPredictor(tile_step_size=args.step_size, use_gaussian=True, use_mirroring=(not args.disable_tta), perform_everything_on_gpu=True, device=device, verbose=args.verbose, verbose_preprocessing=False)
    predictor.initialize_from_trained_model_folder(model_folder, args.f, checkpoint_name=args.chk)
    predictor.predict_from_files(args.i, args.o, save_probabilities=args.save_probabilities, overwrite=(not args.continue_prediction), num_processes_preprocessing=args.npp, num_processes_segmentation_export=args.nps, folder_with_segs_from_prev_stage=args.prev_stage_predictions, num_parts=args.num_parts, part_id=args.part_id)
class MatchesException(object):
    """Matcher comparing an exception instance by exact type and ``args``.

    ``match`` follows the testtools matcher protocol: it returns ``None``
    when ``other`` matches ``expected`` and a ``Mismatch`` otherwise.
    """

    expected = attr.ib()

    def match(self, other):
        """Return a Mismatch describing the difference, or None on a match."""
        want_cls = type(self.expected)
        # Exact type check (deliberately not isinstance): subclasses mismatch.
        if type(other) is not want_cls:
            return Mismatch('{} is not a {}'.format(other, want_cls))
        if other.args == self.expected.args:
            return None
        return Mismatch('{} has different arguments: {}.'.format(other.args, self.expected.args))
def test_fully_covered_nrel():
    """fully_covered_nrel should flag only the hours whose snowfall rate
    indicates the array became fully snow-covered."""
    times = pd.date_range(start='2019-1-1 12:00:00', end='2019-1-1 18:00:00', freq='1h')
    snowfall = pd.Series([1, 5, 0.6, 4, 0.23, -5, 19], index=times)
    expected_flags = pd.Series([False, True, False, True, False, False, True], index=times)
    result = snow.fully_covered_nrel(snowfall)
    assert_series_equal(expected_flags, result)
class GridInfo():
    """Computes window geometries for laying out ``num_windows`` windows
    inside a ``width`` x ``height`` area, preferring grids whose cells have
    an aspect ratio closest to ``ratio``.

    Geometries are emitted as ``(x, y, width, height)`` tuples.  The module
    constants ``ROWCOL`` and ``COLROW`` (defined elsewhere in this file) tag
    whether a candidate grid is oriented row-major or column-major.
    """
    def __init__(self, ratio, num_windows, width, height):
        # Target cell aspect ratio (width / height).
        self.ratio = ratio
        # Total number of windows to place.
        self.num_windows = num_windows
        # Overall area available for the layout, in pixels.
        self.width = width
        self.height = height
        # Not computed here; presumably set/used by callers — TODO confirm.
        self.num_rows = 0
        self.num_cols = 0
    def calc(self, num_windows, width, height):
        """Return the (rows, cols, orientation) candidate grid whose cell
        aspect ratio is closest to ``self.ratio`` for the given area."""
        best_ratio = None
        best_rows_cols_orientation = None
        for (rows, cols, orientation) in self._possible_grids(num_windows):
            # Aspect ratio of one cell in this candidate grid.
            sample_width = (width / cols)
            sample_height = (height / rows)
            sample_ratio = (sample_width / sample_height)
            diff = abs((sample_ratio - self.ratio))
            if ((best_ratio is None) or (diff < best_ratio)):
                # NOTE: best_ratio actually holds the best *difference* so far.
                best_ratio = diff
                best_rows_cols_orientation = (rows, cols, orientation)
        return best_rows_cols_orientation
    def _possible_grids(self, num_windows):
        """Yield candidate ``(rows, cols, orientation)`` grids that can hold
        ``num_windows`` windows.

        For each row count up to ``num_windows // 2`` the column count is
        ``ceil(num_windows / rows)``; the transposed grid is yielded too
        (tagged COLROW) unless it is square.
        """
        if (num_windows < 2):
            end = 2
        else:
            end = ((num_windows // 2) + 1)
        for rows in range(1, end):
            cols = int(math.ceil((num_windows / rows)))
            (yield (rows, cols, ROWCOL))
            if (rows != cols):
                (yield (cols, rows, COLROW))
    def get_sizes_advanced(self, total_width, total_height, xoffset=0, yoffset=0):
        """Lay the windows out strip by strip: each iteration places one row
        or one column of windows, then shrinks the remaining area by the
        strip's height/width before placing the next batch.

        Returns a list of ``(x, y, width, height)`` tuples, one per window.
        """
        results = []
        width = total_width
        height = total_height
        while (len(results) < self.num_windows):
            remaining = (self.num_windows - len(results))
            (orien, sizes) = self._get_row_or_col(remaining, width, height, xoffset, yoffset)
            results.extend(sizes)
            if (orien == ROWCOL):
                # Horizontal strip placed: consume its height.
                # sizes[-1][-1] is the height of the last placed window.
                height -= sizes[(- 1)][(- 1)]
                yoffset += sizes[(- 1)][(- 1)]
            else:
                # Vertical strip placed: consume its width.
                # sizes[-1][-2] is the width of the last placed window.
                width -= sizes[(- 1)][(- 2)]
                xoffset += sizes[(- 1)][(- 2)]
        return results
    def _get_row_or_col(self, num_windows, width, height, xoffset, yoffset):
        """Place a single strip of windows from the best-fit grid for
        ``num_windows`` and return ``(orientation, [(x, y, w, h), ...])``.

        Only one strip of the chosen grid is emitted (``cols`` windows across
        for ROWCOL, ``rows`` windows down for COLROW); the caller repeats
        with the remaining windows and area.
        """
        (rows, cols, orientation) = self.calc(num_windows, width, height)
        results = []
        if (orientation == ROWCOL):
            x = 0
            y = 0
            for (i, col) in enumerate(range(cols)):
                w_width = (width // cols)
                w_height = (height // rows)
                if (i == (cols - 1)):
                    # Last window absorbs the integer-division remainder.
                    w_width = (width - x)
                results.append(((x + xoffset), (y + yoffset), w_width, w_height))
                x += w_width
        elif (orientation == COLROW):
            x = 0
            y = 0
            for (i, col) in enumerate(range(rows)):
                w_width = (width // cols)
                w_height = (height // rows)
                if (i == (rows - 1)):
                    # Last window absorbs the integer-division remainder.
                    w_height = (height - y)
                results.append(((x + xoffset), (y + yoffset), w_width, w_height))
                y += w_height
        return (orientation, results)
    def get_sizes(self, total_width, total_height, xoffset=0, yoffset=0):
        """Compute geometries for all ``self.num_windows`` windows using a
        single best-fit grid chosen once up front.

        The first window of the final row/column is widened to spread the
        leftover space when the grid is not completely filled; the last
        window of each row/column absorbs integer-division remainders.
        Returns a list of ``(x, y, width, height)`` tuples.
        """
        width = 0
        height = 0
        results = []
        (rows, cols, orientation) = self.calc(self.num_windows, total_width, total_height)
        if (orientation == ROWCOL):
            y = 0
            for (i, row) in enumerate(range(rows)):
                x = 0
                width = (total_width // cols)
                for (j, col) in enumerate(range(cols)):
                    height = (total_height // rows)
                    if ((i == (rows - 1)) and (j == 0)):
                        # Last row: split the full width over the windows
                        # that are still unplaced.
                        remaining = (self.num_windows - len(results))
                        width = (total_width // remaining)
                    elif ((j == (cols - 1)) or ((len(results) + 1) == self.num_windows)):
                        # Last window of the row absorbs the remainder.
                        width = (total_width - x)
                    results.append(((x + xoffset), (y + yoffset), width, height))
                    if (len(results) == self.num_windows):
                        return results
                    x += width
                y += height
        else:
            x = 0
            for (i, col) in enumerate(range(cols)):
                y = 0
                height = (total_height // rows)
                for (j, row) in enumerate(range(rows)):
                    width = (total_width // cols)
                    if ((i == (cols - 1)) and (j == 0)):
                        # Last column: split the full height over the windows
                        # that are still unplaced.
                        remaining = (self.num_windows - len(results))
                        height = (total_height // remaining)
                    elif ((j == (rows - 1)) or ((len(results) + 1) == self.num_windows)):
                        # Last window of the column absorbs the remainder.
                        height = (total_height - y)
                    results.append(((x + xoffset), (y + yoffset), width, height))
                    if (len(results) == self.num_windows):
                        return results
                    y += height
                x += width
        return results
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.