code stringlengths 281 23.7M |
|---|
def serialize_string(fmt):
    """Build a serializer for a fixed-width ASCII string field.

    *fmt* looks like ``a<N>`` with an optional trailing ``?`` and/or ``+``
    (e.g. ``"a10+"``): ``a`` marks an ASCII field, ``N`` is the field width
    in bytes, and a trailing ``+`` permits values longer than the width.

    Returns a function mapping a value (``None`` -> empty) to an ASCII
    byte string left-justified/padded to ``N`` bytes.  When the encoded
    value exceeds ``N`` and ``+`` was not given, the returned function
    raises ``SerializeError``.
    """
    # A '+' suffix means over-length values are acceptable.
    more_ok = fmt.endswith('+')
    # Strip the optional '?'/'+' modifiers before parsing the width.
    fmt = fmt.rstrip('?+')
    assert fmt[0] == 'a'
    ln = int(fmt[1:])

    def func(v):
        if v is None:
            v = b''
        else:
            v = v.encode('ascii')
        s = v.ljust(ln)
        if more_ok or len(s) == ln:
            return s
        # BUG FIX: the original applied '%' with only `ln` to a format
        # string containing two placeholders, raising TypeError at format
        # time instead of the intended SerializeError.  Format both values
        # into the message.
        raise SerializeError('max string length: %i, value="%s"' % (ln, v))
    return func
def _attribute_from_attrib_maker(ctx: mypy.plugin.ClassDefContext, auto_attribs: bool, kw_only: bool, lhs: NameExpr, rvalue: CallExpr, stmt: AssignmentStmt) -> (Attribute | None):
    """Return an Attribute for an assignment whose rvalue is an attr.ib()
    (or attr.attrib()) call, or None if it cannot be modelled.

    Errors are reported through ``ctx.api`` before returning None.
    """
    if (auto_attribs and (not stmt.new_syntax)):
        # auto_attribs=True requires a PEP 526 annotation on every attribute.
        assert (lhs.node is not None)
        ctx.api.msg.need_annotation_for_var(lhs.node, stmt)
        return None
    if (len(stmt.lvalues) > 1):
        # e.g. `x = y = attr.ib()` — ambiguous, reject.
        ctx.api.fail('Too many names for one attribute', stmt)
        return None
    init_type = stmt.type
    init = _get_bool_argument(ctx, rvalue, 'init', True)
    # Class-level kw_only OR a per-attribute kw_only=True makes it keyword-only.
    kw_only |= _get_bool_argument(ctx, rvalue, 'kw_only', False)
    attr_has_default = bool(_get_argument(rvalue, 'default'))
    attr_has_factory = bool(_get_argument(rvalue, 'factory'))
    if (attr_has_default and attr_has_factory):
        ctx.api.fail('Can\'t pass both "default" and "factory".', rvalue)
    elif attr_has_factory:
        # A factory counts as supplying a default value.
        attr_has_default = True
    type_arg = _get_argument(rvalue, 'type')
    if (type_arg and (not init_type)):
        # An explicit type=... argument supplies the type when there is no
        # annotation; it must be translatable into a mypy type.
        try:
            un_type = expr_to_unanalyzed_type(type_arg, ctx.api.options, ctx.api.is_stub_file)
        except TypeTranslationError:
            ctx.api.fail('Invalid argument to type', type_arg)
        else:
            init_type = ctx.api.anal_type(un_type)
            if (init_type and isinstance(lhs.node, Var) and (not lhs.node.type)):
                # Propagate the inferred type onto the variable itself.
                lhs.node.type = init_type
                lhs.is_inferred_def = False
    converter = _get_argument(rvalue, 'converter')
    convert = _get_argument(rvalue, 'convert')
    if (convert and converter):
        ctx.api.fail('Can\'t pass both "convert" and "converter".', rvalue)
    elif convert:
        # "convert" is the deprecated spelling; warn but honor it.
        ctx.api.fail('convert is deprecated, use converter', rvalue)
        converter = convert
    converter_info = _parse_converter(ctx, converter)
    alias = None
    alias_expr = _get_argument(rvalue, 'alias')
    if alias_expr:
        # alias=... must be a string literal so it can be resolved statically.
        alias = ctx.api.parse_str_literal(alias_expr)
        if (alias is None):
            ctx.api.fail('"alias" argument to attrs field must be a string literal', rvalue, code=LITERAL_REQ)
    name = unmangle(lhs.name)
    return Attribute(name, alias, ctx.cls.info, attr_has_default, init, kw_only, converter_info, stmt, init_type)
class NASBOTDistance(GraphKernels):
    """NASBOT-style distance kernel between architecture graphs.

    Computes k(g1, g2) = exp(-d(g1, g2) / l**2), where d(g1, g2) combines
    operation-count differences and structural differences between graphs.
    """

    def __init__(self, node_name='op_name', op_list=None, lengthscale=3.0, normalize=True, **kwargs):
        super(NASBOTDistance, self).__init__(**kwargs)
        self.node_name = node_name  # node attribute holding the operation label
        self.op_list = (op_list if (op_list is not None) else OPS)
        self.normalize = normalize  # whether to normalize the Gram matrix
        self.lengthscale = lengthscale  # default kernel lengthscale l

    def _compute_kernel(self, dist, l=None):
        # RBF-style transform of the distance.  dist=None is treated as
        # "no contribution" and maps to 0.0.
        if (dist is None):
            return 0.0
        if (l is None):
            l = self.lengthscale
        return np.exp(((- dist) / (l ** 2)))

    def _compute_dist(self, g1: nx.Graph, g2: nx.Graph):
        # A '~' in the graph name marks NAS-Bench-201-style graphs
        # (op strings encoded in the name); those use an op-count +
        # graph-edit-distance metric.
        if ('~' in g1.name):
            g1_ops = get_op_list(g1.name)
            g2_ops = get_op_list(g2.name)
            # L1 distance between per-op occurrence counts.
            g1_counts = [g1_ops.count(op) for op in OPS_201]
            g2_counts = [g2_ops.count(op) for op in OPS_201]
            ops_dist = np.sum(np.abs(np.subtract(g1_counts, g2_counts)))
            edit_dist = edit_distance(g1, g2)
            return (ops_dist + edit_dist)
        else:
            # Generic graphs: compare sorted adjacency row/column sums
            # (degree-like structural signatures) plus op-count differences.
            a1 = nx.to_numpy_array(g1)
            a2 = nx.to_numpy_array(g2)
            row_sums = sorted(np.array(a1).sum(axis=0))
            col_sums = sorted(np.array(a1).sum(axis=1))
            other_row_sums = sorted(np.array(a2).sum(axis=0))
            other_col_sums = sorted(np.array(a2).sum(axis=1))
            row_dist = np.sum(np.abs(np.subtract(row_sums, other_row_sums)))
            col_dist = np.sum(np.abs(np.subtract(col_sums, other_col_sums)))
            # Count how often each op in op_list appears in each graph.
            counts = ([0] * len(self.op_list))
            other_counts = ([0] * len(self.op_list))
            for (node, attrs) in g1.nodes(data=True):
                idx = self.op_list.index(attrs[self.node_name])
                counts[idx] += 1
            for (node, attrs) in g2.nodes(data=True):
                idx = self.op_list.index(attrs[self.node_name])
                other_counts[idx] += 1
            ops_dist = np.sum(np.abs(np.subtract(counts, other_counts)))
            return (((row_dist + col_dist) + ops_dist) + 0.0)

    def forward(self, *graphs: nx.Graph, l: float=None):
        """Return the (optionally normalized) symmetric Gram matrix over graphs."""
        n = len(graphs)
        K = torch.zeros((n, n))
        # Only the upper triangle is computed; the matrix is mirrored.
        for i in range(n):
            for j in range(i, n):
                K[(i, j)] = self._compute_kernel(self._compute_dist(graphs[i], graphs[j]), l)
                K[(j, i)] = K[(i, j)]
        if self.normalize:
            K = self.normalize_gram(K)
        return K

    def fit_transform(self, gr: list, l: float=None, rebuild_model: bool=False, save_gram_matrix: bool=False, **kwargs):
        """Compute (or reuse) the training Gram matrix for the graphs in gr."""
        # Reuse the cached Gram matrix unless a rebuild is requested.
        if ((not rebuild_model) and (self._gram is not None)):
            return self._gram
        K = self.forward(*gr, l=l)
        if save_gram_matrix:
            self._gram = K.clone()
            self._train_x = gr[:]
        return K

    def transform(self, gr: list, l: float=None, **kwargs):
        """Return the cross-kernel matrix between the fitted graphs and gr."""
        if (self._gram is None):
            raise ValueError('The kernel has not been fitted. Run fit_transform first')
        n = len(gr)
        K = torch.zeros((len(self._train_x), n))
        for i in range(len(self._train_x)):
            for j in range(n):
                K[(i, j)] = self._compute_kernel(self._compute_dist(self._train_x[i], gr[j]), l)
        return K
class TestInlineQueryResultCachedGifBase():
    """Shared fixture values for InlineQueryResultCachedGif tests."""
    # Result identifier and type discriminator.
    id_ = 'id'
    type_ = 'gif'
    # Telegram file id of the cached GIF.
    gif_file_id = 'gif file id'
    title = 'title'
    caption = 'caption'
    parse_mode = 'HTML'
    caption_entities = [MessageEntity(MessageEntity.ITALIC, 0, 7)]
    # Content sent instead of the GIF when the result is selected.
    input_message_content = InputTextMessageContent('input_message_content')
    reply_markup = InlineKeyboardMarkup([[InlineKeyboardButton('reply_markup')]])
class AllVersions():
    """Holds per-platform version resolvers and updates build-config entries.

    Each attribute knows how to find the latest Python build matching a
    version specifier for one platform/interpreter combination.
    """

    def __init__(self) -> None:
        self.windows_32 = WindowsVersions('32')
        self.windows_64 = WindowsVersions('64')
        self.windows_arm64 = WindowsVersions('ARM64')
        self.windows_pypy_64 = PyPyVersions('64')
        self.macos_cpython = CPythonVersions()
        self.macos_pypy = PyPyVersions('64')
        self.macos_pypy_arm64 = PyPyVersions('ARM64')

    def update_config(self, config: MutableMapping[(str, str)]) -> None:
        """Update one config entry in place with the latest matching version.

        Dispatches on the build identifier (platform substring + interpreter
        prefix) to the appropriate resolver; asserts if no resolver matches.
        """
        identifier = config['identifier']
        version = Version(config['version'])
        # Pin to the same major.minor series as the current version.
        spec = Specifier(f'=={version.major}.{version.minor}.*')
        log.info('Reading in %r -> %s %s', str(identifier), spec, version)
        orig_config = copy.copy(config)
        config_update: (AnyConfig | None) = None
        if ('macosx' in identifier):
            if identifier.startswith('cp'):
                config_update = self.macos_cpython.update_version_macos(identifier, version, spec)
            elif identifier.startswith('pp'):
                if ('macosx_x86_64' in identifier):
                    config_update = self.macos_pypy.update_version_macos(spec)
                elif ('macosx_arm64' in identifier):
                    config_update = self.macos_pypy_arm64.update_version_macos(spec)
        elif ('win32' in identifier):
            if identifier.startswith('cp'):
                config_update = self.windows_32.update_version_windows(spec)
        elif ('win_amd64' in identifier):
            if identifier.startswith('cp'):
                config_update = self.windows_64.update_version_windows(spec)
            elif identifier.startswith('pp'):
                config_update = self.windows_pypy_64.update_version_windows(spec)
        elif (('win_arm64' in identifier) and identifier.startswith('cp')):
            config_update = self.windows_arm64.update_version_windows(spec)
        # Every known identifier must have been handled above.
        assert (config_update is not None), f'{identifier} not found!'
        config.update(**config_update)
        if (config != orig_config):
            log.info('  Updated %s to %s', orig_config, config)
def extract_refexpr_names(expr: RefExpr) -> set[str]:
    """Collect the module names a reference expression depends on.

    Walks a chain of member accesses (``a.b.c``) from the outermost
    expression inward, adding the relevant module fullnames.  RefExpr has
    exactly two subclasses, NameExpr and MemberExpr; anything else is a
    programming error.
    """
    output: set[str] = set()
    while (isinstance(expr.node, MypyFile) or expr.fullname):
        if (isinstance(expr.node, MypyFile) and expr.fullname):
            # If it's None, something's wrong (perhaps due to an
            # import cycle or a suppressed error).  For now we just ignore it.
            output.add(expr.fullname)
        if isinstance(expr, NameExpr):
            is_suppressed_import = (isinstance(expr.node, Var) and expr.node.is_suppressed_import)
            if isinstance(expr.node, TypeInfo):
                # Reference to a class: depend on the module that defines it.
                output.update(split_module_names(expr.node.module_name))
            elif (('.' in expr.fullname) and (not is_suppressed_import)):
                # Everything else (that isn't a silenced import within a class):
                # depend on the enclosing module of the referenced name.
                output.add(expr.fullname.rsplit('.', 1)[0])
            break
        elif isinstance(expr, MemberExpr):
            if isinstance(expr.expr, RefExpr):
                # Step inward: a.b.c -> a.b -> a.
                expr = expr.expr
            else:
                break
        else:
            raise AssertionError(f'Unknown RefExpr subclass: {type(expr)}')
    return output
def list_packages(venv_container: VenvContainer, include_injected: bool, json_format: bool, short_format: bool) -> ExitCode:
    """List all pipx-managed venvs in the requested format and report problems.

    Returns EXIT_CODE_OK on success, or EXIT_CODE_LIST_PROBLEM when any venv
    has issues (bad name, missing interpreter/metadata, broken install).
    """
    venv_dirs: Collection[Path] = sorted(venv_container.iter_venv_dirs())
    if (not venv_dirs):
        # `sleep` is presumably an emoji/constant defined at module level —
        # confirm against the surrounding file.
        print(f'nothing has been installed with pipx {sleep}', file=sys.stderr)
    venv_container.verify_shared_libs()
    if json_format:
        all_venv_problems = list_json(venv_dirs)
    elif short_format:
        all_venv_problems = list_short(venv_dirs)
    else:
        # Text format: nothing to render when there are no venvs; the JSON and
        # short formats still emit their (empty) structure above.
        if (not venv_dirs):
            return EXIT_CODE_OK
        all_venv_problems = list_text(venv_dirs, include_injected, str(venv_container))
    # Emit one actionable warning per class of problem found.
    if all_venv_problems.bad_venv_name:
        logger.warning('\nOne or more packages contain out-of-date internal data installed from a\nprevious pipx version and need to be updated.\n    To fix, execute: pipx reinstall-all')
    if all_venv_problems.invalid_interpreter:
        logger.warning('\nOne or more packages have a missing python interpreter.\n    To fix, execute: pipx reinstall-all')
    if all_venv_problems.missing_metadata:
        logger.warning('\nOne or more packages have a missing internal pipx metadata.\n   They were likely installed using a pipx version before 0.15.0.0.\n   Please uninstall and install these package(s) to fix.')
    if all_venv_problems.not_installed:
        logger.warning('\nOne or more packages are not installed properly.\n   Please uninstall and install these package(s) to fix.')
    if all_venv_problems.any_():
        print('', file=sys.stderr)
        return EXIT_CODE_LIST_PROBLEM
    return EXIT_CODE_OK
def test_single_circuit():
    """vpe_single_circuit should produce a 6-moment circuit whose single
    measurement column ('msmt') deterministically reads 1."""
    qubit_a = cirq.GridQubit(0, 0)
    qubit_b = cirq.GridQubit(0, 1)
    qubits = reversed([qubit_a, qubit_b])
    quarter_turn = numpy.pi / 2
    # State preparation: a single FSim interaction between the two qubits.
    prep = cirq.Circuit([cirq.FSimGate(theta=numpy.pi / 4, phi=0).on(qubit_a, qubit_b)])
    # Time evolution: equal Z rotations on both qubits.
    evolve = cirq.Circuit([cirq.rz(quarter_turn).on(qubit_a), cirq.rz(quarter_turn).on(qubit_b)])
    initial_rotation = cirq.ry(quarter_turn).on(qubit_a)
    final_rotation = cirq.rx(-quarter_turn).on(qubit_a)
    circuit = vpe_single_circuit(qubits, prep, evolve, initial_rotation, final_rotation)
    assert len(circuit) == 6
    # The measurement outcome is deterministic: all 100 shots read 1.
    outcome_counts = cirq.Simulator().run(circuit, repetitions=100).data['msmt'].value_counts()
    assert outcome_counts[1] == 100
def second_page():
    """Return a mocked HTTP response representing the final page of a
    paginated list (``has_more`` false, two resources)."""
    payload = '\n            {\n                "object": "list",\n                "has_more": false,\n                "next": null,\n                "data": [\n                    { "object": "my_resource", "my_int": 126 },\n                    { "object": "my_resource", "my_int": 127 }\n                ]\n            }\n            '
    mock_response = MagicMock()
    mock_response.status = 200
    mock_response.read.return_value = payload.encode('UTF-8')
    return mock_response
# NOTE(review): the two lines below look truncated by extraction — upstream
# they are presumably `@pytest.mark.parametrize(...)` decorators; confirm
# against the original test module.
.parametrize('username,password', users)
.parametrize('value_id', values)
def test_delete(db, client, username, password, value_id):
    """DELETE on the detail endpoint: authenticated users get 405 (method
    not allowed), anonymous users get 401 (unauthorized)."""
    client.login(username=username, password=password)
    url = reverse(urlnames['detail'], args=[value_id])
    response = client.delete(url)
    if password:
        # Logged in: the endpoint exists but DELETE is not permitted.
        assert (response.status_code == 405)
    else:
        # Not logged in: rejected as unauthenticated.
        assert (response.status_code == 401)
def os_mock(mocker):
    """Patch qutebrowser's configtypes ``os`` module so path expansion is
    deterministic: ``$HOME``/``~`` map to /home/foo, join uses '/'."""
    patched = mocker.patch('qutebrowser.config.configtypes.os', autospec=True)

    def fake_expandvars(value):
        return value.replace('$HOME', '/home/foo')

    def fake_expanduser(value):
        return value.replace('~', '/home/foo')

    patched.path.expandvars.side_effect = fake_expandvars
    patched.path.expanduser.side_effect = fake_expanduser
    patched.path.join.side_effect = lambda *parts: '/'.join(parts)
    return patched
def test_sequence():
    """Exercise the pybind11 Sequence binding: construction, the sequence
    protocol (len/getitem/setitem/contains/iteration/slicing), and object
    lifetime tracked through ConstructorStats.

    NOTE: the assertion order is significant — cstats.values() drains the
    recorded construction log, and the del/alive() pairs depend on exactly
    which objects exist at each point.
    """
    cstats = ConstructorStats.get(m.Sequence)
    s = m.Sequence(5)
    assert (cstats.values() == ['of size', '5'])
    assert ('Sequence' in repr(s))
    assert (len(s) == 5)
    assert ((s[0] == 0) and (s[3] == 0))
    assert (12.34 not in s)
    (s[0], s[3]) = (12.34, 56.78)
    assert (12.34 in s)
    assert (isclose(s[0], 12.34) and isclose(s[3], 56.78))
    # reversed() and slicing each create a new Sequence of the same size.
    rev = reversed(s)
    assert (cstats.values() == ['of size', '5'])
    rev2 = s[::(- 1)]
    assert (cstats.values() == ['of size', '5'])
    # An exhausted iterator must keep raising StopIteration.
    it = iter(m.Sequence(0))
    for _ in range(3):
        with pytest.raises(StopIteration):
            next(it)
    assert (cstats.values() == ['of size', '0'])
    expected = [0, 56.78, 0, 0, 12.34]
    assert allclose(rev, expected)
    assert allclose(rev2, expected)
    assert (rev == rev2)
    # Extended-slice assignment from a std::vector-constructed Sequence.
    rev[0::2] = m.Sequence([2.0, 2.0, 2.0])
    assert (cstats.values() == ['of size', '3', 'from std::vector'])
    assert allclose(rev, [2, 56.78, 2, 0, 2])
    # Tear down one object at a time and confirm the live count tracks it.
    assert (cstats.alive() == 4)
    del it
    assert (cstats.alive() == 3)
    del s
    assert (cstats.alive() == 2)
    del rev
    assert (cstats.alive() == 1)
    del rev2
    assert (cstats.alive() == 0)
    assert (cstats.values() == [])
    # No copies should ever have been made — only moves.
    assert (cstats.default_constructions == 0)
    assert (cstats.copy_constructions == 0)
    assert (cstats.move_constructions >= 1)
    assert (cstats.copy_assignments == 0)
    assert (cstats.move_assignments == 0)
class UploadDialog(QDialog):
    """Dialog for picking files/folders to upload to LanZouCloud.

    Emits ``new_infos`` with a dict of UpJob tasks (keyed by local path)
    when the user confirms the selection.
    """
    new_infos = pyqtSignal(object)

    def __init__(self, user_home):
        super().__init__()
        self.cwd = user_home            # directory the file pickers start in
        self._folder_id = -1            # target cloud folder id
        self._folder_name = 'LanZouCloud'
        self.set_pwd = False            # attach a password to uploads?
        self.set_desc = False           # attach a description to uploads?
        self.pwd = ''
        self.desc = ''
        self.allow_big_file = False     # permit files larger than max_size
        self.max_size = 100             # per-file size limit, in MB
        self.selected = []              # chosen file/folder paths
        self.initUI()
        self.set_size()
        self.setStyleSheet(dialog_qss_style)

    def set_pwd_desc_bigfile(self, settings):
        """Apply upload settings: password, description, and size limits."""
        self.set_pwd = settings['set_pwd']
        self.set_desc = settings['set_desc']
        self.pwd = settings['pwd']
        self.desc = settings['desc']
        self.allow_big_file = settings['allow_big_file']
        self.max_size = settings['max_size']
        if self.allow_big_file:
            self.btn_chooseMultiFile.setToolTip('')
        else:
            self.btn_chooseMultiFile.setToolTip(f' {self.max_size}MB')

    def set_values(self, folder_name, folder_id, files):
        """Open the dialog for uploading into the given cloud folder."""
        self.setWindowTitle(' ' + str(folder_name))
        self._folder_id = folder_id
        self._folder_name = folder_name
        if files:
            self.selected = files
            self.show_selected()
        self.exec()

    def initUI(self):
        """Build the widget tree and connect signals."""
        self.setWindowTitle('')
        self.setWindowIcon(QIcon(SRC_DIR + 'upload.ico'))
        self.logo = QLabel()
        self.logo.setPixmap(QPixmap(SRC_DIR + 'logo3.gif'))
        self.logo.setStyleSheet('background-color:rgb(0,153,255);')
        self.logo.setAlignment(Qt.AlignmentFlag.AlignCenter)
        self.btn_chooseDir = QPushButton('', self)
        self.btn_chooseDir.setObjectName('btn_chooseDir')
        self.btn_chooseDir.setIcon(QIcon(SRC_DIR + 'folder.gif'))
        self.btn_chooseMultiFile = QPushButton('', self)
        # BUG FIX: the original set btn_chooseDir's object name to
        # 'btn_chooseMultiFile' here (copy-paste error), clobbering the
        # name assigned above; only btn_chooseMultiFile gets this name.
        self.btn_chooseMultiFile.setObjectName('btn_chooseMultiFile')
        self.btn_chooseMultiFile.setIcon(QIcon(SRC_DIR + 'file.ico'))
        self.btn_deleteSelect = QPushButton('', self)
        self.btn_deleteSelect.setObjectName('btn_deleteSelect')
        self.btn_deleteSelect.setIcon(QIcon(SRC_DIR + 'delete.ico'))
        self.btn_deleteSelect.setToolTip(' Delete ')
        self.list_view = MyListView()
        self.list_view.drop_files.connect(self.add_drop_files)
        self.list_view.setViewMode(QListView.ViewMode.ListMode)
        self.slm = QStandardItem()
        self.model = QStandardItemModel()
        self.list_view.setModel(self.model)
        self.model.removeRows(0, self.model.rowCount())
        self.list_view.setEditTriggers(QAbstractItemView.EditTrigger.NoEditTriggers)
        self.list_view.setSelectionBehavior(QAbstractItemView.SelectionBehavior.SelectRows)
        self.list_view.setSelectionMode(QAbstractItemView.SelectionMode.ExtendedSelection)
        self.buttonBox = QDialogButtonBox()
        self.buttonBox.setOrientation(Qt.Orientation.Horizontal)
        self.buttonBox.setStandardButtons(QDialogButtonBox.StandardButton.Ok | QDialogButtonBox.StandardButton.Cancel)
        self.buttonBox.button(QDialogButtonBox.StandardButton.Ok).setText('')
        self.buttonBox.button(QDialogButtonBox.StandardButton.Cancel).setText('')
        vbox = QVBoxLayout()
        hbox_head = QHBoxLayout()
        hbox_button = QHBoxLayout()
        hbox_head.addWidget(self.btn_chooseDir)
        hbox_head.addStretch(1)
        hbox_head.addWidget(self.btn_chooseMultiFile)
        hbox_button.addWidget(self.btn_deleteSelect)
        hbox_button.addStretch(1)
        hbox_button.addWidget(self.buttonBox)
        vbox.addWidget(self.logo)
        vbox.addLayout(hbox_head)
        vbox.addWidget(self.list_view)
        vbox.addLayout(hbox_button)
        self.setLayout(vbox)
        self.setMinimumWidth(350)
        self.btn_chooseDir.clicked.connect(self.slot_btn_chooseDir)
        self.btn_chooseMultiFile.clicked.connect(self.slot_btn_chooseMultiFile)
        self.btn_deleteSelect.clicked.connect(self.slot_btn_deleteSelect)
        self.buttonBox.accepted.connect(self.slot_btn_ok)
        self.buttonBox.accepted.connect(self.accept)
        self.buttonBox.rejected.connect(self.clear_old)
        self.buttonBox.rejected.connect(self.reject)

    def set_size(self):
        """Resize the dialog to roughly fit the current selection list."""
        if self.selected:
            h = 18 if len(self.selected) > 18 else 10
            w = 40
            for i in self.selected:
                i_len = len(i)
                if i_len > 100:
                    # Cap the width for very long paths.
                    w = 100
                    break
                if i_len > w:
                    w = i_len
            self.resize(120 + (w * 7), h * 30)
        else:
            self.resize(400, 300)

    def clear_old(self):
        """Discard the current selection and reset the view."""
        self.selected = []
        self.model.removeRows(0, self.model.rowCount())
        self.set_size()

    def show_selected(self):
        """Re-render the list view from self.selected (file vs folder icons)."""
        self.model.removeRows(0, self.model.rowCount())
        for item in self.selected:
            if os.path.isfile(item):
                self.model.appendRow(QStandardItem(QIcon(SRC_DIR + 'file.ico'), item))
            else:
                self.model.appendRow(QStandardItem(QIcon(SRC_DIR + 'folder.gif'), item))
        self.set_size()

    def backslash(self):
        """Build UpJob tasks for every selected path, keyed by normalized path.

        Zero-byte single files are skipped; for folders only direct child
        files are counted (no recursion).
        """
        tasks = {}
        for item in self.selected:
            url = os.path.normpath(item)
            total_size = 0
            total_file = 0
            if os.path.isfile(url):
                total_size = os.path.getsize(url)
                if not total_size:
                    # Skip empty files entirely.
                    continue
                total_file += 1
            else:
                for filename in os.listdir(url):
                    file_path = os.path.join(url, filename)
                    if not os.path.isfile(file_path):
                        continue
                    total_size += os.path.getsize(file_path)
                    total_file += 1
            tasks[url] = UpJob(url=url, fid=self._folder_id, folder=self._folder_name, pwd=(self.pwd if self.set_pwd else None), desc=(self.desc if self.set_desc else None), total_size=total_size, total_file=total_file)
        return tasks

    def slot_btn_ok(self):
        """Confirm: emit the task dict and reset the dialog state."""
        tasks = self.backslash()
        if self.selected:
            self.new_infos.emit(tasks)
            self.clear_old()

    def slot_btn_deleteSelect(self):
        """Remove the highlighted rows from the selection and the model."""
        _indexes = self.list_view.selectionModel().selection().indexes()
        if not _indexes:
            return
        indexes = set(i.row() for i in _indexes)
        # Remove from the bottom up so earlier row indices stay valid.
        for i in sorted(indexes, reverse=True):
            self.selected.remove(self.model.item(i, 0).text())
            self.model.removeRow(i)
        self.set_size()

    def add_drop_files(self, files):
        """Add drag-and-dropped paths (deduplicated) to the selection."""
        for item in files:
            if item not in self.selected:
                self.selected.append(item)
        self.show_selected()

    def slot_btn_chooseDir(self):
        """Pick a folder to upload; remember its parent as the next cwd."""
        dir_choose = QFileDialog.getExistingDirectory(self, '', self.cwd)
        if dir_choose == '':
            return
        if dir_choose not in self.selected:
            self.selected.append(dir_choose)
        self.cwd = os.path.dirname(dir_choose)
        self.show_selected()

    def slot_btn_chooseMultiFile(self):
        """Pick files to upload, enforcing the size limit unless big files
        are allowed."""
        (files, _) = QFileDialog.getOpenFileNames(self, '', self.cwd, 'All Files (*)')
        if len(files) == 0:
            return
        for _file in files:
            if _file not in self.selected:
                if os.path.getsize(_file) <= self.max_size * 1048576:
                    self.selected.append(_file)
                elif self.allow_big_file:
                    self.selected.append(_file)
        self.show_selected()

    def keyPressEvent(self, e):
        # Delete key removes the highlighted selection rows.
        if e.key() == Qt.Key.Key_Delete:
            self.slot_btn_deleteSelect()
class RedisClusterIntegrationTests(RedisIntegrationTestCase):
    """Integration tests for the baseplate redis-cluster client: span
    instrumentation, locking, pipelines, and Prometheus metrics."""

    def setUp(self):
        # Configure the base test case for the cluster client before it
        # builds the baseplate context.
        self.baseplate_app_config = {'rediscluster.url': f'redis://{redis_endpoint}/0', 'rediscluster.timeout': '1 second', 'rediscluster.max_connections': '4'}
        self.redis_client_builder = ClusterRedisClient
        self.redis_context_name = 'rediscluster'
        super().setUp()

    def test_simple_command(self):
        """A successful command produces one named child span with no error."""
        with self.server_span:
            result = self.context.rediscluster.ping()
        self.assertTrue(result)
        server_span_observer = self.baseplate_observer.get_only_child()
        span_observer = server_span_observer.get_only_child()
        self.assertEqual(span_observer.span.name, 'rediscluster.PING')
        self.assertTrue(span_observer.on_start_called)
        self.assertTrue(span_observer.on_finish_called)
        self.assertIsNone(span_observer.on_finish_exc_info)

    def test_error(self):
        """A failing command still finishes its span, recording the exception."""
        with self.server_span:
            with self.assertRaises(rediscluster.RedisClusterException):
                self.context.rediscluster.execute_command('crazycommand')
        server_span_observer = self.baseplate_observer.get_only_child()
        span_observer = server_span_observer.get_only_child()
        self.assertTrue(span_observer.on_start_called)
        self.assertTrue(span_observer.on_finish_called)
        self.assertIsNotNone(span_observer.on_finish_exc_info)

    def test_lock(self):
        """Acquiring/releasing a lock produces at least one completed span."""
        with self.server_span:
            with self.context.rediscluster.lock('foo-lock'):
                pass
        server_span_observer = self.baseplate_observer.get_only_child()
        self.assertGreater(len(server_span_observer.children), 0)
        for span_observer in server_span_observer.children:
            self.assertTrue(span_observer.on_start_called)
            self.assertTrue(span_observer.on_finish_called)

    def test_pipeline(self):
        """A pipeline executes as a single named span."""
        with self.server_span:
            with self.context.rediscluster.pipeline('foo') as pipeline:
                pipeline.set('foo', 'bar')
                pipeline.get('foo')
                pipeline.get('foo')
                pipeline.get('foo')
                pipeline.get('foo')
                pipeline.get('foo')
                pipeline.delete('foo')
                pipeline.execute()
        server_span_observer = self.baseplate_observer.get_only_child()
        span_observer = server_span_observer.get_only_child()
        self.assertEqual(span_observer.span.name, 'rediscluster.pipeline_foo')
        self.assertTrue(span_observer.on_start_called)
        self.assertTrue(span_observer.on_finish_called)
        self.assertIsNone(span_observer.on_finish_exc_info)

    def test_metrics(self):
        """A SET increments the Prometheus request/latency counters once and
        leaves no active requests."""
        client_name = 'redis_test'
        # NOTE(review): the loop variable is never used — both iterations pass
        # 'redis_client_name' regardless; presumably the intent was to vary
        # the kwarg name.  Confirm against upstream.
        for client_name_kwarg_name in ['redis_client_name', 'client_name']:
            with self.subTest():
                self.setup_baseplate_redis(redis_client_kwargs={'redis_client_name': client_name})
                expected_labels = {'redis_client_name': client_name, 'redis_type': 'cluster', 'redis_command': 'SET', 'redis_database': '0'}
                with self.server_span:
                    self.context.rediscluster.set('prometheus', 'rocks')
                request_labels = {**expected_labels, 'redis_success': 'true'}
                assert (REGISTRY.get_sample_value(f'{REQUESTS_TOTAL._name}_total', request_labels) == 1.0), "Unexpected value for REQUESTS_TOTAL metric. Expected one 'set' command"
                assert (REGISTRY.get_sample_value(f'{LATENCY_SECONDS._name}_bucket', {**request_labels, 'le': '+Inf'}) == 1.0), "Expected one 'set' latency request"
                assert (REGISTRY.get_sample_value(ACTIVE_REQUESTS._name, {**expected_labels}) == 0.0), 'Should have 0 (and not None) active requests'
                self.tearDown()

    def test_pipeline_metrics(self):
        """A pipeline counts as one request in the Prometheus metrics."""
        client_name = 'test_client'
        # NOTE(review): same unused loop variable as in test_metrics above.
        for client_name_kwarg_name in ['redis_client_name', 'client_name']:
            with self.subTest():
                self.setup_baseplate_redis(redis_client_kwargs={'redis_client_name': client_name})
                expected_labels = {'redis_client_name': client_name, 'redis_type': 'cluster', 'redis_command': 'pipeline', 'redis_database': '0'}
                with self.server_span:
                    with self.context.rediscluster.pipeline('foo') as pipeline:
                        pipeline.set('foo', 'bar')
                        pipeline.get('foo')
                        pipeline.get('foo')
                        pipeline.get('foo')
                        pipeline.get('foo')
                        pipeline.get('foo')
                        pipeline.delete('foo')
                        pipeline.execute()
                request_labels = {**expected_labels, 'redis_success': 'true'}
                assert (REGISTRY.get_sample_value(f'{REQUESTS_TOTAL._name}_total', request_labels) == 1.0), "Unexpected value for REQUESTS_TOTAL metric. Expected one 'set' command"
                assert (REGISTRY.get_sample_value(f'{LATENCY_SECONDS._name}_bucket', {**request_labels, 'le': '+Inf'}) == 1.0), "Expected one 'set' latency request"
                assert (REGISTRY.get_sample_value(ACTIVE_REQUESTS._name, {**expected_labels}) == 0.0), 'Should have 0 (and not None) active requests'
                self.tearDown()
def _get_all_tilted_square_lattices(min_side_length=2, max_side_length=8, side_length_step=2):
    """Enumerate TiltedSquareLattice topologies for every unordered pair of
    side lengths in [min_side_length, max_side_length], stepping by
    side_length_step."""
    sides = np.arange(min_side_length, max_side_length + 1, side_length_step)
    side_pairs = itertools.combinations_with_replacement(sides, r=2)
    return [TiltedSquareLattice(width, height) for width, height in side_pairs]
class BaseQueryable():
    """Base class for queryable fields.

    Maps Python comparison operators onto query ``Condition`` objects via
    the concrete builder methods (``equals``, ``less_than``, ...), which
    subclasses must implement.  Also provides list/emptiness helpers built
    on :meth:`_condition`.
    """
    # Field name; populated by the concrete field subclass.
    name = None

    def __eq__(self, other: Any) -> Any:
        return self.equals(other)

    def __ne__(self, other: Any) -> Any:
        return self.not_equals(other)

    def __lt__(self, other: Any) -> Condition:
        return self.less_than(other)

    def __le__(self, other: Any) -> Condition:
        return self.less_or_equals(other)

    def __gt__(self, other: Any) -> Condition:
        return self.greater_than(other)

    def __ge__(self, other: Any) -> Condition:
        return self.greater_or_equals(other)

    def equals(self, other: Any) -> Condition:
        raise NotImplementedError

    def not_equals(self, other: Any) -> Condition:
        raise NotImplementedError

    def less_than(self, other: Any) -> Condition:
        raise NotImplementedError

    def less_or_equals(self, other: Any) -> Condition:
        raise NotImplementedError

    def greater_than(self, other: Any) -> Condition:
        raise NotImplementedError

    def greater_or_equals(self, other: Any) -> Condition:
        raise NotImplementedError

    def _condition(self, operator: str, value: Union[str, int] = None, field_operator: str = '') -> Condition:
        """Build a Condition for this field.

        Comparing against another field object substitutes field_operator
        (raising UnexpectedValue when none is supported for this operator).
        """
        if isinstance(value, self.__class__):
            # Field-to-field comparison needs a dedicated operator.
            if not field_operator:
                raise UnexpectedValue(f'{operator} does not support field-to-field comparison')
            operator = field_operator
            value = value.name
        if not isinstance(self.name, str):
            raise AiosnowException(f'Missing left operand of {self.__class__}')
        return Condition(self.name, operator, value)

    def in_list(self, values: list) -> Condition:
        return self._condition(BaseOperator.ONEOF, serialize_list(values))

    def not_in_list(self, values: list) -> Condition:
        return self._condition(BaseOperator.NOT_ONEOF, serialize_list(values))

    def is_empty(self) -> Condition:
        return self._condition(BaseOperator.EMPTY)

    def is_populated(self) -> Condition:
        return self._condition(BaseOperator.POPULATED)

    def is_anything(self) -> Condition:
        return self._condition(BaseOperator.ANYTHING)
def read_fec_packets(filename):
    """Read FEC feature packets from a binary dump.

    File layout (all header fields little-endian int16): version,
    header_size, num_packets, packet_size, subframe_size,
    subframes_per_packet, num_features; then per packet: an int16 rate
    followed by subframe_size * subframes_per_packet bytes of float32
    features with shape (subframes_per_packet, num_features).

    Returns a list of arrays of that shape, with the subframe axis
    reversed (most recent subframe first).
    """
    assert (np.dtype(np.float32).itemsize == 4)
    assert (np.dtype(np.int16).itemsize == 2)
    with open(filename, 'rb') as f:
        version = np.frombuffer(f.read(2), dtype=np.int16).item()
        header_size = np.frombuffer(f.read(2), dtype=np.int16).item()
        num_packets = np.frombuffer(f.read(2), dtype=np.int16).item()
        packet_size = np.frombuffer(f.read(2), dtype=np.int16).item()
        subframe_size = np.frombuffer(f.read(2), dtype=np.int16).item()
        subframes_per_packet = np.frombuffer(f.read(2), dtype=np.int16).item()
        num_features = np.frombuffer(f.read(2), dtype=np.int16).item()
        # Template only used for its shape when reshaping each packet.
        dummy_features = np.zeros((subframes_per_packet, num_features), dtype=np.float32)
        rates = []
        packets = []
        for i in range(num_packets):
            # BUG FIX: the original read `.item` (the bound method, missing
            # call parentheses), so `rates` collected method objects instead
            # of the int16 rate values.
            rate = np.frombuffer(f.read(2), dtype=np.int16).item()
            rates.append(rate)
            features = np.reshape(np.frombuffer(f.read(subframe_size * subframes_per_packet), dtype=np.float32), dummy_features.shape)
            # Reverse the subframe (second-to-last) axis.
            packet = np.flip(features, axis=-2)
            packets.append(packet)
    return packets
class PollAnswer(TelegramObject):
    """A user's (or anonymous channel's) answer in a non-anonymous poll.

    Equality is based on poll_id, option_ids, user, and voter_chat.
    """
    __slots__ = ('option_ids', 'poll_id', 'user', 'voter_chat')

    def __init__(self, poll_id: str, option_ids: Sequence[int], user: Optional[User]=None, voter_chat: Optional[Chat]=None, *, api_kwargs: Optional[JSONDict]=None):
        super().__init__(api_kwargs=api_kwargs)
        self.poll_id: str = poll_id
        self.voter_chat: Optional[Chat] = voter_chat
        # Normalize the sequence of chosen option indices to a tuple.
        self.option_ids: Tuple[int, ...] = parse_sequence_arg(option_ids)
        self.user: Optional[User] = user
        self._id_attrs = (self.poll_id, self.option_ids, self.user, self.voter_chat)
        self._freeze()

    # BUG FIX: the original defined de_json with a `cls` first parameter but
    # no @classmethod decorator, so `PollAnswer.de_json(data, bot)` would
    # bind `data` to `cls` and misparse its arguments.
    @classmethod
    def de_json(cls, data: Optional[JSONDict], bot: 'Bot') -> Optional['PollAnswer']:
        """Build a PollAnswer from a decoded Telegram API dict (or None)."""
        data = cls._parse_data(data)
        if (not data):
            return None
        # Deserialize the nested objects before delegating to the base class.
        data['user'] = User.de_json(data.get('user'), bot)
        data['voter_chat'] = Chat.de_json(data.get('voter_chat'), bot)
        return super().de_json(data=data, bot=bot)
def is_valid_withdraw_expired(channel_state: NettingChannelState, state_change: ReceiveWithdrawExpired, withdraw_state: PendingWithdrawState, block_number: BlockNumber) -> SuccessOrError:
    """Validate a partner's WithdrawExpired message against local state.

    Returns a failing SuccessOrError (with a reason message) on the first
    check that does not hold, otherwise a successful one.
    """
    expected_nonce = get_next_nonce(channel_state.partner_state)
    expiration_threshold = get_receiver_expiration_threshold(expiration=withdraw_state.expiration)
    withdraw_expired = is_withdraw_expired(block_number=block_number, expiration_threshold=expiration_threshold)
    if not withdraw_expired:
        return SuccessOrError(f'WithdrawExpired for withdraw that has not yet expired {state_change.total_withdraw}.')
    if channel_state.canonical_identifier != state_change.canonical_identifier:
        return SuccessOrError('Invalid canonical identifier provided in WithdrawExpired')
    if state_change.sender != channel_state.partner_state.address:
        return SuccessOrError('Expired withdraw not from partner.')
    if state_change.total_withdraw != withdraw_state.total_withdraw:
        return SuccessOrError(f'WithdrawExpired and local withdraw amounts do not match. Received {state_change.total_withdraw}, local amount {withdraw_state.total_withdraw}')
    if state_change.nonce != expected_nonce:
        return SuccessOrError(f'Nonce did not change sequentially, expected: {expected_nonce} got: {state_change.nonce}.')
    return SuccessOrError()
class Unmarshaller():
    """Hosts static methods for unmarshalling a package from a package reader.

    BUG FIX: these methods take no `self`/`cls` and are invoked on the class
    itself, but the original lacked @staticmethod decorators — calling them
    on an *instance* would have passed the instance as the first argument.
    """

    @staticmethod
    def unmarshal(pkg_reader, package, part_factory):
        """Construct graph of parts and realize relationships into *package*.

        Parts are created using *part_factory*; each part and the package
        get an after_unmarshal() notification once loading is complete.
        """
        parts = Unmarshaller._unmarshal_parts(pkg_reader, package, part_factory)
        Unmarshaller._unmarshal_relationships(pkg_reader, package, parts)
        for part in parts.values():
            part.after_unmarshal()
        package.after_unmarshal()

    @staticmethod
    def _unmarshal_parts(pkg_reader, package, part_factory):
        """Return a dict {partname: part} built from the reader's serialized
        parts via *part_factory*."""
        parts = {}
        for (partname, content_type, reltype, blob) in pkg_reader.iter_sparts():
            parts[partname] = part_factory(partname, content_type, reltype, blob, package)
        return parts

    @staticmethod
    def _unmarshal_relationships(pkg_reader, package, parts):
        """Add a relationship to the source object for each serialized
        relationship, the source being the package itself for '/'."""
        for (source_uri, srel) in pkg_reader.iter_srels():
            source = (package if (source_uri == '/') else parts[source_uri])
            target = (srel.target_ref if srel.is_external else parts[srel.target_partname])
            source.load_rel(srel.reltype, target, srel.rId, srel.is_external)
class CmdCreateObj(CmdEvscapeRoom):
    """Admin command: create a new Evscaperoom object in the current room.

    Usage: createobj name[:typeclass]

    A typeclass prefixed with 'state_' is resolved relative to
    evscaperoom.states.
    """
    key = 'createobj'
    aliases = ['cobj']
    locks = 'cmd:perm(Admin)'
    obj1_search = False
    obj2_search = False

    def func(self):
        caller = self.caller
        raw_args = self.args
        if not raw_args:
            caller.msg('Usage: createobj name[:typeclass]')
            return
        typeclass = 'EvscaperoomObject'
        if ':' not in raw_args:
            name = raw_args.strip()
        else:
            # Split on the last ':' so names may themselves contain colons.
            name, typeclass = (part.strip() for part in raw_args.rsplit(':', 1))
            if typeclass.startswith('state_'):
                typeclass = 'evscaperoom.states.' + typeclass
        obj = create_evscaperoom_object(typeclass=typeclass, key=name, location=self.room)
        caller.msg(f'Created new object {name} ({obj.typeclass_path}).')
class _LiveLoggingStreamHandler(logging_StreamHandler):
    """A logging StreamHandler that writes log records to the terminal
    reporter as tests run ("live logging"), disabling output capture around
    each emit so the record actually reaches the terminal.
    """
    # The handler's stream is the terminal reporter itself.
    stream: TerminalReporter = None

    def __init__(self, terminal_reporter: TerminalReporter, capture_manager: Optional[CaptureManager]) -> None:
        super().__init__(stream=terminal_reporter)
        self.capture_manager = capture_manager
        self.reset()
        self.set_when(None)
        self._test_outcome_written = False

    def reset(self) -> None:
        """Reset the handler; should be called before the start of each test."""
        self._first_record_emitted = False

    def set_when(self, when: Optional[str]) -> None:
        """Prepare for the given test phase ('setup'/'call'/'teardown'/...)."""
        self._when = when
        self._section_name_shown = False
        if (when == 'start'):
            self._test_outcome_written = False

    def emit(self, record: logging.LogRecord) -> None:
        # Temporarily disable capturing (if any) so the record reaches the
        # real terminal rather than the capture buffers.
        ctx_manager = (self.capture_manager.global_and_fixture_disabled() if self.capture_manager else nullcontext())
        with ctx_manager:
            if (not self._first_record_emitted):
                # First record of this test: start on a fresh line.
                self.stream.write('\n')
                self._first_record_emitted = True
            elif (self._when in ('teardown', 'finish')):
                # Separate teardown-phase records from the test outcome line,
                # but only once.
                if (not self._test_outcome_written):
                    self._test_outcome_written = True
                    self.stream.write('\n')
            if ((not self._section_name_shown) and self._when):
                # Print the "live log <phase>" section header once per phase.
                self.stream.section(('live log ' + self._when), sep='-', bold=True)
                self._section_name_shown = True
            super().emit(record)

    def handleError(self, record: logging.LogRecord) -> None:
        # Swallow logging errors: the default implementation would print to
        # stderr, which is unhelpful mid-test-run.
        pass
def _kick_user(sa: ServerApp, session: MultiplayerSession, membership: MultiplayerMembership, user_id: int):
    """Remove a user from a multiplayer session (kick, or leave when the
    user is removing themselves), deleting the session if it becomes empty.
    """
    # Audit message differs depending on whether the user removed themselves.
    session_common.add_audit_entry(sa, session, (f'Kicked {membership.effective_name}' if (membership.user != sa.get_current_user()) else 'Left session'))
    # All deletions happen atomically: world associations, the membership,
    # and possibly the session itself.
    with database.db.atomic():
        for association in WorldUserAssociation.select().join(World).where((World.session == session.id), (WorldUserAssociation.user == user_id)):
            association.delete_instance()
        membership.delete_instance()
        if (not list(session.members)):
            # Last member left: remove the whole session and its children.
            session.delete_instance(recursive=True)
            logger().info(f'{session_common.describe_session(session)}. Kicking user {user_id} and deleting session.')
        else:
            logger().info(f'{session_common.describe_session(session)}. Kicking user {user_id}.')
class Aizawa(DynSys):
    """Right-hand side of the Aizawa chaotic attractor ODE system."""

    def _rhs(x, y, z, t, a, b, c, d, e, f):
        # Expressions rely on operator precedence only; evaluation order is
        # identical to the fully-parenthesized original, preserving exact
        # floating-point results.  (The 0. * z**3 term is kept verbatim even
        # though it contributes nothing.)
        xdot = x * z - b * x - d * y
        ydot = d * x + y * z - b * y
        zdot = c + a * z - 0. * z ** 3 - x ** 2 - y ** 2 - e * z * x ** 2 - e * z * y ** 2 + f * z * x ** 3
        return xdot, ydot, zdot
class EvaluationArguments():
    """Configuration for evaluating a trained CodeParrot model.

    NOTE(review): the field(...) defaults only take effect under a
    @dataclass decorator, which is presumably applied outside this excerpt —
    confirm against the full file.
    """
    # Model checkpoint (hub name or local path) to evaluate.
    model_ckpt: Optional[str] = field(default='lvwerra/codeparrot', metadata={'help': 'Model name or path of model to be evaluated.'})
    # Validation dataset (hub name or local path).
    dataset_name: Optional[str] = field(default='lvwerra/codeparrot-clean-valid', metadata={'help': 'Name or path of validation dataset.'})
    batch_size: Optional[int] = field(default=2, metadata={'help': 'Batch size used for evaluation.'})
    # -1 means evaluate on the full dataset.
    max_eval_steps: Optional[int] = field(default=(- 1), metadata={'help': 'Maximum number of evaluation steps. If -1 the full dataset is evaluated.'})
    seq_length: Optional[int] = field(default=1024, metadata={'help': 'Length of sequences to be evaluated.'})
    seed: Optional[int] = field(default=1, metadata={'help': 'Random seed used for evaluation.'})
def set_config(args):
    """Populate ``args`` with the default FedWeIT/LeNet experiment settings.

    Mutates and returns the given namespace-like object.
    """
    # Unconditional base configuration, applied in declaration order.
    base_options = {
        'output_path': '/path/to/outputs/',
        'sparse_comm': True,
        'client_sparsity': 0.3,
        'server_sparsity': 0.3,
        'model': 'fedweit',
        'base_network': 'lenet',
        'lr_patience': 3,
        'lr_factor': 3,
        'lr_min': 1e-10,
    }
    for option, value in base_options.items():
        setattr(args, option, value)
    # LeNet-specific optimizer settings.
    if args.base_network == 'lenet':
        args.lr = 0.001 / 3
        args.wd = 0.0001
    # FedWeIT-specific regularization coefficients.
    if 'fedweit' in args.model:
        args.wd = 0.0001
        args.lambda_l1 = 0.001
        args.lambda_l2 = 100.0
        args.lambda_mask = 0
    return args
def test_quantsim_handling_folded_bn_layer():
    """Quantsim built from a CLE-processed model should contain no BatchNorm layers.

    Cross-layer equalization folds batch-norms into preceding convs; verify
    the folded model wires into QuantizationSimModel with all quantizers enabled.
    """
    # Minimal config: quantize every op output and every param, asymmetric.
    quantsim_config = {'defaults': {'ops': {'is_output_quantized': 'True', 'is_symmetric': 'False'}, 'params': {'is_quantized': 'True', 'is_symmetric': 'False'}}, 'params': {}, 'op_type': {}, 'supergroups': [], 'model_input': {}, 'model_output': {}}
    with open('./data/quantsim_config.json', 'w') as f:
        json.dump(quantsim_config, f)
    model = tiny_conv_net()
    cle_applied_model = equalize_model(model)
    qsim = QuantizationSimModel(cle_applied_model, quant_scheme='tf', config_file='./data/quantsim_config.json')
    layers = qsim.model.layers
    # The BN layers previously at indices 2 and 6 must be folded away.
    assert (not isinstance(layers[2], tf.keras.layers.BatchNormalization))
    assert (not isinstance(layers[6], tf.keras.layers.BatchNormalization))
    # Folding removed exactly the two BN layers.
    assert (len(cle_applied_model.layers) == (len(model.layers) - 2))
    for layer in layers:
        if isinstance(layer, tf.keras.layers.InputLayer):
            continue
        # Per the config defaults, every output and param quantizer is enabled.
        for q in layer.output_quantizers:
            assert q.is_enabled()
        for q in layer.param_quantizers:
            assert q.is_enabled()
    # Clean up the temporary config file.
    if os.path.exists('./data/quantsim_config.json'):
        os.remove('./data/quantsim_config.json')
def parse_config():
    """Build the training CLI parser, parse arguments, and load the YAML config.

    Returns:
        tuple: ``(args, cfg)`` — the parsed namespace and the populated config.
    """
    parser = argparse.ArgumentParser(description='arg parser')
    # (flag, kwargs) pairs registered in order, so --help output is unchanged.
    options = [
        ('--cfg_file', dict(type=str, default=None, help='specify the config for training')),
        ('--batch_size', dict(type=int, default=None, required=False, help='batch size for training')),
        ('--epochs', dict(type=int, default=None, required=False, help='number of epochs to train for')),
        ('--workers', dict(type=int, default=4, help='number of workers for dataloader')),
        ('--extra_tag', dict(type=str, default='default', help='extra tag for this experiment')),
        ('--ckpt', dict(type=str, default=None, help='checkpoint to start from')),
        ('--pretrained_model', dict(type=str, default=None, help='pretrained_model')),
        ('--launcher', dict(choices=['none', 'pytorch', 'slurm'], default='none')),
        ('--tcp_port', dict(type=int, default=18888, help='tcp port for distrbuted training')),
        ('--sync_bn', dict(action='store_true', default=False, help='whether to use sync bn')),
        ('--fix_random_seed', dict(action='store_true', default=False, help='')),
        ('--ckpt_save_interval', dict(type=int, default=1, help='number of training epochs')),
        ('--local_rank', dict(type=int, default=0, help='local rank for distributed training')),
        ('--max_ckpt_save_num', dict(type=int, default=30, help='max number of saved checkpoint')),
        ('--merge_all_iters_to_one_epoch', dict(action='store_true', default=False, help='')),
        ('--set', dict(dest='set_cfgs', default=None, nargs=argparse.REMAINDER, help='set extra config keys if needed')),
        ('--max_waiting_mins', dict(type=int, default=0, help='max waiting minutes')),
        ('--start_epoch', dict(type=int, default=0, help='')),
        ('--save_to_file', dict(action='store_true', default=False, help='')),
    ]
    for flag, kwargs in options:
        parser.add_argument(flag, **kwargs)
    args = parser.parse_args()
    cfg_from_yaml_file(args.cfg_file, cfg)
    cfg.TAG = Path(args.cfg_file).stem
    # Config files live under '<root>/<group...>/<name>.yaml': drop the first
    # path component and the filename to form the experiment group path.
    cfg.EXP_GROUP_PATH = '/'.join(args.cfg_file.split('/')[1:-1])
    if args.set_cfgs is not None:
        cfg_from_list(args.set_cfgs, cfg)
    return (args, cfg)
class VideoWidget(QVideoWidget):
    """Video surface widget with keyboard/mouse fullscreen toggling."""

    def __init__(self, parent=None):
        super(VideoWidget, self).__init__(parent)
        self.setSizePolicy(QSizePolicy.Ignored, QSizePolicy.Ignored)
        # Black background so letterboxed areas look correct.
        p = self.palette()
        p.setColor(QPalette.Window, Qt.black)
        self.setPalette(p)
        self.setAttribute(Qt.WA_OpaquePaintEvent)

    def keyPressEvent(self, event):
        """Escape leaves fullscreen; Alt+Enter toggles it; anything else is forwarded."""
        if ((event.key() == Qt.Key_Escape) and self.isFullScreen()):
            self.setFullScreen(False)
            event.accept()
        elif ((event.key() == Qt.Key_Enter) and (event.modifiers() & Qt.AltModifier)):
            # BUG FIX: event.modifiers() returns KeyboardModifier flags, so it
            # must be tested against Qt.AltModifier; the previous test used the
            # key code Qt.Key_Alt, which can never match the modifier bitmask.
            self.setFullScreen((not self.isFullScreen()))
            event.accept()
        else:
            super(VideoWidget, self).keyPressEvent(event)

    def mouseDoubleClickEvent(self, event):
        """Double-click toggles fullscreen."""
        self.setFullScreen((not self.isFullScreen()))
        event.accept()
class ResourceName(_ResourceNameBase):
    """Base class for parsed VISA resource names.

    NOTE(review): ``interface_type_const`` reads like a property and
    ``from_parts`` takes ``cls`` without a visible ``@classmethod`` —
    decorators appear to have been stripped from this copy; confirm upstream.
    """
    # Interface prefix, e.g. 'GPIB' (provided by subclasses).
    interface_type: ClassVar[str]
    # Resource class suffix, e.g. 'INSTR' (provided by subclasses).
    resource_class: ClassVar[str]

    def __post_init__(self):
        """Reject empty mandatory fields and cache the tuple of field names."""
        for f in fields(self):
            if (getattr(self, f.name) == ''):
                raise TypeError((f.name + ' is a required parameter'))
        self._fields = tuple((f.name for f in fields(self)))

    def interface_type_const(self) -> constants.InterfaceType:
        """Map the textual interface type to the InterfaceType enum (unknown on failure)."""
        try:
            return getattr(constants.InterfaceType, self.interface_type.lower())
        except Exception:
            return constants.InterfaceType.unknown

    def from_parts(cls, *parts):
        """Build a resource name from positional parts.

        Empty-string parts select a field's default; fields whose default is
        '' are mandatory. Raises ValueError when the number of parts cannot
        satisfy the field list.
        """
        resource_parts = fields(cls)
        # NOTE(review): the minimum-part check counts fields with a *truthy*
        # default — verify this matches the intended "mandatory field" count.
        if (len(parts) < sum((1 for f in resource_parts if f.default))):
            raise ValueError('not enough parts')
        elif (len(parts) > len(resource_parts)):
            raise ValueError('too many parts')
        (k, rp) = (resource_parts[0], resource_parts[1:])
        (p, pending) = (parts[0], parts[1:])
        # First field: an empty string selects its default value.
        kwargs = {k.name: (k.default if (p == '') else p)}
        # Consume fields until the remaining parts map one-to-one onto the
        # remaining fields.
        while (len(pending) < len(rp)):
            (k, rp) = (rp[0], rp[1:])
            if (k.default == ''):
                # Mandatory field: a non-empty part must be available.
                if (not pending):
                    raise ValueError((k.name + ' part is mandatory'))
                (p, pending) = (pending[0], pending[1:])
                if (not p):
                    raise ValueError((k.name + ' part is mandatory'))
                kwargs[k.name] = p
            else:
                kwargs[k.name] = k.default
        # Remaining parts map positionally onto the remaining fields.
        kwargs.update(((k.name, p) for (k, p) in zip(rp, pending)))
        return cls(**kwargs)
_model
class pvt_v2_b3(PyramidVisionTransformerImpr):
    """PVTv2-B3 preset of the (improved) Pyramid Vision Transformer.

    NOTE(review): the bare ``_model`` statement above looks like a
    registration decorator that lost its ``@`` during extraction; confirm
    against the original source.
    """
    def __init__(self, **kwargs):
        # Fixed B3 hyper-parameters; note that **kwargs is accepted but not
        # forwarded to the base constructor.
        super(pvt_v2_b3, self).__init__(patch_size=4, embed_dims=[64, 128, 320, 512], num_heads=[1, 2, 5, 8], mlp_ratios=[8, 8, 4, 4], qkv_bias=True, norm_layer=partial(nn.LayerNorm, eps=1e-06), depths=[3, 4, 18, 3], sr_ratios=[8, 4, 2, 1], drop_rate=0.0, drop_path_rate=0.1)
def test_lateralprofile():
    """LateralProfile should accept shapes/superelevations, compare by content, and validate."""
    latprofile = xodr.LateralProfile()
    prettyprint(latprofile.get_element())
    latprofile.add_shape(xodr.elevation._Poly3Profile(0, 0, 0, 0, 0, 0))
    prettyprint(latprofile.get_element())
    latprofile.add_superelevation(xodr.elevation._Poly3Profile(0, 0, 0, 0, 0))
    prettyprint(latprofile.get_element())
    # latprofile2 mirrors latprofile -> expected equal; latprofile3 carries
    # userdata and a different superelevation -> expected unequal.
    latprofile2 = xodr.LateralProfile()
    latprofile2.add_shape(xodr.elevation._Poly3Profile(0, 0, 0, 0, 0, 0))
    latprofile2.add_superelevation(xodr.elevation._Poly3Profile(0, 0, 0, 0, 0))
    latprofile3 = xodr.LateralProfile()
    latprofile3.add_userdata(xodr.UserData('stuffs', 'morestuffs'))
    latprofile3.add_shape(xodr.elevation._Poly3Profile(0, 0, 0, 0, 0, 0))
    latprofile3.add_superelevation(xodr.elevation._Poly3Profile(0, 0, 0, 0, 3))
    # NOTE(review): other calls pass .get_element() to prettyprint; passing the
    # profile object directly here may be intentional if prettyprint handles
    # both — confirm.
    prettyprint(latprofile3)
    assert (latprofile2 == latprofile)
    assert (latprofile != latprofile3)
    assert (version_validation('t_road_lateralProfile', latprofile, wanted_schema='xodr') == ValidationResponse.OK)
class TestFSFile(unittest.TestCase):
    """Tests for satpy.readers.FSFile over plain paths, fsspec files and zip filesystems."""

    def setUp(self):
        """Create one plain local file, plus a second file that only survives inside a zip."""
        import tempfile
        import zipfile
        from pathlib import Path
        import fsspec
        # A local file addressed both by bare name and as an fsspec OpenFile.
        self.random_string = _generate_random_string()
        self.local_filename = os.path.join(tempfile.gettempdir(), self.random_string)
        Path(self.local_filename).touch()
        self.local_file = fsspec.open(self.local_filename)
        # A second file, zipped and then removed from disk.
        self.random_string2 = _generate_random_string()
        self.local_filename2 = os.path.join(tempfile.gettempdir(), self.random_string2)
        Path(self.local_filename2).touch()
        self.zip_name = os.path.join(tempfile.gettempdir(), (self.random_string2 + '.zip'))
        zip_file = zipfile.ZipFile(self.zip_name, 'w', zipfile.ZIP_DEFLATED)
        zip_file.write(self.local_filename2)
        zip_file.close()
        os.remove(self.local_filename2)

    def tearDown(self):
        """Remove the temporary files created in setUp."""
        os.remove(self.local_filename)
        # The zip may still be mapped (e.g. on Windows); ignore permission errors.
        with suppress(PermissionError):
            os.remove(self.zip_name)

    def test_regular_filename_is_returned_with_str(self):
        """str() of an FSFile built from a bare filename is that filename."""
        from satpy.readers import FSFile
        assert (str(FSFile(self.random_string)) == self.random_string)

    def test_fsfile_with_regular_filename_abides_pathlike(self):
        """os.fspath() works on an FSFile built from a bare filename."""
        from satpy.readers import FSFile
        assert (os.fspath(FSFile(self.random_string)) == self.random_string)

    def test_fsfile_with_regular_filename_and_fs_spec_abides_pathlike(self):
        """An explicit fs=None behaves like no filesystem at all."""
        from satpy.readers import FSFile
        assert (os.fspath(FSFile(self.random_string, fs=None)) == self.random_string)

    def test_fsfile_with_pathlike(self):
        """FSFile accepts pathlib.Path and round-trips via str() and os.fspath()."""
        from pathlib import Path
        from satpy.readers import FSFile
        f = FSFile(Path(self.local_filename))
        assert (str(f) == os.fspath(f) == self.local_filename)

    def test_fsfile_with_fs_open_file_abides_pathlike(self):
        """FSFile wrapping an fsspec OpenFile still exposes a usable path."""
        from satpy.readers import FSFile
        assert os.fspath(FSFile(self.local_file)).endswith(self.random_string)

    def test_repr_includes_filename(self):
        """repr() should mention the underlying filename."""
        from satpy.readers import FSFile
        assert (self.random_string in repr(FSFile(self.local_file)))

    def test_open_regular_file(self):
        """FSFile.open() on a plain filename yields an open file object."""
        from satpy.readers import FSFile
        _assert_is_open_file_and_close(FSFile(self.local_filename).open())

    def test_open_local_fs_file(self):
        """FSFile.open() on an fsspec OpenFile yields an open file object."""
        from satpy.readers import FSFile
        _assert_is_open_file_and_close(FSFile(self.local_file).open())

    def test_open_zip_fs_regular_filename(self):
        """A filename resolved through a ZipFileSystem can be opened."""
        from fsspec.implementations.zip import ZipFileSystem
        from satpy.readers import FSFile
        zip_fs = ZipFileSystem(self.zip_name)
        file = FSFile(_posixify_path(self.local_filename2), zip_fs)
        _assert_is_open_file_and_close(file.open())

    def test_open_zip_fs_openfile(self):
        """A chained zip::file fsspec URL wrapped in FSFile can be opened."""
        import fsspec
        from satpy.readers import FSFile
        open_file = fsspec.open(((('zip:/' + _posixify_path(self.local_filename2)) + '::file://') + self.zip_name))
        file = FSFile(open_file)
        _assert_is_open_file_and_close(file.open())

    def test_sorting_fsfiles(self):
        """FSFiles sort consistently together with plain path strings."""
        from fsspec.implementations.zip import ZipFileSystem
        from satpy.readers import FSFile
        zip_fs = ZipFileSystem(self.zip_name)
        file1 = FSFile(self.local_filename2, zip_fs)
        file2 = FSFile(self.local_filename)
        extra_file = os.path.normpath('/somedir/bla')
        sorted_filenames = [os.fspath(file) for file in sorted([file1, file2, extra_file])]
        expected_filenames = sorted([extra_file, os.fspath(file1), os.fspath(file2)])
        assert (sorted_filenames == expected_filenames)

    def test_equality(self):
        """Equality requires both the same path and the same filesystem."""
        from fsspec.implementations.zip import ZipFileSystem
        from satpy.readers import FSFile
        zip_fs = ZipFileSystem(self.zip_name)
        assert (FSFile(self.local_filename) == FSFile(self.local_filename))
        assert (FSFile(self.local_filename, zip_fs) == FSFile(self.local_filename, zip_fs))
        assert (FSFile(self.local_filename, zip_fs) != FSFile(self.local_filename))
        assert (FSFile(self.local_filename) != FSFile(self.local_filename2))

    def test_hash(self):
        """Hashing distinguishes files but not the 4 filesystem variants: 2 files x 4 fs -> 8 hashes."""
        from fsspec.implementations.cached import CachingFileSystem
        from fsspec.implementations.local import LocalFileSystem
        from fsspec.implementations.zip import ZipFileSystem
        from satpy.readers import FSFile
        lfs = LocalFileSystem()
        zfs = ZipFileSystem(self.zip_name)
        cfs = CachingFileSystem(fs=lfs)
        assert (len({hash(FSFile(fn, fs)) for fn in {self.local_filename, self.local_filename2} for fs in [None, lfs, zfs, cfs]}) == (2 * 4))
class _RHBugzillaConverters(object):
    """Field and query converters for Red Hat Bugzilla's custom XMLRPC dialect.

    NOTE(review): the methods take no ``self``/``cls`` — ``@staticmethod``
    decorators were presumably stripped from this copy; confirm upstream.
    """
    def convert_build_update(component=None, fixed_in=None, qa_whiteboard=None, devel_whiteboard=None, internal_whiteboard=None, sub_component=None):
        """Translate RH-specific update keywords into their ``cf_*``/sub_components fields."""
        adddict = {}

        def get_alias():
            # Intentionally empty placeholder; alias handling is a no-op here.
            pass
        if (fixed_in is not None):
            adddict['cf_fixed_in'] = fixed_in
        if (qa_whiteboard is not None):
            adddict['cf_qa_whiteboard'] = qa_whiteboard
        if (devel_whiteboard is not None):
            adddict['cf_devel_whiteboard'] = devel_whiteboard
        if (internal_whiteboard is not None):
            adddict['cf_internal_whiteboard'] = internal_whiteboard
        if sub_component:
            if (not isinstance(sub_component, dict)):
                # A bare sub_component value needs a single component to key on.
                component = listify(component)
                if (not component):
                    raise ValueError('component must be specified if specifying sub_component')
                sub_component = {component[0]: sub_component}
            adddict['sub_components'] = sub_component
        get_alias()
        return adddict

    def pre_translation(query):
        """Rewrite legacy RH query keys into modern Bugzilla query format, in place."""
        old = query.copy()

        def split_comma(_v):
            # Accept either a ready-made list or a comma-separated string.
            if isinstance(_v, list):
                return _v
            return _v.split(',')
        if ('bug_id' in query):
            query['id'] = split_comma(query.pop('bug_id'))
        if ('component' in query):
            query['component'] = split_comma(query['component'])
        if (('include_fields' not in query) and ('column_list' in query)):
            query['include_fields'] = query.pop('column_list')
        if (old != query):
            log.debug('RHBugzilla pretranslated query to: %s', query)

    def post_translation(query, bug):
        """Normalise a returned bug dict in place: provide both singular and plural views."""
        ignore = query
        if (('component' in bug) and ('components' not in bug)):
            val = bug['component']
            # Wrap scalars in a list; keep lists as-is.
            bug['components'] = ((isinstance(val, list) and val) or [val])
            bug['component'] = bug['components'][0]
        if (('version' in bug) and ('versions' not in bug)):
            val = bug['version']
            bug['versions'] = ((isinstance(val, list) and val) or [val])
            bug['version'] = bug['versions'][0]
        if (('sub_components' in bug) and ('sub_component' not in bug)):
            val = bug['sub_components']
            bug['sub_component'] = ''
            if isinstance(val, dict):
                # Flatten all per-component sub-component lists into one string.
                values = []
                for vallist in val.values():
                    values += vallist
                bug['sub_component'] = ' '.join(values)
class ShuffleNetV2(nn.Module):
    """ShuffleNetV2 feature extractor (no classifier head).

    Args:
        bn_norm: norm type forwarded to ``get_norm`` for every conv block.
        model_size: width preset, one of '0.5x', '1.0x', '1.5x', '2.0x'.
    """
    def __init__(self, bn_norm, model_size='1.5x'):
        super(ShuffleNetV2, self).__init__()
        # Number of ShuffleV2 blocks per stage.
        self.stage_repeats = [4, 8, 4]
        self.model_size = model_size
        # Channel plan per preset; index 0 is an unused placeholder, index 1
        # is the stem width, the last entry is the conv_last output width.
        if (model_size == '0.5x'):
            self.stage_out_channels = [(- 1), 24, 48, 96, 192, 1024]
        elif (model_size == '1.0x'):
            self.stage_out_channels = [(- 1), 24, 116, 232, 464, 1024]
        elif (model_size == '1.5x'):
            self.stage_out_channels = [(- 1), 24, 176, 352, 704, 1024]
        elif (model_size == '2.0x'):
            self.stage_out_channels = [(- 1), 24, 244, 488, 976, 2048]
        else:
            raise NotImplementedError
        input_channel = self.stage_out_channels[1]
        # Stem: 3x3 stride-2 conv, then 3x3 stride-2 max-pool.
        self.first_conv = nn.Sequential(nn.Conv2d(3, input_channel, 3, 2, 1, bias=False), get_norm(bn_norm, input_channel), nn.ReLU(inplace=True))
        self.maxpool = nn.MaxPool2d(kernel_size=3, stride=2, padding=1)
        self.features = []
        for idxstage in range(len(self.stage_repeats)):
            numrepeat = self.stage_repeats[idxstage]
            output_channel = self.stage_out_channels[(idxstage + 2)]
            for i in range(numrepeat):
                if (i == 0):
                    # First block of each stage downsamples (stride 2).
                    self.features.append(ShuffleV2Block(bn_norm, input_channel, output_channel, mid_channels=(output_channel // 2), ksize=3, stride=2))
                else:
                    # Remaining blocks keep resolution; input is channel-split in half.
                    self.features.append(ShuffleV2Block(bn_norm, (input_channel // 2), output_channel, mid_channels=(output_channel // 2), ksize=3, stride=1))
                input_channel = output_channel
        self.features = nn.Sequential(*self.features)
        # 1x1 projection to the final embedding width.
        self.conv_last = nn.Sequential(nn.Conv2d(input_channel, self.stage_out_channels[(- 1)], 1, 1, 0, bias=False), get_norm(bn_norm, self.stage_out_channels[(- 1)]), nn.ReLU(inplace=True))
        self._initialize_weights()

    def forward(self, x):
        """Return the final feature map for input images ``x``."""
        x = self.first_conv(x)
        x = self.maxpool(x)
        x = self.features(x)
        x = self.conv_last(x)
        return x

    def _initialize_weights(self):
        """Initialise convs/norm layers; the stem conv gets a tighter gaussian."""
        for (name, m) in self.named_modules():
            if isinstance(m, nn.Conv2d):
                if ('first' in name):
                    nn.init.normal_(m.weight, 0, 0.01)
                else:
                    # Fan-in scaled gaussian for all non-stem convs.
                    nn.init.normal_(m.weight, 0, (1.0 / m.weight.shape[1]))
                if (m.bias is not None):
                    nn.init.constant_(m.bias, 0)
            elif isinstance(m, nn.BatchNorm2d):
                nn.init.constant_(m.weight, 1)
                if (m.bias is not None):
                    nn.init.constant_(m.bias, 0.0001)
                nn.init.constant_(m.running_mean, 0)
            elif isinstance(m, nn.BatchNorm1d):
                nn.init.constant_(m.weight, 1)
                if (m.bias is not None):
                    nn.init.constant_(m.bias, 0.0001)
                nn.init.constant_(m.running_mean, 0)
            elif isinstance(m, nn.Linear):
                nn.init.normal_(m.weight, 0, 0.01)
                if (m.bias is not None):
                    nn.init.constant_(m.bias, 0)
class Solution(object):
    """Iterative binary-tree inorder traversal."""

    def inorderTraversal(self, root):
        """Return the node values of ``root`` in inorder (left, node, right)."""
        result = []
        pending = []
        node = root
        # Descend the left spine, visit the node, then move to its right child.
        while node is not None or pending:
            while node is not None:
                pending.append(node)
                node = node.left
            node = pending.pop()
            result.append(node.val)
            node = node.right
        return result
class ValueList(IntEnum):
    """IntEnum whose members compare equal to their *names* (strings).

    NOTE(review): ``convert`` and ``_unpickle`` take ``cls`` with no visible
    ``@classmethod`` decorator — presumably stripped in this copy; confirm.
    """
    def convert(cls, value):
        """Look up a member by name, raising ValueError (not KeyError) on misses."""
        try:
            return cls[value]
        except KeyError:
            raise ValueError('{} invalid value for {}'.format(value, cls))

    def _unpickle(cls, enum_name, enum_members, enum_member):
        """Recreate a dynamically built enum class and return the named member."""
        enum_cls = cls(enum_name, enum_members)
        return enum_cls[enum_member]

    def __reduce_ex__(self, proto):
        # Pickle as (class name, member names, member name) so dynamically
        # created enum classes survive round-trips.
        return (ValueList._unpickle, (self.__class__.__name__, list(self.__class__.__members__.keys()), self.name))

    def __eq__(self, other):
        # Name-based equality: member == 'NAME' holds.
        return (self.name == other)

    def __ne__(self, other):
        return (self.name != other)

    def __hash__(self):
        # Keep hashing consistent with the name-based equality above.
        return hash(self.name)

    def __repr__(self):
        return (('<' + str(self)) + '>')
def mutual_exclusion_cuts(ctx, param, value):
    """Click option callback enforcing that only one cut-selection option is used.

    The first cut option seen is recorded in ``ctx.meta``; a later, different
    cut option raises a usage error. Repeats of the same option are allowed.
    """
    # Nothing to enforce for unset values or during resilient parsing.
    if not value or ctx.resilient_parsing:
        return value
    current = param.name
    first_seen = ctx.meta.get(EXCLUSION_CUTS, None)
    if first_seen is None:
        # First cut option wins the slot.
        ctx.meta[EXCLUSION_CUTS] = current
        return value
    if first_seen == current:
        return value
    raise click.UsageError('Cannot specify more than one of --cut-smarts, --cut-rgroup, or --cut-rgroup-file')
class SUNRGBDInstance(object):
    """One object annotation parsed from a SUN RGB-D label line.

    The line is a classname followed by 12 numeric fields:
    xmin ymin w h cx cy cz width length height ox oy
    (2D box as corner + extent; 3D box as centroid, size and heading direction).
    """

    def __init__(self, line):
        tokens = line.split(' ')
        values = [float(tok) for tok in tokens[1:]]
        self.classname = tokens[0]
        # 2D box: stored corner plus width/height -> axis-aligned corners.
        self.xmin = values[0]
        self.ymin = values[1]
        self.xmax = values[0] + values[2]
        self.ymax = values[1] + values[3]
        self.box2d = np.array([self.xmin, self.ymin, self.xmax, self.ymax])
        self.centroid = np.array([values[4], values[5], values[6]])
        self.width = values[7]
        self.length = values[8]
        self.height = values[9]
        # Full extents: stored half-sizes doubled, in (length, width, height) order.
        self.size = np.array([values[8], values[7], values[9]]) * 2
        # Heading is encoded as a direction vector (ox, oy) in the XY plane.
        self.orientation = np.zeros((3,))
        self.orientation[0] = values[10]
        self.orientation[1] = values[11]
        self.heading_angle = np.arctan2(self.orientation[1], self.orientation[0])
        # 7-DoF box: centroid (3) + size (3) + heading (1).
        self.box3d = np.concatenate([self.centroid, self.size, self.heading_angle[None]])
class PassedDrivenMilesCompositeMetric(SupportsCompositeMetricCompute):
    """Composite metric: miles driven before the first validator failure."""
    composite_metric_name: str
    requires_metric: List[str]
    requires_validator: List[str]

    def __init__(self, composite_metric_name: str, intervention_validators: List[str], driven_miles_metric: Type[SupportsMetricCompute]=metrics.SimulatedDrivenMilesMetric, ignore_entire_scene: bool=False):
        """
        :param intervention_validators: validators whose failures cut off the mileage count
        :param driven_miles_metric: per-frame driven-miles metric to accumulate
        :param ignore_entire_scene: when True, any failure zeroes the whole scene
        """
        self.composite_metric_name = composite_metric_name
        self.requires_metric = [driven_miles_metric.metric_name]
        # De-duplicate validator names (original order is not preserved).
        self.requires_validator = list(set(intervention_validators))
        self.ignore_entire_scene = ignore_entire_scene

    def compute(self, metric_results: Dict[(str, torch.Tensor)], validation_results: Dict[(str, validators.ValidatorOutput)], simulation_output: SimulationOutputCLE) -> float:
        """Sum per-frame driven miles up to the earliest failed frame across all validators."""
        driven_miles_result = metric_results[self.requires_metric[0]]
        # Default cut-off is the full trajectory length (no failure case).
        min_all_frame_failed: List[int] = [driven_miles_result.size(0)]
        for validator_name in self.requires_validator:
            validator_results = validation_results[validator_name]
            if (len(validator_results.failed_frames) > 0):
                if self.ignore_entire_scene:
                    # Any failure at all discards the entire scene's mileage.
                    return 0.0
                min_frame_failed = min(validator_results.failed_frames)
                min_all_frame_failed.append(min_frame_failed)
        min_frame_failed = min(min_all_frame_failed)
        passed_driven_miles = driven_miles_result[:min_frame_failed].sum()
        passed_driven_miles_cpu = passed_driven_miles.cpu().item()
        return float(passed_driven_miles_cpu)
def __get_freeman_code(freeman_coordination_list: [(int, int)]) -> [int]:
    """Translate a chain of pixel coordinates into Freeman chain codes.

    Each consecutive coordinate pair maps to one direction code; pairs that
    yield no code (None) are skipped.
    """
    codes = []
    for start, end in zip(freeman_coordination_list, freeman_coordination_list[1:]):
        code = __get_freeman_code_by_two_point(point1=start, point2=end)
        if code is not None:
            codes.append(code)
    return codes
def set_kubeconfig_auth(kubeconfig: any, context_auth: ContextAuth) -> str:
    """Inline credentials from ``context_auth`` into ``kubeconfig`` and return it as YAML.

    For the current context's user and cluster entries, file-based credential
    keys ('client-certificate', 'client-key', 'certificate-authority') are
    replaced with their base64 '-data' equivalents from ``context_auth``.

    Raises:
        Exception: if the kubeconfig has no current-context, the context is
            incomplete, or the referenced user/cluster entries do not exist.
    """
    if ('current-context' not in kubeconfig.keys()):
        raise Exception('invalid kubeconfig file, impossible to determine current-context')
    user_id = None
    cluster_id = None
    user_name = None
    cluster_name = None
    current_context = kubeconfig['current-context']
    # Resolve the user/cluster names referenced by the current context.
    for context in kubeconfig['contexts']:
        if (context['name'] == current_context):
            user_name = context['context']['user']
            cluster_name = context['context']['cluster']
    if (user_name is None):
        raise Exception('user not set for context {} in kubeconfig file'.format(current_context))
    if (cluster_name is None):
        raise Exception('cluster not set for context {} in kubeconfig file'.format(current_context))
    # Locate the indices of the referenced user and cluster entries.
    for (index, user) in enumerate(kubeconfig['users']):
        if (user['name'] == user_name):
            user_id = index
    for (index, cluster) in enumerate(kubeconfig['clusters']):
        if (cluster['name'] == cluster_name):
            cluster_id = index
    # BUG FIX: the user lookup was never validated, so a missing user entry
    # crashed later with an opaque TypeError instead of a clear error.
    if (user_id is None):
        raise Exception('no user {} found in kubeconfig users'.format(user_name))
    if (cluster_id is None):
        # BUG FIX: the message used to say "users" although this is the
        # clusters lookup.
        raise Exception('no cluster {} found in kubeconfig clusters'.format(cluster_name))
    if ('client-certificate' in kubeconfig['users'][user_id]['user']):
        kubeconfig['users'][user_id]['user']['client-certificate-data'] = context_auth.clientCertificateDataBase64
        del kubeconfig['users'][user_id]['user']['client-certificate']
    if ('client-key' in kubeconfig['users'][user_id]['user']):
        kubeconfig['users'][user_id]['user']['client-key-data'] = context_auth.clientKeyDataBase64
        del kubeconfig['users'][user_id]['user']['client-key']
    if ('certificate-authority' in kubeconfig['clusters'][cluster_id]['cluster']):
        kubeconfig['clusters'][cluster_id]['cluster']['certificate-authority-data'] = context_auth.clusterCertificateDataBase64
        del kubeconfig['clusters'][cluster_id]['cluster']['certificate-authority']
    kubeconfig_str = yaml.dump(kubeconfig)
    return kubeconfig_str
class decoder(nn.Module):
    """MLP decoder with skip connections producing 17 three-dimensional joints."""
    def __init__(self, in_dim=128, out_dim=(17 * 3), h_dim=128):
        super(decoder, self).__init__()
        self.in_dim = in_dim
        self.h_dim = h_dim
        self.out_dim = out_dim
        # Stages 2 and 3 consume the previous output concatenated with a skip
        # tensor of width h_dim, hence the (h_dim * 2) input widths.
        self.fc1 = residual_linear(in_dim, h_dim)
        self.fc2 = residual_linear((h_dim * 2), h_dim)
        self.fc3 = nn.Linear((h_dim * 2), out_dim)

    def forward(self, input):
        """input: (latent, skips) where skips[0]/skips[1] are concatenated in reverse stage order; returns (batch, 17, 3)."""
        (input, skip) = input
        bs = input.shape[0]
        d1 = self.fc1(input)
        d2 = self.fc2(torch.cat([d1, skip[1]], 1))
        out = self.fc3(torch.cat([d2, skip[0]], 1))
        # Reshape the flat (17 * 3) output into per-joint coordinates.
        out = out.view(bs, 17, 3)
        return out
def get_string_or_list(prompt, default=None):
    """Interactively collect a sorted list of values from stdin.

    Values are entered one per prompt; an empty entry finishes the list, or
    accepts ``default`` when nothing has been entered yet. When there is no
    input and no default, the function re-prompts by calling itself.
    """
    so_far = []
    while True:
        full_prompt = prompt
        # Only advertise the default before the first entry.
        if (default and (not so_far)):
            if isinstance(default, str):
                full_prompt += ' [{}]'.format(default)
            else:
                full_prompt += ' [{}]'.format(', '.join(default))
        if so_far:
            full_prompt += ' [hit Enter when finished]'
        full_prompt += ' '
        answer = input(full_prompt)
        if (not answer):
            # Empty entry: finish the list, or fall back to the default.
            if so_far:
                so_far.sort()
                return so_far
            answer = default
            if (answer is None):
                # No input and no default: start over.
                return get_string_or_list(prompt, default)
        if isinstance(answer, str):
            so_far.append(answer)
        else:
            # A list-valued default contributes all of its items.
            so_far.extend(answer)
def disasm(count, ql: Qiling, address: int, size: int):
    """Disassemble the instruction at ``address`` and format a trace line.

    Returns the formatted line for the first decoded instruction (the function
    returns from inside the loop; ``size`` presumably covers exactly one
    instruction — confirm), or None when nothing decodes. ``count`` is a
    one-element sequence holding the running instruction counter.
    NOTE(review): relies on a module-level Capstone handle ``md``.
    """
    buf = ql.mem.read(address, size)
    try:
        for i in md.disasm(buf, address):
            return '{:08X}\t{:08X}: {:24s} {:10s} {:16s}'.format(count[0], i.address, spaced_hex(buf), i.mnemonic, i.op_str)
    except:
        # Best-effort tracing: report the error but never break emulation.
        import traceback
        print(traceback.format_exc())
class TrainProgressMonitorTest(unittest.TestCase):
    """Unit test for the TrainProgressMonitor callback."""

    def test_train_progress_monitor(self) -> None:
        """The monitor should log cumulative step counts once at start and after each epoch."""
        input_dim = 2
        dataset_len = 10
        batch_size = 2
        max_epochs = 3
        # NOTE: true division yields a float (5.0); the equality asserts below
        # still hold because 5.0 == 5.
        num_train_steps_per_epoch = (dataset_len / batch_size)
        my_unit = DummyTrainUnit(input_dim=input_dim)
        logger = InMemoryLogger()
        monitor = TrainProgressMonitor(loggers=logger)
        dataloader = generate_random_dataloader(dataset_len, input_dim, batch_size)
        train(my_unit, dataloader, max_epochs=max_epochs, callbacks=[monitor])
        buf = logger.log_buffer
        # One log entry before training plus one per completed epoch.
        self.assertEqual(len(buf), (max_epochs + 1))
        self.assertEqual(buf[0]['Training steps completed vs epochs'], (num_train_steps_per_epoch * 0))
        self.assertEqual(buf[1]['Training steps completed vs epochs'], (num_train_steps_per_epoch * 1))
        self.assertEqual(buf[2]['Training steps completed vs epochs'], (num_train_steps_per_epoch * 2))
        self.assertEqual(buf[3]['Training steps completed vs epochs'], (num_train_steps_per_epoch * 3))
class STM32F4xxGpio(STM32F1xxGpio):
    """Qiling peripheral model of the STM32F4xx GPIO register block."""

    class Type(ctypes.Structure):
        # Memory-mapped register layout; offsets follow the field order.
        _fields_ = [('MODER', ctypes.c_uint32), ('OTYPER', ctypes.c_uint32), ('OSPEEDR', ctypes.c_uint32), ('PUPDR', ctypes.c_uint32), ('IDR', ctypes.c_uint32), ('ODR', ctypes.c_uint32), ('BSRR', ctypes.c_uint32), ('LCKR', ctypes.c_uint32), ('AFRL', ctypes.c_uint32), ('AFRH', ctypes.c_uint32)]

    def __init__(self, ql, label, moder_reset=0, ospeedr_reset=0, pupdr_reset=0):
        # NOTE(review): bypasses STM32F1xxGpio.__init__ and calls the
        # underlying initializers directly — presumably because the F1 and F4
        # register layouts/reset behaviour differ; confirm upstream.
        QlPeripheral.__init__(self, ql, label)
        GpioHooks.__init__(self, ql, 16)
        # Apply the documented reset values for the mode/speed/pull registers.
        self.instance = self.struct(MODER=moder_reset, OSPEEDR=ospeedr_reset, PUPDR=pupdr_reset)
class GraphIgnoreLockRangeMenu(ContextMenuUnconditional):
    """Context-menu entry toggling the graph's 'ignore lock range' option."""

    def __init__(self):
        self.mainFrame = gui.mainFrame.MainFrame.getInstance()
        self.settings = GraphSettings.getInstance()

    def display(self, callingWindow, srcContext):
        """Show the entry only for graph contexts that honour lock range."""
        supported = ('dmgStatsGraph', 'remoteRepsGraph', 'ewarStatsGraph')
        return srcContext in supported

    def getText(self, callingWindow, itmContext):
        return _t('Ignore Lock Range')

    def activate(self, callingWindow, fullContext, i):
        """Flip the setting and notify the main frame so graphs redraw."""
        newValue = not self.settings.get('ignoreLockRange')
        self.settings.set('ignoreLockRange', newValue)
        wx.PostEvent(self.mainFrame, GE.GraphOptionChanged())

    def isChecked(self, i):
        return self.settings.get('ignoreLockRange')
_flags(floatX='float64')
def test_debugprint():
    """debugprint of an OpFromGraph should show the outer node plus its inner graph.

    NOTE(review): the bare ``_flags(floatX='float64')`` line above looks like a
    config-change decorator that lost its ``@`` during extraction; confirm
    against the original source.
    """
    (x, y, z) = matrices('xyz')
    e = (x + (y * z))
    op = OpFromGraph([x, y, z], [e])
    out = op(x, y, z)
    output_str = debugprint(out, file='str')
    lines = output_str.split('\n')
    exp_res = 'OpFromGraph{inline=False} [id A]\n x [id B]\n y [id C]\n z [id D]\n\nInner graphs:\n\nOpFromGraph{inline=False} [id A]\n Add [id E]\n *0-<Matrix(float64, shape=(?, ?))> [id F]\n Mul [id G]\n *1-<Matrix(float64, shape=(?, ?))> [id H]\n *2-<Matrix(float64, shape=(?, ?))> [id I]\n'
    # Compare line-by-line, ignoring leading/trailing whitespace differences.
    # (The loop variable `out` shadows the symbolic `out` above — harmless here.)
    for (truth, out) in zip(exp_res.split('\n'), lines):
        assert (truth.strip() == out.strip())
def githash(filename=None):
    """Return the abbreviated git hash of the latest commit for the repo containing ``filename``.

    Args:
        filename: a file inside the repository of interest; when None the
            current working directory's repository is used.

    Returns:
        The short commit hash as a string, or None when git fails (git not
        installed, not a repository, invalid path, ...).
    """
    if (filename is None):
        cwd = None
    else:
        cwd = os.path.dirname(os.path.abspath(filename))
    # Argument list with shell=False (the default) avoids a shell round-trip
    # and the quoting the previous shell=True string command needed.
    cmd = ['git', 'log', '-1', '--format=%h']
    try:
        return subprocess.check_output(cmd, cwd=cwd).decode().strip()
    except Exception:
        # Best effort: any failure deliberately maps to None.
        return None
class Transaction(base.Transaction):
    """Async transaction adapter around a driver-level transaction object."""

    def __init__(self, tx):
        self._tx = tx

    def raw_transaction(self):
        """Expose the wrapped driver transaction."""
        return self._tx

    async def begin(self):
        """Start the wrapped transaction."""
        await self._tx.start()

    async def commit(self):
        """Commit the wrapped transaction."""
        await self._tx.commit()

    async def rollback(self):
        """Roll back the wrapped transaction."""
        await self._tx.rollback()
def get_score(points: Points) -> Score:
    """Convert accumulated note points into a final Score breakdown."""
    result = Score()
    # When line bonuses are in play they are carved out of the maximum total.
    if points.line_bonus == 0:
        result.max_score = MAX_SONG_SCORE
    else:
        result.max_score = MAX_SONG_SCORE - MAX_SONG_LINE_BONUS
    # Note score scales with the fraction of parts hit (normal + rap).
    result.notes = round(result.max_score * (points.notes + points.rap) / points.parts)
    result.golden = round(points.golden_notes + points.golden_rap)
    result.score = round(result.notes + points.line_bonus + result.golden)
    result.line_bonus = round(points.line_bonus)
    return result
class ReduceLROnPlateau(object):
    """Wrapper bundling an optimizer with a ReduceLROnPlateau LR scheduler.

    Unknown attribute access is delegated to the wrapped optimizer via
    ``__getattr__``.
    """
    def __init__(self, optimizer, mode='min', factor=0.1, patience=10, verbose=False, threshold=0.0001, threshold_mode='rel', cooldown=0, min_lr=0, eps=1e-08):
        self.scheduler = optim.lr_scheduler.ReduceLROnPlateau(optimizer, mode, factor, patience, verbose, threshold, threshold_mode, cooldown, min_lr, eps)
        self.optimizer = optimizer
        self.current_lr = get_lr(optimizer)

    def step(self):
        # Optimizer step only; call scheduler_step(val) with the monitored
        # metric to drive LR reduction.
        self.optimizer.step()

    def scheduler_step(self, val):
        """Advance the scheduler with the monitored value and refresh current_lr."""
        self.scheduler.step(val)
        self.current_lr = get_lr(self.optimizer)

    def state_dict(self):
        return {'current_lr': self.current_lr, 'scheduler_state_dict': self.scheduler.state_dict(), 'optimizer_state_dict': self.optimizer.state_dict()}

    def load_state_dict(self, state_dict):
        # Backwards compatibility: old checkpoints stored a bare optimizer
        # state dict without the wrapper's 'current_lr' key.
        if ('current_lr' not in state_dict):
            self.optimizer.load_state_dict(state_dict)
            set_lr(self.optimizer, self.current_lr)
        else:
            self.current_lr = state_dict['current_lr']
            self.scheduler.load_state_dict(state_dict['scheduler_state_dict'])
            self.optimizer.load_state_dict(state_dict['optimizer_state_dict'])

    def rate(self, step=None):
        # NOTE(review): references self._step / self.factor / self.model_size /
        # self.warmup, none of which this class ever sets — it looks
        # copy-pasted from a Noam-style scheduler and would resolve (or fail)
        # through __getattr__ on the optimizer; confirm whether it is called.
        if (step is None):
            step = self._step
        return (self.factor * ((self.model_size ** (- 0.5)) * min((step ** (- 0.5)), (step * (self.warmup ** (- 1.5))))))

    def __getattr__(self, name):
        # Fallback delegation for anything not defined on the wrapper itself.
        return getattr(self.optimizer, name)
class Abs(UnaryScalarOp):
    """Elementwise absolute value; complex inputs yield their real magnitude."""
    nfunc_spec = ('abs', 1, 1)

    def make_node(self, x):
        inputs = [as_scalar(input) for input in [x]]
        # abs of a complex value is real, so narrow the output dtype.
        if (inputs[0].type == complex64):
            outputs = [float32()]
        elif (inputs[0].type == complex128):
            outputs = [float64()]
        else:
            outputs = [t() for t in self.output_types([input.type for input in inputs])]
        return Apply(self, inputs, outputs)

    def impl(self, x):
        return np.abs(x)

    def L_op(self, inputs, outputs, gout):
        """Gradient: gz * sign(x) for floats, gz * x / |x| otherwise; zero for discrete types."""
        (x,) = inputs
        (gz,) = gout
        if (outputs[0].type in discrete_types):
            # Discrete outputs carry zero gradient; match dtype conventions.
            if (x.type in discrete_types):
                return [x.zeros_like(dtype=config.floatX)]
            else:
                return [x.zeros_like()]
        if (x.type in float_types):
            return ((gz * sign(x)),)
        return (((gz * x) / _abs(x)),)

    def c_code(self, node, name, inputs, outputs, sub):
        """Emit C code for abs, dispatched on the input dtype.

        NOTE(review): the bool and unsigned branches come after the
        int/float/complex checks — whether they are reachable depends on what
        ``int_types`` contains; confirm upstream.
        """
        (x,) = inputs
        (z,) = outputs
        type = node.inputs[0].type
        if (type in int_types):
            return f'{z} = abs({x});'
        if (type in float_types):
            return f'{z} = fabs({x});'
        if (type in complex_types):
            return f'{z} = sqrt({x}.real*{x}.real + {x}.imag*{x}.imag);'
        if (node.outputs[0].type == bool):
            return f'{z} = ({x}) ? 1 : 0;'
        if (type in uint_types):
            # Unsigned values are already non-negative.
            return f'{z} = {x};'
        raise NotImplementedError('type not supported', type)
class LayerSlider(Slider):
    """Matplotlib Slider that switches the map's visible background layer.

    Integer slider positions map onto layer names; moving the slider assigns
    the selected layer to the blit-manager and triggers a redraw.
    """

    def __init__(self, m, layers=None, pos=None, txt_patch_props=None, exclude_layers=None, name=None, **kwargs):
        """
        :param m: the Maps-like object this slider controls.
        :param layers: explicit layer list; auto-detected when None.
        :param pos: axes rectangle for the slider; defaults to below the map axes.
        :param txt_patch_props: bbox properties applied to the value label.
        :param exclude_layers: layers hidden from auto-detection (default: ['all']).
        :param name: registry key; auto-generated 'slider_N' when None.
        """
        self._m = m
        # Remember the constructor arguments so _reinit() can rebuild the widget.
        self._init_args = {'layers': layers, 'txt_patch_props': txt_patch_props, 'exclude_layers': exclude_layers, **kwargs}
        if (layers is None):
            if (exclude_layers is None):
                exclude_layers = ['all']
            layers = self._m._get_layers(exclude=exclude_layers)
        else:
            # Combine multi-layer entries (non-strings) into a single name.
            uselayers = []
            for l in layers:
                if (not isinstance(l, str)):
                    uselayers.append(m._get_combined_layer_name(*l))
                else:
                    uselayers.append(l)
            layers = uselayers
        # Hide private layers.
        layers = [i for i in layers if (not i.startswith('_'))]
        if (pos is None):
            # Default slot: just below the map axes, 75% of its width.
            ax_slider = self._m.f.add_axes([self._m.ax.get_position().x0, (self._m.ax.get_position().y0 - 0.05), (self._m.ax.get_position().width * 0.75), 0.05])
        else:
            ax_slider = self._m.f.add_axes(pos)
        ax_slider.set_label('slider')
        # Visual defaults, only applied when not overridden by the caller.
        kwargs.setdefault('color', '.2')
        kwargs.setdefault('track_color', '.8')
        kwargs.setdefault('initcolor', 'none')
        kwargs.setdefault('handle_style', dict(facecolor='.8', edgecolor='k', size=7))
        kwargs.setdefault('label', None)
        # valmax falls back to 0.01 so an empty/singleton layer list still
        # gives a non-degenerate slider range.
        super().__init__(ax_slider, valmin=0, valmax=max((len(layers) - 1), 0.01), valinit=0, valstep=1, **kwargs)
        self.drawon = False
        self._layers = layers
        if (txt_patch_props is not None):
            self.valtext.set_bbox(txt_patch_props)

        def fmt(val):
            # Show the layer name for in-range positions, '---' otherwise.
            if (val < len(layers)):
                return layers[val]
            else:
                return '---'
        self._format = fmt
        self._handle.set_marker('D')
        # Slim the track down to half height, re-centred vertically.
        self.track.set_edgecolor('none')
        h = (self.track.get_height() / 2)
        self.track.set_height(h)
        self.track.set_y((self.track.get_y() + (h / 2)))
        self._m.BM.add_artist(ax_slider, layer='all')
        self.on_changed(self._on_changed)
        if (name is None):
            # Auto-name: next free 'slider_N' index in the registry.
            keys = (key for key in self._m.util._sliders if key.startswith('slider_'))
            ns = []
            for key in keys:
                try:
                    ns.append(int(key[7:]))
                except:
                    pass
            name = f'slider_{((max(ns) + 1) if ns else 0)}'
        self._init_args['name'] = name
        self._m.util._sliders[name] = self

    def set_layers(self, layers):
        """Replace the selectable layer list and re-sync the slider position."""
        self._layers = layers
        self.valmax = max((len(layers) - 1), 0.01)
        self.ax.set_xlim(self.valmin, self.valmax)
        # Keep the currently visible layer selected when still available.
        if (self._m.BM.bg_layer in self._layers):
            currval = self._layers.index(self._m.BM.bg_layer)
            self.set_val(currval)
        else:
            self.set_val(0)
        self._on_changed(self.val)
        self._m.util._update_widgets()
        self._m.BM.update()

    def _reinit(self):
        """Destroy and rebuild the slider with its original arguments (same position)."""
        self.remove()
        self.__init__(m=self._m, pos=self.ax.get_position(), **self._init_args)
        self._m.util._update_widgets()
        self._m.BM.update()

    def _on_changed(self, val):
        # Slider moved: activate the corresponding background layer.
        l = self._layers[int(val)]
        self._m.BM.bg_layer = l
        self._m.BM.update()

    def remove(self):
        """Detach the slider from the figure and from the slider registry."""
        self._m.BM.remove_artist(self.ax)
        self.disconnect_events()
        self.ax.remove()
        del self._m.util._sliders[self._init_args['name']]
        self._m.BM.update()
class RegNetBackbone(nn.Module):
    """RegNetY backbone returning the outputs of its last two stages.

    Args:
        mf: model size in mega-flops; 400 and 800 are supported.
        pretrained: load the torchvision ImageNet trunk weights.
        stem_width: channel count of the stem convolution.
        stem_type: NOTE(review) accepted but currently unused (SimpleStemIN is
            always used); kept for interface compatibility.
        block_type / norm_layer / activation: overrides for the stage blocks;
            defaults are ResBottleneckBlock / BatchNorm2d / ReLU.
    """

    def __init__(self, mf: int=400, pretrained: bool=True, stem_width: int=32, stem_type: Optional[Callable[(..., nn.Module)]]=None, block_type: Optional[Callable[(..., nn.Module)]]=None, norm_layer: Optional[Callable[(..., nn.Module)]]=None, activation: Optional[Callable[(..., nn.Module)]]=None) -> None:
        super().__init__()
        self.mf = mf
        if (self.mf == 400):
            block_params = BlockParams.from_init_params(depth=16, w_0=48, w_a=27.89, w_m=2.09, group_width=8, se_ratio=0.25)
        elif (self.mf == 800):
            block_params = BlockParams.from_init_params(depth=14, w_0=56, w_a=38.84, w_m=2.4, group_width=16, se_ratio=0.25)
        else:
            # BUG FIX: an unsupported `mf` used to fall through and crash later
            # with an UnboundLocalError on `block_params`.
            raise ValueError(f'Unsupported mf={mf}; expected 400 or 800')
        # BUG FIX: these arguments used to be silently overwritten; now they
        # are only defaulted when the caller did not provide them.
        if norm_layer is None:
            norm_layer = nn.BatchNorm2d
        if block_type is None:
            block_type = ResBottleneckBlock
        if activation is None:
            activation = nn.ReLU
        self.stem = SimpleStemIN(3, stem_width, norm_layer, activation)
        current_width = stem_width
        for (i, (width_out, stride, depth, group_width, bottleneck_multiplier)) in enumerate(block_params._get_expanded_params()):
            # Stages are registered as block1..block4 for use in forward().
            setattr(self, f'block{(i + 1)}', AnyStage(current_width, width_out, stride, depth, block_type, norm_layer, activation, group_width, bottleneck_multiplier, block_params.se_ratio, stage_index=(i + 1)))
            current_width = width_out
        if pretrained:
            self.load_pre_trained_weights()

    def forward(self, x: Tensor) -> Tensor:
        """Return the outputs of stages 3 and 4 (a two-level feature pyramid)."""
        x = self.stem(x)
        x = self.block1(x)
        x = self.block2(x)
        out1 = self.block3(x)
        out2 = self.block4(out1)
        return (out1, out2)

    def load_pre_trained_weights(self) -> None:
        """Load torchvision RegNetY ImageNet weights into the trunk, dropping the fc head."""
        arch = 'regnet_y_{}mf'.format(self.mf)
        if (arch not in model_urls):
            raise ValueError(f'No checkpoint is available for model type {arch}')
        print('Loading Pytorch pretrained weights...')
        state_dict = model_zoo.load_url(model_urls[arch])
        # torchvision prefixes stage weights with 'trunk_output.'; strip it so
        # the keys match block1..block4, and drop the classifier head.
        for key in list(state_dict.keys()):
            if key.startswith('trunk_output.'):
                state_dict[key[13:]] = state_dict[key]
                state_dict.pop(key)
            if key.startswith('fc.'):
                state_dict.pop(key)
        self.load_state_dict(state_dict, strict=True)
(debug=True)  # NOTE(review): this line looks like a garbled decorator (likely "@cmdutils.register(debug=True)") -- confirm against upstream.
def debug_keytester() -> None:
    """Toggle the key-tester widget: close it if one exists, is not deleted
    on the Qt side, and is visible; otherwise create and show a new one."""
    global _keytester_widget
    if (_keytester_widget and (not sip.isdeleted(_keytester_widget)) and _keytester_widget.isVisible()):
        _keytester_widget.close()
    else:
        _keytester_widget = miscwidgets.KeyTesterWidget()
        _keytester_widget.show()
def test_replace_deep_list_of_foo_by_real():
    """Replace every nested Foo_shamt component with Real_shamt, in random
    order, then run a simple simulation.

    This is a smoke test: it only checks that replacement, the sim pass and
    ticking run without errors; the line traces are debug output.
    """
    class TopWrap(Component):
        def construct(s):
            s.in_ = InPort(Bits32)
            s.foobar = Foo_shamt_list_wrap(32)
            s.foobar.in_ //= s.in_
    foo_wrap = TopWrap()
    foo_wrap.elaborate()
    # Shuffle so the replacement order does not depend on index order.
    order = list(range(5))
    random.shuffle(order)
    for i in order:
        foo_wrap.replace_component(foo_wrap.foobar.inner[i], Real_shamt)
    simple_sim_pass(foo_wrap)
    print()
    foo_wrap.in_ = Bits32(16)
    foo_wrap.tick()
    print(foo_wrap.foobar.line_trace())
    foo_wrap.in_ = Bits32(4)
    foo_wrap.tick()
    print(foo_wrap.foobar.line_trace())
.parametrize('model', ['faiman', 'pvsyst', 'sapm', 'fuentes', 'noct_sam'])  # NOTE(review): appears to be a garbled "@pytest.mark.parametrize" decorator.
def test_PVSystem_multi_array_celltemp_wind_too_short(model, two_array_system):
    """Passing fewer per-array wind-speed entries than arrays must raise."""
    with pytest.raises(ValueError, match='Length mismatch for per-array parameter'):
        two_array_system.get_cell_temperature((1000, 1000), 25, (1,), model=model)
def archival_to_version(data: dict[str, str], config: Configuration) -> ScmVersion | None:
    """Derive a version from ``git archive`` substitution data.

    Preference order: a usable describe output, then any tag found in the
    ref names, and finally a 0.0 fallback based on the node hash.  Returns
    None when nothing usable is present.
    """
    node: str | None
    log.debug('data %s', data)
    describe_output = data.get('describe-name', DESCRIBE_UNSUPPORTED)
    if DESCRIBE_UNSUPPORTED in describe_output:
        warnings.warn('git archive did not support describe output')
    else:
        tag, distance, node, _ = _git_parse_describe(describe_output)
        return meta(tag, config=config, distance=distance, node=node)
    # for/else: the else branch runs only when no ref yielded a version.
    for ref_name in REF_TAG_RE.findall(data.get('ref-names', '')):
        parsed = tag_to_version(ref_name, config)
        if parsed is not None:
            return meta(parsed, config=config)
    else:
        node = data.get('node')
        if node is None:
            return None
        if '$FORMAT' in node.upper():
            # The archive was created without export-subst applied.
            warnings.warn('unprocessed git archival found (no export subst applied)')
            return None
        return meta('0.0', node=node, config=config)
class ActivationSampler():
    """Samples a layer's input/output activations from a pair of models.

    Inputs are collected from ``quant_module`` (in the quantized model) and
    reference outputs from ``orig_module`` (in the original model).
    """
    def __init__(self, orig_module: torch.nn.Module, quant_module: QcQuantizeWrapper, orig_model: torch.nn.Module, quant_model: torch.nn.Module, forward_fn: Callable[([torch.nn.Module, Any], Any)]):
        self._orig_module = orig_module
        self._quant_module = quant_module
        self._orig_model = orig_model
        self._quant_model = quant_model
        # Per-model collectors that run forward_fn and capture activations.
        self._orig_module_collector = ModuleData(orig_model, orig_module, forward_fn)
        self._quant_module_collector = ModuleData(quant_model, quant_module, forward_fn)
    def sample_and_place_all_acts_on_cpu(self, cached_dataset: Dataset, cached_quant_dataset: Dataset=None) -> Tuple[(torch.Tensor, torch.Tensor)]:
        """Iterate the cached dataset(s), gather activations per batch, and
        return (inputs, outputs) concatenated along dim 0 on the CPU.

        When ``cached_quant_dataset`` is given, inputs are sampled from it
        while outputs come from ``cached_dataset`` (lengths must match).
        """
        all_inp_data = []
        all_out_data = []
        iterator = iter(cached_dataset)
        if cached_quant_dataset:
            assert (len(cached_dataset) == len(cached_quant_dataset))
            quant_iterator = iter(cached_quant_dataset)
        for batch_index in range(len(cached_dataset)):
            if cached_quant_dataset:
                # Inputs from the quantized-data iterator, reference outputs
                # from the original-data iterator.
                (inp_data, _) = self.sample_acts(next(quant_iterator), collect_input=True, collect_output=False)
                (_, out_data) = self.sample_acts(next(iterator), collect_input=False, collect_output=True)
            else:
                (inp_data, out_data) = self.sample_acts(next(iterator))
            # Move each batch to CPU immediately so accumulated activations
            # do not hold device memory.
            all_inp_data.append(inp_data.cpu())
            all_out_data.append(out_data.cpu())
            if (batch_index == (len(cached_dataset) - 1)):
                break
        all_inp_data = torch.cat(all_inp_data, dim=0)
        all_out_data = torch.cat(all_out_data, dim=0)
        return (all_inp_data, all_out_data)
    def sample_acts(self, model_inputs: Union[(torch.tensor, List, Tuple)], collect_input=True, collect_output=True) -> Tuple[(torch.Tensor, torch.Tensor)]:
        """Run one batch and return (input acts, output acts); either element
        is None when the corresponding collection flag is False."""
        (inp_data, out_data) = (None, None)
        if collect_input:
            (inp_data, _) = self._quant_module_collector.collect_inp_out_data(model_inputs, collect_input=True, collect_output=False)
        if collect_output:
            (_, out_data) = self._orig_module_collector.collect_inp_out_data(model_inputs, collect_input=False, collect_output=True)
        return (inp_data, out_data)
def draw_paths(times, paths, N, expectations, title=None, KDE=False, marginal=False, orientation='horizontal', marginalT=None, envelope=False, lower=None, upper=None, style='seaborn-v0_8-whitegrid', colormap='RdYlBu_r', **fig_kw):
    """Dispatch to the horizontal or vertical path-drawing routine.

    Raises:
        ValueError: if `orientation` is neither 'horizontal' nor 'vertical'.
    """
    shared = dict(title=title, KDE=KDE, marginal=marginal, marginalT=marginalT, envelope=envelope, lower=lower, upper=upper, style=style, colormap=colormap)
    if orientation == 'horizontal':
        return draw_paths_horizontal(times, paths, N, expectations, **shared, **fig_kw)
    if orientation == 'vertical':
        return draw_paths_vertical(times, paths, N, expectations, **shared, **fig_kw)
    raise ValueError('orientation can only take values horizontal, vertical')
class Effect2853(BaseEffect):
    """Passive effect: applies the module's cloakingTargetingDelayBonus to
    the cloakingTargetingDelay of all modules requiring the Cloaking skill."""
    # Effect category used by the effect framework.
    type = 'passive'
    def handler(fit, module, context, projectionRange, **kwargs):
        # NOTE(review): `handler` has no self/cls -- presumably a stripped
        # @staticmethod (or the framework calls it unbound); confirm.
        fit.modules.filteredItemBoost((lambda module: module.item.requiresSkill('Cloaking')), 'cloakingTargetingDelay', module.getModifiedItemAttr('cloakingTargetingDelayBonus'), **kwargs)
class EasyDict(dict):
    """A dict whose items are also accessible as attributes (d.key <-> d['key'])."""

    def __getattr__(self, name: str) -> Any:
        try:
            return self[name]
        except KeyError:
            # `from None` suppresses the internal KeyError so callers see a
            # clean AttributeError instead of a chained traceback.
            raise AttributeError(name) from None

    def __setattr__(self, name: str, value: Any) -> None:
        self[name] = value

    def __delattr__(self, name: str) -> None:
        del self[name]
def revalidate_vercel_frontend(sender, **kwargs):
    """Signal handler: schedule a Vercel revalidation task for the page.

    Does nothing when the page has no site or when the site's settings have
    no revalidation URL configured.
    """
    page = kwargs['instance']
    site = page.get_site()
    if not site:
        return
    frontend_settings = VercelFrontendSettings.for_site(site)
    if not frontend_settings.revalidate_url:
        return
    # Hand the actual HTTP call off to the background task queue.
    revalidate_vercel_frontend_task.delay(page_id=page.id)
def create_columns(bm, face, prop):
    """Add square support columns under each vertex of `face`, one per floor.

    The column cross-section is twice the slab outset; resulting faces are
    tagged with the COLUMNS material group.  No-op when `prop.add_columns`
    is unset.
    """
    if (not prop.add_columns):
        return
    res = []
    col_w = (2 * prop.slab_outset)
    # Vertical offset of one storey: half a floor height plus the slab
    # thickness when a slab is present.
    pos_h = ((prop.floor_height / 2) + (prop.slab_thickness if prop.add_slab else 0))
    for v in face.verts:
        for i in range(prop.floor_count):
            # bottom=True presumably omits the bottom face -- confirm against
            # create_cube_without_faces.
            cube = create_cube_without_faces(bm, (col_w, col_w, prop.floor_height), (v.co.x, v.co.y, ((v.co.z + (pos_h * (i + 1))) + ((prop.floor_height / 2) * i))), bottom=True)
            res.extend(cube.get('verts'))
    # Collect every face touching the new verts (deduplicated) and tag them.
    columns = list({f for v in res for f in v.link_faces})
    add_faces_to_group(bm, columns, MaterialGroup.COLUMNS)
class PeftConfigMixin(PushToHubMixin):
    """Base mixin for PEFT configuration dataclasses: JSON (de)serialization
    from a local directory or the Hugging Face Hub."""
    # Dataclass field identifying the PEFT method this config belongs to.
    peft_type: Optional[PeftType] = field(default=None, metadata={'help': 'The type of PEFT model.'})
    def __dict__(self):
        # NOTE(review): defining __dict__ as a plain method is suspicious --
        # it is read as an attribute below (self.__dict__), so a stripped
        # @property decorator is likely; confirm against upstream.
        return asdict(self)
    def to_dict(self):
        """Return the configuration as a plain dict of its fields."""
        return self.__dict__
    def save_pretrained(self, save_directory, **kwargs):
        """Write this configuration as JSON to `save_directory/CONFIG_NAME`.

        Raises:
            AssertionError: if `save_directory` is an existing file.
        """
        if os.path.isfile(save_directory):
            raise AssertionError(f'Provided path ({save_directory}) should be a directory, not a file')
        os.makedirs(save_directory, exist_ok=True)
        output_dict = self.__dict__
        output_path = os.path.join(save_directory, CONFIG_NAME)
        # Stable, human-readable output: sorted keys, 2-space indent.
        with open(output_path, 'w') as writer:
            writer.write(json.dumps(output_dict, indent=2, sort_keys=True))
    def from_pretrained(cls, pretrained_model_name_or_path, subfolder=None, **kwargs):
        """Load a config from a local directory or download it from the Hub.

        NOTE(review): takes `cls` -- presumably a stripped @classmethod.
        """
        path = (os.path.join(pretrained_model_name_or_path, subfolder) if (subfolder is not None) else pretrained_model_name_or_path)
        if os.path.isfile(os.path.join(path, CONFIG_NAME)):
            config_file = os.path.join(path, CONFIG_NAME)
        else:
            try:
                config_file = hf_hub_download(pretrained_model_name_or_path, CONFIG_NAME, subfolder=subfolder)
            except Exception:
                raise ValueError(f"Can't find '{CONFIG_NAME}' at '{pretrained_model_name_or_path}'")
        loaded_attributes = cls.from_json_file(config_file)
        # Build from kwargs first, then overlay attributes found in the file.
        config = cls(**kwargs)
        for (key, value) in loaded_attributes.items():
            if hasattr(config, key):
                setattr(config, key, value)
        return config
    def from_json_file(cls, path_json_file, **kwargs):
        """Parse a JSON file and return its contents as a dict.

        NOTE(review): takes `cls` -- presumably a stripped @classmethod.
        """
        with open(path_json_file, 'r') as file:
            json_object = json.load(file)
        return json_object
class TestSearch():
    """Tests for `search` path-template matching."""
    def test_endswith(self):
        # The pattern should match the *last* occurrence in the URL.
        path_pattern = '/{test}/test'
        full_url_pattern = '/test1/test/test2/test'
        result = search(path_pattern, full_url_pattern)
        assert (result.named == {'test': 'test2'})
    def test_exact(self):
        path_pattern = '/{test}/test'
        full_url_pattern = '/test/test'
        result = search(path_pattern, full_url_pattern)
        assert (result.named == {'test': 'test'})
    .parametrize('path_pattern,expected', [('/{test_id}/test', {'test_id': 'test'}), ('/{test.id}/test', {'test.id': 'test'})])  # NOTE(review): garbled "@pytest.mark.parametrize" decorator.
    def test_chars_valid(self, path_pattern, expected):
        # Underscores and dots are allowed inside template variable names.
        full_url_pattern = '/test/test'
        result = search(path_pattern, full_url_pattern)
        assert (result.named == expected)
    # NOTE(review): the next line is a garbled "@pytest.mark.xfail" decorator
    # with a broken (unterminated) string literal -- restore from upstream.
    .xfail(reason='Special characters of regex not supported. See strict=True)
    .parametrize('path_pattern,expected', [('/{test~id}/test', {'test~id': 'test'}), ('/{test-id}/test', {'test-id': 'test'})])
    def test_special_chars_valid(self, path_pattern, expected):
        full_url_pattern = '/test/test'
        result = search(path_pattern, full_url_pattern)
        assert (result.named == expected)
def resnet152_cbam(pretrained=False, **kwargs):
    """Build a ResNet-152 with CBAM attention.

    When `pretrained` is True, warm-start from the plain resnet152
    checkpoint: only the keys present in that checkpoint are overwritten,
    the CBAM-specific parameters keep their fresh initialization.
    """
    net = ResNet(Bottleneck, [3, 8, 36, 3], **kwargs)
    if not pretrained:
        return net
    merged = net.state_dict()
    merged.update(model_zoo.load_url(model_urls['resnet152']))
    net.load_state_dict(merged)
    return net
class _CheckObject():
    """Callable predicate: does a matched AST node evaluate to `expected`?

    `expected` is a dotted path resolved once at construction.  `kind`
    selects the comparison: 'name' compares pynames, 'object' the object
    itself, 'type' its type, 'instance' the object or any superclass of it
    or of its type.  `unsure` makes unresolvable names count as a match.
    """
    def __init__(self, project, expected, kind='object', unsure=False):
        self.project = project
        self.kind = kind
        self.unsure = unsure
        # Resolve the dotted path (e.g. 'mod.Class') to a pyname up front.
        self.expected = self._evaluate(expected)
    def __call__(self, pymodule, node):
        pyname = self._evaluate_node(pymodule, node)
        # Unresolvable on either side: the `unsure` flag decides.
        if ((pyname is None) or (self.expected is None)):
            return self.unsure
        if self._unsure_pyname(pyname, unbound=(self.kind == 'name')):
            return True
        if (self.kind == 'name'):
            return self._same_pyname(self.expected, pyname)
        else:
            pyobject = pyname.get_object()
            # Build the candidate objects to compare against `expected`.
            if (self.kind == 'object'):
                objects = [pyobject]
            if (self.kind == 'type'):
                objects = [pyobject.get_type()]
            if (self.kind == 'instance'):
                objects = [pyobject]
                objects.extend(self._get_super_classes(pyobject))
                objects.extend(self._get_super_classes(pyobject.get_type()))
            for pyobject in objects:
                if self._same_pyobject(self.expected.get_object(), pyobject):
                    return True
            return False
    def _get_super_classes(self, pyobject):
        # Transitive closure of superclasses, depth-first.
        result = []
        if isinstance(pyobject, pyobjects.AbstractClass):
            for superclass in pyobject.get_superclasses():
                result.append(superclass)
                result.extend(self._get_super_classes(superclass))
        return result
    def _same_pyobject(self, expected, pyobject):
        return (expected == pyobject)
    def _same_pyname(self, expected, pyname):
        return occurrences.same_pyname(expected, pyname)
    def _unsure_pyname(self, pyname, unbound=True):
        return (self.unsure and occurrences.unsure_pyname(pyname, unbound))
    def _split_name(self, name):
        # Split 'expr.kind'; a bare name defaults to kind 'name'.
        parts = name.split('.')
        (expression, kind) = (parts[0], parts[(- 1)])
        if (len(parts) == 1):
            kind = 'name'
        return (expression, kind)
    def _evaluate_node(self, pymodule, node):
        scope = pymodule.get_scope().get_inner_scope_for_line(node.lineno)
        expression = node
        if (isinstance(expression, ast.Name) and isinstance(expression.ctx, ast.Store)):
            # Assignment targets cannot be evaluated directly: evaluate the
            # node's source text instead.
            (start, end) = patchedast.node_region(expression)
            text = pymodule.source_code[start:end]
            return evaluate.eval_str(scope, text)
        else:
            return evaluate.eval_node(scope, expression)
    def _evaluate(self, code):
        """Resolve a dotted path to a pyname, or None when any attribute in
        the chain is missing."""
        attributes = code.split('.')
        pyname = None
        if (attributes[0] in ('__builtin__', '__builtins__')):
            # Minimal stand-in that exposes the rope builtins table through
            # the same lookup interface a module object provides.
            class _BuiltinsStub():
                def get_attribute(self, name):
                    return builtins.builtins[name]
                def __getitem__(self, name):
                    return builtins.builtins[name]
                def __contains__(self, name):
                    return (name in builtins.builtins)
            pyobject = _BuiltinsStub()
        else:
            pyobject = self.project.get_module(attributes[0])
        for attribute in attributes[1:]:
            pyname = pyobject[attribute]
            if (pyname is None):
                return None
            pyobject = pyname.get_object()
        return pyname
class PGNModel(object):
    """Bundle of encoder, decoder and state reducer, optionally restored
    from a checkpoint file."""
    def __init__(self, model_file_path=None, is_eval=False, device=None, embedding=None):
        encoder = Encoder(embedding)
        decoder = Decoder()
        reduce_state = ReduceState()
        # Tie the decoder's embedding weights to the encoder's.
        decoder.embedding.weight = encoder.embedding.weight
        if is_eval:
            encoder = encoder.eval()
            decoder = decoder.eval()
            reduce_state = reduce_state.eval()
        encoder = encoder.to(device)
        decoder = decoder.to(device)
        reduce_state = reduce_state.to(device)
        self.encoder = encoder
        self.decoder = decoder
        self.reduce_state = reduce_state
        if (model_file_path is not None):
            # map_location keeps tensors on their original storage (avoids
            # loading straight onto a GPU).
            state = torch.load(model_file_path, map_location=(lambda storage, location: storage))
            self.encoder.load_state_dict(state['encoder_state_dict'])
            # strict=False: tolerate missing/unexpected decoder keys.
            self.decoder.load_state_dict(state['decoder_state_dict'], strict=False)
            self.reduce_state.load_state_dict(state['reduce_state_dict'])
    def eval(self):
        """Switch all three submodules to eval mode."""
        self.encoder = self.encoder.eval()
        self.decoder = self.decoder.eval()
        self.reduce_state = self.reduce_state.eval()
    def train(self):
        """Switch all three submodules to train mode."""
        self.encoder = self.encoder.train()
        self.decoder = self.decoder.train()
        self.reduce_state = self.reduce_state.train()
def setup(args):
    """Build the run configuration from the config file plus CLI overrides.

    Mutates the global `cfg` in place and returns it; command-line arguments
    take precedence over values from the config file.
    """
    cfg.merge_from_file(args.config_file)
    cfg.merge_from_list(args.opts)
    cfg.SOLVER.IMS_PER_BATCH = args.batch_size
    cfg.DATALOADER.NUM_WORKERS = args.num_work
    cfg.TEST.EVAL_DIS_IOUS = args.eval_iou
    cfg.TEST.EVAL_DEPTH = args.eval_depth
    cfg.MODEL.BACKBONE.CONV_BODY = args.backbone
    cfg.DATASETS.USE_MIX_TEACHING = args.mix_teaching
    cfg.DATASETS.LABELED_RATIO = args.labeled_ratio
    cfg.MODEL.WEIGHT = args.ckpt
    # Fix: corrected the "Backone" typo in the log message.
    print('Using Backbone: ', cfg.MODEL.BACKBONE.CONV_BODY)
    if args.vis_thre > 0:
        cfg.TEST.VISUALIZE_THRESHOLD = args.vis_thre
    if args.output is not None:
        cfg.OUTPUT_DIR = args.output
    if args.test:
        # Evaluate on the held-out KITTI test split.
        cfg.DATASETS.TEST_SPLIT = 'test'
        cfg.DATASETS.TEST = ('kitti_test',)
    cfg.START_TIME = datetime.datetime.strftime(datetime.datetime.now(), '%m-%d %H:%M:%S')
    default_setup(cfg, args)
    return cfg
# NOTE(review): the four lines below appear to be garbled click-style
# decorators (command/option declarations) -- confirm against upstream.
(name='smicat', epilog=smicat_epilog)
('--input-smiles', is_flag=True, default=False, help='Use the input SMILES instead of the cleaned-up SMILES')
('--output', '-o', 'output_file', default='-', type=GzipFile('w'), help='Output filename (default is stdout)')
_single_database_parameters()
def smicat(input_smiles, output_file, database_options):
    """Write "<SMILES> <id>" lines for every record in the database.

    A .fragdb SQLite database is queried directly; other databases go
    through the dataset abstraction.  `input_smiles` selects the raw input
    SMILES over the cleaned/normalized SMILES.
    """
    if database_options.database.endswith('.fragdb'):
        db = open_fragdb_from_options_or_exit(database_options)
        c = db.cursor()
        if input_smiles:
            # Error records also carry an input SMILES, so include them.
            c.execute('SELECT title, input_smiles FROM record UNION SELECT title, input_smiles FROM error_record')
        else:
            c.execute('SELECT title, normalized_smiles FROM record')
        iter_id_and_smiles = c
    else:
        dataset = open_dataset_from_options_or_exit(database_options)
        db = dataset.mmpa_db
        it = dataset.iter_compounds()
        if input_smiles:
            iter_id_and_smiles = ((compound.public_id, compound.input_smiles) for compound in it)
        else:
            iter_id_and_smiles = ((compound.public_id, compound.clean_smiles) for compound in it)
    # Iterate inside the database context so the connection stays open.
    with db:
        for (id, smiles) in iter_id_and_smiles:
            output_file.write(f'''{smiles} {id}
''')
_model  # NOTE(review): appears to be a garbled "@register_model" decorator.
def poolformerv2_s36(pretrained=False, **kwargs):
    """Build the PoolFormerV2-S36 MetaFormer (pooling token mixer); when
    `pretrained`, load the released checkpoint from `default_cfgs`."""
    model = MetaFormer(depths=[6, 6, 18, 6], dims=[64, 128, 320, 512], token_mixers=Pooling, norm_layers=partial(LayerNormGeneral, normalized_dim=(1, 2, 3), eps=1e-06, bias=False), **kwargs)
    model.default_cfg = default_cfgs['poolformerv2_s36']
    if pretrained:
        # check_hash guards against corrupted/stale cached downloads.
        state_dict = torch.hub.load_state_dict_from_url(url=model.default_cfg['url'], map_location='cpu', check_hash=True)
        model.load_state_dict(state_dict)
    return model
class ExportToHTML(SongsMenuPlugin):
    """Songs-menu plugin that exports the selected songs as an HTML file
    chosen by the user."""
    PLUGIN_ID = 'Export to HTML'
    PLUGIN_NAME = _('Export to HTML')
    PLUGIN_DESC = _('Exports the selected song list to HTML.')
    REQUIRES_ACTION = True
    PLUGIN_ICON = Icons.TEXT_HTML
    def plugin_songs(self, songs):
        """Prompt for a target file and write the songs as UTF-8 HTML."""
        if (not songs):
            return
        target = choose_target_file(self.plugin_window, _('Export to HTML'), _('_Save'))
        # `target` is None when the user cancels the file chooser.
        if (target is not None):
            with open(target, 'wb') as f:
                f.write(to_html(songs).encode('utf-8'))
def initial_data():
    """Fetch OSes, releases and release files from the API and build factory
    objects, resolving cross-references by resource URI.

    Release files whose release was not fetched are dropped.
    """
    objects = {'oses': {}, 'releases': {}, 'release_files': {}}
    with APISession() as session:
        for (key, resource_uri) in [('oses', 'downloads/os/'), ('releases', 'downloads/release/'), ('release_files', 'downloads/release_file/')]:
            response = session.get(resource_uri)
            object_list = response.json()
            for obj in object_list:
                # Index raw dicts by the id embedded in their resource_uri.
                objects[key][_get_id(obj, 'resource_uri')] = obj
    objects['oses'] = {k: OSFactory(**obj) for (k, obj) in objects['oses'].items()}
    # Replacing values for existing keys while iterating items() is safe
    # here (no insertion/deletion, so the dict is not resized).
    for (key, obj) in objects['releases'].items():
        obj.pop('release_page')
        objects['releases'][key] = ReleaseFactory(**obj)
    # Snapshot via tuple() because entries may be *removed* below.
    for (key, obj) in tuple(objects['release_files'].items()):
        release_id = _get_id(obj, 'release')
        try:
            release = objects['releases'][release_id]
        except KeyError:
            # Orphaned file: its release was not fetched -- drop it.
            objects['release_files'].pop(key)
        else:
            obj['release'] = release
            obj['os'] = objects['oses'][_get_id(obj, 'os')]
            objects['release_files'][key] = ReleaseFileFactory(**obj)
    return {'oses': list(objects.pop('oses').values()), 'releases': list(objects.pop('releases').values()), 'release_files': list(objects.pop('release_files').values())}
class KmeansVectorQuantizer(nn.Module):
    """Vector quantizer trained with a k-means style objective.

    The input is projected with a grouped 1x1 conv, split into `groups`
    sub-vectors per timestep, and each sub-vector is replaced by its nearest
    codebook entry; gradients flow through via a straight-through estimator.
    """
    def __init__(self, dim, num_vars, groups, combine_groups, vq_dim, time_first, gamma=0.25):
        super().__init__()
        self.groups = groups
        self.combine_groups = combine_groups
        self.input_dim = dim
        self.num_vars = num_vars
        self.vq_dim = vq_dim
        self.time_first = time_first
        assert ((vq_dim % groups) == 0), f'dim {vq_dim} must be divisible by groups {groups} for concatenation'
        self.var_dim = (vq_dim // groups)
        # With combine_groups all groups share a single codebook.
        num_groups = (groups if (not combine_groups) else 1)
        self.embedding = nn.Parameter((0.01 * torch.randn(num_vars, num_groups, self.var_dim)))
        self.projection = nn.Sequential(nn.Conv1d(dim, dim, kernel_size=1, groups=groups, bias=False), Fp32GroupNorm(groups, dim))
        # Weight of the commitment term in the k-means loss.
        self.gamma = gamma
        self.mse_mean = nn.MSELoss(reduction='mean')
    def _pass_grad(self, x, y):
        # Straight-through estimator: forward value of y, gradient of x.
        return (y.detach() + (x - x.detach()))
    def expand_embedding(self):
        # NOTE(review): accessed as an attribute in forward()
        # (`self.expand_embedding.unsqueeze(...)`), so a stripped @property
        # decorator is likely -- confirm against upstream.
        if self.combine_groups:
            return self.embedding.expand(self.num_vars, self.groups, self.var_dim)
        return self.embedding
    def forward_idx(self, x):
        """Return (quantized x, codebook indices)."""
        res = self.forward(x, produce_targets=True)
        return (res['x'], res['targets'])
    def forward(self, x, produce_targets=False):
        """Quantize `x`; returns a dict with 'x', 'kmeans_loss',
        'code_perplexity', 'num_vars' and optionally 'targets'."""
        result = {'num_vars': self.num_vars}
        if self.time_first:
            x = x.transpose(1, 2)
        (bsz, fsz, tsz) = x.shape
        ze = self.projection(x)
        ze_ = ze.view(bsz, self.groups, self.var_dim, tsz).permute(0, 3, 1, 2)
        # L2 distance of every (batch, time, group) sub-vector to each code.
        d = (ze_.unsqueeze(0) - self.expand_embedding.unsqueeze(1).unsqueeze(1)).view(self.num_vars, bsz, tsz, self.groups, (- 1)).norm(dim=(- 1), p=2)
        idx = d.argmin(dim=0)
        # Gather the nearest codes per group and reassemble.
        zq = torch.stack([self.expand_embedding[(idx[(..., group)], group)] for group in range(self.groups)], dim=(- 2)).view(bsz, tsz, (self.groups * self.var_dim)).permute(0, 2, 1)
        assert (ze.shape == zq.shape), (ze.shape, zq.shape)
        x = self._pass_grad(ze, zq)
        # One-hot code usage statistics for monitoring codebook utilization.
        hard_x = idx.new_zeros(((bsz * tsz) * self.groups), self.num_vars).scatter_((- 1), idx.view((- 1), 1), 1.0).view((bsz * tsz), self.groups, (- 1))
        hard_probs = torch.mean(hard_x.float(), dim=0)
        result['code_perplexity'] = torch.exp((- torch.sum((hard_probs * torch.log((hard_probs + 1e-07))), dim=(- 1)))).sum()
        if produce_targets:
            result['targets'] = idx
        if self.time_first:
            x = x.transpose(1, 2)
        result['x'] = x
        ze = ze.float()
        zq = zq.float()
        # Codebook loss plus gamma-weighted commitment loss.
        latent_loss = self.mse_mean(zq, ze.detach())
        commitment_loss = self.mse_mean(ze, zq.detach())
        result['kmeans_loss'] = (latent_loss + (self.gamma * commitment_loss))
        return result
('vector-cas!', [values.W_MVector, values.W_Fixnum, values.W_Object, values.W_Object], simple=False)
def vector_cas_bang(vec, pos, old_val, new_val, env, cont):
if isinstance(vec, imp.W_ImpVector):
raise SchemeException('vector-cas!: exptects a non impersonator vector')
return vec.vector_ref(pos.value, env, vector_cas_bang_cont(vec, pos.value, old_val, new_val, env, cont)) |
class DummyNNModule(torch.nn.Module):
    """Toy network used in tests.

    ``forward(x)`` computes ``act(2 * ((A + diag(d)) @ x)) + 2 * bias``,
    optionally adding the input back (``addx``) and/or summing the output
    to a scalar (``sumoutput``).  ``set_diag_bias`` must be called before
    ``forward``.
    """

    def __init__(self, A, addx=True, activation='sigmoid', sumoutput=False):
        # Modernized from the dated super(DummyNNModule, self).__init__().
        super().__init__()
        self.A = A
        self.addx = addx
        # Resolve the activation name to a callable once, at construction.
        self.activation = {
            'sigmoid': (lambda x: (1 / (1 + torch.exp((- x))))),
            'cos': torch.cos,
            'square': (lambda x: ((x - 0.1) ** 2)),
        }[activation]
        self.sumoutput = sumoutput
        self.biasdiff = True

    def set_diag_bias(self, diag, bias):
        """Set the diagonal added to A and the output bias; remember whether
        the bias participates in autograd."""
        self.diag = diag
        self.bias = bias
        self.biasdiff = bias.requires_grad

    def forward(self, x):
        (nbatch, nr) = x.shape
        x = x.unsqueeze(-1)  # (nbatch, nr, 1) column vectors
        A = self.A.unsqueeze(0).expand(nbatch, -1, -1)
        A = A + torch.diag_embed(self.diag)
        y = torch.bmm(A, x).squeeze(-1)
        yr = self.activation(2 * y) + 2 * self.bias
        if self.addx:
            yr = yr + x.squeeze(-1)
        if self.sumoutput:
            yr = yr.sum()
        return yr
class GCBCPolicyImages(BCPolicyStates):
    """Goal-conditioned behavioral-cloning policy operating on images."""
    def __init__(self, ag_params, policyparams):
        super(GCBCPolicyImages, self).__init__(ag_params, policyparams)
        self._hp = self._default_hparams()
        self._override_defaults(policyparams)
    def _default_hparams(self):
        """Defaults for this subclass merged over the parent's defaults."""
        default_dict = AttrDict({'confirm_first_image': False, 'crop_image_region': False, 'stack_goal_images': False})
        default_dict.update(super(GCBCPolicyImages, self)._default_hparams())
        return default_dict
    def _preprocess_input(input):
        # NOTE(review): no `self` parameter -- presumably a stripped
        # @staticmethod; confirm.  Scales images into [-1, 1] and moves a
        # trailing channel axis to channel-first when present.
        assert (len(input.shape) == 4)
        if (input.max() > 1.0):
            input = (input / 255.0)
        if (input.min() >= 0.0):
            input = ((2 * input) - 1.0)
        if (input.shape[(- 1)] == 3):
            input = input.transpose(0, 3, 1, 2)
        return input
    def act(self, t=None, i_tr=None, images=None, state=None, goal=None, goal_image=None):
        """Compute an action for timestep `t` from the current image(s) and,
        when the predictor is goal-conditioned, the goal image(s)."""
        self.t = t
        self.i_tr = i_tr
        self.goal_image = goal_image
        images = images[t]
        if self._hp.crop_image_region:
            (target_height, target_width) = self._hp.model_override_params['data_conf']['image_size_beforecrop']
            if (self._hp.crop_image_region == 'select'):
                from widowx_envs.utils.datautils.annotate_object_pos import Getdesig
                # Let the user click the crop center on the first frame; the
                # chosen center is reused for the rest of the trajectory.
                if (self.t == 0):
                    self.crop_center = np.array(Getdesig(images[0]).desig, dtype=np.int32)
                    print('selected position', self.crop_center)
            else:
                self.crop_center = self._hp.crop_image_region
            images = crop_image(target_height, target_width, self.crop_center, images)
        if (self._hp.model_override_params['data_conf']['image_size_beforecrop'] != images.shape[2:4]):
            # Resize every camera image to the size the model expects.
            (h, w) = self._hp.model_override_params['data_conf']['image_size_beforecrop']
            resized_images = np.zeros([images.shape[0], h, w, 3], dtype=images.dtype)
            for n in range(images.shape[0]):
                resized_images[n] = cv2.resize(images[n], (w, h), interpolation=cv2.INTER_AREA)
            images = resized_images
        if ((t == 0) and self._hp.confirm_first_image):
            # Interactive debug visualization of the first observation and
            # (for goal-conditioned predictors) the goal image(s).
            import matplotlib.pyplot as plt
            import matplotlib
            plt.switch_backend('Tkagg')
            if self.predictor._hp.concatenate_cameras:
                plt.imshow(np.concatenate(np_unstack(images, axis=0), 0))
            else:
                plt.imshow(images[self.predictor._hp.sel_camera])
            print('saving start image to', (self.traj_log_dir + '/start_image.png'))
            plt.show()
            if self.predictor._hp.goal_cond:
                if self._hp.stack_goal_images:
                    for goal_image_single in goal_image:
                        plt.imshow(goal_image_single[0].transpose(1, 2, 0))
                        plt.show()
                else:
                    plt.imshow(goal_image[(0, self.predictor._hp.sel_camera)])
                    plt.show()
        images = self.npy2trch(self._preprocess_input(images))
        inputs = AttrDict(I_0=images)
        if self.predictor._hp.goal_cond:
            if self._hp.stack_goal_images:
                inputs['I_g'] = [self.npy2trch(self._preprocess_input(goal_image_single)) for goal_image_single in goal_image]
            else:
                # Use the final goal image when a sequence is provided.
                inputs['I_g'] = self.npy2trch(self._preprocess_input((goal_image[(- 1)] if (len(goal_image.shape) > 4) else goal_image)))
        output = AttrDict()
        action = self.predictor(inputs).pred_actions.data.cpu().numpy().squeeze()
        print('inferred action', action)
        output.actions = action
        return output
class LearningToDownsample(nn.Module):
    """Downsampling head: one standard conv followed by two depthwise-
    separable convs, each with stride 2 (8x spatial reduction overall)."""

    def __init__(self, in_channels, dw_channels, out_channels, conv_cfg=None, norm_cfg=dict(type='BN'), act_cfg=dict(type='ReLU')):
        super(LearningToDownsample, self).__init__()
        self.conv_cfg = conv_cfg
        self.norm_cfg = norm_cfg
        self.act_cfg = act_cfg
        # dw_channels gives the widths of the two intermediate stages.
        dw_ch1, dw_ch2 = dw_channels[0], dw_channels[1]
        self.conv = ConvModule(in_channels, dw_ch1, 3, stride=2, conv_cfg=self.conv_cfg, norm_cfg=self.norm_cfg, act_cfg=self.act_cfg)
        self.dsconv1 = DepthwiseSeparableConvModule(dw_ch1, dw_ch2, kernel_size=3, stride=2, padding=1, norm_cfg=self.norm_cfg)
        self.dsconv2 = DepthwiseSeparableConvModule(dw_ch2, out_channels, kernel_size=3, stride=2, padding=1, norm_cfg=self.norm_cfg)

    def forward(self, x):
        """Apply the three stride-2 convolutions in sequence."""
        for stage in (self.conv, self.dsconv1, self.dsconv2):
            x = stage(x)
        return x
def main(argv: List[str]) -> None:
    """Entry point: parse args, build model/optimizer/dataloaders, and run
    the train/eval fit loop."""
    args = parse_args(argv)
    # Comma-separated string args become lists of ints; everything else is
    # left untouched (ValueError for non-numeric, AttributeError for
    # non-string values).
    for (name, val) in vars(args).items():
        try:
            vars(args)[name] = list(map(int, val.split(',')))
        except (ValueError, AttributeError):
            pass
    rank_zero_print(f'PARAMS: (lr, batch_size, warmup_steps, decay_start, decay_steps): {(args.learning_rate, args.batch_size, args.lr_warmup_steps, args.lr_decay_start, args.lr_decay_steps)}')
    device = init_from_env()
    backend = get_process_group_backend_from_device(device)
    eb_configs = init_embdedding_configs(args.embedding_dim, args.num_embeddings_per_feature, args.num_embeddings)
    model = init_model(eb_configs, args.dense_arch_layer_sizes, args.over_arch_layer_sizes, args.learning_rate, args.batch_size, device)
    optimizer = init_optimizer(model, args.learning_rate)
    tb_logger = init_logger()
    auroc = BinaryAUROC(device=device)
    my_unit = MyUnit(module=model, optimizer=optimizer, device=device, tb_logger=tb_logger, train_auroc=auroc, log_every_n_steps=10)
    # Train and eval loaders differ only in batch size.
    train_dataloader = init_dataloader(args.batch_size, args.num_batches, args.num_embeddings, backend, args.num_embeddings_per_feature, args.seed, args.pin_memory)
    eval_dataloader = init_dataloader(args.test_batch_size, args.num_batches, args.num_embeddings, backend, args.num_embeddings_per_feature, args.seed, args.pin_memory)
    tqdm_callback = TQDMProgressBar()
    fit(my_unit, train_dataloader=train_dataloader, eval_dataloader=eval_dataloader, max_epochs=args.epochs, callbacks=[tqdm_callback])
def monaco_patient_directory_picker(config, patient_id='', key_namespace='', advanced_mode=False, site=None):
    """Streamlit widget flow for locating a Monaco patient plan directory.

    Returns (site, monaco_dir, patient_id, plan_dir, patient_dir).  Calls
    st.stop() when no patient ID is entered or no plan directory exists.
    """
    (monaco_site, monaco_directory) = misc.get_site_and_directory(config, 'Monaco Plan Location', 'monaco', default=site, key=f'{key_namespace}_monaco_site')
    if advanced_mode:
        st.write(monaco_directory.resolve())
    patient_id = st.text_input('Patient ID', patient_id, key=f'{key_namespace}_patient_id')
    if advanced_mode:
        st.write(patient_id)
    if (patient_id == ''):
        st.stop()
    # Plans live in directories matching "*~<patient_id>/plan".
    plan_directories = list(monaco_directory.glob(f'*~{patient_id}/plan'))
    if (len(plan_directories) == 0):
        if (patient_id != ''):
            st.write(exceptions.NoRecordsFound(f'No Monaco plan directories found for patient ID {patient_id}'))
        st.stop()
        # NOTE(review): unreachable if st.stop() raises (as in current
        # Streamlit) -- kept as a defensive fallback.
        return {'patient_id': patient_id}
    elif (len(plan_directories) > 1):
        raise ValueError(f"More than one patient plan directory found for this ID, please only have one directory per patient. Directories found were {', '.join([str(path.resolve()) for path in plan_directories])}")
    plan_directory = plan_directories[0]
    patient_directory = pathlib.Path(plan_directory).parent
    return (monaco_site, monaco_directory, patient_id, plan_directory, patient_directory)
class FTPStore(Store):
    """Attachment store backed by an FTP (optionally FTPS) server.

    `hostname` may be an already-connected FTP instance (used as-is) or a
    host name to connect to with the given credentials.
    """
    def __init__(self, hostname, root_path, base_url, username=None, password=None, passive=True, secure=False, **kwargs):
        if isinstance(hostname, FTP):
            self.ftp_client = hostname
        else:
            if secure:
                self.ftp_client = FTP_TLS(host=hostname, user=username, passwd=password, **kwargs)
                # Switch the data connection to TLS as well.
                self.ftp_client.prot_p()
            else:
                self.ftp_client = FTP(host=hostname, user=username, passwd=password, **kwargs)
            self.ftp_client.set_pasv(passive)
        self.root_path = root_path
        self.base_url = base_url.rstrip('/')
    def _get_remote_path(self, filename):
        # Path of `filename` under the configured remote root.
        return join(self.root_path, filename)
    def _change_directory(self, remote):
        # cd into `remote` one component at a time, creating components
        # whose cwd fails (presumably because they do not exist yet).
        remote_dirs = remote.split('/')
        for directory in remote_dirs:
            try:
                self.ftp_client.cwd(directory)
            except Exception:
                self.ftp_client.mkd(directory)
                self.ftp_client.cwd(directory)
    def put(self, filename: str, stream: FileLike) -> int:
        """Upload `stream` to `filename`; return the stored size in bytes.

        The stream is always closed; the previous working directory is
        restored after a successful upload.
        """
        remote_filename = self._get_remote_path(filename)
        remote_dir = dirname(remote_filename)
        remote_file = basename(remote_filename)
        current = self.ftp_client.pwd()
        self._change_directory(remote_dir)
        try:
            self.ftp_client.storbinary(('STOR %s' % remote_file), stream)
            size = self.ftp_client.size(remote_file)
        finally:
            stream.close()
        self.ftp_client.cwd(current)
        return size
    def delete(self, filename: str) -> None:
        """Remove `filename` from the server."""
        remote_filename = self._get_remote_path(filename)
        self.ftp_client.delete(remote_filename)
    def open(self, filename: str, mode: str='rb'):
        """Download `filename` into a BytesIO and return it rewound to 0.

        `mode` is accepted for interface compatibility but not used.
        """
        remote_filename = self._get_remote_path(filename)
        file_bytes = BytesIO()
        self.ftp_client.retrbinary(('RETR %s' % remote_filename), file_bytes.write)
        file_bytes.seek(0)
        return file_bytes
    def locate(self, attachment) -> str:
        """Public URL of `attachment` under the configured base URL."""
        return ('%s/%s' % (self.base_url, attachment.path))
def main(count):
    """Benchmark helper: create a temp table, insert `count` sample rows,
    and time reading them back; the table is always dropped afterwards."""
    sqlexec('CREATE TEMP TABLE samples (i2 int2, i4 int4, i8 int8, n numeric, n2 numeric, t text, v varchar, c char(2), ts timestamp)')
    # Prepared statements so per-row parse/plan cost is excluded.
    insert_records = prepare('INSERT INTO samples VALUES ($1, $2, $3, $4, $5, $6, $7, $8, $9)')
    select_records = prepare('SELECT * FROM samples')
    try:
        insertSamples(count, insert_records)
        timeTupleRead(select_records)
    finally:
        sqlexec('DROP TABLE samples')
class TestAttributes():
    """Tests for the x509.Attributes container."""
    def test_no_attributes(self):
        attrs = x509.Attributes([])
        assert (len(attrs) == 0)
    def test_get_attribute_for_oid(self):
        # Lookup by OID returns the matching attribute with value and
        # ASN.1 type preserved.
        attr_list = [x509.Attribute(x509.oid.AttributeOID.CHALLENGE_PASSWORD, b'nonsense'), x509.Attribute(x509.oid.AttributeOID.UNSTRUCTURED_NAME, b'montessori', _ASN1Type.PrintableString.value)]
        attrs = x509.Attributes(attr_list)
        attr = attrs.get_attribute_for_oid(x509.oid.AttributeOID.UNSTRUCTURED_NAME)
        assert (attr.oid == x509.oid.AttributeOID.UNSTRUCTURED_NAME)
        assert (attr.value == b'montessori')
        assert (attr._type == _ASN1Type.PrintableString.value)
    def test_indexing(self):
        # Attributes supports len(), iteration, negative indices and slices.
        attr_list = [x509.Attribute(x509.oid.AttributeOID.CHALLENGE_PASSWORD, b'nonsense'), x509.Attribute(x509.oid.AttributeOID.UNSTRUCTURED_NAME, b'montessori'), x509.Attribute(x509.ObjectIdentifier('2.999.2'), b'meaningless'), x509.Attribute(x509.ObjectIdentifier('2.999.1'), b'meaningless')]
        attrs = x509.Attributes(attr_list)
        assert (len(attrs) == 4)
        assert (list(attrs) == attr_list)
        assert (attrs[(- 1)] == attrs[3])
        assert (attrs[0:3:2] == [attrs[0], attrs[2]])
    def test_get_attribute_not_found(self):
        # A missing OID raises AttributeNotFound carrying the queried OID.
        attrs = x509.Attributes([])
        with pytest.raises(x509.AttributeNotFound) as exc:
            attrs.get_attribute_for_oid(x509.oid.AttributeOID.CHALLENGE_PASSWORD)
        assert (exc.value.oid == x509.oid.AttributeOID.CHALLENGE_PASSWORD)
    def test_repr(self):
        attrs = x509.Attributes([x509.Attribute(x509.oid.AttributeOID.CHALLENGE_PASSWORD, b'nonsense')])
        assert (repr(attrs) == "<Attributes([<Attribute(oid=<ObjectIdentifier(oid=1.2.840.113549.1.9.7, name=challengePassword)>, value=b'nonsense')>])>")
class OrJsonLine(IO):
    """JSON-lines serialization backed by orjson.

    NOTE(review): `load`/`dump` take no self/cls -- presumably stripped
    @staticmethod decorators; confirm against upstream.
    """
    def load(path):
        """Read a JSON-lines file into a list of deserialized objects."""
        with open(path) as rf:
            lines = rf.read().splitlines()
        return [orjson.loads(l) for l in lines]
    def dump(instances, path):
        """Write a list of objects as JSON lines (non-str keys allowed)."""
        assert (type(instances) == list)
        # orjson emits bytes; decode each line before joining.
        lines = [orjson.dumps(d, option=orjson.OPT_NON_STR_KEYS).decode() for d in instances]
        with open(path, 'w') as wf:
            wf.write('\n'.join(lines))
def test_format_error():
    """Exercise asynq.debug.format_error on None, a bare error, a task-attached
    error, and an error with a traceback — first with syntax highlighting off,
    then on.

    NOTE(review): toggles the module-global highlight flag and leaves it
    enabled (True) on exit.
    """
    # None is passed through unchanged.
    assert_is(None, asynq.debug.format_error(None))
    asynq.debug.enable_traceback_syntax_highlight(False)
    e = RuntimeError()
    # With highlighting off, a bare error formats as exactly its class name.
    expected = 'RuntimeError\n'
    assert_eq(expected, asynq.debug.format_error(e))
    # Attaching an asynq task adds extra info; the base text must remain.
    e._task = async_fn.asynq()
    formatted = asynq.debug.format_error(e)
    assert_in(expected, formatted)
    try:
        raise RuntimeError
    except RuntimeError:
        # Attach a real traceback so format_error renders a traceback section.
        e._traceback = sys.exc_info()[2]
    formatted = asynq.debug.format_error(e)
    assert_in(expected, formatted)
    assert_in('Traceback', formatted)
    # With highlighting on, output contains escape codes, so only substring
    # checks are safe (note: no trailing newline in `expected` here).
    asynq.debug.enable_traceback_syntax_highlight(True)
    expected = 'RuntimeError'
    formatted = asynq.debug.format_error(e)
    assert_in(expected, formatted)
    assert_in('Traceback', formatted)
class WeightAllocationsTestCase(unittest.TestCase):
    """Unit tests for Moonshot's signal-to-weight allocation methods."""

    def test_allocate_equal_weights(self):
        signals = pd.DataFrame(data={'FI12345': [1, 1, 1, 0, 0], 'FI23456': [0, -1, 1, 0, -1]})
        # cap=1.0: each row's absolute exposure is split equally among the
        # instruments with nonzero signals.
        expected_full = {'FI12345': [1.0, 0.5, 0.5, 0.0, 0.0], 'FI23456': [0.0, -0.5, 0.5, 0.0, -1.0]}
        allocations = Moonshot().allocate_equal_weights(signals, cap=1.0)
        self.assertDictEqual(allocations.to_dict(orient='list'), expected_full)
        # Halving the cap halves every weight.
        expected_half = {'FI12345': [0.5, 0.25, 0.25, 0.0, 0.0], 'FI23456': [0.0, -0.25, 0.25, 0.0, -0.5]}
        allocations = Moonshot().allocate_equal_weights(signals, cap=0.5)
        self.assertDictEqual(allocations.to_dict(orient='list'), expected_half)

    def test_allocate_fixed_weights(self):
        signals = pd.DataFrame(data={'FI12345': [1, 1, 1, 0, 0], 'FI23456': [0, -1, 1, 0, -1], 'FI34567': [1, 1, 1, -1, -1]})
        # Every nonzero signal receives exactly +/-0.34, independent of totals.
        expected = {'FI12345': [0.34, 0.34, 0.34, 0.0, 0.0], 'FI23456': [0.0, -0.34, 0.34, 0.0, -0.34], 'FI34567': [0.34, 0.34, 0.34, -0.34, -0.34]}
        allocations = Moonshot().allocate_fixed_weights(signals, 0.34)
        self.assertDictEqual(allocations.to_dict(orient='list'), expected)

    def test_allocate_fixed_weights_capped(self):
        signals = pd.DataFrame(data={'FI12345': [1, 1, 1, 0, 0], 'FI23456': [0, -1, 1, 0, -1], 'FI34567': [1, 1, 1, -1, -1]})
        # A loose cap (1.5) never binds: identical to plain fixed weights.
        expected_loose = {'FI12345': [0.34, 0.34, 0.34, 0.0, 0.0], 'FI23456': [0.0, -0.34, 0.34, 0.0, -0.34], 'FI34567': [0.34, 0.34, 0.34, -0.34, -0.34]}
        allocations = Moonshot().allocate_fixed_weights_capped(signals, 0.34, cap=1.5)
        self.assertDictEqual(allocations.to_dict(orient='list'), expected_loose)
        # A tight cap (0.81) rescales the rows whose gross exposure exceeds it.
        expected_tight = {'FI12345': [0.34, 0.27, 0.27, 0.0, 0.0], 'FI23456': [0.0, -0.27, 0.27, 0.0, -0.34], 'FI34567': [0.34, 0.27, 0.27, -0.34, -0.34]}
        allocations = Moonshot().allocate_fixed_weights_capped(signals, 0.34, cap=0.81)
        self.assertDictEqual(allocations.to_dict(orient='list'), expected_tight)

    def test_allocate_market_neutral_fixed_weights_capped(self):
        signals = pd.DataFrame(data={'FI12345': [1, 1, 1, 0, 0], 'FI23456': [0, -1, 1, 1, -1], 'FI34567': [1, 1, -1, -1, -1]})
        expected_unneutralized = {'FI12345': [0.3, 0.3, 0.3, 0.0, 0.0], 'FI23456': [0.0, -0.34, 0.3, 0.34, -0.3], 'FI34567': [0.3, 0.3, -0.34, -0.34, -0.3]}
        allocations = Moonshot().allocate_market_neutral_fixed_weights_capped(signals, 0.34, cap=1.2, neutralize_weights=False)
        self.assertDictEqual(allocations.to_dict(orient='list'), expected_unneutralized)
        expected_neutralized = {'FI12345': [0.0, 0.17, 0.17, 0.0, 0.0], 'FI23456': [0.0, -0.34, 0.17, 0.34, -0.0], 'FI34567': [0.0, 0.17, -0.34, -0.34, -0.0]}
        allocations = Moonshot().allocate_market_neutral_fixed_weights_capped(signals, 0.34, cap=1.2, neutralize_weights=True)
        self.assertDictEqual(allocations.to_dict(orient='list'), expected_neutralized)
def test_cell_n3_diffuse():
    """Build and return a two-atom carbon cell with a very diffuse basis
    (single s and p shells, exponent 0.1) and a small 5x5x5 mesh."""
    lattice_vectors = '0. 1.7834 1.7834\n 1.7834 0. 1.7834\n 1.7834 1.7834 0. '
    diffuse_basis = {'C': [[0, (0.1, 1.0)], [1, (0.1, 1.0)]]}
    cell = pbcgto.Cell()
    cell.unit = 'A'
    cell.atom = 'C 0., 0., 0.; C 0.8917, 0.8917, 0.8917'
    cell.a = lattice_vectors
    cell.basis = diffuse_basis
    cell.pseudo = 'gth-pade'
    cell.verbose = 7
    # Suppress the verbose build log.
    cell.output = '/dev/null'
    cell.mesh = [5, 5, 5]
    cell.build()
    return cell
# NOTE(review): the three bare expressions below (the route-path tuple and the
# `_if(features...)` calls) look like decorator lines whose names were mangled
# during extraction — confirm against the original module.
('/v1/superuser/users/<namespace>/quota', '/v1/superuser/organization/<namespace>/quota')
_if(features.SUPER_USERS)
_if(features.QUOTA_MANAGEMENT)
class SuperUserUserQuotaList(ApiResource):
    """Superuser API resource for listing and creating per-namespace quotas."""
    # JSON schema used to validate the POST request body.
    schemas = {'NewNamespaceQuota': {'type': 'object', 'description': 'Description of a new organization quota', 'required': ['limit_bytes'], 'properties': {'limit_bytes': {'type': 'integer', 'description': 'Number of bytes the organization is allowed'}}}}
    # NOTE(review): the bare names/expressions before each method also look
    # like mangled decorators (fresh-login, not-prod, nickname, scope) —
    # confirm upstream.
    _fresh_login
    _not_prod
    (['listUserQuotaSuperUser', 'listOrganizationQuotaSuperUser'])
    _scope(scopes.SUPERUSER)
    def get(self, namespace):
        """Return all quotas configured for the given user/org namespace.

        Raises NotFound for an unknown namespace and Unauthorized when the
        caller lacks superuser permission.
        """
        if SuperUserPermission().can():
            try:
                namespace_user = user.get_user_or_org(namespace)
            except DataModelException as ex:
                raise request_error(exception=ex)
            if (not namespace_user):
                raise NotFound()
            quotas = namespacequota.get_namespace_quota_list(namespace_user.username)
            return [quota_view(quota) for quota in quotas]
        raise Unauthorized()
    _fresh_login
    _not_prod
    (['createUserQuotaSuperUser', 'createOrganizationQuotaSuperUser'])
    _scope(scopes.SUPERUSER)
    def post(self, namespace):
        """Create a quota for the namespace; errors if one already exists.

        NOTE(review): unlike get(), `namespace_user` is not checked for None
        here, so an unknown namespace would raise AttributeError on
        `.username` below — confirm whether a NotFound check is missing.
        """
        if SuperUserPermission().can():
            quota_data = request.get_json()
            limit_bytes = quota_data['limit_bytes']
            namespace_user = user.get_user_or_org(namespace)
            quotas = namespacequota.get_namespace_quota_list(namespace_user.username)
            if quotas:
                # Only a single quota per namespace may be created here.
                raise request_error(message=("Quota for '%s' already exists" % namespace))
            try:
                newquota = namespacequota.create_namespace_quota(namespace_user, limit_bytes)
                return ('Created', 201)
            except DataModelException as ex:
                raise request_error(exception=ex)
        raise Unauthorized()
# NOTE(review): `.linux` and the `.parametrize(...)` expressions below look
# like pytest marks whose `@pytest.mark` prefix was mangled; the fixture
# methods (dbus_adapter_patches, dbus_adapter, dbus_presenter) likewise appear
# to have lost their @pytest.fixture decorators — confirm upstream.
.linux
class TestDBus():
    """Tests for the D-Bus (libnotify) notification adapter and the bridge
    presenter wrapping it, driven through a fake D-Bus interface."""
    # Canned error replies: NoReply is treated as non-fatal; others are fatal.
    NO_REPLY_ERROR = FakeDBusMessage.create_error('org.freedesktop.DBus.Error.NoReply')
    FATAL_ERROR = FakeDBusMessage.create_error('test')
    def dbus_adapter_patches(self, monkeypatch, config_stub):
        # Point the adapter at the fake interface and enable the test flag.
        monkeypatch.setattr(objects, 'debug_flags', ['test-notification-service'])
        monkeypatch.setattr(notification, 'QDBusInterface', FakeDBusInterface)
    def dbus_adapter(self, dbus_adapter_patches):
        return notification.DBusNotificationAdapter()
    def dbus_presenter(self, dbus_adapter_patches, monkeypatch):
        # Presenter that tries the DBus adapter first, then the fake fallback.
        monkeypatch.setattr(notification.NotificationBridgePresenter, '_get_adapter_candidates', (lambda _self, _setting: [notification.DBusNotificationAdapter, FakeNotificationAdapter]))
        return notification.NotificationBridgePresenter()
    def test_notify_fatal_error(self, dbus_adapter, fake_notification):
        # A fatal error reply to notify raises DBusError from the adapter.
        dbus_adapter.interface.notify_reply = self.FATAL_ERROR
        with pytest.raises(notification.DBusError):
            dbus_adapter.present(fake_notification, replaces_id=None)
    def test_notify_fatal_error_presenter(self, dbus_presenter, fake_notification):
        # The presenter propagates fatal adapter errors.
        dbus_presenter._init_adapter()
        dbus_presenter._adapter.interface.notify_reply = self.FATAL_ERROR
        with pytest.raises(notification.DBusError):
            dbus_presenter.present(fake_notification)
    def test_notify_non_fatal_error(self, qtbot, dbus_adapter, fake_notification):
        # NoReply is non-fatal: the adapter emits its error signal instead of raising.
        dbus_adapter.interface.notify_reply = self.NO_REPLY_ERROR
        with qtbot.wait_signal(dbus_adapter.error) as blocker:
            dbus_adapter.present(fake_notification, replaces_id=None)
        assert (blocker.args == [f'error: {self.NO_REPLY_ERROR.errorName()}'])
    def test_notify_non_fatal_error_presenter(self, dbus_presenter, fake_notification, caplog):
        # The presenter logs the non-fatal error and drops its adapter.
        dbus_presenter._init_adapter()
        dbus_presenter._adapter.interface.notify_reply = self.NO_REPLY_ERROR
        with caplog.at_level(logging.ERROR):
            dbus_presenter.present(fake_notification)
        message = f'Notification error from libnotify adapter: {self.NO_REPLY_ERROR.errorMessage()}'
        assert (message in caplog.messages)
        assert (dbus_presenter._adapter is None)
    .parametrize('error, exctype', [(NO_REPLY_ERROR, notification.DBusError), (FATAL_ERROR, notification.Error)])
    def test_capabilities_error(self, dbus_adapter_patches, monkeypatch, error, exctype):
        # Errors during the capabilities query fail adapter construction.
        monkeypatch.setattr(FakeDBusInterface, 'CAPABILITIES_REPLY', error)
        with pytest.raises(exctype):
            notification.DBusNotificationAdapter()
    .parametrize('error', [NO_REPLY_ERROR, FATAL_ERROR], ids=(lambda e: e.errorName()))
    def test_capabilities_error_presenter(self, dbus_presenter, fake_notification, monkeypatch, caplog, error):
        # On capabilities failure the presenter falls back to the next candidate
        # adapter and still presents the notification.
        monkeypatch.setattr(FakeDBusInterface, 'CAPABILITIES_REPLY', error)
        dbus_presenter.present(fake_notification)
        message = f'Failed to initialize libnotify notification adapter: {error.errorName()}: {error.errorMessage()}'
        assert (message in caplog.messages)
        assert isinstance(dbus_presenter._adapter, FakeNotificationAdapter)
        assert (dbus_presenter._adapter.presented == [fake_notification])
class MlpGeLUDropoutAddFunction(torch.autograd.Function):
    """Autograd wrapper around the fused MLP + GeLU + dropout + residual-add
    CUDA extension.

    NOTE(review): `_fwd(...)`/`_bwd` below look like mangled AMP decorator
    lines — confirm against the original module.
    """
    _fwd(cast_inputs=torch.float16)
    def forward(ctx, p, r_p, *args):
        """Run the fused forward pass.

        Args:
            ctx: autograd context.
            p: dropout probability for the MLP output.
            r_p: dropout probability for the residual branch.
            *args: tensors forwarded verbatim to the extension.

        Returns:
            (output, dropout_mask, residual_mask).
        """
        outputs = fused_mlp_gelu_dropout_add.forward(p, r_p, args)
        ctx.save_for_backward(*args)
        ctx.outputs = outputs
        # The extension returns the two masks as the last two outputs.
        dropout_mask = outputs[-2]
        residual_mask = outputs[-1]
        ctx.p = p
        # BUG FIX: previously stored `p` here, so backward() ran with the
        # wrong residual dropout probability whenever r_p != p.
        ctx.r_p = r_p
        return (outputs[0], dropout_mask, residual_mask)
    _bwd
    def backward(ctx, *grad_o):
        """Backward through the fused extension; no grads for p / r_p."""
        p = ctx.p
        r_p = ctx.r_p
        grads = fused_mlp_gelu_dropout_add.backward(p, r_p, grad_o[0], ctx.outputs, ctx.saved_tensors)
        # Release cached forward outputs promptly to reduce memory pressure.
        del ctx.outputs
        return (None, None, *grads)
class Solution(object):
    """LeetCode 3: longest substring without repeating characters."""

    def lengthOfLongestSubstring(self, s):
        """Return the length of the longest run of distinct characters in `s`.

        Sliding-window scan: `last_seen` maps each character to the index of
        its most recent occurrence, and `start` is the left edge of the
        current window. O(len(s)) time.

        Fix: the previous version pre-built a 256-entry table keyed by
        ord(char), which raised KeyError on any non-ASCII input; a plain dict
        supports the full Unicode range and skips the useless pre-population.
        """
        last_seen = {}
        start = max_len = 0
        for j, ch in enumerate(s):
            prev = last_seen.get(ch, -1)
            if prev >= start:
                # `ch` repeats inside the window: move the left edge past it.
                start = prev + 1
            last_seen[ch] = j
            max_len = max(max_len, j - start + 1)
        return max_len
class SVMGuide1(object):
    """svmguide1 dataset loader.

    Reads the libsvm-format train/test files from `dataset_dir`, rescales
    every feature to [-1, 1] using the min/max over the combined data, and
    optionally subsamples each split.

    Args:
        dataset_dir: directory containing 'train.libsvm' and 'test.libsvm'.
        subsample_ratio: fraction of each split to keep (seeded random subset).
        split_seed: seed for the subsampling generator.
    """

    def __init__(self, dataset_dir, subsample_ratio=1.0, split_seed=0, **kwargs):
        self.dataset_dir = dataset_dir
        (self.train_dataset, self.test_dataset) = self._preprocess(subsample_ratio, split_seed)

    def _preprocess(self, subsample_ratio, split_seed):
        """Load, scale, and subsample both splits; returns (train, test)."""
        train_path = os.path.join(self.dataset_dir, 'train.libsvm')
        # BUG FIX: this previously pointed at 'train.libsvm' as well, so the
        # "test" split silently duplicated the training data.
        test_path = os.path.join(self.dataset_dir, 'test.libsvm')
        (train_inputs, train_targets) = load_svmlight_file(train_path)
        (test_inputs, test_targets) = load_svmlight_file(test_path)
        (train_inputs, test_inputs) = (train_inputs.todense(), test_inputs.todense())
        (train_inputs, train_targets) = (torch.tensor(train_inputs).float(), torch.tensor(train_targets).long())
        (test_inputs, test_targets) = (torch.tensor(test_inputs).float(), torch.tensor(test_targets).long())
        if torch.cuda.is_available():
            (train_inputs, train_targets) = (train_inputs.cuda(), train_targets.cuda())
            (test_inputs, test_targets) = (test_inputs.cuda(), test_targets.cuda())
        # Rescale each feature to [-1, 1] using min/max over train+test jointly.
        # NOTE(review): including the test data in the min/max leaks test
        # statistics into preprocessing — confirm this is intended.
        all_inputs = torch.cat([train_inputs, test_inputs])
        (input_min, _) = all_inputs.min(0)
        (input_max, _) = all_inputs.max(0)
        input_range = (input_max - input_min)
        train_inputs = (2 * (((train_inputs - input_min) / input_range) - 0.5))
        test_inputs = (2 * (((test_inputs - input_min) / input_range) - 0.5))
        train_dataset = TensorDataset(train_inputs, train_targets)
        # One generator drives both subsampling splits, so results are
        # reproducible for a fixed split_seed.
        generator = torch.Generator().manual_seed(split_seed)
        num_samples = int((subsample_ratio * len(train_dataset)))
        (train_dataset, _) = random_split(train_dataset, [num_samples, (len(train_dataset) - num_samples)], generator=generator)
        test_dataset = TensorDataset(test_inputs, test_targets)
        num_samples = int((subsample_ratio * len(test_dataset)))
        (test_dataset, _) = random_split(test_dataset, [num_samples, (len(test_dataset) - num_samples)], generator=generator)
        return (train_dataset, test_dataset)
def test_time_dependent_spline_in_c_ops():
    """brmesolve with an interpolated time-dependent a_op coefficient plus a
    time-dependent collapse operator must match the analytic decay of the
    initial 9-excitation state to within 1e-5 mean relative error."""
    n_levels = 10
    a = qutip.destroy(n_levels)
    hamiltonian = a.dag() * a
    initial_state = qutip.basis(n_levels, 9)
    tlist = np.linspace(0, 10, 100)
    kappa = 0.2
    # Closed-form expectation of n(t) for this time-dependent decay rate.
    analytic = 9 * np.exp(-2 * kappa * (1 - np.exp(-tlist)))
    spectra, coeff = _string_w_interpolating_t(kappa, tlist)
    a_ops = [[qutip.QobjEvo([a + a.dag(), coeff]), spectra]]
    # Collapse amplitude sqrt(kappa) * exp(-t/2), sampled on tlist.
    decay_amplitudes = np.sqrt(kappa) * np.exp(-0.5 * tlist)
    c_ops = [[a, qutip.coefficient(decay_amplitudes, tlist=tlist)]]
    result = brmesolve(hamiltonian, initial_state, tlist, a_ops, e_ops=[a.dag() * a], c_ops=c_ops)
    assert np.mean(np.abs(result.expect[0] - analytic) / analytic) < 1e-05
# NOTE(review): the bare `.parametrize(...)` expressions below look like
# `@pytest.mark.parametrize` decorators with the prefix mangled — confirm.
.parametrize('bitsize', [3])
.parametrize('mod', [5, 8])
.parametrize('add_val', [1, 2])
.parametrize('cvs', [[], [0, 1], [1, 0], [1, 1]])
def test_add_mod_n(bitsize, mod, add_val, cvs):
    """Check AddConstantMod maps every computational basis state as expected,
    and that appending the inverse op restores the identity map."""
    gate = AddConstantMod(bitsize, mod, add_val=add_val, cvs=cvs)
    basis_map = {}
    num_cvs = len(cvs)
    for x in range((2 ** bitsize)):
        # Modular addition only applies to in-range values; x >= mod is passed
        # through unchanged.
        y = (((x + add_val) % mod) if (x < mod) else x)
        if (not num_cvs):
            basis_map[x] = y
            continue
        for cb in range((2 ** num_cvs)):
            # Prepend every possible control-bit pattern to the target bits.
            inp = f'0b_{cb:0{num_cvs}b}_{x:0{bitsize}b}'
            # (the genexp's `x` shadows the loop variable only inside itself)
            if (tuple((int(x) for x in f'{cb:0{num_cvs}b}')) == tuple(cvs)):
                # Controls match the required pattern: the addition fires.
                out = f'0b_{cb:0{num_cvs}b}_{y:0{bitsize}b}'
                basis_map[int(inp, 2)] = int(out, 2)
            else:
                # Controls do not match: state is unchanged.
                basis_map[int(inp, 2)] = int(inp, 2)
    op = gate.on_registers(**get_named_qubits(gate.signature))
    circuit = cirq.Circuit(op)
    cirq.testing.assert_equivalent_computational_basis_map(basis_map, circuit)
    # op followed by op**-1 must be the identity on all basis states.
    circuit += (op ** (- 1))
    cirq.testing.assert_equivalent_computational_basis_map(identity_map(gate.num_qubits()), circuit)
class FC6_Duplicate_TestCase(CommandSequenceTest):
    """Duplicate `multipath` handling under the FC6 syntax version: reusing a
    device across multipath names is an error, while repeating the same
    multipath name with a different device parses fine."""

    def __init__(self, *args, **kwargs):
        CommandSequenceTest.__init__(self, *args, **kwargs)
        self.version = FC6

    def runTest(self):
        same_device = '\nmultipath --name=mpath0 --device=/dev/sda --rule=failover\nmultipath --name=mpath1 --device=/dev/sda --rule=failover'
        same_name = '\nmultipath --name=mpath0 --device=/dev/sda --rule=failover\nmultipath --name=mpath0 --device=/dev/sdb --rule=failover'
        # Two multipath commands claiming the same device must be rejected.
        self.assert_parse_error(same_device)
        # The same multipath name with distinct devices is accepted.
        self.assert_parse(same_name)
def mlp(input_dim, hidden_dim, output_dim, hidden_depth, output_mod=None):
    """Build a fully-connected trunk as an nn.Sequential.

    hidden_depth == 0 yields a single input->output Linear; otherwise the
    stack is input->hidden, (hidden->hidden) x (hidden_depth - 1), then
    hidden->output, with an in-place ReLU after every hidden Linear.
    `output_mod`, if given, is appended as the final module.
    """
    if hidden_depth == 0:
        layers = [nn.Linear(input_dim, output_dim)]
    else:
        layers = [nn.Linear(input_dim, hidden_dim), nn.ReLU(inplace=True)]
        for _ in range(hidden_depth - 1):
            layers.extend([nn.Linear(hidden_dim, hidden_dim), nn.ReLU(inplace=True)])
        layers.append(nn.Linear(hidden_dim, output_dim))
    if output_mod is not None:
        layers.append(output_mod)
    return nn.Sequential(*layers)
class BorderAlignFunction(Function):
    """Autograd Function for mmcv's border_align op: pools features along the
    four borders of each box.

    NOTE(review): `symbolic`/`forward` likely carried @staticmethod and
    `_differentiable` below looks like a mangled @once_differentiable
    decorator — confirm against upstream mmcv.
    """
    def symbolic(g, input, boxes, pool_size):
        # ONNX export hook mapping to the custom MMCVBorderAlign operator.
        return g.op('mmcv::MMCVBorderAlign', input, boxes, pool_size_i=pool_size)
    def forward(ctx, input, boxes, pool_size):
        """Run the forward CUDA kernel.

        `input` channels must be a multiple of 4 (one slice per border);
        `boxes` is (B, H*W, 4) as (x1, y1, x2, y2). Returns a tensor of shape
        (B, C/4, H*W, 4).
        """
        ctx.pool_size = pool_size
        ctx.input_shape = input.size()
        assert (boxes.ndim == 3), 'boxes must be with shape [B, H*W, 4]'
        assert (boxes.size(2) == 4), 'the last dimension of boxes must be (x1, y1, x2, y2)'
        assert ((input.size(1) % 4) == 0), 'the channel for input feature must be divisible by factor 4'
        # Output is (B, C/4, num_boxes, 4): one pooled value per border.
        output_shape = (input.size(0), (input.size(1) // 4), boxes.size(1), 4)
        output = input.new_zeros(output_shape)
        # argmax indices are recorded so backward can route gradients.
        argmax_idx = input.new_zeros(output_shape).to(torch.int)
        ext_module.border_align_forward(input, boxes, output, argmax_idx, pool_size=ctx.pool_size)
        ctx.save_for_backward(boxes, argmax_idx)
        return output
    _differentiable
    def backward(ctx, grad_output):
        """Scatter gradients back to the input positions saved in argmax_idx."""
        (boxes, argmax_idx) = ctx.saved_tensors
        grad_input = grad_output.new_zeros(ctx.input_shape)
        # The extension kernel requires contiguous gradient memory.
        grad_output = grad_output.contiguous()
        ext_module.border_align_backward(grad_output, boxes, argmax_idx, grad_input, pool_size=ctx.pool_size)
        return (grad_input, None, None)
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.