code stringlengths 281 23.7M |
|---|
def project(x, original_x, epsilon, _type='linf'):
    """Project adversarial example `x` back into the epsilon-ball around `original_x`.

    Args:
        x: perturbed input tensor.
        original_x: clean reference tensor, same shape as `x`.
        epsilon: radius of the norm ball.
        _type: 'linf' for an element-wise clamp, 'l2' for projection onto
            the L2 ball (expects 4-D NCHW tensors, one norm per sample).

    Returns:
        The projected tensor.

    Raises:
        NotImplementedError: for an unsupported `_type`.
    """
    if _type == 'linf':
        # Element-wise clamp into [original_x - eps, original_x + eps].
        x = torch.max(torch.min(x, original_x + epsilon), original_x - epsilon)
    elif _type == 'l2':
        dist = (x - original_x).view(x.shape[0], -1)
        dist_norm = torch.norm(dist, dim=1, keepdim=True)
        # Only samples whose perturbation exceeds epsilon get rescaled.
        mask = (dist_norm > epsilon).unsqueeze(2).unsqueeze(3)
        # Bug fix: clamp the norm away from zero. The original divided 0/0
        # when x == original_x, and the resulting NaN leaked through
        # `(original_x + dist) * mask` because 0 * NaN == NaN.
        dist = dist / dist_norm.clamp(min=1e-12)
        dist *= epsilon
        dist = dist.view(x.shape)
        mask = mask.float()
        x = (original_x + dist) * mask + x * (1 - mask)
    else:
        raise NotImplementedError
    return x
# NOTE(review): this line looks like a Flask route decorator whose
# "@app.route" prefix was lost in transit (e.g. "@app.route('/task_operate',
# methods=['POST'])"); as written it is a syntax error — confirm upstream.
('/task_operate', methods=['POST'])
def task_operate():
    """Create, delete, inspect or stop a named task, selected by request.form['key'].

    NOTE(review): every branch compares request.form['key'] to '' and the
    status strings below are also empty — the original (likely non-ASCII)
    command and status words appear to have been stripped, so all but the
    first branch are unreachable as written. Confirm against the original.
    """
    # Require an authenticated session before doing anything.
    if (not session.get('logged_in')):
        return redirect(url_for('login'))
    global handle
    task_name = str(request.form['name'])
    if (request.form['key'] == ''):
        # (Re)create the task: drop any previous table/logs/data, then
        # rebuild the directory layout and the per-task case table.
        try:
            # NOTE(review): task_name is interpolated directly into SQL and
            # filesystem paths — injection risk if it is user-controlled.
            g.db.execute(('drop table if exists "%s";' % task_name))
            g.db.commit()
            shutil.rmtree(('../log/%s' % task_name))
            shutil.rmtree(('../data/%s' % task_name))
        except:
            # Best effort: ignore failures when the task did not exist yet.
            pass
        os.makedirs(('../log/%s/error/image/' % task_name))
        os.makedirs(('../data/%s/' % task_name))
        g.db.execute('update tasks set status="",operate_time=(?),pass_num=0,pass_rate="0%" where name=(?)', [time.strftime('%y%m%d %H:%M', time.localtime()), task_name])
        g.db.execute(('create table "%s" (id integer primary key autoincrement,case_name string not null, case_page string,flag string,step_num string,type string,ordd string, xpath string, operate string,data string,step_flag string,img string);' % task_name))
        g.db.commit()
        # Hand the task over to the worker listening on localhost:1111 and
        # keep the socket so the task can be stopped later.
        s = socket.socket()
        s.connect(('127.0.0.1', 1111))
        s.send(('task_name:%s' % task_name).encode('utf-8'))
        handle[task_name] = s
        print(handle[task_name])
    elif (request.form['key'] == ''):
        # Delete the task unless its status forbids removal.
        if (g.db.execute('select status from tasks where name=(?)', [task_name]).fetchall()[0][0] not in ('', '', '')):
            flash('', 'danger')
            return redirect(url_for('show_tasks'))
        else:
            try:
                g.db.execute(('drop table if exists "%s";' % task_name))
                g.db.execute('delete from tasks where name=(?)', [task_name])
                g.db.commit()
                shutil.rmtree(('../log/%s' % task_name))
                shutil.rmtree(('../data/%s' % task_name))
            except:
                # Best effort cleanup, matching the creation branch.
                pass
    elif (request.form['key'] == ''):
        # Render the task-detail page once recording is far enough along.
        if (g.db.execute('select status from tasks where name=(?)', [task_name]).fetchall()[0][0] == ''):
            flash(',', 'danger')
            return redirect(url_for('show_tasks'))
        # Require at least 10% progress before showing details.
        if (int(g.db.execute('select progress from tasks where name=(?)', [task_name]).fetchall()[0][0].split('%')[0].split('.')[0]) < 10):
            flash('...,', 'warning')
            return redirect(url_for('show_tasks'))
        # type="0" rows are cases; type="1" rows are steps within a case.
        case_detail = g.db.execute(('select * from "%s" where type="0" order by id' % task_name)).fetchall()
        step_detail = {}
        for elem in case_detail:
            tmp = g.db.execute(('select * from "%s" where type="1" and case_name="%s" order by ordd' % (task_name, elem[1]))).fetchall()
            step_detail[elem[1]] = tmp
        return render_template('task_detail.html', cases=case_detail, steps=step_detail)
    elif (request.form['key'] == ''):
        # Stop the running task: tell the worker to exit and drop the socket.
        handle[task_name].send(b'exit')
        handle[task_name].close()
        del handle[task_name]
        g.db.execute('update tasks set status="" where name=(?)', [task_name])
        g.db.commit()
    return redirect(url_for('show_tasks'))
class Effect5619(BaseEffect):
    """Ship bonus to rapid heavy missile launcher rate of fire, scaled by
    the hull's shipBonusCB attribute (requires Caldari Battleship skill)."""

    type = 'passive'

    def handler(fit, ship, context, projectionRange, **kwargs):
        # Boost the 'speed' attribute of every rapid heavy missile launcher.
        def is_rapid_heavy_launcher(mod):
            return mod.item.group.name == 'Missile Launcher Rapid Heavy'

        fit.modules.filteredItemBoost(
            is_rapid_heavy_launcher,
            'speed',
            ship.getModifiedItemAttr('shipBonusCB'),
            skill='Caldari Battleship',
            **kwargs,
        )
class BaseTaskHandle(ABC):
    """Interface for objects that track and control a running background task."""

    def stop(self) -> None:
        """Request that the running task be stopped."""

    def current_jobset(self) -> Optional[BaseJobSet]:
        """Return the job set currently being executed, if any."""

    def add_observer(self) -> None:
        """Register an observer to be notified of task progress."""

    def is_stopped(self) -> bool:
        """Return whether the task has been stopped."""

    def get_jobsets(self) -> Sequence[BaseJobSet]:
        """Return all job sets created for this task so far."""

    def create_jobset(self, name: str='JobSet', count: Optional[int]=None) -> BaseJobSet:
        """Create and return a new job set with the given name and job count."""

    def _inform_observers(self) -> None:
        """Notify every registered observer of the current state."""
class DripOAuthTest(OAuth2Test):
    """OAuth2 login tests for the Drip backend.

    NOTE(review): several string constants (expected_username, the email in
    user_data_body) appear to have been blanked in transit and are kept
    as-is (they remain consistent with each other). The user_data_url
    literal was left unterminated; it is restored to Drip's documented v2
    user endpoint — confirm against the original test module.
    """
    backend_path = 'social_core.backends.drip.DripOAuth'
    # Endpoint mocked by user_data_body below.
    user_data_url = 'https://api.getdrip.com/v2/user'
    expected_username = ''
    access_token_body = json.dumps({'access_token': '822bbf7cd12243df', 'token_type': 'bearer', 'scope': 'public'})
    user_data_body = json.dumps({'users': [{'email': '', 'name': None}]})

    def test_login(self):
        self.do_login()

    def test_partial_pipeline(self):
        self.do_partial_pipeline()
class Migration(migrations.Migration):
    """Add a per-publisher flag that temporarily skips payout processing."""

    dependencies = [('adserver', '0058_view_time')]

    # The identical boolean flag is added to both the live publisher model
    # and its history table; one AddField per model, built per iteration.
    operations = [
        migrations.AddField(
            model_name=target_model,
            name='skip_payouts',
            field=models.BooleanField(
                default=False,
                help_text=(
                    'Enable this to temporarily disable payouts. They will be '
                    'processed again once you uncheck this.'
                ),
                verbose_name='Skip payouts',
            ),
        )
        for target_model in ('historicalpublisher', 'publisher')
    ]
class TestReconfig(KazooTestCase):
    """Integration tests for ZooKeeper dynamic reconfiguration (reconfig)."""

    def setUp(self):
        KazooTestCase.setUp(self)
        # reconfig only exists in ZooKeeper 3.5+; CI may pin the version,
        # otherwise ask the connected server directly.
        if CI_ZK_VERSION:
            version = CI_ZK_VERSION
        else:
            version = self.client.server_version()
        if ((not version) or (version < (3, 5))):
            pytest.skip('Must use Zookeeper 3.5 or above')

    def test_no_super_auth(self):
        # reconfig requires "super" digest auth; plain clients are refused.
        with pytest.raises(NoAuthError):
            self.client.reconfig(joining='server.999=0.0.0.0:1234:2345:observer;3456', leaving=None, new_members=None)

    def test_add_remove_observer(self):
        def free_sock_port():
            # Bind to port 0 so the OS picks a free port; the socket is
            # returned (kept open) so the port stays reserved for the test.
            s = socket.socket()
            s.bind(('', 0))
            return (s, s.getsockname()[1])
        username = 'super'
        password = 'test'
        digest_auth = ('%s:%s' % (username, password))
        client = self._get_client(auth_data=[('digest', digest_auth)])
        client.start()
        (s1, port1) = free_sock_port()
        (s2, port2) = free_sock_port()
        (s3, port3) = free_sock_port()
        # Add server.100 as an observer, then remove it again.
        joining = ('server.100=0.0.0.0:%d:%d:observer;0.0.0.0:%d' % (port1, port2, port3))
        (data, _) = client.reconfig(joining=joining, leaving=None, new_members=None)
        assert (joining.encode('utf8') in data)
        (data, _) = client.reconfig(joining=None, leaving='100', new_members=None)
        assert (joining.encode('utf8') not in data)
        # The last line of the returned config is "version=<hex>"; a stale
        # from_config version must be rejected by the server.
        curver = int(data.decode().split('\n')[(- 1)].split('=')[1], base=16)
        with pytest.raises(BadVersionError):
            self.client.reconfig(joining=joining, leaving=None, new_members=None, from_config=(curver + 1))

    def test_bad_input(self):
        # A malformed member specification is rejected outright.
        with pytest.raises(BadArgumentsError):
            self.client.reconfig(joining='some thing', leaving=None, new_members=None)
def get_active_space_integrals(one_body_integrals, two_body_integrals, occupied_indices=None, active_indices=None):
    """Restrict molecular integrals to an active orbital space.

    Args:
        one_body_integrals: rank-2 array h[p, q] of one-electron integrals.
        two_body_integrals: rank-4 array V[p, q, r, s] of two-electron integrals.
        occupied_indices: indices of doubly-occupied (frozen-core) orbitals;
            defaults to no frozen orbitals.
        active_indices: indices of the orbitals kept in the active space.

    Returns:
        Tuple (core_constant, one_body_active, two_body_active): the frozen
        core energy, and both integral arrays restricted to active indices.

    Raises:
        ValueError: if no active indices are provided.
    """
    occupied_indices = [] if occupied_indices is None else occupied_indices
    # Bug fix: the original indexed len(None) for a missing active space,
    # raising TypeError instead of the intended ValueError.
    if active_indices is None or len(active_indices) < 1:
        raise ValueError('Some active indices required for reduction.')

    # Frozen-core energy: 2*h[i,i] plus Coulomb minus exchange over core pairs.
    core_constant = 0.0
    for i in occupied_indices:
        core_constant += 2 * one_body_integrals[i, i]
        for j in occupied_indices:
            core_constant += (2 * two_body_integrals[i, j, j, i]
                              - two_body_integrals[i, j, i, j])

    # Fold the frozen orbitals' mean field into the one-body term.
    one_body_integrals_new = numpy.copy(one_body_integrals)
    for u in active_indices:
        for v in active_indices:
            for i in occupied_indices:
                one_body_integrals_new[u, v] += (2 * two_body_integrals[i, u, v, i]
                                                 - two_body_integrals[i, u, i, v])

    return (core_constant,
            one_body_integrals_new[numpy.ix_(active_indices, active_indices)],
            two_body_integrals[numpy.ix_(active_indices, active_indices,
                                         active_indices, active_indices)])
# NOTE(review): these two registrations originally appeared as bare
# expression statements ("_sample_fn.register(ptr.BernoulliRV)") — calling
# .register with only a class returns a decorator that was then discarded,
# so they registered nothing. The "@" was almost certainly stripped in
# transit; restored as stacked decorators — confirm against the original.
@_sample_fn.register(ptr.BernoulliRV)
@_sample_fn.register(ptr.CategoricalRV)
def jax_sample_fn_no_dtype(op):
    """Build a JAX sampler for RV ops whose jax.random function takes no dtype.

    Looks up `jax.random.<op.name>` and returns a sample_fn(rng, size,
    dtype, *parameters) that splits the PRNG key, draws a sample of shape
    `size`, stores the advanced key back into `rng`, and returns both.
    """
    name = op.name
    jax_op = getattr(jax.random, name)

    def sample_fn(rng, size, dtype, *parameters):
        # Split so the stored state differs from the key actually consumed.
        rng_key = rng['jax_state']
        (rng_key, sampling_key) = jax.random.split(rng_key, 2)
        sample = jax_op(sampling_key, *parameters, shape=size)
        rng['jax_state'] = rng_key
        return (rng, sample)

    return sample_fn
def custom_vdom_constructor(func: _CustomVdomDictConstructor) -> VdomDictConstructor:
    """Adapt a low-level VDOM-dict builder into a standard constructor.

    The wrapper splits positional arguments into attributes and children,
    extracts the special `key` attribute, separates event handlers from
    plain attributes, and forwards everything to `func`.

    NOTE(review): the original source contained a bare "(func)" line here,
    which looks like a stripped "@wraps(func)" decorator; it is restored
    below so the wrapper keeps func's metadata — confirm against upstream.
    """
    from functools import wraps  # local import keeps this fix self-contained

    @wraps(func)
    def wrapper(*attributes_and_children: Any) -> VdomDict:
        (attributes, children) = separate_attributes_and_children(attributes_and_children)
        # `key` identifies the node for reconciliation; it is not an attribute.
        key = attributes.pop('key', None)
        (attributes, event_handlers) = separate_attributes_and_event_handlers(attributes)
        return func(attributes, children, key, event_handlers)

    return cast(VdomDictConstructor, wrapper)
def generate_df_tasks(c_code, mem_read_limit_per_process, WorldPop_inputfile):
    """Split a country's WorldPop raster into read windows that fit in memory.

    Args:
        c_code: country code, carried through into every task row.
        mem_read_limit_per_process: soft memory budget (MB) for one read.
        WorldPop_inputfile: path to the country's WorldPop raster file.

    Returns:
        pandas.DataFrame with one row per window:
        [c_code, window, is_original_window, *transform_and_coords].

    NOTE(review): the original indentation was lost in transit; the `with`
    block is assumed to span the remainder of the function — confirm
    against the original source.
    """
    task_list = []
    with rasterio.open(WorldPop_inputfile) as src:
        (worldpop_y_dim, worldpop_x_dim) = src.shape
        transform = src.meta['transform']
        (block_y_dim, block_x_dim) = [int(i) for i in src.block_shapes[0]]
        # 4 bytes per pixel for a full read of the raster.
        # NOTE(review): assumes 4-byte (e.g. float32) pixels — confirm.
        expected_bytes_input_read = ((4 * worldpop_y_dim) * worldpop_x_dim)
        # Never allow a budget below 883 MB.
        worldpop_byte_limit = (max(883, mem_read_limit_per_process) * (10 ** 6))
        if (expected_bytes_input_read < worldpop_byte_limit):
            # Whole raster fits in the budget: emit a single full-size window.
            bool_original_window = True
            current_window = [0, 0, worldpop_x_dim, worldpop_y_dim]
            transform_and_coords = calculate_transform_and_coords_for_window(transform, current_window, bool_original_window)
            task_list.append(([c_code, current_window, bool_original_window] + transform_and_coords))
        else:
            # Tile the raster into full-width horizontal bands whose height
            # is a whole number of native blocks fitting the byte budget.
            bool_original_window = False
            window_x_dim = worldpop_x_dim
            window_col_offset = 0
            read_block_size = ((4 * block_y_dim) * window_x_dim)
            window_block_count = int((worldpop_byte_limit // read_block_size))
            window_y_dim = (window_block_count * block_y_dim)
            window_row_offset = np.arange(0, worldpop_y_dim, window_y_dim)
            for row_offset in window_row_offset:
                current_window = [window_col_offset, row_offset, window_x_dim, window_y_dim]
                transform_and_coords = calculate_transform_and_coords_for_window(transform, current_window, bool_original_window)
                task_list.append(([c_code, current_window, bool_original_window] + transform_and_coords))
    return pd.DataFrame(task_list)
def mime_type_codec(mime_type_codec: str) -> Tuple[str, List[str]]:
    """Split a Content-Type header value into its MIME type and codec list.

    For example 'video/mp4; codecs="avc1.64001E, mp4a.40.2"' becomes
    ('video/mp4', ['avc1.64001E', 'mp4a.40.2']).

    Raises:
        RegexMatchError: when the value does not match the expected shape.
    """
    pattern = '(\\w+\\/\\w+)\\;\\scodecs=\\"([a-zA-Z-0-9.,\\s]*)\\"'
    match = re.compile(pattern).search(mime_type_codec)
    if match is None:
        raise RegexMatchError(caller='mime_type_codec', pattern=pattern)
    mime_type, codec_str = match.groups()
    # Codecs are comma-separated; strip the surrounding whitespace of each.
    codec_list = [codec.strip() for codec in codec_str.split(',')]
    return (mime_type, codec_list)
# NOTE(review): this bare ".end_to_end()" looks like a marker decorator
# whose prefix was stripped (e.g. "@pytest.mark.end_to_end()"); the task
# source string below likewise contains ".depends_on"/".produces" lines
# that appear to have lost their "@pytask.mark" prefix. The string is part
# of runtime behavior and is left untouched — confirm against upstream.
.end_to_end()
def test_skip_unchanged_w_dependencies_and_products(tmp_path):
    """A task with unchanged inputs is skipped on the second build run."""
    source = '\n    import pytask\n\n    .depends_on("in.txt")\n    .produces("out.txt")\n    def task_dummy(depends_on, produces):\n        produces.write_text(depends_on.read_text())\n    '
    tmp_path.joinpath('task_module.py').write_text(textwrap.dedent(source))
    tmp_path.joinpath('in.txt').write_text('Original content of in.txt.')
    # First run executes the task and produces out.txt.
    session = build(paths=tmp_path)
    assert (session.execution_reports[0].outcome == TaskOutcome.SUCCESS)
    assert (tmp_path.joinpath('out.txt').read_text() == 'Original content of in.txt.')
    # Second run with nothing changed must be skipped, not re-executed.
    session = build(paths=tmp_path)
    assert (session.execution_reports[0].outcome == TaskOutcome.SKIP_UNCHANGED)
    assert isinstance(session.execution_reports[0].exc_info[1], SkippedUnchanged)
    assert (tmp_path.joinpath('out.txt').read_text() == 'Original content of in.txt.')
class TestKeypoints(unittest.TestCase):
    """Unit tests for the Keypoints structure."""

    def test_cat_keypoints(self):
        """Concatenating two Keypoints instances stacks their tensors in order."""
        first = Keypoints(torch.rand(2, 21, 3))
        second = Keypoints(torch.rand(4, 21, 3))
        combined = first.cat([first, second])
        # The leading two rows must come from `first`, the rest from `second`.
        self.assertTrue(torch.equal(combined.tensor[:2], first.tensor))
        self.assertTrue(torch.equal(combined.tensor[2:], second.tensor))
def test_maincli_interactive_all_yes(tmpdir):
    """Interactive steering completes when every prompt is answered 'y'."""
    workdir = os.path.join(str(tmpdir), 'workdir')
    cli_args = [
        workdir,
        'workflow.yml',
        '-t', 'tests/testspecs/local-helloworld',
        '-g', 'interactive',
        '-p', 'par=value',
    ]
    # Six 'y' answers: one per interactive confirmation prompt.
    result = CliRunner().invoke(yadage.steering.main, cli_args, input='y\ny\ny\ny\ny\ny\n')
    assert tmpdir.join('workdir/hello_world/hello_world.txt').check()
    assert (result.exit_code == 0)
class ModbusSocketFramer(ModbusFramer):
    """Modbus TCP (MBAP) framer.

    Wire format: [tid:2][pid:2][len:2][uid:1][function:1][data...], where
    `len` counts the uid byte, the function code and the data bytes.
    """

    method = 'socket'

    def __init__(self, decoder, client=None):
        """Initialise the framer with a PDU decoder and optional client."""
        super().__init__(decoder, client)
        # MBAP header size: tid(2) + pid(2) + len(2) + uid(1).
        self._hsize = 7

    def checkFrame(self):
        """Parse the MBAP header; return True when a full frame is buffered."""
        if self.isFrameReady():
            (self._header['tid'], self._header['pid'], self._header['len'], self._header['uid']) = struct.unpack('>HHHB', self._buffer[0:self._hsize])
            # A length below 2 cannot hold uid + function code: discard it.
            if (self._header['len'] < 2):
                self.advanceFrame()
            elif (((len(self._buffer) - self._hsize) + 1) >= self._header['len']):
                # The uid byte is counted both in the header and in `len`,
                # hence the +1 when comparing buffered payload bytes.
                return True
        return False

    def advanceFrame(self):
        """Drop the current frame from the buffer and reset the header."""
        length = ((self._hsize + self._header['len']) - 1)
        self._buffer = self._buffer[length:]
        self._header = {'tid': 0, 'pid': 0, 'len': 0, 'uid': 0}

    def isFrameReady(self):
        """Return True once more than a full MBAP header is buffered."""
        return (len(self._buffer) > self._hsize)

    def getFrame(self):
        """Return the PDU bytes (function code + data) of the current frame."""
        length = ((self._hsize + self._header['len']) - 1)
        return self._buffer[self._hsize:length]

    def decode_data(self, data):
        """Peek header fields out of raw bytes without consuming the buffer."""
        if (len(data) > self._hsize):
            (tid, pid, length, uid, fcode) = struct.unpack(SOCKET_FRAME_HEADER, data[0:(self._hsize + 1)])
            return {'tid': tid, 'pid': pid, 'length': length, 'slave': uid, 'fcode': fcode}
        return {}

    def frameProcessIncomingPacket(self, single, callback, slave, tid=None, **kwargs):
        """Consume buffered bytes frame by frame, invoking `callback` per PDU.

        Frames that fail the header check or the slave-id filter are dropped.
        """
        while True:
            if (not self.isFrameReady()):
                # Leftover partial data with an invalid declared length is
                # surfaced as an error frame instead of waiting forever.
                if len(self._buffer):
                    if (self._header['len'] < 2):
                        self._process(callback, tid, error=True)
                break
            if (not self.checkFrame()):
                Log.debug('Frame check failed, ignoring!!')
                self.resetFrame()
                continue
            if (not self._validate_slave_id(slave, single)):
                header_txt = self._header['uid']
                Log.debug('Not a valid slave id - {}, ignoring!!', header_txt)
                self.resetFrame()
                continue
            self._process(callback, tid)

    def _process(self, callback, tid, error=False):
        """Decode one frame and hand the result to `callback`.

        Raises ModbusIOException when the PDU cannot be decoded, and
        InvalidMessageReceivedException when an error frame decodes to a
        non-error (function code < 128) message.
        """
        data = (self._buffer if error else self.getFrame())
        if ((result := self.decoder.decode(data)) is None):
            self.resetFrame()
            raise ModbusIOException('Unable to decode request')
        if (error and (result.function_code < 128)):
            raise InvalidMessageReceivedException(result)
        self.populateResult(result)
        self.advanceFrame()
        # When waiting on a specific transaction id, drop mismatches.
        if (tid and (tid != result.transaction_id)):
            self.resetFrame()
        else:
            callback(result)

    def buildPacket(self, message):
        """Prepend the MBAP header to an encoded request/response PDU."""
        data = message.encode()
        packet = struct.pack(SOCKET_FRAME_HEADER, message.transaction_id, message.protocol_id, (len(data) + 2), message.slave_id, message.function_code)
        packet += data
        return packet
def test_add_with_single_string():
    """add.run_step appends plain strings to the set named by a PyString key."""
    context = Context({
        'arbset': {1, 2},
        'add': {'set': PyString('arbset'), 'addMe': 'three'},
    })
    # First run adds 'three' to arbset.
    add.run_step(context)
    # Re-point addMe and run again: both strings accumulate.
    context['add']['addMe'] = 'four'
    add.run_step(context)
    assert context['arbset'] == {1, 2, 'three', 'four'}
    assert len(context) == 2
class TetrixWindow(QWidget):
    """Main Tetrix window: board in the centre, LCD read-outs and control
    buttons arranged in the side columns of a grid layout."""

    def __init__(self):
        super(TetrixWindow, self).__init__()
        self.board = TetrixBoard()

        # Preview widget that shows the next piece to drop.
        next_piece_label = QLabel()
        next_piece_label.setFrameStyle(QFrame.Box | QFrame.Raised)
        next_piece_label.setAlignment(Qt.AlignCenter)
        self.board.setNextPieceLabel(next_piece_label)

        def make_lcd(digit_count):
            # All read-outs share the filled-segment style.
            lcd = QLCDNumber(digit_count)
            lcd.setSegmentStyle(QLCDNumber.Filled)
            return lcd

        score_lcd = make_lcd(5)
        level_lcd = make_lcd(2)
        lines_lcd = make_lcd(5)

        def make_button(caption):
            # Buttons must not steal keyboard focus from the board.
            button = QPushButton(caption)
            button.setFocusPolicy(Qt.NoFocus)
            return button

        start_button = make_button('&Start')
        quit_button = make_button('&Quit')
        pause_button = make_button('&Pause')

        # Wire controls and board signals to their targets.
        start_button.clicked.connect(self.board.start)
        pause_button.clicked.connect(self.board.pause)
        quit_button.clicked.connect(QApplication.instance().quit)
        self.board.scoreChanged.connect(score_lcd.display)
        self.board.levelChanged.connect(level_lcd.display)
        self.board.linesRemovedChanged.connect(lines_lcd.display)

        layout = QGridLayout()
        layout.addWidget(self.createLabel('NEXT'), 0, 0)
        layout.addWidget(next_piece_label, 1, 0)
        layout.addWidget(self.createLabel('LEVEL'), 2, 0)
        layout.addWidget(level_lcd, 3, 0)
        layout.addWidget(start_button, 4, 0)
        layout.addWidget(self.board, 0, 1, 6, 1)
        layout.addWidget(self.createLabel('SCORE'), 0, 2)
        layout.addWidget(score_lcd, 1, 2)
        layout.addWidget(self.createLabel('LINES REMOVED'), 2, 2)
        layout.addWidget(lines_lcd, 3, 2)
        layout.addWidget(quit_button, 4, 2)
        layout.addWidget(pause_button, 5, 2)
        self.setLayout(layout)

        self.setWindowTitle('Tetrix')
        self.resize(550, 370)

    def createLabel(self, text):
        """Return a QLabel centred horizontally and anchored to the bottom."""
        label = QLabel(text)
        label.setAlignment(Qt.AlignHCenter | Qt.AlignBottom)
        return label
def transition(xs, out_channels, name=''):
    """HRNet-style transition layer between stages.

    Existing branches keep their resolution and only get a 3x3 conv block
    when their channel count differs from the target; every additional
    branch is created by stride-2 downsampling of the last input branch.
    """
    num_existing = len(xs)
    outputs = []
    for i, channels in enumerate(out_channels):
        prefix = name + '/b{}'.format(i + 1)
        if i < num_existing:
            # Existing branch: adjust channels only when they differ.
            x = xs[i]
            if x.shape[-1] != channels:
                x = Conv2D(channels, 3, 1, 'same', use_bias=False, name=(prefix + '/conv'))(x)
                x = BatchNormalization(name=(prefix + '/bn'))(x)
                x = ReLU(name=(prefix + '/relu'))(x)
        else:
            # New branch: downsample the last existing branch by stride 2.
            x = Conv2D(channels, 3, 2, 'same', use_bias=False, name=(prefix + '/conv'))(xs[-1])
            x = BatchNormalization(name=(prefix + '/bn'))(x)
            x = ReLU(name=(prefix + '/relu'))(x)
        outputs.append(x)
    return outputs
def GenerateProject(name, force=True):
    """Create and save a fresh project at `name`.

    With force=True any existing file or directory at the path is removed
    first; with force=False an existing path raises PyUnityException.
    """
    path = Path(name).resolve()
    if path.exists():
        if not force:
            # Refuse to clobber whatever currently occupies the path.
            if path.is_file():
                raise PyUnityException(f'File exists: {path}')
            raise PyUnityException(f'Directory exists: {path}')
        # Force mode: clear the way for the new project.
        if path.is_file():
            os.remove(path)
        else:
            shutil.rmtree(path)
    project = Project(path)
    SaveProject(project)
    return project
def test_pip_install(temporary_directory: Path, project_source_root: Path, python: str) -> None:
    """The PEP 517 backend fixture installs via pip and is listed afterwards."""
    backend_dir = temporary_directory / 'pep_517_backend'
    shutil.copytree(Path(__file__).parent.parent / 'fixtures/pep_517_backend', backend_dir)
    # Point the fixture's build-system at the project under test.
    with open(backend_dir / 'pyproject.toml', 'a') as config:
        config.write(BUILD_SYSTEM_TEMPLATE.format(project_path=project_source_root.as_posix()))
    subprocess_run(python, '-m', 'pip', 'install', backend_dir.as_posix())
    listing = subprocess_run(python, '-m', 'pip', 'show', 'foo')
    assert 'Name: foo' in listing.stdout
class TarRankSelectAlgo(CompRatioSelectAlgo):
    """TAR-based compression-ratio selection.

    Evaluates a set of candidate rank indices provided by the SVD library
    and picks the index minimising (1 - accuracy) + (1 - compression ratio).
    """

    def __init__(self, layer_db: LayerDatabase, pruner: Pruner, cost_calculator: cc.CostCalculator, eval_func: EvalFunction, eval_iterations, cost_metric: CostMetric, num_rank_indices: int, use_cuda: bool, pymo_utils_lib):
        # No rounding algorithm: TAR works directly on per-rank comp ratios.
        CompRatioSelectAlgo.__init__(self, layer_db, cost_calculator, cost_metric, comp_ratio_rounding_algo=None)
        self._eval_func = eval_func
        self._eval_iter = eval_iterations
        self._is_cuda = use_cuda
        self._pruner = pruner
        self._num_rank_indices = num_rank_indices
        self._svd_lib_ref = pymo.GetSVDInstance()
        self._pymo_utils_lib = pymo_utils_lib

    def _compute_compressed_model_cost(self, layer_ratio_list, original_model_cost):
        """Return the compressed/original cost ratio for the given layer ratios."""
        # Unselected layers are carried through uncompressed (ratio None).
        for layer in self._layer_db:
            if (layer not in self._layer_db.get_selected_layers()):
                layer_ratio_list.append(LayerCompRatioPair(layer, None))
        compressed_model_cost = self._cost_calculator.calculate_compressed_cost(self._layer_db, layer_ratio_list, self._cost_metric)
        if (self._cost_metric == CostMetric.memory):
            model_compression_ratio = Decimal((compressed_model_cost.memory / original_model_cost.memory))
        else:
            model_compression_ratio = Decimal((compressed_model_cost.mac / original_model_cost.mac))
        return model_compression_ratio

    def _compute_comp_ratios_and_eval_scores(self, rank_index):
        """Prune each selected layer alone at `rank_index` and score it.

        Returns (layer_ratio_list, list of LayerCompRatioEvalScore).
        """
        comp_ratio_eval_score_across_layers = []
        layer_ratio_list = []
        for layer in self._layer_db.get_selected_layers():
            rank = self._svd_lib_ref.GetCandidateRanks(str(layer.name), rank_index)
            comp_ratio = self._cost_calculator.calculate_comp_ratio_given_rank(layer, rank[0], self._cost_metric)
            # Evaluate the model with only this one layer compressed.
            pruned_layer_db = self._pruner.prune_model(self._layer_db, [LayerCompRatioPair(layer=layer, comp_ratio=comp_ratio)], self._cost_metric, None)
            eval_score = self._eval_func(pruned_layer_db.model, self._eval_iter, use_cuda=self._is_cuda)
            # Free the pruned copy before moving to the next layer.
            pruned_layer_db.destroy()
            pruned_layer_db = None
            comp_ratio_eval_score_across_layers.append(LayerCompRatioEvalScore(layer, comp_ratio, eval_score))
            layer_ratio_list.append(LayerCompRatioPair(layer=layer, comp_ratio=comp_ratio))
        return (layer_ratio_list, comp_ratio_eval_score_across_layers)

    def select_per_layer_comp_ratios(self):
        """Pick per-layer compression ratios via the best candidate rank index."""
        self._pymo_utils_lib.PymoSvdUtils.configure_layers_in_pymo_svd(self._layer_db.get_selected_layers(), self._cost_metric, self._svd_lib_ref)
        # The SVD library may clamp the requested number of candidate ranks.
        num_rank_indices = self._svd_lib_ref.SetCandidateRanks(self._num_rank_indices)
        original_model_cost = self._cost_calculator.compute_model_cost(self._layer_db)
        comp_ratio_eval_score_across_layers = {}
        rank_index_objective_score_map = {}
        for rank_index in range(num_rank_indices):
            (layer_ratio_list, comp_ratio_eval_score_across_layers[rank_index]) = self._compute_comp_ratios_and_eval_scores(rank_index)
            # Score the model with all selected layers compressed together.
            pruned_layer_db = self._pruner.prune_model(self._layer_db, comp_ratio_eval_score_across_layers[rank_index], self._cost_metric, trainer=None)
            model_accuracy = self._eval_func(pruned_layer_db.model, self._eval_iter, use_cuda=self._is_cuda)
            pruned_layer_db.destroy()
            pruned_layer_db = None
            model_compression_ratio = self._compute_compressed_model_cost(layer_ratio_list, original_model_cost)
            # TAR objective: jointly minimise accuracy loss and residual size.
            rank_index_objective_score_map[rank_index] = (float((1 - model_accuracy)) + float((1 - model_compression_ratio)))
        best_rank_index = min(rank_index_objective_score_map.keys(), key=(lambda k: rank_index_objective_score_map[k]))
        return (comp_ratio_eval_score_across_layers[best_rank_index], TarCompressionRatioSelectionStats(comp_ratio_eval_score_across_layers[best_rank_index]))
class Model_inter_pad_without_BN(torch.nn.Module):
    """Conv/ReLU stack mixing strided, padded and unpadded 2x2 convolutions,
    with no batch-norm layers and no conv biases."""

    def __init__(self):
        super(Model_inter_pad_without_BN, self).__init__()
        # Expected input shape (N, C, H, W) for this model.
        self.except_shape = (2, 3, 32, 32)
        conv = torch.nn.Conv2d
        relu = torch.nn.ReLU
        # Downsampling section: stride-2 convolutions, mixed padding.
        self.conv1 = conv(3, 32, kernel_size=2, stride=2, padding=2, bias=False)
        self.relu1 = relu()
        self.conv2 = conv(32, 32, kernel_size=2, stride=2, padding=2, bias=False)
        self.conv3 = conv(32, 32, kernel_size=2, stride=2, padding=0, bias=False)
        self.relu2 = relu()
        self.conv4 = conv(32, 32, kernel_size=2, stride=2, padding=2, bias=False)
        # Stride-1 section.
        self.conv5 = conv(32, 32, kernel_size=2, padding=2, bias=False)
        self.relu3 = relu()
        self.conv6 = conv(32, 32, kernel_size=2, padding=0, bias=False)
        self.conv7 = conv(32, 32, kernel_size=2, padding=0, bias=False)
        self.relu4 = relu()
        self.conv8 = conv(32, 32, kernel_size=2, padding=0, bias=False)

    def forward(self, x):
        # Apply the layers strictly in definition order.
        for layer in (self.conv1, self.relu1, self.conv2, self.conv3,
                      self.relu2, self.conv4, self.conv5, self.relu3,
                      self.conv6, self.conv7, self.relu4, self.conv8):
            x = layer(x)
        return x
class FastPath():
    """Micro-optimised wrapper over a sys.path entry (directory or zip).

    NOTE(review): the bare "_cache()" and "_cache" lines below look like
    decorators whose "@" (and possibly full name, e.g. an lru_cache or a
    method-cache helper) was stripped in transit — confirm against the
    original source. Likewise `search` passes `self.mtime` uncalled,
    which suggests `mtime` was decorated as a property upstream.
    """
    _cache()
    def __new__(cls, root):
        return super().__new__(cls)

    def __init__(self, root):
        # Path entry this object wraps (may be '' meaning the CWD).
        self.root = root

    def joinpath(self, child):
        return pathlib.Path(self.root, child)

    def children(self):
        # Directory listing first; fall back to zip contents; else empty.
        with suppress(Exception):
            return os.listdir((self.root or '.'))
        with suppress(Exception):
            return self.zip_children()
        return []

    def zip_children(self):
        zip_path = zipp.Path(self.root)
        names = zip_path.root.namelist()
        # Future joinpath calls are served directly by the zip path object.
        self.joinpath = zip_path.joinpath
        # Top-level entries only, de-duplicated while preserving order.
        return dict.fromkeys((child.split(posixpath.sep, 1)[0] for child in names))

    def search(self, name):
        return self.lookup(self.mtime).search(name)

    def mtime(self):
        with suppress(OSError):
            return os.stat(self.root).st_mtime
        # Root is gone: drop the cached lookup so it is rebuilt next time.
        self.lookup.cache_clear()

    _cache
    def lookup(self, mtime):
        return Lookup(self)
def _parse_bool(value):
    """Parse a CLI string into a bool; 'false'/'0'/'no' really mean False."""
    if isinstance(value, bool):
        return value
    lowered = value.lower()
    if lowered in ('true', '1', 'yes', 'y', 't'):
        return True
    if lowered in ('false', '0', 'no', 'n', 'f'):
        return False
    raise argparse.ArgumentTypeError(f'Boolean value expected, got {value!r}')


def parse_args():
    """Parse and validate the command-line arguments for MLM fine-tuning.

    Returns:
        argparse.Namespace with all training options.

    Raises:
        ValueError: when neither a dataset name nor train/validation files
            are given, or when a provided file has an unsupported extension.
    """
    parser = argparse.ArgumentParser(description='Finetune a transformers model on a Masked Language Modeling task')
    parser.add_argument('--dataset_name', type=str, default=None, help='The name of the dataset to use (via the datasets library).')
    parser.add_argument('--dataset_config_name', type=str, default=None, help='The configuration name of the dataset to use (via the datasets library).')
    parser.add_argument('--train_file', type=str, default=None, help='A csv or a json file containing the training data.')
    parser.add_argument('--validation_file', type=str, default=None, help='A csv or a json file containing the validation data.')
    parser.add_argument('--validation_split_percentage', default=5, help="The percentage of the train set used as validation set in case there's no validation split")
    parser.add_argument('--pad_to_max_length', action='store_true', help='If passed, pad all samples to `max_length`. Otherwise, dynamic padding is used.')
    parser.add_argument('--model_name_or_path', type=str, help='Path to pretrained model or model identifier from huggingface.co/models.', required=False)
    parser.add_argument('--config_name', type=str, default=None, help='Pretrained config name or path if not the same as model_name')
    parser.add_argument('--tokenizer_name', type=str, default=None, help='Pretrained tokenizer name or path if not the same as model_name')
    parser.add_argument('--use_slow_tokenizer', action='store_true', help='If passed, will use a slow tokenizer (not backed by the Tokenizers library).')
    parser.add_argument('--per_device_train_batch_size', type=int, default=8, help='Batch size (per device) for the training dataloader.')
    parser.add_argument('--per_device_eval_batch_size', type=int, default=8, help='Batch size (per device) for the evaluation dataloader.')
    parser.add_argument('--learning_rate', type=float, default=5e-05, help='Initial learning rate (after the potential warmup period) to use.')
    parser.add_argument('--weight_decay', type=float, default=0.0, help='Weight decay to use.')
    parser.add_argument('--num_train_epochs', type=int, default=3, help='Total number of training epochs to perform.')
    parser.add_argument('--max_train_steps', type=int, default=None, help='Total number of training steps to perform. If provided, overrides num_train_epochs.')
    parser.add_argument('--gradient_accumulation_steps', type=int, default=1, help='Number of updates steps to accumulate before performing a backward/update pass.')
    parser.add_argument('--lr_scheduler_type', type=SchedulerType, default='linear', help='The scheduler type to use.', choices=['linear', 'cosine', 'cosine_with_restarts', 'polynomial', 'constant', 'constant_with_warmup'])
    parser.add_argument('--num_warmup_steps', type=int, default=0, help='Number of steps for the warmup in the lr scheduler.')
    parser.add_argument('--output_dir', type=str, default=None, help='Where to store the final model.')
    parser.add_argument('--seed', type=int, default=None, help='A seed for reproducible training.')
    parser.add_argument('--model_type', type=str, default=None, help='Model type to use if training from scratch.', choices=MODEL_TYPES)
    parser.add_argument('--max_seq_length', type=int, default=None, help='The maximum total input sequence length after tokenization. Sequences longer than this will be truncated.')
    # Bug fix: the original used type=bool, for which any non-empty string
    # (including "False") parses as True; _parse_bool handles both spellings.
    parser.add_argument('--line_by_line', type=_parse_bool, default=False, help='Whether distinct lines of text in the dataset are to be handled as distinct sequences.')
    parser.add_argument('--preprocessing_num_workers', type=int, default=None, help='The number of processes to use for the preprocessing.')
    parser.add_argument('--overwrite_cache', action='store_true', help='Overwrite the cached training and evaluation sets')
    parser.add_argument('--mlm_probability', type=float, default=0.15, help='Ratio of tokens to mask for masked language modeling loss')
    parser.add_argument('--push_to_hub', action='store_true', help='Whether or not to push the model to the Hub.')
    parser.add_argument('--hub_model_id', type=str, help='The name of the repository to keep in sync with the local `output_dir`.')
    parser.add_argument('--hub_token', type=str, help='The token to use to push to the Model Hub.')
    parser.add_argument('--checkpointing_steps', type=str, default=None, help="Whether the various states should be saved at the end of every n steps, or 'epoch' for each epoch.")
    parser.add_argument('--resume_from_checkpoint', type=str, default=None, help='If the training should continue from a checkpoint folder.')
    parser.add_argument('--with_tracking', action='store_true', help='Whether to enable experiment trackers for logging.')
    parser.add_argument('--report_to', type=str, default='all', help='The integration to report the results and logs to. Supported platforms are `"tensorboard"`, `"wandb"`, `"comet_ml"` and `"clearml"`. Use `"all"` (default) to report to all integrations.Only applicable when `--with_tracking` is passed.')
    parser.add_argument('--low_cpu_mem_usage', action='store_true', help='It is an option to create the model as an empty shell, then only materialize its parameters when the pretrained weights are loaded.If passed, LLM loading time and RAM consumption will be benefited.')
    args = parser.parse_args()
    # Sanity-check the data sources: need a dataset or local files with
    # recognised extensions.
    if ((args.dataset_name is None) and (args.train_file is None) and (args.validation_file is None)):
        raise ValueError('Need either a dataset name or a training/validation file.')
    else:
        if (args.train_file is not None):
            extension = args.train_file.split('.')[(- 1)]
            if (extension not in ['csv', 'json', 'txt']):
                raise ValueError('`train_file` should be a csv, json or txt file.')
        if (args.validation_file is not None):
            extension = args.validation_file.split('.')[(- 1)]
            if (extension not in ['csv', 'json', 'txt']):
                raise ValueError('`validation_file` should be a csv, json or txt file.')
    if args.push_to_hub:
        assert (args.output_dir is not None), 'Need an `output_dir` to create a repo when `--push_to_hub` is passed.'
    return args
def test_run_installs_with_local_poetry_file_transitive(installer: Installer, locker: Locker, repo: Repository, package: ProjectPackage, tmpdir: str, fixture_dir: FixtureDirGetter) -> None:
    """Installing a path dependency also installs its transitive file dependencies."""
    root_dir = fixture_dir('directory')
    package.root_dir = root_dir
    locker.set_lock_path(root_dir)
    project_dir = fixture_dir('directory').joinpath('project_with_transitive_file_dependencies')
    dependency = Factory.create_dependency(
        'project-with-transitive-file-dependencies',
        {'path': str(project_dir.relative_to(root_dir))},
        root_dir=root_dir,
    )
    package.add_dependency(dependency)
    # Transitive dependencies the repository must be able to serve.
    for pkg_name, pkg_version in (('pendulum', '1.4.4'), ('cachy', '0.2.0')):
        repo.add_package(get_package(pkg_name, pkg_version))
    result = installer.run()
    assert (result == 0)
    expected = fixture('with-file-dependency-transitive')
    assert (locker.written_data == expected)
    assert (installer.executor.installations_count == 4)
# NOTE(review): this bare "_content" looks like a decorator whose "@"
# (and possibly prefix) was stripped in transit — confirm against the
# original source module.
_content
class WaveformPromise(Object):
    """Placeholder describing waveform data that can be fetched on demand."""

    codes = CodesNSLCE.T()
    tmin = Timestamp.T()
    tmax = Timestamp.T()
    deltat = Float.T(optional=True)
    source_hash = String.T()

    def __init__(self, **kwargs):
        # Normalise whatever was passed for codes into a CodesNSLCE instance.
        kwargs['codes'] = CodesNSLCE(kwargs['codes'])
        Object.__init__(self, **kwargs)

    def time_span(self):
        """Return the (tmin, tmax) interval this promise covers."""
        return (self.tmin, self.tmax)
def print_help() -> None:
    """Print the UltraSinger command-line usage text to stdout.

    NOTE(review): in the dump this literal was split across two physical
    source lines (which would be a syntax error); it is rejoined here with
    a single space — confirm against the original file.
    """
    help_string = '\n    UltraSinger.py [opt] [mode] [transcription] [pitcher] [extra]\n    \n    [opt]\n    -h      This help text.\n    -i      Ultrastar.txt\n            audio like .mp3, .wav, youtube link\n    -o      Output folder\n    \n    [mode]\n    ## INPUT is audio ##\n    default  Creates all\n    \n    # Single file creation selection is in progress, you currently getting all!\n    (-u      Create ultrastar txt file) # In Progress\n    (-m      Create midi file) # In Progress\n    (-s      Create sheet file) # In Progress\n    \n    ## INPUT is ultrastar.txt ##\n    default  Creates all\n\n    # Single selection is in progress, you currently getting all!\n    (-r      repitch Ultrastar.txt (input has to be audio)) # In Progress\n    (-p      Check pitch of Ultrastar.txt input) # In Progress\n    (-m      Create midi file) # In Progress\n\n    [transcription]\n    # Default is whisper\n    --whisper               Multilingual model > tiny|base|small|medium|large-v1|large-v2  >> ((default) is large-v2\n                            English-only model > tiny.en|base.en|small.en|medium.en\n    --whisper_align_model   Use other languages model for Whisper provided from huggingface.co\n    --language              Override the language detected by whisper, does not affect transcription but steps after transcription\n    --whisper_batch_size    Reduce if low on GPU mem >> ((default) is 16)\n    --whisper_compute_type  Change to "int8" if low on GPU mem (may reduce accuracy) >> ((default) is "float16" for cuda devices, "int8" for cpu)\n    \n    [pitcher]\n    # Default is crepe\n    --crepe            tiny|full >> ((default) is full)\n    --crepe_step_size  unit is miliseconds >> ((default) is 10)\n    \n    [extra]\n    --hyphenation           True|False >> ((default) is True)\n    --disable_separation    True|False >> ((default) is False)\n    --disable_karaoke       True|False >> ((default) is False)\n    --create_audio_chunks   True|False >> ((default) is False)\n    --plot                  True|False >> ((default) is False)\n    --format_version        0.3.0|1.0.0|1.1.0 >> ((default) is 1.0.0)\n    \n    [device]\n    --force_cpu             True|False >> ((default) is False)  All steps will be forced to cpu\n    --force_whisper_cpu     True|False >> ((default) is False)  Only whisper will be forced to cpu\n    --force_crepe_cpu       True|False >> ((default) is False)  Only crepe will be forced to cpu\n    '
    print(help_string)
def test_hookwrapper() -> None:
    """A hookwrapper (m1) should run before and after the wrapped hook (m2)."""
    out = []
    # NOTE(review): the bare `(hookwrapper=True)` below appears to be a
    # decorator whose name was stripped (likely `@hookimpl(hookwrapper=True)`)
    # — as written this line is not valid Python; confirm against the original.
    (hookwrapper=True)
    def m1():
        # Wrapper: record entry, yield control to the wrapped hooks, record exit.
        out.append('m1 init')
        (yield None)
        out.append('m1 finish')
    def m2():
        out.append('m2')
        return 2
    res = MC([m2, m1], {})
    assert (res == [2])
    assert (out == ['m1 init', 'm2', 'm1 finish'])
    out[:] = []
    # With firstresult=True the call returns the single value, not a list.
    res = MC([m2, m1], {}, firstresult=True)
    assert (res == 2)
    assert (out == ['m1 init', 'm2', 'm1 finish'])
class VideoTestDUFDataset(VideoTestDataset):
    """Video test dataset for DUF: like VideoTestDataset, but the LQ clip can
    be synthesized on the fly by DUF-downsampling the GT frames."""

    def __getitem__(self, index):
        """Return a dict with the LQ clip, the center GT frame and metadata."""
        info = self.data_info
        folder = info['folder'][index]
        idx_str = info['idx'][index]
        idx, max_idx = (int(v) for v in idx_str.split('/'))
        border = info['border'][index]
        lq_path = info['lq_path'][index]
        # Neighbouring frame indices around `idx`, padded at clip boundaries.
        select_idx = generate_frame_indices(idx, max_idx, self.opt['num_frame'], padding=self.opt['padding'])
        if self.cache_data:
            if self.opt['use_duf_downsampling']:
                # Derive the LQ clip from cached GT frames via DUF downsampling.
                imgs_lq = self.imgs_gt[folder].index_select(0, torch.LongTensor(select_idx))
                imgs_lq = duf_downsample(imgs_lq, kernel_size=13, scale=self.opt['scale'])
            else:
                imgs_lq = self.imgs_lq[folder].index_select(0, torch.LongTensor(select_idx))
            img_gt = self.imgs_gt[folder][idx]
        else:
            if self.opt['use_duf_downsampling']:
                # Read GT frames (mod-cropped) from disk and downsample to LQ.
                img_paths_lq = [self.imgs_gt[folder][i] for i in select_idx]
                imgs_lq = read_img_seq(img_paths_lq, require_mod_crop=True, scale=self.opt['scale'])
                imgs_lq = duf_downsample(imgs_lq, kernel_size=13, scale=self.opt['scale'])
            else:
                img_paths_lq = [self.imgs_lq[folder][i] for i in select_idx]
                imgs_lq = read_img_seq(img_paths_lq)
            img_gt = read_img_seq([self.imgs_gt[folder][idx]], require_mod_crop=True, scale=self.opt['scale'])
            img_gt.squeeze_(0)
        return {
            'lq': imgs_lq,
            'gt': img_gt,
            'folder': folder,
            'idx': idx_str,
            'border': border,
            'lq_path': lq_path,
        }
class PollViewTests(TestCase):
    """Integration tests for the polls index view."""

    def test_index_view_with_no_polls(self):
        """With no polls, an informative message and an empty list are shown."""
        response = self.client.get(reverse('polls:index'))
        self.assertEqual(response.status_code, 200)
        self.assertContains(response, 'No polls are available.')
        self.assertQuerysetEqual(response.context['latest_poll_list'], [])

    def test_index_view_with_a_past_poll(self):
        """A poll published in the past appears on the index page."""
        create_poll(question='Past poll.', days=-30)
        response = self.client.get(reverse('polls:index'))
        self.assertQuerysetEqual(response.context['latest_poll_list'], ['<Poll: Past poll.>'])

    def test_index_view_with_a_future_poll(self):
        """A poll scheduled for the future is hidden from the index page."""
        create_poll(question='Future poll.', days=30)
        response = self.client.get(reverse('polls:index'))
        self.assertContains(response, 'No polls are available.', status_code=200)
        self.assertQuerysetEqual(response.context['latest_poll_list'], [])

    def test_index_view_with_future_poll_and_past_poll(self):
        """Only the past poll is listed when both past and future polls exist."""
        create_poll(question='Past poll.', days=-30)
        create_poll(question='Future poll.', days=30)
        response = self.client.get(reverse('polls:index'))
        self.assertQuerysetEqual(response.context['latest_poll_list'], ['<Poll: Past poll.>'])

    def test_index_view_with_two_past_polls(self):
        """Multiple past polls are listed, most recent first."""
        create_poll(question='Past poll 1.', days=-30)
        create_poll(question='Past poll 2.', days=-5)
        response = self.client.get(reverse('polls:index'))
        self.assertQuerysetEqual(response.context['latest_poll_list'], ['<Poll: Past poll 2.>', '<Poll: Past poll 1.>'])
class AsmCmdGotoRelation(AsmCmdBase):
    """FreeCAD command: select the relation-group entry matching the current
    selection (or the in-progress move).

    NOTE(review): the methods below take `cls` and read like `@classmethod`s
    whose decorators were stripped — confirm against the original source.
    """
    _id = 16
    _menuText = QT_TRANSLATE_NOOP('asm3', 'Go to relation')
    _tooltip = QT_TRANSLATE_NOOP('asm3', 'Select the corresponding part object in the relation group')
    _iconName = 'Assembly_GotoRelation.svg'
    _accel = 'A, R'
    _toolbarName = ''
    _cmdType = 'NoTransaction'
    def Activated(cls):
        # Prefer the relation of an in-progress move; otherwise use the
        # current single sub-element selection.
        from .assembly import AsmRelationGroup
        if AsmCmdMove._moveInfo:
            AsmRelationGroup.gotoRelation(AsmCmdMove._moveInfo)
            return
        sels = FreeCADGui.Selection.getSelectionEx('', 0, True)
        if (sels and (len(sels[0].SubElementNames) == 1)):
            AsmRelationGroup.gotoRelationOfConstraint(sels[0].Object, sels[0].SubElementNames[0])
    def IsActive(cls):
        # Always active during a move; otherwise use the lazily computed,
        # selection-derived cached state.
        if AsmCmdMove._moveInfo:
            return True
        if (cls._active is None):
            cls.checkActive()
        return cls._active
    def checkActive(cls):
        from .assembly import isTypeOf, AsmConstraint, AsmElementLink
        sels = FreeCADGui.Selection.getSelection('', 1, True)
        # Enabled only when the first selected object is a constraint or an
        # element link.
        if (sels and isTypeOf(sels[0], (AsmConstraint, AsmElementLink))):
            cls._active = True
        else:
            cls._active = False
    def onSelectionChange(cls, hasSelection):
        # New selection: invalidate the cache (None = recompute on demand);
        # empty selection: disable outright.
        cls._active = (None if hasSelection else False)
def test_run_with_custom_runner():
    """The string loader should run a YAML pipeline passed by value."""
    pipeline_text = dedent(' steps:\n - name: pypyr.steps.set\n in:\n set:\n test: 1\n ')
    result_context = pipelinerunner.run(pipeline_name=pipeline_text, loader='pypyr.loaders.string')
    assert result_context['test'] == 1
def convert_roberta_checkpoint_to_pytorch(roberta_checkpoint_path: str, pytorch_dump_folder_path: str, classification_head: bool):
    """Convert a fairseq RoBERTa checkpoint to a HuggingFace PyTorch model.

    Copies the embeddings, each transformer layer, and either the 'mnli'
    classification head or the LM head; verifies that both models produce
    (nearly) identical outputs on a sample input before saving.

    Args:
        roberta_checkpoint_path: path to the fairseq checkpoint directory.
        pytorch_dump_folder_path: output directory for the converted model.
        classification_head: if True, convert the 'mnli' classification head
            instead of the masked-LM head.

    Raises:
        Exception: if the converted model's output diverges from fairseq's.
    """
    roberta = FairseqRobertaModel.from_pretrained(roberta_checkpoint_path)
    roberta.eval()
    roberta_sent_encoder = roberta.model.encoder.sentence_encoder
    # Mirror the fairseq architecture hyper-parameters in the HF config.
    config = RobertaConfig(vocab_size=roberta_sent_encoder.embed_tokens.num_embeddings, hidden_size=roberta.args.encoder_embed_dim, num_hidden_layers=roberta.args.encoder_layers, num_attention_heads=roberta.args.encoder_attention_heads, intermediate_size=roberta.args.encoder_ffn_embed_dim, max_position_embeddings=514, type_vocab_size=1, layer_norm_eps=1e-05)
    if classification_head:
        # Number of labels is read off the pretrained head's output projection.
        config.num_labels = roberta.model.classification_heads['mnli'].out_proj.weight.shape[0]
    print('Our BERT config:', config)
    model = (RobertaForSequenceClassification(config) if classification_head else RobertaForMaskedLM(config))
    model.eval()
    # --- Embeddings ---
    model.roberta.embeddings.word_embeddings.weight = roberta_sent_encoder.embed_tokens.weight
    model.roberta.embeddings.position_embeddings.weight = roberta_sent_encoder.embed_positions.weight
    # RoBERTa has no token-type embeddings; zero them so they are a no-op.
    model.roberta.embeddings.token_type_embeddings.weight.data = torch.zeros_like(model.roberta.embeddings.token_type_embeddings.weight)
    model.roberta.embeddings.LayerNorm.weight = roberta_sent_encoder.emb_layer_norm.weight
    model.roberta.embeddings.LayerNorm.bias = roberta_sent_encoder.emb_layer_norm.bias
    # --- Transformer layers (copied one-to-one) ---
    for i in range(config.num_hidden_layers):
        layer: BertLayer = model.roberta.encoder.layer[i]
        roberta_layer: TransformerSentenceEncoderLayer = roberta_sent_encoder.layers[i]
        # Self attention: q/k/v projections.
        self_attn: BertSelfAttention = layer.attention.self
        assert (roberta_layer.self_attn.k_proj.weight.data.shape == roberta_layer.self_attn.q_proj.weight.data.shape == roberta_layer.self_attn.v_proj.weight.data.shape == torch.Size((config.hidden_size, config.hidden_size)))
        self_attn.query.weight.data = roberta_layer.self_attn.q_proj.weight
        self_attn.query.bias.data = roberta_layer.self_attn.q_proj.bias
        self_attn.key.weight.data = roberta_layer.self_attn.k_proj.weight
        self_attn.key.bias.data = roberta_layer.self_attn.k_proj.bias
        self_attn.value.weight.data = roberta_layer.self_attn.v_proj.weight
        self_attn.value.bias.data = roberta_layer.self_attn.v_proj.bias
        # Self-attention output projection + layer norm.
        self_output: BertSelfOutput = layer.attention.output
        assert (self_output.dense.weight.shape == roberta_layer.self_attn.out_proj.weight.shape)
        self_output.dense.weight = roberta_layer.self_attn.out_proj.weight
        self_output.dense.bias = roberta_layer.self_attn.out_proj.bias
        self_output.LayerNorm.weight = roberta_layer.self_attn_layer_norm.weight
        self_output.LayerNorm.bias = roberta_layer.self_attn_layer_norm.bias
        # Feed-forward: intermediate (fc1) and output (fc2) + final layer norm.
        intermediate: BertIntermediate = layer.intermediate
        assert (intermediate.dense.weight.shape == roberta_layer.fc1.weight.shape)
        intermediate.dense.weight = roberta_layer.fc1.weight
        intermediate.dense.bias = roberta_layer.fc1.bias
        bert_output: BertOutput = layer.output
        assert (bert_output.dense.weight.shape == roberta_layer.fc2.weight.shape)
        bert_output.dense.weight = roberta_layer.fc2.weight
        bert_output.dense.bias = roberta_layer.fc2.bias
        bert_output.LayerNorm.weight = roberta_layer.final_layer_norm.weight
        bert_output.LayerNorm.bias = roberta_layer.final_layer_norm.bias
    # --- Task head ---
    if classification_head:
        model.classifier.dense.weight = roberta.model.classification_heads['mnli'].dense.weight
        model.classifier.dense.bias = roberta.model.classification_heads['mnli'].dense.bias
        model.classifier.out_proj.weight = roberta.model.classification_heads['mnli'].out_proj.weight
        model.classifier.out_proj.bias = roberta.model.classification_heads['mnli'].out_proj.bias
    else:
        model.lm_head.dense.weight = roberta.model.encoder.lm_head.dense.weight
        model.lm_head.dense.bias = roberta.model.encoder.lm_head.dense.bias
        model.lm_head.layer_norm.weight = roberta.model.encoder.lm_head.layer_norm.weight
        model.lm_head.layer_norm.bias = roberta.model.encoder.lm_head.layer_norm.bias
        model.lm_head.decoder.weight = roberta.model.encoder.lm_head.weight
        model.lm_head.decoder.bias = roberta.model.encoder.lm_head.bias
    # --- Verification: both models must agree on a sample input ---
    input_ids: torch.Tensor = roberta.encode(SAMPLE_TEXT).unsqueeze(0)
    our_output = model(input_ids)[0]
    if classification_head:
        their_output = roberta.model.classification_heads['mnli'](roberta.extract_features(input_ids))
    else:
        their_output = roberta.model(input_ids)[0]
    print(our_output.shape, their_output.shape)
    max_absolute_diff = torch.max(torch.abs((our_output - their_output))).item()
    print(f'max_absolute_diff = {max_absolute_diff}')
    success = torch.allclose(our_output, their_output, atol=0.001)
    # NOTE(review): the two empty strings below look like stripped emoji/text
    # markers for success/failure — confirm against the original source.
    print('Do both models output the same tensors?', ('' if success else ''))
    if (not success):
        raise Exception('Something went wRoNg')
    pathlib.Path(pytorch_dump_folder_path).mkdir(parents=True, exist_ok=True)
    print(f'Saving model to {pytorch_dump_folder_path}')
    model.save_pretrained(pytorch_dump_folder_path)
def _create_args_parser(cmpnt_fn: Callable[(..., AppDef)], cmpnt_defaults: Optional[Dict[(str, str)]]=None) -> argparse.ArgumentParser:
    """Build an argparse parser for a component function from its signature.

    Parameter names, types and defaults are reflected from ``cmpnt_fn``'s
    signature; per-parameter help text comes from its parsed docstring.

    Args:
        cmpnt_fn: component function whose signature drives the parser.
        cmpnt_defaults: optional per-parameter default overrides (take
            precedence over the signature defaults).

    Returns:
        An ArgumentParser whose arguments mirror the function's parameters.
    """
    parameters = inspect.signature(cmpnt_fn).parameters
    (function_desc, args_desc) = get_fn_docstring(cmpnt_fn)
    script_parser = argparse.ArgumentParser(prog=f'torchx run <run args...> {cmpnt_fn.__name__} ', description=function_desc, formatter_class=TorchXArgumentHelpFormatter, add_help=False)
    # add_help=False above + explicit --help keeps `-h` free for use as a
    # short component flag.
    script_parser.add_argument('--help', action='help', default=argparse.SUPPRESS, help='show this help message and exit')
    class _reminder_action(argparse.Action):
        # For REMAINDER (*args-style) parameters: when no values are given on
        # the CLI, fall back to the (string) default split on whitespace.
        def __call__(self, parser: argparse.ArgumentParser, namespace: argparse.Namespace, values: Any, option_string: Optional[str]=None) -> None:
            setattr(namespace, self.dest, ((self.default or '').split() if (len(values) == 0) else values))
    for (param_name, parameter) in parameters.items():
        param_desc = args_desc[parameter.name]
        args: Dict[(str, Any)] = {'help': param_desc, 'type': get_argparse_param_type(parameter)}
        if (parameter.default != inspect.Parameter.empty):
            # Booleans are passed through as their string form so the type
            # converter can parse them.
            if is_bool(type(parameter.default)):
                args['default'] = str(parameter.default)
            else:
                args['default'] = parameter.default
        if (cmpnt_defaults and (param_name in cmpnt_defaults)):
            # Caller-supplied defaults override signature defaults.
            args['default'] = cmpnt_defaults[param_name]
        if (parameter.kind == inspect._ParameterKind.VAR_POSITIONAL):
            # *args-style parameter: consume the rest of the command line.
            args['nargs'] = argparse.REMAINDER
            args['action'] = _reminder_action
            script_parser.add_argument(param_name, **args)
        else:
            # Named flag; single-letter parameters also get a short `-x` alias.
            arg_names = [f'--{param_name}']
            if (len(param_name) == 1):
                arg_names = ([f'-{param_name}'] + arg_names)
            if ('default' not in args):
                args['required'] = True
            script_parser.add_argument(*arg_names, **args)
    return script_parser
class SaveDataAction(Action):
    """Action that saves an attribute/value pair onto previous PSP object(s).

    Mandatory params: 'attribute', 'value'.  Optional: 'display', 'pspid',
    'config'.  A handful of identifying attributes are always stored as
    configuration data regardless of the 'config' param.
    """
    mandatoryparams = ['attribute', 'value']
    optionalparams = ['display', 'pspid', 'config']

    def __init__(self, params, **kwargs):
        Action.__init__(self, execparams=params, **kwargs)

    def Execute(self):
        """Save the configured attribute onto the targeted (or all) prior PSPs.

        Returns:
            True on success; False when no previous PSP objects are available.
        """
        Action.Execute(self)
        attrib = self.execparams['attribute']
        value = self.execparams['value']
        pspid = self.execparams.get('pspid', None)
        config = (self.execparams.get('config', 'false').lower() == 'true')
        if attrib in ['vendor', 'product', 'version', 'installDate']:
            # Identifying attributes are always persisted as configuration.
            config = True
        try:
            prevPSPs = self.actmgr.GetPSPsFrom(self.parent.parent)
        except Exception:
            psplog.error('Error getting previous PSP objects. Make sure there is a NewSWAction before you try to save to it.', exc_info=True)
            return False
        if pspid is not None:
            # BUGFIX: the original used `filter(...)[0]`, which fails on
            # Python 3 (filter returns an iterator).  The list comprehension
            # keeps the IndexError-on-no-match behavior of the Python 2 code.
            prevPSP = [p for p in prevPSPs if p.pspid == pspid][0]
            if (prevPSP[attrib] is not None) and (prevPSP[attrib] != 'NTR'):
                # Attribute already has a value: deep-copy the PSP so the old
                # record is preserved before overwriting.
                psplog.debug('before copy: {0}'.format(prevPSP))
                prevPSP = copy.deepcopy(prevPSP)
                psplog.debug('after copy: {0}'.format(prevPSP))
                self.actmgr.addPSP(prevPSP, self.parent)
            prevPSP.SaveAttribute(attrib, value, config)
        else:
            # No pspid: walk every previous PSP; those that already carry a
            # value for this attribute are copied and updated.
            # NOTE(review): PSPs without an existing value are left untouched
            # here (unlike the pspid branch) — confirm this is intentional.
            for prevPSP in prevPSPs:
                if (prevPSP[attrib] is not None) and (prevPSP[attrib] != 'NTR'):
                    psplog.debug('prevPSP: {0}'.format(prevPSP))
                    newPSP = copy.deepcopy(prevPSP)
                    psplog.debug('newPSP: {0}'.format(newPSP))
                    self.actmgr.addPSP(newPSP, self.parent)
                    newPSP.SaveAttribute(attrib, value, config)
        return True
class StrideWrapper(Dataset):
    """Expose every `stride`-th element of `dataset` as a smaller dataset."""

    def __init__(self, dataset, stride):
        self.dataset = dataset
        # Precompute the original indices exposed: 0, stride, 2*stride, ...
        self.index2old_index = [i * stride for i in range(len(dataset) // stride)]

    def __getitem__(self, index):
        """Map the strided index back to the wrapped dataset's index."""
        return self.dataset[self.index2old_index[index]]

    def __len__(self):
        return len(self.index2old_index)
class FileTransferSpeed(ProgressBarWidget):
    """Widget showing the transfer rate, auto-scaled through B/K/M/G/T/P."""

    def __init__(self):
        self.fmt = '%6.2f %s'
        self.units = ['B', 'K', 'M', 'G', 'T', 'P']

    def update(self, pbar):
        """Render the current speed of `pbar`, e.g. '  1.23 M/s'."""
        if pbar.seconds_elapsed < 2e-06:
            # Too little elapsed time for a meaningful rate.
            spd = 0.0
        else:
            spd = float(pbar.currval) / pbar.seconds_elapsed
        # Scale down by 1000 per unit until the value fits.
        for u in self.units:
            if spd < 1000:
                break
            spd /= 1000
        return self.fmt % (spd, u + '/s')
class ModelParallelSparseOnlyBase(unittest.TestCase):
    """Single-rank sharding tests: EBC variants passed directly to DMP."""

    def tearDown(self) -> None:
        # Each test creates its own process group; tear it down between tests.
        dist.destroy_process_group()

    def test_sharding_ebc_as_top_level(self) -> None:
        """An EmbeddingBagCollection given as the top-level module should come
        back as a ShardedEmbeddingBagCollection after DMP wrapping."""
        # Single-process "distributed" environment: rank 0 of world size 1.
        os.environ['RANK'] = '0'
        os.environ['WORLD_SIZE'] = '1'
        os.environ['LOCAL_WORLD_SIZE'] = '1'
        os.environ['MASTER_ADDR'] = str('localhost')
        os.environ['MASTER_PORT'] = str(get_free_port())
        os.environ['NCCL_SOCKET_IFNAME'] = 'lo'
        # Use NCCL on GPU when available, otherwise gloo on CPU.
        if torch.cuda.is_available():
            curr_device = torch.device('cuda:0')
            torch.cuda.set_device(curr_device)
            backend = 'nccl'
        else:
            curr_device = torch.device('cpu')
            backend = 'gloo'
        dist.init_process_group(backend=backend)
        embedding_dim = 128
        num_embeddings = 256
        # Build the collection on the meta device; DMP materializes the shards.
        ebc = EmbeddingBagCollection(device=torch.device('meta'), tables=[EmbeddingBagConfig(name='large_table', embedding_dim=embedding_dim, num_embeddings=num_embeddings, feature_names=['my_feature'], pooling=PoolingType.SUM)])
        model = DistributedModelParallel(ebc, device=curr_device)
        self.assertTrue(isinstance(model.module, ShardedEmbeddingBagCollection))

    def test_sharding_fused_ebc_as_top_level(self) -> None:
        """A FusedEmbeddingBagCollection given as the top-level module should
        come back as a ShardedFusedEmbeddingBagCollection after DMP wrapping."""
        # Single-process "distributed" environment: rank 0 of world size 1.
        os.environ['RANK'] = '0'
        os.environ['WORLD_SIZE'] = '1'
        os.environ['LOCAL_WORLD_SIZE'] = '1'
        os.environ['MASTER_ADDR'] = str('localhost')
        os.environ['MASTER_PORT'] = str(get_free_port())
        os.environ['NCCL_SOCKET_IFNAME'] = 'lo'
        if torch.cuda.is_available():
            curr_device = torch.device('cuda:0')
            torch.cuda.set_device(curr_device)
            backend = 'nccl'
        else:
            curr_device = torch.device('cpu')
            backend = 'gloo'
        dist.init_process_group(backend=backend)
        embedding_dim = 128
        num_embeddings = 256
        # Fused variant carries its optimizer configuration with it.
        ebc = FusedEmbeddingBagCollection(device=torch.device('meta'), tables=[EmbeddingBagConfig(name='large_table', embedding_dim=embedding_dim, num_embeddings=num_embeddings, feature_names=['my_feature'], pooling=PoolingType.SUM)], optimizer_type=torch.optim.SGD, optimizer_kwargs={'lr': 0.02})
        model = DistributedModelParallel(ebc, device=curr_device)
        self.assertTrue(isinstance(model.module, ShardedFusedEmbeddingBagCollection))
def main():
    """Compute and log the unit error rate (UER) between hypothesis and
    reference files (one utterance per line, tokens separated by whitespace).
    """
    args = get_parser().parse_args()
    errs = 0
    count = 0
    with open(args.hypo, 'r') as hf, open(args.reference, 'r') as rf:
        # Pair hypothesis/reference lines; extra lines in the longer file are
        # ignored by zip().
        for (h, r) in zip(hf, rf):
            h = h.rstrip().split()
            r = r.rstrip().split()
            errs += editdistance.eval(r, h)
            count += len(r)
    if count == 0:
        # Robustness fix: an empty reference file previously crashed with
        # ZeroDivisionError; UER is undefined in that case.
        logger.warning('No reference tokens found; UER is undefined.')
        return
    logger.info(f'UER: {((errs / count) * 100):.2f}%')
def test_run_job():
    """Submitting a simple job through the job service should yield a job id."""
    js = None
    cfg = config()
    try:
        js = rs.job.Service(cfg.job_service_url, cfg.session)
        job = js.run_job('/bin/sleep 10')
        assert job.id
    except rs.NotImplemented as ni:
        # A backend may legitimately not implement this; warn-only mode
        # downgrades the failure to a printed warning.
        assert cfg.notimpl_warn_only, ('%s ' % ni)
        if cfg.notimpl_warn_only:
            print(('%s ' % ni))
    except rs.SagaException as se:
        assert False, ('Unexpected exception: %s' % se)
    finally:
        # Close the service whether or not the job submission succeeded.
        _silent_close_js(js)
class Translation(CGAThing):
    """Conformal translation versor.

    With no arguments, a translation is generated from the algebra's base
    vector.  A grade-1 multivector argument is interpreted as a direction to
    translate along; a {0, 2}-graded multivector is taken to be the versor
    itself.

    Raises:
        ValueError: for any other input (previously this path crashed with
            an UnboundLocalError instead of the intended error).
    """

    def __init__(self, cga, *args) -> None:
        super().__init__(cga)
        mv = None
        if len(args) == 0:
            # T = 1 - t * einf / 2, with t drawn from the base vector.
            mv = (1 - ((self.cga.base_vector() * self.cga.einf) / 2.0))
        elif len(args) == 1:
            arg = args[0]
            if isinstance(arg, MultiVector):
                if arg.grades() == {1}:
                    # Direction vector: lift it up and build the versor.
                    mv = (1 - ((self.cga.straight_up(arg) * self.cga.einf) / 2.0))
                if arg.grades() == {0, 2}:
                    # Already a translation versor; use it as-is.
                    mv = arg
        if mv is None:
            # Covers non-MultiVector args, unsupported grade sets, and more
            # than one positional argument.
            raise ValueError('bad input')
        self.mv = mv

    def __repr__(self) -> str:
        return 'Translation'
def warning():
    """Generator that temporarily installs a compact warning formatter and
    makes every warning fire, yielding ``warnings.warn`` to the caller.

    NOTE(review): this reads like a context manager whose
    ``@contextlib.contextmanager`` decorator was stripped — confirm against
    the original source.  Also note the 'always' filter installed below is
    not restored on exit, only the formatter is.
    """
    def _warnformat(msg, category, filename, lineno, file=None, line=None):
        # Compact single-line format: file:line: Category: message
        return ('%s:%s: %s: %s\n' % (filename, lineno, category.__name__, msg))
    default_warn_format = warnings.formatwarning
    try:
        warnings.formatwarning = _warnformat
        # Report every occurrence, not just the first per call site.
        warnings.filterwarnings('always')
        (yield warnings.warn)
    finally:
        # Always restore the process-wide formatter.
        warnings.formatwarning = default_warn_format
class VendingMachine(VendingMachineStateMixin):
    """Stateful vending machine driven by alpha (A-D) and digit (1-4) buttons.

    Invalid transitions raised by the state machine are caught and printed
    rather than propagated.
    """

    def __init__(self):
        self.initialize_state(Idle)
        self._pressed = None
        self._alpha_pressed = None
        self._digit_pressed = None

    def press_button(self, button):
        """Route a raw button press to the alpha or digit handler."""
        if button in 'ABCD':
            self._pressed = button
            self.press_alpha_button()
        elif button in '1234':
            self._pressed = button
            self.press_digit_button()
        else:
            print('Did not recognize button {!r}'.format(str(button)))

    def press_alpha_button(self):
        """Record the row selection unless the state machine forbids it."""
        try:
            super().press_alpha_button()
        except VendingMachineState.InvalidTransitionException as ite:
            print(ite)
        else:
            self._alpha_pressed = self._pressed

    def press_digit_button(self):
        """Record the column selection, then attempt to dispense."""
        try:
            super().press_digit_button()
        except VendingMachineState.InvalidTransitionException as ite:
            print(ite)
        else:
            self._digit_pressed = self._pressed
            self.dispense()

    def dispense(self):
        """Announce the selected slot and clear the pending selection."""
        try:
            super().dispense()
        except VendingMachineState.InvalidTransitionException as ite:
            print(ite)
        else:
            print('Dispensing at {}{}'.format(self._alpha_pressed, self._digit_pressed))
            self._alpha_pressed = self._digit_pressed = None
def custom_pdb_calls() -> List[str]:
    """Install a stub Pdb class on `_pytest` and return its live call log."""
    calls: List[str] = []

    class _CustomPdb:
        quitting = False

        def __init__(self, *args, **kwargs):
            calls.append('init')

        def reset(self):
            calls.append('reset')

        def interaction(self, *args):
            calls.append('interaction')

    # Expose the stub where the code under test looks it up.
    _pytest._CustomPdb = _CustomPdb
    return calls
def test_nd_scan_sit_sot_with_carry():
    """JAX-mode scan with a sit-sot state plus an extra (non-recurrent) output."""
    x0 = pt.vector('x0', shape=(3,))
    A = pt.matrix('A', shape=(3, 3))

    def step(x, A):
        # BUGFIX: the original `return ((A x), x.sum())` was not valid Python;
        # the intended operation is the matrix-vector product A @ x.
        return A @ x, x.sum()

    (xs, _) = scan(step, outputs_info=[x0, None], non_sequences=[A], n_steps=10, mode=get_mode('JAX'))
    fg = FunctionGraph([x0, A], xs)
    x0_val = np.arange(3, dtype=config.floatX)
    A_val = np.eye(3, dtype=config.floatX)
    test_input_vals = [x0_val, A_val]
    compare_jax_and_py(fg, test_input_vals)
def test_insert_with_custom_columns():
    """INSERT with an explicit target column list maps source columns positionally."""
    expected = [
        (ColumnQualifierTuple('col1', 'src_tbl'), ColumnQualifierTuple('random1', 'tgt_tbl')),
        (ColumnQualifierTuple('col2', 'src_tbl'), ColumnQualifierTuple('random2', 'tgt_tbl')),
    ]
    assert_column_lineage_equal('INSERT INTO tgt_tbl(random1, random2) (SELECT col1,col2 FROM src_tbl)', expected, test_sqlparse=False)
    expected_new = [
        (ColumnQualifierTuple('col1', 'src_tbl_new'), ColumnQualifierTuple('random1', 'tgt_tbl')),
        (ColumnQualifierTuple('col2', 'src_tbl_new'), ColumnQualifierTuple('random2', 'tgt_tbl')),
    ]
    assert_column_lineage_equal('INSERT INTO tgt_tbl(random1, random2) SELECT col1,col2 FROM src_tbl_new', expected_new, test_sqlparse=False)
def ValidateImageSize(arg, argName, argInformed, errors):
    """Parse a 'width,height' argument into an (int, int) tuple.

    Accepts optional surrounding parentheses, e.g. '(600,400)'.

    Args:
        arg: raw command-line value, or None when absent.
        argName: name of this argument, used in error messages.
        argInformed: name of the related argument that makes this one required.
        errors: list collecting human-readable validation errors (mutated).

    Returns:
        (width, height) as ints, or None when validation fails (with a
        message appended to `errors`).
    """
    errorMsg = ('argument %s: required argument if %s is relative' % (argName, argInformed))
    ret = None
    if (arg is None):
        errors.append(errorMsg)
    else:
        # Tolerate '(w,h)' by stripping parentheses before splitting.
        arg = arg.replace('(', '').replace(')', '')
        args = arg.split(',')
        if (len(args) != 2):
            errors.append(("%s. It must be in the format 'width,height' (e.g. '600,400')" % errorMsg))
        elif ((not args[0].isdigit()) or (not args[1].isdigit())):
            # BUGFIX: the original message contained the garbled word
            # 'INdiaTEGER'; restored to 'INTEGER'.
            errors.append(("%s. It must be INTEGER values in the format 'width,height' (e.g. '600,400')" % errorMsg))
        else:
            ret = (int(args[0]), int(args[1]))
    return ret
class MatchFirst(ParseExpression):
    """Alternation of expressions: try each in order, return the first match."""

    def __init__(self, exprs: typing.Iterable[ParserElement], savelist: bool=False):
        super().__init__(exprs, savelist)
        if self.exprs:
            # Can match empty if any alternative can; skip leading whitespace
            # only when every alternative does.
            self.mayReturnEmpty = any((e.mayReturnEmpty for e in self.exprs))
            self.skipWhitespace = all((e.skipWhitespace for e in self.exprs))
        else:
            self.mayReturnEmpty = True

    def streamline(self) -> ParserElement:
        """Recompute cached flags after child expressions are streamlined."""
        if self.streamlined:
            return self
        super().streamline()
        if self.exprs:
            self.saveAsList = any((e.saveAsList for e in self.exprs))
            self.mayReturnEmpty = any((e.mayReturnEmpty for e in self.exprs))
            # White alternatives would be destroyed by whitespace skipping.
            self.skipWhitespace = all(((e.skipWhitespace and (not isinstance(e, White))) for e in self.exprs))
        else:
            self.saveAsList = False
            self.mayReturnEmpty = True
        return self

    def parseImpl(self, instring, loc, doActions=True):
        # Try each alternative; remember the exception that got furthest so a
        # failure is reported at the most informative location.
        maxExcLoc = (- 1)
        maxException = None
        for e in self.exprs:
            try:
                return e._parse(instring, loc, doActions)
            except ParseFatalException as pfe:
                # Fatal errors abort the whole alternation immediately.
                pfe.__traceback__ = None
                pfe.parser_element = e
                raise
            except ParseException as err:
                if (err.loc > maxExcLoc):
                    maxException = err
                    maxExcLoc = err.loc
            except IndexError:
                # Ran off the end of the input while matching this alternative.
                if (len(instring) > maxExcLoc):
                    maxException = ParseException(instring, len(instring), e.errmsg, self)
                    maxExcLoc = len(instring)
        if (maxException is not None):
            # If nothing advanced past `loc`, report this expression's message.
            if (maxExcLoc == loc):
                maxException.msg = self.errmsg
            raise maxException
        raise ParseException(instring, loc, 'no defined alternatives to match', self)

    def __ior__(self, other):
        # expr |= other : append another alternative in place.
        if isinstance(other, str_type):
            other = self._literalStringClass(other)
        if (not isinstance(other, ParserElement)):
            return NotImplemented
        return self.append(other)

    def _generateDefaultName(self) -> str:
        return f"{{{' | '.join((str(e) for e in self.exprs))}}}"

    def _setResultsName(self, name, listAllMatches=False):
        # Warn when naming an alternation of And expressions: since 2.3.1 the
        # full token list is returned, not just the first token.
        if (__diag__.warn_multiple_tokens_in_named_alternation and (Diagnostics.warn_multiple_tokens_in_named_alternation not in self.suppress_warnings_)):
            if any(((isinstance(e, And) and (Diagnostics.warn_multiple_tokens_in_named_alternation not in e.suppress_warnings_)) for e in self.exprs)):
                warning = f'warn_multiple_tokens_in_named_alternation: setting results name {name!r} on {type(self).__name__} expression will return a list of all parsed tokens in an And alternative, in prior versions only the first token was returned; enclose contained argument in Group'
                warnings.warn(warning, stacklevel=3)
        return super()._setResultsName(name, listAllMatches)
def se_resnet101(num_classes, loss, pretrained='imagenet', **kwargs):
    """Construct an SE-ResNet-101 model, optionally loading ImageNet weights."""
    model = SENet(
        num_classes=num_classes,
        loss=loss,
        block=SEResNetBottleneck,
        layers=[3, 4, 23, 3],
        groups=1,
        reduction=16,
        dropout_p=None,
        inplanes=64,
        input_3x3=False,
        downsample_kernel_size=1,
        downsample_padding=0,
        last_stride=2,
        fc_dims=None,
        **kwargs,
    )
    if pretrained == 'imagenet':
        # Initialize from the published se_resnet101 ImageNet checkpoint.
        init_pretrained_weights(model, pretrained_settings['se_resnet101']['imagenet']['url'])
    return model
# NOTE(review): the `.skipif(...)` line below appears to be a decorator whose
# `@pytest.mark` prefix was stripped — confirm against the original source.
.skipif((sys.platform == 'win32'), reason='Windows raises `OSError: [Errno 22] Invalid argument` instead')
def test_no_brokenpipeerror_message(pytester: Pytester) -> None:
    """Closing pytest's stdout early must not leak a BrokenPipeError message."""
    popen = pytester.popen((*pytester._getpytestargs(), '--help'))
    # Close the read end so pytest's subsequent writes hit a broken pipe.
    popen.stdout.close()
    ret = popen.wait()
    # Nothing should have been written to stderr despite the broken pipe.
    assert (popen.stderr.read() == b'')
    assert (ret == 1)
    popen.stderr.close()
def create_pod(body, namespace, timeout=120):
    """Create a pod and block until it reaches the 'Running' phase.

    On any failure (creation error or timeout) the pod is deleted and the
    process exits with status 1.
    """
    pod_stat = None
    try:
        pod_stat = cli.create_namespaced_pod(body=body, namespace=namespace)
        deadline = time.time() + timeout
        while True:
            pod_stat = cli.read_namespaced_pod(name=body['metadata']['name'], namespace=namespace)
            if pod_stat.status.phase == 'Running':
                break
            if time.time() > deadline:
                raise Exception('Starting pod failed')
            # Poll once per second until running or timed out.
            time.sleep(1)
    except Exception as e:
        logging.error('Pod creation failed %s' % e)
        if pod_stat:
            logging.error(pod_stat.status.container_statuses)
        # Best-effort cleanup before aborting the run.
        delete_pod(body['metadata']['name'], namespace)
        sys.exit(1)
# NOTE(review): the `.parametrize(...)` line below appears to be a decorator
# whose `@pytest.mark` prefix was stripped — confirm against the original.
.parametrize('input,constraint', [('*', AnyConstraint()), ('win32', Constraint('win32', '=')), ('=win32', Constraint('win32', '=')), ('==win32', Constraint('win32', '=')), ('!=win32', Constraint('win32', '!=')), ('!= win32', Constraint('win32', '!='))])
def test_parse_constraint(input: str, constraint: (AnyConstraint | Constraint)) -> None:
    """Each textual platform constraint parses to the expected object."""
    assert (parse_constraint(input) == constraint)
def upgrade(op, tables, tester):
    """Create the `uploadedblob` table and its indexes if missing.

    Checks the live schema first so that re-running this migration on a
    partially migrated database is idempotent.
    """
    inspector = Inspector.from_engine(op.get_bind())
    table_names = inspector.get_table_names()
    if ('uploadedblob' not in table_names):
        op.create_table('uploadedblob', sa.Column('id', sa.BigInteger(), nullable=False), sa.Column('repository_id', sa.Integer(), nullable=False), sa.Column('blob_id', sa.Integer(), nullable=False), sa.Column('uploaded_at', sa.DateTime(), nullable=False), sa.Column('expires_at', sa.DateTime(), nullable=False), sa.ForeignKeyConstraint(['blob_id'], ['imagestorage.id'], name=op.f('fk_uploadedblob_blob_id_imagestorage')), sa.ForeignKeyConstraint(['repository_id'], ['repository.id'], name=op.f('fk_uploadedblob_repository_id_repository')), sa.PrimaryKeyConstraint('id', name=op.f('pk_uploadedblob')))
    # Create each index only when it does not already exist.
    uploadedblob_indexes = inspector.get_indexes('uploadedblob')
    if (not ('uploadedblob_blob_id' in [i['name'] for i in uploadedblob_indexes])):
        op.create_index('uploadedblob_blob_id', 'uploadedblob', ['blob_id'], unique=False)
    if (not ('uploadedblob_expires_at' in [i['name'] for i in uploadedblob_indexes])):
        op.create_index('uploadedblob_expires_at', 'uploadedblob', ['expires_at'], unique=False)
    if (not ('uploadedblob_repository_id' in [i['name'] for i in uploadedblob_indexes])):
        op.create_index('uploadedblob_repository_id', 'uploadedblob', ['repository_id'], unique=False)
    # Seed test rows so the migration can be validated by the tester harness.
    tester.populate_table('uploadedblob', [('repository_id', tester.TestDataType.Foreign('repository')), ('blob_id', tester.TestDataType.Foreign('imagestorage')), ('uploaded_at', tester.TestDataType.DateTime), ('expires_at', tester.TestDataType.DateTime)])
# NOTE(review): `.skip` and `_db` below look like decorators whose marker
# prefixes (e.g. `@pytest.mark.skip`, `@pytest.mark.django_db`) were stripped,
# and the empty `email` literal looks truncated — confirm against the original.
.skip
_db
def test_unsubscribe_not_registered_mail_to_newsletter(graphql_client):
    """Unsubscribing an address that was never registered still reports success."""
    email = ''
    variables = {'email': email}
    query = '\n mutation($email: String!) {\n unsubscribeToNewsletter(input: {\n email: $email\n }) {\n __typename\n\n ... on UnsubscribeToNewsletterErrors {\n email\n }\n\n ... on NewsletterSubscribeResult {\n status\n }\n }\n }\n '
    resp = graphql_client.query(query, variables=variables)
    assert (resp['data']['unsubscribeToNewsletter']['status'] is True)
def test_jax_compile_ops():
    """DeepCopyOp, Unbroadcast and ViewOp should transpile to JAX and match Python."""
    out = DeepCopyOp()(pt.as_tensor_variable(1.1))
    compare_jax_and_py(FunctionGraph([], [out]), [])

    base = np.zeros((20, 1, 1))
    out = Unbroadcast(0, 2)(pt.as_tensor_variable(base))
    compare_jax_and_py(FunctionGraph([], [out]), [])

    out = ViewOp()(pt.as_tensor_variable(base))
    compare_jax_and_py(FunctionGraph([], [out]), [])
def to_bool(val: ((bool | int) | str)) -> bool:
if isinstance(val, bool):
return val
if (isinstance(val, int) or (isinstance(val, str) and val.isdigit())):
return bool(int(val))
if isinstance(val, str):
if (val.lower() == 'true'):
return True
return False |
class DistributedLossWrapper(torch.nn.Module):
    """Make a loss module usable under distributed training.

    Embeddings and labels are gathered from all ranks before the loss is
    applied; the loss itself is wrapped in DDP only when it has parameters.
    """

    def __init__(self, loss, **kwargs):
        super().__init__()
        if any(True for _ in loss.parameters()):
            self.loss = DDP(loss, **kwargs)
        else:
            self.loss = loss

    def forward(self, embeddings, labels, *args, **kwargs):
        embeddings, labels = all_gather_embeddings_labels(embeddings, labels)
        return self.loss(embeddings, labels, *args, **kwargs)
class StringValue(Value):
    """Value whose wire representation is a quoted string (or a bare boolean)."""

    def __init__(self, name, initial, **kwargs):
        super(StringValue, self).__init__(name, initial, **kwargs)

    def get_msg(self):
        """Serialize the current value: booleans as true/false, strings quoted."""
        # Idiom fix: `isinstance(..., bool)` instead of comparing
        # `type(self.value) == type(False)`.
        if isinstance(self.value, bool):
            strvalue = ('true' if self.value else 'false')
        else:
            strvalue = (('"' + self.value) + '"')
        return strvalue
class MvpTokenizer(PreTrainedTokenizer):
    """GPT-2-style byte-level BPE tokenizer for MVP models.

    NOTE(review): the ``\\p{L}``/``\\p{N}`` classes in ``self.pat`` require
    the third-party ``regex`` module; confirm that ``re`` is bound to
    ``regex`` at the import site.  ``vocab_size`` reads like a ``@property``
    whose decorator may have been stripped — confirm against the original.
    """
    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    model_input_names = ['input_ids', 'attention_mask']
    def __init__(self, vocab_file, merges_file, errors='replace', bos_token='<s>', eos_token='</s>', sep_token='</s>', cls_token='<s>', unk_token='<unk>', pad_token='<pad>', mask_token='<mask>', add_prefix_space=False, **kwargs):
        # Normalize plain-string special tokens into AddedToken instances;
        # the mask token strips the space before it (lstrip=True).
        bos_token = (AddedToken(bos_token, lstrip=False, rstrip=False) if isinstance(bos_token, str) else bos_token)
        eos_token = (AddedToken(eos_token, lstrip=False, rstrip=False) if isinstance(eos_token, str) else eos_token)
        sep_token = (AddedToken(sep_token, lstrip=False, rstrip=False) if isinstance(sep_token, str) else sep_token)
        cls_token = (AddedToken(cls_token, lstrip=False, rstrip=False) if isinstance(cls_token, str) else cls_token)
        unk_token = (AddedToken(unk_token, lstrip=False, rstrip=False) if isinstance(unk_token, str) else unk_token)
        pad_token = (AddedToken(pad_token, lstrip=False, rstrip=False) if isinstance(pad_token, str) else pad_token)
        mask_token = (AddedToken(mask_token, lstrip=True, rstrip=False) if isinstance(mask_token, str) else mask_token)
        super().__init__(errors=errors, bos_token=bos_token, eos_token=eos_token, unk_token=unk_token, sep_token=sep_token, cls_token=cls_token, pad_token=pad_token, mask_token=mask_token, add_prefix_space=add_prefix_space, **kwargs)
        # token -> id vocabulary and its inverse.
        with open(vocab_file, encoding='utf-8') as vocab_handle:
            self.encoder = json.load(vocab_handle)
        self.decoder = {v: k for (k, v) in self.encoder.items()}
        self.errors = errors
        # Reversible byte <-> unicode mapping used by byte-level BPE.
        self.byte_encoder = bytes_to_unicode()
        self.byte_decoder = {v: k for (k, v) in self.byte_encoder.items()}
        # Merge ranks: first merges file line is a header, last is empty.
        with open(merges_file, encoding='utf-8') as merges_handle:
            bpe_merges = merges_handle.read().split('\n')[1:(- 1)]
        bpe_merges = [tuple(merge.split()) for merge in bpe_merges]
        self.bpe_ranks = dict(zip(bpe_merges, range(len(bpe_merges))))
        # Per-token BPE result cache.
        self.cache = {}
        self.add_prefix_space = add_prefix_space
        # Pre-tokenization pattern: contractions, letter runs, number runs,
        # other symbols, and whitespace.
        self.pat = re.compile("'s|'t|'re|'ve|'m|'ll|'d| ?\\p{L}+| ?\\p{N}+| ?[^\\s\\p{L}\\p{N}]+|\\s+(?!\\S)|\\s+")
    def vocab_size(self):
        """Size of the base vocabulary (without added tokens)."""
        return len(self.encoder)
    def get_vocab(self):
        """Full vocabulary including tokens added after loading."""
        return dict(self.encoder, **self.added_tokens_encoder)
    def bpe(self, token):
        """Apply byte-pair merges to one pre-token; results are cached."""
        if (token in self.cache):
            return self.cache[token]
        word = tuple(token)
        pairs = get_pairs(word)
        if (not pairs):
            return token
        while True:
            # Merge the lowest-ranked (most frequent) adjacent pair first.
            bigram = min(pairs, key=(lambda pair: self.bpe_ranks.get(pair, float('inf'))))
            if (bigram not in self.bpe_ranks):
                break
            (first, second) = bigram
            new_word = []
            i = 0
            while (i < len(word)):
                try:
                    j = word.index(first, i)
                except ValueError:
                    new_word.extend(word[i:])
                    break
                else:
                    new_word.extend(word[i:j])
                    i = j
                if ((word[i] == first) and (i < (len(word) - 1)) and (word[(i + 1)] == second)):
                    new_word.append((first + second))
                    i += 2
                else:
                    new_word.append(word[i])
                    i += 1
            new_word = tuple(new_word)
            word = new_word
            if (len(word) == 1):
                break
            else:
                pairs = get_pairs(word)
        # Space-joined sub-tokens form the cacheable result.
        word = ' '.join(word)
        self.cache[token] = word
        return word
    def _tokenize(self, text):
        """Split text with the pre-tokenizer, byte-encode, then apply BPE."""
        bpe_tokens = []
        for token in re.findall(self.pat, text):
            token = ''.join((self.byte_encoder[b] for b in token.encode('utf-8')))
            bpe_tokens.extend((bpe_token for bpe_token in self.bpe(token).split(' ')))
        return bpe_tokens
    def _convert_token_to_id(self, token):
        # Unknown tokens map to the unk token's id.
        return self.encoder.get(token, self.encoder.get(self.unk_token))
    def _convert_id_to_token(self, index):
        return self.decoder.get(index)
    def convert_tokens_to_string(self, tokens):
        """Concatenate tokens and reverse the byte-level encoding to text."""
        text = ''.join(tokens)
        text = bytearray([self.byte_decoder[c] for c in text]).decode('utf-8', errors=self.errors)
        return text
    def save_vocabulary(self, save_directory: str, filename_prefix: Optional[str]=None) -> Tuple[str]:
        """Write vocab.json and merges.txt into `save_directory`; return their paths."""
        if (not os.path.isdir(save_directory)):
            logger.error(f'Vocabulary path ({save_directory}) should be a directory')
            return
        vocab_file = os.path.join(save_directory, (((filename_prefix + '-') if filename_prefix else '') + VOCAB_FILES_NAMES['vocab_file']))
        merge_file = os.path.join(save_directory, (((filename_prefix + '-') if filename_prefix else '') + VOCAB_FILES_NAMES['merges_file']))
        with open(vocab_file, 'w', encoding='utf-8') as f:
            f.write((json.dumps(self.encoder, indent=2, sort_keys=True, ensure_ascii=False) + '\n'))
        index = 0
        with open(merge_file, 'w', encoding='utf-8') as writer:
            writer.write('#version: 0.2\n')
            # Merges are written in rank order; warn if ranks have gaps.
            for (bpe_tokens, token_index) in sorted(self.bpe_ranks.items(), key=(lambda kv: kv[1])):
                if (index != token_index):
                    logger.warning(f'Saving vocabulary to {merge_file}: BPE merge indices are not consecutive. Please check that the tokenizer is not corrupted!')
                    index = token_index
                writer.write((' '.join(bpe_tokens) + '\n'))
                index += 1
        return (vocab_file, merge_file)
    def build_inputs_with_special_tokens(self, token_ids_0: List[int], token_ids_1: Optional[List[int]]=None) -> List[int]:
        """Single sequence: <s> X </s>; pair: <s> A </s></s> B </s>."""
        if (token_ids_1 is None):
            return (([self.cls_token_id] + token_ids_0) + [self.sep_token_id])
        cls = [self.cls_token_id]
        sep = [self.sep_token_id]
        return (((((cls + token_ids_0) + sep) + sep) + token_ids_1) + sep)
    def get_special_tokens_mask(self, token_ids_0: List[int], token_ids_1: Optional[List[int]]=None, already_has_special_tokens: bool=False) -> List[int]:
        """1 for special-token positions, 0 for sequence-token positions."""
        if already_has_special_tokens:
            return super().get_special_tokens_mask(token_ids_0=token_ids_0, token_ids_1=token_ids_1, already_has_special_tokens=True)
        if (token_ids_1 is None):
            return (([1] + ([0] * len(token_ids_0))) + [1])
        return (((([1] + ([0] * len(token_ids_0))) + [1, 1]) + ([0] * len(token_ids_1))) + [1])
    def create_token_type_ids_from_sequences(self, token_ids_0: List[int], token_ids_1: Optional[List[int]]=None) -> List[int]:
        """All-zero token-type ids (this model does not use token types)."""
        sep = [self.sep_token_id]
        cls = [self.cls_token_id]
        if (token_ids_1 is None):
            return (len(((cls + token_ids_0) + sep)) * [0])
        return (len((((((cls + token_ids_0) + sep) + sep) + token_ids_1) + sep)) * [0])
    def prepare_for_tokenization(self, text, is_split_into_words=False, **kwargs):
        # Optionally prepend a space so the first word is encoded like a
        # mid-sentence word (byte-level BPE is whitespace-sensitive).
        add_prefix_space = kwargs.pop('add_prefix_space', self.add_prefix_space)
        if ((is_split_into_words or add_prefix_space) and ((len(text) > 0) and (not text[0].isspace()))):
            text = (' ' + text)
        return (text, kwargs)
def compute_valid_depth_mask(d1, d2=None, min_thred=0.3, max_thred=5.0):
    """Boolean mask of depths strictly inside (min_thred, max_thred).

    With a single map, finiteness is also required.  With two maps, an entry
    is valid only where *both* maps pass the thresholds.
    """
    if d2 is None:
        return np.isfinite(d1) & (d1 > min_thred) & (d1 < max_thred)
    valid = (d1 < max_thred) & (d2 < max_thred)
    # Refine only the surviving entries against the lower bound.
    valid[valid] = (d1[valid] > min_thred) & (d2[valid] > min_thred)
    return valid
class QlWindowsThread(QlThread):
    """Emulated Windows thread: holds a saved register context plus scheduler state."""
    # Class-wide monotonic counter used to hand out unique thread ids.
    ID = 0
    def __init__(self, ql: Qiling, status: THREAD_STATUS=THREAD_STATUS.RUNNING):
        super().__init__(ql)
        self.ql = ql
        # Take the next free id and advance the counter.
        self.id = QlWindowsThread.ID
        QlWindowsThread.ID += 1
        self.status = status
        # Threads this thread is blocked waiting on (see waitfor/has_waitfor).
        self.waitforthreads = []
        # Thread-local storage slots keyed by index.
        self.tls = {}
        self.tls_index = 0
    # NOTE(review): this takes `cls` and instantiates it, so it was presumably
    # decorated with @classmethod in the original source -- confirm.
    def create(cls, ql: Qiling, stack_size: int, func_addr: int, func_params: int, status: THREAD_STATUS) -> 'QlWindowsThread':
        """Build a thread with a fresh stack whose context starts at func_addr."""
        os = cast('QlOsWindows', ql.os)
        thread = cls(ql, status)
        # Allocate the stack and point at its top (stacks grow downwards).
        new_stack = (os.heap.alloc(stack_size) + stack_size)
        asize = ql.arch.pointersize
        context = ql.arch.regs.save()
        # Plant the manager's thread-exit trampoline as the return address.
        ql.mem.write_ptr((new_stack - asize), os.thread_manager.thread_ret_addr)
        if (ql.arch.type == QL_ARCH.X86):
            # x86: the single parameter is passed on the stack.
            ql.mem.write_ptr(new_stack, func_params)
        elif (ql.arch.type == QL_ARCH.X8664):
            # x64: the first parameter is passed in rcx.
            context['rcx'] = func_params
        if (ql.arch.type == QL_ARCH.X86):
            context['eip'] = func_addr
            context['ebp'] = (new_stack - asize)
            context['esp'] = (new_stack - asize)
        elif (ql.arch.type == QL_ARCH.X8664):
            context['rip'] = func_addr
            context['rbp'] = (new_stack - asize)
            context['rsp'] = (new_stack - asize)
        thread.saved_context = context
        return thread
    def suspend(self) -> None:
        # Capture current register state so the thread can be resumed later.
        self.saved_context = self.ql.arch.regs.save()
    def resume(self) -> None:
        self.ql.arch.regs.restore(self.saved_context)
        self.status = THREAD_STATUS.RUNNING
    def stop(self) -> None:
        self.status = THREAD_STATUS.TERMINATED
    def is_stop(self) -> bool:
        return (self.status == THREAD_STATUS.TERMINATED)
    def waitfor(self, thread: 'QlWindowsThread') -> None:
        self.waitforthreads.append(thread)
    def has_waitfor(self) -> bool:
        # True while at least one awaited thread is still running.
        return any(((not thread.is_stop()) for thread in self.waitforthreads))
class NoisyNetDQN():
    """NoisyNet DQN agent (TensorFlow 1.x, graph mode).

    Exploration comes from noisy dense layers rather than epsilon-greedy.
    Transitions are kept in a bounded replay deque; the target network is
    hard-copied from the eval network every UPDATE_TARGET_NET training steps.
    """

    def __init__(self, env, config):
        self.sess = tf.InteractiveSession()
        self.config = config
        self.replay_buffer = deque(maxlen=self.config.replay_buffer_size)
        self.time_step = 0
        self.state_dim = env.observation_space.shape
        self.action_dim = env.action_space.n
        print('state_dim:', self.state_dim)
        print('action_dim:', self.action_dim)
        self.action_batch = tf.placeholder('int32', [None])
        self.y_input = tf.placeholder('float', [None, self.action_dim])
        # Batch dimension first, then the raw observation shape.
        batch_shape = [None]
        batch_shape.extend(self.state_dim)
        self.eval_input = tf.placeholder('float', batch_shape)
        self.target_input = tf.placeholder('float', batch_shape)
        self.build_noisy_dqn_net()
        self.saver = tf.train.Saver()
        self.sess.run(tf.global_variables_initializer())
        # NOTE(review): saving immediately after init and then restoring the
        # same checkpoint looks redundant -- presumably bootstraps MODEL_PATH
        # so later restores always succeed; confirm.
        self.save_model()
        self.restore_model()

    def build_layers(self, state, c_names, units_1, units_2, w_i, b_i, reg=None):
        """conv -> conv -> flatten -> three noisy dense layers producing Q-values."""
        with tf.variable_scope('conv1'):
            conv1 = conv(state, [5, 5, 3, 6], [6], [1, 2, 2, 1], w_i, b_i)
        with tf.variable_scope('conv2'):
            conv2 = conv(conv1, [3, 3, 6, 12], [12], [1, 2, 2, 1], w_i, b_i)
        with tf.variable_scope('flatten'):
            flatten = tf.contrib.layers.flatten(conv2)
        with tf.variable_scope('dense1'):
            dense1 = noisy_dense(flatten, units_1, [units_1], c_names, w_i, b_i, noisy_distribution=self.config.noisy_distribution)
        with tf.variable_scope('dense2'):
            dense2 = noisy_dense(dense1, units_2, [units_2], c_names, w_i, b_i, noisy_distribution=self.config.noisy_distribution)
        with tf.variable_scope('dense3'):
            dense3 = noisy_dense(dense2, self.action_dim, [self.action_dim], c_names, w_i, b_i, noisy_distribution=self.config.noisy_distribution)
        return dense3

    def build_noisy_dqn_net(self):
        """Build the target/eval networks plus the loss, optimizer and sync ops."""
        with tf.variable_scope('target_net'):
            # BUG FIX: collection name was misspelled 'target_net_arams', so
            # tf.get_collection('target_net_params') below returned an empty
            # list and update_target_net never copied any weights.
            c_names = ['target_net_params', tf.GraphKeys.GLOBAL_VARIABLES]
            w_i = tf.random_uniform_initializer((- 0.1), 0.1)
            b_i = tf.constant_initializer(0.1)
            self.q_target = self.build_layers(self.target_input, c_names, 24, 24, w_i, b_i)
        with tf.variable_scope('eval_net'):
            c_names = ['eval_net_params', tf.GraphKeys.GLOBAL_VARIABLES]
            w_i = tf.random_uniform_initializer((- 0.1), 0.1)
            b_i = tf.constant_initializer(0.1)
            self.q_eval = self.build_layers(self.eval_input, c_names, 24, 24, w_i, b_i)
        self.loss = tf.reduce_mean(tf.squared_difference(self.q_eval, self.y_input))
        self.optimizer = tf.train.AdamOptimizer(self.config.LEARNING_RATE).minimize(self.loss)
        eval_params = tf.get_collection('eval_net_params')
        target_params = tf.get_collection('target_net_params')
        # Hard copy of eval-net weights into the target net.
        self.update_target_net = [tf.assign(t, e) for (t, e) in zip(target_params, eval_params)]

    def save_model(self):
        """Persist the session's variables to config.MODEL_PATH."""
        print('Model saved in : ', self.saver.save(self.sess, self.config.MODEL_PATH))

    def restore_model(self):
        """Load the session's variables from config.MODEL_PATH."""
        self.saver.restore(self.sess, self.config.MODEL_PATH)
        print('Model restored.')

    def perceive(self, state, action, reward, next_state, done):
        """Append one transition to the replay buffer."""
        self.replay_buffer.append((state, action, reward, next_state, done))

    def train_q_network(self, update=True):
        """One optimizer step on a random minibatch; optionally sync the target net."""
        if (len(self.replay_buffer) < self.config.START_TRAINING):
            return
        self.time_step += 1
        minibatch = random.sample(self.replay_buffer, self.config.BATCH_SIZE)
        np.random.shuffle(minibatch)
        state_batch = [data[0] for data in minibatch]
        action_batch = [data[1] for data in minibatch]
        reward_batch = [data[2] for data in minibatch]
        next_state_batch = [data[3] for data in minibatch]
        done = [data[4] for data in minibatch]
        q_target = self.sess.run(self.q_target, feed_dict={self.target_input: next_state_batch})
        q_eval = self.sess.run(self.q_eval, feed_dict={self.eval_input: state_batch})
        # Convert booleans to 0/1 so (1 - done) masks terminal transitions.
        done = (np.array(done) + 0)
        y_batch = np.zeros((self.config.BATCH_SIZE, self.action_dim))
        for i in range(0, self.config.BATCH_SIZE):
            temp = q_eval[i]
            # Greedy action under the target network's next-state values.
            action = np.argmax(q_target[i])
            temp[action_batch[i]] = (reward_batch[i] + (((1 - done[i]) * self.config.GAMMA) * q_target[i][action]))
            y_batch[i] = temp
        self.sess.run(self.optimizer, feed_dict={self.y_input: y_batch, self.eval_input: state_batch, self.action_batch: action_batch})
        if (update and ((self.time_step % self.config.UPDATE_TARGET_NET) == 0)):
            self.sess.run(self.update_target_net)

    def noisy_action(self, state):
        # NOTE(review): acts from the *target* network's Q-values; confirm this
        # is intentional (eval-net action selection is the more common choice).
        return np.argmax(self.sess.run(self.q_target, feed_dict={self.target_input: [state]})[0])
class WebRequestHandler(BaseHTTPRequestHandler):
    """HTTP handler answering every GET with a JSON descriptor that points the
    client at a freshly started WebSocket server on a random high port."""
    def _set_headers(self):
        # All responses are 200 with a JSON content type.
        self.send_response(200)
        self.send_header('Content-type', 'application/json')
        self.end_headers()
    def do_GET(self):
        try:
            logger.debug('Got GET request')
            self._set_headers()
            # NOTE(review): the port is picked at random without checking
            # availability -- collisions with busy ports are possible.
            port = randint(60000, 65535)
            response_obj = {'ProtocolVersion': 1}
            response_obj['WebSocketPort'] = port
            startWebSocketSvr(self.server.ghost, port)
            self.wfile.write(json.dumps(response_obj).encode())
            logger.debug('Wrote response %s', response_obj)
        except BaseException as e:
            # Deliberately broad: one failed request must not kill the server loop.
            logger.error('Caught error', exc_info=e)
def check_channel(app1: RaidenService, app2: RaidenService, token_network_address: TokenNetworkAddress, settle_timeout: BlockTimeout, deposit_amount: TokenAmount) -> None:
    """Assert that app1 and app2 share a consistent on-chain channel.

    Verifies both sides see the channel, agree on settle timeout, mirror each
    other's addresses/deposits, and match the expected deposit amount.
    Raises AssertionError on any mismatch.
    """
    channel_state1 = get_channelstate_by_token_network_and_partner(chain_state=state_from_raiden(app1), token_network_address=token_network_address, partner_address=app2.address)
    assert channel_state1, 'app1 does not have a channel with app2.'
    netcontract1 = app1.proxy_manager.payment_channel(channel_state=channel_state1, block_identifier=BLOCK_ID_LATEST)
    channel_state2 = get_channelstate_by_token_network_and_partner(chain_state=state_from_raiden(app2), token_network_address=token_network_address, partner_address=app1.address)
    assert channel_state2, 'app2 does not have a channel with app1.'
    netcontract2 = app2.proxy_manager.payment_channel(channel_state=channel_state2, block_identifier=BLOCK_ID_LATEST)
    # Both proxies must report the expected settle timeout.
    assert (settle_timeout == netcontract1.settle_timeout())
    assert (settle_timeout == netcontract2.settle_timeout())
    if (deposit_amount > 0):
        # A funded channel must be usable from both ends.
        assert netcontract1.can_transfer(BLOCK_ID_LATEST)
        assert netcontract2.can_transfer(BLOCK_ID_LATEST)
    app1_details = netcontract1.detail(BLOCK_ID_LATEST)
    app2_details = netcontract2.detail(BLOCK_ID_LATEST)
    # Each side's "our" view must be the other side's "partner" view.
    assert (app1_details.participants_data.our_details.address == app2_details.participants_data.partner_details.address)
    assert (app1_details.participants_data.partner_details.address == app2_details.participants_data.our_details.address)
    assert (app1_details.participants_data.our_details.deposit == app2_details.participants_data.partner_details.deposit)
    assert (app1_details.participants_data.partner_details.deposit == app2_details.participants_data.our_details.deposit)
    assert (app1_details.chain_id == app2_details.chain_id)
    # Both participants are expected to have deposited the same amount.
    assert (app1_details.participants_data.our_details.deposit == deposit_amount)
    assert (app1_details.participants_data.partner_details.deposit == deposit_amount)
    assert (app2_details.participants_data.our_details.deposit == deposit_amount)
    assert (app2_details.participants_data.partner_details.deposit == deposit_amount)
    assert (app2_details.chain_id == UNIT_CHAIN_ID)
class WindowEventsTestCase(InteractiveTestCase):
    """Base class for interactive window-event tests.

    Shows `question` in a window and spins the event loop until the tester
    (or a handler pushed onto the window) calls pass_test/fail_test.
    """
    window_size = (400, 200)
    window = None
    # Text rendered at the top of the window; subclasses must set this.
    question = None
    def setUp(self):
        self.finished = False
        self.failure = None
        self.label = None
    def fail_test(self, failure):
        # Record the failure message and stop the event loop.
        self.failure = failure
        self.finished = True
    def pass_test(self):
        self.finished = True
    def _render_question(self):
        fnt = font.load('Courier')
        # Anchor the question near the top-left of the window.
        self.label = font.Text(fnt, text=self.question, x=10, y=(self.window_size[1] - 20))
    def _draw(self):
        gl.glClearColor(0.5, 0, 0, 1)
        gl.glClear(gl.GL_COLOR_BUFFER_BIT)
        gl.glLoadIdentity()
        self.label.draw()
        self.window.flip()
    def _test_main(self):
        """Create the window, run the event loop, then assert the outcome."""
        assert self.question
        (width, height) = self.window_size
        self.window = w = Window(width, height, visible=False, resizable=False)
        try:
            w.push_handlers(self)
            self._render_question()
            w.set_visible()
            # Redraw and pump events until the tester decides or closes the window.
            while ((not self.finished) and (not w.has_exit)):
                self._draw()
                w.dispatch_events()
        finally:
            # Always close the window, even if the loop raised.
            w.close()
        self.assertTrue(self.finished, msg='Test aborted')
        self.assertIsNone(self.failure, msg=self.failure)
class TPanedPreferences(TestCase):
    """Smoke tests for the paned-browser preference widgets."""

    def setUp(self):
        config.init()

    def tearDown(self):
        config.quit()

    def test_editor(self):
        editor = PatternEditor()
        # Round-trip the headers property through its setter.
        editor.headers = editor.headers
        editor.destroy()
        # Destroying twice must be safe.
        editor.destroy()

    def test_button(self):
        PreferencesButton(None).destroy()

    def test_dialog(self):
        Preferences(None).destroy()
def business_hours_back(end, delta):
    """Walk backwards from *end* to a start time whose business-hours span
    matches the (scaled) requested delta, to within 60 seconds.

    The requested delta is first scaled to one third -- presumably because
    roughly a third of a day is business time; TODO(review): confirm.
    """
    target = datetime.timedelta(seconds=(delta.total_seconds() / 3))
    # Grow the candidate window exponentially until it spans enough business time.
    estimate = target
    start = end - estimate
    while business_hours(start, end) < target:
        estimate = datetime.timedelta(seconds=(estimate.total_seconds() * 2))
        start = end - estimate
    # Bisect within [start, end] for the point matching the target span.
    lo = start
    hi = end
    guess = lo + (hi - lo) / 2
    offset = (business_hours(guess, end) - target).total_seconds()
    while abs(offset) > 60:
        if offset < 0:
            hi = guess
        else:
            lo = guess
        guess = lo + (hi - lo) / 2
        offset = (business_hours(guess, end) - target).total_seconds()
    return guess
def replace_modules_of_type1_using_constructor(model, type1, constructor):
    """Replace direct children of *model* that are instances of *type1* with
    ``constructor(child)``, recursing into replaced children that themselves
    contain submodules.

    NOTE(review): the recursion happens only inside the isinstance branch, so
    children that are NOT of *type1* (e.g. nested containers) are never
    descended into -- and the recursion targets the *old* module after it has
    been replaced. Confirm this is the intended traversal.
    """
    for (module_name, module_ref) in model.named_children():
        if isinstance(module_ref, type1):
            setattr(model, module_name, constructor(module_ref))
            # modules() always yields the module itself, so length 1 means "leaf".
            children_module_list = list(module_ref.modules())
            if (len(children_module_list) != 1):
                replace_modules_of_type1_using_constructor(module_ref, type1, constructor)
def test_annotation_fail_runpath(testdir):
    """A failing test run under GitHub Actions must emit a ::error annotation
    whose file path is prefixed with the PYTEST_RUN_PATH value."""
    testdir.makepyfile("\n    import pytest\n    pytest_plugins = 'pytest_github_actions_annotate_failures'\n\n    def test_fail():\n        assert 0\n    ")
    # Pretend we are inside a GitHub Actions job with a custom run path.
    testdir.monkeypatch.setenv('GITHUB_ACTIONS', 'true')
    testdir.monkeypatch.setenv('PYTEST_RUN_PATH', 'some_path')
    result = testdir.runpytest_subprocess()
    # The annotation goes to stderr in the workflow-command format.
    result.stderr.fnmatch_lines(['::error file=some_path/test_annotation_fail_runpath.py,line=5::test_fail*assert 0*'])
def test_transform_pipeline_radians():
    """Round-trip ECEF coordinates through a cart/unitconvert pipeline with
    radians=True, in both the forward and inverse directions."""
    trans = Transformer.from_pipeline('+proj=pipeline +step +inv +proj=cart +ellps=WGS84 +step +proj=unitconvert +xy_in=rad +xy_out=deg')
    assert_almost_equal(trans.transform((- 2704026.01), (- 4253051.81), 3895878.82, radians=True), ((- 2.), 0., (- 20.)))
    # Inverse direction should reproduce the original cartesian input.
    assert_almost_equal(trans.transform((- 2.), 0., (- 20.), radians=True, direction=TransformDirection.INVERSE), ((- 2704026.01), (- 4253051.81), 3895878.82))
def find_app_name(config_to_check: Optional[AppConfig], app_list: List[Dict[(str, Any)]]) -> str:
    """Resolve the human-readable app name for *config_to_check*.

    Matching runs in two passes: first an exact APP_ID + NAME_SPACE match,
    then (when the namespace is one of the equivalent ones) an APP_ID match
    against any equivalent namespace.  Falls back to APP_CAST for namespace 0
    and UNKNOWN_APP otherwise.
    """
    if (not config_to_check):
        return NO_APP_RUNNING
    # Pass 1: require the exact namespace.
    name = _match_app_name(app_list, config_to_check.APP_ID,
                           lambda ns: ns == config_to_check.NAME_SPACE)
    if (name is not None):
        return name
    # Pass 2: treat equivalent namespaces as interchangeable.
    if (config_to_check.NAME_SPACE in EQUIVALENT_NAME_SPACES):
        name = _match_app_name(app_list, config_to_check.APP_ID,
                               lambda ns: ns in EQUIVALENT_NAME_SPACES)
        if (name is not None):
            return name
    if (config_to_check.NAME_SPACE == 0):
        return APP_CAST
    return UNKNOWN_APP


def _match_app_name(app_list, app_id, ns_matches):
    """Return the name of the first app whose config matches *app_id* and whose
    namespace satisfies *ns_matches*; None when nothing matches.

    Handles both single-dict and list-of-dicts 'config' entries.
    """
    for app_def in app_list:
        config = app_def['config']
        if isinstance(config, list):
            candidates = config
        elif isinstance(config, dict):
            candidates = [config]
        else:
            continue
        for entry in candidates:
            if ((entry['APP_ID'] == app_id) and ns_matches(entry['NAME_SPACE'])):
                return app_def['name']
    return None
def select_algorithm():
    """Set the global algorithm `flag` and the parameter widgets (v2, v3, v4)
    according to the algorithm currently selected in the `v` widget.

    Unknown selections leave everything untouched, matching the original
    if/elif chain's behavior.
    """
    global flag
    # Per-algorithm presets: (flag id, v2 default, v3 default, v4 default).
    presets = {
        'HANP-Miner': (1, 2500, 0, 3),
        'HANP-df': (2, 2500, 0, 3),
        'HANP-bf': (3, 2500, 0, 3),
        'HANP-nogap': (4, 2500, 0, 0),
        'NOSEP': (5, 900, 0, 3),
    }
    s = v.get()
    if s in presets:
        (flag, val2, val3, val4) = presets[s]
        v2.set(val2)
        v3.set(val3)
        v4.set(val4)
        print(('Function: ' + s))
def test_ScanArgs_remove_nonseq_inner_input():
    """Removing a non-sequence inner input (Gamma) from ScanArgs must also
    remove its dependents (the S terms) while leaving the sequence inputs
    (mus, sigmas) and the shared outputs intact."""
    hmm_model_env = create_test_hmm()
    scan_args = hmm_model_env['scan_args']
    # NOTE(review): the three bare lookups below are unused expressions --
    # possibly residue of stripped assignments; confirm against upstream.
    hmm_model_env['scan_op']
    hmm_model_env['Y_t']
    hmm_model_env['Y_rv']
    mus_in = hmm_model_env['mus_in']
    mus_t = hmm_model_env['mus_t']
    sigmas_in = hmm_model_env['sigmas_in']
    sigmas_t = hmm_model_env['sigmas_t']
    Gamma_rv = hmm_model_env['Gamma_rv']
    Gamma_in = hmm_model_env['Gamma_in']
    S_in = hmm_model_env['S_in']
    S_t = hmm_model_env['S_t']
    rng_in = hmm_model_env['rng_in']
    scan_updates = hmm_model_env['scan_updates']
    # Work on a copy so the original scan_args can be compared afterwards.
    scan_args_copy = copy(scan_args)
    test_v = Gamma_in
    rm_info = scan_args_copy.remove_from_fields(test_v, rm_dependents=True)
    (removed_nodes, _) = zip(*rm_info)
    # Gamma and everything depending on it must be gone.
    assert (Gamma_in in removed_nodes)
    assert (Gamma_rv in removed_nodes)
    assert (S_in in removed_nodes)
    assert (S_t in removed_nodes)
    # Independent sequence inputs must survive on both levels.
    assert (mus_in in scan_args_copy.outer_in_seqs)
    assert (sigmas_in in scan_args_copy.outer_in_seqs)
    assert (mus_t in scan_args_copy.inner_in_seqs)
    assert (sigmas_t in scan_args_copy.inner_in_seqs)
    assert (rng_in not in scan_args_copy.inner_out_shared)
    assert (list(scan_updates.values()) == scan_args.outer_out_shared)
class SpecField(SemVerField):
    """Django model field storing a semantic-version specification list."""

    default_error_messages = {'invalid': _('Enter a valid version number spec list in ==X.Y.Z,>=A.B.C format.')}
    description = _('Version specification list')

    def __init__(self, *args, **kwargs):
        # Spec syntax is configurable per field; default comes from the base module.
        self.syntax = kwargs.pop('syntax', base.DEFAULT_SYNTAX)
        super(SpecField, self).__init__(*args, **kwargs)

    def deconstruct(self):
        name, path, args, kwargs = super(SpecField, self).deconstruct()
        # Record the syntax only when it differs from the default, keeping
        # generated migrations minimal.
        if self.syntax != base.DEFAULT_SYNTAX:
            kwargs['syntax'] = self.syntax
        return (name, path, args, kwargs)

    def to_python(self, value):
        # Pass empties and already-parsed specs through unchanged.
        if value is None or value == '':
            return value
        if isinstance(value, base.BaseSpec):
            return value
        return base.BaseSpec.parse(value, syntax=self.syntax)
def list_triggers(name, location='\\'):
pythoncom.CoInitialize()
task_service = win32com.client.Dispatch('Schedule.Service')
task_service.Connect()
task_folder = task_service.GetFolder(location)
task_definition = task_folder.GetTask(name).Definition
triggers = task_definition.Triggers
ret = []
for trigger in triggers:
ret.append(trigger.Id)
return ret |
class DiskCOW(COW):
    """Copy-on-write layer for an NBD disk image backed by a per-client file."""
    def __init__(self, addr, imagefd, logger, seek_lock):
        # (host, port) of the connected client; used to name the scratch file.
        self.addr = addr
        self.imagefd = imagefd
        # Lock serializing seek+read/write access to the shared image fd.
        self.seek_lock = seek_lock
        self.logger = helpers.get_child_logger(logger, 'FS')
        self.logger.info('Copy-On-Write for {addr} in PyPXE_NBD_COW_{addr[0]}_{addr[1]}'.format(addr=addr))
        # Per-client scratch file that receives written (dirty) data.
        self.fh = open('PyPXE_NBD_COW_{addr[0]}_{addr[1]}'.format(addr=addr), 'w+b')
        # Bookkeeping of pages copied into the scratch file (element format
        # defined by the read/write paths elsewhere in this class).
        self.pages = []
def process_prom_query(query):
    """Run *query* against the module-level Prometheus client, if available.

    Returns the client's result on success.  Implicitly returns None when the
    client is missing or the query fails -- callers must handle None.
    """
    if prom_cli:
        try:
            return prom_cli.custom_query(query=query, params=None)
        except Exception as e:
            logging.error(('Failed to get the metrics: %s' % e))
    else:
        # Best-effort: metrics collection is optional when no client exists.
        logging.info("Skipping the prometheus query as the prometheus client couldn't be initilized\n")
def _get_base_template(name, description, platform, sorting, domain, layer_settings):
    """Build the base ATT&CK Navigator layer dictionary.

    Applies recognized layout settings from *layer_settings* (validated
    against LAYER_SETTINGS / LAYER_LAYOUT_SETTINGS); unrecognized or invalid
    settings are silently ignored.  Key insertion order mirrors the original
    construction so serialized output is unchanged.
    """
    layer = {
        'name': name,
        'versions': {'navigator': ATTACK_NAVIGATOR_VERSION, 'layer': ATTACK_LAYER_VERSION},
    }
    # Optionally stamp the ATT&CK content version into the layer.
    if layer_settings.get('includeAttackVersion') == 'True':
        layer['versions']['attack'] = ATTACK_VERSION
    layer.update({
        'domain': domain,
        'description': description,
        'filters': {'platforms': platform},
        'sorting': sorting,
        'layout': {'layout': 'flat', 'aggregateFunction': 'sum', 'showAggregateScores': True, 'countUnscored': False, 'showName': True, 'showID': False},
    })
    for setting_key, setting_value in layer_settings.items():
        # Only whitelisted layout settings with whitelisted values apply.
        if setting_key not in LAYER_LAYOUT_SETTINGS:
            continue
        allowed_values = LAYER_SETTINGS.get(setting_key)
        if allowed_values is None:
            continue
        lowered = setting_value.lower()
        if lowered not in [val.lower() for val in allowed_values]:
            continue
        if lowered in ('true', 'false'):
            layer['layout'][setting_key] = (lowered == 'true')
        else:
            layer['layout'][setting_key] = lowered
    layer.update({
        'hideDisable': False,
        'selectSubtechniquesWithParent': False,
        'techniques': [],
        'showTacticRowBackground': False,
        'tacticRowBackground': COLOR_TACTIC_ROW_BACKGRND,
        'selectTechniquesAcrossTactics': True,
    })
    return layer
# NOTE(review): the three lines below look like the residue of stripped
# decorators (presumably click command/option decorators) -- confirm against
# version control; as written this is not runnable.
()
('-test-dep', is_flag=True, help='If to install test dependecies')
('-doc-dep', is_flag=True, help='If to install test dependecies')
def install_dependencies(test_dep=False, doc_dep=False):
    """Install the project's pip dependencies from its config.

    test_dep: also install the [test] optional-dependency group.
    doc_dep: also install the [doc] optional-dependency group.
    """
    config = util.get_config()
    default_dependencies = config['project']['dependencies']
    print('Installing dependencies', default_dependencies)
    util.run((['pip', 'install'] + list(default_dependencies)))
    if test_dep:
        test_dependencies = config['project.optional-dependencies']['test']
        print('Installing test-dependencies', test_dependencies)
        util.run((['pip', 'install'] + list(test_dependencies)))
    if doc_dep:
        doc_dependencies = config['project.optional-dependencies']['doc']
        print('Installing doc-dependencies', doc_dependencies)
        util.run((['pip', 'install'] + list(doc_dependencies)))
class PerturbationConfidenceMetric():
    """Measures how model confidence changes after a CAM-guided perturbation.

    The configured perturbation is applied per sample using its CAM; the
    metric is the difference between post- and pre-perturbation target scores
    (or the raw post-perturbation scores when return_diff is False).
    """

    def __init__(self, perturbation):
        # Callable (sample_tensor, cam_tensor) -> perturbed sample tensor.
        self.perturbation = perturbation

    def __call__(self, input_tensor: torch.Tensor, cams: np.ndarray, targets: List[Callable], model: torch.nn.Module, return_visualization=False, return_diff=True):
        if return_diff:
            # Baseline scores on the unperturbed input.
            with torch.no_grad():
                baseline_outputs = model(input_tensor)
                baseline_scores = np.float32([t(o).cpu().numpy() for (t, o) in zip(targets, baseline_outputs)])
        # Perturb each sample individually with its own CAM.
        perturbed_samples = []
        for idx in range(input_tensor.size(0)):
            sample = self.perturbation(input_tensor[(idx, ...)].cpu(), torch.from_numpy(cams[idx]))
            perturbed_samples.append(sample.to(input_tensor.device).unsqueeze(0))
        perturbed_batch = torch.cat(perturbed_samples)
        with torch.no_grad():
            new_outputs = model(perturbed_batch)
            new_scores = np.float32([t(o).cpu().numpy() for (t, o) in zip(targets, new_outputs)])
        result = (new_scores - baseline_scores) if return_diff else new_scores
        if return_visualization:
            return (result, perturbed_batch)
        return result
def __getdirlist(path, cache_tag, maxage, mask):
    """Fetch a (possibly cached) directory listing for *path*; None on failure."""
    try:
        result = ops.files.dirs.get_dirlisting(path, cache_tag=cache_tag, maxage=maxage, mask=mask)
    except OpsCommandException:
        # Expected operational failure: log quietly and fall through to None.
        psplog.debug(('Dir list failed (%s).' % path), exc_info=True)
        result = None
    except:
        # Deliberate catch-all: a listing failure must never crash the caller.
        psplog.debug(('Unexpected error trying to get dir list (%s).' % path), exc_info=True)
        result = None
    return result
class SepConvGRU(nn.Module):
    """Separable convolutional GRU cell: one GRU update with horizontal (1x5)
    gate convolutions followed by one with vertical (5x1) convolutions."""

    def __init__(self, hidden_dim=128, input_dim=(192 + 128)):
        super(SepConvGRU, self).__init__()
        combined = hidden_dim + input_dim
        # Horizontal (1x5) gate convolutions.
        self.convz1 = nn.Conv2d(combined, hidden_dim, (1, 5), padding=(0, 2))
        self.convr1 = nn.Conv2d(combined, hidden_dim, (1, 5), padding=(0, 2))
        self.convq1 = nn.Conv2d(combined, hidden_dim, (1, 5), padding=(0, 2))
        # Vertical (5x1) gate convolutions.
        self.convz2 = nn.Conv2d(combined, hidden_dim, (5, 1), padding=(2, 0))
        self.convr2 = nn.Conv2d(combined, hidden_dim, (5, 1), padding=(2, 0))
        self.convq2 = nn.Conv2d(combined, hidden_dim, (5, 1), padding=(2, 0))

    def _gru_step(self, h, x, convz, convr, convq):
        # Standard GRU update using the supplied gate convolutions.
        hx = torch.cat([h, x], dim=1)
        z = torch.sigmoid(convz(hx))
        r = torch.sigmoid(convr(hx))
        q = torch.tanh(convq(torch.cat([r * h, x], dim=1)))
        return (1 - z) * h + z * q

    def forward(self, h, x):
        h = self._gru_step(h, x, self.convz1, self.convr1, self.convq1)
        h = self._gru_step(h, x, self.convz2, self.convr2, self.convq2)
        return h
def forwards(apps, schema_editor):
    """Data migration: backfill each Flight's cached click/view totals from the
    per-advertisement impression aggregates."""
    Flight = apps.get_model('adserver', 'Flight')
    for flight in Flight.objects.all().annotate(flight_total_clicks=models.Sum(models.F('advertisements__impressions__clicks')), flight_total_views=models.Sum(models.F('advertisements__impressions__views'))):
        # Sum() yields None for flights with no impressions; store 0 instead.
        flight.total_clicks = (flight.flight_total_clicks or 0)
        flight.total_views = (flight.flight_total_views or 0)
        flight.save()
def render_pep440_old(pieces: Dict[(str, Any)]) -> str:
    """Render version pieces in the old PEP 440 post-release style.

    TAG[.postDISTANCE[.dev0]] when a tag is known; otherwise
    0.postDISTANCE[.dev0].  The .dev0 suffix marks a dirty working tree.
    """
    parts = []
    if pieces['closest-tag']:
        parts.append(pieces['closest-tag'])
        if pieces['distance'] or pieces['dirty']:
            parts.append('.post%d' % pieces['distance'])
            if pieces['dirty']:
                parts.append('.dev0')
    else:
        # No tag at all: synthesize a 0-based post-release.
        parts.append('0.post%d' % pieces['distance'])
        if pieces['dirty']:
            parts.append('.dev0')
    return ''.join(parts)
def main():
    """Distributed evaluation entry point: initialize DDP, build the trainer
    and per-dataset test loaders, then load each dataset's best checkpoint."""
    args = parse_args()
    if args.distributed:
        torch.cuda.set_device(args.local_rank)
        dist.init_process_group(backend='nccl', init_method='env://')
        assert dist.is_initialized(), 'distributed is not initialized'
    # NOTE(review): dist.get_rank()/broadcast_object_list below run even when
    # args.distributed is False -- presumably this script is always launched
    # in distributed mode; confirm.
    if (dist.get_rank() == 0):
        # Rank 0 creates the output directories and shares them with all ranks.
        make_folder(cfg.model_dir)
        make_folder(cfg.vis_dir)
        make_folder(cfg.log_dir)
        make_folder(cfg.result_dir)
        dirs = [cfg.model_dir, cfg.vis_dir, cfg.log_dir, cfg.result_dir]
    else:
        dirs = [None, None, None, None]
    dist.broadcast_object_list(dirs, src=0)
    (cfg.model_dir, cfg.vis_dir, cfg.log_dir, cfg.result_dir) = dirs
    setup_seed()
    if (dist.get_rank() == 0):
        cfg.set_args(args.continue_train, resume_ckpt=args.resume_ckpt)
    if args.cfg:
        yml_cfg = cfg.update(args)
    trainer = Trainer(cfg)
    trainer._make_model()
    # Build one test loader per dataset tracked in best_dict.
    test_dataset_dict = {}
    for dataset_name in best_dict:
        if ('3dpw' in dataset_name):
            testset_loader = PW3D(transforms.ToTensor(), data_name=dataset_name)
        else:
            testset_loader = CMU_Panotic()
        if cfg.distributed:
            testset_sampler = torch.utils.data.distributed.DistributedSampler(testset_loader)
        else:
            testset_sampler = None
        test_batch_generator = DataLoader(dataset=testset_loader, batch_size=cfg.test_batch_size, shuffle=False, num_workers=cfg.num_thread, pin_memory=True, sampler=testset_sampler)
        test_dataset_dict[dataset_name] = {'loader': test_batch_generator, 'dataset': testset_loader}
    # Load the best checkpoint for each dataset and switch to eval mode.
    for data_name in best_dict.keys():
        ckpt_path = os.path.join('./checkpoint', '{}_best_ckpt.pth.tar'.format(data_name))
        ckpt = torch.load(ckpt_path, map_location='cpu')
        trainer.model.load_state_dict(ckpt)
        trainer.model.eval()
def conv_init(m):
    """Weight initializer intended for use with ``model.apply``.

    Conv layers get Xavier-uniform weights (gain sqrt(2)) and zero bias;
    BatchNorm layers get unit weight and zero bias.  Other modules are left
    untouched.
    """
    classname = m.__class__.__name__
    if (classname.find('Conv') != (- 1)):
        # Use the non-deprecated in-place initializers (same behavior as the
        # old xavier_uniform/constant aliases).
        init.xavier_uniform_(m.weight, gain=np.sqrt(2))
        # BUG FIX: conv layers built with bias=False have m.bias == None and
        # previously crashed here.
        if (m.bias is not None):
            init.constant_(m.bias, 0)
    elif (classname.find('BatchNorm') != (- 1)):
        init.constant_(m.weight, 1)
        init.constant_(m.bias, 0)
def check_new_shows(config, db, update_db=True):
    """Scan the season sites for shows not yet linked in the database.

    New shows are added (and linked) when *update_db* is True; shows whose
    type is disallowed by the config, or that match more than one existing
    show, are skipped.
    """
    info('Checking for new shows')
    for raw_show in _get_new_season_shows(config, db):
        # Filter by show type, unless the type could not be determined.
        if ((raw_show.show_type is not ShowType.UNKNOWN) and (raw_show.show_type not in config.new_show_types)):
            debug("  Show isn't an allowed type ({})".format(raw_show.show_type))
            debug('  name={}'.format(raw_show.name))
            continue
        if (not db.has_link(raw_show.site_key, raw_show.show_key)):
            debug('New show link: {} on {}'.format(raw_show.show_key, raw_show.site_key))
            # Try to match against shows already known under any of its names.
            shows = db.search_show_ids_by_names(raw_show.name, *raw_show.more_names)
            show_id = None
            if (len(shows) == 0):
                debug('  Show not found, adding to database')
                if update_db:
                    show_id = db.add_show(raw_show, commit=False)
            elif (len(shows) == 1):
                show_id = shows.pop()
            else:
                # Ambiguous match: leave it for manual resolution.
                warning('  More than one show found, ids={}'.format(shows))
            if (show_id and update_db):
                db.add_link(raw_show, show_id, commit=False)
    # All adds/links above are uncommitted; commit them in one batch.
    if update_db:
        db.commit()
class WireMessage(Message):
    """Raw wire-format message: a (type byte, payload) pair."""
    def __init__(self, typ_data):
        # typ_data is (type_bytes, payload); the first byte selects the
        # concrete message type from the message_types table.
        self.type = message_types[typ_data[0][0]]
        self.data = typ_data[1]
    def serialize(self):
        # NOTE(review): returns self[1]; relies on indexing support from the
        # Message base class -- confirm this is intended rather than self.data.
        return self[1]
    # NOTE(review): takes the type as its first argument, so this was
    # presumably decorated (@classmethod/@staticmethod) originally -- confirm.
    def parse(typ, data):
        # The 4-byte size stamp at offset 1 must equal the remaining length.
        if (ulong_unpack(data[1:5]) != (len(data) - 1)):
            raise ValueError(('invalid wire message where data is %d bytes and internal size stamp is %d bytes' % (len(data), (ulong_unpack(data[1:5]) + 1))))
        return typ((data[0:1], data[5:]))
def run(command):
    """Execute *command* through the shell and capture its output.

    Returns a (returncode, stdout, stderr) tuple with both streams decoded
    using the locale's preferred encoding and stripped of surrounding
    whitespace.
    """
    proc = subprocess.Popen(command, stdout=subprocess.PIPE, stderr=subprocess.PIPE, shell=True)
    stdout_data, stderr_data = proc.communicate()
    encoding = locale.getpreferredencoding()
    return (proc.returncode, stdout_data.decode(encoding).strip(), stderr_data.decode(encoding).strip())
# NOTE(review): '_ignore_inferred' below looks like the residue of a stripped
# decorator -- confirm against version control.
_ignore_inferred
def infer_parameter_objects(pyfunction):
    """Infer the objects passed to *pyfunction*'s parameters.

    Uses recorded object information when available, otherwise falls back to
    static defaults, and always normalizes the first parameter (self/cls).
    """
    object_info = pyfunction.pycore.object_info
    result = object_info.get_parameter_objects(pyfunction)
    if (result is None):
        result = _parameter_objects(pyfunction)
    _handle_first_parameter(pyfunction, result)
    return result
def floquet_master_equation_rates(f_modes_0, f_energies, c_op, H, T, args, J_cb, w_th, kmax=5, f_modes_table_t=None):
    """Deprecated shim computing Floquet master-equation rate tensors.

    Returns (delta, X, gamma, A).  The f_modes_0/f_energies/f_modes_table_t
    parameters are unused and kept only for the legacy signature.
    """
    warnings.warn(FutureWarning('`floquet_master_equation_rates` is deprecated.'))
    floquet_basis = FloquetBasis(H, T, args=args)
    energy = floquet_basis.e_quasi
    delta = floquet_delta_tensor(energy, kmax, T)
    # NOTE(review): `nT` is not defined in this function or its parameters --
    # this line raises NameError as written; confirm the intended time-grid
    # argument before use.
    x = floquet_X_matrices(floquet_basis, [c_op], kmax, nT)
    gamma = floquet_gamma_matrices(x, delta, [J_cb])
    a = floquet_A_matrix(delta, gamma, w_th)
    return (delta, x[0], gamma, a)
def fit_to_rect(frame, size, halign='center', valign='center'):
    """Return a QRectF with the aspect ratio of *size*, fitted inside *frame*.

    The frame is first inset by one pixel on its top and left edges.  When
    the frame is relatively wider than the target, heights are matched and
    *halign* positions the result; otherwise widths are matched and *valign*
    positions it.
    """
    (fl, ft, fw, fh) = to_rect(frame)
    (rw, rh) = (size.width(), size.height())
    # One-pixel inset on the top and left edges.
    ft += 1
    fh -= 1
    fl += 1
    fw -= 1
    frame_aspect = fh / fw
    rect_aspect = rh / rw
    if frame_aspect <= rect_aspect:
        # Frame is relatively wider: match heights, place horizontally.
        rh = fh
        rw = rh / rect_aspect
        if halign == 'left':
            rl = fl
        elif halign == 'center':
            rl = fl + 0.5 * (fw - rw)
        elif halign == 'right':
            rl = fl + fw - rw
        rt = ft
    else:
        # Frame is relatively taller: match widths, place vertically.
        rw = fw
        rh = rw * rect_aspect
        rl = fl
        if valign == 'top':
            rt = ft
        elif valign == 'center':
            rt = ft + 0.5 * (fh - rh)
        elif valign == 'bottom':
            rt = ft + fh - rh
    return qc.QRectF(rl, rt, rw, rh)
class ChannelShuffle2(nn.Module):
    """nn.Module wrapper around the channel_shuffle2 function."""
    def __init__(self, channels, groups):
        super(ChannelShuffle2, self).__init__()
        # channels is only validated here; the shuffle itself needs just groups.
        if ((channels % groups) != 0):
            raise ValueError('channels must be divisible by groups')
        self.groups = groups
    def forward(self, x):
        return channel_shuffle2(x, self.groups)
def make_valid_xml_name(key, attr):
    """Coerce *key* into a usable XML element name, adjusting *attr* as needed.

    Fallback chain: escape; keep as-is if already valid; prefix digits with
    'n'; try a float render prefixed with 'n'; replace spaces with
    underscores; and finally use the literal element name 'key' while moving
    the original key into attr['name'].
    """
    LOG.info(('Inside make_valid_xml_name(). Testing key "%s" with attr "%s"' % (unicode_me(key), unicode_me(attr))))
    key = escape_xml(key)
    attr = escape_xml(attr)
    if key_is_valid_xml(key):
        return (key, attr)
    if str(key).isdigit():
        return (('n%s' % key), attr)
    try:
        # NOTE(review): floats render as e.g. 'n1.5'; the '.' may still be an
        # invalid XML name character -- confirm key_is_valid_xml coverage.
        return (('n%s' % float(str(key))), attr)
    except ValueError:
        pass
    if key_is_valid_xml(key.replace(' ', '_')):
        return (key.replace(' ', '_'), attr)
    # Last resort: generic element name, original key preserved as attribute.
    attr['name'] = key
    key = 'key'
    return (key, attr)
# NOTE(review): '_safe' below looks like the residue of a stripped decorator
# (an RPython/pycket annotation) -- confirm against version control.
_safe
def impersonate_vector_reference_cont(f, extra, inner, idx, app, env, cont, _vals):
    """Continuation for an impersonated vector reference: forwards the produced
    values to the impersonator's interposition procedure *f*."""
    numargs = _vals.num_values()
    # Argument layout: [inner_vector, index, value0, value1, ...].
    # (Manual index loop kept -- this style is typical for RPython translation.)
    args = ([None] * (numargs + 2))
    args[0] = inner
    args[1] = idx
    for i in range(numargs):
        args[(i + 2)] = _vals.get_value(i)
    if (extra is None):
        return f.call_with_extra_info(args, env, cont, app)
    else:
        # An extra context value, when present, is passed first.
        return f.call_with_extra_info(([extra] + args), env, cont, app)
def show_banner():
    """Print the ASCII banner in random colors if the terminal is wide enough."""
    colors = ['bright_red', 'bright_green', 'bright_blue', 'cyan', 'magenta']
    try:
        # Probe whether this click version supports the 'bright_*' color names.
        click.style('color test', fg='bright_red')
    except:
        # Deliberate catch-all: fall back to the basic palette.
        colors = ['red', 'green', 'blue', 'cyan', 'magenta']
    try:
        columns = get_terminal_size().columns
        # Skip the banner entirely when it would wrap.
        if (columns >= len(banner.splitlines()[1])):
            for line in banner.splitlines():
                click.secho(line, fg=random.choice(colors))
    except:
        # Banner display is purely cosmetic -- never let it break startup.
        pass
def test_request_scope_covers_blueprint_teardown_request_handlers():
    """Request-scoped bindings must still resolve inside a blueprint's
    teardown_request handler."""
    app = Flask(__name__)
    UserID = NewType('UserID', int)
    blueprint = Blueprint('blueprint', __name__)
    # NOTE(review): the bare "('/')" and "_request" lines below look like the
    # residue of stripped decorators (blueprint.route('/') and
    # blueprint.teardown_request) -- confirm against version control.
    ('/')
    def index():
        return 'hello'
    _request
    def on_teardown(exc, user_id: UserID):
        # Runs at teardown: the request-scoped UserID must still be injectable.
        assert (user_id == 321)
    def configure(binder):
        binder.bind(UserID, to=321, scope=request)
    app.register_blueprint(blueprint)
    FlaskInjector(app=app, modules=[configure])
    client = app.test_client()
    response = client.get('/')
    assert (response.data.decode() == 'hello')
def intraday_volatility(returns: ReturnsSeries, interval_in_minutes: int) -> float:
    """Annualize the volatility of returns sampled every *interval_in_minutes*.

    Scales the per-interval standard deviation by the square root of the
    number of intervals in a trading year (390-minute trading day, 252
    business days).
    """
    MINUTES_IN_TRADING_DAY = 390
    BUSINESS_DAYS_PER_YEAR = 252
    intervals_per_day = MINUTES_IN_TRADING_DAY / interval_in_minutes
    annualization_factor = sqrt(intervals_per_day * BUSINESS_DAYS_PER_YEAR)
    return std(returns.values) * annualization_factor
def test_structuring_unstructuring_unknown_subclass():
    """Subclasses defined *after* include_subclasses registration still
    unstructure with all their fields, but structure back only as the newest
    subclass known at registration time (A1)."""
    # NOTE(review): these class bodies are bare annotations with no visible
    # attrs/dataclass decorator -- presumably @define was stripped; as written
    # A2(1, 2, 3) would not construct. Confirm against upstream.
    class A():
        a: int
    class A1(A):
        a1: int
    converter = Converter()
    include_subclasses(A, converter)
    # A2 is declared after registration, so the converter has no hook for it.
    class A2(A1):
        a2: int
    assert (converter.unstructure(A2(1, 2, 3), unstructure_as=A) == {'a': 1, 'a1': 2, 'a2': 3})
    assert (converter.unstructure(A2(1, 2, 3), unstructure_as=A1) == {'a': 1, 'a1': 2, 'a2': 3})
    # Structuring ignores the unknown 'a2' key and yields an A1.
    assert (converter.structure({'a': 1, 'a1': 2, 'a2': 3}, A) == A1(1, 2))
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.