code stringlengths 281 23.7M |
|---|
class HydrogenIntegrationTest(unittest.TestCase):
    """Integration tests for H2 in the STO-3G basis.

    Checks the loaded molecular integrals, the Jordan-Wigner qubit
    Hamiltonian coefficients, RDM-based energies, and sparse-matrix
    energies against reference values and against each other.
    """

    def setUp(self):
        # Load precomputed molecular data for H2 at the equilibrium bond
        # length (0.7414 Angstrom) and build the operator representations
        # shared by all tests below.
        geometry = [('H', (0.0, 0.0, 0.0)), ('H', (0.0, 0.0, 0.7414))]
        basis = 'sto-3g'
        multiplicity = 1
        filename = os.path.join(DATA_DIRECTORY, 'H2_sto-3g_singlet_0.7414')
        self.molecule = MolecularData(geometry, basis, multiplicity, filename=filename)
        self.molecule.load()
        self.molecular_hamiltonian = self.molecule.get_molecular_hamiltonian()
        self.fci_rdm = self.molecule.get_molecular_rdm(use_fci=1)
        self.nuclear_repulsion = self.molecular_hamiltonian.constant
        self.one_body = self.molecular_hamiltonian.one_body_tensor
        self.two_body = self.molecular_hamiltonian.two_body_tensor
        self.fermion_hamiltonian = normal_ordered(get_fermion_operator(self.molecular_hamiltonian))
        self.qubit_hamiltonian = jordan_wigner(self.fermion_hamiltonian)
        self.hamiltonian_matrix = get_sparse_operator(self.molecular_hamiltonian)

    def test_integral_data(self):
        """One- and two-body integrals match known H2/STO-3G values."""
        # Reference values in Hartree; the 1/2 factor reflects the
        # normalization convention of the stored two-body tensor.
        g0 = 0.71375
        g1 = -1.2525
        g2 = -0.47593
        g3 = 0.67449 / 2.0
        g4 = 0.6974 / 2.0
        g5 = 0.66347 / 2.0
        g6 = 0.18129 / 2.0
        self.assertAlmostEqual(self.nuclear_repulsion, g0, places=4)
        self.assertAlmostEqual(self.one_body[(0, 0)], g1, places=4)
        self.assertAlmostEqual(self.one_body[(1, 1)], g1, places=4)
        self.assertAlmostEqual(self.one_body[(2, 2)], g2, places=4)
        self.assertAlmostEqual(self.one_body[(3, 3)], g2, places=4)
        self.assertAlmostEqual(self.two_body[(0, 1, 1, 0)], g3, places=4)
        self.assertAlmostEqual(self.two_body[(1, 0, 0, 1)], g3, places=4)
        self.assertAlmostEqual(self.two_body[(2, 3, 3, 2)], g4, places=4)
        self.assertAlmostEqual(self.two_body[(3, 2, 2, 3)], g4, places=4)
        self.assertAlmostEqual(self.two_body[(0, 2, 2, 0)], g5, places=4)
        self.assertAlmostEqual(self.two_body[(0, 3, 3, 0)], g5, places=4)
        self.assertAlmostEqual(self.two_body[(1, 2, 2, 1)], g5, places=4)
        self.assertAlmostEqual(self.two_body[(1, 3, 3, 1)], g5, places=4)
        self.assertAlmostEqual(self.two_body[(2, 0, 0, 2)], g5, places=4)
        self.assertAlmostEqual(self.two_body[(3, 0, 0, 3)], g5, places=4)
        self.assertAlmostEqual(self.two_body[(2, 1, 1, 2)], g5, places=4)
        self.assertAlmostEqual(self.two_body[(3, 1, 1, 3)], g5, places=4)
        self.assertAlmostEqual(self.two_body[(0, 2, 0, 2)], g6, places=4)
        self.assertAlmostEqual(self.two_body[(1, 3, 1, 3)], g6, places=4)
        self.assertAlmostEqual(self.two_body[(2, 1, 3, 0)], g6, places=4)
        self.assertAlmostEqual(self.two_body[(2, 3, 1, 0)], g6, places=4)
        self.assertAlmostEqual(self.two_body[(0, 3, 1, 2)], g6, places=4)
        self.assertAlmostEqual(self.two_body[(0, 1, 3, 2)], g6, places=4)

    def test_qubit_operator(self):
        """Jordan-Wigner coefficients match the known H2 qubit Hamiltonian."""
        f1 = 0.1712
        f2 = -0.2228
        f3 = 0.1686
        f4 = 0.1205
        f5 = 0.1659
        f6 = 0.1743
        f7 = 0.04532
        self.assertAlmostEqual(self.qubit_hamiltonian.terms[((0, 'Z'),)], f1, places=4)
        self.assertAlmostEqual(self.qubit_hamiltonian.terms[((1, 'Z'),)], f1, places=4)
        self.assertAlmostEqual(self.qubit_hamiltonian.terms[((2, 'Z'),)], f2, places=4)
        self.assertAlmostEqual(self.qubit_hamiltonian.terms[((3, 'Z'),)], f2, places=4)
        self.assertAlmostEqual(self.qubit_hamiltonian.terms[((0, 'Z'), (1, 'Z'))], f3, places=4)
        self.assertAlmostEqual(self.qubit_hamiltonian.terms[((0, 'Z'), (2, 'Z'))], f4, places=4)
        self.assertAlmostEqual(self.qubit_hamiltonian.terms[((1, 'Z'), (3, 'Z'))], f4, places=4)
        self.assertAlmostEqual(self.qubit_hamiltonian.terms[((1, 'Z'), (2, 'Z'))], f5, places=4)
        self.assertAlmostEqual(self.qubit_hamiltonian.terms[((0, 'Z'), (3, 'Z'))], f5, places=4)
        self.assertAlmostEqual(self.qubit_hamiltonian.terms[((2, 'Z'), (3, 'Z'))], f6, places=4)
        self.assertAlmostEqual(self.qubit_hamiltonian.terms[((0, 'Y'), (1, 'Y'), (2, 'X'), (3, 'X'))], -f7, places=4)
        self.assertAlmostEqual(self.qubit_hamiltonian.terms[((0, 'X'), (1, 'X'), (2, 'Y'), (3, 'Y'))], -f7, places=4)
        self.assertAlmostEqual(self.qubit_hamiltonian.terms[((0, 'X'), (1, 'Y'), (2, 'Y'), (3, 'X'))], f7, places=4)
        self.assertAlmostEqual(self.qubit_hamiltonian.terms[((0, 'Y'), (1, 'X'), (2, 'X'), (3, 'Y'))], f7, places=4)

    def test_reverse_jordan_wigner(self):
        """Reversing the JW transform recovers the fermionic Hamiltonian."""
        fermion_hamiltonian = reverse_jordan_wigner(self.qubit_hamiltonian)
        fermion_hamiltonian = normal_ordered(fermion_hamiltonian)
        self.assertTrue(self.fermion_hamiltonian == fermion_hamiltonian)

    def test_interaction_operator_mapping(self):
        """Fermion <-> InteractionOperator round trips are lossless."""
        molecular_hamiltonian = get_interaction_operator(self.fermion_hamiltonian)
        fermion_hamiltonian = get_fermion_operator(molecular_hamiltonian)
        self.assertTrue(self.fermion_hamiltonian == fermion_hamiltonian)
        qubit_hamiltonian = jordan_wigner(self.molecular_hamiltonian)
        self.assertTrue(self.qubit_hamiltonian == qubit_hamiltonian)

    def test_rdm_numerically(self):
        """FCI energy is reproduced by RDM contractions in three representations."""
        # 1) Direct contraction of the FCI RDM with the Hamiltonian tensors.
        fci_rdm_energy = self.nuclear_repulsion
        fci_rdm_energy += numpy.sum(self.fci_rdm.one_body_tensor * self.molecular_hamiltonian.one_body_tensor)
        fci_rdm_energy += numpy.sum(self.fci_rdm.two_body_tensor * self.molecular_hamiltonian.two_body_tensor)
        self.assertAlmostEqual(fci_rdm_energy, self.molecule.fci_energy)
        # 2) Sum of qubit-operator expectation values.
        qubit_rdm = self.fci_rdm.get_qubit_expectations(self.qubit_hamiltonian)
        qubit_energy = 0.0
        for term, expectation_value in qubit_rdm.terms.items():
            qubit_energy += expectation_value * self.qubit_hamiltonian.terms[term]
        self.assertAlmostEqual(qubit_energy, self.molecule.fci_energy)
        # 3) Round trip: rebuild a fermionic RDM from the qubit expectations
        # and check its energy too.  (Fixed: the original discarded the
        # result of expectation() and re-asserted the value already checked
        # in step 1, so the round trip was never actually verified.)
        new_fermi_rdm = get_interaction_rdm(qubit_rdm)
        new_rdm_energy = new_fermi_rdm.expectation(self.molecular_hamiltonian)
        self.assertAlmostEqual(numpy.real(new_rdm_energy), self.molecule.fci_energy)

    def test_sparse_numerically(self):
        """Sparse-matrix ground state and HF expectations match stored energies."""
        energy, wavefunction = get_ground_state(self.hamiltonian_matrix)
        self.assertAlmostEqual(energy, self.molecule.fci_energy)
        expected_energy = expectation(self.hamiltonian_matrix, wavefunction)
        self.assertAlmostEqual(expected_energy, energy)
        # Hartree-Fock energy from both a state vector and a density matrix.
        # (Fixed: the original computed hf_state and hf_density twice.)
        hf_state = jw_hartree_fock_state(self.molecule.n_electrons, count_qubits(self.qubit_hamiltonian))
        hf_density = get_density_matrix([hf_state], [1.0])
        expected_hf_density_energy = expectation(self.hamiltonian_matrix, hf_density)
        expected_hf_energy = expectation(self.hamiltonian_matrix, hf_state)
        self.assertAlmostEqual(expected_hf_energy, self.molecule.hf_energy)
        self.assertAlmostEqual(expected_hf_density_energy, self.molecule.hf_energy)
def getElementRotation(obj, reverse=False):
    """Return a FreeCAD.Rotation aligning +Z with the axis/normal of the
    face of *obj*, falling back to an edge-derived rotation.

    obj: object accepted by getElementShape().
    reverse: flip the axis direction; toggled again when the face
        orientation is 'Reversed'.
    """
    axis = None
    face = getElementShape(obj, Part.Face)
    if (not face):
        # No face available: try an edge, otherwise return identity.
        edge = getElementShape(obj, Part.Edge)
        if edge:
            return getEdgeRotation(edge, reverse)
        return FreeCAD.Rotation()
    else:
        if (face.Orientation == 'Reversed'):
            reverse = (not reverse)
        surface = face.Surface
        # Unwrap to the underlying basis surface when present (e.g. for
        # offset/trimmed surfaces).
        base = getattr(surface, 'BasisSurface', None)
        if base:
            surface = base
        # Some surface types expose a usable rotation directly.
        rot = getattr(surface, 'Rotation', None)
        if rot:
            return rot
        if isinstance(surface, Part.SurfaceOfRevolution):
            return getEdgeRotation(face.Edge1, reverse)
        # Work in the face's local frame: remember the placement, reset it,
        # then re-read the surface so the queries below are placement-free.
        pla = face.Placement
        face.Placement = FreeCAD.Placement()
        surface = face.Surface
        if hasattr(surface, 'Axis'):
            axis = surface.Axis
        else:
            pln = face.findPlane()
            if pln:
                axis = pln.Axis
        if (not axis):
            # Try fitting a rotation axis to the surface; accept it only if
            # the fit error is tiny relative to the face's overall size.
            (axis_fitted, _center, error) = fit_rotation_axis_to_surface1(face.Surface)
            error_normalized = (error / face.BoundBox.DiagonalLength)
            if (error_normalized < (10 ** (- 6))):
                axis = FreeCAD.Vector(axis_fitted)
        if (not axis):
            # Last resort: surface normal at the bounding-box centre.
            param = surface.parameter(face.BoundBox.Center)
            axis = surface.normal(*param)
        rot = FreeCAD.Rotation(FreeCAD.Vector(0, 0, ((- 1) if reverse else 1)), axis)
        # Re-apply the rotation of the original placement saved above.
        return (pla.Rotation * rot)
class StubOutForTestingTest(unittest.TestCase):
    """Tests for mox3_stubout.StubOutForTesting.

    Covers stubbing functions, classes, modules and submodules via both
    set()/unset_all() and smart_set()/smart_unset_all(), and the
    AttributeError raised for unknown attributes.
    """

    def setUp(self):
        super(StubOutForTestingTest, self).setUp()
        self.stubber = mox3_stubout.StubOutForTesting()

    def test_stubout_method_with_set(self):
        """set() replaces a module-level function; unset_all() restores it."""
        non_existing_path = 'non_existing_path'
        self.assertFalse(mox3_stubout_example.check_if_exists(non_existing_path))
        self.stubber.set(os.path, 'exists', (lambda x: True))
        self.assertTrue(mox3_stubout_example.check_if_exists(non_existing_path))
        self.stubber.unset_all()
        self.assertFalse(mox3_stubout_example.check_if_exists(non_existing_path))

    def test_stubout_class_with_set(self):
        """set() replaces a class; unset_all() restores the original."""
        self.assertGreater(mox3_stubout_example.tomorrow().year, 2000)
        self.stubber.set(datetime, 'date', GroundhogDate)
        self.assertEqual(mox3_stubout_example.tomorrow(), datetime.date(1993, 2, 3))
        self.stubber.unset_all()
        self.assertGreater(mox3_stubout_example.tomorrow().year, 2000)

    def test_stubout_module_with_set(self):
        """set() replaces an entire module reference."""
        self.assertEqual(10, mox3_stubout_example.fabs((- 10)))
        self.stubber.set(mox3_stubout_example, 'math', NoPanicMath)
        self.assertEqual(42, mox3_stubout_example.fabs((- 10)))
        self.stubber.unset_all()
        self.assertEqual(10, mox3_stubout_example.fabs((- 10)))

    def test_set_raise_if_unknown_attribute(self):
        """set() refuses to create attributes that do not already exist."""
        self.assertRaises(AttributeError, self.stubber.set, os.path, 'exists_not', (lambda x: True))
        self.assertRaises(AttributeError, self.stubber.set, datetime, 'tomorrow', GroundhogDate)
        self.assertRaises(AttributeError, self.stubber.set, mox3_stubout_example, 'math1', NoPanicMath)

    def test_stubout_method_with_smart_set(self):
        """smart_set() stubs a function; smart_unset_all() restores it."""
        non_existing_path = 'non_existing_path'
        self.stubber.smart_set(os.path, 'exists', (lambda x: True))
        self.assertTrue(mox3_stubout_example.check_if_exists(non_existing_path))
        self.stubber.smart_unset_all()
        self.assertFalse(mox3_stubout_example.check_if_exists(non_existing_path))

    def test_stubout_class_with_smart_set(self):
        """smart_set() stubs a class; smart_unset_all() restores it."""
        self.stubber.smart_set(datetime, 'date', GroundhogDate)
        self.assertEqual(mox3_stubout_example.tomorrow(), datetime.date(1993, 2, 3))
        self.stubber.smart_unset_all()
        self.assertGreater(mox3_stubout_example.tomorrow().year, 2000)

    def test_stubout_module_with_smart_set(self):
        """smart_set() stubs a module reference; smart_unset_all() restores it."""
        self.assertEqual(10, mox3_stubout_example.fabs((- 10)))
        self.stubber.smart_set(mox3_stubout_example, 'math', NoPanicMath)
        self.assertEqual(42, mox3_stubout_example.fabs((- 10)))
        self.stubber.smart_unset_all()
        self.assertEqual(10, mox3_stubout_example.fabs((- 10)))

    def test_stubout_submodule_with_smart_set(self):
        """smart_set() can replace a submodule (os.path) wholesale."""
        non_existing_path = 'non_existing_path'
        self.assertFalse(mox3_stubout_example.check_if_exists(non_existing_path))
        self.stubber.smart_set(os, 'path', ExistingPath)
        self.assertTrue(mox3_stubout_example.check_if_exists(non_existing_path))
        self.stubber.smart_unset_all()
        self.assertFalse(mox3_stubout_example.check_if_exists(non_existing_path))

    def test_smart_set_raise_if_unknown_attribute(self):
        """smart_set() also refuses unknown attributes."""
        self.assertRaises(AttributeError, self.stubber.smart_set, os.path, 'exists_not', (lambda x: True))
        self.assertRaises(AttributeError, self.stubber.smart_set, datetime, 'tomorrow', GroundhogDate)
        self.assertRaises(AttributeError, self.stubber.smart_set, mox3_stubout_example, 'math1', NoPanicMath)
class DescribeRGBColor():
    """Behavioral spec for the RGBColor value type."""

    def it_is_natively_constructed_using_three_ints_0_to_255(self):
        # A valid triple of in-range ints is accepted without error.
        RGBColor(18, 52, 86)
        # Non-int and out-of-range components each raise ValueError.
        with pytest.raises(ValueError, match='RGBColor\\(\\) takes three integer valu'):
            RGBColor('12', '34', '56')
        with pytest.raises(ValueError, match='\\(\\) takes three integer values 0-255'):
            RGBColor(-1, 34, 56)
        with pytest.raises(ValueError, match='RGBColor\\(\\) takes three integer valu'):
            RGBColor(12, 256, 56)

    def it_can_construct_from_a_hex_string_rgb_value(self):
        parsed = RGBColor.from_string('123456')
        expected = RGBColor(18, 52, 86)
        assert parsed == expected

    def it_can_provide_a_hex_string_rgb_value(self):
        color = RGBColor(18, 52, 86)
        assert str(color) == '123456'

    def it_has_a_custom_repr(self):
        assert repr(RGBColor(66, 240, 186)) == 'RGBColor(0x42, 0xf0, 0xba)'
class SimilarValueTool(BaseTool):
    """Tool that maps a keyword to the most similar existing value of a
    column, using a vector store built over the column's distinct values."""

    # Spark session used to collect distinct column values; excluded from
    # the tool's serialized representation.
    spark: Union[(SparkSession, ConnectSparkSession)] = Field(exclude=True)
    name = 'similar_value'
    description = '\n    This tool takes a string keyword and searches for the most similar value from a vector store with all\n    possible values from the desired column.\n    Input to this tool is a pipe-separated string in this format: keyword|column_name|temp_view_name.\n    The temp_view_name will be queried in the column_name using the most similar value to the keyword.\n    '
    # Optional directory for cached per-(view, column) vector stores; when
    # unset, column values are re-collected on every call.
    vector_store_dir: Optional[str]
    lru_vector_store: Optional[LRUVectorStore]

    def _run(self, inputs: str, run_manager: Optional[CallbackManagerForToolRun]=None) -> str:
        """Resolve `keyword|column_name|temp_view_name` to the closest value."""
        input_lst = inputs.split('|')
        search_text = input_lst[0]
        col = input_lst[1]
        temp_view_name = input_lst[2]
        # Cache path is "<dir><view>_<col>"; None when no cache dir is set.
        vector_store_path = ((((self.vector_store_dir + temp_view_name) + '_') + col) if self.vector_store_dir else None)
        if ((not self.vector_store_dir) or (not os.path.exists(vector_store_path))):
            # No usable cache: collect the column's distinct values to embed.
            new_df = self.spark.sql('select distinct `{}` from {}'.format(col, temp_view_name))
            col_lst = [str(row[col]) for row in new_df.collect()]
        else:
            # Cache hit: pass col_lst=None -- presumably the helper loads the
            # existing store from vector_store_path; confirm in VectorSearchUtil.
            col_lst = None
        return VectorSearchUtil.vector_similarity_search(col_lst, vector_store_path, self.lru_vector_store, search_text)

    async def _arun(self, *args: Any, **kwargs: Any) -> Any:
        """Async execution is not supported."""
        raise NotImplementedError('SimilarityTool does not support async')
def _freeze_except_cascade_rpn_cls_reg(model):
    """Freeze all parameters of *model* except the box-head classification /
    regression layers and the RPN prediction heads, then return the model."""
    # Start from a fully frozen model.
    for param in model.parameters():
        param.requires_grad = False
    # Re-enable the cls/reg layers of every cascade box-predictor stage.
    for stage in model.module.roi_heads.box_predictor.children():
        for param in stage.cls_score.parameters():
            param.requires_grad = True
        for param in stage.bbox_pred.parameters():
            param.requires_grad = True
    print('unfreezing cls_logits')
    # Re-enable both RPN prediction heads (objectness + anchor deltas).
    rpn_head = model.module.proposal_generator.rpn_head
    for param in rpn_head.objectness_logits.parameters():
        param.requires_grad = True
    for param in rpn_head.anchor_deltas.parameters():
        param.requires_grad = True
    return model
class LogWheelDestroy(Event):
    """Telemetry event describing the destruction of a vehicle wheel."""

    def from_dict(self):
        """Populate event fields from the raw telemetry payload."""
        # Let the base Event fill in the shared fields first.
        super().from_dict()
        data = self._data
        self.attack_id = data.get('attackId')
        self.attacker = objects.Character(data.get('attacker', {}))
        self.vehicle = objects.Vehicle(data.get('vehicle', {}))
        self.damage_type_category = data.get('damageTypeCategory')
        self.damage_causer_name = data.get('damageCauserName')
def normalize_text(s):
def remove_articles(text):
return re.sub('\\b(a|an|the)\\b', ' ', text)
def white_space_fix(text):
return ' '.join(text.split())
def remove_punc(text):
exclude = set(string.punctuation)
return ''.join((ch for ch in text if (ch not in exclude)))
def lower(text):
return text.lower()
return white_space_fix(remove_articles(remove_punc(lower(s)))) |
def get_real_arch(arch, stages=(2, 3, 3)):
    """Canonicalize a supernet architecture string.

    Consumes ``sum(stages)`` op codes from *arch*, stage by stage.  Within
    each stage the first op is always kept in place; any identity ops
    ('0') appearing after it are moved to the end of that stage, so
    permutations that differ only in identity placement normalize to the
    same string.

    arch: iterable of single-character op codes.
    stages: ops per stage.  (Fixed: the default was a mutable list
        ``[2, 3, 3]``; a tuple is equivalent for iteration and avoids the
        shared-mutable-default pitfall.)

    Returns the normalized architecture string.
    """
    ops = list(arch)
    result = ''
    for stage in stages:
        id_num = 0  # identity ops seen in this stage, deferred to its end
        for idx in range(stage):
            op = ops.pop(0)
            if idx == 0:
                # The stage's leading op is always emitted as-is.
                result += op
                continue
            if op != '0':
                result += op
            else:
                id_num += 1
        # Append the deferred identity ops at the end of the stage.
        result += '0' * id_num
    return result
class File(BaseType):
    """A config value holding a file path.

    ``~`` and environment variables are expanded; relative paths are
    resolved against the config directory.  With ``required=True`` the
    file must exist.
    """

    def __init__(self, *, required: bool = True, none_ok: bool = False, completions: _Completions = None) -> None:
        super().__init__(none_ok=none_ok, completions=completions)
        self.required = required

    def to_py(self, value: _StrUnset) -> _StrUnsetNone:
        """Validate *value* and return the expanded absolute path."""
        self._basic_py_validation(value, str)
        if isinstance(value, usertypes.Unset):
            return value
        if not value:
            return None
        # Expand ~user first, then environment variables.
        path = os.path.expandvars(os.path.expanduser(value))
        try:
            if not os.path.isabs(path):
                path = os.path.join(standarddir.config(), path)
            if self.required and not os.path.isfile(path):
                raise configexc.ValidationError(path, 'Must be an existing file (absolute or relative to the config directory)!')
        except UnicodeEncodeError as e:
            # Path checks can fail to encode on some filesystems.
            raise configexc.ValidationError(path, e)
        return path

    def __repr__(self) -> str:
        return utils.get_repr(self, none_ok=self.none_ok, required=self.required)
class MnliProcessor(object):
    """Reads MNLI TSV files and produces InputExamples with a collapsed
    two-way label set (entailment vs. non-entailment)."""

    def get_train_examples(self, data_dir, num_train_samples=(- 1)):
        """Load training examples; -1 means use the full training set."""
        if (num_train_samples != (- 1)):
            return self._create_examples(self._read_tsv(os.path.join(data_dir, 'mnli_train.tsv')), 'mnli_train')[:num_train_samples]
        return self._create_examples(self._read_tsv(os.path.join(data_dir, 'mnli_train.tsv')), 'mnli_train')

    def get_dev_examples(self, data_dir):
        """Load the matched dev-set examples."""
        return self._create_examples(self._read_tsv(os.path.join(data_dir, 'mnli_dev.tsv')), 'mnli_dev_matched')

    def get_labels(self):
        """Return the collapsed label set used by this processor."""
        return ['entailment', 'non-entailment']

    def _create_examples(self, lines, set_type):
        """Turn parsed TSV rows into InputExamples, skipping the header row.

        Columns 8 and 9 hold the sentence pair; the label is the last
        column, with 'contradiction' and 'neutral' merged into
        'non-entailment'.
        """
        examples = []
        for (i, line) in enumerate(lines):
            if (i == 0):
                continue
            guid = i
            text_a = line[8]
            text_b = line[9]
            label = line[(- 1)]
            if ((label == 'contradiction') or (label == 'neutral')):
                label = 'non-entailment'
            examples.append(InputExample(guid=guid, text_a=text_a, text_b=text_b, label=label))
        return examples

    def _read_tsv(cls, input_file, quotechar=None):
        """Read a tab-separated file into a list of rows."""
        # NOTE(review): first parameter is named `cls` but no @classmethod
        # decorator is visible here -- when called as self._read_tsv(...),
        # `cls` receives the instance.  Confirm whether a decorator was lost.
        with open(input_file, 'r') as f:
            reader = csv.reader(f, delimiter='\t', quotechar=quotechar)
            lines = []
            for line in reader:
                lines.append(line)
            return lines
def setup_args():
    """Build the command-line parser for evaluating a model on an image dataset.

    A shared parent parser holds the common options; the 'pretrained' and
    'checkpoint' subcommands add their source-specific arguments on top.
    """
    # add_help=False so the parent can be embedded in subparsers without a
    # duplicate -h/--help option.
    parent_parser = argparse.ArgumentParser(add_help=False)
    parent_parser.add_argument('dataset', type=str, help='dataset path')
    parent_parser.add_argument('-a', '--architecture', type=str, choices=pretrained_models.keys(), help='model architecture', required=True)
    parent_parser.add_argument('-c', '--entropy-coder', choices=compressai.available_entropy_coders(), default=compressai.available_entropy_coders()[0], help='entropy coder (default: %(default)s)')
    parent_parser.add_argument('--cuda', action='store_true', help='enable CUDA')
    parent_parser.add_argument('--half', action='store_true', help='convert model to half floating point (fp16)')
    parent_parser.add_argument('--entropy-estimation', action='store_true', help='use evaluated entropy estimation (no entropy coding)')
    parent_parser.add_argument('-v', '--verbose', action='store_true', help='verbose mode')
    parser = argparse.ArgumentParser(description='Evaluate a model on an image dataset.', add_help=True)
    # The chosen subcommand is exposed as args.source.
    subparsers = parser.add_subparsers(help='model source', dest='source')
    # 'pretrained': select a published model by metric and quality level(s).
    pretrained_parser = subparsers.add_parser('pretrained', parents=[parent_parser])
    pretrained_parser.add_argument('-m', '--metric', type=str, choices=['mse', 'ms-ssim'], default='mse', help='metric trained against (default: %(default)s)')
    pretrained_parser.add_argument('-q', '--quality', dest='qualities', nargs='+', type=int, default=(1,))
    # 'checkpoint': evaluate one or more local checkpoint files.
    checkpoint_parser = subparsers.add_parser('checkpoint', parents=[parent_parser])
    checkpoint_parser.add_argument('-p', '--path', dest='paths', type=str, nargs='*', required=True, help='checkpoint path')
    return parser
class AttrVI_ATTR_DEST_INCREMENT(RangeAttribute):
    """Attribute descriptor for VI_ATTR_DEST_INCREMENT.

    A readable, writable, local ViInt32 restricted to the range [0, 1],
    defaulting to 1, available on PXI and VXI INSTR/MEMACC resources.
    """

    resources = [
        (constants.InterfaceType.pxi, 'INSTR'),
        (constants.InterfaceType.pxi, 'MEMACC'),
        (constants.InterfaceType.vxi, 'INSTR'),
        (constants.InterfaceType.vxi, 'MEMACC'),
    ]
    py_name = 'destination_increment'
    visa_name = 'VI_ATTR_DEST_INCREMENT'
    visa_type = 'ViInt32'
    default = 1
    read = True
    write = True
    local = True
    min_value = 0
    max_value = 1
    values = None
class BasicDiscriminatorLoss(nn.Module):
    """Least-squares GAN (LSGAN) discriminator loss.

    For each discriminator output pair the loss is
    ``mean((1 - D(real))**2) + mean(D(fake)**2)``, summed over all
    discriminators.
    """

    def __init__(self, config=None):
        # `config` is accepted for interface compatibility but unused.
        super(BasicDiscriminatorLoss, self).__init__()

    def forward(self, real_outputs, fake_outputs):
        """Sum the LSGAN loss over parallel iterables of discriminator outputs.

        real_outputs / fake_outputs: iterables of tensors -- the
        discriminator applied to real and generated samples respectively.
        Returns the total loss as a scalar tensor.
        """
        loss = 0
        for real_out, fake_out in zip(real_outputs, fake_outputs):
            # Compute in float32 for numerical stability (e.g. under AMP).
            real_out = real_out.float()
            fake_out = fake_out.float()
            # Fixed: the original also accumulated per-discriminator
            # real/fake loss lists (with .item() GPU syncs) that were never
            # returned or read -- dead work, removed.
            loss = loss + torch.mean((1 - real_out) ** 2) + torch.mean(fake_out ** 2)
        return loss
def drop_warning_stat(idata: arviz.InferenceData) -> arviz.InferenceData:
    """Return a new InferenceData with sampler warning stats removed.

    Drops the 'warning' variable (and its 'warning_dim_0' dimension
    variable) from every group whose name contains 'sample_stat'; all
    other groups are copied through unchanged.
    """
    nidata = arviz.InferenceData(attrs=idata.attrs)
    for (gname, group) in idata.items():
        if ('sample_stat' in gname):
            # Substring match covers e.g. 'sample_stats' and
            # 'sample_stats_prior'; errors='ignore' leaves groups without
            # these variables untouched.
            group = group.drop_vars(names=['warning', 'warning_dim_0'], errors='ignore')
        nidata.add_groups({gname: group}, coords=group.coords, dims=group.dims)
    return nidata
class NCEAverage(nn.Module):
    """Memory bank for noise-contrastive estimation (NCE).

    Keeps an (outputSize x inputSize) feature memory and draws K noise
    indices per query row from a uniform alias-method sampler; the actual
    scoring/update is delegated to NCEFunction.
    """

    def __init__(self, inputSize, outputSize, K, T=0.07, momentum=0.5, Z=None):
        # inputSize: feature dimension; outputSize: number of memory slots;
        # K: noise samples per query; T: temperature; momentum: memory
        # update rate.  NOTE(review): `Z` is accepted but never used here;
        # the normalization slot in `params` is initialized to -1 --
        # presumably NCEFunction fills it on first use; confirm there.
        super(NCEAverage, self).__init__()
        self.nLem = outputSize
        # Uniform unigram weights -> uniform noise distribution.
        self.unigrams = torch.ones(self.nLem)
        self.multinomial = AliasMethod(self.unigrams)
        self.multinomial.cuda()  # sampler is moved to GPU; requires CUDA
        self.K = K
        # params layout consumed by NCEFunction: [K, T, Z, momentum].
        self.register_buffer('params', torch.tensor([K, T, (- 1), momentum]))
        # Initialize memory uniformly in [-stdv, stdv].
        stdv = (1.0 / math.sqrt((inputSize / 3)))
        self.register_buffer('memory', torch.rand(outputSize, inputSize).mul_((2 * stdv)).add_((- stdv)))

    def forward(self, x, y):
        # x: query features of shape (batchSize, inputSize) -- TODO confirm;
        # y: target indices.  Draws (K + 1) indices per row.
        batchSize = x.size(0)
        idx = self.multinomial.draw((batchSize * (self.K + 1))).view(batchSize, (- 1))
        out = NCEFunction.apply(x, y, self.memory, idx, self.params)
        return out
# NOTE(review): the two bare expressions below look like decorators whose
# '@name' prefixes were stripped in extraction (a route/resource decorator and
# a path-parameter decorator) -- confirm against the original source.
('/v1/organization/<orgname>/prototypes')
_param('orgname', 'The name of the organization')
class PermissionPrototypeList(ApiResource):
    """API resource listing and creating an organization's default
    permission prototypes."""

    # JSON schema used to validate the POST request body.
    schemas = {'NewPrototype': {'type': 'object', 'description': 'Description of a new prototype', 'required': ['role', 'delegate'], 'properties': {'role': {'type': 'string', 'description': 'Role that should be applied to the delegate', 'enum': ['read', 'write', 'admin']}, 'activating_user': {'type': 'object', 'description': 'Repository creating user to whom the rule should apply', 'required': ['name'], 'properties': {'name': {'type': 'string', 'description': 'The username for the activating_user'}}}, 'delegate': {'type': 'object', 'description': 'Information about the user or team to which the rule grants access', 'required': ['name', 'kind'], 'properties': {'name': {'type': 'string', 'description': 'The name for the delegate team or user'}, 'kind': {'type': 'string', 'description': 'Whether the delegate is a user or a team', 'enum': ['user', 'team']}}}}}}

    # NOTE(review): likewise these look like stripped decorators
    # (scope requirement and operation nickname) -- confirm.
    _scope(scopes.ORG_ADMIN)
    ('getOrganizationPrototypePermissions')
    def get(self, orgname):
        """List the existing prototype permissions of the organization."""
        # NOTE(review): unlike post(), this check does not include
        # allow_if_superuser() -- confirm whether that asymmetry is intended.
        permission = AdministerOrganizationPermission(orgname)
        if permission.can():
            try:
                org = model.organization.get_organization(orgname)
            except model.InvalidOrganizationException:
                raise NotFound()
            permissions = model.permission.get_prototype_permissions(org)
            # Resolve member info only for users referenced by the prototypes.
            users_filter = ({p.activating_user for p in permissions} | {p.delegate_user for p in permissions})
            org_members = model.organization.get_organization_member_set(org, users_filter=users_filter)
            return {'prototypes': [prototype_view(p, org_members) for p in permissions]}
        raise Unauthorized()

    _scope(scopes.ORG_ADMIN)
    ('createOrganizationPrototypePermission')
    _json_request('NewPrototype')
    def post(self, orgname):
        """Create a new prototype permission for the organization."""
        permission = AdministerOrganizationPermission(orgname)
        if (permission.can() or allow_if_superuser()):
            try:
                org = model.organization.get_organization(orgname)
            except model.InvalidOrganizationException:
                raise NotFound()
            details = request.get_json()
            # Optional activating user; None means the rule applies generally.
            activating_username = None
            if (('activating_user' in details) and details['activating_user'] and ('name' in details['activating_user'])):
                activating_username = details['activating_user']['name']
            # The delegate is either a user or a team, selected by 'kind'.
            delegate = (details['delegate'] if ('delegate' in details) else {})
            delegate_kind = delegate.get('kind', None)
            delegate_name = delegate.get('name', None)
            delegate_username = (delegate_name if (delegate_kind == 'user') else None)
            delegate_teamname = (delegate_name if (delegate_kind == 'team') else None)
            activating_user = (model.user.get_user(activating_username) if activating_username else None)
            delegate_user = (model.user.get_user(delegate_username) if delegate_username else None)
            delegate_team = (model.team.get_organization_team(orgname, delegate_teamname) if delegate_teamname else None)
            if (activating_username and (not activating_user)):
                raise request_error(message='Unknown activating user')
            if ((not delegate_user) and (not delegate_team)):
                raise request_error(message='Missing delegate user or team')
            role_name = details['role']
            prototype = model.permission.add_prototype_permission(org, role_name, activating_user, delegate_user, delegate_team)
            log_prototype_action('create_prototype_permission', orgname, prototype)
            users_filter = {prototype.activating_user, prototype.delegate_user}
            org_members = model.organization.get_organization_member_set(org, users_filter=users_filter)
            return prototype_view(prototype, org_members)
        raise Unauthorized()
class WindowSpecificationTestCases(unittest.TestCase):
def setUp(self):
Timings.defaults()
self.app = Application(backend='win32').start(_notepad_exe())
self.dlgspec = self.app.UntitledNotepad
self.ctrlspec = self.app.UntitledNotepad.Edit
def tearDown(self):
self.app.kill()
def test__init__(self):
wspec = WindowSpecification(dict(best_match=u'UntitledNotepad', app=self.app))
self.assertEqual(wspec.window_text(), u'Untitled - Notepad')
self.assertEqual(self.dlgspec.app, self.app)
self.assertEqual(self.ctrlspec.app, self.app)
self.assertEqual(wspec.app, self.app)
def test__init__both_keywords(self):
self.assertRaises(KeyError, WindowSpecification, dict(best_match=u'UntitledNotepad', app=self.app, pid=self.app.process))
def test__call__(self):
self.assertRaises(AttributeError, self.dlgspec)
self.assertRaises(AttributeError, self.ctrlspec)
wspec = WindowSpecification(dict(name=u'blah', app=self.app))
self.assertRaises(AttributeError, wspec)
def test_wrapper_object(self):
self.assertEqual(True, isinstance(self.dlgspec, WindowSpecification))
self.assertEqual(True, isinstance(self.dlgspec.find(), hwndwrapper.HwndWrapper))
def test_window(self):
sub_spec = self.dlgspec.by(class_name='Edit')
sub_spec_legacy = self.dlgspec.window(class_name='Edit')
self.assertEqual(True, isinstance(sub_spec, WindowSpecification))
self.assertEqual(sub_spec.class_name(), 'Edit')
self.assertEqual(sub_spec_legacy.class_name(), 'Edit')
def test__getitem__(self):
self.assertEqual(True, isinstance(self.dlgspec['Edit'], WindowSpecification))
self.assertEqual(self.dlgspec['Edit'].class_name(), 'Edit')
self.assertRaises(AttributeError, self.ctrlspec.__getitem__, 'edit')
def test_getattr(self):
self.assertEqual(True, isinstance(self.dlgspec.Edit, WindowSpecification))
self.assertEqual(self.dlgspec.Edit.class_name(), 'Edit')
self.assertEqual('Notepad', self.dlgspec.class_name())
spec = self.ctrlspec.by(parent=self.dlgspec, visible=True)
self.assertEqual(spec.class_name(), 'Edit')
def test_non_magic_getattr(self):
ws = WindowSpecification(dict(best_match='Notepad'))
self.assertEqual(ws.allow_magic_lookup, True)
ws_no_magic = WindowSpecification(dict(best_match='Notepad'), allow_magic_lookup=False)
self.assertEqual(ws_no_magic.allow_magic_lookup, False)
dlg = ws_no_magic.by(best_match='Edit')
has_focus = dlg.has_keyboard_focus()
self.assertIn(has_focus, (True, False))
with self.assertRaises(AttributeError):
ws_no_magic.Edit
def test_exists(self):
self.assertEqual(True, self.dlgspec.exists())
self.assertEqual(True, self.dlgspec.exists(0))
self.assertEqual(True, self.ctrlspec.exists())
start = timestamp()
self.assertEqual(False, self.app.BlahBlah.exists(timeout=0.1))
self.assertEqual(True, ((timestamp() - start) < 0.3))
start = timestamp()
self.assertEqual(False, self.app.BlahBlah.exists(timeout=3))
self.assertEqual(True, (2.7 < (timestamp() - start) < 3.3))
def test_exists_timing(self):
start = timestamp()
self.assertEqual(True, self.dlgspec.exists())
self.assertEqual(True, ((timestamp() - start) < 0.3))
start = timestamp()
self.assertEqual(True, self.ctrlspec.exists())
self.assertEqual(True, ((timestamp() - start) < 0.3))
start = timestamp()
self.assertEqual(True, self.dlgspec.exists(0.5))
timedif = (timestamp() - start)
self.assertEqual(True, (0.49 > timedif < 0.6))
def test_find_all_dlg(self):
dlg_spec_list = self.dlgspec.find_all()
self.assertEqual(1, len(dlg_spec_list))
self.assertEqual(self.dlgspec.find(), dlg_spec_list[0])
def test_find_all_notepad(self):
ctrls = self.dlgspec.by(parent=self.dlgspec).find_all()
self.assertEqual(2, len(ctrls))
self.assertEqual(ctrls[0], self.app.Notepad.Edit.find())
self.assertEqual(ctrls[1], self.app.Notepad.StatusBar.find())
def test_wait(self):
allowable_error = 0.2
start = timestamp()
self.assertEqual(self.dlgspec.find(), self.dlgspec.wait('enaBleD '))
time_taken = (timestamp() - start)
if (not (0 <= time_taken < (0 + (2 * allowable_error)))):
self.assertEqual(0.02, time_taken)
start = timestamp()
self.assertEqual(self.dlgspec.find(), self.dlgspec.wait(' ready'))
self.assertEqual(True, (0 <= (timestamp() - start) < (0 + allowable_error)))
start = timestamp()
self.assertEqual(self.dlgspec.find(), self.dlgspec.wait(' exiSTS'))
self.assertEqual(True, (0 <= (timestamp() - start) < (0 + allowable_error)))
start = timestamp()
self.assertEqual(self.dlgspec.find(), self.dlgspec.wait(' VISIBLE '))
self.assertEqual(True, (0 <= (timestamp() - start) < (0 + allowable_error)))
start = timestamp()
self.assertEqual(self.dlgspec.find(), self.dlgspec.wait(' ready enabled'))
self.assertEqual(True, (0 <= (timestamp() - start) < (0 + allowable_error)))
start = timestamp()
self.assertEqual(self.dlgspec.find(), self.dlgspec.wait('visible exists '))
self.assertEqual(True, (0 <= (timestamp() - start) < (0 + allowable_error)))
start = timestamp()
self.assertEqual(self.dlgspec.find(), self.dlgspec.wait('exists '))
self.assertEqual(True, (0 <= (timestamp() - start) < (0 + allowable_error)))
start = timestamp()
self.assertEqual(self.dlgspec.find(), self.dlgspec.wait('actIve '))
self.assertEqual(True, (0 <= (timestamp() - start) < (0 + allowable_error)))
self.assertRaises(SyntaxError, self.dlgspec.wait, 'Invalid_criteria')
def test_wait_non_existing(self):
allowable_error = 0.2
start = timestamp()
self.assertRaises(TimeoutError, self.app.BlahBlah.wait, 'exists')
expected = Timings.window_find_timeout
self.assertEqual(True, ((expected - allowable_error) <= (timestamp() - start) < (expected + allowable_error)))
def test_wait_invisible(self):
allowable_error = 0.2
start = timestamp()
self.assertRaises(TimeoutError, self.app.BlahBlah.wait, 'visible')
expected = Timings.window_find_timeout
self.assertEqual(True, ((expected - allowable_error) <= (timestamp() - start) < (expected + allowable_error)))
status_bar_menu = self.app.UntitledNotepad.menu().item('&View').sub_menu().item('&Status Bar')
if status_bar_menu.is_checked():
status_bar_menu.select()
status_bar_spec = self.app.UntitledNotepad.by(class_name='msctls_statusbar32', visible=None)
self.assertEqual('StatusBar', status_bar_spec.wait('exists').friendly_class_name())
start = timestamp()
self.assertRaises(TimeoutError, status_bar_spec.wait, 'exists visible')
self.assertEqual(True, ((expected - allowable_error) <= (timestamp() - start) < (expected + allowable_error)))
start = timestamp()
self.assertRaises(TimeoutError, status_bar_spec.wait, 'visible exists')
self.assertEqual(True, ((expected - allowable_error) <= (timestamp() - start) < (expected + allowable_error)))
def test_wait_not(self):
allowable_error = 0.16
start = timestamp()
self.assertRaises(TimeoutError, self.dlgspec.wait_not, 'enaBleD ', 0.1, 0.05)
taken = (timestamp() - start)
if (0.1 < taken > (0.1 + allowable_error)):
self.assertEqual(0.12, taken)
start = timestamp()
self.assertRaises(TimeoutError, self.dlgspec.wait_not, ' ready', 0.1, 0.05)
self.assertEqual(True, (0.1 <= (timestamp() - start) < (0.1 + allowable_error)))
start = timestamp()
self.assertRaises(TimeoutError, self.dlgspec.wait_not, ' exiSTS', 0.1, 0.05)
self.assertEqual(True, (0.1 <= (timestamp() - start) < (0.1 + allowable_error)))
start = timestamp()
self.assertRaises(TimeoutError, self.dlgspec.wait_not, ' VISIBLE ', 0.1, 0.05)
self.assertEqual(True, (0.1 <= (timestamp() - start) < (0.1 + allowable_error)))
start = timestamp()
self.assertRaises(TimeoutError, self.dlgspec.wait_not, ' ready enabled', 0.1, 0.05)
self.assertEqual(True, (0.1 <= (timestamp() - start) < (0.1 + allowable_error)))
start = timestamp()
self.assertRaises(TimeoutError, self.dlgspec.wait_not, 'visible exists ', 0.1, 0.05)
self.assertEqual(True, (0.1 <= (timestamp() - start) < (0.1 + allowable_error)))
start = timestamp()
self.assertRaises(TimeoutError, self.dlgspec.wait_not, 'exists ', 0.1, 0.05)
self.assertEqual(True, (0.1 <= (timestamp() - start) < (0.1 + allowable_error)))
start = timestamp()
self.assertRaises(TimeoutError, self.dlgspec.wait_not, 'actIve ', 0.1, 0.05)
self.assertEqual(True, (0.1 <= (timestamp() - start) < (0.1 + allowable_error)))
self.assertRaises(SyntaxError, self.dlgspec.wait_not, 'Invalid_criteria')
def test_depth(self):
self.dlgspec.menu_select('Format -> Font')
self.assertNotEqual(len(self.app['Font'].descendants(depth=1)), len(self.app['Font'].descendants(depth=2)))
def test_dump_tree(self):
self.dlgspec.dump_tree()
self.ctrlspec.dump_tree()
def test_dump_tree_file_output(self):
    """dump_tree(filename=...) must create a readable dump file containing
    the expected element descriptions, for a dialog and for a control."""
    output_filename = 'test_dump_tree.txt'

    def dump_and_check(spec, predicates):
        # One dump/verify/cleanup round for a single specification.
        spec.dump_tree(filename=output_filename)
        if not os.path.isfile(output_filename):
            self.fail("dump_tree can't create a file")
        with open(output_filename, 'r') as test_log_file:
            content = str(test_log_file.readlines())
        for predicate in predicates:
            self.assertTrue(predicate(content))
        os.remove(output_filename)

    dump_and_check(self.dlgspec, [
        lambda c: ("'Untitled - NotepadEdit'" in c) and ("'Edit'" in c),
        lambda c: ".by(class_name='msctls_statusbar32'" in c,
    ])
    dump_and_check(self.ctrlspec, [
        lambda c: ".by(class_name='Edit')" in c,
    ])
def test_find_elements_re(self):
    """find_elements() must locate at least one window whose name matches
    the given regular expression."""
    self.dlgspec.wait('visible')
    matches = findwindows.find_elements(name_re='Untitled - Notepad')
    self.assertTrue(len(matches) >= 1)
def test_nyquist_exceptions():
    """nyquist_plot must reject unsupported systems/options and warn on
    frequencies above the Nyquist frequency."""
    # MIMO systems are not supported at all.
    mimo_sys = ct.rss(2, 2, 2)
    with pytest.raises(ct.exception.ControlMIMONotImplemented, match='only supports SISO'):
        ct.nyquist_plot(mimo_sys)

    # Removed keywords and unknown option values raise immediately.
    siso_sys = ct.rss(2, 1, 1)
    with pytest.raises(AttributeError):
        ct.nyquist_plot(siso_sys, arrow_width=8, arrow_length=6)
    with pytest.raises(ValueError, match='unsupported arrow location'):
        ct.nyquist_plot(siso_sys, arrows='uniform')

    # A system with imaginary-axis poles needs a valid indent direction.
    imag_pole_sys = ct.tf([1], [1, 0, 1])
    with pytest.raises(ValueError, match='unknown value for indent'):
        ct.nyquist_plot(imag_pole_sys, indent_direction='up')

    # Discrete-time system: requesting frequencies above Nyquist only warns.
    discrete_sys = ct.drss(2, 1, 1)
    discrete_sys.dt = 0.01
    with pytest.warns(UserWarning, match='above Nyquist'):
        ct.nyquist_plot(discrete_sys, np.logspace(-2, 3))
class JSONTableWriter(FrameWriter):
    """FrameWriter for ``orient='table'``: emits a Table Schema plus the
    data serialized with ``orient='records'``."""

    _default_orient = 'records'

    def __init__(self, obj, orient, date_format, double_precision, ensure_ascii, date_unit, default_handler=None):
        """Validate table-orient constraints, build the schema, and normalise
        ``obj`` in a copy (timedeltas -> ISO strings, index reset to columns).

        Raises ValueError for non-'iso' date_format or overlapping
        index/column names, NotImplementedError for MultiIndex columns.
        """
        super(JSONTableWriter, self).__init__(obj, orient, date_format, double_precision, ensure_ascii, date_unit, default_handler=default_handler)
        if (date_format != 'iso'):
            msg = ("Trying to write with `orient='table'` and `date_format='%s'`. Table Schema requires dates to be formatted with `date_format='iso'`" % date_format)
            raise ValueError(msg)
        # Build the schema from the *original* object, before any mutation.
        self.schema = build_table_schema(obj)
        if ((obj.ndim == 2) and isinstance(obj.columns, MultiIndex)):
            raise NotImplementedError("orient='table' is not supported for MultiIndex")
        # Names shared between index and columns would collide after the
        # reset_index() below.
        if (((obj.ndim == 1) and (obj.name in set(obj.index.names))) or len((obj.columns & obj.index.names))):
            msg = 'Overlapping names between the index and columns'
            raise ValueError(msg)
        obj = obj.copy()
        # Timedeltas have no Table Schema representation; emit ISO 8601 text.
        timedeltas = obj.select_dtypes(include=['timedelta']).columns
        if len(timedeltas):
            obj[timedeltas] = obj[timedeltas].applymap((lambda x: x.isoformat()))
        # Move the index into ordinary columns so it survives 'records' orient.
        self.obj = obj.reset_index()
        self.date_format = 'iso'
        self.orient = 'records'

    def write(self):
        """Return the combined '{"schema": ..., "data": ...}' JSON string."""
        data = super(JSONTableWriter, self).write()
        serialized = '{{"schema": {}, "data": {}}}'.format(dumps(self.schema), data)
        return serialized
class SilentTestSource(Silence):
    """A Silence audio source that additionally counts the number of bytes
    handed out through get_audio_data(), for use in tests."""

    def __init__(self, duration, frequency=440, sample_rate=44800, envelope=None):
        super().__init__(duration, frequency, sample_rate, envelope)
        # Running total of bytes returned so far.
        self.bytes_read = 0

    def get_audio_data(self, nbytes):
        """Delegate to Silence, tallying the length of each non-empty chunk."""
        chunk = super().get_audio_data(nbytes)
        if chunk is not None:
            self.bytes_read += chunk.length
        return chunk

    def max_offset(self):
        """Expose the otherwise-private maximum byte offset."""
        return self._max_offset
def distortionParameter(types):
parameters = []
if (types == 'barrel'):
Lambda = ((np.random.random_sample() * (- 5e-05)) / 4)
x0 = 256
y0 = 256
parameters.append(Lambda)
parameters.append(x0)
parameters.append(y0)
return parameters
elif (types == 'pincushion'):
Lambda = ((np.random.random_sample() * 8.6e-05) / 4)
x0 = 128
y0 = 128
parameters.append(Lambda)
parameters.append(x0)
parameters.append(y0)
return parameters
elif (types == 'rotation'):
theta = ((np.random.random_sample() * 30) - 15)
radian = ((math.pi * theta) / 180)
sina = math.sin(radian)
cosa = math.cos(radian)
parameters.append(sina)
parameters.append(cosa)
return parameters
elif (types == 'shear'):
shear = ((np.random.random_sample() * 0.8) - 0.4)
parameters.append(shear)
return parameters
elif (types == 'projective'):
x1 = 0
x4 = ((np.random.random_sample() * 0.1) + 0.1)
x2 = (1 - x1)
x3 = (1 - x4)
y1 = 0.005
y4 = (1 - y1)
y2 = y1
y3 = y4
a31 = ((((((x1 - x2) + x3) - x4) * (y4 - y3)) - ((((y1 - y2) + y3) - y4) * (x4 - x3))) / (((x2 - x3) * (y4 - y3)) - ((x4 - x3) * (y2 - y3))))
a32 = ((((((y1 - y2) + y3) - y4) * (x2 - x3)) - ((((x1 - x2) + x3) - x4) * (y2 - y3))) / (((x2 - x3) * (y4 - y3)) - ((x4 - x3) * (y2 - y3))))
a11 = ((x2 - x1) + (a31 * x2))
a12 = ((x4 - x1) + (a32 * x4))
a13 = x1
a21 = ((y2 - y1) + (a31 * y2))
a22 = ((y4 - y1) + (a32 * y4))
a23 = y1
parameters.append(a11)
parameters.append(a12)
parameters.append(a13)
parameters.append(a21)
parameters.append(a22)
parameters.append(a23)
parameters.append(a31)
parameters.append(a32)
return parameters
elif (types == 'wave'):
mag = (np.random.random_sample() * 32)
parameters.append(mag)
return parameters |
class ExcelImporter():
    """Imports single cells or tabular containers (QFSeries / QFDataFrame)
    from Excel workbooks via openpyxl (read-only, values only)."""

    def __init__(self):
        self.logger = qf_logger.getChild(self.__class__.__name__)

    def import_cell(self, file_path: str, cell_address: str, sheet_name: str=None) -> Union[(int, float, str)]:
        """Return the value of a single cell (e.g. 'B3') from the given sheet
        (active sheet when ``sheet_name`` is None)."""
        self.logger.info('Started importing data from {}'.format(file_path))
        work_book = self._get_work_book(file_path)
        work_sheet = self._get_work_sheet(work_book, sheet_name)
        result = work_sheet[cell_address]
        work_book.close()
        return result.value

    def import_container(self, file_path: str, starting_cell: str, ending_cell: str, container_type: type=None, sheet_name: str=None, include_index: bool=True, include_column_names: bool=False) -> Union[(QFSeries, QFDataFrame)]:
        """Load the rectangle [starting_cell, ending_cell] into a QFSeries or
        QFDataFrame (inferred from the column count when ``container_type``
        is None).  The first column / first row can optionally be used as
        index / column names.  Raises ValueError for an inconsistent
        container_type.  The result is ``squeeze()``d, so a one-column frame
        collapses to a series.
        """
        self.logger.info('Started importing data from {}'.format(file_path))
        start_time = datetime.datetime.now()
        work_book = self._get_work_book(file_path)
        work_sheet = self._get_work_sheet(work_book, sheet_name)
        bounding_box = get_bounding_box(starting_cell, ending_cell)
        # Data columns = box width, minus one when the first column is index.
        nr_of_non_index_columns = ((bounding_box.ending_column - bounding_box.starting_column) + 1)
        if include_index:
            nr_of_non_index_columns -= 1
        if (container_type is None):
            container_type = self._infer_container_type(nr_of_non_index_columns)
        if (not self._is_correct_containers_type(container_type, nr_of_non_index_columns)):
            raise ValueError("Incorrect container's type")
        container = self._load_container(work_sheet, container_type, bounding_box, include_index, include_column_names)
        end_time = datetime.datetime.now()
        execution_time = (end_time - start_time)
        self.logger.info('Ended importing data from {} after {}'.format(file_path, execution_time))
        work_book.close()
        return container.squeeze()

    def _get_work_book(self, file_path):
        """Open the workbook from an in-memory copy (keeps the file handle
        closed while the read-only workbook is in use)."""
        assert exists(file_path)
        with open(file_path, 'rb') as f:
            in_memory_file = io.BytesIO(f.read())
        work_book = load_workbook(in_memory_file, read_only=True, data_only=True)
        return work_book

    def _get_work_sheet(self, work_book, sheet_name):
        """Return the named sheet, or the active one when sheet_name is None."""
        if (sheet_name is None):
            work_sheet = work_book.active
        else:
            work_sheet = work_book[sheet_name]
        return work_sheet

    def _infer_container_type(self, nr_of_non_index_columns):
        """One data column -> QFSeries; more -> QFDataFrame; none -> error."""
        if (nr_of_non_index_columns <= 0):
            raise ValueError('Ending column must have higher index than starting column and if the include_index==True,then there must be at least two columns in the bounding box')
        container_type = None
        if (nr_of_non_index_columns > 1):
            container_type = QFDataFrame
        elif (nr_of_non_index_columns == 1):
            container_type = QFSeries
        return container_type

    def _is_correct_containers_type(self, container_type, nr_of_non_index_columns):
        """Check the requested container type matches the data column count."""
        if (nr_of_non_index_columns > 1):
            correct_container_type = issubclass(container_type, QFDataFrame)
        else:
            correct_container_type = issubclass(container_type, QFSeries)
        return correct_container_type

    def _load_container(self, work_sheet, container_type, bounding_box, include_index, include_column_names):
        """Dispatch to the series or dataframe loader by container type."""
        container = None
        if issubclass(container_type, QFSeries):
            container = self._load_series(work_sheet, container_type, bounding_box, include_index, include_column_names)
        elif issubclass(container_type, QFDataFrame):
            container = self._load_dataframe(work_sheet, container_type, bounding_box, include_index, include_column_names)
        return container

    def _load_series(self, work_sheet, container_type, bounding_box, include_index, include_column_names):
        """Build a series from one (or, with index, two) columns of the box.
        A column-names row, if present, is skipped (a series has no columns)."""
        starting_column = bounding_box.starting_column
        starting_row = bounding_box.starting_row
        ending_row = bounding_box.ending_row
        if include_column_names:
            starting_row += 1
        index = None
        values_column = starting_column
        if include_index:
            index = self._load_column(work_sheet, starting_row, ending_row, starting_column)
            values_column += 1
        values = self._load_column(work_sheet, starting_row, ending_row, values_column)
        return container_type(data=values, index=index)

    def _load_dataframe(self, work_sheet, container_type, bounding_box, include_index, include_column_names):
        """Build a dataframe from the box, optionally peeling off the first
        row as column names and the first column as index."""
        starting_column = bounding_box.starting_column
        starting_row = bounding_box.starting_row
        ending_column = bounding_box.ending_column
        ending_row = bounding_box.ending_row
        column_names = None
        if include_column_names:
            column_names = self._load_row(work_sheet, starting_row)
            # Trim the header row to the bounding box width.
            column_names = column_names[(starting_column - 1):ending_column]
            starting_row += 1
        index = None
        if include_index:
            index = self._load_column(work_sheet, starting_row, ending_row, starting_column)
            if (column_names is not None):
                # Drop the header cell that sat above the index column.
                column_names = column_names[1:]
            starting_column += 1
        rows_values = []
        # islice uses 0-based positions; rows/columns here are 1-based.
        for row in islice(work_sheet.rows, (starting_row - 1), ending_row):
            row_values = [cell.value for cell in islice(row, (starting_column - 1), ending_column)]
            rows_values.append(row_values)
        values = np.array(rows_values)
        return container_type(index=index, data=values, columns=column_names)

    def _load_column(self, work_sheet, starting_row, ending_row, column_nr):
        """Return the values of one column between the given rows (inclusive,
        1-based).  Scans the sheet row by row (read-only sheets are
        forward-iterable only)."""
        values = []
        row_nr = 1
        for row in work_sheet.rows:
            if (starting_row <= row_nr <= ending_row):
                col_nr = 1
                for cell in row:
                    if (col_nr == column_nr):
                        values.append(cell.value)
                    col_nr += 1
            row_nr += 1
        return values

    def _load_row(self, work_sheet, row_index):
        """Return all cell values of the given 1-based row, or None if the
        sheet has fewer rows."""
        for (i, row) in enumerate(work_sheet.rows):
            if (i == (row_index - 1)):
                return [cell.value for cell in row]
        return None
def main():
    """Driver for preprocess / train / eval modes.

    Reads the experiment jsonnet config named by the module-level ``args``
    (parsed elsewhere in this file), optionally merges command-line model
    config overrides, then dispatches on ``args.mode``.
    """
    exp_config = json.loads(_jsonnet.evaluate_file(args.exp_config_file))
    model_config_file = exp_config['model_config']
    # Merge model-config args: experiment-file values overridden by CLI ones.
    if ('model_config_args' in exp_config):
        model_config_args = exp_config['model_config_args']
        if (args.model_config_args is not None):
            model_config_args_json = _jsonnet.evaluate_snippet('', args.model_config_args)
            model_config_args.update(json.loads(model_config_args_json))
        # Re-serialize: downstream configs expect a JSON string (TLA code).
        model_config_args = json.dumps(model_config_args)
    elif (args.model_config_args is not None):
        model_config_args = _jsonnet.evaluate_snippet('', args.model_config_args)
    else:
        model_config_args = None
    logdir = (args.logdir or exp_config['logdir'])
    name = exp_config['name']
    if (args.mode == 'preprocess'):
        preprocess_config = PreprocessConfig(model_config_file, model_config_args)
        preprocess.main(preprocess_config, partition=args.partition)
    elif (args.mode == 'train'):
        train_config = TrainConfig(model_config_file, model_config_args, logdir, name)
        train.main(train_config, distributed=args.distributed)
    elif (args.mode in ('eval', 'eval-wo-infer')):
        # Evaluate the model config with the merged args as a top-level argument.
        if model_config_args:
            config = json.loads(_jsonnet.evaluate_file(model_config_file, tla_codes={'args': model_config_args}))
        else:
            config = json.loads(_jsonnet.evaluate_file(model_config_file))
        model_preproc = registry.instantiate(registry.lookup('model', config['model']).Preproc, config['model'])
        # Pre-load every eval section once so each step reuses the same data.
        data = {}
        for section in exp_config['eval_section']:
            print('Load dataset, {} part'.format(section))
            orig_data = registry.construct('dataset', config['data'][section])
            orig_data.examples = model_preproc.load_raw_dataset(section, paths=config['data'][section]['paths'])
            orig_data.examples_with_name = {ex.full_name: ex for ex in orig_data.examples}
            data[section] = orig_data
        for step in exp_config['eval_steps']:
            infer_output_path = f"{exp_config['eval_output']}/{exp_config['eval_name']}-step{step}.infer"
            # 'eval-wo-infer' skips inference and reuses existing .infer files.
            if (args.mode == 'eval'):
                infer_config = InferConfig(model_config_file, model_config_args, logdir, exp_config['eval_section'], exp_config['eval_beam_size'], infer_output_path, step, strict_decoding=exp_config.get('eval_strict_decoding', False), limit=exp_config.get('limit', None), shuffle=exp_config.get('shuffle', False), part=exp_config.get('part', 'spider'), data=data)
                infer.main(infer_config)
            eval_output_path = f"{exp_config['eval_output']}/{exp_config['eval_name']}-step{step}.eval"
            eval_config = EvalConfig(model_config_file, model_config_args, logdir, exp_config['eval_section'], infer_output_path, eval_output_path, exp_config['eval_tb_dir'], vis_dir=exp_config.get('vis_dir'), part=exp_config.get('part', 'spider'), data=data, virtuoso_server=exp_config.get('virtuoso_server'))
            # NOTE: `eval` here is the project's eval module (shadows the builtin).
            eval_output_path = eval.main(eval_config)
            res_json = json.load(open(eval_output_path))
            print('exec', step, res_json['total_scores']['ex_val'], res_json['total_scores']['ex_test'])
class RegisterObject():
    """Tracks a register: its name, current value, the functions that have
    referenced it, and the history of types it has held.

    NOTE(review): the original source had its decorators stripped (bare
    ``_by_func.setter`` / ``_name.setter`` / ``_type.setter`` expression
    statements, and the ``@property`` lines missing entirely), which made the
    class fail with NameError at definition time.  They are reconstructed
    here; ``__eq__`` accessing ``obj.value`` etc. as attributes confirms these
    accessors are properties.
    """

    __slots__ = ['_register_name', '_value', '_called_by_func', '_current_type', '_type_history']

    def __init__(self, register_name, value, called_by_func=None, value_type=None):
        self._register_name = register_name
        self._value = value
        self._current_type = value_type
        # Previous types, pushed each time called_by_func is appended to.
        self._type_history = []
        # Names of functions that referenced this register.
        self._called_by_func = []
        if called_by_func is not None:
            self._called_by_func.append(called_by_func)

    def __repr__(self):
        # BUG FIX: the original referenced the nonexistent `self._value_type`;
        # the backing attribute is `_current_type`.
        return f"<VarabileObject-register:{self._register_name}, value:{self._value}, called_by_func:{','.join(self._called_by_func)}, current_type:{self._current_type}>"

    def __eq__(self, obj):
        return (isinstance(obj, RegisterObject) and (obj.called_by_func == self.called_by_func) and (obj.register_name == self.register_name) and (obj.value == self.value) and (obj.current_type == self.current_type))

    @property
    def called_by_func(self):
        return self._called_by_func

    @called_by_func.setter
    def called_by_func(self, called_by_func):
        # Appends (rather than replaces) and snapshots the current type.
        self._called_by_func.append(called_by_func)
        self._type_history.append(self._current_type)

    @property
    def register_name(self):
        return self._register_name

    @register_name.setter
    def register_name(self, reg_name):
        self._register_name = reg_name

    @property
    def value(self):
        return self._value

    @value.setter
    def value(self, value):
        self._value = value

    @property
    def current_type(self):
        return self._current_type

    @current_type.setter
    def current_type(self, value):
        self._current_type = value

    @property
    def type_histroy(self):
        # (sic) name kept for backward compatibility with existing callers.
        return self._type_history

    def hash_index(self):
        """Numeric part of the register name, e.g. 'r12' -> 12."""
        return int(self.register_name[1:])
def bounds(geometry, north_up=True, transform=None):
    """Return the bounding box of a GeoJSON-like object.

    A precomputed 'bbox' member is returned directly (as a tuple); otherwise
    the geometry/GeometryCollection/FeatureCollection is delegated to
    ``_bounds``.  Objects exposing ``__geo_interface__`` are unwrapped first.

    Raises ValueError when the input is not GeoJSON-like.
    """
    geometry = getattr(geometry, '__geo_interface__', None) or geometry
    if 'bbox' in geometry:
        return tuple(geometry['bbox'])
    geom = geometry.get('geometry') or geometry
    is_geojson_like = ('coordinates' in geom) or ('geometries' in geom) or ('features' in geom)
    if not is_geojson_like:
        raise ValueError('geometry must be a GeoJSON-like geometry, GeometryCollection, or FeatureCollection')
    return _bounds(geom, north_up=north_up, transform=transform)
# NOTE(review): the next line looks like a stripped attrs class decorator,
# presumably `@attr.s(repr=False, slots=True, hash=True)` — as written it is
# not valid Python; confirm against the upstream source.
(repr=False, slots=True, hash=True)
class _SubclassOfValidator():
    """attrs-style validator: the validated value must be a subclass of
    ``self.type``."""

    type = attrib()

    def __call__(self, inst, attr, value):
        """Called by attrs on init; raises TypeError following the attrs
        validator convention (message, attribute, expected, actual)."""
        if (not issubclass(value, self.type)):
            msg = f"'{attr.name}' must be a subclass of {self.type!r} (got {value!r})."
            raise TypeError(msg, attr, self.type, value)

    def __repr__(self):
        return f'<subclass_of validator for type {self.type!r}>'
# NOTE(review): `.mongo` below looks like a stripped `@pytest.mark.mongo`
# decorator — confirm against upstream.
.mongo
def test_mongo_being_calculated():
    """Two threads calling the same cached function concurrently must both
    get the *same* result: the second call should wait for the first
    computation instead of recomputing."""
    # NOTE(review): `(mongetter=_test_mongetter)` looks like a stripped
    # `@cachier(mongetter=_test_mongetter)` decorator on _takes_time.
    (mongetter=_test_mongetter)
    def _takes_time(arg_1, arg_2):
        # Simulate a slow computation with a random (per-computation) result.
        sleep(3)
        return ((random() + arg_1) + arg_2)
    _takes_time.clear_cache()
    res_queue = queue.Queue()
    thread1 = threading.Thread(target=_calls_takes_time, kwargs={'res_queue': res_queue}, daemon=True)
    thread2 = threading.Thread(target=_calls_takes_time, kwargs={'res_queue': res_queue}, daemon=True)
    thread1.start()
    # Stagger the second call so it finds the first one mid-computation.
    sleep(1)
    thread2.start()
    thread1.join(timeout=4)
    thread2.join(timeout=4)
    assert (res_queue.qsize() == 2)
    res1 = res_queue.get()
    res2 = res_queue.get()
    # Equal results prove the second caller reused the first computation.
    assert (res1 == res2)
def preprocess_request_body(body: Optional[RequestParams]) -> Optional[RequestParams]:
    """Preprocess a JSON request body before sending.

    If the parameters are nested under a 'project' and/or 'observation' key,
    only those nested sections are converted; otherwise the whole body is
    converted once.  Empty/None bodies yield None.

    BUG FIX: the original ran the ``else`` branch once per *missing* key, so
    a body containing neither key was preprocessed twice, and a body with
    exactly one nested key additionally had its entire (already converted)
    payload re-processed.
    """
    if not body:
        return None
    nested_resources = [resource for resource in ('project', 'observation') if resource in body]
    if nested_resources:
        for resource in nested_resources:
            body[resource] = preprocess_request_params(body[resource], convert_lists=False)
    else:
        body = preprocess_request_params(body, convert_lists=False)
    return body
# NOTE(review): the four dotted lines below look like stripped pytest
# decorators — `@pytest.mark.slow` and three `@pytest.mark.parametrize(...)`
# calls; confirm against upstream.
.slow
.parametrize('kwargs,op', [({}, 'sum'), ({}, 'mean'), pytest.param({}, 'min', marks=pytest.mark.slow), ({}, 'median'), pytest.param({}, 'max', marks=pytest.mark.slow), pytest.param({}, 'var', marks=pytest.mark.slow), pytest.param({}, 'count', marks=pytest.mark.slow), ({'ddof': 0}, 'std'), pytest.param({'quantile': 0.5}, 'quantile', marks=pytest.mark.slow)])
.parametrize('window', [pytest.param(2, marks=pytest.mark.slow), 7, pytest.param('3h', marks=pytest.mark.slow), pd.Timedelta('200 minutes')])
.parametrize('m', [2, pytest.param(5, marks=pytest.mark.slow)])
.parametrize('pre_get,post_get', [((lambda df: df), (lambda df: df)), ((lambda df: df.x), (lambda x: x)), ((lambda df: df), (lambda df: df.x))])
def test_rolling_count_aggregations(op, window, m, pre_get, post_get, kwargs, stream):
    """A streaming rolling aggregation fed in chunks of ``m`` rows must match
    the equivalent pandas rolling aggregation on the whole frame."""
    index = pd.date_range(start='2000-01-01', end='2000-01-03', freq='1h')
    df = pd.DataFrame({'x': np.arange(len(index))}, index=index)
    # Reference result computed directly with pandas.
    expected = getattr(post_get(pre_get(df).rolling(window)), op)(**kwargs)
    sdf = DataFrame(example=df.iloc[:0], stream=stream)
    roll = getattr(post_get(pre_get(sdf).rolling(window)), op)(**kwargs)
    L = roll.stream.gather().sink_to_list()
    assert (len(L) == 0)
    # Emit the frame in chunks; each emit appends a partial result to L.
    for i in range(0, len(df), m):
        sdf.emit(df.iloc[i:(i + m)])
    assert (len(L) > 1)
    assert_eq(pd.concat(L), expected)
class StructureBranch(nn.Module):
    """PatchGAN-style discriminator over edge/structure maps: five conv
    stages (the first four downsample or widen with LeakyReLU, the last maps
    to a 1-channel score map), optionally spectral-normalised and passed
    through a sigmoid."""

    def __init__(self, in_channels=4, use_sigmoid=True, use_spectral_norm=True, init_weights=True):
        super(StructureBranch, self).__init__()
        self.use_sigmoid = use_sigmoid
        # NOTE(review): conv1 and features are deliberately the *same* module
        # (aliased attribute) — confirm whether `features` is required by an
        # external consumer; removing either alias changes the state dict.
        self.conv1 = self.features = nn.Sequential(spectral_norm(nn.Conv2d(in_channels=in_channels, out_channels=64, kernel_size=4, stride=2, padding=1, bias=(not use_spectral_norm)), use_spectral_norm), nn.LeakyReLU(0.2, inplace=True))
        self.conv2 = nn.Sequential(spectral_norm(nn.Conv2d(in_channels=64, out_channels=128, kernel_size=4, stride=2, padding=1, bias=(not use_spectral_norm)), use_spectral_norm), nn.LeakyReLU(0.2, inplace=True))
        self.conv3 = nn.Sequential(spectral_norm(nn.Conv2d(in_channels=128, out_channels=256, kernel_size=4, stride=2, padding=1, bias=(not use_spectral_norm)), use_spectral_norm), nn.LeakyReLU(0.2, inplace=True))
        self.conv4 = nn.Sequential(spectral_norm(nn.Conv2d(in_channels=256, out_channels=512, kernel_size=4, stride=1, padding=1, bias=(not use_spectral_norm)), use_spectral_norm), nn.LeakyReLU(0.2, inplace=True))
        self.conv5 = nn.Sequential(spectral_norm(nn.Conv2d(in_channels=512, out_channels=1, kernel_size=4, stride=1, padding=1, bias=(not use_spectral_norm)), use_spectral_norm))
        if init_weights:
            # NOTE(review): weights_init() is *called* here, so it presumably
            # returns an initialiser function for nn.Module.apply — confirm
            # against its definition elsewhere in the project.
            self.apply(weights_init())

    def forward(self, edge):
        """Run the five conv stages over an edge map; returns the score map
        (sigmoid-squashed when use_sigmoid is set)."""
        edge_pred = self.conv5(self.conv4(self.conv3(self.conv2(self.conv1(edge)))))
        if self.use_sigmoid:
            edge_pred = torch.sigmoid(edge_pred)
        return edge_pred
class Vex2Esil():
    """Converts a single machine instruction to an ESIL expression by lifting
    it to VEX IR (pyvex) and translating the IR statements, using radare2
    (r2pipe) to assemble/disassemble.

    Relies on module-level tables defined elsewhere in this file:
    ``arch_dict``, ``archinfo_dict`` and ``op_dict``.
    """

    def __init__(self, arch, bits=64):
        self.arch = arch
        self.bits = bits
        # Map the r2 arch name to the archinfo arch name when they differ.
        self.aarch = self.arch
        if ((bits in arch_dict) and (arch in arch_dict[bits])):
            self.aarch = arch_dict[bits][arch]
        self.arch_class = archinfo_dict[self.aarch]()
        # Fixed address used for lifting (0x400100).
        self.vex_addr = 4195328
        self.ops = [Unop, Binop, Triop, Qop]
        # Enable the Get/Put size-conversion peephole in convert().
        self.do_lookahead = True

    def convert_str(self, instruction=None, code=None):
        """Assemble (or write raw bytes of) one instruction in an in-memory
        r2 session, then convert it.  Returns the ESIL string."""
        r2p = r2pipe.open('-', ['-a', self.arch, '-b', str(self.bits), '-2'])
        self.r2p = r2p
        if (instruction == None):
            r2p.cmd(('wx %s' % hexlify(code).decode()))
        else:
            r2p.cmd(('wa %s' % instruction))
        instr = r2p.cmdj('pdj 1')[0]
        return self.convert(instr, code=code)

    def convert_c(self, instruction=None, code=None):
        """Convert an instruction and render it as a C `esilprintf` call."""
        esilex = self.convert_str(instruction, code)
        return self.replace_regs(instruction, esilex)

    def convert(self, instr, code=None):
        """Lift the instruction bytes to VEX and translate each IR statement
        to ESIL tokens; returns the comma-joined ESIL expression (or None if
        assembly failed)."""
        if (code == None):
            print(instr['bytes'])
            code = unhexlify(instr['bytes'])
        print(instr['esil'])
        # All-zero bytes indicate r2 failed to assemble the input.
        if all([(x == 0) for x in code]):
            print('[!] failed to assemble instruction')
            return
        self.irsb = lift(code, self.vex_addr, self.arch_class)
        self.irsb.pp()
        self.exprs = []
        self.stacklen = 0
        self.temp_to_stack = {}
        # Maps each VEX temp to the ESIL token list that computes it.
        self.temp_to_exprs = {}
        self.skip_next = False
        for (ind, statement) in enumerate(self.irsb.statements):
            if self.skip_next:
                self.skip_next = False
                continue
            stmt_type = type(statement)
            next_stmt = None
            if (len(self.irsb.statements) > (ind + 1)):
                next_stmt = self.irsb.statements[(ind + 1)]
            if (stmt_type == WrTmp):
                if self.do_lookahead:
                    # Peephole 1: Get(reg) followed by a widening/narrowing
                    # conversion collapses to the appropriately-sized register
                    # name (e.g. 64to32 on rax -> eax).
                    if (type(statement.data) in (Get, GetI)):
                        (src, size) = self.offset_to_reg(statement.data, True)
                        conv_op = ('%dto' % (size * 8))
                        if ((type(next_stmt) == Unop) and (type(next_stmt.data) in self.ops) and (conv_op in next_stmt.data.op)):
                            to_size = next_stmt.data.op[(4 + len(conv_op)):]
                            if to_size.isdigit():
                                new_size = (int(to_size) // 8)
                                new_offset = statement.data.offset
                                if ((new_offset, new_size) in self.arch_class.register_size_names):
                                    new_exprs = [self.arch_class.register_size_names[(new_offset, new_size)]]
                                    self.temp_to_exprs[next_stmt.tmp] = new_exprs
                                    self.skip_next = True
                                    continue
                    # Peephole 2: a conversion whose result is immediately
                    # Put into a register collapses to a store into the
                    # smaller-sized register name.
                    elif (type(next_stmt) in (Put, PutI)):
                        (dst, size) = self.offset_to_reg(next_stmt)
                        conv_op = ('to%d' % (size * 8))
                        if ((type(statement.data) in self.ops) and (conv_op in statement.data.op)):
                            to_size = statement.data.op[4:statement.data.op.index(conv_op)][:2]
                            if (to_size[0] == '8'):
                                to_size = '8'
                            if to_size.isdigit():
                                new_size = (int(to_size) // 8)
                                new_offset = next_stmt.offset
                                if ((new_offset, new_size) in self.arch_class.register_size_names):
                                    new_dst = self.arch_class.register_size_names[(new_offset, new_size)]
                                    self.exprs += (self.temp_to_exprs[statement.data.args[0].tmp] + [new_dst, '='])
                                    self.skip_next = True
                                    continue
                # Default: remember the ESIL that computes this temp.
                new_exprs = self.data_to_esil(statement.data)
                self.temp_to_exprs[statement.tmp] = new_exprs
            elif (stmt_type in (Put, PutI)):
                (dst, size) = self.offset_to_reg(statement)
                # Skip VEX condition-code pseudo-registers.
                if ('cc_' not in dst):
                    self.exprs += self.data_to_esil(statement.data, dst=dst)
            elif (stmt_type in (Store, StoreG)):
                size = int((statement.data.result_size(self.irsb.tyenv) / 8))
                self.exprs += self.data_to_esil(statement.data)
                self.exprs += self.temp_to_exprs[statement.addr.tmp]
                self.exprs += [('=[%d]' % size)]
            elif (stmt_type == Exit):
                # Branches are not translated (yet).
                pass
        esilex = ','.join(self.exprs)
        return esilex

    def replace_regs(self, instr, esilex):
        """Turn a concrete ESIL string into a templated C `esilprintf` call,
        replacing registers/immediates that appear in the instruction's
        operands with %-placeholders and REG(i)/IMM(i) accessors."""
        regs = dict([(reg['name'], reg) for reg in self.r2p.cmdj('aerpj')['reg_info']])
        new_esilex = []
        arg_strs = []
        args = []
        if (' ' in instr):
            # Operand list: everything after the mnemonic, comma-separated.
            args = ' '.join(instr.split(' ')[1:]).split(', ')

        def arg_index(word):
            # Index of the operand containing this token (also matching
            # decimal vs 0x-hex spellings), or -1 if not an operand.
            for (ind, arg) in enumerate(args):
                if (word in arg):
                    return ind
                elif (word.isdigit() and (('0x' + word) in arg)):
                    return ind
                elif ((word[:2] == '0x') and (str(int(word, 16)) in arg)):
                    return ind
            return (- 1)
        for word in esilex.split(','):
            if ((word in regs) and (arg_index(word) != (- 1))):
                new_esilex.append('%1$s')
                arg_strs.append(('REG(%d)' % arg_index(word)))
            elif (word.isdigit() or ((word[:2] == '0x') and (arg_index(word) != (- 1)))):
                new_esilex.append('%d')
                arg_strs.append(('IMM(%d)' % arg_index(word)))
            else:
                new_esilex.append(word)
        replaced = ','.join(new_esilex)
        c_code = ('esilprintf("%s", %s)' % (replaced, ', '.join(arg_strs)))
        return c_code

    def offset_to_reg(self, stmt, is_data=False):
        """Resolve a VEX register-file offset to (register name, byte size).
        ``is_data`` selects whether ``stmt`` itself or ``stmt.data`` carries
        the result type."""
        offset = stmt.offset
        if is_data:
            size = int((stmt.result_size(self.irsb.tyenv) / 8))
        else:
            size = int((stmt.data.result_size(self.irsb.tyenv) / 8))
        return (self.arch_class.register_size_names[(offset, size)], size)

    def data_to_esil(self, data, dst=None, flag=False):
        """Translate one VEX expression to a list of ESIL tokens; when ``dst``
        is given, append the store (``=`` or ``:=`` when ``flag``)."""
        exprs = []
        dtype = type(data)
        if (dtype == Const):
            exprs.append(('0x%x' % data.con.value))
        elif (dtype == RdTmp):
            exprs += self.temp_to_exprs[data.tmp]
        elif (dtype in (Get, GetI)):
            (src, size) = self.offset_to_reg(data, True)
            exprs += [src]
        elif (dtype in self.ops):
            # ESIL is stack-based, so operands are emitted in reverse order.
            args = data.args[::(- 1)]
            exprs += self.do_op(data.op, args)
        elif (dtype == Load):
            size = int((data.result_size(self.irsb.tyenv) / 8))
            exprs += self.temp_to_exprs[data.addr.tmp]
            exprs += [('[%d]' % size)]
        if (dst != None):
            eq = '='
            if flag:
                eq = ':='
            exprs += [dst, eq]
        return exprs

    def do_op(self, op, args):
        """Expand a VEX operation via the op_dict template: integers in the
        template select an argument to translate, strings are emitted
        literally.  Unknown ops yield an empty list."""
        final_exprs = []
        op_key = op
        if (op_key in op_dict):
            exprs = op_dict[op_key]
            for expr in exprs:
                if (type(expr) == int):
                    val = self.data_to_esil(args[expr])
                    final_exprs += val
                else:
                    final_exprs += [expr]
            return final_exprs
        else:
            return []
def _fetch_build_eggs(dist):
try:
dist.fetch_build_eggs(dist.setup_requires)
except Exception as ex:
msg = "\n It is possible a package already installed in your system\n contains an version that is invalid according to PEP 440.\n You can try `pip install --use-pep517` as a workaround for this problem,\n or rely on a new virtual environment.\n\n If the problem refers to a package that is not installed yet,\n please contact that package's maintainers or distributors.\n "
if ('InvalidVersion' in ex.__class__.__name__):
if hasattr(ex, 'add_note'):
ex.add_note(msg)
else:
dist.announce(f'''
{msg}
''')
raise |
def pretix_categories():
    """Static fixture mimicking pretix's paginated /categories API response
    (three categories, single page)."""
    tickets = {'id': 1, 'name': {'en': 'Tickets', 'it': 'Biglietti'}, 'internal_name': 'tickets', 'description': {'en': ''}, 'position': 0, 'is_addon': False}
    gadgets = {'id': 2, 'name': {'en': 'Gadget', 'it': 'Premi'}, 'internal_name': None, 'description': {'en': 'Gadget', 'it': 'Premi'}, 'position': 0, 'is_addon': False}
    membership = {'id': 3, 'name': {'en': 'Python Italia Association Membership', 'it': "Iscrizione all'associazione Python Italia"}, 'internal_name': 'Association', 'description': {}, 'position': 0, 'is_addon': False}
    return {'count': 3, 'next': None, 'previous': None, 'results': [tickets, gadgets, membership]}
class SimulatorMaster(threading.Thread):
    """Daemon thread that talks to simulator clients over two ZeroMQ pipes:
    a PULL socket receiving client state (c2s) and a ROUTER socket sending
    replies (s2c) via a bounded outgoing queue drained by a helper thread."""

    class ClientState(object):
        """Per-client bookkeeping: three experience-memory lists plus the
        client's ZeroMQ identity (filled in on first message)."""

        def __init__(self):
            self.memory = [[] for _ in range(3)]
            self.ident = None

    def __init__(self, pipe_c2s, pipe_s2c):
        super(SimulatorMaster, self).__init__()
        # ZeroMQ identities/fork behaviour used here are POSIX-only.
        assert (os.name != 'nt'), "Doesn't support windows!"
        self.daemon = True
        self.name = 'SimulatorMaster'
        self.context = zmq.Context()
        self.c2s_socket = self.context.socket(zmq.PULL)
        self.c2s_socket.bind(pipe_c2s)
        # Small high-water marks keep memory bounded under backpressure.
        self.c2s_socket.set_hwm(20)
        self.s2c_socket = self.context.socket(zmq.ROUTER)
        self.s2c_socket.bind(pipe_s2c)
        self.s2c_socket.set_hwm(20)
        self.send_queue = queue.Queue(maxsize=1000)

        def f():
            # Drain one queued multipart message per loop iteration.
            msg = self.send_queue.get()
            self.s2c_socket.send_multipart(msg, copy=False)
        self.send_thread = LoopThread(f)
        self.send_thread.daemon = True
        self.send_thread.start()

        def clean_context(soks, context):
            for s in soks:
                s.close()
            context.term()
        import atexit
        # Make sure sockets/context are torn down at interpreter exit.
        atexit.register(clean_context, [self.c2s_socket, self.s2c_socket], self.context)

    def run(self):
        """Receive loop: deserialize each client message, look up (or create)
        its ClientState, and hand it to _process_msg (defined in a subclass
        or elsewhere).  Ends when the ZeroMQ context is terminated."""
        self.clients = defaultdict(self.ClientState)
        try:
            while True:
                msg = loads(self.c2s_socket.recv(copy=False).bytes)
                (ident, role_id, prob_state, all_state, last_cards, first_st, mask, minor_type, mode, reward, isOver) = msg
                client = self.clients[ident]
                if (client.ident is None):
                    client.ident = ident
                self._process_msg(client, role_id, prob_state, all_state, last_cards, first_st, mask, minor_type, mode, reward, isOver)
        except zmq.ContextTerminated:
            logger.info('[Simulator] Context was terminated.')

    def __del__(self):
        # Tear down without waiting for pending messages.
        self.context.destroy(linger=0)
def cos_similarity(ref_counts, gen_counts):
    """Cosine similarity between two sparse count mappings.

    Builds dense vectors over the union of keys and returns
    ``1 - cos_distance``; returns NaN when either mapping is empty.
    """
    if min(len(ref_counts), len(gen_counts)) == 0:
        return np.nan
    keys = np.unique(list(ref_counts.keys()) + list(gen_counts.keys()))
    ref_vec = np.array([ref_counts.get(key, 0) for key in keys])
    gen_vec = np.array([gen_counts.get(key, 0) for key in keys])
    return 1 - cos_distance(ref_vec, gen_vec)
class Screens(object):
    """Factory helpers building Screen layouts: one or two monitors, each
    with a single top bar or a top+bottom bar pair."""

    # Shared bar factory for all layouts.
    bars = Bars()

    def _screen_with_double_bars(self):
        # One Screen wired with freshly built top and bottom bars.
        return Screen(top=self.bars.init_top_double_bar(), bottom=self.bars.init_bottom_double_bar())

    def init_mono_screen_single_bar(self):
        return [Screen(top=self.bars.init_top_single_bar())]

    def init_mono_screen_double_bar(self):
        return [self._screen_with_double_bars()]

    def init_dual_screen_single_bar(self):
        return [Screen(top=self.bars.init_top_single_bar()) for _ in range(2)]

    def init_dual_screen_double_bar(self):
        return [self._screen_with_double_bars() for _ in range(2)]
class SELinuxRoleTest(ProvyTestCase):
    """Unit tests for SELinuxRole: package installation, activation and user
    mapping across Debian/Ubuntu.

    NOTE(review): the test methods lack the usual ``test_`` prefix — the
    project's ProvyTestCase presumably collects them by other means (or
    decorators were stripped); confirm before assuming they run.
    """

    def setUp(self):
        super(SELinuxRoleTest, self).setUp()
        self.role = SELinuxRole(prov=None, context={'cleanup': []})

    def provisions_correctly(self):
        """provision() must install packages and then activate SELinux."""
        with self.mock_role_methods('install_packages', 'activate'):
            self.role.provision()
            self.role.install_packages.assert_called_with()
            self.role.activate.assert_called_with()

    def installs_packages_in_debian(self):
        """Debian needs the full selinux-basics/policy package set."""
        with self.using_stub(AptitudeRole) as aptitude, self.provisioning_to('debian'):
            self.role.install_packages()
            expected_packages = [call('selinux-basics'), call('selinux-policy-default'), call('selinux-utils'), call('auditd'), call('audispd-plugins')]
            self.assertEqual(aptitude.ensure_package_installed.mock_calls, expected_packages)

    def installs_packages_in_ubuntu(self):
        """Ubuntu uses the meta 'selinux' package instead of basics/policy."""
        with self.using_stub(AptitudeRole) as aptitude, self.provisioning_to('ubuntu'):
            self.role.install_packages()
            expected_packages = [call('selinux'), call('selinux-utils'), call('auditd'), call('audispd-plugins')]
            self.assertEqual(aptitude.ensure_package_installed.mock_calls, expected_packages)

    def activates_on_debian(self):
        """Debian requires selinux-activate before mapping the default login."""
        with self.execute_mock() as execute, self.provisioning_to('debian'), patch.object(self.role, 'enforce'):
            self.role.activate()
            expected_calls = [call('selinux-activate', stdout=False, sudo=True), call("semanage login -m -s 'user_u' -r s0 __default__", stdout=False, sudo=True)]
            self.assertEqual(execute.mock_calls, expected_calls)
            self.role.enforce.assert_called_with()

    def activates_on_ubuntu(self):
        """Ubuntu skips selinux-activate; only the login mapping is needed."""
        with self.execute_mock() as execute, self.provisioning_to('ubuntu'), patch.object(self.role, 'enforce'):
            self.role.activate()
            expected_calls = [call("semanage login -m -s 'user_u' -r s0 __default__", stdout=False, sudo=True)]
            self.assertEqual(execute.mock_calls, expected_calls)
            self.role.enforce.assert_called_with()

    def puts_environment_in_enforce_mode(self):
        """enforce() must setenforce now and persist it in the config file."""
        with self.execute_mock(), self.mock_role_method('ensure_line'), self.warn_only():
            self.role.enforce()
            self.role.execute.assert_called_with('setenforce 1', stdout=False, sudo=True)
            self.role.ensure_line.assert_called_with('SELINUX=enforcing', '/etc/selinux/config', sudo=True)

    def ensures_that_a_login_mapping_exists(self):
        with self.execute_mock() as execute, self.warn_only():
            self.role.ensure_login_mapping('foo')
            execute.assert_called_with('semanage login -a foo', stdout=False, sudo=True)

    def maps_a_login_user_to_an_selinux_user(self):
        """map_login() must guarantee the mapping exists before modifying it."""
        with self.execute_mock() as execute, patch.object(self.role, 'ensure_login_mapping'):
            self.role.map_login('foo', 'staff_u')
            self.role.ensure_login_mapping.assert_called_with('foo')
            execute.assert_called_with('semanage login -m -s staff_u foo', stdout=False, sudo=True)

    def maps_a_login_user_to_selinux_roles(self):
        """map_role() joins the roles into a single quoted -R argument."""
        with self.execute_mock() as execute, patch.object(self.role, 'ensure_login_mapping'):
            self.role.map_role('foo', ['staff_r', 'sysadm_r'])
            self.role.ensure_login_mapping.assert_called_with('foo')
            execute.assert_called_with("semanage user -m -R 'staff_r sysadm_r' foo", stdout=False, sudo=True)
def _get_cosine_with_hard_restarts_schedule_with_warmup_lr_lambda(current_step: int, *, num_warmup_steps: int, num_training_steps: int, num_cycles: int):
if (current_step < num_warmup_steps):
return (float(current_step) / float(max(1, num_warmup_steps)))
progress = (float((current_step - num_warmup_steps)) / float(max(1, (num_training_steps - num_warmup_steps))))
if (progress >= 1.0):
return 0.0
return max(0.0, (0.5 * (1.0 + math.cos((math.pi * ((float(num_cycles) * progress) % 1.0)))))) |
def _generate_optimizer_class_with_gradient_clipping(optimizer: Type[torch.optim.Optimizer], *, per_param_clipper: Optional[_GradientClipper]=None, global_clipper: Optional[_GradientClipper]=None) -> Type[torch.optim.Optimizer]:
    """Create a subclass of ``optimizer`` whose step() clips gradients first.

    Exactly one of ``per_param_clipper`` (applied to every parameter
    individually) or ``global_clipper`` (applied once to all parameters)
    may be given.
    """
    assert (per_param_clipper is None) or (global_clipper is None), 'Not allowed to use both per-parameter clipping and global clipping'

    def optimizer_wgc_step(self, closure=None):
        # Clip before delegating to the wrapped optimizer's step.
        if per_param_clipper is not None:
            for param_group in self.param_groups:
                for param in param_group['params']:
                    per_param_clipper(param)
        else:
            # Global clipping needs the whole parameter list in one call.
            all_params = itertools.chain.from_iterable(group['params'] for group in self.param_groups)
            global_clipper(all_params)
        super(type(self), self).step(closure)

    return type(optimizer.__name__ + 'WithGradientClip', (optimizer,), {'step': optimizer_wgc_step})
def gimme_save_string(opt):
    """Render every attribute of ``opt`` as an indented, human-readable string.

    Dict-valued attributes are expanded one ``key: value`` pair per line;
    everything else is printed on a single indented line.  Attribute blocks
    are separated by a blank line.
    """
    chunks = []
    for attr_name, attr_value in vars(opt).items():
        block = str(attr_name)
        if isinstance(attr_value, dict):
            for inner_key, inner_value in attr_value.items():
                block += '\n\t' + str(inner_key) + ': ' + str(inner_value)
        else:
            block += '\n\t' + str(attr_value)
        chunks.append(block + '\n\n')
    return ''.join(chunks)
def test_postloop_hooks(capsys):
    """Hooks registered via register_postloop_hook run after the command
    loop finishes, in registration order."""
    # Drive a full cmdloop non-interactively: one command, then quit.
    testargs = ['prog', 'say hello', 'quit']
    with mock.patch.object(sys, 'argv', testargs):
        app = PluggedApp()
        app.register_postloop_hook(app.prepost_hook_one)
        app.register_postloop_hook(app.prepost_hook_two)
        app.cmdloop()
    (out, err) = capsys.readouterr()
    # Command output first, then the two hooks in registration order.
    assert (out == 'hello\none\ntwo\n')
    assert (not err)
def gen_candidate(level):
    """Generate length-``level`` candidate patterns from the frequent
    patterns of length ``level - 1`` (Apriori-style join step).

    For each frequent pattern, every frequent pattern whose (level-1)-prefix
    equals this pattern's (level-1)-suffix is joined onto it; candidates
    whose positional support reaches ``minsup`` are appended to
    ``freArr[level]``.  Increments the global comparison counter ``compnum``.
    """
    global compnum
    size = len(freArr[(level - 1)])
    start = 0
    for i in range(size):
        Q = ''
        R = ''
        # R: suffix of the current pattern; Q: prefix of the join partner.
        R = freArr[(level - 1)][i].name[1:level]
        Q = freArr[(level - 1)][start].name[0:(level - 1)]
        if (Q != R):
            start = binary_search(level, R, 0, (size - 1))
            # Guard against an out-of-range result from binary_search (e.g.
            # -1 on a miss).  The original guard `start < (0 | start) >= size`
            # chained to `start < start`, which is always False, so an invalid
            # index silently wrapped around (negative indexing) instead.
            if start < 0 or start >= size:
                start = 0
            else:
                Q = freArr[(level - 1)][start].name[0:(level - 1)]
        while (Q == R):
            compnum = (compnum + 1)
            cand = sorted_incomplete_nettree('', 0, [])
            # Join: pattern i plus the last character of the partner.
            cand.name = freArr[(level - 1)][i].name[0:level]
            cand.name = (cand.name + freArr[(level - 1)][start].name[(level - 1):level])
            global sigmasize
            global sigma
            # Map the appended character back to its alphabet index.
            for t in range(sigmasize):
                if (freArr[(level - 1)][start].name[(level - 1)] == sigma[t]):
                    position = t
                    break
            cand.pos_sup = other_level(freArr[(level - 1)][i], position, cand)
            if (cand.pos_sup >= minsup):
                freArr[level].append(cand)
            start = (start + 1)
            if (start >= size):
                start = 0
                break
            Q = freArr[(level - 1)][start].name[0:(level - 1)]
def add_QdrantServicer_to_server(servicer, server):
    """Register a QdrantServicer implementation's RPC handlers on a gRPC server.

    Auto-generated gRPC glue (protoc style): binds the HealthCheck
    unary-unary method to ``servicer.HealthCheck`` with its protobuf
    request deserializer and reply serializer.
    """
    rpc_method_handlers = {'HealthCheck': grpc.unary_unary_rpc_method_handler(servicer.HealthCheck, request_deserializer=qdrant__pb2.HealthCheckRequest.FromString, response_serializer=qdrant__pb2.HealthCheckReply.SerializeToString)}
    generic_handler = grpc.method_handlers_generic_handler('qdrant.Qdrant', rpc_method_handlers)
    server.add_generic_rpc_handlers((generic_handler,))
@pytest.mark.parametrize('levels_setting, excludes_setting, level, source, msg, expected_ret, expected_level', [({}, {}, usertypes.JsLogLevel.error, 'qute:test', 'msg', False, None), ({'qute:*': ['error']}, {}, usertypes.JsLogLevel.error, 'qute:bla', 'msg', True, usertypes.MessageLevel.error), ({'qute:*': ['error']}, {'qute:*': ['filter*']}, usertypes.JsLogLevel.error, 'qute:bla', 'notfiltered', True, usertypes.MessageLevel.error), ({'qute:*': ['error']}, {'qute:*': ['filter*']}, usertypes.JsLogLevel.error, 'qute:bla', 'filtered', False, None), ({'qute:*': ['error']}, {'qutie:*': ['*']}, usertypes.JsLogLevel.error, 'qute:bla', 'msg', True, usertypes.MessageLevel.error), ({'qute:*': ['error']}, {}, usertypes.JsLogLevel.info, 'qute:bla', 'msg', False, None), ({'qute:*': ['error', 'info']}, {}, usertypes.JsLogLevel.info, 'qute:bla', 'msg', True, usertypes.MessageLevel.info)])
def test_js_log_to_ui(config_stub, message_mock, caplog, levels_setting, excludes_setting, level, source, msg, expected_ret, expected_level):
    """_js_log_to_ui surfaces a JS message in the UI only when the source
    matches the configured levels and the message is not excluded.

    NOTE(review): the original first line was a bare `.parametrize(...)`
    expression (invalid syntax); restored as a `@pytest.mark.parametrize`
    decorator matching the trailing parametrized arguments.
    """
    config_stub.val.content.javascript.log_message.levels = levels_setting
    config_stub.val.content.javascript.log_message.excludes = excludes_setting
    with caplog.at_level(logging.ERROR):
        ret = shared._js_log_to_ui(level=level, source=source, line=0, msg=msg)
    assert (ret == expected_ret)
    if (expected_level is not None):
        assert (message_mock.getmsg(expected_level).text == f'JS: [{source}:0] {msg}')
    else:
        assert (not message_mock.messages)
class ReducedFocalLoss(nn.Module):
    """Reduced Focal Loss: cross-entropy down-weighted by
    ``((1 - p) / reduce_th) ** gamma`` for well-classified samples
    (p >= reduce_th); hard samples keep plain CE weight 1.

    Note: ``alpha`` is stored but unused by ``forward`` (kept for API
    compatibility); extra ``**kwargs`` are accepted and ignored.
    """

    def __init__(self, alpha=1, gamma=2, reduce=True, reduce_th=0.5, **kwargs):
        super(ReducedFocalLoss, self).__init__()
        self.alpha = alpha
        self.gamma = gamma
        self.reduce = reduce
        self.reduce_th = reduce_th

    def forward(self, inputs, targets):
        """Return the mean loss if ``self.reduce`` else per-sample losses."""
        per_sample_ce = F.cross_entropy(inputs, targets, reduction='none')
        prob = torch.exp(-per_sample_ce)
        # Easy samples get scaled down; hard ones (prob < reduce_th) keep 1.
        scale = ((1.0 - prob) / self.reduce_th) ** self.gamma
        scale = torch.where(prob < self.reduce_th, torch.ones_like(scale), scale)
        weighted = scale * per_sample_ce
        if not self.reduce:
            return weighted
        return torch.mean(weighted)
class WRNConv(nn.Module):
    """2D convolution (with bias) optionally followed by an inplace ReLU.

    ``activate`` controls both whether the ReLU submodule is created and
    whether it is applied in ``forward``.
    """

    def __init__(self, in_channels, out_channels, kernel_size, stride, padding, activate):
        super(WRNConv, self).__init__()
        self.activate = activate
        self.conv = nn.Conv2d(in_channels=in_channels, out_channels=out_channels, kernel_size=kernel_size, stride=stride, padding=padding, bias=True)
        if self.activate:
            self.activ = nn.ReLU(inplace=True)

    def forward(self, x):
        out = self.conv(x)
        if not self.activate:
            return out
        return self.activ(out)
def _should_use_custom_op(input):
    """Decide whether the custom conv2d gradfix op should handle ``input``.

    Requires the module-level ``enabled`` flag, cuDNN being enabled, a CUDA
    tensor, and PyTorch >= 1.7.0; on older PyTorch a warning announces the
    fallback to torch.nn.functional.conv2d().
    """
    assert isinstance(input, torch.Tensor)
    if not enabled or not torch.backends.cudnn.enabled:
        return False
    if input.device.type != 'cuda':
        return False
    if LooseVersion(torch.__version__) < LooseVersion('1.7.0'):
        warnings.warn(f'conv2d_gradfix not supported on PyTorch {torch.__version__}. Falling back to torch.nn.functional.conv2d().')
        return False
    return True
class Router(object):
    """URL router (bottle-style): maps (method, path-rule) pairs to targets.

    Fully static rules live in a plain dict for O(1) lookup; rules with
    wildcards are compiled into batched alternation regexes, at most
    ``_MAX_GROUPS_PER_PATTERN`` rules per compiled pattern (Python's ``re``
    caps numbered groups at 100 per pattern).
    """
    # Pattern/filter applied to wildcards that do not specify their own.
    default_pattern = '[^/]+'
    default_filter = 're'
    _MAX_GROUPS_PER_PATTERN = 99
    def __init__(self, strict=False):
        self.rules = []
        # (flatpat, method) -> index into dyna_routes[method], for overwrites.
        self._groups = {}
        # rule/name -> builder token list used by build().
        self.builder = {}
        # method -> {path: (target, None)} for wildcard-free rules.
        self.static = {}
        self.dyna_routes = {}
        self.dyna_regexes = {}
        # When True, even static rules go through the regex machinery so
        # insertion order is honored.
        self.strict_order = strict
        # Each filter maps its config string to (regex-mask, in_filter, out_filter).
        self.filters = {'re': (lambda conf: (_re_flatten((conf or self.default_pattern)), None, None)), 'int': (lambda conf: ('-?\\d+', int, (lambda x: str(int(x))))), 'float': (lambda conf: ('-?[\\d.]+', float, (lambda x: str(float(x))))), 'path': (lambda conf: ('.+?', None, None))}
    def add_filter(self, name, func):
        """Register a custom wildcard filter under ``name``."""
        self.filters[name] = func
    # Matches ':name#conf#' (old syntax) and '<name:filter:conf>' (new
    # syntax); a run of leading backslashes escapes the token.
    rule_syntax = re.compile('(\\\\*)(?:(?::([a-zA-Z_][a-zA-Z_0-9]*)?()(?:#(.*?)#)?)|(?:<([a-zA-Z_][a-zA-Z_0-9]*)?(?::([a-zA-Z_]*)(?::((?:\\\\.|[^\\\\>]+)+)?)?)?>))')
    def _itertokens(self, rule):
        """Yield (name, filter, conf) triples for each wildcard in ``rule``;
        literal text in between is yielded as (text, None, None)."""
        (offset, prefix) = (0, '')
        for match in self.rule_syntax.finditer(rule):
            prefix += rule[offset:match.start()]
            g = match.groups()
            # Odd number of leading backslashes -> the token is escaped;
            # emit it as literal text (minus one backslash).
            if (len(g[0]) % 2):
                prefix += match.group(0)[len(g[0]):]
                offset = match.end()
                continue
            if prefix:
                (yield (prefix, None, None))
            # Groups 4-6 carry the <...> syntax, 1-3 the legacy :name syntax.
            (name, filtr, conf) = (g[4:7] if (g[2] is None) else g[1:4])
            (yield (name, (filtr or 'default'), (conf or None)))
            (offset, prefix) = (match.end(), '')
        if ((offset <= len(rule)) or prefix):
            (yield ((prefix + rule[offset:]), None, None))
    def add(self, rule, method, target, name=None):
        """Add a new rule or replace the target for an existing rule."""
        anons = 0
        keys = []
        pattern = ''
        filters = []
        builder = []
        is_static = True
        for (key, mode, conf) in self._itertokens(rule):
            if mode:
                is_static = False
                if (mode == 'default'):
                    mode = self.default_filter
                (mask, in_filter, out_filter) = self.filters[mode](conf)
                if (not key):
                    # Anonymous wildcard: match but do not capture by name.
                    pattern += ('(?:%s)' % mask)
                    key = ('anon%d' % anons)
                    anons += 1
                else:
                    pattern += ('(?P<%s>%s)' % (key, mask))
                    keys.append(key)
                if in_filter:
                    filters.append((key, in_filter))
                builder.append((key, (out_filter or str)))
            elif key:
                pattern += re.escape(key)
                builder.append((None, key))
        self.builder[rule] = builder
        if name:
            self.builder[name] = builder
        if (is_static and (not self.strict_order)):
            # No wildcards: a plain dict lookup is sufficient.
            self.static.setdefault(method, {})
            self.static[method][self.build(rule)] = (target, None)
            return
        try:
            re_pattern = re.compile(('^(%s)$' % pattern))
            re_match = re_pattern.match
        except re.error:
            raise RouteSyntaxError(('Could not add Route: %s (%s)' % (rule, _e())))
        if filters:
            def getargs(path):
                # Apply per-wildcard in_filters (e.g. int()) to raw matches.
                url_args = re_match(path).groupdict()
                for (name, wildcard_filter) in filters:
                    try:
                        url_args[name] = wildcard_filter(url_args[name])
                    except ValueError:
                        raise HTTPError(400, 'Path has wrong format.')
                return url_args
        elif re_pattern.groupindex:
            def getargs(path):
                return re_match(path).groupdict()
        else:
            getargs = None
        flatpat = _re_flatten(pattern)
        whole_rule = (rule, flatpat, target, getargs)
        if ((flatpat, method) in self._groups):
            # Same flattened pattern for the same method: overwrite in place.
            if DEBUG:
                msg = 'Route <%s %s> overwrites a previously defined route'
                warnings.warn((msg % (method, rule)), RuntimeWarning)
            self.dyna_routes[method][self._groups[(flatpat, method)]] = whole_rule
        else:
            self.dyna_routes.setdefault(method, []).append(whole_rule)
            self._groups[(flatpat, method)] = (len(self.dyna_routes[method]) - 1)
        self._compile(method)
    def _compile(self, method):
        """(Re)build the batched alternation regexes for ``method``."""
        all_rules = self.dyna_routes[method]
        comborules = self.dyna_regexes[method] = []
        maxgroups = self._MAX_GROUPS_PER_PATTERN
        for x in range(0, len(all_rules), maxgroups):
            some = all_rules[x:(x + maxgroups)]
            combined = (flatpat for (_, flatpat, _, _) in some)
            combined = '|'.join((('(^%s$)' % flatpat) for flatpat in combined))
            combined = re.compile(combined).match
            rules = [(target, getargs) for (_, _, target, getargs) in some]
            comborules.append((combined, rules))
    def build(self, _name, *anons, **query):
        """Build a URL for a named rule; leftover keyword args become the
        query string.  Raises RouteBuildError for unknown names or missing
        wildcard arguments."""
        builder = self.builder.get(_name)
        if (not builder):
            raise RouteBuildError('No route with that name.', _name)
        try:
            for (i, value) in enumerate(anons):
                query[('anon%d' % i)] = value
            url = ''.join([(f(query.pop(n)) if n else f) for (n, f) in builder])
            return (url if (not query) else ((url + '?') + urlencode(query)))
        except KeyError:
            raise RouteBuildError(('Missing URL argument: %r' % _e().args[0]))
    def match(self, environ):
        """Return (target, url_args) for the request described by ``environ``.

        Raises HTTPError 405 (with an Allow header) when the path exists
        under a different method, and 404 when it matches nothing."""
        verb = environ['REQUEST_METHOD'].upper()
        path = (environ['PATH_INFO'] or '/')
        target = None
        if (verb == 'HEAD'):
            # HEAD falls back to GET handlers.
            methods = ['PROXY', verb, 'GET', 'ANY']
        else:
            methods = ['PROXY', verb, 'ANY']
        for method in methods:
            if ((method in self.static) and (path in self.static[method])):
                (target, getargs) = self.static[method][path]
                return (target, (getargs(path) if getargs else {}))
            elif (method in self.dyna_regexes):
                for (combined, rules) in self.dyna_regexes[method]:
                    match = combined(path)
                    if match:
                        # lastindex identifies which alternative matched.
                        (target, getargs) = rules[(match.lastindex - 1)]
                        return (target, (getargs(path) if getargs else {}))
        # No match for the requested method: collect methods that WOULD
        # match to produce a 405 with an accurate Allow header.
        allowed = set([])
        nocheck = set(methods)
        for method in (set(self.static) - nocheck):
            if (path in self.static[method]):
                # NOTE(review): adds `verb`, not `method` -- looks suspicious
                # but preserved as-is; confirm against upstream behavior.
                allowed.add(verb)
        for method in ((set(self.dyna_regexes) - allowed) - nocheck):
            for (combined, rules) in self.dyna_regexes[method]:
                match = combined(path)
                if match:
                    allowed.add(method)
        if allowed:
            allow_header = ','.join(sorted(allowed))
            raise HTTPError(405, 'Method not allowed.', Allow=allow_header)
        raise HTTPError(404, ('Not found: ' + repr(path)))
class TestGraphPartition(QiskitOptimizationTestCase):
    """Graph partition: the exact eigensolver and VQE solutions must match
    the brute-force optimum on a fixed seeded random graph."""
    def setUp(self):
        super().setUp()
        # Fixed seed so the random graph (and thus the optimum) is stable.
        aqua_globals.random_seed = 100
        self.num_nodes = 4
        self.w = random_graph(self.num_nodes, edge_prob=0.8, weight_range=10)
        (self.qubit_op, self.offset) = graph_partition.get_operator(self.w)
    def _brute_force(self):
        """Exhaustively evaluate all balanced bipartitions of the graph and
        return the minimal objective value."""
        def bitfield(n, length):
            # Integer -> fixed-width list of 0/1 digits.
            result = np.binary_repr(n, length)
            return [int(digit) for digit in result]
        nodes = self.num_nodes
        maximum = (2 ** nodes)
        minimal_v = np.inf
        for i in range(maximum):
            cur = bitfield(i, nodes)
            how_many_nonzero = np.count_nonzero(cur)
            # Only balanced partitions (equal-sized halves) are feasible.
            if ((how_many_nonzero * 2) != nodes):
                continue
            cur_v = graph_partition.objective_value(np.array(cur), self.w)
            if (cur_v < minimal_v):
                minimal_v = cur_v
        return minimal_v
    def test_graph_partition(self):
        """Exact (NumPy) eigensolver solution equals the brute-force optimum."""
        algo = NumPyMinimumEigensolver(self.qubit_op, aux_operators=[])
        result = algo.run()
        x = sample_most_likely(result.eigenstate)
        ising_sol = graph_partition.get_graph_solution(x)
        self.assertEqual(graph_partition.objective_value(np.array([0, 1, 0, 1]), self.w), graph_partition.objective_value(ising_sol, self.w))
        oracle = self._brute_force()
        self.assertEqual(graph_partition.objective_value(x, self.w), oracle)
    def test_graph_partition_vqe(self):
        """VQE (RealAmplitudes ansatz + SPSA) also reaches the optimum."""
        aqua_globals.random_seed = 10213
        wavefunction = RealAmplitudes(self.qubit_op.num_qubits, insert_barriers=True, reps=5, entanglement='linear')
        result = VQE(self.qubit_op, wavefunction, SPSA(maxiter=300), max_evals_grouped=2).run(QuantumInstance(BasicAer.get_backend('statevector_simulator'), seed_simulator=aqua_globals.random_seed, seed_transpiler=aqua_globals.random_seed))
        x = sample_most_likely(result.eigenstate)
        ising_sol = graph_partition.get_graph_solution(x)
        self.assertEqual(graph_partition.objective_value(np.array([0, 1, 0, 1]), self.w), graph_partition.objective_value(ising_sol, self.w))
        oracle = self._brute_force()
        self.assertEqual(graph_partition.objective_value(x, self.w), oracle)
def make_commitment_output_to_local_address(revocation_pubkey: bytes, to_self_delay: int, delayed_pubkey: bytes) -> str:
    """Return the P2WSH address for a commitment transaction's to_local output.

    Builds the witness script (revocation path / CSV-delayed local path)
    and wraps it in a p2wsh address.
    """
    local_script = make_commitment_output_to_local_witness_script(revocation_pubkey, to_self_delay, delayed_pubkey)
    return bitcoin.redeem_script_to_address('p2wsh', bh2u(local_script))
def pixel_group(score, mask, embedding, kernel_label, kernel_contour, kernel_region_num, distance_threshold):
    """Group pixels into text instances around kernel regions (PAN-style
    post-processing, delegated to the compiled extension).

    Args:
        score: foreground score map (Tensor or ndarray).
        mask: binary foreground mask (Tensor or ndarray).
        embedding: per-pixel embedding vectors (Tensor or ndarray).
        kernel_label: labelled kernel regions (Tensor or ndarray).
        kernel_contour: kernel contour map (Tensor or ndarray).
        kernel_region_num (int): number of labelled kernel regions.
        distance_threshold (float): max embedding distance for assignment.

    Returns:
        Per-region pixel assignments: a list of float64 arrays under
        parrots, otherwise whatever the extension returns directly.
    """
    assert isinstance(score, (torch.Tensor, np.ndarray))
    assert isinstance(mask, (torch.Tensor, np.ndarray))
    assert isinstance(embedding, (torch.Tensor, np.ndarray))
    assert isinstance(kernel_label, (torch.Tensor, np.ndarray))
    assert isinstance(kernel_contour, (torch.Tensor, np.ndarray))
    assert isinstance(kernel_region_num, int)
    assert isinstance(distance_threshold, float)
    # The extension expects tensors; promote any ndarray inputs.
    if isinstance(score, np.ndarray):
        score = torch.from_numpy(score)
    if isinstance(mask, np.ndarray):
        mask = torch.from_numpy(mask)
    if isinstance(embedding, np.ndarray):
        embedding = torch.from_numpy(embedding)
    if isinstance(kernel_label, np.ndarray):
        kernel_label = torch.from_numpy(kernel_label)
    if isinstance(kernel_contour, np.ndarray):
        kernel_contour = torch.from_numpy(kernel_contour)
    if (torch.__version__ == 'parrots'):
        # parrots returns a flat buffer: first kernel_region_num entries are
        # per-region lengths, followed by the concatenated assignments.
        label = ext_module.pixel_group(score, mask, embedding, kernel_label, kernel_contour, kernel_region_num=kernel_region_num, distance_threshold=distance_threshold)
        label = label.tolist()
        label = label[0]
        list_index = kernel_region_num
        pixel_assignment = []
        for x in range(kernel_region_num):
            # np.float was a deprecated alias of the builtin float (i.e.
            # float64) and is removed in NumPy >= 1.24; use np.float64.
            pixel_assignment.append(np.array(label[list_index:(list_index + int(label[x]))], dtype=np.float64))
            list_index = (list_index + int(label[x]))
    else:
        pixel_assignment = ext_module.pixel_group(score, mask, embedding, kernel_label, kernel_contour, kernel_region_num, distance_threshold)
    return pixel_assignment
@mock.patch('python_ta.config.toml.load', side_effect=FileNotFoundError)
def test_load_messages_config_logging(_, caplog):
    """A missing messages config file is reported via a WARNING log record.

    NOTE(review): the original first line was a bare argument tuple
    `('python_ta.config.toml.load', side_effect=FileNotFoundError)` --
    invalid syntax, evidently a stripped patch decorator; restored as
    `@mock.patch(...)`, which matches the otherwise-unused leading `_`
    parameter.  Confirm whether the file imports `patch` directly instead.
    """
    try:
        load_messages_config('non_existent_file.toml', 'default_file.toml', True)
    except FileNotFoundError:
        # If the error propagates, the warning must already have been logged.
        assert ('Could not find messages config file at' in caplog.text)
        assert ('WARNING' in [record.levelname for record in caplog.records])
def filter_ss_table(store_sales_df, filtered_item_df):
    """Filter store_sales to rows that have a customer, fall inside the
    90-day window starting at the module-level
    ``q12_store_sale_sk_start_date``, and reference an item present in
    ``filtered_item_df``; returns only the customer and sold-date columns.
    """
    with_customer = store_sales_df[store_sales_df['ss_customer_sk'].notnull()].reset_index(drop=True)
    window_start = q12_store_sale_sk_start_date
    in_window = ((with_customer['ss_sold_date_sk'] >= window_start) & (with_customer['ss_sold_date_sk'] <= (window_start + 90)))
    with_customer = with_customer.loc[in_window].reset_index(drop=True)
    joined = with_customer.merge(filtered_item_df, left_on=['ss_item_sk'], right_on=['i_item_sk'], how='inner')
    return joined[['ss_customer_sk', 'ss_sold_date_sk']]
@pytest.mark.parametrize('option', ['-o', '--output-image'])
def test_output_image(mock_image_optimization, mock_write_image, set_argv, option):
    """Both spellings of the output-image option route the optimized image
    and the requested path to the image writer.

    NOTE(review): the original first line was a bare `.parametrize(...)`
    expression (invalid syntax); restored as a `@pytest.mark.parametrize`
    decorator matching the `option` parameter.
    """
    expected_file = '/path/to/output/image'
    expected_image = object()
    mock_image_optimization(return_value=expected_image)
    mock = mock_write_image()
    set_argv(f'{option}={expected_file}')
    with exits():
        cli.main()
    ((actual_image, actual_file), _) = mock.call_args
    assert (actual_image is expected_image)
    assert (actual_file == expected_file)
def load_zip_file(file, fileNameRegExp='', allEntries=False):
    """Load a ZIP archive into a dict mapping entry key -> entry bytes.

    If ``fileNameRegExp`` is given, only entries matching it are kept and,
    when the pattern has at least one capture group, group 1 becomes the
    key instead of the full entry name.  With ``allEntries`` True, a
    non-matching entry raises instead of being silently skipped.

    Raises:
        Exception: if the archive cannot be opened, or (with allEntries)
            if an entry name does not match the pattern.
    """
    try:
        archive = zipfile.ZipFile(file, mode='r', allowZip64=True)
    except Exception as exc:
        # Narrowed from a bare `except:` so KeyboardInterrupt/SystemExit
        # still propagate; chain the cause for easier debugging.
        raise Exception('Error loading the ZIP archive') from exc
    pairs = []
    for name in archive.namelist():
        addFile = True
        keyName = name
        if fileNameRegExp != '':
            m = re.match(fileNameRegExp, name)
            if m is None:
                addFile = False
            elif len(m.groups()) > 0:
                keyName = m.group(1)
        if addFile:
            pairs.append([keyName, archive.read(name)])
        elif allEntries:
            raise Exception('ZIP entry not valid: %s' % name)
    return dict(pairs)
class Logger(object):
    """Experiment logger.

    Creates the experiment directory tree under ./experiments/<exp_dir>,
    records the launch command and hyper-parameters, and provides helpers
    for checkpoint save/load, console+file logging, sample/test image
    dumps and an optional HTML report.
    """

    def __init__(self, name, exp_dir, opt, commend='', HTML_doc=False, log_dir='log', checkpoint_dir='checkpoint', sample='samples', web='web', test_dir='test'):
        self.name = name
        self.exp_dir = os.path.join(os.path.abspath('experiments'), exp_dir)
        self.log_dir = os.path.join(self.exp_dir, log_dir)
        self.sample = os.path.join(self.exp_dir, sample)
        self.web = os.path.join(self.exp_dir, web)
        self.img = os.path.join(self.web, 'images')
        self.checkpoint_dir = os.path.join(self.exp_dir, checkpoint_dir)
        self.test_dir = os.path.join(self.exp_dir, test_dir)
        self.opt = opt
        try:
            os.mkdir(self.exp_dir)
            os.mkdir(self.log_dir)
            os.mkdir(self.checkpoint_dir)
            os.mkdir(self.sample)
            os.mkdir(self.web)
            os.mkdir(self.img)
            os.mkdir(self.test_dir)
            print(('Creating: %s\n %s\n %s\n %s\n %s' % (self.exp_dir, self.log_dir, self.sample, self.checkpoint_dir, self.test_dir)))
        except NotImplementedError:
            raise Exception('Check your dir.')
        except FileExistsError:
            # Re-using an existing experiment directory is fine.
            pass
        with open(os.path.join(self.exp_dir, 'run_commend.txt'), 'w') as f:
            f.write(commend)
        # Fixed: the original used `opt.train_file is not ''`, an identity
        # comparison against a string literal (unreliable and a
        # SyntaxWarning on modern Python); equality is what was intended.
        if (opt.train_file != ''):
            copy(opt.train_file, self.exp_dir)
            copy(opt.config_file, self.exp_dir)
        self.html_tag = HTML_doc
        if HTML_doc:
            self.html = HTML(self)
            self.html.add_header(opt.exp_name)
            self.html.save()
        self._parse()

    def _parse(self):
        """Dump every public attribute of ``self.opt`` to exp_params.txt."""
        attr_list = list()
        exp_readme = os.path.join(self.exp_dir, 'exp_params.txt')
        for attr in dir(self.opt):
            if (not attr.startswith('_')):
                attr_list.append(attr)
        print('Init parameters...')
        with open(exp_readme, 'w') as readme:
            readme.write((self.name + '\n'))
            for attr in attr_list:
                line = ('%s : %s' % (attr, self.opt.__getattribute__(attr)))
                print(line)
                readme.write(line)
                readme.write('\n')

    def init_scala_log(self, log_name, title_list):
        """Create a scalar (tabular) file logger for the given columns."""
        return _FileLogger(self, log_name, title_list)

    def _parse_save_name(self, tag, epoch, step='_', type='.pth'):
        """Compose a checkpoint file name: '<epoch><step><tag><type>'."""
        return (((str(epoch) + step) + tag) + type)

    def save_epoch(self, epoch, name, state_dict):
        """Save ``state_dict`` under an epoch-stamped checkpoint name."""
        torch.save(state_dict, os.path.join(self.checkpoint_dir, self._parse_save_name(name, epoch)))

    def save(self, name, model, optim, dataparallel=1):
        """Save model + optimizer state under ``name`` in the checkpoint dir."""
        save_path = os.path.join(self.checkpoint_dir, name)
        epoch = {'Name': name, 'state_dict': model.state_dict(), 'optim': optim.state_dict()}
        torch.save(epoch, save_path)
        print((' saving: %s......' % save_path))

    def load_epoch(self, name, epoch):
        """Load the checkpoint previously written by ``save_epoch``."""
        return torch.load(os.path.join(self.checkpoint_dir, self._parse_save_name(name, epoch)))

    def print_log(self, string, with_time=True, same_line=False):
        """Print a (time-stamped) message and append it to the log file."""
        if with_time:
            time_stamp = datetime.datetime.now()
            time_stamp = time_stamp.strftime('%Y.%m.%d-%H:%M:%S')
            time_stamp += string
            string = time_stamp
        (print(string, end='\r') if same_line else print(string))
        sys.stdout.flush()
        with open(os.path.join(self.log_dir, (self.name + '.log')), 'a') as f:
            f.write((string.strip('\n') + '\n'))

    def _parse_web_image_name(self, Nom, tag, step='_', type='.png'):
        """Compose a web image file name: 'No.<Nom><step><tag><type>'."""
        return (((('No.' + str(Nom)) + step) + tag) + type)

    def _save_web_images(self, pil, name):
        """Save a PIL image into the web images directory."""
        save_path = os.path.join(self.img, name)
        pil.save(save_path)

    def _add_image_table(self, img_list, tag_list):
        """Append a row of images (with captions) to the HTML report."""
        assert (len(img_list) == len(tag_list)), 'check input'
        self.html.add_images(img_list, tag_list, img_list)
        self.html.save()

    def save_image_record(self, epoch, image_dict):
        """Save {tag: image} for ``epoch`` and add them to the HTML report."""
        img_list = list()
        tag_list = list()
        for (tag, image) in image_dict.items():
            image_name = self._parse_web_image_name(epoch, tag)
            img_list.append(image_name)
            tag_list.append(tag)
            image.save(os.path.join(self.img, image_name))
        self.html.add_header(('Epoch: %d' % epoch))
        self._add_image_table(img_list, tag_list)

    def save_logger(self):
        """Pickle this logger into the experiment directory."""
        # Fixed: pickle writes bytes, so the file must be opened in binary
        # mode; the original text-mode 'w' raised TypeError at dump time.
        with open(os.path.join(self.exp_dir, 'logger.pkl'), 'wb') as f:
            pickle.dump(self, f)

    def save_training_result(self, im_name, im, dir=False, epoch=0):
        """Save a training sample image, optionally in a per-epoch subdir."""
        if dir:
            save_path = os.path.join(self.sample, str(epoch))
            if (not os.path.exists(save_path)):
                os.mkdir(save_path)
        else:
            save_path = self.sample
        if isinstance(im, Variable):
            im = (im.cpu() if im.is_cuda else im)
            im = VAR2PIL(torch.clamp(im, min=0.0, max=1.0))
        else:
            im = to_pil_image(torch.clamp(im, min=0.0, max=1.0))
        im.save(os.path.join(save_path, im_name))

    def save_test_result(self, epoch_idx, test_set, im_name, im):
        """Save a test image under <test_dir>/<epoch>/<test_set>/."""
        epoch_folder = os.path.join(self.test_dir, str(epoch_idx))
        if (not os.path.exists(epoch_folder)):
            os.mkdir(epoch_folder)
        set_folder = os.path.join(epoch_folder, test_set)
        if (not os.path.exists(set_folder)):
            os.mkdir(set_folder)
        cv2.imwrite(os.path.join(set_folder, im_name), im)
class Terminal():
    """A terminal session: wires a PTY process to a terminal screen/stream
    pair and renders it into a Sublime Text view or panel.

    Class-level registries track live terminals by view id, plus any
    detached terminals (running sessions temporarily without a view).
    """
    # All live terminals keyed by view id.
    _terminals = {}
    # Terminals whose view has been detached (e.g. moved between windows).
    _detached_terminals = []
    def __init__(self, view=None):
        self.view = view
        self._cached_cursor = [0, 0]
        # Fixed (rows, cols) override from settings; (None, None) = follow view.
        self._size = sublime.load_settings('Terminus.sublime-settings').get('size', (None, None))
        # Single-element lists act as mutable cells shared with closures/threads.
        self._cached_cursor_is_hidden = [True]
        self.image_count = 0
        self.images = {}
        self._strings = Queue()
        self._pending_to_send_string = [False]
        self._pending_to_clear_scrollback = [False]
        self._pending_to_reset = [None]
        self.lock = threading.Lock()
    # NOTE(review): from_id/from_tag/cull_terminals take `cls` but carry no
    # @classmethod decorator -- the decorators appear to have been lost in
    # extraction; restore @classmethod on all three.
    def from_id(cls, vid):
        """Look up a live terminal by view id; None if not registered."""
        if (vid not in cls._terminals):
            return None
        return cls._terminals[vid]
    def from_tag(cls, tag, current_window_only=True):
        """Find a terminal by user tag, optionally restricted to the active window."""
        for terminal in cls._terminals.values():
            if (terminal.tag == tag):
                if current_window_only:
                    active_window = sublime.active_window()
                    if (terminal.window and active_window):
                        if (terminal.window == active_window):
                            return terminal
                else:
                    return terminal
        return None
    def cull_terminals(cls):
        """Kill every terminal that no longer has a hosting window."""
        terminals_to_kill = []
        for terminal in cls._terminals.values():
            if (not terminal.is_hosted()):
                terminals_to_kill.append(terminal)
        for terminal in terminals_to_kill:
            terminal.kill()
    def window(self):
        # NOTE(review): referenced elsewhere as `terminal.window` (no call)
        # and compared/truth-tested directly -- this was presumably a
        # @property whose decorator was stripped; restore it.
        if self.detached:
            return None
        if self.show_in_panel:
            return get_panel_window(self.view)
        else:
            return self.view.window()
    def attach_view(self, view, offset=None):
        """Bind this terminal to ``view`` and mark the whole screen dirty."""
        with self.lock:
            self.view = view
            self.detached = False
            Terminal._terminals[view.id()] = self
            if (self in Terminal._detached_terminals):
                Terminal._detached_terminals.remove(self)
            # Force a full redraw into the freshly attached view.
            self.screen.dirty.update(range(self.screen.lines))
            self.set_offset(offset)
    def detach_view(self):
        """Unbind from the current view; the session keeps running detached."""
        with self.lock:
            self.detached = True
            Terminal._detached_terminals.append(self)
            if (self.view.id() in Terminal._terminals):
                del Terminal._terminals[self.view.id()]
            self.view = None
    # NOTE(review): bare `(period=1, default=True)` is invalid syntax --
    # almost certainly the argument list of a stripped throttling decorator
    # (Terminus wraps these checks so they run at most once per `period`
    # seconds, returning `default` in between); restore the decorator.
    (period=1, default=True)
    def is_hosted(self):
        if self.detached:
            return True
        return (self.window is not None)
    def _need_to_render(self):
        """True if the screen content or cursor changed since the last render;
        updates the cached cursor state as a side effect."""
        flag = False
        if self.screen.dirty:
            flag = True
        elif ((self.screen.cursor.x != self._cached_cursor[0]) or (self.screen.cursor.y != self._cached_cursor[1])):
            flag = True
        elif (self.screen.cursor.hidden != self._cached_cursor_is_hidden[0]):
            flag = True
        if flag:
            self._cached_cursor[0] = self.screen.cursor.x
            self._cached_cursor[1] = self.screen.cursor.y
            self._cached_cursor_is_hidden[0] = self.screen.cursor.hidden
        return flag
    def _start_rendering(self):
        """Spawn the reader thread (PTY -> buffer) and the renderer thread
        (buffer -> screen -> view commands)."""
        # Shared cells between the two threads: pending PTY output and a
        # completion flag.
        data = ['']
        done = [False]
        # NOTE(review): same stripped-decorator artifact as above -- restore
        # the throttling decorator on was_resized.
        (period=1, default=False)
        def was_resized():
            size = view_size(self.view, force=self._size)
            return ((self.screen.lines != size[0]) or (self.screen.columns != size[1]))
        def reader():
            # Drain the PTY into the shared buffer until EOF or shutdown.
            while True:
                try:
                    temp = self.process.read(1024)
                except EOFError:
                    break
                with self.lock:
                    data[0] += temp
                if (done[0] or (not self.is_hosted())):
                    logger.debug('reader breaks')
                    break
            done[0] = True
        threading.Thread(target=reader).start()
        def renderer():
            def feed_data():
                if (len(data[0]) > 0):
                    logger.debug('receieved: {}'.format(data[0]))
                    self.stream.feed(data[0])
                    data[0] = ''
            # Poll ~every 30ms: feed pending output, resize if the view
            # changed, and re-render when the screen is dirty.
            while True:
                with intermission(period=0.03), self.lock:
                    feed_data()
                    if (not self.detached):
                        if was_resized():
                            self.handle_resize()
                            self.view.run_command('terminus_show_cursor')
                        if self._need_to_render():
                            self.view.run_command('terminus_render')
                            self.screen.dirty.clear()
                if (done[0] or (not self.is_hosted())):
                    logger.debug('renderer breaks')
                    break
            feed_data()
            done[0] = True
            def _cleanup():
                if self.view:
                    self.view.run_command('terminus_cleanup')
            # View commands must run on the main thread.
            sublime.set_timeout(_cleanup)
        threading.Thread(target=renderer).start()
    def set_offset(self, offset=None):
        """Record the view row at which terminal output starts."""
        if (offset is not None):
            self.offset = offset
        elif (self.view and (self.view.size() > 0)):
            view = self.view
            # Start rendering after any existing view content.
            self.offset = (view.rowcol(view.size())[0] + 1)
        else:
            self.offset = 0
        logger.debug('activating with offset %s', self.offset)
    def start(self, cmd, cwd=None, env=None, default_title=None, title=None, show_in_panel=None, panel_name=None, tag=None, auto_close=True, cancellable=False, timeit=False):
        """Spawn the PTY process and set up screen, stream and rendering."""
        view = self.view
        if view:
            self.detached = False
            Terminal._terminals[view.id()] = self
        else:
            Terminal._detached_terminals.append(self)
            self.detached = True
        self.show_in_panel = show_in_panel
        self.panel_name = panel_name
        self.tag = tag
        self.auto_close = auto_close
        self.cancellable = cancellable
        self.timeit = timeit
        if timeit:
            self.start_time = time.time()
        self.default_title = default_title
        self.title = title
        if view:
            self.set_offset()
        size = view_size((view or sublime.active_window().active_view()), default=(40, 80), force=self._size)
        logger.debug('view size: {}'.format(str(size)))
        _env = os.environ.copy()
        # NOTE(review): env defaults to None but dict.update(None) raises
        # TypeError -- assumes callers always pass a mapping; confirm.
        _env.update(env)
        self.process = TerminalPtyProcess.spawn(cmd, cwd=cwd, env=_env, dimensions=size)
        self.screen = TerminalScreen(size[1], size[0], process=self.process, history=10000, clear_callback=self.clear_callback, reset_callback=self.reset_callback)
        self.stream = TerminalStream(self.screen)
        self.screen.set_show_image_callback(self.show_image)
        self._start_rendering()
    def kill(self):
        """Terminate the PTY process and deregister this terminal."""
        logger.debug('kill')
        self.process.terminate()
        vid = self.view.id()
        if (vid in self._terminals):
            del self._terminals[vid]
    def handle_resize(self):
        """Propagate the current view size to the PTY and the screen."""
        size = view_size(self.view, force=self._size)
        logger.debug('handle resize {} {} -> {} {}'.format(self.screen.lines, self.screen.columns, size[0], size[1]))
        try:
            self.process.setwinsize(*size)
            self.screen.resize(*size)
        except RuntimeError:
            pass
    def clear_callback(self):
        # Deferred: the render pass picks this flag up on the main thread.
        self._pending_to_clear_scrollback[0] = True
    def reset_callback(self):
        # First reset request is recorded as False, subsequent ones as True
        # (the renderer distinguishes "pending" from "repeat").
        if (self._pending_to_reset[0] is None):
            self._pending_to_reset[0] = False
        else:
            self._pending_to_reset[0] = True
    def send_key(self, *args, **kwargs):
        """Translate a key press to its escape sequence and send it."""
        kwargs['application_mode'] = self.application_mode_enabled()
        kwargs['new_line_mode'] = self.new_line_mode_enabled()
        self.send_string(get_key_code(*args, **kwargs), normalized=False)
    def send_string(self, string, normalized=True):
        """Write ``string`` to the PTY, normalizing newlines if requested.

        Short writes go out directly; long or queued writes are chunked
        (512 bytes) through a background sender thread."""
        if normalized:
            string = string.replace('\r\n', '\n')
            if self.new_line_mode_enabled():
                string = string.replace('\n', '\r\n')
            else:
                string = string.replace('\n', '\r')
        no_queue = (not self._pending_to_send_string[0])
        if (no_queue and (len(string) <= 512)):
            logger.debug('sent: {}'.format((string[0:64] if (len(string) > 64) else string)))
            self.process.write(string)
        else:
            for i in range(0, len(string), 512):
                self._strings.put(string[i:(i + 512)])
            if no_queue:
                self._pending_to_send_string[0] = True
                threading.Thread(target=self.process_send_string).start()
    def process_send_string(self):
        """Background sender: drain the queued chunks, pacing writes."""
        while True:
            try:
                string = self._strings.get(False)
                logger.debug('sent: {}'.format((string[0:64] if (len(string) > 64) else string)))
                self.process.write(string)
            except Empty:
                self._pending_to_send_string[0] = False
                return
            else:
                time.sleep(0.1)
    # The mode checks below test pyte-style mode codes, which are stored
    # shifted left by 5 bits in screen.mode.
    def bracketed_paste_mode_enabled(self):
        return ((2004 << 5) in self.screen.mode)
    def new_line_mode_enabled(self):
        return ((20 << 5) in self.screen.mode)
    def application_mode_enabled(self):
        return ((1 << 5) in self.screen.mode)
    def find_image(self, pt):
        """Return the phantom id of an inline image ending at ``pt``, if any."""
        view = self.view
        for pid in self.images:
            region = view.query_phantom(pid)[0]
            if (region.end() == pt):
                return pid
        return None
    def show_image(self, data, args, cr=None):
        """Render base64 image ``data`` as an inline phantom at the cursor."""
        view = self.view
        if (('inline' not in args) or (not args['inline'])):
            return
        cursor = self.screen.cursor
        pt = view.text_point((self.offset + cursor.y), cursor.x)
        databytes = base64.decodebytes(data.encode())
        image_info = get_image_info(databytes)
        if (not image_info):
            logger.error('cannot get image info')
            return
        (what, width, height) = image_info
        # Persist the decoded image so the phantom can be cleaned up later.
        (_, image_path) = tempfile.mkstemp(suffix=('.' + what))
        with open(image_path, 'wb') as f:
            f.write(databytes)
        (width, height) = image_resize(width, height, (args['width'] if ('width' in args) else None), (args['height'] if ('height' in args) else None), view.em_width(), (view.viewport_extent()[0] - (3 * view.em_width())), (args['preserveAspectRatio'] if ('preserveAspectRatio' in args) else 1))
        if self.find_image(pt):
            # Avoid stacking two phantoms on the same point.
            self.view.run_command('terminus_insert', {'point': pt, 'character': ' '})
            pt += 1
        self.image_count += 1
        p = view.add_phantom('terminus_image#{}'.format(self.image_count), sublime.Region(pt, pt), IMAGE.format(what=what, data=data, width=width, height=height, count=self.image_count), sublime.LAYOUT_INLINE)
        self.images[p] = image_path
        if cr:
            self.screen.index()
    def clean_images(self):
        """Erase phantoms scrolled out of existence and delete their temp files."""
        view = self.view
        for pid in list(self.images.keys()):
            region = view.query_phantom(pid)[0]
            if (region.empty() and (region.begin() == 0)):
                view.erase_phantom_by_id(pid)
                if (pid in self.images):
                    try:
                        os.remove(self.images[pid])
                    except Exception:
                        pass
                    del self.images[pid]
    def __del__(self):
        # Best-effort teardown: force-kill the PTY and remove temp images.
        self.process.terminate(force=True)
        for image_path in list(self.images.values()):
            try:
                os.remove(image_path)
            except Exception:
                pass
        if self.process.isalive():
            logger.debug('process becomes orphaned')
        else:
            logger.debug('process is terminated')
def aggregate(prob, keep_bg=False):
    """Aggregate per-object mask probabilities into a normalized distribution.

    Prepends a background channel computed as the product of (1 - prob)
    over all objects, converts the clamped probabilities to logits, and
    softmax-normalizes across the (background + objects) axis 0.

    Args:
        prob: tensor of per-object probabilities, objects along dim 0.
        keep_bg: if True, include the background channel in the result;
            otherwise return only the object channels.

    Returns:
        Softmax-normalized tensor, with or without the background channel.
    """
    # Background probability: chance that no object claims the position.
    bg = torch.prod(1 - prob, dim=0, keepdim=True)
    # Clamp away exact 0/1 so the logit transform below stays finite.
    new_prob = torch.cat([bg, prob], 0).clamp(1e-07, 1 - 1e-07)
    logits = torch.log(new_prob / (1 - new_prob))
    out = F.softmax(logits, dim=0)
    # The original computed the same softmax in both branches (and left an
    # unused `k = prob.shape`); deduplicated here.
    return out if keep_bg else out[1:]
class NumexprGroup(Numexpr):
    """Grouping (parenthesized) wrapper around another numeric expression.

    Exists only to preserve grouping structure in the expression tree;
    evaluation and date-usage queries are delegated to the wrapped node.
    """

    def __init__(self, expr: Numexpr):
        self.__expr = expr

    def evaluate(self, data, time, use_date):
        """Evaluate by delegating straight to the wrapped expression."""
        inner = self.__expr
        return inner.evaluate(data, time, use_date)

    def __repr__(self):
        return '<NumexprGroup expr=%r>' % self.__expr

    def use_date(self):
        """True if the wrapped expression depends on the date."""
        return self.__expr.use_date()
class Fp32GroupNorm(nn.GroupNorm):
    """GroupNorm computed in float32 regardless of the input dtype.

    The input and (when present) the affine parameters are upcast to
    float32 for the normalization, and the result is cast back to the
    input's dtype.
    """

    def __init__(self, *args, **kwargs):
        super().__init__(*args, **kwargs)

    def forward(self, input):
        weight = None if self.weight is None else self.weight.float()
        bias = None if self.bias is None else self.bias.float()
        normalized = F.group_norm(input.float(), self.num_groups, weight, bias, self.eps)
        return normalized.type_as(input)
def get_index_class(index: (type[BaseIndex] | str)) -> type[BaseIndex]:
    """Resolve ``index`` to a concrete index class.

    Accepts either a BaseIndex subclass (returned unchanged) or one of the
    string aliases 'l2', 'annoy', 'kd_tree'.

    Raises:
        ValueError: for any unrecognized value.
    """
    # Already a concrete index class: pass straight through.
    if isinstance(index, type) and issubclass(index, BaseIndex):
        return index
    if index == 'l2':
        return L2Index
    if index == 'annoy':
        return AnnoyIndex
    if index == 'kd_tree':
        return KDTreeIndex
    raise ValueError(f'Index {index} not supported')
def train_one_epoch(model: torch.nn.Module, criterion: torch.nn.Module, data_loader: Iterable, optimizer: torch.optim.Optimizer, device: torch.device, epoch: int, max_norm: float=0, args=None, writer=None):
    """Train ``model`` for one epoch over ``data_loader``.

    Returns a dict mapping each tracked metric name to its global average.
    Exits the process if the (reduced) loss becomes non-finite.
    NOTE(review): ``writer`` is used unconditionally below -- assumes
    callers always pass a SummaryWriter-like object; confirm.
    """
    model.train()
    criterion.train()
    metric_logger = utils.MetricLogger(delimiter=' ')
    metric_logger.add_meter('lr', utils.SmoothedValue(window_size=1, fmt='{value:.6f}'))
    header = 'Exp: {}, Epoch: [{}]'.format(args.output_dir, epoch)
    print_freq = 10
    n_iters = 0
    for (samples, targets) in metric_logger.log_every(data_loader, print_freq, header):
        samples = samples.to(device)
        captions = [t['caption'] for t in targets]
        targets = utils.targets_to(targets, device)
        # In (semi-)online mode the model computes its own loss dict;
        # otherwise the criterion is applied to the raw outputs.
        if args.online:
            loss_dict = model(samples, captions, targets)
        elif args.semi_online:
            loss_dict = model(samples, captions, targets)
        else:
            outputs = model(samples, captions, targets)
            (loss_dict, _) = criterion(outputs, targets)
        weight_dict = criterion.weight_dict
        # Total training loss: weighted sum over losses that carry a weight.
        losses = sum(((loss_dict[k] * weight_dict[k]) for k in loss_dict.keys() if (k in weight_dict)))
        # Reduce across distributed processes -- for logging only.
        loss_dict_reduced = utils.reduce_dict(loss_dict)
        loss_dict_reduced_unscaled = {f'{k}_unscaled': v for (k, v) in loss_dict_reduced.items()}
        loss_dict_reduced_scaled = {k: (v * weight_dict[k]) for (k, v) in loss_dict_reduced.items() if (k in weight_dict)}
        losses_reduced_scaled = sum(loss_dict_reduced_scaled.values())
        loss_value = losses_reduced_scaled.item()
        if (not math.isfinite(loss_value)):
            # Training diverged: report the per-loss breakdown and abort.
            print('Loss is {}, stopping training'.format(loss_value))
            print(loss_dict_reduced)
            sys.exit(1)
        optimizer.zero_grad()
        losses.backward()
        if (max_norm > 0):
            grad_total_norm = torch.nn.utils.clip_grad_norm_(model.parameters(), max_norm)
        else:
            # No clipping; just measure the gradient norm for logging.
            grad_total_norm = utils.get_total_grad_norm(model.parameters(), max_norm)
        optimizer.step()
        metric_logger.update(loss=loss_value, **loss_dict_reduced_scaled, **loss_dict_reduced_unscaled)
        metric_logger.update(lr=optimizer.param_groups[0]['lr'])
        metric_logger.update(grad_norm=grad_total_norm)
        # Per-iteration scalars, indexed by a global step counter.
        for k in loss_dict.keys():
            writer.add_scalar(str(k), loss_dict[k].cpu().detach().item(), ((len(data_loader) * epoch) + n_iters))
        writer.add_scalar('lr', optimizer.param_groups[0]['lr'], ((len(data_loader) * epoch) + n_iters))
        n_iters += 1
    metric_logger.synchronize_between_processes()
    print('Averaged stats:', metric_logger)
    return {k: meter.global_avg for (k, meter) in metric_logger.meters.items()}
class MultiphaseBuilder(ThermalBuilder):
    """Case builder for multiphase OpenFOAM solvers.

    Extends ``ThermalBuilder`` with multiphase solver selection; several
    setup hooks (``build``, ``setInternalFields``, ``getPhases``,
    ``getSolverCreatedVariables``) are placeholders awaiting implementation.
    """

    def __init__(self, casePath, solverSettings=None, templatePath='tutorials/heatTransfer/buoyantBoussinesqSimpleFoam/hotRoom/', fluidProperties=None, turbulenceProperties=None, boundarySettings=None, internalFields=None, paralleSettings=None, transientSettings=None):
        # None sentinels replace the original mutable default arguments
        # (dicts/lists are evaluated once at def time and would be shared
        # across all instances — classic Python pitfall).
        if solverSettings is None:
            solverSettings = getDefaultMultiphaseSolverSettings()
        if fluidProperties is None:
            fluidProperties = {'name': 'air', 'compressible': False, 'kinematicViscosity': 100000.0}
        if turbulenceProperties is None:
            turbulenceProperties = {'name': 'kEpsilon'}
        if boundarySettings is None:
            boundarySettings = []
        if internalFields is None:
            internalFields = {}
        if paralleSettings is None:
            paralleSettings = {'method': 'simple', 'numberOfSubdomains': multiprocessing.cpu_count()}
        if transientSettings is None:
            transientSettings = {'startTime': 0.0, 'endTime': 1.0, 'timeStep': 0.001, 'writeInterval': 100}
        super(MultiphaseBuilder, self).__init__(casePath, solverSettings, templatePath, fluidProperties, turbulenceProperties, boundarySettings, internalFields, paralleSettings, transientSettings)
        self._solverName = getMultiphaseSolver(self._solverSettings)
        self._solverCreatedVariables = self.getSolverCreatedVariables()

    def build(self):
        # Placeholder: case assembly not implemented yet.
        pass

    def getSolverName(self):
        # Consistency fix: __init__ uses `getMultiphaseSolver`; the original
        # called a differently-named `_getMultiphaseSolver` here.
        return getMultiphaseSolver(self._solverSettings)

    def getFoamTemplate(self):
        # The original used a bare `raise` with no active exception, which
        # surfaces as RuntimeError; NotImplementedError (a RuntimeError
        # subclass, so backward compatible) states the intent.
        raise NotImplementedError('getFoamTemplate is not implemented for multiphase cases')

    def setupFluidProperties(self, value=None):
        """Optionally replace self.fluidProperties, then write either the
        thermophysical (compressible) or transport (incompressible) dict."""
        if value and isinstance(value, dict):
            self.fluidProperties = value
        if self._solverSettings['compressible']:
            self.setupThermophysicalProperties()
        else:
            self.setupTransportProperties()

    def setInternalFields(self):
        pass

    def getPhases(self):
        pass

    def getSolverCreatedVariables(self):
        pass
def load_lvis_json(json_file, image_root, dataset_name=None, extra_annotation_keys=None):
    """Load an LVIS-format annotation json into detectron2-style dataset dicts.

    Args:
        json_file: path to the LVIS annotation file (resolved via PathManager).
        image_root: directory containing the image split folders; it is
            string-concatenated with the split folder name, so it is
            presumably expected to end with a path separator — TODO confirm.
        dataset_name: optional registered dataset name; when given, its
            metadata is fetched and used to remap category ids.
        extra_annotation_keys: extra per-annotation keys copied verbatim.

    Returns:
        list[dict]: one record per image with 'file_name', 'height', 'width',
        'image_id' and 'annotations' (bbox XYWH_ABS, polygon segmentation,
        contiguous or 0-based 'category_id').
    """
    from lvis import LVIS
    json_file = PathManager.get_local_path(json_file)
    timer = Timer()
    lvis_api = LVIS(json_file)
    # Only report load time when it is noticeable (> 1s).
    if (timer.seconds() > 1):
        logger.info('Loading {} takes {:.2f} seconds.'.format(json_file, timer.seconds()))
    if (dataset_name is not None):
        meta = get_lvis_instances_meta(dataset_name)
        MetadataCatalog.get(dataset_name).set(**meta)
    # Sort image ids for deterministic ordering across runs.
    img_ids = sorted(lvis_api.imgs.keys())
    imgs = lvis_api.load_imgs(img_ids)
    anns = [lvis_api.img_ann_map[img_id] for img_id in img_ids]
    # Sanity check: annotation ids must be globally unique across the file.
    ann_ids = [ann['id'] for anns_per_image in anns for ann in anns_per_image]
    assert (len(set(ann_ids)) == len(ann_ids)), "Annotation ids in '{}' are not unique".format(json_file)
    imgs_anns = list(zip(imgs, anns))
    logger.info('Loaded {} images in the LVIS format from {}'.format(len(imgs_anns), json_file))
    if extra_annotation_keys:
        logger.info('The following extra annotation keys will be loaded: {} '.format(extra_annotation_keys))
    else:
        extra_annotation_keys = []
    def get_file_name(img_root, img_dict):
        # Derive "<split>/<file>" from the last two segments of coco_url.
        (split_folder, file_name) = img_dict['coco_url'].split('/')[(- 2):]
        return os.path.join((img_root + split_folder), file_name)
    dataset_dicts = []
    for (img_dict, anno_dict_list) in imgs_anns:
        record = {}
        record['file_name'] = get_file_name(image_root, img_dict)
        record['height'] = img_dict['height']
        record['width'] = img_dict['width']
        record['not_exhaustive_category_ids'] = img_dict.get('not_exhaustive_category_ids', [])
        record['neg_category_ids'] = img_dict.get('neg_category_ids', [])
        image_id = record['image_id'] = img_dict['id']
        objs = []
        for anno in anno_dict_list:
            assert (anno['image_id'] == image_id)
            obj = {'bbox': anno['bbox'], 'bbox_mode': BoxMode.XYWH_ABS}
            if ((dataset_name is not None) and ('thing_dataset_id_to_contiguous_id' in meta)):
                # Remap raw LVIS category ids to contiguous training ids.
                obj['category_id'] = meta['thing_dataset_id_to_contiguous_id'][anno['category_id']]
            else:
                # LVIS category ids are 1-based; make them 0-based.
                obj['category_id'] = (anno['category_id'] - 1)
            segm = anno['segmentation']
            # Each polygon must be a flat, even-length list with >= 3 points.
            valid_segm = [poly for poly in segm if (((len(poly) % 2) == 0) and (len(poly) >= 6))]
            assert (len(segm) == len(valid_segm)), 'Annotation contains an invalid polygon with < 3 points'
            assert (len(segm) > 0)
            obj['segmentation'] = segm
            for extra_ann_key in extra_annotation_keys:
                obj[extra_ann_key] = anno[extra_ann_key]
            objs.append(obj)
        record['annotations'] = objs
        dataset_dicts.append(record)
    return dataset_dicts
class TestFormatSize:
    """Tests for utils.format_size: default base-1024 formatting, unit
    suffixes, and an alternate base of 1000."""

    # (input, expected) pairs for the default base-1024 formatting;
    # None formats as the unknown-size placeholder.
    TESTS = [((- 1024), '-1.00k'), ((- 1), '-1.00'), (0, '0.00'), (1023, '1023.00'), (1024, '1.00k'), (1034.24, '1.01k'), (((1024 * 1024) * 2), '2.00M'), ((1024 ** 10), '1024.00Y'), (None, '?.??')]
    # Pairs exercising the base=1000 (decimal) threshold.
    KILO_TESTS = [(999, '999.00'), (1000, '1.00k'), (1010, '1.01k')]

    # Fix: the decorators lost their `@pytest.mark` prefix — a bare
    # `.parametrize(...)` line is a syntax error.
    @pytest.mark.parametrize('size, out', TESTS)
    def test_format_size(self, size, out):
        assert utils.format_size(size) == out

    @pytest.mark.parametrize('size, out', TESTS)
    def test_suffix(self, size, out):
        assert utils.format_size(size, suffix='B') == (out + 'B')

    @pytest.mark.parametrize('size, out', KILO_TESTS)
    def test_base(self, size, out):
        assert utils.format_size(size, base=1000) == out
@_state_transitions.register
def _handle_channel_settled(action: ContractReceiveChannelSettled, channel_state: NettingChannelState, **kwargs: Optional[Dict[(Any, Any)]]) -> TransitionResult[Optional[NettingChannelState]]:
    """Handle an on-chain channel-settlement event.

    Marks the channel settled, detects whether this settlement completed a
    cooperative settle initiated by either side, and then either clears the
    channel entirely (no locks recorded on-chain for either participant) or
    keeps it and schedules an on-chain batch unlock for the remaining locks.

    Note: the original line 1 was a bare ``_state_transitions.register``
    expression — a stripped ``@`` decorator, restored here.
    """
    events: List[Event] = []
    if (action.channel_identifier == channel_state.identifier):
        set_settled(channel_state, action.block_number)
        our_locksroot = action.our_onchain_locksroot
        partner_locksroot = action.partner_onchain_locksroot
        # The channel state can be dropped only when neither side has
        # pending locks recorded on-chain.
        should_clear_channel = ((our_locksroot == LOCKSROOT_OF_NO_LOCKS) and (partner_locksroot == LOCKSROOT_OF_NO_LOCKS))
        is_coop_settle = False
        initiator_lock_check = (action.our_onchain_locksroot == LOCKSROOT_OF_NO_LOCKS)
        partner_lock_check = (action.partner_onchain_locksroot == LOCKSROOT_OF_NO_LOCKS)
        if channel_state.our_state.initiated_coop_settle:
            # We initiated the coop settle: the on-chain transferred amounts
            # must match the negotiated withdraws and no locks may remain.
            coop_settle = channel_state.our_state.initiated_coop_settle
            initiator_transfer_check = (TokenAmount(coop_settle.total_withdraw_initiator) == action.our_transferred_amount)
            partner_transfer_check = (TokenAmount(coop_settle.total_withdraw_partner) == action.partner_transferred_amount)
            initiator_checks = (initiator_transfer_check and initiator_lock_check)
            partner_checks = (partner_transfer_check and partner_lock_check)
            if (initiator_checks and partner_checks):
                set_coop_settled(channel_state.our_state, action.block_number)
                is_coop_settle = True
        if channel_state.partner_state.initiated_coop_settle:
            # Partner initiated: initiator/partner roles are swapped relative
            # to our view when matching transferred amounts.
            coop_settle = channel_state.partner_state.initiated_coop_settle
            partner_transfer_check = (TokenAmount(coop_settle.total_withdraw_initiator) == action.partner_transferred_amount)
            initiator_transfer_check = (TokenAmount(coop_settle.total_withdraw_partner) == action.our_transferred_amount)
            initiator_checks = (initiator_transfer_check and initiator_lock_check)
            partner_checks = (partner_transfer_check and partner_lock_check)
            if (initiator_checks and partner_checks):
                set_coop_settled(channel_state.partner_state, action.block_number)
                is_coop_settle = True
        if is_coop_settle:
            # In a coop settle the transferred amounts are the withdraws.
            channel_state.partner_state.onchain_total_withdraw = WithdrawAmount(action.partner_transferred_amount)
            channel_state.our_state.onchain_total_withdraw = WithdrawAmount(action.our_transferred_amount)
        if should_clear_channel:
            # Nothing left to unlock: forget the channel state.
            return TransitionResult(None, events)
        channel_state.our_state.onchain_locksroot = our_locksroot
        channel_state.partner_state.onchain_locksroot = partner_locksroot
        onchain_unlock = ContractSendChannelBatchUnlock(canonical_identifier=channel_state.canonical_identifier, sender=channel_state.partner_state.address, triggered_by_block_hash=action.block_hash)
        events.append(onchain_unlock)
    return TransitionResult(channel_state, events)
def get_preresnet_cifar(num_classes, blocks, bottleneck, model_name=None, pretrained=False, root=os.path.join('~', '.torch', 'models'), **kwargs):
    """Create a PreResNet model for CIFAR-10/100.

    Args:
        num_classes: 10 or 100 (CIFAR variant).
        blocks: total network depth; must satisfy (blocks - 2) % 9 == 0 for
            bottleneck units or (blocks - 2) % 6 == 0 for basic units.
        bottleneck: use bottleneck residual units (4x channel expansion).
        model_name: weights name; required when pretrained=True.
        pretrained: download and load pretrained weights.
        root: local directory for downloaded weights.
        **kwargs: forwarded to CIFARPreResNet.

    Returns:
        CIFARPreResNet: the constructed network.

    Raises:
        ValueError: if pretrained is requested without a model_name.
    """
    assert (num_classes in [10, 100])
    # Three stages with equal unit counts; the unit count follows from depth.
    if bottleneck:
        assert ((blocks - 2) % 9) == 0
        layers = [(blocks - 2) // 9] * 3
    else:
        assert ((blocks - 2) % 6) == 0
        layers = [(blocks - 2) // 6] * 3
    channels_per_layers = [16, 32, 64]
    init_block_channels = 16
    # Per-stage, per-unit output channel counts.
    channels = [[ci] * li for (ci, li) in zip(channels_per_layers, layers)]
    if bottleneck:
        channels = [[cij * 4 for cij in ci] for ci in channels]
    net = CIFARPreResNet(channels=channels, init_block_channels=init_block_channels, bottleneck=bottleneck, num_classes=num_classes, **kwargs)
    if pretrained:
        # `not model_name` already covers both None and the empty string.
        if not model_name:
            raise ValueError('Parameter `model_name` should be properly initialized for loading pretrained model.')
        from .model_store import download_model
        download_model(net=net, model_name=model_name, local_model_store_dir_path=root)
    return net
class Period():
    """Calendar period expressed as whole months plus days.

    Months and days are tracked separately; for comparisons a month is
    approximated as 30 days (see ``totaldays``).  The ``@classmethod`` /
    ``@property`` decorators were clearly stripped from the original
    (``components`` compared ``self.totaldays`` — a bound method — to an
    int, a TypeError in Python 3); they are restored here.  Forward
    references to ``Period`` are quoted so the class body evaluates without
    ``from __future__ import annotations``.
    """

    def __init__(self, months: int = 0, days: int = 0) -> None:
        self._months = months
        self._days = days

    @classmethod
    def make(cls, data: Any) -> 'Period':
        """Coerce *data* (a Period or a tenure string like '1Y6M') to a Period."""
        if isinstance(data, cls):
            return data
        elif isinstance(data, str):
            return cls().add_tenure(data)
        else:
            raise TypeError(('Cannot convert %s to Period' % data))

    def isempty(self) -> bool:
        """True when both components are zero."""
        return ((self._months == 0) and (self._days == 0))

    def add_days(self, days: int) -> None:
        self._days += days

    def add_weeks(self, weeks: int) -> None:
        self._days += int((7 * weeks))

    def add_months(self, months: int) -> None:
        self._months += months

    def add_years(self, years: int) -> None:
        self._months += int((12 * years))

    @property
    def years(self) -> int:
        """Whole years in the month component."""
        return safediv(self._months, 12)

    @property
    def months(self) -> int:
        """Months remaining after whole years are removed."""
        return safemod(self._months, 12)

    @property
    def weeks(self) -> int:
        """Whole weeks in the day component."""
        return safediv(self._days, 7)

    @property
    def days(self) -> int:
        """Days remaining after whole weeks are removed."""
        return safemod(self._days, 7)

    @property
    def totaldays(self) -> int:
        """Approximate total length in days (30-day months)."""
        return ((30 * self._months) + self._days)

    def __repr__(self) -> str:
        return self.components()

    def __str__(self) -> str:
        return self.__repr__()

    def components(self) -> str:
        """Render as e.g. '1Y2M3W4D'; a leading '-' marks a net-negative period."""
        p = ''
        neg = (self.totaldays < 0)
        y = self.years
        m = self.months
        w = self.weeks
        d = self.days
        if y:
            p = ('%sY' % abs(y))
        if m:
            p = ('%s%sM' % (p, abs(m)))
        if w:
            p = ('%s%sW' % (p, abs(w)))
        if d:
            p = ('%s%sD' % (p, abs(d)))
        return (('-' + p) if neg else p)

    def simple(self) -> str:
        """Render using a single unit: days if any days are set, else months,
        else years, else the empty string."""
        if self._days:
            return ('%sD' % self.totaldays)
        elif self.months:
            return ('%sM' % self._months)
        elif self.years:
            return ('%sY' % self.years)
        else:
            return ''

    def add_tenure(self, pstr: str) -> 'Period':
        """Parse and accumulate a tenure string such as '1Y6M2W3D'.

        A Period argument is merged directly.  A negative value flips the
        sign for all subsequent terms.  Returns self for chaining.
        """
        if isinstance(pstr, self.__class__):
            self._months += pstr._months
            self._days += pstr._days
            return self
        st = str(pstr).upper()
        done = False
        sign = 1
        while (not done):
            if (not st):
                done = True
            else:
                # Find the next unit letter; everything before it is the count.
                ip = find_first_of(st, 'DWMY')
                if (ip == (- 1)):
                    raise ValueError(('Unknown period %s' % pstr))
                p = st[ip]
                v = int(st[:ip])
                sign = (sign if (v > 0) else (- sign))
                v = (sign * abs(v))
                if (p == 'D'):
                    self.add_days(v)
                elif (p == 'W'):
                    self.add_weeks(v)
                elif (p == 'M'):
                    self.add_months(v)
                elif (p == 'Y'):
                    self.add_years(v)
                ip += 1
                st = st[ip:]
        return self

    def __add__(self, other: Any) -> 'Period':
        p = self.make(other)
        return self.__class__((self._months + p._months), (self._days + p._days))

    def __radd__(self, other: Any) -> 'Period':
        return (self + other)

    def __sub__(self, other: Any) -> 'Period':
        p = self.make(other)
        return self.__class__((self._months - p._months), (self._days - p._days))

    def __rsub__(self, other: Any) -> 'Period':
        return (self.make(other) - self)

    def __gt__(self, other: Any) -> bool:
        return (self.totaldays > self.make(other).totaldays)

    def __lt__(self, other: Any) -> bool:
        return (self.totaldays < self.make(other).totaldays)

    def __ge__(self, other: Any) -> bool:
        return (self.totaldays >= self.make(other).totaldays)

    def __le__(self, other: Any) -> bool:
        return (self.totaldays <= self.make(other).totaldays)

    def __eq__(self, other: Any) -> bool:
        return (self.totaldays == self.make(other).totaldays)
class NetworkImageNet(nn.Module):
    """DARTS-style ImageNet network assembled from a searched `genotype`.

    Two stride-2 stems downsample the input before a stack of `layers`
    cells; channels double at the 1/3 and 2/3 depth points (reduction
    cells).  An optional auxiliary classifier taps the features right
    after the second reduction point during training.
    """
    def __init__(self, C, num_classes, layers, auxiliary, genotype):
        # C: initial channel count fed to the cells.
        # genotype: searched cell description consumed by `Cell`.
        super(NetworkImageNet, self).__init__()
        self._layers = layers
        self._auxiliary = auxiliary
        # Per-edge drop-path probability; updated externally during training.
        self.drop_path_prob = 0.0
        # Stem 0: 3 -> C/2 -> C, two stride-2 convs (4x downsample total).
        self.stem0 = nn.Sequential(nn.Conv2d(3, (C // 2), kernel_size=3, stride=2, padding=1, bias=False), nn.BatchNorm2d((C // 2)), nn.ReLU(inplace=True), nn.Conv2d((C // 2), C, 3, stride=2, padding=1, bias=False), nn.BatchNorm2d(C))
        # Stem 1: one more stride-2 conv so s0/s1 have different resolutions.
        self.stem1 = nn.Sequential(nn.ReLU(inplace=True), nn.Conv2d(C, C, 3, stride=2, padding=1, bias=False), nn.BatchNorm2d(C))
        (C_prev_prev, C_prev, C_curr) = (C, C, C)
        self.cells = nn.ModuleList()
        # stem1 halves resolution relative to stem0, so the first cell must
        # treat its s0 input as already reduced.
        reduction_prev = True
        for i in range(layers):
            if (i in [(layers // 3), ((2 * layers) // 3)]):
                # Reduction cell: double channels, halve spatial resolution.
                C_curr *= 2
                reduction = True
            else:
                reduction = False
            cell = Cell(genotype, C_prev_prev, C_prev, C_curr, reduction, reduction_prev)
            reduction_prev = reduction
            self.cells += [cell]
            # Each cell outputs multiplier * C_curr channels.
            (C_prev_prev, C_prev) = (C_prev, (cell.multiplier * C_curr))
            if (i == ((2 * layers) // 3)):
                # Remember the channel count feeding the auxiliary head.
                C_to_auxiliary = C_prev
        if auxiliary:
            self.auxiliary_head = AuxiliaryHeadImageNet(C_to_auxiliary, num_classes)
        self.global_pooling = nn.AvgPool2d(7)
        self.classifier = nn.Linear(C_prev, num_classes)
    def forward(self, input):
        """Return (logits, logits_aux); logits_aux is None unless the
        auxiliary head is enabled and the model is in training mode."""
        logits_aux = None
        s0 = self.stem0(input)
        s1 = self.stem1(s0)
        for (i, cell) in enumerate(self.cells):
            # Each cell consumes the two previous feature maps.
            (s0, s1) = (s1, cell(s0, s1, self.drop_path_prob))
            if (i == ((2 * self._layers) // 3)):
                if (self._auxiliary and self.training):
                    logits_aux = self.auxiliary_head(s1)
        out = self.global_pooling(s1)
        logits = self.classifier(out.view(out.size(0), (- 1)))
        return (logits, logits_aux)
class MainWindow(QMainWindow):
    """Currency exchange-rate viewer ("Goodforbitcoin").

    Left: a pyqtgraph plot of high/low/close rates per currency, with a
    second (right-axis) viewbox showing market volume.  Right: a checkable
    table toggling which currencies are drawn.  Historic rates are fetched
    on a QThreadPool worker so the UI stays responsive.
    """
    def __init__(self, *args, **kwargs):
        super(MainWindow, self).__init__(*args, **kwargs)
        layout = QHBoxLayout()
        self.ax = pg.PlotWidget()
        self.ax.showGrid(True, True)
        # Vertical cursor line; moved to follow the mouse (see mouse_move_handler).
        self.line = pg.InfiniteLine(pos=(- 20), pen=pg.mkPen('k', width=3), movable=False)
        self.ax.addItem(self.line)
        self.ax.setLabel('left', text='Rate')
        self.p1 = self.ax.getPlotItem()
        self.p1.scene().sigMouseMoved.connect(self.mouse_move_handler)
        # Second viewbox shares the x axis but has its own y scale (volume).
        self.p2 = pg.ViewBox()
        self.p2.enableAutoRange(axis=pg.ViewBox.XYAxes, enable=True)
        self.p1.showAxis('right')
        self.p1.scene().addItem(self.p2)
        self.p2.setXLink(self.p1)
        self.ax2 = self.p1.getAxis('right')
        self.ax2.linkToView(self.p2)
        self.ax2.setGrid(False)
        self.ax2.setLabel(text='Volume')
        self._market_activity = pg.PlotCurveItem(np.arange(NUMBER_OF_TIMEPOINTS), np.arange(NUMBER_OF_TIMEPOINTS), pen=pg.mkPen('k', style=Qt.DashLine, width=1))
        self.p2.addItem(self._market_activity)
        # Keep the overlay viewbox geometry in sync with the main plot.
        self.p1.vb.sigResized.connect(self.update_plot_scale)
        self.base_currency = DEFAULT_BASE_CURRENCY
        # Per-currency plot lines, table items, colors and visibility state.
        self._data_lines = dict()
        self._data_items = dict()
        self._data_colors = dict()
        self._data_visible = DEFAULT_DISPLAY_CURRENCIES
        self.listView = QTableView()
        self.model = QStandardItemModel()
        self.model.setHorizontalHeaderLabels(['Currency', 'Rate'])
        self.model.itemChanged.connect(self.check_check_state)
        self.listView.setModel(self.model)
        self.threadpool = QThreadPool()
        # False = no refresh worker currently running.
        self.worker = False
        layout.addWidget(self.ax)
        layout.addWidget(self.listView)
        widget = QWidget()
        widget.setLayout(layout)
        self.setCentralWidget(widget)
        self.listView.setFixedSize(226, 400)
        self.setFixedSize(650, 400)
        toolbar = QToolBar('Main')
        self.addToolBar(toolbar)
        self.currencyList = QComboBox()
        toolbar.addWidget(self.currencyList)
        self.update_currency_list(AVAILABLE_BASE_CURRENCIES)
        self.currencyList.setCurrentText(self.base_currency)
        self.currencyList.currentTextChanged.connect(self.change_base_currency)
        self.progress = QProgressBar()
        self.progress.setRange(0, 100)
        toolbar.addWidget(self.progress)
        self.refresh_historic_rates()
        self.setWindowTitle('Goodforbitcoin')
        self.show()
    def update_currency_list(self, currencies):
        """Add *currencies* to the base-currency combobox, kept sorted."""
        for currency in currencies:
            self.currencyList.addItem(currency)
        self.currencyList.model().sort(0)
    def check_check_state(self, i):
        """Toggle a currency's visibility when its table checkbox changes."""
        if (not i.isCheckable()):
            return
        currency = i.text()
        checked = (i.checkState() == Qt.Checked)
        if (currency in self._data_visible):
            if (not checked):
                self._data_visible.remove(currency)
                self.redraw()
        elif checked:
            self._data_visible.append(currency)
            self.redraw()
    def get_currency_color(self, currency):
        """Return (and cache) a stable plot color for *currency*."""
        if (currency not in self._data_colors):
            self._data_colors[currency] = next(BREWER12PAIRED)
        return self._data_colors[currency]
    def get_or_create_data_row(self, currency):
        """Return the (name, value) table items for *currency*, creating them on first use."""
        if (currency not in self._data_items):
            self._data_items[currency] = self.add_data_row(currency)
        return self._data_items[currency]
    def add_data_row(self, currency):
        """Create a checkable, color-coded table row; returns (citem, vitem)."""
        citem = QStandardItem()
        citem.setText(currency)
        citem.setForeground(QBrush(QColor(self.get_currency_color(currency))))
        citem.setColumnCount(2)
        citem.setCheckable(True)
        if (currency in DEFAULT_DISPLAY_CURRENCIES):
            citem.setCheckState(Qt.Checked)
        vitem = QStandardItem()
        vitem.setTextAlignment((Qt.AlignRight | Qt.AlignVCenter))
        self.model.setColumnCount(2)
        self.model.appendRow([citem, vitem])
        self.model.sort(0)
        return (citem, vitem)
    def mouse_move_handler(self, pos):
        """Move the cursor line to the mouse and show the rates at that time point."""
        pos = self.ax.getViewBox().mapSceneToView(pos)
        self.line.setPos(pos.x())
        self.update_data_viewer(int(pos.x()))
    def update_data_viewer(self, i):
        """Refresh the table's rate column with the values at time index *i*."""
        if (i not in range(NUMBER_OF_TIMEPOINTS)):
            return
        for (currency, data) in self.data.items():
            self.update_data_row(currency, data[i])
    def update_data_row(self, currency, data):
        (citem, vitem) = self.get_or_create_data_row(currency)
        vitem.setText(('%.4f' % data['close']))
    def change_base_currency(self, currency):
        self.base_currency = currency
        self.refresh_historic_rates()
    def refresh_historic_rates(self):
        """Start (or restart) the background worker fetching historic rates."""
        if self.worker:
            # Cancel any in-flight refresh before starting a new one.
            self.worker.signals.cancel.emit()
        self.data = {}
        self.volume = []
        self.worker = UpdateWorker(self.base_currency)
        self.worker.signals.data.connect(self.result_data_callback)
        self.worker.signals.finished.connect(self.refresh_finished)
        self.worker.signals.progress.connect(self.progress_callback)
        self.worker.signals.error.connect(self.notify_error)
        self.threadpool.start(self.worker)
    def result_data_callback(self, rates, volume):
        self.data = rates
        self.volume = volume
        self.redraw()
        self.update_data_viewer((NUMBER_OF_TIMEPOINTS - 1))
    def progress_callback(self, progress):
        self.progress.setValue(progress)
    def refresh_finished(self):
        # Worker done; clear the handle so a new refresh can start.
        self.worker = False
        self.redraw()
    def notify_error(self, error):
        """Show a warning dialog for an (exception, traceback) pair from the worker."""
        (e, tb) = error
        msg = QMessageBox()
        msg.setIcon(QMessageBox.Warning)
        msg.setText(e.__class__.__name__)
        msg.setInformativeText(str(e))
        msg.setDetailedText(tb)
        msg.exec_()
    def update_plot_scale(self):
        self.p2.setGeometry(self.p1.vb.sceneBoundingRect())
    def redraw(self):
        """Redraw high/low/close curves for visible currencies and the volume curve."""
        (y_min, y_max) = (sys.maxsize, 0)
        x = np.arange(NUMBER_OF_TIMEPOINTS)
        for (currency, data) in self.data.items():
            if data:
                (_, close, high, low) = zip(*[(v['time'], v['close'], v['high'], v['low']) for v in data])
                if (currency in self._data_visible):
                    if (currency not in self._data_lines):
                        # First draw for this currency: create its three curves.
                        self._data_lines[currency] = {}
                        self._data_lines[currency]['high'] = self.ax.plot(x, high, pen=pg.mkPen(self.get_currency_color(currency), width=2, style=Qt.DotLine))
                        self._data_lines[currency]['low'] = self.ax.plot(x, low, pen=pg.mkPen(self.get_currency_color(currency), width=2, style=Qt.DotLine))
                        self._data_lines[currency]['close'] = self.ax.plot(x, close, pen=pg.mkPen(self.get_currency_color(currency), width=3))
                    else:
                        self._data_lines[currency]['high'].setData(x, high)
                        self._data_lines[currency]['low'].setData(x, low)
                        self._data_lines[currency]['close'].setData(x, close)
                    (y_min, y_max) = (min(y_min, *low), max(y_max, *high))
                elif (currency in self._data_lines):
                    # Hidden currency: blank its existing curves.
                    self._data_lines[currency]['high'].clear()
                    self._data_lines[currency]['low'].clear()
                    self._data_lines[currency]['close'].clear()
        self.ax.setLimits(yMin=(y_min * 0.9), yMax=(y_max * 1.1), xMin=min(x), xMax=max(x))
        self._market_activity.setData(x, self.volume)
        self.p2.setYRange(0, max(self.volume))
def mock_clone(url: str, *_: Any, source_root: (Path | None)=None, **__: Any) -> MockDulwichRepo:
    """Pretend to clone *url* by copying a local fixture repository.

    The fixture lives under FIXTURE_PATH/git/<host>/<repo path>; it is
    copied (or symlinked) into <source_root>/<repo path> and wrapped in a
    MockDulwichRepo.
    """
    parsed_url = ParsedUrl.parse(url)
    assert parsed_url.pathname is not None
    assert parsed_url.resource is not None
    # Strip a trailing ".git" (NOTE(review): the '.' is unescaped, so any
    # character before a trailing 'git' is matched — presumably r'(\.git)?$'
    # was intended; kept as-is to preserve behavior).
    repo_path = re.sub('(.git)?$', '', parsed_url.pathname.lstrip('/'))
    fixture_folder = FIXTURE_PATH / 'git' / parsed_url.resource / repo_path
    if not source_root:
        # Default to the poetry cache's src directory.
        source_root = Path(Config.create().get('cache-dir')) / 'src'
    destination = source_root / repo_path
    destination.parent.mkdir(parents=True, exist_ok=True)
    copy_or_symlink(fixture_folder, destination)
    return MockDulwichRepo(destination)
def doc2js(doc):
    """Convert a tagged document into a brat-style annotation dict.

    Args:
        doc: iterable of (sent, boe, eoe, coe) tuples, where `sent` is a
            list of token strings and boe/eoe/coe give each entity's begin
            token index, end token index (exclusive) and class index into
            `cls2ner`.

    Returns:
        dict with ascii-encoded 'text' (one sentence per line) and
        'entities' as [id, label, [[start_char, end_char]]] triples.
    """
    cls2ner = ['PER', 'LOC', 'ORG', 'MISC', 'FP-PER', 'FP-LOC', 'FP-ORG', 'FP-MISC', 'FN-PER', 'FN-LOC', 'FN-ORG', 'FN-MISC']
    (text, entities, offset, n_entities) = ('', [], 0, 0)
    for (sent, boe, eoe, coe) in doc:
        # Prefix-sum of token lengths (+1 for the separating space/newline)
        # yields each token's character offset in the full text.
        acc_len = [offset]
        for w in sent:
            acc_len.append(acc_len[-1] + len(w) + 1)
        # Fix: the original used Python-2-only `xrange`; `range` is the
        # Python 3 equivalent.
        for i in range(len(coe)):
            entities.append([('T%d' % n_entities), cls2ner[coe[i]], [[acc_len[boe[i]], (acc_len[eoe[i]] - 1)]]])
            n_entities += 1
        text += (u' '.join(sent) + u'\n')
        offset = acc_len[-1]
    return {'text': text.encode('ascii', 'ignore'), 'entities': entities}
class TransformValuesRewrite(GraphRewriter):
    """Graph rewrite that applies value-variable transforms.

    Replaces value variables in the function graph with their transformed
    counterparts according to the mapping supplied at construction; a
    second pass handles values appearing inside Scan inner-graphs.
    """
    # Node rewriter for plain value variables.
    transform_rewrite = in2out(transform_values, ignore_newtrees=True)
    # Node rewriter specialised for values inside Scan ops.
    scan_transform_rewrite = in2out(transform_scan_values, ignore_newtrees=True)
    def __init__(self, values_to_transforms: Dict[(TensorVariable, Union[(Transform, None)])]):
        # Maps each value variable to its Transform (None = leave untransformed).
        self.values_to_transforms = values_to_transforms
    def add_requirements(self, fgraph):
        """Attach the transform mapping to the graph as a feature so the
        node rewriters can look it up during traversal."""
        values_transforms_feature = TransformValuesMapping(self.values_to_transforms)
        fgraph.attach_feature(values_transforms_feature)
    def apply(self, fgraph: FunctionGraph):
        """Run the plain pass first, then the Scan-aware pass."""
        self.transform_rewrite.rewrite(fgraph)
        self.scan_transform_rewrite.rewrite(fgraph)
class HTML():
    """Minimal HTML report writer built on the `dominate` library.

    Collects headers and image tables into one document and saves it as
    <web_dir>/index.html; images are expected under <web_dir>/images.
    """

    def __init__(self, web_dir, title, refresh=0):
        """Create the document; `refresh` > 0 adds an auto-refresh meta tag
        with that period in seconds."""
        self.title = title
        self.web_dir = web_dir
        self.img_dir = os.path.join(self.web_dir, 'images')
        if not os.path.exists(self.web_dir):
            os.makedirs(self.web_dir)
        if not os.path.exists(self.img_dir):
            os.makedirs(self.img_dir)
        self.doc = dominate.document(title=title)
        if refresh > 0:
            with self.doc.head:
                # Fix: the http-equiv attribute was missing — a bare
                # `content` meta tag never triggers an auto refresh.
                meta(http_equiv='refresh', content=str(refresh))

    def get_image_dir(self):
        """Return the directory images should be written into."""
        return self.img_dir

    def add_header(self, text):
        """Append an <h3> header to the document."""
        with self.doc:
            h3(text)

    def add_images(self, ims, txts, links, width=400):
        """Append one table row of images with captions and links.

        ims/txts/links are parallel lists: image filenames (relative to the
        images dir), caption strings, and link target filenames.
        """
        self.t = table(border=1, style='table-layout: fixed;')
        self.doc.add(self.t)
        with self.t:
            with tr():
                for (im, txt, link) in zip(ims, txts, links):
                    with td(style='word-wrap: break-word;', halign='center', valign='top'):
                        with p():
                            with a(href=os.path.join('images', link)):
                                img(style=('width:%dpx' % width), src=os.path.join('images', im))
                            br()
                            p(txt)

    def save(self):
        """Render the document to <web_dir>/index.html."""
        html_file = ('%s/index.html' % self.web_dir)
        # Context manager replaces the manual open/close (no leak on error).
        with open(html_file, 'wt') as f:
            f.write(self.doc.render())
def _deque_mock():
    """Return source text for a pure-Python stand-in of collections.deque.

    The real deque is implemented in C, so a static analyser cannot
    introspect it; this string gives it an inferable class body with the
    same method surface (mostly no-op implementations).
    """
    base_deque_class = '\n class deque(object):\n maxlen = 0\n def __init__(self, iterable=None, maxlen=None):\n self.iterable = iterable or []\n def append(self, x): pass\n def appendleft(self, x): pass\n def clear(self): pass\n def count(self, x): return 0\n def extend(self, iterable): pass\n def extendleft(self, iterable): pass\n def pop(self): return self.iterable[0]\n def popleft(self): return self.iterable[0]\n def remove(self, value): pass\n def reverse(self): return reversed(self.iterable)\n def rotate(self, n=1): return self\n def __iter__(self): return self\n def __reversed__(self): return self.iterable[::-1]\n def __getitem__(self, index): return self.iterable[index]\n def __setitem__(self, index, value): pass\n def __delitem__(self, index): pass\n def __bool__(self): return bool(self.iterable)\n def __nonzero__(self): return bool(self.iterable)\n def __contains__(self, o): return o in self.iterable\n def __len__(self): return len(self.iterable)\n def __copy__(self): return deque(self.iterable)\n def copy(self): return deque(self.iterable)\n def index(self, x, start=0, end=0): return 0\n def insert(self, i, x): pass\n def __add__(self, other): pass\n def __iadd__(self, other): pass\n def __mul__(self, other): pass\n def __imul__(self, other): pass\n def __rmul__(self, other): pass'
    if PY39_PLUS:
        # PEP 585: deque became subscriptable (deque[int]) in Python 3.9,
        # so expose __class_getitem__ on newer interpreters.
        base_deque_class += '\n \n def __class_getitem__(self, item): return cls'
    return base_deque_class
class Lighting(QGraphicsView):
    """Qt graphics demo: a light source orbiting a grid of shapes.

    A 5x5 grid of alternating ellipses and rectangles each carries a
    drop-shadow effect; a timer animates a glowing pixmap around the scene
    and re-aims every shadow (offset, intensity) away from the light.
    """
    def __init__(self, parent=None):
        super(Lighting, self).__init__(parent)
        # Current orbit angle of the light source, in radians.
        self.angle = 0.0
        self.m_scene = QGraphicsScene()
        self.m_lightSource = None
        self.m_items = []
        self.setScene(self.m_scene)
        self.setupScene()
        # ~33 fps animation timer.
        timer = QTimer(self)
        timer.timeout.connect(self.animate)
        timer.setInterval(30)
        timer.start()
        self.setRenderHint(QPainter.Antialiasing)
        self.setFrameStyle(QFrame.NoFrame)
    def setupScene(self):
        """Build the background, the glowing light pixmap and the shape grid."""
        self.m_scene.setSceneRect((- 300), (- 200), 600, 460)
        # Subtle white-to-blue background gradient.
        linearGrad = QLinearGradient(QPointF((- 100), (- 100)), QPointF(100, 100))
        linearGrad.setColorAt(0, QColor(255, 255, 255))
        linearGrad.setColorAt(1, QColor(192, 192, 255))
        self.setBackgroundBrush(linearGrad)
        # Radial yellow-to-transparent gradient painted into a 60x60 pixmap
        # acts as the "light bulb" sprite.
        radialGrad = QRadialGradient(30, 30, 30)
        radialGrad.setColorAt(0, Qt.yellow)
        radialGrad.setColorAt(0.2, Qt.yellow)
        radialGrad.setColorAt(1, Qt.transparent)
        pixmap = QPixmap(60, 60)
        pixmap.fill(Qt.transparent)
        painter = QPainter(pixmap)
        painter.setPen(Qt.NoPen)
        painter.setBrush(radialGrad)
        painter.drawEllipse(0, 0, 60, 60)
        painter.end()
        self.m_lightSource = self.m_scene.addPixmap(pixmap)
        self.m_lightSource.setZValue(2)
        # 5x5 grid; parity of (i + j) alternates ellipse vs rectangle.
        for i in range((- 2), 3):
            for j in range((- 2), 3):
                if ((i + j) & 1):
                    item = QGraphicsEllipseItem(0, 0, 50, 50)
                else:
                    item = QGraphicsRectItem(0, 0, 50, 50)
                item.setPen(QPen(Qt.black, 1))
                item.setBrush(QBrush(Qt.white))
                effect = QGraphicsDropShadowEffect(self)
                effect.setBlurRadius(8)
                item.setGraphicsEffect(effect)
                item.setZValue(1)
                item.setPos((i * 80), (j * 80))
                self.m_scene.addItem(item)
                self.m_items.append(item)
    def animate(self):
        """Advance the light along its orbit and update each item's shadow."""
        self.angle += (math.pi / 30)
        # Orbit of radius 200, offset so the pixmap is centered on the path.
        xs = (((200 * math.sin(self.angle)) - 40) + 25)
        ys = (((200 * math.cos(self.angle)) - 40) + 25)
        self.m_lightSource.setPos(xs, ys)
        for item in self.m_items:
            effect = item.graphicsEffect()
            # Shadow falls away from the light along the item->light vector.
            delta = QPointF((item.x() - xs), (item.y() - ys))
            effect.setOffset(QPointF((delta.toPoint() / 30)))
            # Shadows fade with distance, clamped to [0.4, 0.7] opacity.
            dd = math.hypot(delta.x(), delta.y())
            color = effect.color()
            color.setAlphaF(max(0.4, min((1 - (dd / 200.0)), 0.7)))
            effect.setColor(color)
        self.m_scene.update()
class FocalLoss(nn.Module):
    """Focal loss (Lin et al.) wrapped around BCEWithLogitsLoss.

    The inner BCE is computed per element (reduction='none'), modulated by
    the focal factors alpha and (1 - p_t)**gamma, then reduced according to
    the requested reduction.
    """

    def __init__(self, loss_weight=1.0, pos_weight=1.0, gamma=1.5, alpha=0.25, reduction='mean'):
        """
        Args:
            loss_weight: external scaling weight (stored, not applied here).
            pos_weight: positive-class weight; a plain number is accepted
                and converted to a tensor.
            gamma: focusing exponent on (1 - p_t).
            alpha: balance factor between positive and negative targets.
            reduction: 'mean', 'sum' or anything else for no reduction.
        """
        super(FocalLoss, self).__init__()
        self.loss_weight = loss_weight
        # Fix: BCEWithLogitsLoss stores pos_weight via register_buffer, which
        # requires a Tensor — the original passed the raw float 1.0 and
        # raised TypeError on construction.
        if not isinstance(pos_weight, torch.Tensor):
            pos_weight = torch.tensor(pos_weight)
        self.pos_weight = pos_weight
        self.loss_fcn = nn.BCEWithLogitsLoss(pos_weight=self.pos_weight, reduction=reduction)
        self.gamma = gamma
        self.alpha = alpha
        # Remember the requested reduction, then force the inner loss to
        # 'none' so the focal modulation is applied per element.
        self.reduction = self.loss_fcn.reduction
        self.loss_fcn.reduction = 'none'

    def forward(self, pred, true):
        """Return the focal loss between logits `pred` and targets `true`."""
        device = pred.device
        loss = self.loss_fcn(pred, true).to(device)
        pred_prob = torch.sigmoid(pred)
        # p_t: probability assigned to the true class per element.
        p_t = ((true * pred_prob) + ((1 - true) * (1 - pred_prob)))
        alpha_factor = ((true * self.alpha) + ((1 - true) * (1 - self.alpha)))
        modulating_factor = ((1.0 - p_t) ** self.gamma)
        loss *= (alpha_factor * modulating_factor)
        if (self.reduction == 'mean'):
            return loss.mean()
        elif (self.reduction == 'sum'):
            return loss.sum()
        else:
            return loss
# Fix: the three decorators lost their `@pytest.mark` prefix — bare
# `.parametrize(...)` lines are a syntax error.
@pytest.mark.parametrize('username,password', users)
@pytest.mark.parametrize('project_id', projects)
@pytest.mark.parametrize('condition_id', conditions)
def test_resolve(db, client, username, password, project_id, condition_id):
    """The resolve endpoint returns a dict for users who may view the
    project, 404 for authenticated users without access, 401 otherwise."""
    client.login(username=username, password=password)
    url = (reverse(urlnames['resolve'], args=[project_id]) + f'?condition={condition_id}')
    response = client.get(url)
    if (project_id in view_project_permission_map.get(username, [])):
        assert (response.status_code == 200)
        assert isinstance(response.json(), dict)
    elif password:
        # Authenticated but not permitted: the project is hidden (404).
        assert (response.status_code == 404)
    else:
        # Anonymous: unauthenticated (401).
        assert (response.status_code == 401)
def test_raises(pytester: Pytester) -> None:
    """nose's @raises passes when the named exception is raised; a BaseException
    escapes @raises(Exception) and fails the test.

    Fix: the `@raises(...)` decorators had been stripped to bare `(...)`
    lines inside both the generated test file and the expected output.
    """
    pytester.makepyfile('\n from nose.tools import raises\n\n @raises(RuntimeError)\n def test_raises_runtimeerror():\n raise RuntimeError\n\n @raises(Exception)\n def test_raises_baseexception_not_caught():\n raise BaseException\n\n @raises(BaseException)\n def test_raises_baseexception_caught():\n raise BaseException\n ')
    result = pytester.runpytest('-vv')
    result.stdout.fnmatch_lines(['test_raises.py::test_raises_runtimeerror PASSED*', 'test_raises.py::test_raises_baseexception_not_caught FAILED*', 'test_raises.py::test_raises_baseexception_caught PASSED*', '*= FAILURES =*', '*_ test_raises_baseexception_not_caught _*', '', 'arg = (), kw = {}', '', ' def newfunc(*arg, **kw):', ' try:', '> func(*arg, **kw)', '', '*/nose/*: ', '_ _ *', '', ' @raises(Exception)', ' def test_raises_baseexception_not_caught():', '> raise BaseException', 'E BaseException', '', 'test_raises.py:9: BaseException', '* 1 failed, 2 passed *'])
class Effect6558(BaseEffect):
    """Overheat bonus for tracking-enhancing modules.

    While overloaded, boosts the module's own range, falloff, tracking and
    missile-precision bonus attributes by its overload strength attribute.
    """
    # Effect category consumed by the framework — applied only while the
    # module is overheated.
    type = 'overheat'

    def handler(fit, module, context, projectionRange, **kwargs):
        # NOTE(review): defined without `self`; presumably invoked unbound
        # by the effect framework — confirm against BaseEffect's call site.
        overloadBonus = module.getModifiedItemAttr('overloadTrackingModuleStrengthBonus')
        module.boostItemAttr('maxRangeBonus', overloadBonus, **kwargs)
        module.boostItemAttr('falloffBonus', overloadBonus, **kwargs)
        module.boostItemAttr('trackingSpeedBonus', overloadBonus, **kwargs)
        module.boostItemAttr('aoeCloudSizeBonus', overloadBonus, **kwargs)
        module.boostItemAttr('aoeVelocityBonus', overloadBonus, **kwargs)
class ArmorRRColumn(GraphColumn):
    """Fit-list column showing the fit's declared armor remote-repair output."""

    name = 'ArmorRR'
    stickPrefixToValue = True

    def __init__(self, fittingView, params):
        # Fixed width of 80 px with (3, 0, 3) cell padding.
        super().__init__(fittingView, 80, (3, 0, 3))

    def _getValue(self, fit):
        # Evaluate remote reps at the globally configured spool-up level.
        spool_pct = eos.config.settings['globalDefaultSpoolupPercentage']
        spool_options = SpoolOptions(SpoolType.SPOOL_SCALE, spool_pct, False)
        remote_reps = fit.getRemoteReps(spoolOptions=spool_options)
        return (remote_reps.armor, 'HP/s')

    def _getFitTooltip(self):
        return 'Declared armor repair speed'
class IteratorProducer(Producer):
    """Producer that drains an arbitrary iterator of message chunks."""
    # Equality/representation factors used by the framework.
    _e_factors = ('iterator',)
    protocol = PROTOCOL_CHUNKS
    def __init__(self, iterator):
        self.iterator = iter(iterator)
        # NOTE(review): this instance attribute shadows the class method only
        # for direct `obj.__next__()` attribute access; the builtin `next()`
        # looks up type(obj).__next__, so the counting method below still
        # runs there — confirm which path the framework uses.
        self.__next__ = self.iterator.__next__
        super().__init__()
    def realign(self):
        # Nothing to realign: the iterator is consumed strictly in order.
        pass
    def __next__(self, next=next):
        # `next=next` binds the builtin as a local for speed in the hot loop.
        n = next(self.iterator)
        # Each item `n` is a chunk (sequence of messages): count messages and
        # their total byte length.
        self.total_messages += len(n)
        self.total_bytes += sum(map(len, n))
        return n
def test_relative_in_modules(fixture_path):
    """All nine tests across the two module files and the sub package run and pass."""
    outcome = fixture_path.runpytest('-v')
    outcome.assert_outcomes(passed=9, failed=0)
    expected = [
        'mod2_test.py::TestB::test_a PASSED',
        'mod1_test.py::TestA::test_c PASSED',
        'mod2_test.py::TestB::test_b PASSED',
        'mod1_test.py::TestA::test_a PASSED',
        'sub/mod3_test.py::test_a PASSED',
        'mod2_test.py::TestB::test_c PASSED',
        'sub/mod3_test.py::test_b PASSED',
        'mod1_test.py::TestA::test_b PASSED',
        'sub/mod3_test.py::test_c PASSED',
    ]
    outcome.stdout.fnmatch_lines(expected)
def maybe_add_to_os_environ_pathlist(var, newpath):
    """Best-effort: prepend *newpath* to the os.pathsep-separated list in
    os.environ[var].

    Does nothing when *newpath* is not absolute, when it is already present
    in the list, or when anything goes wrong (e.g. the variable is unset).
    """
    import os
    if not os.path.isabs(newpath):
        return
    try:
        existing = os.environ[var].split(os.pathsep)
        if newpath in existing:
            return
        os.environ[var] = os.pathsep.join([newpath] + existing)
    except Exception:
        # Deliberately best-effort: leave the environment untouched on failure.
        pass
class Card(QGraphicsPixmapItem):
    """A single playing card: draggable, flippable, and stack-aware."""

    def __init__(self, value, suit, *args, **kwargs):
        super(Card, self).__init__(*args, **kwargs)
        self.signals = Signals()
        # Back-reference to the stack currently holding this card.
        self.stack = None
        self.child = None
        self.value = value
        self.suit = suit
        self.side = None  # SIDE_FACE / SIDE_BACK once dealt
        self.vector = None
        self.setShapeMode(QGraphicsPixmapItem.BoundingRectShape)
        self.setFlag(QGraphicsItem.ItemIsMovable)
        self.setFlag(QGraphicsItem.ItemSendsGeometryChanges)
        self.load_images()

    def load_images(self):
        """Load the face image for this value/suit and the shared card back."""
        self.face = QPixmap(os.path.join('cards', ('%s%s.png' % (self.value, self.suit))))
        self.back = QPixmap(os.path.join('images', 'back.png'))

    def turn_face_up(self):
        self.side = SIDE_FACE
        self.setPixmap(self.face)

    def turn_back_up(self):
        self.side = SIDE_BACK
        self.setPixmap(self.back)

    def is_face_up(self):
        return (self.side == SIDE_FACE)

    def color(self):
        """'r' for hearts/diamonds, 'b' for the black suits."""
        return ('r' if (self.suit in ('H', 'D')) else 'b')

    def mousePressEvent(self, e):
        # Fix: `is_face_up` must be *called* — the original tested the bound
        # method object itself, which is always truthy, so a face-down card
        # on top of its stack could never be flipped by clicking.
        if ((not self.is_face_up()) and (self.stack.cards[(- 1)] == self)):
            self.turn_face_up()
            e.accept()
            return
        if (self.stack and (not self.stack.is_free_card(self))):
            e.ignore()
            return
        self.stack.activate()
        e.accept()
        # Fix: forward the *press* event to the base class (the original
        # mistakenly forwarded mouseReleaseEvent from the press handler).
        super(Card, self).mousePressEvent(e)

    def mouseReleaseEvent(self, e):
        """Drop: move this card (and its children) onto a valid target stack."""
        self.stack.deactivate()
        items = self.collidingItems()
        if items:
            for item in items:
                # Accept a drop onto another stack's card or directly onto a stack.
                if ((isinstance(item, Card) and (item.stack != self.stack)) or (isinstance(item, StackBase) and (item != self.stack))):
                    if item.stack.is_valid_drop(self):
                        cards = self.stack.remove_card(self)
                        item.stack.add_cards(cards)
                        break
        self.stack.update()
        super(Card, self).mouseReleaseEvent(e)

    def mouseDoubleClickEvent(self, e):
        # Double-click auto-plays a free card (handled by the listener).
        if self.stack.is_free_card(self):
            self.signals.doubleclicked.emit()
            e.accept()
        super(Card, self).mouseDoubleClickEvent(e)
class QSVR(SVR, SerializableModelMixin):
    """Quantum Support Vector Regressor.

    A drop-in replacement for sklearn's SVR that uses a quantum kernel's
    ``evaluate`` method as the kernel callable.
    """

    def __init__(self, *, quantum_kernel: Optional[BaseKernel] = None, **kwargs):
        """
        Args:
            quantum_kernel: kernel to evaluate; defaults to
                FidelityQuantumKernel when not given.
            **kwargs: forwarded to SVR.  A plain ``kernel`` argument is
                discarded with a warning — use ``quantum_kernel`` instead.
        """
        if ('kernel' in kwargs):
            msg = "'kernel' argument is not supported and will be discarded, please use 'quantum_kernel' instead."
            warnings.warn(msg, QiskitMachineLearningWarning, stacklevel=2)
            del kwargs['kernel']
        self._quantum_kernel = (quantum_kernel if quantum_kernel else FidelityQuantumKernel())
        super().__init__(kernel=self._quantum_kernel.evaluate, **kwargs)

    # Fix: the getter lacked @property, the setter was attached to a
    # non-existent `_kernel` property, and _get_param_names lacked
    # @classmethod — all restored below.
    @property
    def quantum_kernel(self) -> BaseKernel:
        """The quantum kernel currently in use."""
        return self._quantum_kernel

    @quantum_kernel.setter
    def quantum_kernel(self, quantum_kernel: BaseKernel):
        self._quantum_kernel = quantum_kernel
        # Keep sklearn's kernel callable in sync with the new kernel.
        self.kernel = self._quantum_kernel.evaluate

    @classmethod
    def _get_param_names(cls):
        # Expose `quantum_kernel` instead of sklearn's `kernel` in get_params().
        names = SVR._get_param_names()
        names.remove('kernel')
        return sorted((names + ['quantum_kernel']))
class Key(object):
    """Abstract base for network keys.

    Two keys compare equal when they are of the same concrete type and belong
    to the same network; hashing deliberately falls back to identity.
    """

    # Identity-based hashing, as in the original definition.
    __hash__ = object.__hash__

    def __init__(self, network, compressed=False):
        self.network = network
        self.compressed = compressed

    def __eq__(self, other):
        return other and self.network == other.network and type(self) == type(other)

    def __ne__(self, other):
        return not (self == other)

    def get_key(self):
        """Subclasses must return the serialized key material."""
        raise NotImplementedError()
def train(args):
    """Train a face-recognition backbone plus margin head on CASIA-WebFace,
    periodically evaluating on the LFW, AgeDB-30 and CFP-FP benchmarks.

    Args:
        args: Parsed CLI namespace. Fields read here include ``gpus``,
            ``save_dir``, ``model_pre``, ``backbone``, ``margin_type``,
            ``feature_dim``, ``scale_size``, ``batch_size``, ``total_epoch``,
            ``resume``, ``net_path``, ``margin_path``, ``save_freq``,
            ``test_freq`` and the dataset root / file-list paths.
    """
    # More than one comma-separated GPU id enables DataParallel below.
    multi_gpus = False
    if (len(args.gpus.split(',')) > 1):
        multi_gpus = True
    os.environ['CUDA_VISIBLE_DEVICES'] = args.gpus
    device = torch.device(('cuda' if torch.cuda.is_available() else 'cpu'))
    # Time-stamped checkpoint directory; refuse to reuse an existing one.
    save_dir = os.path.join(args.save_dir, (((args.model_pre + args.backbone.upper()) + '_') + datetime.now().strftime('%Y%m%d_%H%M%S')))
    if os.path.exists(save_dir):
        raise NameError('model dir exists!')
    os.makedirs(save_dir)
    logging = init_log(save_dir)
    _print = logging.info
    # Normalize images to the [-1, 1] range expected by the backbones.
    transform = transforms.Compose([transforms.ToTensor(), transforms.Normalize(mean=(0.5, 0.5, 0.5), std=(0.5, 0.5, 0.5))])
    trainset = CASIAWebFace(args.train_root, args.train_file_list, transform=transform)
    trainloader = torch.utils.data.DataLoader(trainset, batch_size=args.batch_size, shuffle=True, num_workers=8, drop_last=False)
    # Evaluation loaders for the three verification benchmarks.
    lfwdataset = LFW(args.lfw_test_root, args.lfw_file_list, transform=transform)
    lfwloader = torch.utils.data.DataLoader(lfwdataset, batch_size=128, shuffle=False, num_workers=4, drop_last=False)
    agedbdataset = AgeDB30(args.agedb_test_root, args.agedb_file_list, transform=transform)
    agedbloader = torch.utils.data.DataLoader(agedbdataset, batch_size=128, shuffle=False, num_workers=4, drop_last=False)
    cfpfpdataset = CFP_FP(args.cfpfp_test_root, args.cfpfp_file_list, transform=transform)
    cfpfploader = torch.utils.data.DataLoader(cfpfpdataset, batch_size=128, shuffle=False, num_workers=4, drop_last=False)
    # Backbone selection.
    if (args.backbone == 'MobileFace'):
        net = MobileFaceNet()
    elif (args.backbone == 'Res50_IR'):
        net = CBAMResNet(50, feature_dim=args.feature_dim, mode='ir')
    elif (args.backbone == 'SERes50_IR'):
        net = CBAMResNet(50, feature_dim=args.feature_dim, mode='ir_se')
    elif (args.backbone == 'Res100_IR'):
        net = CBAMResNet(100, feature_dim=args.feature_dim, mode='ir')
    elif (args.backbone == 'SERes100_IR'):
        net = CBAMResNet(100, feature_dim=args.feature_dim, mode='ir_se')
    elif (args.backbone == 'Attention_56'):
        net = ResidualAttentionNet_56(feature_dim=args.feature_dim)
    elif (args.backbone == 'Attention_92'):
        net = ResidualAttentionNet_92(feature_dim=args.feature_dim)
    else:
        # NOTE(review): falls through with ``net`` unbound — a later
        # ``net.to(device)`` would raise NameError for an unknown backbone.
        print(args.backbone, ' is not available!')
    # Margin / classification head selection.
    if (args.margin_type == 'ArcFace'):
        margin = ArcMarginProduct(args.feature_dim, trainset.class_nums, s=args.scale_size)
    elif (args.margin_type == 'MultiMargin'):
        margin = MultiMarginProduct(args.feature_dim, trainset.class_nums, s=args.scale_size)
    elif (args.margin_type == 'CosFace'):
        margin = CosineMarginProduct(args.feature_dim, trainset.class_nums, s=args.scale_size)
    elif (args.margin_type == 'Softmax'):
        margin = InnerProduct(args.feature_dim, trainset.class_nums)
    elif (args.margin_type == 'SphereFace'):
        # NOTE(review): SphereFace intentionally(?) leaves ``margin`` unbound.
        pass
    else:
        print(args.margin_type, 'is not available!')
    if args.resume:
        # Resume both the backbone and the margin head from checkpoints.
        print('resume the model parameters from: ', args.net_path, args.margin_path)
        net.load_state_dict(torch.load(args.net_path)['net_state_dict'])
        margin.load_state_dict(torch.load(args.margin_path)['net_state_dict'])
    criterion = torch.nn.CrossEntropyLoss().to(device)
    optimizer_ft = optim.SGD([{'params': net.parameters(), 'weight_decay': 0.0005}, {'params': margin.parameters(), 'weight_decay': 0.0005}], lr=0.1, momentum=0.9, nesterov=True)
    # LR decays by 10x after epochs 6, 11 and 16.
    exp_lr_scheduler = lr_scheduler.MultiStepLR(optimizer_ft, milestones=[6, 11, 16], gamma=0.1)
    if multi_gpus:
        net = DataParallel(net).to(device)
        margin = DataParallel(margin).to(device)
    else:
        net = net.to(device)
        margin = margin.to(device)
    # Best-so-far accuracies (percent) per benchmark and the iteration they occurred at.
    best_lfw_acc = 0.0
    best_lfw_iters = 0
    best_agedb30_acc = 0.0
    best_agedb30_iters = 0
    best_cfp_fp_acc = 0.0
    best_cfp_fp_iters = 0
    total_iters = 0
    vis = Visualizer(env=(args.model_pre + args.backbone))
    for epoch in range(1, (args.total_epoch + 1)):
        # NOTE(review): scheduler stepped at epoch start, before any
        # optimizer.step() — modern PyTorch warns about this ordering.
        exp_lr_scheduler.step()
        _print('Train Epoch: {}/{} ...'.format(epoch, args.total_epoch))
        net.train()
        since = time.time()
        for data in trainloader:
            (img, label) = (data[0].to(device), data[1].to(device))
            optimizer_ft.zero_grad()
            raw_logits = net(img)
            # The margin head consumes features plus labels (e.g. ArcFace).
            output = margin(raw_logits, label)
            total_loss = criterion(output, label)
            total_loss.backward()
            optimizer_ft.step()
            total_iters += 1
            # Periodic console / visdom logging every 100 iterations.
            if ((total_iters % 100) == 0):
                (_, predict) = torch.max(output.data, 1)
                total = label.size(0)
                correct = (np.array(predict.cpu()) == np.array(label.data.cpu())).sum()
                time_cur = ((time.time() - since) / 100)
                since = time.time()
                vis.plot_curves({'softmax loss': total_loss.item()}, iters=total_iters, title='train loss', xlabel='iters', ylabel='train loss')
                vis.plot_curves({'train accuracy': (correct / total)}, iters=total_iters, title='train accuracy', xlabel='iters', ylabel='train accuracy')
                _print('Iters: {:0>6d}/[{:0>2d}], loss: {:.4f}, train_accuracy: {:.4f}, time: {:.2f} s/iter, learning rate: {}'.format(total_iters, epoch, total_loss.item(), (correct / total), time_cur, exp_lr_scheduler.get_lr()[0]))
            # Periodic checkpointing of both backbone and margin head.
            if ((total_iters % args.save_freq) == 0):
                msg = 'Saving checkpoint: {}'.format(total_iters)
                _print(msg)
                if multi_gpus:
                    net_state_dict = net.module.state_dict()
                    margin_state_dict = margin.module.state_dict()
                else:
                    net_state_dict = net.state_dict()
                    margin_state_dict = margin.state_dict()
                if (not os.path.exists(save_dir)):
                    os.mkdir(save_dir)
                torch.save({'iters': total_iters, 'net_state_dict': net_state_dict}, os.path.join(save_dir, ('Iter_%06d_net.ckpt' % total_iters)))
                torch.save({'iters': total_iters, 'net_state_dict': margin_state_dict}, os.path.join(save_dir, ('Iter_%06d_margin.ckpt' % total_iters)))
            # Periodic evaluation on LFW / AgeDB-30 / CFP-FP.
            if ((total_iters % args.test_freq) == 0):
                net.eval()
                getFeatureFromTorch('./result/cur_lfw_result.mat', net, device, lfwdataset, lfwloader)
                lfw_accs = evaluation_10_fold('./result/cur_lfw_result.mat')
                _print('LFW Ave Accuracy: {:.4f}'.format((np.mean(lfw_accs) * 100)))
                if (best_lfw_acc <= (np.mean(lfw_accs) * 100)):
                    best_lfw_acc = (np.mean(lfw_accs) * 100)
                    best_lfw_iters = total_iters
                getFeatureFromTorch('./result/cur_agedb30_result.mat', net, device, agedbdataset, agedbloader)
                age_accs = evaluation_10_fold('./result/cur_agedb30_result.mat')
                _print('AgeDB-30 Ave Accuracy: {:.4f}'.format((np.mean(age_accs) * 100)))
                if (best_agedb30_acc <= (np.mean(age_accs) * 100)):
                    best_agedb30_acc = (np.mean(age_accs) * 100)
                    best_agedb30_iters = total_iters
                getFeatureFromTorch('./result/cur_cfpfp_result.mat', net, device, cfpfpdataset, cfpfploader)
                cfp_accs = evaluation_10_fold('./result/cur_cfpfp_result.mat')
                _print('CFP-FP Ave Accuracy: {:.4f}'.format((np.mean(cfp_accs) * 100)))
                if (best_cfp_fp_acc <= (np.mean(cfp_accs) * 100)):
                    best_cfp_fp_acc = (np.mean(cfp_accs) * 100)
                    best_cfp_fp_iters = total_iters
                _print('Current Best Accuracy: LFW: {:.4f} in iters: {}, AgeDB-30: {:.4f} in iters: {} and CFP-FP: {:.4f} in iters: {}'.format(best_lfw_acc, best_lfw_iters, best_agedb30_acc, best_agedb30_iters, best_cfp_fp_acc, best_cfp_fp_iters))
                vis.plot_curves({'lfw': np.mean(lfw_accs), 'agedb-30': np.mean(age_accs), 'cfp-fp': np.mean(cfp_accs)}, iters=total_iters, title='test accuracy', xlabel='iters', ylabel='test accuracy')
                # Back to training mode after evaluation.
                net.train()
    _print('Finally Best Accuracy: LFW: {:.4f} in iters: {}, AgeDB-30: {:.4f} in iters: {} and CFP-FP: {:.4f} in iters: {}'.format(best_lfw_acc, best_lfw_iters, best_agedb30_acc, best_agedb30_iters, best_cfp_fp_acc, best_cfp_fp_iters))
    print('finishing training')
# Bug fix: the parametrize marker had lost its '@pytest.mark' prefix, leaving
# a bare '.parametrize(...)' expression — a syntax error.
@pytest.mark.parametrize('node_type', [TeleporterNetworkNode])
def test_unchanged_create_new_node_corruption(skip_qtbot, corruption_game_description, node_type):
    """Round-trip check: a NodeDetailsPopup opened on an existing node should
    recreate an equal node when nothing is edited."""
    node = next((node for node in corruption_game_description.region_list.iterate_nodes() if isinstance(node, node_type)))
    dialog = NodeDetailsPopup(corruption_game_description, node)
    skip_qtbot.addWidget(dialog)
    new_node = dialog.create_new_node()
    assert (node == new_node)
def smoke_test(executable: pathlib.Path, debug: bool, qt5: bool) -> None:
    """Run the packaged executable once and fail on any unexpected output.

    Known-benign output lines (bootloader chatter, platform/Qt/Chromium noise)
    are filtered via the regex whitelists below; anything that survives
    triggers a diagnostic re-run with ``--debug`` and an exception carrying
    both the original and the re-run output.
    """
    stdout_whitelist = []
    # PyInstaller bootloader messages are always acceptable.
    stderr_whitelist = ['\\[.*\\] PyInstaller Bootloader .*', '\\[.*\\] LOADER: .*']
    if IS_MACOS:
        # Assorted macOS / Chromium / OpenGL warnings that are harmless here.
        stderr_whitelist.extend(['objc\\[.*\\]: .* One of the two will be used\\. Which one is undefined\\.', 'QCoreApplication::applicationDirPath: Please instantiate the QApplication object first', '\\[.*:ERROR:mach_port_broker.mm\\(48\\)\\] bootstrap_look_up org\\.chromium\\.Chromium\\.rohitfork\\.1: Permission denied \\(1100\\)', '\\[.*:ERROR:mach_port_broker.mm\\(43\\)\\] bootstrap_look_up: Unknown service name \\(1102\\)', '[0-9:]* WARNING: The available OpenGL surface format was either not version 3\\.2 or higher or not a Core Profile\\.', 'Chromium on macOS will fall back to software rendering in this case\\.', 'Hardware acceleration and features such as WebGL will not be available\\.', 'Unable to create basic Accelerated OpenGL renderer\\.', 'Core Image is now using the software OpenGL renderer\\. This will be slow\\.', '[0-9:]* ERROR: Load error: ERR_FILE_NOT_FOUND', '[0-9:]* WARNING: Failed to load libssl/libcrypto\\.', '\\[.*:ERROR:command_buffer_proxy_impl.cc\\([0-9]*\\)\\] ContextResult::kTransientFailure: Failed to send .*CreateCommandBuffer\\.'])
        if (not qt5):
            # Additional warnings only seen with Qt 6 builds.
            stderr_whitelist.extend(['[0-9:]* WARNING: Incompatible version of OpenSSL', '[0-9:]* WARNING: Qt WebEngine resources not found at .*', '[0-9:]* WARNING: Installed Qt WebEngine locales directory not found at location /qtwebengine_locales\\. Trying application directory\\.\\.\\.'])
    elif IS_WINDOWS:
        stderr_whitelist.extend(['\\[.*:ERROR:dxva_video_decode_accelerator_win.cc\\(\\d+\\)\\] DXVAVDA fatal error: could not LoadLibrary: .*: The specified module could not be found. \\(0x7E\\)'])
    proc = _smoke_test_run(executable)
    if debug:
        # Debug builds are expected to be chatty; only check they ran.
        print('Skipping output check for debug build')
        return
    stdout = '\n'.join(_filter_whitelisted(proc.stdout, stdout_whitelist))
    stderr = '\n'.join(_filter_whitelisted(proc.stderr, stderr_whitelist))
    if (stdout or stderr):
        # Re-run with --debug to collect extra context for the failure report.
        print('Unexpected output, running with --debug')
        proc = _smoke_test_run(executable, '--debug')
        debug_stdout = proc.stdout.decode('utf-8')
        debug_stderr = proc.stderr.decode('utf-8')
        lines = ['Unexpected output!', '']
        if stdout:
            lines += ['stdout', '------', '', stdout, '']
        if stderr:
            lines += ['stderr', '------', '', stderr, '']
        if debug_stdout:
            lines += ['debug rerun stdout', '', '', debug_stdout, '']
        if debug_stderr:
            lines += ['debug rerun stderr', '', '', debug_stderr, '']
        raise Exception('\n'.join(lines))
def test_ds_non_existent(pytester: pytest.Pytester, monkeypatch: pytest.MonkeyPatch) -> None:
    """A DJANGO_SETTINGS_MODULE pointing at a missing module must surface as
    an ImportError and a non-zero exit code."""
    monkeypatch.setenv('DJANGO_SETTINGS_MODULE', 'DOES_NOT_EXIST')
    pytester.makepyfile('def test_ds(): pass')
    outcome = pytester.runpytest_subprocess()
    outcome.stderr.fnmatch_lines(['*ImportError:*DOES_NOT_EXIST*'])
    assert outcome.ret != 0
def test_extract_variable_with_similar(config, workspace, code_action_context):
    """End-to-end check of the 'extract variable including similar statements'
    rope refactoring as offered and then executed through pylsp."""
    document = create_document(workspace, 'simple.py')
    # Select the 'a + b' expression on line 6, up to (but excluding) ')\n'.
    line = 6
    start_col = document.lines[line].index('a + b')
    end_col = document.lines[line].index(')\n')
    selection = Range((line, start_col), (line, end_col))
    response = plugin.pylsp_code_actions(config=config, workspace=workspace, document=document, range=selection, context=code_action_context)
    # The exact code action we expect, including the command payload replayed below.
    expected: typing.CodeAction = {'title': 'Extract variable including similar statements', 'kind': 'refactor.extract', 'command': {'title': 'Extract variable including similar statements', 'command': commands.COMMAND_REFACTOR_EXTRACT_VARIABLE, 'arguments': [{'document_uri': document.uri, 'range': selection, 'global_': False, 'similar': True}]}}
    assert (expected in response)
    assert (expected['command'] is not None)
    command = expected['command']['command']
    arguments = expected['command']['arguments']
    # Execute the refactoring; the edit arrives as a workspace/applyEdit request.
    response = plugin.pylsp_execute_command(config=config, workspace=workspace, command=command, arguments=arguments)
    edit_request = workspace._endpoint.request.call_args
    document_edits = assert_single_document_edit(edit_request, document)
    new_text = assert_text_edits(document_edits, target='simple_extract_variable_with_similar.py')
    # The new variable is assigned, and 'a + b' still appears exactly twice
    # in the rewritten file (presumably the assignment plus one remaining use
    # — verify against the target fixture).
    assert ('extracted_variable = ' in new_text)
    assert (new_text.count('a + b') == 2)
class Migration(migrations.Migration):
    """Initial schema for the products app: Manufacturer and Product tables."""

    # First migration of this app — no prior migration state exists.
    initial = True

    # No dependencies on other apps' migrations.
    dependencies = []

    # Product.manufacturer is a CASCADE foreign key exposed as
    # manufacturer.products on the reverse side.
    operations = [migrations.CreateModel(name='Manufacturer', fields=[('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')), ('name', models.CharField(max_length=120)), ('location', models.CharField(max_length=120)), ('active', models.BooleanField(default=True))]), migrations.CreateModel(name='Product', fields=[('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')), ('name', models.CharField(max_length=120)), ('description', models.TextField(blank=True, null=True)), ('photo', models.ImageField(blank=True, null=True, upload_to='')), ('price', models.FloatField()), ('shipping_cost', models.FloatField()), ('quantity', models.PositiveSmallIntegerField()), ('manufacturer', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, related_name='products', to='products.Manufacturer'))])]
class GraphAttentionLayer(nn.Module):
    """Multi-head graph attention (GAT-style) layer with optional repeated
    message-passing iterations and a residual tanh update.

    forward() returns both the updated node embeddings and the attention
    weights captured on the first iteration.

    NOTE(review): the residual update ``cur_h + mean(head_embeds)`` implies
    input_dim == output_dim for num_gat_iters > 1 — confirm with callers.
    """

    def __init__(self, input_dim, output_dim, num_gat_iters=1, num_heads=4, dropout=0.5, alpha=0.2):
        """
        Args:
            input_dim: size of incoming node features.
            output_dim: size of per-head transformed features.
            num_gat_iters: number of attention/update iterations.
            num_heads: number of independent attention heads.
            dropout: dropout rate applied to the attention weights.
            alpha: negative slope of the LeakyReLU on raw attention scores.
        """
        super(GraphAttentionLayer, self).__init__()
        self.input_dim = input_dim
        self.output_dim = output_dim
        self.num_gat_iters = num_gat_iters
        self.num_heads = num_heads
        self.alpha = alpha
        # Per-head linear transform plus the two halves of the additive
        # attention score (a_1 for the source, a_2 for the target node).
        self.W = nn.ModuleList([nn.Linear(input_dim, output_dim) for _ in range(self.num_heads)])
        self.a_1 = nn.ModuleList([nn.Linear(output_dim, 1) for _ in range(self.num_heads)])
        self.a_2 = nn.ModuleList([nn.Linear(output_dim, 1) for _ in range(self.num_heads)])
        self.leakyrelu = nn.LeakyReLU(self.alpha)
        self.dropout = nn.Dropout(dropout)

    def forward(self, h, adjacency):
        """Run num_gat_iters rounds of multi-head attention over *h*.

        Args:
            h: node features — presumably batched, (batch, nodes, input_dim);
                TODO confirm against callers.
            adjacency: 0/1 mask of allowed edges, multiplied into the
                exponentiated scores so non-edges get zero weight.

        Returns:
            (out, att_weights): updated embeddings and the detached per-head
            attention weights recorded during the first iteration.
        """
        att_weights = []
        cur_h = h
        for iter in range(self.num_gat_iters):
            head_embeds = []
            for head in range(self.num_heads):
                cur_h_transformed = self.W[head](cur_h)
                # Additive attention: score(i, j) = a_1(h_j) + a_2(h_i),
                # broadcast into a pairwise score matrix.
                att_half_1 = self.a_1[head](cur_h_transformed).squeeze((- 1))
                att_half_2 = self.a_2[head](cur_h_transformed).squeeze((- 1))
                att_coeff = (att_half_1.unsqueeze((- 2)) + att_half_2.unsqueeze((- 3)))
                att_coeff = self.leakyrelu(att_coeff)
                # Detached row-max for a numerically stable exp (the max is a
                # constant shift, so keeping it out of autograd is safe).
                with torch.no_grad():
                    masked_att_max = torch.max(att_coeff, 2)[0]
                masked_att_reduced = (att_coeff.squeeze((- 1)) - masked_att_max)
                # Masked softmax: zero out non-edges, then normalize per row.
                masked_att_exp = (masked_att_reduced.exp() * adjacency)
                masked_att_exp = masked_att_exp.unsqueeze((- 1))
                mask_sum = masked_att_exp.sum(dim=2, keepdim=True)
                mask_ones = torch.ones_like(mask_sum)
                # Avoid 0/0 for isolated nodes: divide by 1 where the row sums to 0.
                mask_sum_normalized = torch.where((mask_sum == 0.0), mask_ones, mask_sum)
                att_values = torch.div(masked_att_exp, mask_sum_normalized)
                att_values = self.dropout(att_values)
                # Weighted aggregation of the transformed neighbor features.
                h_prime = torch.bmm(att_values.squeeze((- 1)), cur_h_transformed.squeeze((- 2))).unsqueeze((- 2))
                head_embeds.append(h_prime)
                if (iter == 0):
                    # Keep first-iteration attention (detached) for inspection.
                    att_weights.append(att_values.squeeze((- 1)).detach())
            # Residual update: mean over heads, then tanh.
            cur_h = torch.tanh((cur_h + torch.mean(torch.stack(head_embeds, dim=(- 1)), dim=(- 1))))
        out = cur_h
        att_weights = torch.stack(att_weights, dim=1)
        return (out, att_weights)
def generate_dealloc_for_class(cl: ClassIR, dealloc_func_name: str, clear_func_name: str, emitter: Emitter) -> None:
    """Emit the C ``tp_dealloc`` implementation for *cl*.

    The generated function untracks the object from the GC, clears its
    references via *clear_func_name* inside a trashcan guard (to avoid stack
    overflow on deep reference chains), and frees the memory.
    """
    struct = cl.struct_name(emitter.names)
    for src_line in (
        'static void',
        f'{dealloc_func_name}({struct} *self)',
        '{',
        'PyObject_GC_UnTrack(self);',
        f'CPy_TRASHCAN_BEGIN(self, {dealloc_func_name})',
        f'{clear_func_name}(self);',
        'Py_TYPE(self)->tp_free((PyObject *)self);',
        'CPy_TRASHCAN_END(self)',
        '}',
    ):
        emitter.emit_line(src_line)
def test_base_case_call() -> None:
    """A call that never recurses (fact(0)) still records exactly one row."""
    with RecursionTable('fact') as tracker:
        def fact(n):
            if (n == 0):
                return 1
            else:
                return (n * fact((n - 1)))
        fact(0)
        table_data = tracker.get_recursive_dict()
        # Three columns: the argument, the caller, and the return value.
        assert len(table_data.keys()) == 3
        assert table_data['n'] == [0]
        assert table_data['called by'] == ['N/A']
        assert table_data['return value'] == [1]
class Window(_Window, base.Window):
    """A regular top-level X11 client window managed by qtile.

    NOTE(review): several getter/setter method pairs below (``group``,
    ``floating``, ``fullscreen``, ``maximized``, ``minimized``) and the
    single-def accessors (``edges``, ``wants_to_fullscreen``) appear to have
    lost their ``@property`` / ``@<name>.setter`` decorators in extraction,
    and the bare ``_command()`` / ``_command`` statements look like remnants
    of a stripped command-exposing decorator (e.g. qtile's
    ``@expose_command()``). As written, later defs shadow earlier ones —
    restore the decorators before use.
    """

    # X event mask selecting which events this window reports to qtile.
    _window_mask = (((EventMask.StructureNotify | EventMask.PropertyChange) | EventMask.EnterWindow) | EventMask.FocusChange)

    def __init__(self, window, qtile):
        _Window.__init__(self, window, qtile)
        self._wm_class: (list[str] | None) = None
        self.update_wm_class()
        self.update_name()
        self.set_group()
        # Add to the save-set so the client survives if qtile dies.
        qtile.core.conn.conn.core.ChangeSaveSet(SetMode.Insert, self.window.wid)
        self.update_wm_net_icon()
        self._grab_click()

    def group(self):
        # Getter: the Group this window belongs to (may be None).
        return self._group

    def group(self, group):
        # Setter: record the group and mirror it into _NET_WM_DESKTOP.
        if group:
            try:
                self.window.set_property('_NET_WM_DESKTOP', self.qtile.groups.index(group))
            except xcffib.xproto.WindowError:
                logger.exception('whoops, got error setting _NET_WM_DESKTOP, too early?')
        self._group = group

    def edges(self):
        # (x1, y1, x2, y2) bounds of the window in screen coordinates.
        return (self.x, self.y, (self.x + self.width), (self.y + self.height))

    def floating(self):
        # True for any non-tiled state (floating, maximized, fullscreen, minimized).
        return (self._float_state != FloatStates.NOT_FLOATING)

    def floating(self, do_float):
        # Setter: float or re-tile the window, fixing up its stacking order
        # relative to the group's tiled windows.
        stack = self.qtile.core._root.query_tree()
        # wids of the other tiled windows in this window's group, in stacking order.
        tiled = [win.window.wid for win in (self.group.tiled_windows if self.group else [])]
        tiled_stack = [wid for wid in stack if ((wid in tiled) and (wid != self.window.wid))]
        if (do_float and (self._float_state == FloatStates.NOT_FLOATING)):
            if (self.group and self.group.screen):
                screen = self.group.screen
                self._enablefloating((screen.x + self.float_x), (screen.y + self.float_y), self._float_width, self._float_height)
                # Ensure the newly floated window sits above the tiled stack.
                if (tiled_stack and ((not self.kept_above) or self.qtile.config.floats_kept_above)):
                    stack_list = list(stack)
                    highest_tile = tiled_stack[(- 1)]
                    if (stack_list.index(self.window.wid) < stack_list.index(highest_tile)):
                        self.window.configure(stackmode=xcffib.xproto.StackMode.Above, sibling=highest_tile)
            else:
                # No screen yet (e.g. floated from a hook before mapping).
                self._float_state = FloatStates.FLOATING
            if ((not self.kept_above) and self.qtile.config.floats_kept_above):
                self.keep_above(enable=True)
        elif ((not do_float) and (self._float_state != FloatStates.NOT_FLOATING)):
            self.update_fullscreen_wm_state(False)
            if (self._float_state == FloatStates.FLOATING):
                # Remember the floating geometry for the next float.
                self._float_width = self.width
                self._float_height = self.height
            self._float_state = FloatStates.NOT_FLOATING
            self.group.mark_floating(self, False)
            # Keep the re-tiled window level with the other tiled windows.
            if tiled_stack:
                self.window.configure(stackmode=xcffib.xproto.StackMode.Above, sibling=tiled_stack[(- 1)])
            hook.fire('float_change')

    def wants_to_fullscreen(self):
        # True when the client itself advertises fullscreen via _NET_WM_STATE.
        try:
            return ('fullscreen' in self.window.get_net_wm_state())
        except (xcffib.xproto.WindowError, xcffib.xproto.AccessError):
            pass
        return False

    _command()
    def toggle_floating(self):
        self.floating = (not self.floating)

    def set_wm_state(self, old_state, new_state):
        # Push _NET_WM_STATE to the server only when it actually changed.
        if (new_state != old_state):
            self.window.set_property('_NET_WM_STATE', list(new_state))

    def update_fullscreen_wm_state(self, do_full):
        """Add/remove _NET_WM_STATE_FULLSCREEN on the client window."""
        # EWMH says no change of property if the window is already in the
        # requested state, so bail out early when nothing would change.
        if (do_full == self.fullscreen):
            return
        atom = set([self.qtile.core.conn.atoms['_NET_WM_STATE_FULLSCREEN']])
        prev_state = set(self.window.get_property('_NET_WM_STATE', 'ATOM', unpack=int))
        if do_full:
            self.set_wm_state(prev_state, (prev_state | atom))
        else:
            self.set_wm_state(prev_state, (prev_state - atom))

    def fullscreen(self):
        return (self._float_state == FloatStates.FULLSCREEN)

    def fullscreen(self, do_full):
        # Setter: enter/leave fullscreen, saving/restoring tiled geometry.
        if do_full:
            needs_change = (self._float_state != FloatStates.FULLSCREEN)
            screen = (self.group.screen or self.qtile.find_closest_screen(self.x, self.y))
            if (self._float_state not in (FloatStates.MAXIMIZED, FloatStates.FULLSCREEN)):
                self._save_geometry()
            bw = self.group.floating_layout.fullscreen_border_width
            self._enablefloating(screen.x, screen.y, (screen.width - (2 * bw)), (screen.height - (2 * bw)), new_float_state=FloatStates.FULLSCREEN)
            if needs_change:
                self.change_layer()
            return
        if (self._float_state == FloatStates.FULLSCREEN):
            self._restore_geometry()
            self.floating = False
            self.change_layer()
            return

    def maximized(self):
        return (self._float_state == FloatStates.MAXIMIZED)

    def maximized(self, do_maximize):
        # Setter: fill the screen's usable area (minus the layout's border).
        if do_maximize:
            screen = (self.group.screen or self.qtile.find_closest_screen(self.x, self.y))
            if (self._float_state not in (FloatStates.MAXIMIZED, FloatStates.FULLSCREEN)):
                self._save_geometry()
            bw = self.group.floating_layout.max_border_width
            self._enablefloating(screen.dx, screen.dy, (screen.dwidth - (2 * bw)), (screen.dheight - (2 * bw)), new_float_state=FloatStates.MAXIMIZED)
        elif (self._float_state == FloatStates.MAXIMIZED):
            self._restore_geometry()
            self.floating = False

    def minimized(self):
        return (self._float_state == FloatStates.MINIMIZED)

    def minimized(self, do_minimize):
        if do_minimize:
            if (self._float_state != FloatStates.MINIMIZED):
                self._enablefloating(new_float_state=FloatStates.MINIMIZED)
        elif (self._float_state == FloatStates.MINIMIZED):
            self.floating = False

    _command()
    def toggle_minimize(self):
        self.minimized = (not self.minimized)

    _command()
    def is_visible(self) -> bool:
        return ((not self.hidden) and (not self.minimized))

    _command()
    def static(self, screen: (int | None)=None, x: (int | None)=None, y: (int | None)=None, width: (int | None)=None, height: (int | None)=None) -> None:
        """Convert this window into a Static window pinned to *screen*."""
        self.defunct = True
        if (screen is None):
            screen = self.qtile.current_screen
        else:
            screen = self.qtile.screens[screen]
        if self.group:
            self.group.remove(self)
        s = Static(self.window, self.qtile, screen, x, y, width, height)
        # Replace ourselves in qtile's bookkeeping with the Static wrapper.
        self.qtile.windows_map[self.window.wid] = s
        self.qtile.core.update_client_lists()
        hook.fire('client_managed', s)

    def tweak_float(self, x=None, y=None, dx=0, dy=0, w=None, h=None, dw=0, dh=0):
        """Adjust floating geometry by absolute values and/or deltas, then
        migrate the window to whichever screen now contains its center."""
        if (x is not None):
            self.x = x
        self.x += dx
        if (y is not None):
            self.y = y
        self.y += dy
        if (w is not None):
            self.width = w
        self.width += dw
        if (h is not None):
            self.height = h
        self.height += dh
        # Clamp to non-negative sizes.
        if (self.height < 0):
            self.height = 0
        if (self.width < 0):
            self.width = 0
        screen = self.qtile.find_closest_screen((self.x + (self.width // 2)), (self.y + (self.height // 2)))
        if (self.group and (screen is not None) and (screen != self.group.screen)):
            # Center moved onto another screen: follow it.
            self.group.remove(self, force=True)
            screen.group.add(self, force=True)
            self.qtile.focus_screen(screen.index)
        self._reconfigure_floating()

    _command()
    def get_size(self):
        return (self.width, self.height)

    _command()
    def get_position(self):
        return (self.x, self.y)

    def _reconfigure_floating(self, new_float_state=FloatStates.FLOATING):
        # Apply the float state: hide when minimizing, otherwise re-place,
        # then update bookkeeping and fire the float_change hook on change.
        self.update_fullscreen_wm_state((new_float_state == FloatStates.FULLSCREEN))
        if (new_float_state == FloatStates.MINIMIZED):
            self.state = IconicState
            self.hide()
        else:
            self.place(self.x, self.y, self.width, self.height, self.borderwidth, self.bordercolor, above=False, respect_hints=True)
        if (self._float_state != new_float_state):
            self._float_state = new_float_state
            if self.group:
                self.group.mark_floating(self, True)
            if (new_float_state == FloatStates.FLOATING):
                if self.qtile.config.floats_kept_above:
                    self.keep_above(enable=True)
            elif (new_float_state == FloatStates.MAXIMIZED):
                self.move_to_top()
            hook.fire('float_change')

    def _enablefloating(self, x=None, y=None, w=None, h=None, new_float_state=FloatStates.FLOATING):
        # Minimizing keeps the current geometry; anything else adopts the
        # geometry passed in.
        if (new_float_state != FloatStates.MINIMIZED):
            self.x = x
            self.y = y
            self.width = w
            self.height = h
        self._reconfigure_floating(new_float_state=new_float_state)

    def set_group(self):
        """Assign this window to a group based on _NET_WM_DESKTOP or, failing
        that, the group of the window it is transient for."""
        group = None
        index = self.window.get_wm_desktop()
        if ((index is not None) and (index < len(self.qtile.groups))):
            group = self.qtile.groups[index]
        elif (index is None):
            transient_for = self.is_transient_for()
            if (transient_for is not None):
                group = transient_for._group
        if (group is not None):
            group.add(self)
            self._group = group
            # Hide immediately if that group isn't the visible one.
            if (group != self.qtile.current_screen.group):
                self.hide()

    _command()
    def togroup(self, group_name=None, *, switch_group=False, toggle=False):
        """Move this window to *group_name* (current group when None),
        optionally switching to it; *toggle* bounces back to the previous group."""
        if (group_name is None):
            group = self.qtile.current_group
        else:
            group = self.qtile.groups_map.get(group_name)
            if (group is None):
                raise CommandError(('No such group: %s' % group_name))
        if (self.group is group):
            if (toggle and self.group.screen.previous_group):
                group = self.group.screen.previous_group
            else:
                return
        self.hide()
        if self.group:
            if self.group.screen:
                # Translate to coordinates relative to the old screen so the
                # window keeps its on-screen position on the new one.
                self.x -= self.group.screen.x
            self.group.remove(self)
        if (group.screen and (self.x < group.screen.x)):
            self.x += group.screen.x
        group.add(self)
        if switch_group:
            group.toscreen(toggle=toggle)

    _command()
    def match(self, match):
        # Delegate to the Match object; treat X errors as "no match".
        try:
            return match.compare(self)
        except (xcffib.xproto.WindowError, xcffib.xproto.AccessError):
            return False

    def handle_EnterNotify(self, e):
        # Pointer entered the window: optionally follow-mouse focus.
        hook.fire('client_mouse_enter', self)
        if self.qtile.config.follow_mouse_focus:
            if (self.group.current_window != self):
                self.group.focus(self, False)
            if (self.group.screen and (self.qtile.current_screen != self.group.screen)):
                self.qtile.focus_screen(self.group.screen.index, False)
        return True

    def handle_ButtonPress(self, e):
        self.qtile.core.focus_by_click(e, window=self)
        # Replay the click to the client so it still receives it.
        self.qtile.core.conn.conn.core.AllowEvents(xcffib.xproto.Allow.ReplayPointer, e.time)

    def handle_ConfigureRequest(self, e):
        # Ignore client configure requests while the user is dragging it.
        if (self.qtile._drag and (self.qtile.current_window == self)):
            return
        if getattr(self, 'floating', False):
            # Honor only the fields the client actually set in value_mask.
            cw = xcffib.xproto.ConfigWindow
            width = (e.width if (e.value_mask & cw.Width) else self.width)
            height = (e.height if (e.value_mask & cw.Height) else self.height)
            x = (e.x if (e.value_mask & cw.X) else self.x)
            y = (e.y if (e.value_mask & cw.Y) else self.y)
        else:
            # Tiled windows keep their layout-assigned geometry.
            (width, height, x, y) = (self.width, self.height, self.x, self.y)
        if (self.group and self.group.screen):
            self.place(x, y, width, height, self.borderwidth, self.bordercolor)
        self.update_state()
        return False

    def update_wm_net_icon(self):
        """Decode _NET_WM_ICON into per-size ARGB byte arrays in self.icons."""
        icon = self.window.get_property('_NET_WM_ICON', 'CARDINAL')
        if (not icon):
            return
        icon = list(map(ord, icon.value))
        icons = {}
        # The property is a sequence of (width, height, pixels...) records.
        while True:
            if (not icon):
                break
            size = icon[:8]
            if ((len(size) != 8) or (not size[0]) or (not size[4])):
                break
            icon = icon[8:]
            width = size[0]
            height = size[4]
            next_pix = ((width * height) * 4)
            data = icon[:next_pix]
            arr = array.array('B', data)
            # Premultiply RGB by the alpha channel.
            for i in range(0, len(arr), 4):
                mult = (arr[(i + 3)] / 255.0)
                arr[(i + 0)] = int((arr[(i + 0)] * mult))
                arr[(i + 1)] = int((arr[(i + 1)] * mult))
                arr[(i + 2)] = int((arr[(i + 2)] * mult))
            icon = icon[next_pix:]
            icons[('%sx%s' % (width, height))] = arr
        self.icons = icons
        hook.fire('net_wm_icon_change', self)

    def handle_ClientMessage(self, event):
        """Dispatch EWMH/ICCCM client messages (_NET_WM_STATE changes,
        activation, close and WM_CHANGE_STATE requests)."""
        atoms = self.qtile.core.conn.atoms
        opcode = event.type
        data = event.data
        if (atoms['_NET_WM_STATE'] == opcode):
            prev_state = self.window.get_property('_NET_WM_STATE', 'ATOM', unpack=int)
            current_state = set(prev_state)
            action = data.data32[0]
            # Up to two properties may be changed per message.
            for prop in (data.data32[1], data.data32[2]):
                if (not prop):
                    continue
                if (action == _NET_WM_STATE_REMOVE):
                    current_state.discard(prop)
                elif (action == _NET_WM_STATE_ADD):
                    current_state.add(prop)
                elif (action == _NET_WM_STATE_TOGGLE):
                    current_state ^= set([prop])
            self.window.set_property('_NET_WM_STATE', list(current_state))
        elif (atoms['_NET_ACTIVE_WINDOW'] == opcode):
            source = data.data32[0]
            if (source == 2):
                # Source 2 = direct user action (pager): always focus.
                logger.debug('Focusing window by pager')
                self.qtile.current_screen.set_group(self.group)
                self.group.focus(self)
            else:
                # Application request: behavior is configurable.
                focus_behavior = self.qtile.config.focus_on_window_activation
                if (focus_behavior == 'focus'):
                    logger.debug('Focusing window')
                    self.qtile.current_screen.set_group(self.group)
                    self.group.focus(self)
                elif (focus_behavior == 'smart'):
                    if (not self.group.screen):
                        logger.debug("Ignoring focus request (focus_on_window_activation='smart')")
                        return
                    if (self.group.screen == self.qtile.current_screen):
                        logger.debug('Focusing window')
                        self.qtile.current_screen.set_group(self.group)
                        self.group.focus(self)
                    else:
                        logger.debug('Setting urgent flag for window')
                        self.urgent = True
                elif (focus_behavior == 'urgent'):
                    logger.debug('Setting urgent flag for window')
                    self.urgent = True
                elif (focus_behavior == 'never'):
                    logger.debug("Ignoring focus request (focus_on_window_activation='never')")
                else:
                    logger.debug('Invalid value for focus_on_window_activation: %s', focus_behavior)
        elif (atoms['_NET_CLOSE_WINDOW'] == opcode):
            self.kill()
        elif (atoms['WM_CHANGE_STATE'] == opcode):
            state = data.data32[0]
            if (state == NormalState):
                self.minimized = False
            elif ((state == IconicState) and self.qtile.config.auto_minimize):
                self.minimized = True
        else:
            logger.debug('Unhandled client message: %s', atoms.get_name(opcode))

    def handle_PropertyNotify(self, e):
        # React to client property changes; several are deliberately ignored.
        name = self.qtile.core.conn.atoms.get_name(e.atom)
        if (name == 'WM_TRANSIENT_FOR'):
            pass
        elif (name == 'WM_CLASS'):
            self.update_wm_class()
        elif (name == 'WM_HINTS'):
            self.update_hints()
        elif (name == 'WM_NORMAL_HINTS'):
            self.update_hints()
        elif (name == 'WM_NAME'):
            self.update_name()
        elif (name == '_NET_WM_NAME'):
            self.update_name()
        elif (name == '_NET_WM_VISIBLE_NAME'):
            self.update_name()
        elif (name == 'WM_ICON_NAME'):
            pass
        elif (name == '_NET_WM_ICON_NAME'):
            pass
        elif (name == '_NET_WM_ICON'):
            self.update_wm_net_icon()
        elif (name == 'ZOOM'):
            pass
        elif (name == '_NET_WM_WINDOW_OPACITY'):
            pass
        elif (name == 'WM_STATE'):
            pass
        elif (name == '_NET_WM_STATE'):
            self.update_state()
        elif (name == 'WM_PROTOCOLS'):
            pass
        elif (name == '_NET_WM_DESKTOP'):
            self.update_state()
        else:
            logger.debug('Unknown window property: %s', name)
        return False

    def _items(self, name: str) -> ItemT:
        # Navigation items for the command graph: group / layout / screen.
        if (name == 'group'):
            return (True, [])
        if (name == 'layout'):
            if self.group:
                return (True, list(range(len(self.group.layouts))))
            return None
        if (name == 'screen'):
            if (self.group and self.group.screen):
                return (True, [])
            return None

    def _select(self, name, sel):
        # Resolve a command-graph selector to the actual object.
        if (name == 'group'):
            return self.group
        elif (name == 'layout'):
            if (sel is None):
                return self.group.layout
            else:
                return utils.lget(self.group.layouts, sel)
        elif (name == 'screen'):
            return self.group.screen

    _command()
    def move_floating(self, dx, dy):
        self.tweak_float(dx=dx, dy=dy)

    _command()
    def resize_floating(self, dw, dh):
        self.tweak_float(dw=dw, dh=dh)

    _command()
    def set_position_floating(self, x, y):
        self.tweak_float(x=x, y=y)

    _command()
    def set_size_floating(self, w, h):
        self.tweak_float(w=w, h=h)

    _command()
    def enable_floating(self):
        self.floating = True

    _command()
    def disable_floating(self):
        self.floating = False

    _command()
    def toggle_maximize(self):
        self.maximized = (not self.maximized)

    _command()
    def toggle_fullscreen(self):
        self.fullscreen = (not self.fullscreen)

    _command()
    def enable_fullscreen(self):
        self.fullscreen = True

    _command()
    def disable_fullscreen(self):
        self.fullscreen = False

    _command()
    def bring_to_front(self):
        # Desktop-type windows stay at the bottom by convention.
        if (self.get_wm_type() != 'desktop'):
            self.window.configure(stackmode=xcffib.xproto.StackMode.Above)
            self.raise_children()
            self.qtile.core.update_client_lists()

    def _is_in_window(self, x, y, window):
        # True when point (x, y) lies within *window*'s edges.
        return ((window.edges[0] <= x <= window.edges[2]) and (window.edges[1] <= y <= window.edges[3]))

    _command()
    def set_position(self, x, y):
        # Floating windows move directly; tiled windows swap with whichever
        # tiled window is currently under the mouse pointer.
        if self.floating:
            self.tweak_float(x, y)
            return
        (curx, cury) = self.qtile.core.get_mouse_position()
        for window in self.group.windows:
            if ((window == self) or window.floating):
                continue
            if self._is_in_window(curx, cury, window):
                self.group.layout.swap(self, window)
                return

    _command
    def focus(self, warp: bool=True) -> None:
        _Window.focus(self, warp)
        # Re-layer a fullscreen window unless it was already at its previous layer.
        if (self.fullscreen and (not self.previous_layer[4])):
            self.change_layer()
def make_solver(iter_idx, output_dir):
    """Build the Caffe solver prototxt text for NYU Pose-REN stage *iter_idx*.

    Args:
        iter_idx: training stage index, substituted into the net / snapshot names.
        output_dir: root directory holding the model/ and snapshot/ subdirs.

    Returns:
        The complete solver definition as a single newline-joined string.
    """
    template_lines = (
        'net: "{0}/model/train_nyu_pose_ren_s{1}.prototxt"',
        'test_iter: 64',
        'test_interval: 1000',
        'base_lr: 0.001',
        'lr_policy: "step"',
        'gamma: 0.1',
        'stepsize: 40000',
        'display: 100',
        'max_iter: 160000',
        'momentum: 0.9',
        'weight_decay: 0.0005',
        'snapshot: 40000',
        'snapshot_prefix: "{0}/snapshot/nyu_pose_ren_s{1}"',
    )
    return '\n'.join(template_lines).format(output_dir, iter_idx)
def reorder_items(items: Sequence[nodes.Item]) -> List[nodes.Item]:
    """Reorder *items* so tests sharing parametrized fixture argkeys of high
    scopes end up adjacent, minimizing fixture setup/teardown churn."""
    # Per-scope caches: for each item the ordered set of its parametrized
    # argkeys (an ordered set is emulated with a dict of None values), and for
    # each argkey the deque of items using it.
    argkeys_cache: Dict[(Scope, Dict[(nodes.Item, Dict[(FixtureArgKey, None)])])] = {}
    items_by_argkey: Dict[(Scope, Dict[(FixtureArgKey, Deque[nodes.Item])])] = {}
    for scope in HIGH_SCOPES:
        scoped_argkeys_cache = argkeys_cache[scope] = {}
        scoped_items_by_argkey = items_by_argkey[scope] = defaultdict(deque)
        for item in items:
            # dict.fromkeys preserves insertion order and deduplicates keys.
            keys = dict.fromkeys(get_parametrized_fixture_keys(item, scope), None)
            if keys:
                scoped_argkeys_cache[item] = keys
                for key in keys:
                    scoped_items_by_argkey[key].append(item)
    # Ordered set of all items; regroup recursively starting at session scope.
    items_dict = dict.fromkeys(items, None)
    return list(reorder_items_atscope(items_dict, argkeys_cache, items_by_argkey, Scope.Session))
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.