@attr.frozen(cache_hash=True)
class ReflectionUsingPrepare(GateWithRegisters):
prepare_gate: PrepareOracle
control_val: Optional[int] = None
    @cached_property
    def control_registers(self) -> Tuple[Register, ...]:
return (() if (self.control_val is None) else (Register('control', 1),))
    @cached_property
    def selection_registers(self) -> Tuple[SelectionRegister, ...]:
return self.prepare_gate.selection_registers
    @cached_property
def signature(self) -> Signature:
return Signature([*self.control_registers, *self.selection_registers])
def decompose_from_registers(self, context: cirq.DecompositionContext, **quregs: NDArray[cirq.Qid]) -> cirq.OP_TREE:
qm = context.qubit_manager
phase_target = (qm.qalloc(1)[0] if (self.control_val is None) else quregs.pop('control')[0])
state_prep_ancilla = {reg.name: qm.qalloc(reg.total_bits()) for reg in self.prepare_gate.junk_registers}
state_prep_selection_regs = quregs
prepare_op = self.prepare_gate.on_registers(**state_prep_selection_regs, **state_prep_ancilla)
(yield cirq.inverse(prepare_op))
phase_control = merge_qubits(self.selection_registers, **state_prep_selection_regs)
(yield (cirq.X(phase_target) if (not self.control_val) else []))
(yield MultiControlPauli(([0] * len(phase_control)), target_gate=cirq.Z).on_registers(controls=phase_control, target=phase_target))
(yield (cirq.X(phase_target) if (not self.control_val) else []))
(yield prepare_op)
qm.qfree([q for anc in state_prep_ancilla.values() for q in anc])
if (self.control_val is None):
qm.qfree([phase_target])
def _circuit_diagram_info_(self, args: cirq.CircuitDiagramInfoArgs) -> cirq.CircuitDiagramInfo:
        wire_symbols = ['@' if self.control_val else '@(0)'] * total_bits(self.control_registers)
wire_symbols += (['R_L'] * total_bits(self.selection_registers))
return cirq.CircuitDiagramInfo(wire_symbols=wire_symbols)
    def controlled(self, num_controls: Optional[int] = None, control_values: Optional[Union[cirq.ops.AbstractControlValues, Sequence[Union[int, Collection[int]]]]] = None, control_qid_shape: Optional[Tuple[int, ...]] = None) -> 'ReflectionUsingPrepare':
if (num_controls is None):
num_controls = 1
if (control_values is None):
control_values = ([1] * num_controls)
if (isinstance(control_values, Sequence) and isinstance(control_values[0], int) and (len(control_values) == 1) and (self.control_val is None)):
return ReflectionUsingPrepare(self.prepare_gate, control_val=control_values[0])
        raise NotImplementedError(f'Cannot create a controlled version of {self} with control_values={control_values}.')

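# Usage sketch (hedged, not part of the snippet above): given some PrepareOracle
# instance `prepare` that maps |0...0> to |L>, the gate reflects the selection
# registers about |L>; passing control_val=1 yields a singly-controlled version.
# reflection = ReflectionUsingPrepare(prepare, control_val=1)
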
def _check_const_name(node_type: str, name: str) -> List[str]:
error_msgs = []
if (not _is_in_upper_case_with_underscores(name)):
msg = f'{node_type.capitalize()} name "{name}" should be in UPPER_CASE_WITH_UNDERSCORES format. Constants should be all-uppercase words with each word separated by an underscore. A single leading underscore can be used to denote a private constant.'
if (node_type == 'class constant'):
msg += " A double leading underscore invokes Python's name-mangling rules."
error_msgs.append(msg)
    return error_msgs

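# A minimal sketch of the naming predicate assumed above (its real definition is
# not part of this snippet): UPPER_CASE_WITH_UNDERSCORES, with at most one
# leading underscore for private constants.
import re

def _is_in_upper_case_with_underscores(name: str) -> bool:
    return re.fullmatch(r'_?[A-Z0-9]+(?:_[A-Z0-9]+)*', name) is not None
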
@pytest.mark.parametrize('width, height, minsize, expected', [(256, 256, 256, 0), (257, 257, 256, 1), (1000, 1000, 128, 3), (1000, 100, 128, 0)])
def test_max_overview(width, height, minsize, expected):
overview_level = get_maximum_overview_level(width, height, minsize)
    assert (overview_level == expected)

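# A plausible implementation of the function under test, reconstructed from the
# parametrized cases above (treat as a sketch, not the actual library code):
# halve the smaller image dimension until it no longer exceeds `minsize`.
def get_maximum_overview_level_sketch(width: int, height: int, minsize: int) -> int:
    overview_level = 0
    overview_factor = 1
    while min(width // overview_factor, height // overview_factor) > minsize:
        overview_factor *= 2
        overview_level += 1
    return overview_level
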
class ProjectQuerySet(TreeQuerySet):
def filter_current_site(self):
return self.filter(site=settings.SITE_ID)
def filter_user(self, user):
if user.is_authenticated:
if user.has_perm('projects.view_project'):
return self.all()
elif is_site_manager(user):
return self.filter_current_site()
else:
queryset = self.filter(user=user)
for instance in queryset:
queryset |= instance.get_descendants()
return queryset.distinct()
else:
            return self.none()

def get_target(args):
target = Image.open(args.target)
if (target.mode == 'RGBA'):
new_image = Image.new('RGBA', target.size, 'WHITE')
new_image.paste(target, (0, 0), target)
target = new_image
target = target.convert('RGB')
(masked_im, mask) = utils.get_mask_u2net(args, target)
if args.mask_object:
target = masked_im
if args.fix_scale:
target = utils.fix_image_scale(target)
transforms_ = []
if (target.size[0] != target.size[1]):
transforms_.append(transforms.Resize((args.image_scale, args.image_scale), interpolation=PIL.Image.BICUBIC))
else:
transforms_.append(transforms.Resize(args.image_scale, interpolation=PIL.Image.BICUBIC))
transforms_.append(transforms.CenterCrop(args.image_scale))
transforms_.append(transforms.ToTensor())
data_transforms = transforms.Compose(transforms_)
target_ = data_transforms(target).unsqueeze(0).to(args.device)
    return (target_, mask)

class EditIntWindow(sd.Dialog):
def __init__(self, parent, configitem, current):
self.parent = parent
self.config_item = configitem
self.current = current
sd.Dialog.__init__(self, parent, 'Edit integer configuration')
def body(self, master):
self.configure(background=GetBackground())
        label = f"{self.config_item['name']} Max = {self.config_item['max']} Min = {self.config_item['min']}"
        ttk.Label(self, text=label).pack()
self.input = tk.Entry(self)
self.input.pack(pady=4)
self.input.insert(0, self.current)
ttk.Button(self, text=CONFIG_UNSET, command=self.unset).pack(pady=5)
def validate(self):
self.result = self.input.get()
return True
def unset(self):
self.result = CONFIG_UNSET
self.destroy()
def get(self):
        return self.result

@pytest.mark.skipif((not _has_h5py), reason='h5py not found.')
class TestH5Serialization():
    @classmethod
    def worker(cls, cyberbliptronics, q1, q2):
assert isinstance(cyberbliptronics, PersistentTensorDict)
assert cyberbliptronics.file.filename.endswith('groups.hdf5')
q1.put(cyberbliptronics['Base_Group'][('Sub_Group',)])
assert (q2.get(timeout=TIMEOUT) == 'checked')
val = (cyberbliptronics[('Base_Group', 'Sub_Group', 'default')] + 1)
q1.put(val)
assert (q2.get(timeout=TIMEOUT) == 'checked')
q1.close()
q2.close()
def test_h5_serialization(self, tmp_path):
arr = np.random.randn(1000)
fn = (tmp_path / 'groups.hdf5')
with h5py.File(fn, 'w') as f:
g = f.create_group('Base_Group')
gg = g.create_group('Sub_Group')
_ = g.create_dataset('default', data=arr)
_ = gg.create_dataset('default', data=arr)
persistent_td = PersistentTensorDict(filename=fn, batch_size=[])
q1 = mp.Queue(1)
q2 = mp.Queue(1)
p = mp.Process(target=self.worker, args=(persistent_td, q1, q2))
p.start()
try:
val = q1.get(timeout=TIMEOUT)
assert (torch.tensor(arr) == val['default']).all()
q2.put('checked')
val = q1.get(timeout=TIMEOUT)
assert ((torch.tensor(arr) + 1) == val).all()
q2.put('checked')
q1.close()
q2.close()
finally:
            p.join()

@bp.route('/json/save_config', methods=['POST'], endpoint='save_config')
@login_required('SETTINGS')
def save_config():
api = flask.current_app.config['PYLOAD_API']
category = flask.request.args.get('category')
if (category not in ('core', 'plugin')):
return (jsonify(False), 500)
for (key, value) in flask.request.form.items():
try:
(section, option) = key.split('|')
except Exception:
continue
api.set_config_value(section, option, value, category)
    return jsonify(True)

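# Illustrative request payload for the endpoint above (section and option names
# are hypothetical): each form key is "<section>|<option>", e.g.
#   POST /json/save_config?category=core
#   form data: {"download|max_downloads": "3", "general|language": "en"}
# Keys without the "|" separator fail the split and are silently skipped.
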
class NominationViewSet(CreateModelMixin, RetrieveModelMixin, ListModelMixin, GenericViewSet):
serializer_class = NominationSerializer
queryset = Nomination.objects.all().prefetch_related('entries')
filter_backends = (DjangoFilterBackend, SearchFilter, OrderingFilter)
filterset_fields = ('user__id', 'active')
frozen_on_create = ('ended_at', 'end_reason', 'active', 'inserted_at', 'reviewed')
def create(self, request: HttpRequest, *args, **kwargs) -> Response:
for field in request.data:
if (field in self.frozen_on_create):
raise ValidationError({field: ['This field cannot be set at creation.']})
user_id = request.data.get('user')
nomination_filter = Nomination.objects.filter(active=True, user__id=user_id)
if (not nomination_filter.exists()):
serializer = NominationSerializer(data=ChainMap(request.data, {'active': True}))
serializer.is_valid(raise_exception=True)
nomination = Nomination.objects.create(**serializer.validated_data)
entry_serializer = NominationEntrySerializer(data=ChainMap(request.data, {'nomination': nomination.id}))
entry_serializer.is_valid(raise_exception=True)
NominationEntry.objects.create(**entry_serializer.validated_data)
data = NominationSerializer(nomination).data
headers = self.get_success_headers(data)
return Response(data, status=status.HTTP_201_CREATED, headers=headers)
entry_serializer = NominationEntrySerializer(data=ChainMap(request.data, {'nomination': nomination_filter[0].id}))
entry_serializer.is_valid(raise_exception=True)
if NominationEntry.objects.filter(nomination_id=nomination_filter[0].id, actor__id=entry_serializer.validated_data['actor'].id).exists():
raise ValidationError({'actor': ['This actor has already endorsed this nomination.']})
NominationEntry.objects.create(**entry_serializer.validated_data)
data = NominationSerializer(nomination_filter[0]).data
headers = self.get_success_headers(data)
return Response(data, status=status.HTTP_201_CREATED, headers=headers)
def partial_update(self, request: HttpRequest, *args, **kwargs) -> Response:
instance = self.get_object()
serializer = self.get_serializer(instance, data=request.data, partial=True)
serializer.is_valid(raise_exception=True)
data = serializer.validated_data
if (instance.active and ('active' not in data)):
if ('end_reason' in data):
raise ValidationError({'end_reason': ["An active nomination can't have an end reason."]})
elif (instance.active and (not data['active'])):
if ('reason' in request.data):
raise ValidationError({'reason': ['This field cannot be set when ending a nomination.']})
if ('end_reason' not in request.data):
raise ValidationError({'end_reason': ['This field is required when ending a nomination.']})
if ('reviewed' in request.data):
raise ValidationError({'reviewed': ['This field cannot be set while you are ending a nomination.']})
if ('thread_id' in request.data):
raise ValidationError({'thread_id': ['This field cannot be set when ending a nomination.']})
instance.ended_at = timezone.now()
elif ('active' in data):
raise ValidationError({'active': ['This field can only be used to end a nomination']})
elif ((not instance.active) and ('reviewed' in request.data)):
raise ValidationError({'reviewed': ['This field cannot be set if the nomination is inactive.']})
elif ((not instance.active) and ('thread_id' in request.data)):
raise ValidationError({'thread_id': ['This field cannot be set if the nomination is inactive.']})
if ('reason' in request.data):
if ('actor' not in request.data):
raise ValidationError({'actor': ['This field is required when editing the reason.']})
entry_filter = NominationEntry.objects.filter(nomination_id=instance.id, actor__id=request.data['actor'])
if (not entry_filter.exists()):
raise ValidationError({'actor': ["The actor doesn't exist or has not nominated the user."]})
entry = entry_filter[0]
entry.reason = request.data['reason']
entry.save()
serializer.save()
        return Response(serializer.data)

def install_minimum(c):
with open('setup.py', 'r') as setup_py:
lines = setup_py.read().splitlines()
versions = []
started = False
for line in lines:
if started:
if (line == ']'):
started = False
continue
line = line.strip()
if _validate_python_version(line):
requirement = re.match('[^>]*', line).group(0)
requirement = re.sub('[\'",]', '', requirement)
version = re.search('>=?[^(,|#)]*', line).group(0)
if version:
version = re.sub('>=?', '==', version)
version = re.sub('[\'",]', '', version)
requirement += version
versions.append(requirement)
elif (line.startswith('install_requires = [') or line.startswith('pomegranate_requires = [')):
started = True
c.run(f"python -m pip install {' '.join(versions)}") |
@pytest.mark.skipif((not HAVE_DEPS_FOR_RESOURCE_ESTIMATES), reason='pyscf and/or jax not installed.')
def test_generate_costing_table_df():
mf = make_diamond_113_szv()
thresh = np.array([0.1, 0.01, 1e-14])
table = generate_costing_table(mf, cutoffs=thresh, chi=10, beta=22, dE_for_qpe=0.001)
assert np.allclose(table.dE, 0.001)
assert np.allclose(table.chi, 10)
assert np.allclose(table.beta, 22)
assert np.allclose(table.cutoff, thresh)
assert np.allclose(table.num_aux, ([648] * 3))
    assert np.isclose(table.approx_energy.values[2], table.exact_energy.values[0])

class QUBEKitHandler(vdWHandler):
hfree = ParameterAttribute((0 * unit.angstroms), unit=unit.angstroms)
xfree = ParameterAttribute((0 * unit.angstroms), unit=unit.angstroms)
cfree = ParameterAttribute((0 * unit.angstroms), unit=unit.angstroms)
nfree = ParameterAttribute((0 * unit.angstroms), unit=unit.angstroms)
ofree = ParameterAttribute((0 * unit.angstroms), unit=unit.angstroms)
clfree = ParameterAttribute((0 * unit.angstroms), unit=unit.angstroms)
sfree = ParameterAttribute((0 * unit.angstroms), unit=unit.angstroms)
ffree = ParameterAttribute((0 * unit.angstroms), unit=unit.angstroms)
brfree = ParameterAttribute((0 * unit.angstroms), unit=unit.angstroms)
pfree = ParameterAttribute((0 * unit.angstroms), unit=unit.angstroms)
ifree = ParameterAttribute((0 * unit.angstroms), unit=unit.angstroms)
bfree = ParameterAttribute((0 * unit.angstroms), unit=unit.angstroms)
sifree = ParameterAttribute((0 * unit.angstroms), unit=unit.angstroms)
alpha = ParameterAttribute(1)
beta = ParameterAttribute(0)
lj_on_polar_h = ParameterAttribute(default='True', converter=_allow_only(['True', 'False']))
class QUBEKitvdWType(ParameterType):
_VALENCE_TYPE = 'Atom'
_ELEMENT_NAME = 'Atom'
name = ParameterAttribute(default=None)
volume = IndexedParameterAttribute(unit=(unit.bohr ** 3))
def __init__(self, **kwargs):
super().__init__(**kwargs)
(unique_tags, connectivity) = GLOBAL_TOOLKIT_REGISTRY.call('get_tagged_smarts_connectivity', self.smirks)
if (len(self.volume) != len(unique_tags)):
raise SMIRNOFFSpecError(f'QUBEKitHandler {self} was initialized with unequal number of tagged atoms and volumes')
_TAGNAME = 'QUBEKitvdWTS'
_INFOTYPE = QUBEKitvdWType
_DEPENDENCIES = [vdWHandler, ElectrostaticsHandler]
def create_force(self, system, topology, **kwargs):
force = _get_nonbonded_force(system=system, topology=topology)
if (self.method == 'PME'):
if (topology.box_vectors is None):
force.setNonbondedMethod(openmm.NonbondedForce.NoCutoff)
else:
force.setNonbondedMethod(openmm.NonbondedForce.LJPME)
force.setCutoffDistance(self.cutoff)
force.setEwaldErrorTolerance(0.0001)
elif (self.method == 'cutoff'):
if (topology.box_vectors is None):
force.setNonbondedMethod(openmm.NonbondedForce.NoCutoff)
else:
force.setNonbondedMethod(openmm.NonbondedForce.PME)
force.setUseDispersionCorrection(True)
force.setCutoffDistance(self.cutoff)
        lj = LennardJones612(
            free_parameters={
                'H': h_base(r_free=self.hfree.value_in_unit(unit.angstroms)),
                'C': c_base(r_free=self.cfree.value_in_unit(unit.angstroms)),
                'X': h_base(r_free=self.xfree.value_in_unit(unit.angstroms)),
                'O': o_base(r_free=self.ofree.value_in_unit(unit.angstroms)),
                'N': n_base(r_free=self.nfree.value_in_unit(unit.angstroms)),
                'Cl': cl_base(r_free=self.clfree.value_in_unit(unit.angstroms)),
                'S': s_base(r_free=self.sfree.value_in_unit(unit.angstroms)),
                'F': f_base(r_free=self.ffree.value_in_unit(unit.angstroms)),
                'Br': br_base(r_free=self.brfree.value_in_unit(unit.angstroms)),
                'I': i_base(r_free=self.ifree.value_in_unit(unit.angstroms)),
                'P': p_base(r_free=self.pfree.value_in_unit(unit.angstroms)),  # was self.ifree, a likely copy-paste slip
                'B': b_base(r_free=self.bfree.value_in_unit(unit.angstroms)),
                'Si': si_base(r_free=self.sifree.value_in_unit(unit.angstroms)),
            },
            alpha=self.alpha,
            beta=self.beta,
            lj_on_polar_h=self.lj_on_polar_h,
        )
water = Molecule.from_smiles('O')
for ref_mol in topology.reference_molecules:
if (ref_mol == water):
continue
if (ref_mol.n_conformers == 0):
ref_mol.generate_conformers(n_conformers=1)
qb_mol = Ligand.from_rdkit(ref_mol.to_rdkit())
for parameter in self.parameters:
matches = ref_mol.chemical_environment_matches(parameter.smirks, unique=True)
if (matches and (len(matches[0]) != qb_mol.n_atoms)):
raise SMIRNOFFSpecError(f'Parameter {parameter.smirks} matched with {ref_mol} but the whole molecule was not covered!')
if matches:
for atom in matches[0]:
qb_mol.atoms[atom].aim.volume = parameter.volume[atom].value_in_unit((unit.bohr ** 3))
for i in range(qb_mol.n_atoms):
atom = qb_mol.atoms[i]
assert (atom.aim.volume is not None)
qb_mol.NonbondedForce.create_parameter(atoms=(i,), charge=0, sigma=0, epsilon=0)
lj.run(qb_mol)
for topology_molecule in topology._reference_molecule_to_topology_molecules[ref_mol]:
for topology_particle in topology_molecule.atoms:
if (type(topology_particle) is TopologyAtom):
ref_mol_particle_index = topology_particle.atom.molecule_particle_index
elif (type(topology_particle) is TopologyVirtualSite):
ref_mol_particle_index = topology_particle.virtual_site.molecule_particle_index
else:
raise ValueError(f'Particles of type {type(topology_particle)} are not supported')
topology_particle_index = topology_particle.topology_particle_index
particle_parameters = qb_mol.NonbondedForce[(ref_mol_particle_index,)]
(charge, _, _) = force.getParticleParameters(topology_particle_index)
                    force.setParticleParameters(topology_particle_index, charge, particle_parameters.sigma, particle_parameters.epsilon)

def load_the_parser(parser_module_name):
logger.debug('starting')
parser_module = pypyr.moduleloader.get_module(parser_module_name)
logger.debug('context parser module found: %s', parser_module_name)
try:
get_parsed_context = getattr(parser_module, 'get_parsed_context')
except AttributeError:
logger.error("The context parser %s doesn't have a get_parsed_context(context_arg): function.", parser_module_name)
raise
logger.debug('done')
    return get_parsed_context

@pytest.mark.parametrize('locale', ('ru', 'pl'))
def test_gettext_compilation(locale):
    rules = localedata.load(locale)['plural_form'].rules
    chars = 'ivwft'
    assert any((f' {ch} ' in rule) for ch in chars for rule in rules.values())
    rules_gettext = plural.to_gettext(rules)
    assert not any((ch in rules_gettext) for ch in chars)

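# Background for the assertion above (hedged illustration): CLDR plural rules
# reference operands such as `i` (integer digits) and `v` (visible fraction
# digits), while to_gettext() must compile them down to a C-style expression
# over the single gettext variable `n`, so none of those operand letters may
# survive in the compiled output.
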
def _migrate_v47(preset: dict) -> dict:
if (preset['game'] == 'prime1'):
preset['configuration'].pop('deterministic_idrone')
preset['configuration'].pop('deterministic_maze')
preset['configuration'].pop('qol_game_breaking')
preset['configuration'].pop('qol_pickup_scans')
preset['configuration'].pop('heat_protection_only_varia')
preset['configuration']['legacy_mode'] = False
    return preset

class SOCKETCALL(IntEnum):
SYS_SOCKET = 1
SYS_BIND = 2
SYS_CONNECT = 3
SYS_LISTEN = 4
SYS_ACCEPT = 5
SYS_GETSOCKNAME = 6
SYS_GETPEERNAME = 7
SYS_SOCKETPAIR = 8
SYS_SEND = 9
SYS_RECV = 10
SYS_SENDTO = 11
SYS_RECVFROM = 12
SYS_SHUTDOWN = 13
SYS_SETSOCKOPT = 14
SYS_GETSOCKOPT = 15
SYS_SENDMSG = 16
SYS_RECVMSG = 17
SYS_ACCEPT4 = 18
SYS_RECVMMSG = 19
    SYS_SENDMMSG = 20

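# Usage example: decode the multiplex number passed to the legacy socketcall(2)
# syscall into its symbolic name.
assert SOCKETCALL(2) is SOCKETCALL.SYS_BIND
assert SOCKETCALL['SYS_CONNECT'].value == 3
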
def Gen_I(FP, ItemS):
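    # Apriori-style candidate generation (a hedged reading of this code): join
    # each ordered pair of frequent itemsets' first elements into a 2-element
    # candidate p, count its support with GetUnit, keep it only if the count
    # reaches the global minsup threshold, and let Join_I recurse on survivors.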
global CanNum
Item = FP[:]
ExpSet = []
for i in range(len(Item)):
for j in range((i + 1), len(Item)):
pre = Item[i]
suf = Item[j]
p = []
p.append(pre[0])
p.append(suf[0])
CanNum += 1
(count, ItemS[str(p)]) = GetUnit(p)
if (count >= int(minsup)):
FP.append(p)
ExpSet.append(p)
else:
del ItemS[str(p)]
    Join_I(FP, ExpSet, ItemS)

def train_generate_(dataset, batch_size, few, symbol2id, ent2id, e1rel_e2, num_neg=1):
logging.info('LOADING TRAINING DATA')
train_tasks = json.load(open((dataset + '/train_tasks.json')))
logging.info('LOADING CANDIDATES')
rel2candidates = json.load(open((dataset + '/rel2candidates.json')))
task_pool = list(train_tasks.keys())
num_tasks = len(task_pool)
rel_idx = 0
while True:
if ((rel_idx % num_tasks) == 0):
random.shuffle(task_pool)
query = task_pool[(rel_idx % num_tasks)]
rel_idx += 1
candidates = rel2candidates[query]
train_and_test = train_tasks[query]
random.shuffle(train_and_test)
support_triples = train_and_test[:few]
support_pairs = [[symbol2id[triple[0]], symbol2id[triple[2]]] for triple in support_triples]
support_left = [ent2id[triple[0]] for triple in support_triples]
support_right = [ent2id[triple[2]] for triple in support_triples]
all_test_triples = train_and_test[few:]
if (len(all_test_triples) < batch_size):
query_triples = [random.choice(all_test_triples) for _ in range(batch_size)]
else:
query_triples = random.sample(all_test_triples, batch_size)
query_pairs = [[symbol2id[triple[0]], symbol2id[triple[2]]] for triple in query_triples]
query_left = [ent2id[triple[0]] for triple in query_triples]
query_right = [ent2id[triple[2]] for triple in query_triples]
labels = ([1] * len(query_triples))
for triple in query_triples:
e_h = triple[0]
e_t = triple[2]
if (e_t in candidates):
candidates.remove(e_t)
if (len(candidates) >= num_neg):
noises = random.sample(candidates, num_neg)
else:
noises = candidates
for noise in noises:
query_pairs.append([symbol2id[e_h], symbol2id[noise]])
query_left.append(ent2id[e_h])
query_right.append(ent2id[noise])
labels.append(0)
        (yield (support_pairs, query_pairs, support_left, support_right, query_left, query_right, labels))

class Test_ChangeAttributes(unittest.TestCase):
def setUp(self):
self.s = serial.serial_for_url(PORT, do_not_open=True)
def tearDown(self):
self.s.close()
def test_PortSetting(self):
self.s.port = PORT
self.assertEqual(self.s.portstr.lower(), PORT.lower())
self.assertEqual(self.s._port, PORT)
self.s.open()
self.assertTrue(self.s.isOpen())
def test_DoubleOpen(self):
self.s.open()
self.assertRaises(serial.SerialException, self.s.open)
def test_BaudrateSetting(self):
self.s.open()
for baudrate in (300, 9600, 19200, 115200):
self.s.baudrate = baudrate
self.assertEqual(self.s.baudrate, baudrate)
self.assertEqual(self.s._baudrate, baudrate)
for illegal_value in ((- 300), (- 1), 'a', None):
self.assertRaises(ValueError, setattr, self.s, 'baudrate', illegal_value)
def disabled_test_BaudrateSetting2(self):
self.s.open()
for illegal_value in (500000, 576000, 921600, 92160):
self.assertRaises(ValueError, setattr, self.s, 'baudrate', illegal_value)
def test_BytesizeSetting(self):
for bytesize in (5, 6, 7, 8):
self.s.bytesize = bytesize
self.assertEqual(self.s.bytesize, bytesize)
self.assertEqual(self.s._bytesize, bytesize)
for illegal_value in (0, 1, 3, 4, 9, 10, 'a', None):
self.assertRaises(ValueError, setattr, self.s, 'bytesize', illegal_value)
def test_ParitySetting(self):
for parity in (serial.PARITY_NONE, serial.PARITY_EVEN, serial.PARITY_ODD):
self.s.parity = parity
self.assertEqual(self.s.parity, parity)
self.assertEqual(self.s._parity, parity)
for illegal_value in (0, 57, 'a', None):
self.assertRaises(ValueError, setattr, self.s, 'parity', illegal_value)
def test_StopbitsSetting(self):
for stopbits in (1, 2):
self.s.stopbits = stopbits
self.assertEqual(self.s.stopbits, stopbits)
self.assertEqual(self.s._stopbits, stopbits)
for illegal_value in (0, 3, 2.5, 57, 'a', None):
self.assertRaises(ValueError, setattr, self.s, 'stopbits', illegal_value)
def test_TimeoutSetting(self):
for timeout in (None, 0, 1, 3.14159, 10, 1000, 3600):
self.s.timeout = timeout
self.assertEqual(self.s.timeout, timeout)
self.assertEqual(self.s._timeout, timeout)
for illegal_value in ((- 1), 'a'):
self.assertRaises(ValueError, setattr, self.s, 'timeout', illegal_value)
def test_XonXoffSetting(self):
for xonxoff in (True, False):
self.s.xonxoff = xonxoff
self.assertEqual(self.s.xonxoff, xonxoff)
self.assertEqual(self.s._xonxoff, xonxoff)
def test_RtsCtsSetting(self):
for rtscts in (True, False):
self.s.rtscts = rtscts
self.assertEqual(self.s.rtscts, rtscts)
self.assertEqual(self.s._rtscts, rtscts)
def disabled_test_UnconfiguredPort(self):
self.assertRaises(serial.SerialException, self.s.open)
def test_PortOpenClose(self):
for i in range(3):
self.assertTrue((not self.s.isOpen()))
self.s.open()
self.assertTrue(self.s.isOpen())
self.s.close()
            self.assertTrue((not self.s.isOpen()))

class TestAccountOverviewView(TestCase):
def setUp(self):
self.email = ''
self.name = 'Test User'
self.user = get(get_user_model(), name=self.name, email=self.email)
self.url = reverse('account')
self.client.force_login(self.user)
def test_logged_out(self):
self.client.logout()
resp = self.client.get(self.url)
self.assertEqual(resp.status_code, 302)
self.assertTrue(resp['location'].startswith('/accounts/login/'))
def test_update(self):
resp = self.client.get(self.url)
self.assertEqual(resp.status_code, 200)
self.assertContains(resp, self.name)
new_name = 'New Name'
resp = self.client.post(self.url, {'name': new_name}, follow=True)
self.assertEqual(resp.status_code, 200)
self.assertContains(resp, 'Successfully updated your account.')
self.assertContains(resp, new_name)
self.assertNotContains(resp, self.name)
self.user.refresh_from_db()
        self.assertEqual(self.user.name, new_name)

class InteractionBroker(object):
def __init__(self, peer1, peer2, poll_interval=1):
self.peers = (peer1, peer2)
self.poll_interval = poll_interval
def interact(self):
directions = (self.peers, tuple(reversed(self.peers)))
while True:
start = time.time()
for direction in directions:
try:
received = direction[0].receive(timeout=0)
if (not received):
continue
direction[1].send(received)
except EOFError:
return
end = time.time()
if ((end - start) < self.poll_interval):
                time.sleep((self.poll_interval - (end - start)))

def test_plugin_does_not_interfere_with_doctest_collection(pytester: Pytester):
    pytester.makepyfile(dedent('def any_function():\n    """\n    >>> 42\n    42\n    """\n'))
result = pytester.runpytest('--asyncio-mode=strict', '--doctest-modules')
    result.assert_outcomes(passed=1)

class Migration(migrations.Migration):
dependencies = [('comms', '0010_auto__1912')]
    operations = [
        migrations.AlterField(model_name='channeldb', name='db_attributes', field=models.ManyToManyField(help_text='attributes on this object. An attribute can hold any pickle-able python object (see docs for special cases).', to='typeclasses.Attribute')),
        migrations.AlterField(model_name='channeldb', name='db_object_subscriptions', field=models.ManyToManyField(blank=True, db_index=True, related_name='object_subscription_set', to='objects.ObjectDB', verbose_name='subscriptions')),
        migrations.AlterField(model_name='channeldb', name='db_subscriptions', field=models.ManyToManyField(blank=True, db_index=True, related_name='subscription_set', to=settings.AUTH_USER_MODEL, verbose_name='subscriptions')),
        migrations.AlterField(model_name='channeldb', name='db_tags', field=models.ManyToManyField(help_text='tags on this object. Tags are simple string markers to identify, group and alias objects.', to='typeclasses.Tag')),
        migrations.AlterField(model_name='msg', name='db_hide_from_channels', field=models.ManyToManyField(blank=True, related_name='hide_from_channels_set', to='comms.ChannelDB')),
        migrations.AlterField(model_name='msg', name='db_hide_from_objects', field=models.ManyToManyField(blank=True, related_name='hide_from_objects_set', to='objects.ObjectDB')),
        migrations.AlterField(model_name='msg', name='db_hide_from_accounts', field=models.ManyToManyField(blank=True, related_name='hide_from_accounts_set', to=settings.AUTH_USER_MODEL)),
        migrations.AlterField(model_name='msg', name='db_receivers_channels', field=models.ManyToManyField(blank=True, help_text='channel receivers', related_name='channel_set', to='comms.ChannelDB')),
        migrations.AlterField(model_name='msg', name='db_receivers_objects', field=models.ManyToManyField(blank=True, help_text='object receivers', related_name='receiver_object_set', to='objects.ObjectDB')),
        migrations.AlterField(model_name='msg', name='db_receivers_accounts', field=models.ManyToManyField(blank=True, help_text='account receivers', related_name='receiver_account_set', to=settings.AUTH_USER_MODEL)),
        migrations.AlterField(model_name='msg', name='db_sender_objects', field=models.ManyToManyField(blank=True, db_index=True, related_name='sender_object_set', to='objects.ObjectDB', verbose_name='sender(object)')),
        migrations.AlterField(model_name='msg', name='db_sender_accounts', field=models.ManyToManyField(blank=True, db_index=True, related_name='sender_account_set', to=settings.AUTH_USER_MODEL, verbose_name='sender(account)')),
        migrations.AlterField(model_name='msg', name='db_tags', field=models.ManyToManyField(blank=True, help_text='tags on this message. Tags are simple string markers to identify, group and alias messages.', to='typeclasses.Tag')),
    ]

@uses(web_fixture=WebFixture)
class AddressAppFixture(Fixture):
def new_browser(self):
return Browser(self.web_fixture.new_wsgi_app(site_root=AddressBookUI))
def new_existing_address(self):
address = Address(name='John Doe', email_address='')
address.save()
return address
def is_on_home_page(self):
return (self.browser.title == 'Show')
def is_on_add_page(self):
return (self.browser.title == 'Add')
def is_on_edit_page_for(self, address):
return (self.browser.title == ('Edit %s' % address.name))
def address_is_listed_as(self, name, email_address):
        return self.browser.is_element_present(XPath.paragraph().including_text(('%s: %s' % (name, email_address))))

def test_default_sort_key(cmd2_app):
text = ''
line = 'test_sort_key {}'.format(text)
endidx = len(line)
begidx = (endidx - len(text))
cmd2_app.default_sort_key = cmd2.Cmd.ALPHABETICAL_SORT_KEY
expected = ['1', '11', '2']
first_match = complete_tester(text, line, begidx, endidx, cmd2_app)
assert ((first_match is not None) and (cmd2_app.completion_matches == expected))
cmd2_app.default_sort_key = cmd2.Cmd.NATURAL_SORT_KEY
expected = ['1', '2', '11']
first_match = complete_tester(text, line, begidx, endidx, cmd2_app)
    assert ((first_match is not None) and (cmd2_app.completion_matches == expected))

def get_data(stock_symbol, financial_metrics, source):
template = 'Between >>> and <<< are the content from HTML.\n The website contains company financial information.\n Extract the answer to the question \'{query}\' or say "not found" if the information is not contained\n Make sure to remove commas and include units when parsing numbers\n\n >>> {requests_result} <<<\n Use the format json format to return data:\n {{\n "Revenue": "10B"\n }}\n Extracted:<answer or "not found">\n '
PROMPT = PromptTemplate(input_variables=['query', 'requests_result'], template=template)
chain = LLMRequestsChain(llm_chain=LLMChain(llm=OpenAI(temperature=0, model_name='gpt-3.5-turbo'), prompt=PROMPT, verbose=True), requests_wrapper=requests_wrapper)
    if (source == 'Yahoo Finance'):
        # The original URLs were stripped from this snippet; the quote-page
        # patterns below are best-effort reconstructions.
        url = f'https://finance.yahoo.com/quote/{stock_symbol}'
    elif (source == 'MarketWatch'):
        url = f'https://www.marketwatch.com/investing/stock/{stock_symbol}'
query = f"what are the {','.join(financial_metrics)}?"
inputs = {'query': query, 'url': url}
chain_output = chain(inputs)
chain_output = json.loads(chain_output['output'].replace('\n', '').replace(' ', ''))
output = {'stock': stock_symbol}
output = {**output, **chain_output}
    return output

class BasisFamily():
def __init__(self, N):
self.N = N
self.nvars = None
self.coef_offset = [0]
self.coef_length = [N]
def __repr__(self):
return (f'<{self.__class__.__name__}: nvars={self.nvars}, ' + f'N={self.N}>')
def __call__(self, i, t, var=None):
return self.eval_deriv(i, 0, t, var=var)
def var_ncoefs(self, var):
return (self.N if (self.nvars is None) else self.coef_length[var])
def eval(self, coeffs, tlist, var=None):
        if ((self.nvars is None) and (var is not None)):
raise SystemError('multi-variable call to a scalar basis')
elif (self.nvars is None):
return [sum([(coeffs[i] * self(i, t)) for i in range(self.N)]) for t in tlist]
elif (var is None):
values = np.empty((self.nvars, tlist.size))
offset = 0
for j in range(self.nvars):
coef_len = self.var_ncoefs(j)
values[j] = np.array([sum([(coeffs[(offset + i)] * self(i, t, var=j)) for i in range(coef_len)]) for t in tlist])
offset += coef_len
return values
else:
return np.array([sum([(coeffs[i] * self(i, t, var=var)) for i in range(self.var_ncoefs(var))]) for t in tlist])
def eval_deriv(self, i, j, t, var=None):
        raise NotImplementedError('Internal error; improper basis functions')

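# A hypothetical minimal subclass illustrating the BasisFamily contract above:
# eval_deriv(i, j, t) returns the j-th time derivative of the i-th basis
# function. Here the basis is the power series b_i(t) = t**i.
import math

class PolyFamilySketch(BasisFamily):
    def eval_deriv(self, i, j, t, var=None):
        if j > i:
            return 0.0
        # d^j/dt^j t^i = i!/(i-j)! * t^(i-j)
        return (math.factorial(i) // math.factorial(i - j)) * t ** (i - j)
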
class TestInterpolated(BaseTestDistributionRandom):
def interpolated_rng_fn(self, size, mu, sigma, rng):
return st.norm.rvs(loc=mu, scale=sigma, size=size)
pymc_dist = pm.Interpolated
mu = sigma = 1
x_points = pdf_points = np.linspace(1, 100, 100)
pymc_dist_params = {'x_points': x_points, 'pdf_points': pdf_points}
reference_dist_params = {'mu': mu, 'sigma': sigma}
reference_dist = (lambda self: ft.partial(self.interpolated_rng_fn, rng=self.get_random_state()))
checks_to_run = ['check_rv_size', 'check_draws']
def check_draws(self):
for mu in R.vals:
for sigma in Rplus.vals:
rng = self.get_random_state()
def ref_rand(size):
return st.norm.rvs(loc=mu, scale=sigma, size=size, random_state=rng)
class TestedInterpolated(pm.Interpolated):
rv_op = interpolated
                    @classmethod
                    def dist(cls, **kwargs):
x_points = np.linspace((mu - (5 * sigma)), (mu + (5 * sigma)), 100)
pdf_points = st.norm.pdf(x_points, loc=mu, scale=sigma)
return super().dist(x_points=x_points, pdf_points=pdf_points, **kwargs)
                continuous_random_tester(TestedInterpolated, {}, extra_args={'rng': pytensor.shared(rng)}, ref_rand=ref_rand)

def test_text_formatting_function(capsys: pytest.CaptureFixture[str]) -> None:
def format_text(seconds: float) -> str:
return f'Function: {(seconds + 1):.0f}'
with Timer(text=format_text):
waste_time()
(stdout, stderr) = capsys.readouterr()
assert (stdout.strip() == 'Function: 1')
    assert (not stderr.strip())

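# Note on the feature exercised above: codetiming's Timer accepts a callable
# for `text`; it is called with the elapsed seconds and must return the line
# to log, e.g. Timer(text=lambda secs: f'elapsed: {secs:.2f}s').
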
class Effect8229(BaseEffect):
type = 'passive'
    @staticmethod
    def handler(fit, ship, context, projectionRange, **kwargs):
        fit.modules.filteredItemBoost((lambda mod: mod.item.requiresSkill('Gas Cloud Harvesting')), 'duration', ship.getModifiedItemAttr('miningBargeBonusGasHarvestingDuration'), skill='Mining Barge', **kwargs)

class Effect5503(BaseEffect):
type = 'passive'
    @staticmethod
    def handler(fit, ship, context, projectionRange, **kwargs):
        fit.drones.filteredItemBoost((lambda drone: drone.item.requiresSkill('Drones')), 'trackingSpeed', ship.getModifiedItemAttr('eliteBonusCommandShips2'), skill='Command Ships', **kwargs)

def set_args():
parser = argparse.ArgumentParser()
    # The original help strings were non-English and were lost in extraction;
    # the texts below are reconstructed English equivalents.
    parser.add_argument('--model', default='bert', type=str, required=False, help='model type')
    parser.add_argument('--problem_type', default='single_label_classification', type=str, required=False, help='classification problem type')
    parser.add_argument('--dir_name', default='xinwen', type=str, required=False, help='data directory containing train.csv, test.csv and dev.csv')
    parser.add_argument('--batch_size', default=16, type=int, required=False, help='batch size')
    parser.add_argument('--max_seq_len', default=150, type=int, required=False, help='maximum sequence length; longer inputs are truncated')
    parser.add_argument('--text_col_name', default='text', type=str, required=False, help='name of the text column in train.csv')
    parser.add_argument('--class_col_name', default=None, type=str, required=False, help='name of the label column in train.csv')
    parser.add_argument('--csv_sep', default=',', type=str, required=False, help='csv field separator')
    parser.add_argument('--csv_encoding', default='utf-8', type=str, required=False, help='csv file encoding')
args = parser.parse_args()
    return args

class Polygon(ShapeBase):
def __init__(self, *coordinates, color=(255, 255, 255, 255), batch=None, group=None):
self._rotation = 0
self._coordinates = list(coordinates)
(self._x, self._y) = self._coordinates[0]
self._num_verts = ((len(self._coordinates) - 2) * 3)
(r, g, b, *a) = color
self._rgba = (r, g, b, (a[0] if a else 255))
program = get_default_shader()
self._batch = (batch or Batch())
self._group = self.group_class(GL_SRC_ALPHA, GL_ONE_MINUS_SRC_ALPHA, program, group)
self._create_vertex_list()
def __contains__(self, point):
assert (len(point) == 2)
point = _rotate_point(self._coordinates[0], point, math.radians(self._rotation))
return _sat(self._coordinates, point)
def _create_vertex_list(self):
self._vertex_list = self._group.program.vertex_list(self._num_verts, self._draw_mode, self._batch, self._group, position=('f', self._get_vertices()), colors=('Bn', (self._rgba * self._num_verts)), translation=('f', ((self._x, self._y) * self._num_verts)))
def _get_vertices(self):
if (not self._visible):
return ((0, 0) * self._num_verts)
else:
(trans_x, trans_y) = self._coordinates[0]
trans_x += self._anchor_x
trans_y += self._anchor_y
coords = [[(x - trans_x), (y - trans_y)] for (x, y) in self._coordinates]
triangles = []
for n in range((len(coords) - 2)):
triangles += [coords[0], coords[(n + 1)], coords[(n + 2)]]
return tuple((value for coordinate in triangles for value in coordinate))
def _update_vertices(self):
        self._vertex_list.position[:] = self._get_vertices()

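# Geometry note for _get_vertices() above: the polygon is fan-triangulated from
# its first vertex, so vertices (v0, ..., v4) become triangles (v0, v1, v2),
# (v0, v2, v3), (v0, v3, v4); hence _num_verts == (len(coordinates) - 2) * 3.
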
class TestRole():
def test_creation(self, parent_role):
r = Role(child_roles=[parent_role, parent_role])
assert (r.chat_ids == set())
assert (str(r) == 'Role({})')
assert (r.child_roles == {parent_role})
assert isinstance(r._admin, Role)
assert (str(r._admin) == f'Role({Role._DEFAULT_ADMIN_NAME})')
r = Role(1)
assert (r.chat_ids == {1})
assert (str(r) == 'Role({1})')
r = Role([1, 2])
assert (r.chat_ids == {1, 2})
assert (str(r) == 'Role({1, 2})')
r = Role([1, 2], name='role')
assert (r.chat_ids == {1, 2})
assert (str(r) == 'Role(role)')
def test_add_member(self, role):
assert (role.chat_ids == set())
role.add_member(1)
assert (role.chat_ids == {1})
role.add_member(2)
assert (role.chat_ids == {1, 2})
role.add_member(1)
assert (role.chat_ids == {1, 2})
def test_kick_member(self, role):
assert (role.chat_ids == set())
role.add_member(1)
role.add_member(2)
assert (role.chat_ids == {1, 2})
role.kick_member(1)
assert (role.chat_ids == {2})
role.kick_member(1)
assert (role.chat_ids == {2})
role.kick_member(2)
assert (role.chat_ids == set())
def test_add_remove_child_role(self, role, parent_role):
assert (role.child_roles == set())
parent2 = Role(chat_ids=456, name='pr2')
role.add_child_role(parent_role)
assert (role.child_roles == {parent_role})
role.add_child_role(parent2)
assert (role.child_roles == {parent_role, parent2})
role.remove_child_role(parent_role)
assert (role.child_roles == {parent2})
role.remove_child_role(parent2)
assert (role.child_roles == set())
with pytest.raises(ValueError, match='You must not add a role as its own child!'):
role.add_child_role(role)
parent_role.add_child_role(role)
with pytest.raises(ValueError, match='You must not add a parent role as a child!'):
role.add_child_role(parent_role)
def test_equals(self, role, parent_role):
r = Role(name='test1')
r2 = Role(name='test2')
r3 = Role(name='test3', chat_ids=[1, 2])
r4 = Role(name='test4')
assert role.equals(parent_role)
role.add_child_role(r)
assert (not role.equals(parent_role))
parent_role.add_child_role(r2)
assert role.equals(parent_role)
parent_role.add_child_role(r3)
role.add_child_role(r4)
assert (not role.equals(parent_role))
role.remove_child_role(r4)
parent_role.remove_child_role(r3)
role.add_member(1)
assert (not role.equals(parent_role))
parent_role.add_member(1)
assert role.equals(parent_role)
role.add_member(2)
assert (not role.equals(parent_role))
parent_role.add_member(2)
assert role.equals(parent_role)
role.kick_member(2)
assert (not role.equals(parent_role))
parent_role.kick_member(2)
assert role.equals(parent_role)
r.add_member(1)
assert (not role.equals(parent_role))
r2.add_member(1)
assert role.equals(parent_role)
def test_comparison(self, role, parent_role):
assert (not (role <= 1))
assert (not (role >= 1))
assert (not (role < parent_role))
assert (not (parent_role < role))
assert (role <= role)
assert (role >= role)
assert (parent_role <= parent_role)
assert (parent_role >= parent_role)
parent_role.add_child_role(role)
assert (role < parent_role)
assert (role <= parent_role)
assert (parent_role >= role)
assert (parent_role > role)
parent_role.remove_child_role(role)
assert (not (role < parent_role))
assert (not (parent_role < role))
def test_hash(self, role, parent_role):
assert (role != parent_role)
assert (hash(role) != hash(parent_role))
assert (role == role)
assert (hash(role) == hash(role))
assert (parent_role == parent_role)
assert (hash(parent_role) == hash(parent_role))
def test_deepcopy(self, role, parent_role):
child = Role(name='cr', chat_ids=[1, 2, 3])
role.add_child_role(child)
role.add_member(7)
copied_role = deepcopy(role)
assert (role is not copied_role)
assert role.equals(copied_role)
assert (role.chat_ids is not copied_role.chat_ids)
assert (role.chat_ids == copied_role.chat_ids)
(copied_child,) = copied_role.child_roles
assert (child is not copied_child)
assert child.equals(copied_child)
def test_filter_user(self, update, role, parent_role):
update.message.chat = None
assert (not role.check_update(update))
role.add_member(0)
assert role.check_update(update)
update.message.from_user.id = 1
assert (not role.check_update(update))
parent_role.add_child_role(role)
parent_role.add_member(1)
assert role.check_update(update)
def test_filter_chat(self, update, role, parent_role):
update.message.from_user = None
assert (not role.check_update(update))
role.add_member(0)
assert role.check_update(update)
update.message.chat.id = 1
assert (not role.check_update(update))
parent_role.add_child_role(role)
parent_role.add_member(1)
assert role.check_update(update)
def test_filter_merged_roles(self, update, role):
role.add_member(0)
r = Role(0)
assert (not (role & (~ r)).check_update(update))
r = Role(1)
assert (not (role & r).check_update(update))
assert (role | r).check_update(update)
def test_filter_allow_parent(self, update, role, parent_role):
role.add_member(0)
parent_role.add_member(1)
parent_role.add_child_role(role)
test_role = (~ role)
assert (not test_role.check_update(update))
update.message.from_user.id = 1
update.message.chat.id = 1
assert test_role.check_update(update)
def test_filter_exclude_children(self, update, role, parent_role):
parent_role.add_child_role(role)
parent_role.add_member(0)
role.add_member(1)
test_role = (~ parent_role)
assert (not test_role.check_update(update))
update.message.from_user.id = 1
update.message.chat.id = 1
assert (not test_role.check_update(update))
update.message.from_user.id = 2
update.message.chat.id = 1
assert (not test_role.check_update(update))
def test_filter_without_user_and_chat(self, update, role):
role.add_member(0)
update.message = None
assert (not role.check_update(update))
assert (not (~ role).check_update(update))
def test_always_allow_admin(self, update, role):
role._admin.add_member(0)
try:
assert (~ Role(0)).check_update(update)
assert (Role(0) & (~ Role(0))).check_update(update)
assert (Role(1) & (~ Role(0))).check_update(update)
assert (Role(1) & (~ Role(2))).check_update(update)
assert (Role(0) | (~ Role(0))).check_update(update)
assert (Role(1) | (~ Role(0))).check_update(update)
assert (Role(1) | (~ Role(2))).check_update(update)
finally:
role._admin.kick_member(0)
def test_non_message_update(self, update, role):
update.message = None
assert (not role.check_update(update))
assert (not (~ role).check_update(update))
update.callback_query = CallbackQuery(id='id', from_user=User(id=0, is_bot=False, first_name='first_name'), chat_instance='chat_instance')
assert (not role.check_update(update))
assert (~ role).check_update(update)
role.add_member(0)
assert role.check_update(update)
assert (not (~ role).check_update(update))
def test_pickle(self, role, parent_role):
role.add_member([0, 1, 3])
parent_role.add_member([4, 5, 6])
child_role = Role(name='child_role', chat_ids=[7, 8, 9])
role.add_child_role(child_role)
parent_role.add_child_role(child_role)
data = {'role': role, 'parent': parent_role, 'child': child_role}
with open('pickle', 'wb') as file:
pickle.dump(data, file)
with open('pickle', 'rb') as file:
data = pickle.load(file)
assert data['role'].equals(role)
assert data['parent'].equals(parent_role)
assert data['child'].equals(child_role)
(child_1,) = data['role'].child_roles
(child_2,) = data['parent'].child_roles
assert (child_1 is child_2)
assert (data['role'] in Role._admin.child_roles)
assert (data['parent'] in Role._admin.child_roles)
assert (data['child'] in Role._admin.child_roles)
assert (data['child'] <= data['role'])
assert (data['child'] <= data['parent'])
        assert (not (data['role'] <= data['parent']))

def make_servicer(echo_pb2, echo_grpc):
class Servicer(echo_grpc.EchoServicer):
async def Echo(self, message):
return echo_pb2.EchoReply(data=message.data)
async def EchoTwoTimes(self, message):
(yield echo_pb2.EchoReply(data=message.data))
(yield echo_pb2.EchoReply(data=message.data))
async def EchoEachTime(self, messages):
async for message in messages:
(yield echo_pb2.EchoReply(data=message.data))
async def EchoLast(self, messages):
data = []
async for message in messages:
data.append(message.data)
return echo_pb2.EchoReply(data=''.join(data))
async def EchoLastV2(self, messages):
data = []
async for message in messages:
data.append(message.data)
(yield echo_pb2.EchoReply(data=''.join(data)))
    return Servicer

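# Usage sketch (hedged; the servicer API above matches purerpc-style generated
# code): unary methods return one reply, server-streaming methods yield
# replies, and client-streaming methods iterate over `messages`.
# Servicer = make_servicer(echo_pb2, echo_grpc)
# service = Servicer().service  # attach this to a purerpc server
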
def create_contour(series_slice: Dataset, contour_data: np.ndarray) -> Dataset:
contour_image = Dataset()
contour_image.ReferencedSOPClassUID = series_slice.SOPClassUID
contour_image.ReferencedSOPInstanceUID = series_slice.SOPInstanceUID
contour_image_sequence = Sequence()
contour_image_sequence.append(contour_image)
contour = Dataset()
contour.ContourImageSequence = contour_image_sequence
contour.ContourGeometricType = 'CLOSED_PLANAR'
    contour.NumberOfContourPoints = (len(contour_data) // 3)  # integer count of (x, y, z) triplets
contour.ContourData = [round(val, 10) for val in contour_data]
    return contour

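# Example (hedged): contour_data is a flat [x0, y0, z0, x1, y1, z1, ...] array
# in patient coordinates, so a closed triangular contour supplies nine values:
# contour = create_contour(series_slice, np.array([0.0, 0.0, 10.0, 5.0, 0.0, 10.0, 5.0, 5.0, 10.0]))
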
# The decorator names on this command were stripped in extraction; the calls
# below are best-effort reconstructions of a click-style registration.
@registry.add_cli(name='reduce')
@click.command('reduce', cls=cli_tools.DocumentedCommand, section='Traversals', short_help='Reduce a sequence with a function like ``operator.mul``.', help=reduce.__doc__)
@add_exec_before
@click.argument('function_name')
def _reduce(function_name, **parameters):
    return [{'code': f'toolz.curry({function_name})', 'name': 'reduce', 'parameters': parameters}]

class WinnowResNet18Test(unittest.TestCase):
def test_winnowing_multiple_zeroed_resnet34(self):
model = models.resnet34(pretrained=False)
model.eval()
input_shape = [1, 3, 224, 224]
list_of_modules_to_winnow = []
input_channels_to_prune = [5, 9, 14, 18, 23, 27, 32, 36, 41, 45, 54]
list_of_modules_to_winnow.append((model.layer4[1].conv2, input_channels_to_prune))
input_channels_to_prune = [5, 9, 14, 18, 23, 27, 32, 36, 41, 45, 54]
list_of_modules_to_winnow.append((model.layer4[0].conv1, input_channels_to_prune))
input_channels_to_prune = [15, 29, 24, 28, 33, 47, 2, 3, 1, 5, 9]
list_of_modules_to_winnow.append((model.layer3[1].conv2, input_channels_to_prune))
input_channels_to_prune = [33, 44, 55]
list_of_modules_to_winnow.append((model.layer2[1].conv2, input_channels_to_prune))
input_channels_to_prune = [11, 12, 13, 14, 15]
list_of_modules_to_winnow.append((model.layer2[0].conv2, input_channels_to_prune))
input_channels_to_prune = [55, 56, 57, 58, 59]
list_of_modules_to_winnow.append((model.layer1[1].conv1, input_channels_to_prune))
input_channels_to_prune = [42, 44, 46]
list_of_modules_to_winnow.append((model.layer1[0].conv2, input_channels_to_prune))
(new_model, _) = winnow_model(model, input_shape, list_of_modules_to_winnow, reshape=True, in_place=False, verbose=True)
input_tensor = torch.rand(input_shape).double()
model.double()
model.eval()
validation_output = model(input_tensor)
new_model.double()
new_model.eval()
test_output = new_model(input_tensor)
self.assertTrue((test_output.shape == validation_output.shape))
self.assertEqual(new_model.layer1[0].conv2.in_channels, 61)
self.assertEqual(list(new_model.layer1[0].conv2.weight.shape), [64, 61, 3, 3])
self.assertEqual(new_model.layer1[0].conv1.out_channels, 61)
self.assertEqual(new_model.layer1[1].conv1[1].in_channels, 59)
self.assertEqual(list(new_model.layer1[1].conv1[1].weight.shape), [64, 59, 3, 3])
self.assertEqual(new_model.layer2[0].conv2.in_channels, 123)
self.assertEqual(list(new_model.layer2[0].conv2.weight.shape), [128, 123, 3, 3])
self.assertEqual(new_model.layer2[0].conv1.out_channels, 123)
self.assertEqual(new_model.layer2[1].conv2.in_channels, 125)
self.assertEqual(list(new_model.layer2[1].conv2.weight.shape), [128, 125, 3, 3])
self.assertEqual(new_model.layer2[1].conv1.out_channels, 125)
self.assertEqual(new_model.layer3[1].conv2.in_channels, 245)
self.assertEqual(list(new_model.layer3[1].conv2.weight.shape), [256, 245, 3, 3])
self.assertEqual(new_model.layer3[1].conv1.out_channels, 245)
self.assertEqual(new_model.layer4[0].conv1[1].in_channels, 245)
self.assertEqual(list(new_model.layer4[0].conv1[1].weight.shape), [512, 245, 3, 3])
self.assertEqual(new_model.layer4[1].conv2.in_channels, 501)
self.assertEqual(list(new_model.layer4[1].conv2.weight.shape), [512, 501, 3, 3])
self.assertEqual(new_model.layer4[1].conv1.out_channels, 501)
def test_winnowing_multiple_zeroed_resnet50(self):
model = models.resnet50(pretrained=False)
model.eval()
input_shape = [1, 3, 224, 224]
list_of_modules_to_winnow = []
input_channels_to_prune = [5, 9, 14, 18, 23, 27, 32, 36, 41, 45, 54]
list_of_modules_to_winnow.append((model.layer4[1].conv2, input_channels_to_prune))
input_channels_to_prune = [5, 9, 14, 18, 23, 27, 32, 36, 41, 45, 54]
list_of_modules_to_winnow.append((model.layer4[0].conv1, input_channels_to_prune))
input_channels_to_prune = [15, 29, 24, 28, 33, 47, 2, 3, 1, 5, 9]
list_of_modules_to_winnow.append((model.layer3[1].conv2, input_channels_to_prune))
input_channels_to_prune = [33, 44, 55]
list_of_modules_to_winnow.append((model.layer2[1].conv2, input_channels_to_prune))
input_channels_to_prune = [11, 12, 13, 14, 15]
list_of_modules_to_winnow.append((model.layer2[0].conv2, input_channels_to_prune))
input_channels_to_prune = [55, 56, 57, 58, 59]
list_of_modules_to_winnow.append((model.layer1[1].conv1, input_channels_to_prune))
input_channels_to_prune = [42, 44, 46]
list_of_modules_to_winnow.append((model.layer1[0].conv2, input_channels_to_prune))
(new_model, _) = winnow_model(model, input_shape, list_of_modules_to_winnow, reshape=True, in_place=False, verbose=True)
input_tensor = torch.rand(input_shape).double()
model.double()
model.eval()
validation_output = model(input_tensor)
new_model.double()
new_model.eval()
test_output = new_model(input_tensor)
self.assertTrue((test_output.shape == validation_output.shape))
self.assertEqual(new_model.layer1[0].conv2.in_channels, 61)
self.assertEqual(list(new_model.layer1[0].conv2.weight.shape), [64, 61, 3, 3])
self.assertEqual(new_model.layer1[0].conv1.out_channels, 61)
self.assertEqual(new_model.layer1[1].conv1[1].in_channels, 251)
self.assertEqual(list(new_model.layer1[1].conv1[1].weight.shape), [64, 251, 1, 1])
self.assertEqual(new_model.layer2[0].conv2.in_channels, 123)
self.assertEqual(list(new_model.layer2[0].conv2.weight.shape), [128, 123, 3, 3])
self.assertEqual(new_model.layer2[0].conv1.out_channels, 123)
self.assertEqual(new_model.layer2[1].conv2.in_channels, 125)
self.assertEqual(list(new_model.layer2[1].conv2.weight.shape), [128, 125, 3, 3])
self.assertEqual(new_model.layer2[1].conv1.out_channels, 125)
self.assertEqual(new_model.layer3[1].conv2.in_channels, 245)
self.assertEqual(list(new_model.layer3[1].conv2.weight.shape), [256, 245, 3, 3])
self.assertEqual(new_model.layer3[1].conv1.out_channels, 245)
self.assertEqual(new_model.layer4[0].conv1[1].in_channels, 1013)
self.assertEqual(list(new_model.layer4[0].conv1[1].weight.shape), [512, 1013, 1, 1])
self.assertEqual(new_model.layer4[1].conv2.in_channels, 501)
self.assertEqual(list(new_model.layer4[1].conv2.weight.shape), [512, 501, 3, 3])
self.assertEqual(new_model.layer4[1].conv1.out_channels, 501)
def test_winnowing_multiple_zeroed_resnet101(self):
model = models.resnet101(pretrained=False)
model.eval()
input_shape = [1, 3, 224, 224]
list_of_modules_to_winnow = []
input_channels_to_prune = [5, 9, 14, 18, 23, 27, 32, 36, 41, 45, 54]
list_of_modules_to_winnow.append((model.layer4[1].conv2, input_channels_to_prune))
input_channels_to_prune = [5, 9, 14, 18, 23, 27, 32, 36, 41, 45, 54]
list_of_modules_to_winnow.append((model.layer4[0].conv1, input_channels_to_prune))
input_channels_to_prune = [15, 29, 24, 28, 33, 47, 2, 3, 1, 5, 9]
list_of_modules_to_winnow.append((model.layer3[1].conv2, input_channels_to_prune))
input_channels_to_prune = [33, 44, 55]
list_of_modules_to_winnow.append((model.layer2[1].conv2, input_channels_to_prune))
input_channels_to_prune = [11, 12, 13, 14, 15]
list_of_modules_to_winnow.append((model.layer2[0].conv2, input_channels_to_prune))
input_channels_to_prune = [55, 56, 57, 58, 59]
list_of_modules_to_winnow.append((model.layer1[1].conv1, input_channels_to_prune))
input_channels_to_prune = [42, 44, 46]
list_of_modules_to_winnow.append((model.layer1[0].conv2, input_channels_to_prune))
(new_model, _) = winnow_model(model, input_shape, list_of_modules_to_winnow, reshape=True, in_place=False, verbose=True)
input_tensor = torch.rand(input_shape).double()
model.double()
model.eval()
validation_output = model(input_tensor)
new_model.double()
new_model.eval()
test_output = new_model(input_tensor)
self.assertTrue((test_output.shape == validation_output.shape))
self.assertEqual(new_model.layer1[0].conv2.in_channels, 61)
self.assertEqual(list(new_model.layer1[0].conv2.weight.shape), [64, 61, 3, 3])
self.assertEqual(new_model.layer1[0].conv1.out_channels, 61)
self.assertEqual(new_model.layer1[1].conv1[1].in_channels, 251)
self.assertEqual(list(new_model.layer1[1].conv1[1].weight.shape), [64, 251, 1, 1])
self.assertEqual(new_model.layer2[0].conv2.in_channels, 123)
self.assertEqual(list(new_model.layer2[0].conv2.weight.shape), [128, 123, 3, 3])
self.assertEqual(new_model.layer2[0].conv1.out_channels, 123)
self.assertEqual(new_model.layer2[1].conv2.in_channels, 125)
self.assertEqual(list(new_model.layer2[1].conv2.weight.shape), [128, 125, 3, 3])
self.assertEqual(new_model.layer2[1].conv1.out_channels, 125)
self.assertEqual(new_model.layer3[1].conv2.in_channels, 245)
self.assertEqual(list(new_model.layer3[1].conv2.weight.shape), [256, 245, 3, 3])
self.assertEqual(new_model.layer3[1].conv1.out_channels, 245)
self.assertEqual(new_model.layer4[0].conv1[1].in_channels, 1013)
self.assertEqual(list(new_model.layer4[0].conv1[1].weight.shape), [512, 1013, 1, 1])
self.assertEqual(new_model.layer4[1].conv2.in_channels, 501)
self.assertEqual(list(new_model.layer4[1].conv2.weight.shape), [512, 501, 3, 3])
self.assertEqual(new_model.layer4[1].conv1.out_channels, 501)
def test_winnowing_multiple_zeroed_resnet152(self):
model = models.resnet152(pretrained=False)
model.eval()
input_shape = [1, 3, 224, 224]
list_of_modules_to_winnow = []
input_channels_to_prune = [5, 9, 14, 18, 23, 27, 32, 36, 41, 45, 54]
list_of_modules_to_winnow.append((model.layer4[1].conv2, input_channels_to_prune))
input_channels_to_prune = [5, 9, 14, 18, 23, 27, 32, 36, 41, 45, 54]
list_of_modules_to_winnow.append((model.layer4[0].conv1, input_channels_to_prune))
input_channels_to_prune = [15, 29, 24, 28, 33, 47, 2, 3, 1, 5, 9]
list_of_modules_to_winnow.append((model.layer3[1].conv2, input_channels_to_prune))
input_channels_to_prune = [33, 44, 55]
list_of_modules_to_winnow.append((model.layer2[1].conv2, input_channels_to_prune))
input_channels_to_prune = [11, 12, 13, 14, 15]
list_of_modules_to_winnow.append((model.layer2[0].conv2, input_channels_to_prune))
input_channels_to_prune = [55, 56, 57, 58, 59]
list_of_modules_to_winnow.append((model.layer1[1].conv1, input_channels_to_prune))
input_channels_to_prune = [42, 44, 46]
list_of_modules_to_winnow.append((model.layer1[0].conv2, input_channels_to_prune))
(new_model, _) = winnow_model(model, input_shape, list_of_modules_to_winnow, reshape=True, in_place=False, verbose=True)
input_tensor = torch.rand(input_shape).double()
model.double()
model.eval()
validation_output = model(input_tensor)
new_model.double()
new_model.eval()
test_output = new_model(input_tensor)
self.assertTrue((test_output.shape == validation_output.shape))
self.assertEqual(new_model.layer1[0].conv2.in_channels, 61)
self.assertEqual(list(new_model.layer1[0].conv2.weight.shape), [64, 61, 3, 3])
self.assertEqual(new_model.layer1[0].conv1.out_channels, 61)
self.assertEqual(new_model.layer1[1].conv1[1].in_channels, 251)
self.assertEqual(list(new_model.layer1[1].conv1[1].weight.shape), [64, 251, 1, 1])
self.assertEqual(new_model.layer2[0].conv2.in_channels, 123)
self.assertEqual(list(new_model.layer2[0].conv2.weight.shape), [128, 123, 3, 3])
self.assertEqual(new_model.layer2[0].conv1.out_channels, 123)
self.assertEqual(new_model.layer2[1].conv2.in_channels, 125)
self.assertEqual(list(new_model.layer2[1].conv2.weight.shape), [128, 125, 3, 3])
self.assertEqual(new_model.layer2[1].conv1.out_channels, 125)
self.assertEqual(new_model.layer3[1].conv2.in_channels, 245)
self.assertEqual(list(new_model.layer3[1].conv2.weight.shape), [256, 245, 3, 3])
self.assertEqual(new_model.layer3[1].conv1.out_channels, 245)
self.assertEqual(new_model.layer4[0].conv1[1].in_channels, 1013)
self.assertEqual(list(new_model.layer4[0].conv1[1].weight.shape), [512, 1013, 1, 1])
self.assertEqual(new_model.layer4[1].conv2.in_channels, 501)
self.assertEqual(list(new_model.layer4[1].conv2.weight.shape), [512, 501, 3, 3])
self.assertEqual(new_model.layer4[1].conv1.out_channels, 501)
def test_inception_model_conv_below_conv(self):
OpConnectivity.pytorch_dict['relu'] = ConnectivityType.direct
OpConnectivity.pytorch_dict['max_pool2d'] = ConnectivityType.direct
OpConnectivity.pytorch_dict['avg_pool2d'] = ConnectivityType.direct
OpConnectivity.pytorch_dict['adaptive_avg_pool2d'] = ConnectivityType.direct
OpConnectivity.pytorch_dict['dropout'] = ConnectivityType.direct
OpConnectivity.pytorch_dict['flatten'] = ConnectivityType.skip
model = models.Inception3()
model.eval()
input_shape = [1, 3, 299, 299]
input_channels_to_prune = [1, 3, 5, 7, 9, 15, 32, 45]
list_of_modules_to_winnow = [(model.Mixed_5b.branch3x3dbl_2.conv, input_channels_to_prune)]
print(model.Mixed_5b.branch3x3dbl_1.conv.out_channels)
(new_model, _) = winnow_model(model, input_shape, list_of_modules_to_winnow, reshape=True, in_place=False, verbose=True)
self.assertEqual(new_model.Mixed_5b.branch3x3dbl_1.conv.out_channels, 56)
self.assertEqual(list(new_model.Mixed_5b.branch3x3dbl_1.conv.weight.shape), [56, 192, 1, 1])
del model
del new_model
def test_inception_model_conv_below_split(self):
OpConnectivity.pytorch_dict['relu'] = ConnectivityType.direct
OpConnectivity.pytorch_dict['max_pool2d'] = ConnectivityType.direct
OpConnectivity.pytorch_dict['avg_pool2d'] = ConnectivityType.direct
OpConnectivity.pytorch_dict['adaptive_avg_pool2d'] = ConnectivityType.direct
OpConnectivity.pytorch_dict['dropout'] = ConnectivityType.direct
OpConnectivity.pytorch_dict['flatten'] = ConnectivityType.skip
model = models.Inception3()
model.eval()
input_shape = [1, 3, 299, 299]
input_channels_to_prune = [1, 3, 5, 7, 9, 15, 32, 45]
list_of_modules_to_winnow = [(model.Mixed_5b.branch3x3dbl_1.conv, input_channels_to_prune)]
print(model.Mixed_5b.branch3x3dbl_1.conv.out_channels)
(new_model, _) = winnow_model(model, input_shape, list_of_modules_to_winnow, reshape=True, in_place=False, verbose=True)
del model
del new_model
model = models.Inception3()
model.eval()
input_shape = [1, 3, 299, 299]
input_channels_to_prune = [1, 3, 5, 7, 9, 15, 32, 45]
list_of_modules_to_winnow = [(model.Mixed_5b.branch1x1.conv, input_channels_to_prune)]
print(model.Mixed_5b.branch3x3dbl_1.conv.out_channels)
(new_model, _) = winnow_model(model, input_shape, list_of_modules_to_winnow, reshape=True, in_place=False, verbose=True)
del model
del new_model
self.assertEqual(0, 0)
def test_inception_model_conv_below_avgpool(self):
OpConnectivity.pytorch_dict['relu'] = ConnectivityType.direct
OpConnectivity.pytorch_dict['max_pool2d'] = ConnectivityType.direct
OpConnectivity.pytorch_dict['avg_pool2d'] = ConnectivityType.direct
OpConnectivity.pytorch_dict['adaptive_avg_pool2d'] = ConnectivityType.direct
OpConnectivity.pytorch_dict['dropout'] = ConnectivityType.direct
OpConnectivity.pytorch_dict['flatten'] = ConnectivityType.skip
model = models.Inception3()
model.eval()
input_shape = [1, 3, 299, 299]
input_channels_to_prune = [1, 3, 5, 7, 9, 15, 32, 45]
list_of_modules_to_winnow = [(model.Mixed_5b.branch_pool.conv, input_channels_to_prune)]
print(model.Mixed_5b.branch_pool.conv)
print(model.Mixed_5b.branch_pool.conv.out_channels, model.Mixed_5b.branch_pool.conv.in_channels)
(new_model, _) = winnow_model(model, input_shape, list_of_modules_to_winnow, reshape=True, in_place=False, verbose=True)
self.assertEqual(new_model.Mixed_5b.branch_pool.conv[1].out_channels, 32)
self.assertEqual(list(new_model.Mixed_5b.branch_pool.conv[1].weight.shape), [32, 184, 1, 1])
del model
del new_model |
@pytest.mark.skipif((not PY_3_8_PLUS), reason='cached_property is 3.8+')
def test_slots_cached_property_called_independent_across_instances():
@attr.s(slots=True)
class A():
x = attr.ib()
@functools.cached_property
def f(self):
return self.x
obj_1 = A(1)
obj_2 = A(2)
assert (obj_1.f == 1)
assert (obj_2.f == 2) |
def _parse_qsl(qs):
r = []
for pair in qs.replace(';', '&').split('&'):
if (not pair):
continue
nv = pair.split('=', 1)
if (len(nv) != 2):
nv.append('')
key = urlunquote(nv[0].replace('+', ' '))
value = urlunquote(nv[1].replace('+', ' '))
r.append((key, value))
return r |
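# Usage sketch for _parse_qsl above (not from the source): pairs split on both
# '&' and ';', a missing value defaults to '', and '+' decodes to a space.
if __name__ == '__main__':
    assert _parse_qsl('a=1&b=2;c') == [('a', '1'), ('b', '2'), ('c', '')]
    assert _parse_qsl('q=hello+world') == [('q', 'hello world')]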
def write_to_tsv(output_file: str, data: Dict[(str, str)]):
with open(output_file, 'w') as fOut:
writer = csv.writer(fOut, delimiter='\t', quoting=csv.QUOTE_MINIMAL)
writer.writerow(['query-id', 'corpus-id', 'score'])
for (query_id, corpus_dict) in data.items():
for (corpus_id, score) in corpus_dict.items():
writer.writerow([query_id, corpus_id, score]) |
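# Usage sketch for write_to_tsv above (file name and data invented for
# illustration): a nested query -> {corpus-id: score} dict becomes a
# three-column TSV with a header row.
if __name__ == '__main__':
    write_to_tsv('scores.tsv', {'q1': {'d1': '0.9', 'd2': '0.4'}})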
@app.task(bind=True, base=StatsTask)  # task decorator restored; the Celery app name `app` is an assumption
def ptt_monthly_summary(self, year, month) -> Dict:
logger.info('Get monthly summary: %s-%s', year, month)
daily_summaries = self.sess.query(func.year(PttPost.created_at), func.month(PttPost.created_at), func.day(PttPost.created_at), func.count(PttPost.id)).filter((func.year(PttPost.created_at) == year), (func.month(PttPost.created_at) == month)).group_by(func.year(PttPost.created_at), func.month(PttPost.created_at), func.day(PttPost.created_at)).all()
for (year_idx, month_idx, day_idx, num_posts) in daily_summaries:
exist_row = self.sess.query(DailySummary).filter((DailySummary.source == SourceType.PTT), (DailySummary.day == day_idx), (DailySummary.month == month_idx), (DailySummary.year == year_idx)).first()
daily_sum = stats.DailySummary(source=SourceType.PTT, total_posts=num_posts, year=year_idx, month=month_idx, day=day_idx)
if exist_row:
exist_row.total_posts = daily_sum.total_posts
exist_row.updated_at = datetime.utcnow()
self.sess.merge(exist_row)
try:
self.sess.commit()
except Exception:
logger.warning('Commit failed on %s, %s, will call session.rollback()', year, month)
self.sess.rollback()
else:
row = DailySummary(source=SourceType.PTT, total_posts=daily_sum.total_posts, year=daily_sum.year, month=daily_sum.month, day=daily_sum.day)
self.sess.add(row)
try:
self.sess.commit()
except Exception:
logger.warning('Commit failed on %s, %s, will call session.rollback()', year, month)
self.sess.rollback()
total_posts = self.sess.query(PttPost).filter((extract('year', PttPost.created_at) == int(year)), (extract('month', PttPost.created_at) == int(month))).count()
total_comments = self.sess.query(PttComment).filter((extract('year', PttComment.created_at) == int(year)), (extract('month', PttComment.created_at) == int(month))).count()
monthly_sum = stats.MonthlySummary(source=SourceType.PTT, total_posts=total_posts, total_comments=total_comments, year=year, month=month)
exist_row = self.sess.query(MonthlySummary).filter((MonthlySummary.source == SourceType.PTT), (MonthlySummary.month == int(month)), (MonthlySummary.year == int(year))).first()
if exist_row:
exist_row.total_posts = total_posts
exist_row.total_comments = total_comments
exist_row.updated_at = datetime.utcnow()
self.sess.merge(exist_row)
try:
self.sess.commit()
except Exception:
logger.warning('Commit failed on %s, %s, will call session.rollback()', year, month)
self.sess.rollback()
else:
row = MonthlySummary(source=SourceType.PTT, total_posts=monthly_sum.total_posts, total_comments=monthly_sum.total_comments, year=monthly_sum.year, month=monthly_sum.month)
self.sess.add(row)
try:
self.sess.commit()
except Exception:
logger.warning('Commit failed on %s, %s, will call session.rollback()', year, month)
self.sess.rollback()
return {'year': monthly_sum.year, 'month': monthly_sum.month, 'total_posts': monthly_sum.total_posts, 'total_comments': monthly_sum.total_comments} |
class FitBert():
def __init__(self, model=None, tokenizer=None, model_name='bert-large-uncased', mask_token='***mask***', disable_gpu=False):
self.mask_token = mask_token
self.delemmatizer = Delemmatizer()
self.device = torch.device(('cuda' if (torch.cuda.is_available() and (not disable_gpu)) else 'cpu'))
print('device:', self.device)
if (not model):
print('using model:', model_name)
if ('distilbert' in model_name):
self.bert = DistilBertForMaskedLM.from_pretrained(model_name)
else:
self.bert = BertForMaskedLM.from_pretrained(model_name)
self.bert.to(self.device)
else:
print('using custom model:', model.config.architectures)
self.bert = model
self.bert.to(self.device)
if (not tokenizer):
if ('distilbert' in model_name):
self.tokenizer = DistilBertTokenizer.from_pretrained(model_name)
else:
self.tokenizer = BertTokenizer.from_pretrained(model_name)
else:
self.tokenizer = tokenizer
self.bert.eval()
@staticmethod
def softmax(x):
return (x.exp() / x.exp().sum((- 1)).unsqueeze((- 1)))
@staticmethod
def is_multi(options: List[str]) -> bool:
return seq(options).filter((lambda x: (len(x.split()) != 1))).non_empty()
def mask(self, s: str, span: Tuple[(int, int)]) -> Tuple[(str, str)]:
return _mask(s, span, mask_token=self.mask_token)
def _tokens_to_masked_ids(self, tokens, mask_ind):
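# Copy the token list, mask the position at mask_ind, wrap with BERT's
# [CLS]/[SEP] markers, then convert to vocabulary ids.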
masked_tokens = tokens[:]
masked_tokens[mask_ind] = '[MASK]'
masked_tokens = ((['[CLS]'] + masked_tokens) + ['[SEP]'])
masked_ids = self.tokenizer.convert_tokens_to_ids(masked_tokens)
return masked_ids
def _get_sentence_probability(self, sent: str) -> float:
tokens = self.tokenizer.tokenize(sent)
input_ids = seq(tokens).enumerate().starmap((lambda i, x: self._tokens_to_masked_ids(tokens, i))).list()
tens = torch.tensor(input_ids).to(self.device)
with torch.no_grad():
preds = self.bert(tens)[0]
probs = self.softmax(preds)
tokens_ids = self.tokenizer.convert_tokens_to_ids(tokens)
prob = seq(tokens_ids).enumerate().starmap((lambda i, x: float(probs[i][(i + 1)][x].item()))).reduce((lambda x, y: (x * y)), 1)
del tens, preds, probs, tokens, input_ids
if (self.device == 'cuda'):
torch.cuda.empty_cache()
return prob
def _delemmatize_options(self, options: List[str]) -> List[str]:
options = seq(options[:]).flat_map((lambda x: self.delemmatizer(x))).union(options).list()
return options
def guess_single(self, masked_sent: str, n: int=1):
(pre, post) = masked_sent.split(self.mask_token)
tokens = (['[CLS]'] + self.tokenizer.tokenize(pre))
target_idx = len(tokens)
tokens += ['[MASK]']
tokens += (self.tokenizer.tokenize(post) + ['[SEP]'])
input_ids = self.tokenizer.convert_tokens_to_ids(tokens)
tens = torch.tensor(input_ids).unsqueeze(0)
tens = tens.to(self.device)
with torch.no_grad():
preds = self.bert(tens)[0]
probs = self.softmax(preds)
pred_top = torch.topk(probs[(0, target_idx)], n)
pred_prob = pred_top[0].tolist()
pred_idx = pred_top[1].tolist()
pred_tok = self.tokenizer.convert_ids_to_tokens(pred_idx)
del pred_top, pred_idx, tens, preds, probs, input_ids, tokens
if (self.device == 'cuda'):
torch.cuda.empty_cache()
return (pred_tok, pred_prob)
def rank_single(self, masked_sent: str, words: List[str]):
(pre, post) = masked_sent.split(self.mask_token)
tokens = (['[CLS]'] + self.tokenizer.tokenize(pre))
target_idx = len(tokens)
tokens += ['[MASK]']
tokens += (self.tokenizer.tokenize(post) + ['[SEP]'])
words_ids = seq(words).map((lambda x: self.tokenizer.tokenize(x))).map((lambda x: self.tokenizer.convert_tokens_to_ids(x)[0]))
input_ids = self.tokenizer.convert_tokens_to_ids(tokens)
tens = torch.tensor(input_ids).unsqueeze(0)
tens = tens.to(self.device)
with torch.no_grad():
preds = self.bert(tens)[0]
probs = self.softmax(preds)
ranked_pairs = seq(words_ids).map((lambda x: float(probs[0][target_idx][x].item()))).zip(words).sorted(key=(lambda x: x[0]), reverse=True)
ranked_options = seq(ranked_pairs).map((lambda x: x[1])).list()
ranked_options_prob = seq(ranked_pairs).map((lambda x: x[0])).list()
del tens, preds, probs, tokens, words_ids, input_ids
if (self.device == 'cuda'):
torch.cuda.empty_cache()
return (ranked_options, ranked_options_prob)
def rank_multi(self, masked_sent: str, options: List[str]):
ranked_pairs = seq(options).map((lambda x: masked_sent.replace(self.mask_token, x))).map((lambda x: self._get_sentence_probability(x))).zip(options).sorted(key=(lambda x: x[0]), reverse=True)
ranked_options = seq(ranked_pairs).map((lambda x: x[1])).list()
ranked_options_prob = seq(ranked_pairs).map((lambda x: x[0])).list()
return (ranked_options, ranked_options_prob)
def _simplify_options(self, sent: str, options: List[str]):
options_split = seq(options).map((lambda x: x.split()))
trans_start = list(zip(*options_split))
start = seq(trans_start).take_while((lambda x: (seq(x).distinct().len() == 1))).map((lambda x: x[0])).list()
options_split_reversed = seq(options_split).map((lambda x: seq(x[len(start):]).reverse()))
trans_end = list(zip(*options_split_reversed))
end = seq(trans_end).take_while((lambda x: (seq(x).distinct().len() == 1))).map((lambda x: x[0])).list()
start_words = seq(start).make_string(' ')
end_words = seq(end).reverse().make_string(' ')
options = seq(options_split).map((lambda x: x[len(start):(len(x) - len(end))])).map((lambda x: seq(x).make_string(' ').strip())).list()
sub = seq([start_words, self.mask_token, end_words]).make_string(' ').strip()
sent = sent.replace(self.mask_token, sub)
return (options, sent, start_words, end_words)
def rank(self, sent: str, options: List[str], delemmatize: bool=False, with_prob: bool=False):
options = seq(options).distinct().list()
if delemmatize:
options = seq(self._delemmatize_options(options)).distinct().list()
if (seq(options).len() == 1):
return options
(options, sent, start_words, end_words) = self._simplify_options(sent, options)
if self.is_multi(options):
(ranked, prob) = self.rank_multi(sent, options)
else:
(ranked, prob) = self.rank_single(sent, options)
ranked = seq(ranked).map((lambda x: [start_words, x, end_words])).map((lambda x: seq(x).make_string(' ').strip())).list()
if with_prob:
return (ranked, prob)
else:
return ranked
def rank_with_prob(self, sent: str, options: List[str], delemmatize: bool=False):
(ranked, prob) = self.rank(sent, options, delemmatize, True)
return (ranked, prob)
def guess(self, sent: str, n: int=1) -> List[str]:
(pred_tok, _) = self.guess_single(sent, n)
return pred_tok
def guess_with_prob(self, sent: str, n: int=1):
(pred_tok, pred_prob) = self.guess_single(sent, n)
return (pred_tok, pred_prob)
def fitb(self, sent: str, options: List[str], delemmatize: bool=False) -> str:
ranked = self.rank(sent, options, delemmatize)
best_word = ranked[0]
return sent.replace(self.mask_token, best_word)
def mask_fitb(self, sent: str, span: Tuple[(int, int)]) -> str:
(masked_str, replaced) = self.mask(sent, span)
options = [replaced]
return self.fitb(masked_str, options, delemmatize=True) |
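# Usage sketch for FitBert above, kept as comments since instantiation
# downloads model weights; the sentence and options are invented examples.
# fb = FitBert()
# masked = 'The doctor ***mask*** the patient.'
# fb.rank(masked, options=['examined', 'examine'])   # -> options, best first
# fb.fitb(masked, options=['examined', 'examine'])   # -> filled-in sentence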
class TestFCIDumpH2(QiskitNatureTestCase, BaseTestFCIDump):
def setUp(self):
super().setUp()
self.nuclear_repulsion_energy = 0.7199
self.num_molecular_orbitals = 2
self.num_alpha = 1
self.num_beta = 1
self.mo_onee = np.array([[1.2563, 0.0], [0.0, 0.4719]])
self.mo_onee_b = None
self.mo_eri = np.array([[[[0.6757, 0.0], [0.0, 0.6646]], [[0.0, 0.1809], [0.1809, 0.0]]], [[[0.0, 0.1809], [0.1809, 0.0]], [[0.6646, 0.0], [0.0, 0.6986]]]])
self.mo_eri_ba = None
self.mo_eri_bb = None
fcidump = FCIDump.from_file(self.get_resource_path('test_fcidump_h2.fcidump', 'second_q/formats/fcidump'))
self.problem = fcidump_to_problem(fcidump) |
class AllowedValueRangeType(GeneratedsSuper):
__hash__ = GeneratedsSuper.__hash__
subclass = None
superclass = None
def __init__(self, minimum=None, maximum=None, step=None, gds_collector_=None, **kwargs_):
self.gds_collector_ = gds_collector_
self.gds_elementtree_node_ = None
self.original_tagname_ = None
self.parent_object_ = kwargs_.get('parent_object_')
self.ns_prefix_ = None
self.minimum = minimum
self.minimum_nsprefix_ = None
self.maximum = maximum
self.maximum_nsprefix_ = None
self.step = step
self.step_nsprefix_ = None
def factory(*args_, **kwargs_):
if (CurrentSubclassModule_ is not None):
subclass = getSubclassFromModule_(CurrentSubclassModule_, AllowedValueRangeType)
if (subclass is not None):
return subclass(*args_, **kwargs_)
if AllowedValueRangeType.subclass:
return AllowedValueRangeType.subclass(*args_, **kwargs_)
else:
return AllowedValueRangeType(*args_, **kwargs_)
factory = staticmethod(factory)
def get_ns_prefix_(self):
return self.ns_prefix_
def set_ns_prefix_(self, ns_prefix):
self.ns_prefix_ = ns_prefix
def get_minimum(self):
return self.minimum
def set_minimum(self, minimum):
self.minimum = minimum
def get_maximum(self):
return self.maximum
def set_maximum(self, maximum):
self.maximum = maximum
def get_step(self):
return self.step
def set_step(self, step):
self.step = step
def has__content(self):
if ((self.minimum is not None) or (self.maximum is not None) or (self.step is not None)):
return True
else:
return False
def export(self, outfile, level, namespaceprefix_='', namespacedef_=' xmlns:None="urn:schemas-upnp-org:service-1-0" ', name_='AllowedValueRangeType', pretty_print=True):
imported_ns_def_ = GenerateDSNamespaceDefs_.get('AllowedValueRangeType')
if (imported_ns_def_ is not None):
namespacedef_ = imported_ns_def_
if pretty_print:
eol_ = '\n'
else:
eol_ = ''
if ((self.original_tagname_ is not None) and (name_ == 'AllowedValueRangeType')):
name_ = self.original_tagname_
if (UseCapturedNS_ and self.ns_prefix_):
namespaceprefix_ = (self.ns_prefix_ + ':')
showIndent(outfile, level, pretty_print)
outfile.write(('<%s%s%s' % (namespaceprefix_, name_, ((namespacedef_ and (' ' + namespacedef_)) or ''))))
already_processed = set()
self._exportAttributes(outfile, level, already_processed, namespaceprefix_, name_='AllowedValueRangeType')
if self.has__content():
outfile.write(('>%s' % (eol_,)))
self._exportChildren(outfile, (level + 1), namespaceprefix_, namespacedef_, name_='AllowedValueRangeType', pretty_print=pretty_print)
showIndent(outfile, level, pretty_print)
outfile.write(('</%s%s>%s' % (namespaceprefix_, name_, eol_)))
else:
outfile.write(('/>%s' % (eol_,)))
def _exportAttributes(self, outfile, level, already_processed, namespaceprefix_='', name_='AllowedValueRangeType'):
pass
def _exportChildren(self, outfile, level, namespaceprefix_='', namespacedef_=' xmlns:None="urn:schemas-upnp-org:service-1-0" ', name_='AllowedValueRangeType', fromsubclass_=False, pretty_print=True):
if pretty_print:
eol_ = '\n'
else:
eol_ = ''
if (self.minimum is not None):
namespaceprefix_ = ((self.minimum_nsprefix_ + ':') if (UseCapturedNS_ and self.minimum_nsprefix_) else '')
showIndent(outfile, level, pretty_print)
outfile.write(('<%sminimum>%s</%sminimum>%s' % (namespaceprefix_, self.gds_format_decimal(self.minimum, input_name='minimum'), namespaceprefix_, eol_)))
if (self.maximum is not None):
namespaceprefix_ = ((self.maximum_nsprefix_ + ':') if (UseCapturedNS_ and self.maximum_nsprefix_) else '')
showIndent(outfile, level, pretty_print)
outfile.write(('<%smaximum>%s</%smaximum>%s' % (namespaceprefix_, self.gds_format_decimal(self.maximum, input_name='maximum'), namespaceprefix_, eol_)))
if (self.step is not None):
namespaceprefix_ = ((self.step_nsprefix_ + ':') if (UseCapturedNS_ and self.step_nsprefix_) else '')
showIndent(outfile, level, pretty_print)
outfile.write(('<%sstep>%s</%sstep>%s' % (namespaceprefix_, self.gds_format_decimal(self.step, input_name='step'), namespaceprefix_, eol_)))
def build(self, node, gds_collector_=None):
self.gds_collector_ = gds_collector_
if SaveElementTreeNode:
self.gds_elementtree_node_ = node
already_processed = set()
self.ns_prefix_ = node.prefix
self._buildAttributes(node, node.attrib, already_processed)
for child in node:
nodeName_ = Tag_pattern_.match(child.tag).groups()[(- 1)]
self._buildChildren(child, node, nodeName_, gds_collector_=gds_collector_)
return self
def _buildAttributes(self, node, attrs, already_processed):
pass
def _buildChildren(self, child_, node, nodeName_, fromsubclass_=False, gds_collector_=None):
if ((nodeName_ == 'minimum') and child_.text):
sval_ = child_.text
fval_ = self.gds_parse_decimal(sval_, node, 'minimum')
fval_ = self.gds_validate_decimal(fval_, node, 'minimum')
self.minimum = fval_
self.minimum_nsprefix_ = child_.prefix
elif ((nodeName_ == 'maximum') and child_.text):
sval_ = child_.text
fval_ = self.gds_parse_decimal(sval_, node, 'maximum')
fval_ = self.gds_validate_decimal(fval_, node, 'maximum')
self.maximum = fval_
self.maximum_nsprefix_ = child_.prefix
elif ((nodeName_ == 'step') and child_.text):
sval_ = child_.text
fval_ = self.gds_parse_decimal(sval_, node, 'step')
fval_ = self.gds_validate_decimal(fval_, node, 'step')
self.step = fval_
self.step_nsprefix_ = child_.prefix |
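# Usage sketch for the generateDS-style class above, kept as comments since it
# relies on the generateDS runtime helpers (showIndent, gds_format_decimal, ...)
# being in scope:
# import sys
# rng = AllowedValueRangeType(minimum=0, maximum=100, step=1)
# rng.export(sys.stdout, 0, name_='allowedValueRange')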
@with_model
def test_multibonds():
Monomer('A', ['a'])
Monomer('B', ['b'])
Parameter('k1', 100)
Parameter('A_0', 200)
Parameter('B_0', 50)
Rule('r1', (((A(a=None) + A(a=None)) + B(b=None)) >> ((A(a=1) % A(a=[1, 2])) % B(b=2))), k1)
Initial(A(a=None), A_0)
Initial(B(b=None), B_0)
generate_equations(model)
assert model.species[2].is_equivalent_to(((A(a=1) % A(a=[1, 2])) % B(b=2))) |
class AutoEncoder(nn.Module):
def __init__(self, args):
super(AutoEncoder, self).__init__()
self.args = args
self.input_dim = args.input_dim
self.output_dim = self.input_dim
self.hidden_dims = args.hidden_dims
self.hidden_dims.append(args.latent_dim)
self.dims_list = (args.hidden_dims + args.hidden_dims[:(- 1)][::(- 1)])
self.n_layers = len(self.dims_list)
self.latent_dim = args.latent_dim
self.n_clusters = args.n_clusters
assert ((self.n_layers % 2) > 0)
assert (self.dims_list[(self.n_layers // 2)] == self.latent_dim)
layers = OrderedDict()
for (idx, hidden_dim) in enumerate(self.hidden_dims):
if (idx == 0):
layers.update({'linear0': nn.Linear(self.input_dim, hidden_dim), 'activation0': nn.ReLU()})
else:
layers.update({'linear{}'.format(idx): nn.Linear(self.hidden_dims[(idx - 1)], hidden_dim), 'activation{}'.format(idx): nn.ReLU(), 'bn{}'.format(idx): nn.BatchNorm1d(self.hidden_dims[idx])})
self.encoder = nn.Sequential(layers)
layers = OrderedDict()
tmp_hidden_dims = self.hidden_dims[::(- 1)]
for (idx, hidden_dim) in enumerate(tmp_hidden_dims):
if (idx == (len(tmp_hidden_dims) - 1)):
layers.update({'linear{}'.format(idx): nn.Linear(hidden_dim, self.output_dim)})
else:
layers.update({'linear{}'.format(idx): nn.Linear(hidden_dim, tmp_hidden_dims[(idx + 1)]), 'activation{}'.format(idx): nn.ReLU(), 'bn{}'.format(idx): nn.BatchNorm1d(tmp_hidden_dims[(idx + 1)])})
self.decoder = nn.Sequential(layers)
def __repr__(self):
repr_str = '[Structure]: {}-'.format(self.input_dim)
for (idx, dim) in enumerate(self.dims_list):
repr_str += '{}-'.format(dim)
repr_str += (str(self.output_dim) + '\n')
repr_str += ('[n_layers]: {}'.format(self.n_layers) + '\n')
repr_str += ('[n_clusters]: {}'.format(self.n_clusters) + '\n')
repr_str += '[input_dims]: {}'.format(self.input_dim)
return repr_str
def __str__(self):
return self.__repr__()
def forward(self, X, latent=False):
output = self.encoder(X)
if latent:
return output
return self.decoder(output) |
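# Usage sketch for AutoEncoder above; the args fields mirror __init__, the
# concrete dimensions are invented for illustration.
# from argparse import Namespace
# args = Namespace(input_dim=784, hidden_dims=[500, 200], latent_dim=10, n_clusters=10)
# ae = AutoEncoder(args)
# recon = ae(torch.rand(32, 784))            # full reconstruction
# z = ae(torch.rand(32, 784), latent=True)   # encoder output only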
def main():
parser = argparse.ArgumentParser()
parser.add_argument('--base_dir', default='.')
parser.add_argument('--output', default='training')
parser.add_argument('--dataset', default='ljspeech', choices=['blizzard', 'ljspeech', 'blizzard2013'])
parser.add_argument('--num_workers', type=int, default=cpu_count())
args = parser.parse_args()
if (args.dataset == 'blizzard'):
preprocess_blizzard(args)
elif (args.dataset == 'ljspeech'):
preprocess_ljspeech(args)
elif (args.dataset == 'blizzard2013'):
preprocess_blizzard2013(args) |
class InlineQueryHandler(BaseHandler[(Update, CCT)]):
__slots__ = ('pattern', 'chat_types')
def __init__(self, callback: HandlerCallback[(Update, CCT, RT)], pattern: Optional[Union[(str, Pattern[str])]]=None, block: DVType[bool]=DEFAULT_TRUE, chat_types: Optional[List[str]]=None):
super().__init__(callback, block=block)
if isinstance(pattern, str):
pattern = re.compile(pattern)
self.pattern: Optional[Union[(str, Pattern[str])]] = pattern
self.chat_types: Optional[List[str]] = chat_types
def check_update(self, update: object) -> Optional[Union[(bool, Match[str])]]:
if (isinstance(update, Update) and update.inline_query):
if ((self.chat_types is not None) and (update.inline_query.chat_type not in self.chat_types)):
return False
if (self.pattern and update.inline_query.query and (match := re.match(self.pattern, update.inline_query.query))):
return match
if (not self.pattern):
return True
return None
def collect_additional_context(self, context: CCT, update: Update, application: 'Application[Any, CCT, Any, Any, Any, Any]', check_result: Optional[Union[(bool, Match[str])]]) -> None:
if self.pattern:
check_result = cast(Match, check_result)
context.matches = [check_result] |
def get_multi_hop_model(rnn_dim, c2c: bool, q2c: bool, res_rnn: bool, res_self_att: bool, post_merge: bool, encoder: str, merge_type: str, num_c2c_hops: int):
recurrent_layer = CudnnGru(rnn_dim, w_init=TruncatedNormal(stddev=0.05))
answer_encoder = BinaryAnswerEncoder()
res_model = get_res_fc_seq_fc(model_rnn_dim=rnn_dim, rnn=res_rnn, self_att=res_self_att)
context_to_context = (AttentionWithPostMapper(BiAttention(TriLinear(bias=True), True), post_mapper=res_model) if c2c else None)
question_to_context = (AttentionWithPostMapper(BiAttention(TriLinear(bias=True), True), post_mapper=res_model) if q2c else None)
if (encoder == 'max'):
sequence_encoder = MaxPool(map_layer=None, min_val=0, regular_reshape=True)
elif (encoder == 'rnn'):
sequence_encoder = CudnnGruEncoder(rnn_dim, w_init=TruncatedNormal(stddev=0.05))
else:
raise NotImplementedError()
if (merge_type == 'max'):
attention_merger = MaxMerge(pre_map_layer=None, post_map_layer=(res_model if post_merge else None))
else:
attention_merger = WeightedMerge(pre_map_layer=None, post_map_layer=(res_model if post_merge else None), weight_type=merge_type)
return MultiHopContextsToQuestionModel(encoder=QuestionsAndParagraphsEncoder(answer_encoder), word_embed=FixedWordEmbedder(vec_name='glove.840B.300d', word_vec_init_scale=0, learn_unk=False, cpu=True), char_embed=CharWordEmbedder(LearnedCharEmbedder(word_size_th=14, char_th=50, char_dim=20, init_scale=0.05, force_cpu=True), MaxPool(Conv1d(100, 5, 0.8)), shared_parameters=True), embed_mapper=SequenceMapperSeq(VariationalDropoutLayer(0.8), recurrent_layer, VariationalDropoutLayer(0.8)), question_to_context_attention=question_to_context, context_to_context_attention=context_to_context, c2c_hops=num_c2c_hops, context_to_question_attention=BiAttention(TriLinear(bias=True), True), attention_merger=attention_merger, sequence_encoder=sequence_encoder, predictor=BinaryFixedPredictor()) |
def main(args):
IMAGENET_MEAN = [0.485, 0.456, 0.406]
IMAGENET_STD = [0.229, 0.224, 0.225]
transform = [T.ToTensor()]
transform.append(T.Normalize(mean=IMAGENET_MEAN, std=IMAGENET_STD))
im_transform = T.Compose(transform)
orig_images = os.listdir(args.orig_image_path)
N = len(orig_images)
print(N)
net = lpips.LPIPS(net='alex')
net = net.cuda()
net.eval()
scores = []
with torch.no_grad():
for i in tqdm(range(N)):
orig_image = im_transform(Image.open(os.path.join(args.orig_image_path, orig_images[i])).convert('RGB'))
orig_image = orig_image.cuda()
orig_image = orig_image.unsqueeze(0)
for j in range(args.generated_image_number):
generated_image = im_transform(Image.open(os.path.join(args.generated_image_path, (((orig_images[i][:(- 4)] + '_numb_') + str(j)) + '.jpg'))).convert('RGB'))
generated_image = generated_image.cuda()
generated_image = generated_image.unsqueeze(0)
score = net(orig_image, generated_image).squeeze()
scores.append(score.cpu().numpy())
scores_all = np.asarray(scores)
scores_mean = np.mean(scores_all)
scores_std = np.std(scores_all)
print(('mean diversity scores = %4.2f%% +- %4.2f%%' % (scores_mean, scores_std))) |
class Effect2296(BaseEffect):
type = 'passive'
@staticmethod
def handler(fit, booster, context, projectionRange, **kwargs):
for (srcResType, tgtResType) in (('Em', 'Em'), ('Explosive', 'Explosive'), ('Kinetic', 'Kinetic'), ('Thermic', 'Thermal')):
fit.ship.boostItemAttr(f'armor{tgtResType}DamageResonance', booster.getModifiedItemAttr(f'passive{srcResType}DamageResistanceBonus'), **kwargs) |
class Event(object):
def __init__(self, raw):
self.raw = raw
self.from_user = False
self.from_chat = False
self.from_group = False
self.from_me = False
self.to_me = False
self.attachments = {}
self.message_data = None
self.message_id = None
self.timestamp = None
self.peer_id = None
self.flags = None
self.extra = None
self.extra_values = None
self.type_id = None
try:
self.type = VkEventType(self.raw[0])
self._list_to_attr(self.raw[1:], EVENT_ATTRS_MAPPING[self.type])
except ValueError:
self.type = self.raw[0]
if self.extra_values:
self._dict_to_attr(self.extra_values)
if (self.type in PARSE_PEER_ID_EVENTS):
self._parse_peer_id()
if (self.type in PARSE_MESSAGE_FLAGS_EVENTS):
self._parse_message_flags()
if (self.type is VkEventType.CHAT_UPDATE):
self._parse_chat_info()
try:
self.update_type = VkChatEventType(self.type_id)
except ValueError:
self.update_type = self.type_id
elif (self.type is VkEventType.NOTIFICATION_SETTINGS_UPDATE):
self._dict_to_attr(self.values)
self._parse_peer_id()
elif (self.type is VkEventType.PEER_FLAGS_REPLACE):
self._parse_peer_flags()
elif (self.type in [VkEventType.MESSAGE_NEW, VkEventType.MESSAGE_EDIT]):
self._parse_message()
elif (self.type in [VkEventType.USER_ONLINE, VkEventType.USER_OFFLINE]):
self.user_id = abs(self.user_id)
self._parse_online_status()
elif (self.type is VkEventType.USER_RECORDING_VOICE):
if isinstance(self.user_id, list):
self.user_id = self.user_id[0]
if self.timestamp:
self.datetime = datetime.utcfromtimestamp(self.timestamp)
def _list_to_attr(self, raw, attrs):
for i in range(min(len(raw), len(attrs))):
self.__setattr__(attrs[i], raw[i])
def _dict_to_attr(self, values):
for (k, v) in values.items():
self.__setattr__(k, v)
def _parse_peer_id(self):
if (self.peer_id < 0):
self.from_group = True
self.group_id = abs(self.peer_id)
elif (self.peer_id > CHAT_START_ID):
self.from_chat = True
self.chat_id = (self.peer_id - CHAT_START_ID)
if (self.extra_values and ('from' in self.extra_values)):
self.user_id = int(self.extra_values['from'])
else:
self.from_user = True
self.user_id = self.peer_id
def _parse_message_flags(self):
self.message_flags = {x for x in VkMessageFlag if (self.flags & x)}
def _parse_peer_flags(self):
self.peer_flags = {x for x in VkPeerFlag if (self.flags & x)}
def _parse_message(self):
if (self.type is VkEventType.MESSAGE_NEW):
if (self.flags & VkMessageFlag.OUTBOX):
self.from_me = True
else:
self.to_me = True
self.text = self.text.replace('<br>', '\n')
self.message = self.text.replace('&lt;', '<').replace('&gt;', '>').replace('&quot;', '"').replace('&amp;', '&')
def _parse_online_status(self):
try:
if (self.type is VkEventType.USER_ONLINE):
self.platform = VkPlatform((self.extra & 255))
elif (self.type is VkEventType.USER_OFFLINE):
self.offline_type = VkOfflineType(self.flags)
except ValueError:
pass
def _parse_chat_info(self):
if (self.type_id == VkChatEventType.ADMIN_ADDED.value):
self.info = {'admin_id': self.info}
elif (self.type_id == VkChatEventType.MESSAGE_PINNED.value):
self.info = {'conversation_message_id': self.info}
elif (self.type_id in [VkChatEventType.USER_JOINED.value, VkChatEventType.USER_LEFT.value, VkChatEventType.USER_KICKED.value, VkChatEventType.ADMIN_REMOVED.value]):
self.info = {'user_id': self.info} |
class GitlabCLI():
def __init__(self, gl: gitlab.Gitlab, gitlab_resource: str, resource_action: str, args: Dict[(str, str)]) -> None:
self.cls: Type[gitlab.base.RESTObject] = cli.gitlab_resource_to_cls(gitlab_resource, namespace=gitlab.v4.objects)
self.cls_name = self.cls.__name__
self.gitlab_resource = gitlab_resource.replace('-', '_')
self.resource_action = resource_action.lower()
self.gl = gl
self.args = args
self.parent_args: Dict[(str, Any)] = {}
self.mgr_cls: Union[(Type[gitlab.mixins.CreateMixin], Type[gitlab.mixins.DeleteMixin], Type[gitlab.mixins.GetMixin], Type[gitlab.mixins.GetWithoutIdMixin], Type[gitlab.mixins.ListMixin], Type[gitlab.mixins.UpdateMixin])] = getattr(gitlab.v4.objects, f'{self.cls.__name__}Manager')
if TYPE_CHECKING:
assert (self.mgr_cls._path is not None)
self._process_from_parent_attrs()
self.mgr_cls._path = self.mgr_cls._path.format(**self.parent_args)
self.mgr = self.mgr_cls(gl)
self.mgr._from_parent_attrs = self.parent_args
if self.mgr_cls._types:
for (attr_name, type_cls) in self.mgr_cls._types.items():
if (attr_name in self.args.keys()):
obj = type_cls()
obj.set_from_cli(self.args[attr_name])
self.args[attr_name] = obj.get()
def _process_from_parent_attrs(self) -> None:
for key in self.mgr_cls._from_parent_attrs:
if (key not in self.args):
continue
self.parent_args[key] = gitlab.utils.EncodedId(self.args[key])
del self.args[key]
def run(self) -> Any:
method = f'do_{self.gitlab_resource}_{self.resource_action}'
if hasattr(self, method):
return getattr(self, method)()
method = f'do_{self.resource_action}'
if hasattr(self, method):
return getattr(self, method)()
return self.do_custom()
def do_custom(self) -> Any:
class_instance: Union[(gitlab.base.RESTManager, gitlab.base.RESTObject)]
in_obj = cli.custom_actions[self.cls_name][self.resource_action][2]
if in_obj:
data = {}
if self.mgr._from_parent_attrs:
for k in self.mgr._from_parent_attrs:
data[k] = self.parent_args[k]
if (not issubclass(self.cls, gitlab.mixins.GetWithoutIdMixin)):
if TYPE_CHECKING:
assert isinstance(self.cls._id_attr, str)
data[self.cls._id_attr] = self.args.pop(self.cls._id_attr)
class_instance = self.cls(self.mgr, data)
else:
class_instance = self.mgr
method_name = self.resource_action.replace('-', '_')
return getattr(class_instance, method_name)(**self.args)
def do_project_export_download(self) -> None:
try:
project = self.gl.projects.get(self.parent_args['project_id'], lazy=True)
export_status = project.exports.get()
if TYPE_CHECKING:
assert (export_status is not None)
data = export_status.download()
if TYPE_CHECKING:
assert (data is not None)
assert isinstance(data, bytes)
sys.stdout.buffer.write(data)
except Exception as e:
cli.die('Impossible to download the export', e)
def do_validate(self) -> None:
if TYPE_CHECKING:
assert isinstance(self.mgr, gitlab.v4.objects.CiLintManager)
try:
self.mgr.validate(self.args)
except GitlabCiLintError as e:
cli.die('CI YAML Lint failed', e)
except Exception as e:
cli.die('Cannot validate CI YAML', e)
def do_create(self) -> gitlab.base.RESTObject:
if TYPE_CHECKING:
assert isinstance(self.mgr, gitlab.mixins.CreateMixin)
try:
result = self.mgr.create(self.args)
except Exception as e:
cli.die('Impossible to create object', e)
return result
def do_list(self) -> Union[(gitlab.base.RESTObjectList, List[gitlab.base.RESTObject])]:
if TYPE_CHECKING:
assert isinstance(self.mgr, gitlab.mixins.ListMixin)
try:
result = self.mgr.list(**self.args)
except Exception as e:
cli.die('Impossible to list objects', e)
return result
def do_get(self) -> Optional[gitlab.base.RESTObject]:
if isinstance(self.mgr, gitlab.mixins.GetWithoutIdMixin):
try:
result = self.mgr.get(id=None, **self.args)
except Exception as e:
cli.die('Impossible to get object', e)
return result
if TYPE_CHECKING:
assert isinstance(self.mgr, gitlab.mixins.GetMixin)
assert isinstance(self.cls._id_attr, str)
id = self.args.pop(self.cls._id_attr)
try:
result = self.mgr.get(id, lazy=False, **self.args)
except Exception as e:
cli.die('Impossible to get object', e)
return result
def do_delete(self) -> None:
if TYPE_CHECKING:
assert isinstance(self.mgr, gitlab.mixins.DeleteMixin)
assert isinstance(self.cls._id_attr, str)
id = self.args.pop(self.cls._id_attr)
try:
self.mgr.delete(id, **self.args)
except Exception as e:
cli.die('Impossible to destroy object', e)
def do_update(self) -> Dict[(str, Any)]:
if TYPE_CHECKING:
assert isinstance(self.mgr, gitlab.mixins.UpdateMixin)
if issubclass(self.mgr_cls, gitlab.mixins.GetWithoutIdMixin):
id = None
else:
if TYPE_CHECKING:
assert isinstance(self.cls._id_attr, str)
id = self.args.pop(self.cls._id_attr)
try:
result = self.mgr.update(id, self.args)
except Exception as e:
cli.die('Impossible to update object', e)
return result |
@with_model
def test_complex_pattern_equivalence_bond_state():
Monomer('A', ['s'], {'s': ['x', 'y', 'z']})
cp0 = (A(s=('x', 1)) % A(s=('y', 1)))
cp1 = (A(s=('y', 1)) % A(s=('x', 1)))
cp2 = (A(s=('z', 1)) % A(s=('y', 1)))
cp3 = (A(s='x') % A(s='y'))
_check_pattern_equivalence((cp0, cp1))
_check_pattern_equivalence((cp0, cp2), False)
_check_pattern_equivalence((cp0, cp3), False) |
@wp.kernel
def p2g(model: MPMModelStruct, state_in: MPMStateStruct, state_out: MPMStateStruct, gravity: wp.vec3, dt: float):
p = wp.tid()
contact_force = state_in.particle_f[p]
x = state_in.particle_q[p]
x = (x - (wp.vec3(float(state_in.grid_lower[0]), float(state_in.grid_lower[1]), float(state_in.grid_lower[2])) * model.dx))
C = state_in.particle_C[p]
mass = model.particle_mass[p]
I33 = wp.mat33(1.0, 0.0, 0.0, 0.0, 1.0, 0.0, 0.0, 0.0, 1.0)
F_tmp = ((I33 + (state_in.particle_C[p] * dt)) * state_in.particle_F[p])
(U, s, V) = wp.svd3(F_tmp)
new_v = (state_in.particle_qd[p] + (gravity * dt))
k_mu = model.particle_mu_lam_ys[p][0]
k_lam = model.particle_mu_lam_ys[p][1]
k_ys = model.particle_mu_lam_ys[p][2]
vc = state_in.particle_volume_correction[p]
type = model.particle_type[p]
if (type == 0):
(new_F, vc) = compute_von_mises(F_tmp, U, s, V, k_ys, k_mu)
if (type == 1):
k_friction = model.particle_friction_cohesion[p][0]
k_cohesion = model.particle_friction_cohesion[p][1]
(new_F, delta_vc) = compute_drucker_prager(F_tmp, U, s, V, k_mu, k_lam, k_friction, k_cohesion, vc)
state_out.particle_volume_correction[p] = (vc + delta_vc)
rest_volume = model.particle_vol[p]
state_out.particle_F[p] = new_F
if (type == 2):
current_volume = state_in.particle_vol[p]
k_viscosity = model.particle_friction_cohesion[p][2]
pressure = (1000.0 * (((rest_volume / current_volume) ** 7.0) - 1.0))
pressure = wp.max((- 10.0), pressure)
c01 = ((C[(0, 1)] + C[(1, 0)]) * k_viscosity)
c02 = ((C[(0, 2)] + C[(2, 0)]) * k_viscosity)
c12 = ((C[(1, 2)] + C[(2, 1)]) * k_viscosity)
stress_cauchy = wp.mat33((- pressure), c01, c02, c01, (- pressure), c12, c02, c12, (- pressure))
else:
J = wp.determinant(new_F)
r = (U * wp.transpose(V))
stress_cauchy = ((((new_F - r) * wp.transpose(new_F)) * (2.0 * k_mu)) + (I33 * ((k_lam * J) * (J - 1.0))))
stress = (stress_cauchy * (((((- 4.0) * model.inv_dx) * model.inv_dx) * rest_volume) * dt))
affine = (stress + (C * mass))
mv = ((new_v * mass) + (contact_force * dt))
grid_pos = (x * model.inv_dx)
base_pos_x = wp.int((grid_pos[0] - 0.5))
base_pos_y = wp.int((grid_pos[1] - 0.5))
base_pos_z = wp.int((grid_pos[2] - 0.5))
fx = (grid_pos - wp.vec3(wp.float(base_pos_x), wp.float(base_pos_y), wp.float(base_pos_z)))
wa = (wp.vec3(1.5) - fx)
wb = (fx - wp.vec3(1.0))
wc = (fx - wp.vec3(0.5))
w = wp.mat33((wp.cw_mul(wa, wa) * 0.5), ((wp.vec3(0.0, 0.0, 0.0) - wp.cw_mul(wb, wb)) + wp.vec3(0.75)), (wp.cw_mul(wc, wc) * 0.5))
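# Scatter mass and momentum into the surrounding 3x3x3 grid nodes using the
# quadratic B-spline weights assembled in `w` (one row per stencil offset).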
for i in range(0, 3):
for j in range(0, 3):
for k in range(0, 3):
weight = ((w[(0, i)] * w[(1, j)]) * w[(2, k)])
dpos = ((wp.vec3(wp.float(i), wp.float(j), wp.float(k)) - fx) * model.dx)
momentum = (mv + (affine * dpos))
ix = (base_pos_x + i)
iy = (base_pos_y + j)
iz = (base_pos_z + k)
wp.atomic_add(state_in.grid_mv, ix, iy, iz, (momentum * weight))
wp.atomic_add(state_in.grid_m, ix, iy, iz, (mass * weight)) |
def main():
Format()
a = Matrix(2, 2, (1, 2, 3, 4))
b = Matrix(2, 1, (5, 6))
c = (a * b)
print(a, b, '=', c)
(x, y) = symbols('x, y')
d = Matrix(1, 2, ((x ** 3), (y ** 3)))
e = Matrix(2, 2, ((x ** 2), ((2 * x) * y), ((2 * x) * y), (y ** 2)))
f = (d * e)
print('%', d, e, '=', f)
xpdf(pdfprog=None)
return |
def total_processes_number(local_rank):
if is_torch_tpu_available():
import torch_xla.core.xla_model as xm
return xm.xrt_world_size()
elif is_sagemaker_dp_enabled():
import smdistributed.dataparallel.torch.distributed as dist
return dist.get_world_size()
elif ((local_rank != (- 1)) and is_torch_available()):
import torch
return torch.distributed.get_world_size()
return 1 |
class ReBrainTest(unittest.TestCase):
def test_regex_flags(self) -> None:
names = [name for name in dir(re) if name.isupper()]
re_ast = MANAGER.ast_from_module_name('re')
for name in names:
self.assertIn(name, re_ast)
self.assertEqual(next(re_ast[name].infer()).value, getattr(re, name))
@test_utils.require_version(maxver='3.9')
def test_re_pattern_unsubscriptable(self):
right_node1 = builder.extract_node('\n import re\n re.Pattern\n ')
inferred1 = next(right_node1.infer())
assert isinstance(inferred1, nodes.ClassDef)
with self.assertRaises(AttributeInferenceError):
assert isinstance(inferred1.getattr('__class_getitem__')[0], nodes.FunctionDef)
right_node2 = builder.extract_node('\n import re\n re.Pattern\n ')
inferred2 = next(right_node2.infer())
assert isinstance(inferred2, nodes.ClassDef)
with self.assertRaises(AttributeInferenceError):
assert isinstance(inferred2.getattr('__class_getitem__')[0], nodes.FunctionDef)
wrong_node1 = builder.extract_node('\n import re\n re.Pattern[int]\n ')
with self.assertRaises(InferenceError):
next(wrong_node1.infer())
wrong_node2 = builder.extract_node('\n import re\n re.Match[int]\n ')
with self.assertRaises(InferenceError):
next(wrong_node2.infer())
@test_utils.require_version(minver='3.9')
def test_re_pattern_subscriptable(self):
node1 = builder.extract_node('\n import re\n re.Pattern[str]\n ')
inferred1 = next(node1.infer())
assert isinstance(inferred1, nodes.ClassDef)
assert isinstance(inferred1.getattr('__class_getitem__')[0], nodes.FunctionDef)
node2 = builder.extract_node('\n import re\n re.Match[str]\n ')
inferred2 = next(node2.infer())
assert isinstance(inferred2, nodes.ClassDef)
assert isinstance(inferred2.getattr('__class_getitem__')[0], nodes.FunctionDef) |
class CollaborativeAdaptiveOptimizer(CollaborativeOptimizer):
def __init__(self, opt: torch.optim.Optimizer, average_opt_statistics: Sequence[str], **kwargs):
super().__init__(opt, average_opt_statistics=average_opt_statistics, **kwargs)
def _make_averager(self, average_opt_statistics, **kwargs):
return TrainingAverager(self.opt, dht=self.dht, average_parameters=True, average_gradients=False, average_opt_statistics=average_opt_statistics, prefix=f'{self.prefix}_averaging', allreduce_timeout=self.averaging_timeout, client_mode=self.client_mode, **kwargs) |
class ConvBNLayer(nn.Module):
def __init__(self, ch_in, ch_out, filter_size=3, stride=1, groups=1, padding=0, act='swish'):
super(ConvBNLayer, self).__init__()
self.conv = nn.Conv2d(in_channels=ch_in, out_channels=ch_out, kernel_size=filter_size, stride=stride, padding=padding, groups=groups, bias=False)
self.bn = nn.BatchNorm2d(ch_out)
self.act = get_activation(act, inplace=True)
def forward(self, x):
x = self.conv(x)
x = self.bn(x)
x = self.act(x)
return x |
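# Usage sketch for ConvBNLayer above (shapes invented): a 3x3 conv with
# padding=1 preserves spatial size; depends on get_activation being in scope.
# layer = ConvBNLayer(ch_in=3, ch_out=16, padding=1)
# layer(torch.rand(2, 3, 32, 32)).shape  # torch.Size([2, 16, 32, 32])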
@contextlib.contextmanager
def print_new_wheels(msg: str, output_dir: Path) -> Generator[(None, None, None)]:
start_time = time.time()
existing_contents = set(output_dir.iterdir())
(yield)
final_contents = set(output_dir.iterdir())
new_contents = [FileReport(wheel.name, f'{((wheel.stat().st_size + 1023) // 1024):,d}') for wheel in (final_contents - existing_contents)]
if (not new_contents):
return
max_name_len = max((len(f.name) for f in new_contents))
max_size_len = max((len(f.size) for f in new_contents))
n = len(new_contents)
s = (time.time() - start_time)
m = (s / 60)
print(msg.format(n=n, s=s, m=m), *sorted((f' {f.name:<{max_name_len}s} {f.size:>{max_size_len}s} kB' for f in new_contents)), sep='\n') |
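# Usage sketch for print_new_wheels above, assuming the context-manager
# decorator restored on the definition; paths and the build command are
# invented for illustration.
# with print_new_wheels('Built {n} wheels in {m:.1f} min', Path('dist')):
#     subprocess.run(['pip', 'wheel', '.', '-w', 'dist'], check=True)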
class BasicConv(nn.Module):
def __init__(self, in_planes, out_planes, kernel_size, stride=1, padding=0, dilation=1, groups=1, bias=True):
super(BasicConv, self).__init__()
self.out_channels = out_planes
self.conv = nn.Conv2d(in_planes, out_planes, kernel_size=kernel_size, stride=stride, padding=padding, dilation=dilation, groups=groups, bias=bias)
def forward(self, x):
x = self.conv(x)
return x |
@dataclass(frozen=True)
class MinLen(AnnotatedTypesCheck):
value: Any
def predicate(self, value: Any) -> bool:
return (len(value) >= self.value)
def is_compatible_metadata(self, metadata: AnnotatedTypesCheck) -> bool:
if isinstance(metadata, MinLen):
return (metadata.value >= self.value)
else:
return False
def can_assign_non_literal(self, value: Value) -> CanAssign:
min_len = _min_len_of_value(value)
if ((min_len is not None) and (min_len >= self.value)):
return {}
return super().can_assign_non_literal(value) |
def generator_unet(image, out_dim, params=dict(), is_training=True, name='generator'):
feat_ch = int(params.get('feat_ch', 64))
dropout_rate = (0.5 if is_training else 1.0)
with tf.variable_scope(name, reuse=tf.AUTO_REUSE):
e1 = inst_norm(conv2d(image, feat_ch, name='g_e1_conv'))
e2 = inst_norm(conv2d(lrelu(e1), (feat_ch * 2), name='g_e2_conv'), 'g_bn_e2')
e3 = inst_norm(conv2d(lrelu(e2), (feat_ch * 4), name='g_e3_conv'), 'g_bn_e3')
e4 = inst_norm(conv2d(lrelu(e3), (feat_ch * 8), name='g_e4_conv'), 'g_bn_e4')
e5 = inst_norm(conv2d(lrelu(e4), (feat_ch * 8), name='g_e5_conv'), 'g_bn_e5')
d3 = e5
d4 = conv2d(upsample(tf.nn.relu(d3), blinear=False), (feat_ch * 8), ks=3, s=1, name='g_d4')
d4 = tf.concat([inst_norm(d4, 'g_bn_d4'), e4], 3)
d5 = conv2d(upsample(tf.nn.relu(d4), blinear=False), (feat_ch * 4), ks=3, s=1, name='g_d5')
d5 = tf.concat([inst_norm(d5, 'g_bn_d5'), e3], 3)
d6 = conv2d(upsample(tf.nn.relu(d5), blinear=False), (feat_ch * 2), ks=3, s=1, name='g_d6')
d6 = tf.concat([inst_norm(d6, 'g_bn_d6'), e2], 3)
d7 = conv2d(upsample(tf.nn.relu(d6), blinear=False), feat_ch, ks=3, s=1, name='g_d7')
d7 = tf.concat([inst_norm(d7, 'g_bn_d7'), e1], 3)
d8 = conv2d(upsample(tf.nn.relu(d7), blinear=False), out_dim, ks=3, s=1, name='g_d8')
return d8 |
class XTSEExchangeCalendar(TradingCalendar):
regular_early_close = time(13)
name = 'XTSE'
tz = timezone('America/Toronto')
open_times = ((None, time(9, 31)),)
close_times = ((None, time(16)),)
@property
def regular_holidays(self):
return HolidayCalendar([XTSENewYearsDay, FamilyDay, GoodFriday, VictoriaDay, CanadaDay, CivicHoliday, LaborDay, CanadianThanksgiving, Christmas, WeekendChristmas, BoxingDay, WeekendBoxingDay])
@property
def adhoc_holidays(self):
return list(chain(September11ClosingsCanada))
@property
def special_closes(self):
return [(self.regular_early_close, HolidayCalendar([ChristmasEveEarlyClose2010Onwards]))] |
class GRAFConfig(BaseConfig):
name = 'graf'
hint = 'Train a GRAF model.'
info = '\nTo train a GRAF model, the recommend settings are as follows:\n\n\x08\n- batch_size: 8 (for FF-HQ dataset, 8 GPU)\n- val_batch_size: 8 (for FF-HQ dataset, 8 GPU)\n- data_repeat: 200 (for FF-HQ dataset)\n- total_img: 25_000_000 (for FF-HQ dataset)\n- train_data_mirror: True (for FF-HQ dataset)\n'
def __init__(self, kwargs):
super().__init__(kwargs)
self.config.runner_type = RUNNER
@classmethod
def get_options(cls):
options = super().get_options()
options['Data transformation settings'].extend([cls.command_option('--resolution', type=cls.int_type, default=128, help='Resolution of the training images.'), cls.command_option('--image_channels', type=cls.int_type, default=3, help='Number of channels of the training images.'), cls.command_option('--min_val', type=cls.float_type, default=(- 1.0), help='Minimum pixel value of the training images.'), cls.command_option('--max_val', type=cls.float_type, default=1.0, help='Maximum pixel value of the training images.'), cls.command_option('--resize_size', type=cls.int_type, default=0, help='Size for resizing images before cropping. `0` means no cropping.'), cls.command_option('--crop_size', type=cls.int_type, default=0, help='Size for cropping images. `0` means no cropping.')])
options['Network settings'].extend([cls.command_option('--latent_dim', type=cls.int_type, default=256, help='The dimension of the latent space Z.'), cls.command_option('--label_dim', type=cls.int_type, default=0, help='Number of classes in conditioning training. Set to `0` to disable conditional training.'), cls.command_option('--chunk', type=cls.int_type, default=32768, help='Chunk of tensor, which is used for splitting tensor into small chunks in case of OOM.')])
options['Training settings'].extend([cls.command_option('--d_lr', type=cls.float_type, default=0.002, help='The learning rate of discriminator.'), cls.command_option('--d_beta_1', type=cls.float_type, default=0.0, help='The Adam hyper-parameter `beta_1` for discriminator optimizer.'), cls.command_option('--d_beta_2', type=cls.float_type, default=0.99, help='The Adam hyper-parameter `beta_2` for discriminator optimizer.'), cls.command_option('--g_lr', type=cls.float_type, default=0.002, help='The learning rate of generator.'), cls.command_option('--g_beta_1', type=cls.float_type, default=0.0, help='The Adam hyper-parameter `beta_1` for generator optimizer.'), cls.command_option('--g_beta_2', type=cls.float_type, default=0.99, help='The Adam hyper-parameter `beta_2` for generator optimizer.'), cls.command_option('--r1_gamma', type=cls.float_type, default=10.0, help='Factor to control the strength of gradient penalty.'), cls.command_option('--g_ema_img', type=cls.int_type, default=10000, help='Factor for updating the smoothed generator, which is particularly used for inference.'), cls.command_option('--g_ema_rampup', type=cls.float_type, default=0.0, help='Rampup factor for updating the smoothed generator, which is particularly used for inference. Set as `0` to disable warming up.'), cls.command_option('--use_ada', type=cls.bool_type, default=False, help='Whether to use adaptive augmentation pipeline.')])
options['Rendering options'].extend([cls.command_option('--rendering_resolution', type=cls.int_type, default=32, help='Resolution of volume rendering images.'), cls.command_option('--clamp_mode', type=click.Choice(['softplus', 'relu', 'mipnerf']), default='relu', help='Clamp mode of `sigmas` in the integration process.'), cls.command_option('--num_points', type=cls.int_type, default=64, help='Number of uniform samples to take per ray in coarse pass.'), cls.command_option('--num_importance', type=cls.int_type, default=0, help='Number of importance samples to take per ray in fine pass.'), cls.command_option('--ray_start', type=cls.float_type, default=0.88, help='Near point along each ray to start taking samples.'), cls.command_option('--ray_end', type=cls.float_type, default=1.12, help='Far point along each ray to stop taking samples.'), cls.command_option('--radius_fix', type=cls.float_type, default=1.0, help='Radius of sphere for sampling camera position.'), cls.command_option('--polar_mean', type=cls.float_type, default=(PI / 2), help='Mean of polar (vertical) angle for sampling camera position.'), cls.command_option('--polar_stddev', type=cls.float_type, default=0.155, help='Standard deviation of polar (vertical) angle of sphere for sampling camera position.'), cls.command_option('--azimuthal_mean', type=cls.float_type, default=(PI / 2), help='Mean of azimuthal (horizontal) angle for sampling camera position.'), cls.command_option('--azimuthal_stddev', type=cls.float_type, default=0.3, help='Standard deviation of azimuthal (horizontal) angle of sphere for sampling camera position.'), cls.command_option('--fov', type=cls.float_type, default=12, help='Field of view of the camera.'), cls.command_option('--perturbation_strategy', type=click.Choice(['no', 'middle_uniform', 'uniform', 'self_uniform']), default='self_uniform', help='Strategy for perturbing depth samples along each ray.')])
return options
def parse_options(self):
super().parse_options()
resolution = self.args.pop('resolution')
image_channels = self.args.pop('image_channels')
min_val = self.args.pop('min_val')
max_val = self.args.pop('max_val')
data_transform_kwargs = dict(image_size=resolution, image_channels=image_channels, min_val=min_val, max_val=max_val, resize_size_pre=self.args.pop('resize_size'), crop_size_pre=self.args.pop('crop_size'))
self.config.data.train.dataset_type = DATASET
self.config.data.train.transform_kwargs = data_transform_kwargs
self.config.data.val.dataset_type = DATASET
self.config.data.val.transform_kwargs = data_transform_kwargs
latent_dim = self.args.pop('latent_dim')
label_dim = self.args.pop('label_dim')
d_lr = self.args.pop('d_lr')
d_beta_1 = self.args.pop('d_beta_1')
d_beta_2 = self.args.pop('d_beta_2')
g_lr = self.args.pop('g_lr')
g_beta_1 = self.args.pop('g_beta_1')
g_beta_2 = self.args.pop('g_beta_2')
rendering_resolution = self.args.pop('rendering_resolution')
patch_kwargs = dict(patch_resolution=rendering_resolution, full_resolution=resolution, min_scale=0.25, max_scale=1.0, scale_anneal=0.0025)
self.config.patch_kwargs = patch_kwargs
point_sampling_kwargs = dict(image_boundary_value=1.0, x_axis_right=True, y_axis_up=True, z_axis_out=True, radius_strategy='fix', radius_fix=self.args.pop('radius_fix'), polar_strategy='normal', polar_mean=self.args.pop('polar_mean'), polar_stddev=self.args.pop('polar_stddev'), azimuthal_strategy='normal', azimuthal_mean=self.args.pop('azimuthal_mean'), azimuthal_stddev=self.args.pop('azimuthal_stddev'), fov=self.args.pop('fov'), perturbation_strategy=self.args.pop('perturbation_strategy'), dis_min=self.args.pop('ray_start'), dis_max=self.args.pop('ray_end'), num_points=self.args.pop('num_points'))
ray_marching_kwargs = dict(use_mid_point=False, density_clamp_mode=self.args.pop('clamp_mode'), normalize_radial_dist=False, clip_radial_dist=False, scale_color=False)
self.config.models.update(discriminator=dict(model=dict(model_type=DISCRIMINATOR, nc=3, ndf=64, imsize=rendering_resolution, hflip=False), lr=dict(lr_type='FIXED'), opt=dict(opt_type='Adam', base_lr=d_lr, betas=(d_beta_1, d_beta_2)), kwargs_train=dict(), kwargs_val=dict(), has_unused_parameters=True), generator=dict(model=dict(model_type=GENERATOR, z_shape_dim=128, z_appearance_dim=128, mlp_depth=8, mlp_width=256, output_ch=4, skips=[4], use_viewdirs=True, pe_input_dim=3, pe_num_freqs=10, pe_viewdirs_num_freqs=4, chunk=self.args.pop('chunk'), num_importance=self.args.pop('num_importance'), point_sampling_kwargs=point_sampling_kwargs, ray_marching_kwargs=ray_marching_kwargs), lr=dict(lr_type='FIXED'), opt=dict(opt_type='Adam', base_lr=g_lr, betas=(g_beta_1, g_beta_2)), kwargs_train=dict(), kwargs_val=dict(), g_ema_img=self.args.pop('g_ema_img'), g_ema_rampup=self.args.pop('g_ema_rampup'), has_unused_parameters=True))
self.config.loss.update(loss_type=LOSS, d_loss_kwargs=dict(r1_gamma=self.args.pop('r1_gamma')), g_loss_kwargs=dict())
self.config.controllers.update(DatasetVisualizer=dict(viz_keys='raw_image', viz_num=(32 if (label_dim == 0) else 8), viz_name='Real Data', viz_groups=(4 if (label_dim == 0) else 1), viz_classes=min(10, label_dim), row_major=True, min_val=min_val, max_val=max_val, shuffle=False))
if self.args.pop('use_ada'):
self.config.aug.update(aug_type='AdaAug', xflip=1, rotate90=1, xint=1, scale=1, rotate=1, aniso=1, xfrac=1, brightness=1, contrast=1, lumaflip=1, hue=1, saturation=1, imgfilter=0, noise=0, cutout=0)
self.config.aug_kwargs.update(impl='cuda')
self.config.controllers.update(AdaAugController=dict(every_n_iters=4, init_p=0.0, target_p=0.6, speed_img=500000, strategy='adaptive'))
self.config.metrics.update(FID=dict(init_kwargs=dict(name='fid', latent_dim=latent_dim, label_dim=label_dim, real_num=20000, fake_num=1000, image_size=resolution), eval_kwargs=dict(generator_smooth=dict()), interval=None, first_iter=None, save_best=True), GANSnapshot=dict(init_kwargs=dict(name='snapshot', latent_dim=latent_dim, latent_num=32, label_dim=label_dim, min_val=min_val, max_val=max_val), eval_kwargs=dict(generator_smooth=dict()), interval=None, first_iter=None, save_best=False)) |
def test_show_fixtures_and_test(pytester: Pytester, dummy_yaml_custom_test: None) -> None:
pytester.makepyfile('\n import pytest\n \n def arg():\n assert False\n def test_arg(arg):\n assert False\n ')
result = pytester.runpytest('--setup-plan')
assert (result.ret == 0)
result.stdout.fnmatch_lines(['*SETUP F arg*', '*test_arg (fixtures used: arg)', '*TEARDOWN F arg*']) |
class TestHooks():
def test_test_report(self, pytester: Pytester, pytestconfig: Config) -> None:
pytester.makepyfile('\n def test_a(): assert False\n def test_b(): pass\n ')
reprec = pytester.inline_run()
reports = reprec.getreports('pytest_runtest_logreport')
assert (len(reports) == 6)
for rep in reports:
data = pytestconfig.hook.pytest_report_to_serializable(config=pytestconfig, report=rep)
assert (data['$report_type'] == 'TestReport')
new_rep = pytestconfig.hook.pytest_report_from_serializable(config=pytestconfig, data=data)
assert (new_rep.nodeid == rep.nodeid)
assert (new_rep.when == rep.when)
assert (new_rep.outcome == rep.outcome)
def test_collect_report(self, pytester: Pytester, pytestconfig: Config) -> None:
pytester.makepyfile('\n def test_a(): assert False\n def test_b(): pass\n ')
reprec = pytester.inline_run()
reports = reprec.getreports('pytest_collectreport')
assert (len(reports) == 3)
for rep in reports:
data = pytestconfig.hook.pytest_report_to_serializable(config=pytestconfig, report=rep)
assert (data['$report_type'] == 'CollectReport')
new_rep = pytestconfig.hook.pytest_report_from_serializable(config=pytestconfig, data=data)
assert (new_rep.nodeid == rep.nodeid)
assert (new_rep.when == 'collect')
assert (new_rep.outcome == rep.outcome)
@pytest.mark.parametrize('hook_name', ['pytest_runtest_logreport', 'pytest_collectreport'])
def test_invalid_report_types(self, pytester: Pytester, pytestconfig: Config, hook_name: str) -> None:
pytester.makepyfile('\n def test_a(): pass\n ')
reprec = pytester.inline_run()
reports = reprec.getreports(hook_name)
assert reports
rep = reports[0]
data = pytestconfig.hook.pytest_report_to_serializable(config=pytestconfig, report=rep)
data['$report_type'] = 'Unknown'
with pytest.raises(AssertionError):
_ = pytestconfig.hook.pytest_report_from_serializable(config=pytestconfig, data=data) |
def test_env_global_override_project_platform(tmp_path, platform):
pyproject_toml = (tmp_path / 'pyproject.toml')
pyproject_toml.write_text('\n[tool.cibuildwheel.linux]\nrepair-wheel-command = "repair-project-linux"\n[tool.cibuildwheel.windows]\nrepair-wheel-command = "repair-project-windows"\n[tool.cibuildwheel.macos]\nrepair-wheel-command = "repair-project-macos"\n')
options_reader = OptionsReader(pyproject_toml, platform=platform, env={'CIBW_REPAIR_WHEEL_COMMAND': 'repair-env-global'})
assert (options_reader.get('repair-wheel-command') == 'repair-env-global') |
import logging

logger_id = 0

def create_logger(name, log_file, level=logging.INFO):
logger = logging.getLogger(name)
global logger_id
if (logger_id == 1):
return logger
formatter = logging.Formatter('[%(asctime)s][%(filename)15s][line:%(lineno)4d][%(levelname)8s]%(message)s')
fh = logging.FileHandler(log_file)
fh.setFormatter(formatter)
sh = logging.StreamHandler()
sh.setFormatter(formatter)
logger.setLevel(level)
logger.addHandler(fh)
logger.addHandler(sh)
logger_id = 1
return logger |
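# Usage sketch for create_logger above (file name is hypothetical): the
# module-level logger_id flag means only the first call installs handlers.
log = create_logger('train', 'train.log', level=logging.DEBUG)
log.info('logger ready')
same = create_logger('train', 'train.log')  # returns early; no duplicate handlers |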
def showSolution(solution):
for i in range(1, 6):
print(('House %d' % i))
print('')
print(('Nationality: %s' % solution[('nationality%d' % i)]))
print(('Color: %s' % solution[('color%d' % i)]))
print(('Drink: %s' % solution[('drink%d' % i)]))
print(('Smoke: %s' % solution[('smoke%d' % i)]))
print(('Pet: %s' % solution[('pet%d' % i)]))
print('') |
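# showSolution above expects a flat dict keyed by attribute name plus house
# number for all five houses; a dummy fill-in (hypothetical values) to show the shape:
solution = {('%s%d' % (attr, i)): 'unknown' for attr in ('nationality', 'color', 'drink', 'smoke', 'pet') for i in range(1, 6)}
showSolution(solution) |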
class TestFlaskOpenAPIResponse():
def test_type_invalid(self):
with pytest.raises(TypeError):
FlaskOpenAPIResponse(None)
def test_invalid_server(self, response_factory):
data = b'Not Found'
status_code = 404
response = response_factory(data, status_code=status_code)
openapi_response = FlaskOpenAPIResponse(response)
assert (openapi_response.data == data)
assert (openapi_response.status_code == status_code)
assert (openapi_response.content_type == response.mimetype) |
class CommonAPIRequestTools(object):
CREDENTIAL_ACCESS = 'cred_access'
CREDENTIAL_SECRET = 'cred_secret'
CREDENTIAL_ACCOUNT = 'cred_account'
CREDENTIAL_TOKEN = 'cred_token'
api_class = mws.mws.MWS
def setUp(self):
self.api = self.api_class(self.CREDENTIAL_ACCESS, self.CREDENTIAL_SECRET, self.CREDENTIAL_ACCOUNT, auth_token=self.CREDENTIAL_TOKEN)
self.api._test_request_params = True
def assert_common_params(self, params, action=None):
if action:
assert (params['Action'] == action)
assert (params['AWSAccessKeyId'] == self.CREDENTIAL_ACCESS)
assert (params[self.api.ACCOUNT_TYPE] == self.CREDENTIAL_ACCOUNT)
assert (params['MWSAuthToken'] == self.CREDENTIAL_TOKEN)
assert (params['SignatureMethod'] == 'HmacSHA256')
assert (params['SignatureVersion'] == '2')
isoformat_str = '%Y-%m-%dT%H:%M:%S'
try:
datetime.datetime.strptime(params['Timestamp'], isoformat_str)
except ValueError:
            pytest.fail('Timestamp expected an ISO-8601 datetime string (URL-encoded) in the format [YYYY-MM-DDTHH:MM:SS].')
def test_service_status(self):
response = self.api.get_service_status()
assert (response['Action'] == 'GetServiceStatus')
def test_generic_request_uri_correct_value(self):
api_orig_uri = self.api.uri
try:
self._generic_request_uri_correct_value()
except Exception:
raise
finally:
self.api.uri = api_orig_uri
def _generic_request_uri_correct_value(self):
action = 'GenericRequestURIException'
params = {'DoesNotMatter': 'foobar'}
assert self.api.generic_request(action=action, params=params)
self.api.uri = '/Something/that/should/work'
assert self.api.generic_request(action=action, params=params)
uri_values = [None, False, '', '/']
for val in uri_values:
self.api.uri = val
with pytest.raises(ValueError):
assert self.api.generic_request(action=action, params=params)
def test_generic_request_correct_params_type(self):
action = 'GenericRequestBadParamsException'
assert self.api.generic_request(action=action, params={})
        param_values = [['Lists', "don't", 'work'], ('Tuples', "don't", 'either'), 3, 'No to a string!', {'You', 'made', 'a', 'set,', 'silly!'}]
for val in param_values:
with pytest.raises(ValueError):
assert self.api.generic_request(action, params=val)
def test_basic_generic_request(self):
action = 'BasicGenericRequest'
test_datetime = datetime.datetime(2021, 1, 27, 22, 59, 59)
params = {'ADateTime': test_datetime, 'ATrueBool': True, 'AFalseBool': False, 'NoneShouldNotExist': None}
request_params = self.api.generic_request(action=action, params=params)
self.assert_common_params(request_params, action='BasicGenericRequest')
assert (request_params['ADateTime'] == '2021-01-27T22:59:59')
assert (request_params['ATrueBool'] == 'true')
assert (request_params['AFalseBool'] == 'false')
assert ('NoneShouldNotExist' not in request_params)
def test_complex_generic_request(self):
action = 'ComplexGenericRequest'
params = {'Enumerated': ['A', 'B', 'C'], 'Keyed': {'Foo': 'bar', 'Bar': 4, 'Baz': False}, 'Multi': {'A': [{'Foo': 'baz', 'Bar': 12}, {'Foo': 'what', 'Bar': 'ever', 'Something': [4, 6, 7, 9]}], 'B': [1, 2, 3]}}
request_params = self.api.generic_request(action=action, params=params)
self.assert_common_params(request_params, action=action)
        expected = {'Enumerated.1': 'A', 'Enumerated.2': 'B', 'Enumerated.3': 'C', 'Keyed.Foo': 'bar', 'Keyed.Bar': '4', 'Keyed.Baz': 'false', 'Multi.A.1.Foo': 'baz', 'Multi.A.1.Bar': '12', 'Multi.A.2.Foo': 'what', 'Multi.A.2.Bar': 'ever', 'Multi.A.2.Something.1': '4', 'Multi.A.2.Something.2': '6', 'Multi.A.2.Something.3': '7', 'Multi.A.2.Something.4': '9', 'Multi.B.1': '1', 'Multi.B.2': '2', 'Multi.B.3': '3'}
for (key, val) in expected.items():
assert (request_params[key] == val) |
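# A minimal sketch (an assumption, not the mws library's actual helper) of the
# dot-notation flattening that test_complex_generic_request checks: nested dict
# keys join with '.', sequences get 1-based numeric suffixes, booleans
# lower-case, and leaves are stringified.
def flatten_params(value, prefix=''):
    flat = {}
    if isinstance(value, dict):
        for (key, val) in value.items():
            flat.update(flatten_params(val, ('%s.%s' % (prefix, key)) if prefix else str(key)))
    elif isinstance(value, (list, tuple)):
        for (idx, val) in enumerate(value, start=1):
            flat.update(flatten_params(val, ('%s.%s' % (prefix, idx)) if prefix else str(idx)))
    elif isinstance(value, bool):
        flat[prefix] = ('true' if value else 'false')
    else:
        flat[prefix] = str(value)
    return flat

assert (flatten_params({'Multi': {'B': [1, 2]}}) == {'Multi.B.1': '1', 'Multi.B.2': '2'}) |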
def run_test(case, m):
m.elaborate()
tr = mk_TestStructuralTranslator(StructuralTranslatorL1)(m)
tr.clear(m)
tr.translate_structural(m)
try:
name = tr.structural.component_unique_name[m]
assert (name == case.REF_NAME)
decl_ports = tr.structural.decl_ports[m]
assert (decl_ports == case.REF_PORT)
decl_wires = tr.structural.decl_wires[m]
assert (decl_wires == case.REF_WIRE)
decl_consts = tr.structural.decl_consts[m]
assert (decl_consts == case.REF_CONST)
connections = tr.structural.connections[m]
assert (connections == case.REF_CONN)
vector_types = tr.structural.decl_type_vector
assert (list(vector_types.items()) == case.REF_VECTOR)
except AttributeError:
pass |
def generic_test(sdr, test_async=True, test_exceptions=True, use_numpy=True):
print(('Testing %r' % sdr))
sdr.rs = 2048000.0
assert check_close(7, 2048000.0, sdr.rs)
print(('sample_rate: %s' % sdr.rs))
bw = (sdr.rs / 2)
print('setting bandwidth to {}'.format(bw))
sdr.bandwidth = bw
assert check_close(7, bw, sdr.bandwidth)
print('applied bandwidth={}'.format(sdr.bandwidth))
prev_fc = sdr.fc
sdr.fc = (prev_fc + 1000000.0)
assert check_close(7, (prev_fc + 1000000.0), sdr.fc)
print(('center_freq: %s' % sdr.fc))
sdr.gain = 10
assert check_close(2, 10, sdr.gain)
print(('gain: %s' % sdr.gain))
samples = sdr.read_samples(1024)
assert (len(samples) == 1024)
check_generated_data(samples, use_numpy=use_numpy)
print(('read %s samples' % len(samples)))
sdr.set_direct_sampling('i')
samples = sdr.read_bytes(1024)
check_generated_data(samples, 1, use_numpy=use_numpy)
sdr.set_direct_sampling('q')
samples = sdr.read_bytes(1024)
check_generated_data(samples, 2, use_numpy=use_numpy)
if test_exceptions:
with pytest.raises(SyntaxError):
sdr.set_direct_sampling('foo')
sdr.set_direct_sampling(0)
if test_async:
async_read_test(sdr, use_numpy=use_numpy) |
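# A plausible stand-in (assumption) for the check_close helper used by
# generic_test above: agreement to roughly `places` significant digits, which
# tolerates tuner rounding on real hardware.
def check_close(places, expected, actual):
    if (expected == 0):
        return (abs(actual) < (10 ** (- places)))
    return ((abs(actual - expected) / abs(expected)) <= (10 ** (- places))) |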
def start_env_episode_distance(task, episode, pickup_order):
pathfinder = task._simple_pathfinder
agent_start_pos = episode.start_position
prev_obj_end_pos = agent_start_pos
object_positions = [obj.position for obj in episode.objects]
rec_positions = [rec.position for rec in episode.get_receptacles()]
    pickup_order = [(obj_id - 1) for obj_id in pickup_order]
shortest_dist = 0
for i in range(len(pickup_order)):
curr_idx = pickup_order[i]
curr_obj_start_pos = object_positions[curr_idx]
curr_obj_end_pos = rec_positions[curr_idx]
shortest_dist += (geodesic_distance(pathfinder, prev_obj_end_pos, [curr_obj_start_pos]) - 1.0)
shortest_dist += (geodesic_distance(pathfinder, curr_obj_start_pos, [curr_obj_end_pos]) - 0.5)
prev_obj_end_pos = curr_obj_end_pos
return shortest_dist |
def reiddataset_downloader(data_dir, data_name, hdf5=True):
if (not os.path.exists(data_dir)):
os.makedirs(data_dir)
if hdf5:
dataset_dir = os.path.join(data_dir, data_name)
if (not os.path.exists(dataset_dir)):
os.makedirs(dataset_dir)
destination = os.path.join(dataset_dir, (data_name + '.hdf5'))
if (not os.path.isfile(destination)):
id = dataset_hdf5[data_name]
            print(('Downloading %s in HDF5 Format' % data_name))
gdrive_downloader(destination, id)
print('Done')
else:
print(('Dataset Check Success: %s exists!' % data_name))
else:
data_dir_exist = os.path.join(data_dir, data_name)
if (not os.path.exists(data_dir_exist)):
temp_dir = os.path.join(data_dir, 'temp')
if (not os.path.exists(temp_dir)):
os.makedirs(temp_dir)
destination = os.path.join(temp_dir, data_name)
id = dataset[data_name]
print(('Downloading %s in Original Images' % data_name))
gdrive_downloader(destination, id)
zip_ref = zipfile.ZipFile(destination)
print(('Extracting %s' % data_name))
zip_ref.extractall(data_dir)
zip_ref.close()
shutil.rmtree(temp_dir)
print('Done')
if (data_name == 'CUHK03'):
print('Converting cuhk03.mat into images')
cuhk03_to_image(os.path.join(data_dir, 'CUHK03'))
print('Done')
else:
print(('Dataset Check Success: %s exists!' % data_name)) |
def test_log_action(first_model, second_model, combined_model, initialized_db):
day = date(2019, 1, 1)
with freeze_time(day):
combined_model.log_action('push_repo', namespace_name='devtable', repository_name='simple', ip='1.2.3.4')
simple_repo = model.repository.get_repository('devtable', 'simple')
assert (combined_model.count_repository_actions(simple_repo, day) == 1)
assert (first_model.count_repository_actions(simple_repo, day) == 1)
assert (second_model.count_repository_actions(simple_repo, day) == 0) |
class TestTransforms(unittest.TestCase):
def setUp(self) -> None:
self.transformer = transforms.TransformVisitor()
def parse_transform(self, code: str) -> Module:
module = parse(code, apply_transforms=False)
return self.transformer.visit(module)
def test_function_inlining_transform(self) -> None:
def transform_call(node: Call) -> Const:
inferred = next(node.infer())
return inferred
self.transformer.register_transform(nodes.Call, transform_call)
module = self.parse_transform('\n def test(): return 42\n test() #\n ')
self.assertIsInstance(module.body[1], nodes.Expr)
self.assertIsInstance(module.body[1].value, nodes.Const)
self.assertEqual(module.body[1].value.value, 42)
def test_recursive_transforms_into_astroid_fields(self) -> None:
def transform_compare(node: Compare) -> Const:
(_, right) = node.ops[0]
return nodes.const_factory((node.left.value < right.value))
def transform_name(node: Name) -> Const:
return next(node.infer())
self.transformer.register_transform(nodes.Compare, transform_compare)
self.transformer.register_transform(nodes.Name, transform_name)
module = self.parse_transform('\n a = 42\n b = 24\n a < b\n ')
self.assertIsInstance(module.body[2], nodes.Expr)
self.assertIsInstance(module.body[2].value, nodes.Const)
self.assertFalse(module.body[2].value.value)
def test_transform_patches_locals(self) -> None:
def transform_function(node: FunctionDef) -> None:
assign = nodes.Assign(parent=node, lineno=node.lineno, col_offset=node.col_offset, end_lineno=node.end_lineno, end_col_offset=node.end_col_offset)
name = nodes.AssignName(name='value', lineno=0, col_offset=0, parent=assign, end_lineno=None, end_col_offset=None)
assign.targets = [name]
assign.value = nodes.const_factory(42)
node.body.append(assign)
self.transformer.register_transform(nodes.FunctionDef, transform_function)
module = self.parse_transform('\n def test():\n pass\n ')
func = module.body[0]
self.assertEqual(len(func.body), 2)
self.assertIsInstance(func.body[1], nodes.Assign)
self.assertEqual(func.body[1].as_string(), 'value = 42')
def test_predicates(self) -> None:
def transform_call(node: Call) -> Const:
inferred = next(node.infer())
return inferred
def should_inline(node: Call) -> bool:
return node.func.name.startswith('inlineme')
self.transformer.register_transform(nodes.Call, transform_call, should_inline)
module = self.parse_transform('\n def inlineme_1():\n return 24\n def dont_inline_me():\n return 42\n def inlineme_2():\n return 2\n inlineme_1()\n dont_inline_me()\n inlineme_2()\n ')
values = module.body[(- 3):]
self.assertIsInstance(values[0], nodes.Expr)
self.assertIsInstance(values[0].value, nodes.Const)
self.assertEqual(values[0].value.value, 24)
self.assertIsInstance(values[1], nodes.Expr)
self.assertIsInstance(values[1].value, nodes.Call)
self.assertIsInstance(values[2], nodes.Expr)
self.assertIsInstance(values[2].value, nodes.Const)
self.assertEqual(values[2].value.value, 2)
def test_transforms_are_separated(self) -> None:
def transform_function(node: FunctionDef) -> Const:
if node.decorators:
for decorator in node.decorators.nodes:
inferred = next(decorator.infer())
if (inferred.qname() == 'abc.abstractmethod'):
return next(node.infer_call_result(None))
return None
manager = MANAGER
with add_transform(manager, nodes.FunctionDef, transform_function):
            module = builder.parse('\n import abc\n from abc import abstractmethod\n\n class A(object):\n @abc.abstractmethod\n def ala(self):\n return 24\n\n @abstractmethod\n def bala(self):\n return 42\n ')
cls = module['A']
ala = cls.body[0]
bala = cls.body[1]
self.assertIsInstance(ala, nodes.Const)
self.assertEqual(ala.value, 24)
self.assertIsInstance(bala, nodes.Const)
self.assertEqual(bala.value, 42)
def test_transforms_are_called_for_builtin_modules(self) -> None:
def transform_function(node: FunctionDef) -> FunctionDef:
name = nodes.AssignName(name='value', lineno=0, col_offset=0, parent=node.args, end_lineno=None, end_col_offset=None)
node.args.args = [name]
return node
manager = MANAGER
def predicate(node: FunctionDef) -> bool:
return (node.root().name == 'time')
with add_transform(manager, nodes.FunctionDef, transform_function, predicate):
builder_instance = builder.AstroidBuilder()
module = builder_instance.module_build(time)
asctime = module['asctime']
self.assertEqual(len(asctime.args.args), 1)
self.assertIsInstance(asctime.args.args[0], nodes.AssignName)
self.assertEqual(asctime.args.args[0].name, 'value')
def test_builder_apply_transforms(self) -> None:
def transform_function(node):
return nodes.const_factory(42)
manager = MANAGER
with add_transform(manager, nodes.FunctionDef, transform_function):
astroid_builder = builder.AstroidBuilder(apply_transforms=False)
module = astroid_builder.string_build('def test(): pass')
self.assertIsInstance(module.body[0], nodes.FunctionDef)
def test_transform_crashes_on_is_subtype_of(self) -> None:
def transform_class(cls):
if cls.is_subtype_of('django.db.models.base.Model'):
return cls
return cls
self.transformer.register_transform(nodes.ClassDef, transform_class)
self.parse_transform("\n # Change environ to automatically call putenv() if it exists\n import os\n putenv = os.putenv\n try:\n # This will fail if there's no putenv\n putenv\n except NameError:\n pass\n else:\n import UserDict\n ") |
def make_predictions(all_examples, all_features, all_results, n_best_size, max_answer_length, larger_than_cls):
example_id_to_features = collections.defaultdict(list)
for feature in all_features:
example_id_to_features[feature.example_id].append(feature)
example_id_to_results = collections.defaultdict(list)
for result in all_results:
example_id_to_results[result.example_id].append(result)
_PrelimPrediction = collections.namedtuple('PrelimPrediction', ['start_index', 'end_index', 'start_logit', 'end_logit'])
all_predictions = collections.OrderedDict()
final_all_predictions = collections.OrderedDict()
for (example_id, example) in enumerate(all_examples):
features = example_id_to_features[example_id]
results = example_id_to_results[example_id]
all_predictions[example_id] = collections.OrderedDict()
final_all_predictions[example_id] = []
for (feature_index, feature) in enumerate(features):
event_type_argument_type = '_'.join([feature.event_type, feature.argument_type])
event_type_offset_argument_type = '_'.join([feature.event_type, str(feature.token_to_orig_map[feature.fea_trigger_offset]), feature.argument_type])
(start_indexes, end_indexes) = (None, None)
prelim_predictions = []
for result in results:
if (result.event_type_offset_argument_type == event_type_offset_argument_type):
start_indexes = _get_best_indexes(result.start_logits, n_best_size, larger_than_cls, result.start_logits[0])
end_indexes = _get_best_indexes(result.end_logits, n_best_size, larger_than_cls, result.end_logits[0])
for start_index in start_indexes:
for end_index in end_indexes:
if ((start_index >= len(feature.tokens)) or (end_index >= len(feature.tokens))):
continue
if ((start_index not in feature.token_to_orig_map) or (end_index not in feature.token_to_orig_map)):
continue
if (end_index < start_index):
continue
length = ((end_index - start_index) + 1)
if (length > max_answer_length):
continue
prelim_predictions.append(_PrelimPrediction(start_index=start_index, end_index=end_index, start_logit=result.start_logits[start_index], end_logit=result.end_logits[end_index]))
if (not larger_than_cls):
feature_null_score = (result.start_logits[0] + result.end_logits[0])
prelim_predictions.append(_PrelimPrediction(start_index=0, end_index=0, start_logit=result.start_logits[0], end_logit=result.end_logits[0]))
prelim_predictions = sorted(prelim_predictions, key=(lambda x: (x.start_logit + x.end_logit)), reverse=True)
max_num_pred_per_arg = 1
for (idx, pred) in enumerate(prelim_predictions):
if ((idx + 1) > max_num_pred_per_arg):
break
if ((pred.start_index == 0) and (pred.end_index == 0)):
break
(orig_sent_start, orig_sent_end) = (feature.token_to_orig_map[pred.start_index], feature.token_to_orig_map[pred.end_index])
final_all_predictions[example_id].append([event_type_argument_type, [orig_sent_start, orig_sent_end]])
return final_all_predictions |
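# A plausible sketch (assumption) of the _get_best_indexes helper used by
# make_predictions above: indexes of the n_best_size largest logits, optionally
# dropping candidates that score below the [CLS] (null-answer) logit.
def _get_best_indexes(logits, n_best_size, larger_than_cls=False, cls_logit=None):
    ranked = sorted(enumerate(logits), key=(lambda pair: pair[1]), reverse=True)[:n_best_size]
    if larger_than_cls:
        ranked = [(idx, score) for (idx, score) in ranked if (score > cls_logit)]
    return [idx for (idx, _) in ranked] |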
class BufferedIterator(object):
def __init__(self, size, iterable):
self._queue = queue.Queue(size)
self._iterable = iterable
self._consumer = None
self.start_time = time.time()
self.warning_time = None
self.total = len(iterable)
def _create_consumer(self):
self._consumer = BackgroundConsumer(self._queue, self._iterable, self.total, (torch.cuda.current_device() if torch.cuda.is_available() else None))
self._consumer.daemon = True
self._consumer.start()
def __iter__(self):
return self
def __len__(self):
return self.total
def take(self, n):
self.total = min(self.total, n)
if hasattr(self._iterable, 'take'):
self._iterable.take(n)
return self
def __next__(self):
if (self._consumer is None):
self._create_consumer()
if (self._queue.qsize() < min(2, max(1, (self._queue.maxsize // 2)))):
if ((time.time() - self.start_time) > (5 * 60)):
if ((self.warning_time is None) or ((time.time() - self.warning_time) > (15 * 60))):
logger.debug('Data loading buffer is empty or nearly empty. This may indicate a data loading bottleneck, and increasing the number of workers (--num-workers) may help.')
self.warning_time = time.time()
item = self._queue.get(True)
if isinstance(item, Exception):
raise item
if (item is _sentinel):
raise StopIteration()
return item |
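# The producer half that BufferedIterator expects; in fairseq this is a small
# daemon thread roughly like the sketch below, with _sentinel marking normal
# exhaustion of the source iterable (exceptions are forwarded through the queue).
import threading
import torch

_sentinel = object()

class BackgroundConsumer(threading.Thread):
    def __init__(self, queue, source, max_len, cuda_device):
        super().__init__()
        self._queue = queue
        self._source = source
        self._max_len = max_len
        self.cuda_device = cuda_device
    def run(self):
        if (self.cuda_device is not None):
            torch.cuda.set_device(self.cuda_device)  # avoid creating a context on GPU 0
        try:
            count = 0
            for item in self._source:
                self._queue.put(item)
                count += 1
                if ((self._max_len is not None) and (count >= self._max_len)):
                    break
            self._queue.put(_sentinel)  # signal the consumer we are done
        except Exception as e:
            self._queue.put(e)  # re-raised by BufferedIterator.__next__ |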
class clean(distutils.command.clean.clean):
def initialize_options(self):
self.template_files = None
self.commands = None
super().initialize_options()
def finalize_options(self):
self.set_undefined_options('pre_build_templates', ('template_files', 'template_files'))
self.set_undefined_options('pre_build_exec', ('commands', 'commands'))
super().finalize_options()
def run(self):
if self.all:
for outfile in self.template_files:
if os.path.isfile(outfile[(- 1)]):
if (not self.dry_run):
os.remove(outfile[(- 1)])
log.info("removing '%s'", outfile[(- 1)])
for outfile in self.commands:
outpath = os.path.join(*outfile[(- 1)])
if os.path.isfile(outpath):
if (not self.dry_run):
os.remove(outpath)
log.info("removing '%s'", outpath)
super().run() |
class RoIAwarePool3dFunction(Function):
    @staticmethod
    def forward(ctx, rois, pts, pts_feature, out_size, max_pts_each_voxel, pool_method):
assert ((rois.shape[1] == 7) and (pts.shape[1] == 3))
if isinstance(out_size, int):
out_x = out_y = out_z = out_size
else:
assert (len(out_size) == 3)
for k in range(3):
assert isinstance(out_size[k], int)
(out_x, out_y, out_z) = out_size
num_rois = rois.shape[0]
num_channels = pts_feature.shape[(- 1)]
num_pts = pts.shape[0]
pooled_features = pts_feature.new_zeros((num_rois, out_x, out_y, out_z, num_channels))
argmax = pts_feature.new_zeros((num_rois, out_x, out_y, out_z, num_channels), dtype=torch.int)
pts_idx_of_voxels = pts_feature.new_zeros((num_rois, out_x, out_y, out_z, max_pts_each_voxel), dtype=torch.int)
pool_method_map = {'max': 0, 'avg': 1}
pool_method = pool_method_map[pool_method]
roiaware_pool3d_cuda.forward(rois, pts, pts_feature, argmax, pts_idx_of_voxels, pooled_features, pool_method)
ctx.roiaware_pool3d_for_backward = (pts_idx_of_voxels, argmax, pool_method, num_pts, num_channels)
return pooled_features
    @staticmethod
    def backward(ctx, grad_out):
(pts_idx_of_voxels, argmax, pool_method, num_pts, num_channels) = ctx.roiaware_pool3d_for_backward
grad_in = grad_out.new_zeros((num_pts, num_channels))
roiaware_pool3d_cuda.backward(pts_idx_of_voxels, argmax, grad_out.contiguous(), grad_in, pool_method)
return (None, None, grad_in, None, None, None) |
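# Usage sketch (needs a CUDA build of roiaware_pool3d_cuda; shapes are
# illustrative): pool 64-dim point features into a 12x12x12 grid per 7-DoF box
# (x, y, z, dx, dy, dz, yaw).
rois = torch.randn(4, 7, device='cuda')
pts = torch.randn(1000, 3, device='cuda')
pts_feature = torch.randn(1000, 64, device='cuda')
pooled = RoIAwarePool3dFunction.apply(rois, pts, pts_feature, 12, 128, 'max')
assert (pooled.shape == (4, 12, 12, 12, 64)) |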
def which_model(input_csv_path: str) -> str:
with open(input_csv_path, 'r') as csv_file:
params_reader = csv.reader(csv_file, delimiter=';')
for (key, value) in params_reader:
if (key == 'model'):
return value
raise ValueError('Model type not specified.') |
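# Usage sketch for which_model above (file name and contents are hypothetical):
# a semicolon-delimited key;value CSV where one row names the model.
with open('params.csv', 'w') as fh:
    fh.write('model;unet\nepochs;10\n')
assert (which_model('params.csv') == 'unet') |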
class Dataset(object):
def get_epoch(self):
raise NotImplementedError(self.__class__)
def get_batches(self, n_batches):
if (len(self) < n_batches):
raise ValueError()
return itertools.islice(self.get_epoch(), n_batches)
def get_epochs(self, n_epochs: int):
for _ in range(n_epochs):
for batch in self.get_epoch():
(yield batch)
def get_samples(self, n_samples: int):
return (self.get_batches(n_samples), n_samples)
def percent_filtered(self):
return None
def get_vocab(self):
raise NotImplementedError(self.__class__)
def get_word_counts(self):
raise NotImplementedError(self.__class__)
def __len__(self):
raise NotImplementedError(self.__class__) |
class FairseqLanguageModel(BaseFairseqModel):
def __init__(self, decoder):
super().__init__()
self.decoder = decoder
assert isinstance(self.decoder, FairseqDecoder)
def forward(self, src_tokens, **kwargs):
return self.decoder(src_tokens, **kwargs)
def extract_features(self, src_tokens, **kwargs):
return self.decoder.extract_features(src_tokens, **kwargs)
def output_layer(self, features, **kwargs):
return self.decoder.output_layer(features, **kwargs)
def max_positions(self):
return self.decoder.max_positions()
def max_decoder_positions(self):
return self.decoder.max_positions()
    @property
    def supported_targets(self):
return {'future'} |
class ValueEnum(menu):
def __init__(self, name, pypilot_path, pypilot_items_path=None):
super(ValueEnum, self).__init__(name, [])
self.pypilot_path = pypilot_path
self.pypilot_items_path = pypilot_items_path
self.items_val = None
self.selection = (- 1)
def process(self):
if self.pypilot_items_path:
items_val = self.last_val(self.pypilot_items_path)
if (items_val != self.items_val):
self.items_val = items_val
try:
self.items = list(map((lambda choice: ValueEnumSelect(self.lcd, choice, self.pypilot_path)), items_val))
except Exception as e:
print('failed choices', e)
elif (not self.items):
try:
values = self.lcd.client.get_values()
if values:
info = values[self.pypilot_path]
choices = info['choices']
else:
choices = []
self.items = list(map((lambda choice: ValueEnumSelect(self.lcd, choice, self.pypilot_path)), choices))
except Exception as e:
print('failed choices', e)
if (self.selection < 0):
val = self.last_val(self.pypilot_path)
for i in range(len(self.items)):
if (self.items[i].name == val):
self.selection = i
return super(ValueEnum, self).process() |
class EvaluatedName(PyName):
def __init__(self, callback, module=None, lineno=None):
self.module = module
self.lineno = lineno
self.callback = callback
self.pyobject = _Inferred(callback, _get_concluded_data(module))
def get_object(self):
return self.pyobject.get()
def get_definition_location(self):
return (self.module, self.lineno)
def invalidate(self):
self.pyobject.set(None) |
def main():
args = parse_args()
if (args.world_size > 1):
rank = init_distributed(args)
torch.cuda.set_device(args.local_rank)
else:
rank = 0
set_random_seed((args.seed + rank))
(train_env, val_envs, aug_env) = build_dataset(args, rank=rank, is_test=args.test)
if (not args.test):
if args.zero_shot:
zero_shot(args, train_env, val_envs, aug_env=aug_env, rank=rank)
else:
train(args, train_env, val_envs, aug_env=aug_env, rank=rank)
else:
valid(args, train_env, val_envs, rank=rank) |
class CvtAttention(nn.Module):
def __init__(self, num_heads, embed_dim, kernel_size, padding_q, padding_kv, stride_q, stride_kv, qkv_projection_method, qkv_bias, attention_drop_rate, drop_rate, with_cls_token=True):
super().__init__()
self.attention = CvtSelfAttention(num_heads, embed_dim, kernel_size, padding_q, padding_kv, stride_q, stride_kv, qkv_projection_method, qkv_bias, attention_drop_rate, with_cls_token)
self.output = CvtSelfOutput(embed_dim, drop_rate)
self.pruned_heads = set()
def prune_heads(self, heads):
if (len(heads) == 0):
return
(heads, index) = find_pruneable_heads_and_indices(heads, self.attention.num_attention_heads, self.attention.attention_head_size, self.pruned_heads)
self.attention.query = prune_linear_layer(self.attention.query, index)
self.attention.key = prune_linear_layer(self.attention.key, index)
self.attention.value = prune_linear_layer(self.attention.value, index)
self.output.dense = prune_linear_layer(self.output.dense, index, dim=1)
self.attention.num_attention_heads = (self.attention.num_attention_heads - len(heads))
self.attention.all_head_size = (self.attention.attention_head_size * self.attention.num_attention_heads)
self.pruned_heads = self.pruned_heads.union(heads)
def forward(self, hidden_state, height, width):
self_output = self.attention(hidden_state, height, width)
attention_output = self.output(self_output, hidden_state)
return attention_output |
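# Usage sketch for prune_heads above (dimensions and the 'dw_bn' projection
# method are illustrative; embed_dim must divide evenly by num_heads): dropping
# heads shrinks the q/k/v projections and the output dense layer in place.
attn = CvtAttention(num_heads=4, embed_dim=64, kernel_size=3, padding_q=1, padding_kv=1, stride_q=1, stride_kv=1, qkv_projection_method='dw_bn', qkv_bias=True, attention_drop_rate=0.0, drop_rate=0.0)
attn.prune_heads({0, 3})
assert (attn.attention.num_attention_heads == 2) |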
class EmotionBot(Bot):
class TimeoutException(Exception):
def __init__(self, uuid, status):
self.uuid = uuid
self.status = status
def __init__(self, name=None, need_login=True, timeout_max=15, qr_callback=None, *args, **kwargs):
self.name = name
self.timeout_count = 0
self.setting = None
if need_login:
self.login(*args, timeout_max=timeout_max, qr_callback=qr_callback, **kwargs)
def login(self, timeout_max=15, qr_callback=None, *args, **kwargs):
def _qr_callback(uuid, status, qrcode):
if (status == '408'):
self.timeout_count += 1
if (self.timeout_count > timeout_max):
raise self.TimeoutException(uuid, status)
elif (status == '400'):
raise self.TimeoutException(uuid, status)
if callable(qr_callback):
qr_callback(uuid, status, qrcode)
super().__init__(*args, qr_callback=(_qr_callback if qr_callback else None), **kwargs)
uin = str(self.self.uin)
with settings_lock:
with shelve.open('settings') as settings:
self.setting = (settings.get(uin, None) or BotSetting())
def save_setting(setting, name, value):
setting.__dict__[name] = value
with settings_lock:
with shelve.open('settings') as settings:
settings[uin] = self.setting
logger.info('%s updated setting', self.self.name)
BotSetting.__setattr__ = save_setting
reg_event(self)
def self_msg(self, msg):
try:
self.self.send(msg)
except exceptions.ResponseError:
self.file_helper.send(msg) |
def test_require_gdal_version_param_values():
    for values in [('bar',), ['bar'], {'bar'}]:
        @require_gdal_version('1.0', param='foo', values=values)
        def a(foo=None):
            return foo
        assert (a() is None)
        assert (a('bar') == 'bar')
        assert (a(foo='bar') == 'bar') |
@DATASETS.register_module()
class BottomUpCrowdPoseDataset(BottomUpCocoDataset):
def __init__(self, ann_file, img_prefix, data_cfg, pipeline, dataset_info=None, test_mode=False):
if (dataset_info is None):
warnings.warn('dataset_info is missing. Check for details.', DeprecationWarning)
cfg = Config.fromfile('configs/_base_/datasets/crowdpose.py')
dataset_info = cfg._cfg_dict['dataset_info']
super(BottomUpCocoDataset, self).__init__(ann_file, img_prefix, data_cfg, pipeline, dataset_info=dataset_info, test_mode=test_mode)
self.ann_info['use_different_joint_weights'] = False
print(f'=> num_images: {self.num_images}')
def _do_python_keypoint_eval(self, res_file):
stats_names = ['AP', 'AP .5', 'AP .75', 'AR', 'AR .5', 'AR .75', 'AP(E)', 'AP(M)', 'AP(H)']
with open(res_file, 'r') as file:
res_json = json.load(file)
if (not res_json):
info_str = list(zip(stats_names, ([0] * len(stats_names))))
return info_str
coco_det = self.coco.loadRes(res_file)
coco_eval = COCOeval(self.coco, coco_det, 'keypoints_crowd', self.sigmas, use_area=False)
coco_eval.params.useSegm = None
coco_eval.evaluate()
coco_eval.accumulate()
coco_eval.summarize()
info_str = list(zip(stats_names, coco_eval.stats))
return info_str |
class OptSimilarity_Mestranol(Molecule):
def _reward(self):
        scorer = similarity(smiles='COc1ccc2[C@H]3CC[C@]4(C)[C@@H](CC[C@@]4(O)C#C)[C@@H]3CCc2c1', name='Mestranol', fp_type='AP', threshold=0.75)
s_fn = scorer.wrapped_objective
molecule = Chem.MolFromSmiles(self._state)
if (molecule is None):
return 0.0
return (s_fn.score(self._state) * (self.discount_factor ** (self.max_steps - self.num_steps_taken))) |
class InlineResponse20016(BaseModel, extra='forbid'):
time: Optional[float] = Field(default=None, description='Time spent to process this request')
status: Optional[str] = Field(default=None, description='')
result: Optional[List[List['ScoredPoint']]] = Field(default=None, description='') |
class _Config():
def __init__(self):
self._init_logging_handler()
self.cuda_device = 6
self.eos_m_token = 'EOS_M'
self.beam_len_bonus = 0.6
self.mode = 'unknown'
self.m = 'TSD'
self.prev_z_method = 'none'
self.dataset = 'unknown'
self.root_dir = '/home/wyshi/simulator/sequicity/'
self.seed = 0
def init_handler(self, m):
init_method = {'tsdf-camrest': self._camrest_tsdf_init, 'tsdf-kvret': self._kvret_tsdf_init}
init_method[m]()
def _camrest_tsdf_init(self):
self.beam_len_bonus = 0.5
self.prev_z_method = 'separate'
self.vocab_size = 800
self.embedding_size = 50
self.hidden_size = 50
self.split = (3, 1, 1)
self.lr = 0.003
self.lr_decay = 0.5
self.layer_num = 1
self.split = (9, 1, 1)
self.model_path = ('/data/qkun/sequicity_multiwoz_0.4/' + 'models/multiwoz_sys_test.pkl')
self.result_path = (self.root_dir + 'results/multiwoz_sys.csv')
self.vocab_path = ('/data/qkun/sequicity_multiwoz_0.4/' + 'vocab/vocab-multiwoz_sys.pkl')
self.data = '/data/qkun/simulator/data/multiwoz-master/data/multi-woz/rest_sys.json'
self.entity = (self.root_dir + 'data/multi_woz/ontology.json')
self.db = ('/data/qkun/simulator/data/multiwoz-master/' + 'data/multi-woz/restaurant_db.json')
self.glove_path = '/data/qkun/sequicity/data/glove/glove.6B.50d.txt'
self.batch_size = 32
self.z_length = 12
self.degree_size = 5
self.dropout_rate = 0.5
self.epoch_num = 100
self.rl_epoch_num = 1
self.cuda = False
self.spv_proportion = 100
self.max_ts = 40
self.early_stop_count = 5
self.new_vocab = True
self.teacher_force = 100
self.beam_search = False
self.beam_size = 10
self.sampling = False
self.use_positional_embedding = False
self.unfrz_attn_epoch = 0
self.skip_unsup = False
self.truncated = False
self.pretrain = False
def _kvret_tsdf_init(self):
self.prev_z_method = 'separate'
self.intent = 'all'
self.vocab_size = 1400
self.embedding_size = 50
self.hidden_size = 50
self.split = None
self.lr = 0.003
self.lr_decay = 0.5
self.vocab_path = (self.root_dir + 'vocab/vocab-kvret.pkl')
self.train = (self.root_dir + 'data/kvret/kvret_train_public.json')
self.dev = (self.root_dir + 'data/kvret/kvret_dev_public.json')
self.test = (self.root_dir + 'data/kvret/kvret_test_public.json')
self.entity = (self.root_dir + 'data/kvret/kvret_entities.json')
self.glove_path = '/data/qkun/sequicity/data/glove/glove.6B.50d.txt'
self.batch_size = 32
self.degree_size = 5
self.z_length = 8
self.layer_num = 1
self.dropout_rate = 0.5
self.epoch_num = 100
self.rl_epoch_num = 2
self.cuda = False
self.spv_proportion = 100
self.alpha = 0.0
self.max_ts = 40
self.early_stop_count = 3
self.new_vocab = True
self.model_path = './models/kvret.pkl'
self.result_path = './results/kvret.csv'
self.teacher_force = 100
self.beam_search = False
self.beam_size = 10
self.sampling = False
self.use_positional_embedding = False
self.unfrz_attn_epoch = 0
self.skip_unsup = False
self.truncated = False
self.pretrain = False
def __str__(self):
s = ''
for (k, v) in self.__dict__.items():
s += '{} : {}\n'.format(k, v)
return s
def _init_logging_handler(self):
current_time = time.strftime('%Y-%m-%d-%H-%M-%S', time.localtime())
stderr_handler = logging.StreamHandler()
file_handler = logging.FileHandler('./log/log_{}.txt'.format(current_time))
logging.basicConfig(handlers=[stderr_handler, file_handler])
logger = logging.getLogger()
logger.setLevel(logging.INFO) |
class TestCommunication(coroutine_tests.CoroutineTestCase):
server_config = h2.config.H2Configuration(client_side=False)
def test_basic_request_response(self):
        request_headers = [(b':method', b'GET'), (b':path', b'/'), (b':authority', b'example.com'), (b':scheme', b'https'), (b'user-agent', b'test-client/0.1.0')]
response_headers = [(b':status', b'204'), (b'server', b'test-server/0.1.0'), (b'content-length', b'0')]
def client():
c = h2.connection.H2Connection()
c.initiate_connection()
data = (yield c.data_to_send())
events = c.receive_data(data)
assert (len(events) == 2)
assert isinstance(events[0], h2.events.SettingsAcknowledged)
assert isinstance(events[1], h2.events.RemoteSettingsChanged)
changed = events[1].changed_settings
assert (changed[h2.settings.SettingCodes.MAX_CONCURRENT_STREAMS].new_value == 100)
events = c.send_headers(1, request_headers, end_stream=True)
assert (not events)
data = (yield c.data_to_send())
events = c.receive_data(data)
assert (len(events) == 2)
assert isinstance(events[0], h2.events.ResponseReceived)
assert (events[0].stream_id == 1)
assert (events[0].headers == response_headers)
assert isinstance(events[1], h2.events.StreamEnded)
assert (events[1].stream_id == 1)
def server():
c = h2.connection.H2Connection(config=self.server_config)
data = (yield)
events = c.receive_data(data)
assert (len(events) == 1)
assert isinstance(events[0], h2.events.RemoteSettingsChanged)
changed = events[0].changed_settings
assert (changed[h2.settings.SettingCodes.MAX_CONCURRENT_STREAMS].new_value == 100)
c.initiate_connection()
data = (yield c.data_to_send())
events = c.receive_data(data)
assert (len(events) == 3)
assert isinstance(events[0], h2.events.SettingsAcknowledged)
assert isinstance(events[1], h2.events.RequestReceived)
assert (events[1].stream_id == 1)
assert (events[1].headers == request_headers)
assert isinstance(events[2], h2.events.StreamEnded)
assert (events[2].stream_id == 1)
events = c.send_headers(1, response_headers, end_stream=True)
assert (not events)
(yield c.data_to_send())
self.run_until_complete(client(), server()) |
@admin.register(Advertiser)
class AdvertiserAdmin(RemoveDeleteMixin, SimpleHistoryAdmin):
actions = ['action_create_draft_invoice']
inlines = (CampaignInline,)
list_display = ('name', 'report', 'stripe_customer')
list_per_page = 500
prepopulated_fields = {'slug': ('name',)}
raw_id_fields = ('djstripe_customer',)
readonly_fields = ('modified', 'created')
search_fields = ('name', 'slug', 'djstripe_customer__id')
    @admin.action(description=_('Create a draft invoice for this customer'))
def action_create_draft_invoice(self, request, queryset):
if (not settings.STRIPE_ENABLED):
messages.add_message(request, messages.ERROR, _('Stripe is not configured. Please set the envvar STRIPE_SECRET_KEY.'))
return
flight_start = timezone.now()
flight_end = (flight_start + timedelta(days=30))
for advertiser in queryset:
if advertiser.djstripe_customer:
stripe.InvoiceItem.create(customer=advertiser.djstripe_customer.id, description='Advertising - per 1k impressions', quantity=200, unit_amount=300, currency='USD')
inv = stripe.Invoice.create(customer=advertiser.djstripe_customer.id, auto_advance=False, collection_method='send_invoice', custom_fields=[{'name': 'Advertiser', 'value': advertiser.slug[:30]}, {'name': 'Estimated Start', 'value': flight_start.strftime('%Y-%m-%d')}, {'name': 'Estimated End', 'value': flight_end.strftime('%Y-%m-%d')}], days_until_due=30)
invoice = Invoice.sync_from_stripe_data(inv)
messages.add_message(request, messages.SUCCESS, _('New Stripe invoice for {}: {}'.format(advertiser, invoice.get_stripe_dashboard_url())))
else:
messages.add_message(request, messages.ERROR, _('No Stripe customer ID for {}'.format(advertiser)))
def report(self, instance):
if (not instance.pk):
return ''
return mark_safe('<a href="{url}">{name}</a>'.format(name=(escape(instance.name) + ' Report'), url=instance.get_absolute_url()))
def stripe_customer(self, obj):
if obj.djstripe_customer:
return format_html('<a href="{}" target="_blank" rel="noopener noreferrer">{}</a>', obj.djstripe_customer.get_stripe_dashboard_url(), obj.djstripe_customer.name)
return None |
import numpy as np

def insert_sphere(arr, sp_radius=4, sp_centre=(0, 0, 0)):
    arr_copy = arr.copy()  # ndarray slicing (arr[:]) returns a view; copy() keeps the input untouched
(x, y, z) = np.indices(arr.shape)
if (not hasattr(sp_radius, '__iter__')):
sp_radius = ([sp_radius] * 3)
(sp_radius_x, sp_radius_y, sp_radius_z) = sp_radius
arr_copy[((((((x - sp_centre[0]) / sp_radius_x) ** 2.0) + (((y - sp_centre[1]) / sp_radius_y) ** 2.0)) + (((z - sp_centre[2]) / sp_radius_z) ** 2.0)) <= 1)] = 1
return arr_copy |
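# Usage sketch: stamp an ellipsoid of ones into an empty volume; with the
# .copy() above, the input array is left untouched.
vol = np.zeros((32, 32, 32))
marked = insert_sphere(vol, sp_radius=(4, 6, 8), sp_centre=(16, 16, 16))
assert ((vol.sum() == 0) and (marked.sum() > 0)) |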