import datetime as dt

import pytz


def datetime_to_djd(time):
    """Convert a datetime to Dublin Julian Day (days since 1899-12-31 12:00 UTC)."""
    if time.tzinfo is None:
        time_utc = pytz.utc.localize(time)
    else:
        time_utc = time.astimezone(pytz.utc)
    djd_start = pytz.utc.localize(dt.datetime(1899, 12, 31, 12))
    return (time_utc - djd_start).total_seconds() / (60 * 60 * 24)
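# A quick sanity check for datetime_to_djd, assuming the imports above: noon
# UTC on 1899-12-31 is the Dublin Julian Day epoch, so it maps to 0.0, and
# one day later maps to 1.0.
assert datetime_to_djd(dt.datetime(1899, 12, 31, 12, tzinfo=pytz.utc)) == 0.0
assert datetime_to_djd(dt.datetime(1900, 1, 1, 12, tzinfo=pytz.utc)) == 1.0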
class ScriptMakerCustom(ScriptMaker):
def __init__(self, target_dir, version_info, executable, name) -> None:
super().__init__(None, str(target_dir))
self.clobber = True
self.set_mode = True
self.executable = enquote_executable(str(executable))
self.version_info = (version_info.major, version_info.minor)
self.variants = {'', 'X', 'X.Y'}
self._name = name
def _write_script(self, names, shebang, script_bytes, filenames, ext):
names.add(f'{self._name}{self.version_info[0]}.{self.version_info[1]}')
        super()._write_script(names, shebang, script_bytes, filenames, ext)
def read_file_list():
    basedir = radare2_includedir + '/'
    headers = ['r_core.h', 'r_asm.h', 'r_anal.h', 'r_bin.h', 'r_debug.h', 'r_io.h', 'r_config.h',
               'r_flag.h', 'r_sign.h', 'r_hash.h', 'r_egg.h', 'r_fs.h', 'r_lang.h', 'r_pdb.h']
    return [basedir + h for h in headers]
def adjust_rel_elec_density(dicom_dataset, adjustment_map, ignore_missing_structure=False):
new_dicom_dataset = deepcopy(dicom_dataset)
ROI_name_to_number_map = {structure_set.ROIName: structure_set.ROINumber for structure_set in new_dicom_dataset.StructureSetROISequence}
ROI_number_to_observation_map = {observation.ReferencedROINumber: observation for observation in new_dicom_dataset.RTROIObservationsSequence}
for (structure_name, new_red) in adjustment_map.items():
try:
ROI_number = ROI_name_to_number_map[structure_name]
except KeyError:
if ignore_missing_structure:
continue
raise
observation = ROI_number_to_observation_map[ROI_number]
try:
physical_properties = observation.ROIPhysicalPropertiesSequence
except AttributeError:
physical_properties = []
physical_properties = delete_sequence_item_with_matching_key(physical_properties, 'ROIPhysicalProperty', 'REL_ELEC_DENSITY')
physical_properties.append(dicom_dataset_from_dict({'ROIPhysicalProperty': 'REL_ELEC_DENSITY', 'ROIPhysicalPropertyValue': new_red}))
observation.ROIPhysicalPropertiesSequence = physical_properties
    return new_dicom_dataset
class GraphRewriter(Rewriter):
def apply(self, fgraph):
raise NotImplementedError()
def rewrite(self, fgraph, *args, **kwargs):
self.add_requirements(fgraph)
return self.apply(fgraph, *args, **kwargs)
def __call__(self, fgraph):
return self.rewrite(fgraph)
def add_requirements(self, fgraph):
...
def print_summary(self, stream=sys.stdout, level=0, depth=(- 1)):
name = getattr(self, 'name', None)
print(f"{(' ' * level)}{self.__class__.__name__} {name} id={id(self)}", file=stream)
    @classmethod  # the cls parameter implies a classmethod decorator stripped during extraction
    def print_profile(cls, stream, prof, level=0):
if (prof is not None):
            raise NotImplementedError('The function `print_profile` must be overridden when the rewriter returns profiling information.')
class TableTruncate(ABC):
def __init__(self, tokenizer: BasicTokenizer=None, max_input_length: int=1024):
if (tokenizer is None):
self.tokenizer = AutoTokenizer.from_pretrained(pretrained_model_name_or_path='facebook/bart-large')
else:
self.tokenizer = tokenizer
self.max_length = max_input_length
def truncate_table(self, table_content: Dict, question: str, answer: List):
        pass
def keras_model():
    model = Sequential([
        Conv2D(8, (2, 2), input_shape=(16, 16, 3)),
        BatchNormalization(momentum=0.3, epsilon=0.65),
        AvgPool2D(),
        MaxPool2D(),
        BatchNormalization(momentum=0.4, epsilon=0.25),
        Conv2D(4, (2, 2), activation=tf.nn.tanh, kernel_regularizer=tf.keras.regularizers.l2(0.5)),
        Flatten(),
        Dense(2, activation='softmax', name='keras_model'),
    ])
    return model
class COCOFeaturesDataset(BaseFeaturesDataset):
def __init__(self, *args, **kwargs):
super(COCOFeaturesDataset, self).__init__()
self.feature_readers = []
self.feature_dict = {}
self.fast_read = kwargs['fast_read']
self.writer = registry.get('writer')
for image_feature_dir in kwargs['directories']:
feature_reader = FeatureReader(base_path=image_feature_dir, depth_first=kwargs['depth_first'], max_features=kwargs['max_features'])
self.feature_readers.append(feature_reader)
self.imdb = kwargs['imdb']
self.kwargs = kwargs
self.should_return_info = kwargs.get('return_info', True)
if self.fast_read:
self.writer.write(('Fast reading features from %s' % ', '.join(kwargs['directories'])))
self.writer.write('Hold tight, this may take a while...')
self._threaded_read()
def _threaded_read(self):
elements = [idx for idx in range(1, len(self.imdb))]
pool = ThreadPool(processes=4)
with tqdm.tqdm(total=len(elements), disable=(not is_main_process())) as pbar:
for (i, _) in enumerate(pool.imap_unordered(self._fill_cache, elements)):
if ((i % 100) == 0):
pbar.update(100)
pool.close()
def _fill_cache(self, idx):
feat_file = self.imdb[idx]['feature_path']
(features, info) = self._read_features_and_info(feat_file)
self.feature_dict[feat_file] = (features, info)
def _read_features_and_info(self, feat_file):
features = []
infos = []
for feature_reader in self.feature_readers:
(feature, info) = feature_reader.read(feat_file)
features.append(feature)
infos.append(info)
if (not self.should_return_info):
infos = None
return (features, infos)
def _get_image_features_and_info(self, feat_file):
(image_feats, infos) = self.feature_dict.get(feat_file, (None, None))
if (image_feats is None):
(image_feats, infos) = self._read_features_and_info(feat_file)
return (image_feats, infos)
def __len__(self):
return (len(self.imdb) - 1)
def __getitem__(self, idx):
image_info = self.imdb[idx]
image_file_name = image_info.get('feature_path', None)
if (image_file_name is None):
image_file_name = '{}.npy'.format(image_info['image_id'])
(image_features, infos) = self._get_image_features_and_info(image_file_name)
item = {}
for (idx, image_feature) in enumerate(image_features):
item[('image_feature_%s' % idx)] = image_feature
if (infos is not None):
item[('image_info_%s' % idx)] = infos[idx]
        return item
def get_preprocessor(model_name: str) -> Optional[Union[('AutoTokenizer', 'AutoFeatureExtractor', 'AutoProcessor')]]:
from .. import AutoFeatureExtractor, AutoProcessor, AutoTokenizer
try:
return AutoProcessor.from_pretrained(model_name)
except (ValueError, OSError, KeyError):
(tokenizer, feature_extractor) = (None, None)
try:
tokenizer = AutoTokenizer.from_pretrained(model_name)
except (OSError, KeyError):
pass
try:
feature_extractor = AutoFeatureExtractor.from_pretrained(model_name)
except (OSError, KeyError):
pass
if ((tokenizer is not None) and (feature_extractor is not None)):
raise ValueError(f"Couldn't auto-detect preprocessor for {model_name}. Found both a tokenizer and a feature extractor.")
elif ((tokenizer is None) and (feature_extractor is None)):
return None
elif (tokenizer is not None):
return tokenizer
else:
            return feature_extractor
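# Hedged usage sketch for get_preprocessor: AutoProcessor is tried first,
# then the tokenizer/feature-extractor fallbacks. For a text-only checkpoint
# such as 'bert-base-uncased' this should yield a tokenizer-like object
# (the transformers package and network access are assumed).
preprocessor = get_preprocessor('bert-base-uncased')
print(type(preprocessor).__name__ if preprocessor is not None else 'no preprocessor found')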
class Bottleneck(nn.Module):
expansion = 4
def __init__(self, inplanes, planes, stride=1, downsample=None, norm_layer=nn.BatchNorm2d):
super(Bottleneck, self).__init__()
self.conv1 = nn.Conv2d(inplanes, planes, 1, bias=False)
self.bn1 = norm_layer(planes)
self.conv2 = nn.Conv2d(planes, planes, 3, stride, 1, bias=False)
self.bn2 = norm_layer(planes)
self.conv3 = nn.Conv2d(planes, (planes * self.expansion), 1, bias=False)
self.bn3 = norm_layer((planes * self.expansion))
self.relu = nn.ReLU(True)
self.downsample = downsample
self.stride = stride
def forward(self, x):
identity = x
out = self.conv1(x)
out = self.bn1(out)
out = self.relu(out)
out = self.conv2(out)
out = self.bn2(out)
out = self.relu(out)
out = self.conv3(out)
out = self.bn3(out)
if (self.downsample is not None):
identity = self.downsample(x)
out += identity
out = self.relu(out)
        return out
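# Shape check for Bottleneck (torch assumed imported as in the snippet): with
# stride 2 the identity branch needs a 1x1 projection so the residual
# addition lines up.
downsample = nn.Sequential(
    nn.Conv2d(64, 64 * Bottleneck.expansion, 1, stride=2, bias=False),
    nn.BatchNorm2d(64 * Bottleneck.expansion),
)
block = Bottleneck(64, 64, stride=2, downsample=downsample)
print(block(torch.randn(2, 64, 56, 56)).shape)  # torch.Size([2, 256, 28, 28])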
@registry.register('lr_scheduler', 'warmup_polynomial')  # reconstructed: the decorator was stripped to a bare tuple in the source
@attr.s
class WarmupPolynomialLRScheduler:
    param_groups = attr.ib()
num_warmup_steps = attr.ib()
start_lr = attr.ib()
end_lr = attr.ib()
decay_steps = attr.ib()
power = attr.ib()
def update_lr(self, current_step):
if (current_step < self.num_warmup_steps):
warmup_frac_done = (current_step / self.num_warmup_steps)
new_lr = (self.start_lr * warmup_frac_done)
else:
new_lr = (((self.start_lr - self.end_lr) * ((1 - ((current_step - self.num_warmup_steps) / self.decay_steps)) ** self.power)) + self.end_lr)
for param_group in self.param_groups:
            param_group['lr'] = new_lr
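# Minimal usage sketch for WarmupPolynomialLRScheduler, assuming the attrs
# decorators reconstructed above and a torch optimizer whose param_groups it
# mutates in place.
import torch

opt = torch.optim.SGD([torch.nn.Parameter(torch.zeros(1))], lr=0.0)
sched = WarmupPolynomialLRScheduler(
    param_groups=opt.param_groups, num_warmup_steps=100,
    start_lr=1e-3, end_lr=0.0, decay_steps=900, power=1.0)
for step in (0, 50, 100, 1000):
    sched.update_lr(step)
    print(step, opt.param_groups[0]['lr'])  # 0.0, 5e-4, 1e-3, 0.0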
def test_list_from_file():
with tempfile.TemporaryDirectory() as tmpdirname:
for (i, lines) in enumerate(lists):
filename = f'{tmpdirname}/{i}.txt'
with open(filename, 'w', encoding='utf-8') as f:
                f.writelines(f'{line}\n' for line in lines)
lines2 = list_from_file(filename, encoding='utf-8')
lines = list(map(str, lines))
assert (len(lines) == len(lines2))
assert all(((line1 == line2) for (line1, line2) in zip(lines, lines2)))
for (i, lines) in enumerate(dicts):
filename = f'{tmpdirname}/{i}.jsonl'
with open(filename, 'w', encoding='utf-8') as f:
                f.writelines(f'{line}\n' for line in lines)
lines2 = list_from_file(filename, encoding='utf-8')
lines = list(map(str, lines))
assert (len(lines) == len(lines2))
            assert all(((line1 == line2) for (line1, line2) in zip(lines, lines2)))
class LFPluginCollWrapper():
def __init__(self, lfplugin: 'LFPlugin') -> None:
self.lfplugin = lfplugin
self._collected_at_least_one_failure = False
    @hookimpl(wrapper=True)  # reconstructed from the stripped residue "(wrapper=True)"
def pytest_make_collect_report(self, collector: nodes.Collector) -> Generator[(None, CollectReport, CollectReport)]:
res = (yield)
if isinstance(collector, (Session, Directory)):
lf_paths = self.lfplugin._last_failed_paths
def sort_key(node: Union[(nodes.Item, nodes.Collector)]) -> bool:
return (node.path in lf_paths)
res.result = sorted(res.result, key=sort_key, reverse=True)
elif isinstance(collector, File):
if (collector.path in self.lfplugin._last_failed_paths):
result = res.result
lastfailed = self.lfplugin.lastfailed
if (not self._collected_at_least_one_failure):
if (not any(((x.nodeid in lastfailed) for x in result))):
return res
self.lfplugin.config.pluginmanager.register(LFPluginCollSkipfiles(self.lfplugin), 'lfplugin-collskip')
self._collected_at_least_one_failure = True
session = collector.session
result[:] = [x for x in result if ((x.nodeid in lastfailed) or session.isinitpath(x.path) or isinstance(x, nodes.Collector))]
        return res
class PositionConfig(Config):
auto_fullscreen = True
groups = [config.Group('a'), config.Group('b')]
layouts = [layout.MonadTall(), layout.TreeTab()]
floating_layout = resources.default_config.floating_layout
keys = []
mouse = []
screens = []
    follow_mouse_focus = False
@pytest.fixture  # assumed: the generator-style setup/teardown indicates a pytest fixture whose decorator was stripped
def repo_with_no_tags_emoji_commits(git_repo_factory, file_in_repo):
git_repo = git_repo_factory()
add_text_to_file(git_repo, file_in_repo)
git_repo.git.commit(m='Initial commit')
add_text_to_file(git_repo, file_in_repo)
git_repo.git.commit(m=':bug: add some more text')
add_text_to_file(git_repo, file_in_repo)
git_repo.git.commit(m=':sparkles: add much more text')
add_text_to_file(git_repo, file_in_repo)
git_repo.git.commit(m=':bug: more text')
(yield git_repo)
    git_repo.close()
class TestOptional():
def test_success_with_type(self):
c = optional(int)
assert (c('42') == 42)
def test_success_with_none(self):
c = optional(int)
assert (c(None) is None)
def test_fail(self):
c = optional(int)
with pytest.raises(ValueError):
            c('not_an_int')
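# The optional() combinator under test is not shown above; a minimal sketch
# that satisfies these three cases (an assumption, not the library's actual
# implementation):
def optional(converter):
    def convert(value):
        if value is None:
            return None
        return converter(value)
    return convert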
class CmdLineApp(cmd.Cmd):
MUMBLES = ['like', '...', 'um', 'er', 'hmmm', 'ahh']
MUMBLE_FIRST = ['so', 'like', 'well']
MUMBLE_LAST = ['right?']
def do_exit(self, line):
return True
do_EOF = do_exit
do_quit = do_exit
def do_speak(self, line):
print(line, file=self.stdout)
do_say = do_speak
def do_mumble(self, line):
words = line.split(' ')
output = []
if (random.random() < 0.33):
output.append(random.choice(self.MUMBLE_FIRST))
for word in words:
if (random.random() < 0.4):
output.append(random.choice(self.MUMBLES))
output.append(word)
if (random.random() < 0.25):
output.append(random.choice(self.MUMBLE_LAST))
        print(' '.join(output), file=self.stdout)
def flat_xml_to_elements(root):
elements = {}
ns_map = get_ns_map(root)
uri_attrib = get_ns_tag('dc:uri', ns_map)
for node in root:
        uri = get_uri(node, ns_map)
        element = {'uri': uri, 'model': models[node.tag]}
for sub_node in node:
tag = strip_ns(sub_node.tag, ns_map)
if (uri_attrib in sub_node.attrib):
element[tag] = {'uri': sub_node.attrib[uri_attrib]}
if (sub_node.tag in models):
element[tag]['model'] = models[sub_node.tag]
elif ('lang' in sub_node.attrib):
element['{}_{}'.format(tag, sub_node.attrib['lang'])] = sub_node.text
elif list(sub_node):
element[tag] = []
for sub_sub_node in sub_node:
sub_element = {'uri': sub_sub_node.attrib[uri_attrib]}
if (sub_sub_node.tag in models):
sub_element['model'] = models[sub_sub_node.tag]
if ('order' in sub_sub_node.attrib):
sub_element['order'] = sub_sub_node.attrib['order']
element[tag].append(sub_element)
elif ((sub_node.text is None) or (not sub_node.text.strip())):
element[tag] = None
else:
element[tag] = sub_node.text
elements[uri] = element
    return elements
class KnownValues(unittest.TestCase):
def test_tda(self):
td = tdscf.TDA(mf).run(nstates=nstates)
tdg = td.nuc_grad_method()
g1 = tdg.kernel(state=3)
self.assertAlmostEqual(g1[(0, 2)], (- 0.), 5)
td_solver = td.as_scanner()
e1 = td_solver(pmol.set_geom_('H 0 0 1.805; F 0 0 0', unit='B'))
e2 = td_solver(pmol.set_geom_('H 0 0 1.803; F 0 0 0', unit='B'))
self.assertAlmostEqual(((e1[2] - e2[2]) / 0.002), g1[(0, 2)], 4)
def test_tdhf(self):
td = tdscf.TDDFT(mf).run(nstates=nstates)
tdg = td.nuc_grad_method()
g1 = tdg.kernel(td.xy[2])
self.assertAlmostEqual(g1[(0, 2)], (- 0.), 5)
td_solver = td.as_scanner()
e1 = td_solver(pmol.set_geom_('H 0 0 1.805; F 0 0 0', unit='B'))
e2 = td_solver(pmol.set_geom_('H 0 0 1.803; F 0 0 0', unit='B'))
        self.assertAlmostEqual(((e1[2] - e2[2]) / 0.002), g1[(0, 2)], 4)
class Effect6599(BaseEffect):
type = 'passive'
    @staticmethod  # assumed: handler takes no self, as in pyfa-style effect classes
    def handler(fit, src, context, projectionRange, **kwargs):
fit.ship.boostItemAttr('armorKineticDamageResonance', src.getModifiedItemAttr('shipBonusCarrierA1'), skill='Amarr Carrier', **kwargs)
fit.ship.boostItemAttr('armorEmDamageResonance', src.getModifiedItemAttr('shipBonusCarrierA1'), skill='Amarr Carrier', **kwargs)
fit.ship.boostItemAttr('armorExplosiveDamageResonance', src.getModifiedItemAttr('shipBonusCarrierA1'), skill='Amarr Carrier', **kwargs)
        fit.ship.boostItemAttr('armorThermalDamageResonance', src.getModifiedItemAttr('shipBonusCarrierA1'), skill='Amarr Carrier', **kwargs)
def create_h5_sdf_pt(h5_file, sdf_file, norm_obj_file, centroid, m, sdf_res, num_sample, bandwidth, iso_val, max_verts, normalize, reduce=8):
sdf_dict = get_sdf(sdf_file, sdf_res)
ori_verts = np.asarray([0.0, 0.0, 0.0], dtype=np.float32).reshape((1, 3))
(samplesdf, is_insideout) = sample_sdf(num_sample, bandwidth, iso_val, sdf_dict, sdf_res, reduce)
print('[*] start writing: ', h5_file)
norm_params = np.concatenate((centroid, np.asarray([m]).astype(np.float32)))
f1 = h5py.File(h5_file, 'w')
f1.create_dataset('pc_sdf_original', data=ori_verts.astype(np.float32), compression='gzip', compression_opts=4)
f1.create_dataset('pc_sdf_sample', data=samplesdf.astype(np.float32), compression='gzip', compression_opts=4)
f1.create_dataset('norm_params', data=norm_params, compression='gzip', compression_opts=4)
f1.create_dataset('sdf_params', data=sdf_dict['param'], compression='gzip', compression_opts=4)
f1.close()
print('[*] end writing: ', h5_file)
command_str = ('rm -rf ' + norm_obj_file)
print('[*] command:', command_str)
os.system(command_str)
command_str = ('rm -rf ' + sdf_file)
print('[*] command:', command_str)
    os.system(command_str)
class EOH(QuantumAlgorithm):
def __init__(self, operator: LegacyBaseOperator, initial_state: Union[(InitialState, QuantumCircuit)], evo_operator: LegacyBaseOperator, evo_time: float=1, num_time_slices: int=1, expansion_mode: str='trotter', expansion_order: int=1, quantum_instance: Optional[Union[(QuantumInstance, BaseBackend, Backend)]]=None) -> None:
validate_min('evo_time', evo_time, 0)
validate_min('num_time_slices', num_time_slices, 1)
validate_in_set('expansion_mode', expansion_mode, {'trotter', 'suzuki'})
validate_min('expansion_order', expansion_order, 1)
super().__init__(quantum_instance)
self._operator = op_converter.to_weighted_pauli_operator(operator)
self._initial_state = initial_state
self._evo_operator = op_converter.to_weighted_pauli_operator(evo_operator)
self._evo_time = evo_time
self._num_time_slices = num_time_slices
self._expansion_mode = expansion_mode
self._expansion_order = expansion_order
self._ret = {}
def construct_circuit(self):
quantum_registers = QuantumRegister(self._operator.num_qubits, name='q')
if isinstance(self._initial_state, QuantumCircuit):
qc = QuantumCircuit(quantum_registers)
qc.compose(self._initial_state, inplace=True)
else:
qc = self._initial_state.construct_circuit('circuit', quantum_registers)
qc.append(self._evo_operator.evolve(evo_time=self._evo_time, num_time_slices=self._num_time_slices, quantum_registers=quantum_registers, expansion_mode=self._expansion_mode, expansion_order=self._expansion_order), qc.qubits)
return qc
def _run(self):
qc = self.construct_circuit()
qc_with_op = self._operator.construct_evaluation_circuit(wave_function=qc, statevector_mode=self._quantum_instance.is_statevector)
result = self._quantum_instance.execute(qc_with_op)
(self._ret['avg'], self._ret['std_dev']) = self._operator.evaluate_with_result(result=result, statevector_mode=self._quantum_instance.is_statevector)
        return self._ret
class HSAFFileHandler(BaseFileHandler):
def __init__(self, filename, filename_info, filetype_info):
super(HSAFFileHandler, self).__init__(filename, filename_info, filetype_info)
self._msg_datasets = {}
self._start_time = None
self._end_time = None
try:
with pygrib.open(self.filename) as grib_file:
first_msg = grib_file.message(1)
analysis_time = self._get_datetime(first_msg)
self._analysis_time = analysis_time
self.metadata = self.get_metadata(first_msg)
except (RuntimeError, KeyError):
raise IOError('Unknown GRIB file format: {}'.format(self.filename))
    @staticmethod  # no self parameter, and called above as self._get_datetime(first_msg)
    def _get_datetime(msg):
dtstr = (str(msg['dataDate']) + str(msg['dataTime']).zfill(4))
return datetime.strptime(dtstr, '%Y%m%d%H%M')
    @property  # assumed: a no-argument accessor for _analysis_time
    def analysis_time(self):
return self._analysis_time
def get_metadata(self, msg):
try:
center_description = msg['centreDescription']
except (RuntimeError, KeyError):
center_description = None
ds_info = {'filename': self.filename, 'shortName': msg['shortName'], 'long_name': msg['name'], 'units': msg['units'], 'centreDescription': center_description, 'data_time': self._analysis_time, 'nx': msg['Nx'], 'ny': msg['Ny'], 'projparams': msg.projparams}
return ds_info
def get_area_def(self, dsid):
msg = self._get_message(1)
try:
return self._get_area_def(msg)
except (RuntimeError, KeyError):
raise RuntimeError('Unknown GRIB projection information')
def _get_area_def(self, msg):
proj_param = msg.projparams.copy()
Rx = ((2 * np.arcsin((1.0 / msg['NrInRadiusOfEarth']))) / msg['dx'])
Ry = ((2 * np.arcsin((1.0 / msg['NrInRadiusOfEarth']))) / msg['dy'])
x_0 = (- msg['XpInGridLengths'])
x_1 = (msg['Nx'] - msg['XpInGridLengths'])
y_0 = ((msg['Ny'] - msg['YpInGridLengths']) * (- 1))
y_1 = msg['YpInGridLengths']
min_x = ((x_0 * Rx) * proj_param['h'])
max_x = ((x_1 * Rx) * proj_param['h'])
min_y = ((y_0 * Ry) * proj_param['h'])
max_y = ((y_1 * Ry) * proj_param['h'])
area_extent = (min_x, min_y, max_x, max_y)
area = geometry.AreaDefinition('hsaf_region', 'A region from H-SAF', 'geos', proj_param, msg['Nx'], msg['Ny'], area_extent)
return area
def _get_message(self, idx):
with pygrib.open(self.filename) as grib_file:
msg = grib_file.message(idx)
return msg
def get_dataset(self, ds_id, ds_info):
if (ds_id['name'] not in self.filename):
raise IOError('File does not contain {} data'.format(ds_id['name']))
msg = self._get_message(1)
ds_info = self.get_metadata(msg)
ds_info['end_time'] = ds_info['data_time']
if ((ds_id['name'] == 'h05') or (ds_id['name'] == 'h05B')):
flen = len(self.filename)
timedelt = self.filename[(flen - 10):(flen - 8)]
ds_info['start_time'] = (ds_info['end_time'] - timedelta(hours=int(timedelt)))
else:
ds_info['start_time'] = ds_info['end_time']
fill = msg['missingValue']
data = msg.values.astype(np.float32)
if (msg.valid_key('jScansPositively') and (msg['jScansPositively'] == 1)):
data = data[::(- 1)]
if isinstance(data, np.ma.MaskedArray):
data = data.filled(np.nan)
data = da.from_array(data, chunks=CHUNK_SIZE)
else:
data[(data == fill)] = np.nan
data = da.from_array(data, chunks=CHUNK_SIZE)
        return xr.DataArray(data, attrs=ds_info, dims=('y', 'x'))
class TestVariableModule(TestCase):
def test_is_list_of_tuples(self):
a_list = [(1, 2), (3, 4)]
self.assertEqual(variable.is_list_of_tuples(a_list), (True, a_list))
a_list = [1, 2, 3, 4]
self.assertEqual(variable.is_list_of_tuples(a_list), (False, None))
def test_list_test(self):
a_list = [1, 2, 3, 4]
self.assertEqual(variable.list_test(a_list), True)
a_list = str([1, 2, 3, 4])
self.assertEqual(variable.list_test(a_list), False)
def test_list_of_tuples_test(self):
a_list = [(1, 2), (3, 4)]
self.assertEqual(variable.dict_test(a_list), False)
d = dict(a_list)
        self.assertEqual(variable.dict_test(d), True)
def call_optional(obj: object, name: str, nodeid: str) -> bool:
method = getattr(obj, name, None)
if (method is None):
return False
is_fixture = (getfixturemarker(method) is not None)
if is_fixture:
return False
if (not callable(method)):
return False
method_name = getattr(method, '__name__', str(method))
warnings.warn(NOSE_SUPPORT.format(nodeid=nodeid, method=method_name, stage=name), stacklevel=2)
method()
    return True
def init_pretrained_weights(model, model_url):
if (model_url is None):
import warnings
warnings.warn('ImageNet pretrained weights are unavailable for this model')
return
pretrain_dict = model_zoo.load_url(model_url)
model_dict = model.state_dict()
pretrain_dict = {k: v for (k, v) in pretrain_dict.items() if ((k in model_dict) and (model_dict[k].size() == v.size()))}
model_dict.update(pretrain_dict)
    model.load_state_dict(model_dict)
def enum_assemble(node, neighbors, prev_nodes=[], prev_amap=[]):
all_attach_confs = []
singletons = [nei_node.nid for nei_node in (neighbors + prev_nodes) if (nei_node.mol.GetNumAtoms() == 1)]
def search(cur_amap, depth):
if (len(all_attach_confs) > MAX_NCAND):
return
if (depth == len(neighbors)):
all_attach_confs.append(cur_amap)
return
nei_node = neighbors[depth]
cand_amap = enum_attach(node.mol, nei_node, cur_amap, singletons)
cand_smiles = set()
candidates = []
for amap in cand_amap:
cand_mol = local_attach(node.mol, neighbors[:(depth + 1)], prev_nodes, amap)
cand_mol = sanitize(cand_mol)
if (cand_mol is None):
continue
smiles = get_smiles(cand_mol)
if (smiles in cand_smiles):
continue
cand_smiles.add(smiles)
candidates.append(amap)
if (len(candidates) == 0):
return
for new_amap in candidates:
search(new_amap, (depth + 1))
search(prev_amap, 0)
cand_smiles = set()
candidates = []
aroma_score = []
for amap in all_attach_confs:
cand_mol = local_attach(node.mol, neighbors, prev_nodes, amap)
cand_mol = Chem.MolFromSmiles(Chem.MolToSmiles(cand_mol))
smiles = Chem.MolToSmiles(cand_mol)
if ((smiles in cand_smiles) or (check_singleton(cand_mol, node, neighbors) == False)):
continue
cand_smiles.add(smiles)
candidates.append((smiles, amap))
aroma_score.append(check_aroma(cand_mol, node, neighbors))
    return (candidates, aroma_score)
class RelatednessPytorch(object):
def __init__(self, train, valid, test, devscores, config):
np.random.seed(config['seed'])
torch.manual_seed(config['seed'])
assert torch.cuda.is_available(), 'torch.cuda required for Relatedness'
torch.cuda.manual_seed(config['seed'])
self.train = train
self.valid = valid
self.test = test
self.devscores = devscores
self.inputdim = train['X'].shape[1]
self.nclasses = config['nclasses']
self.seed = config['seed']
self.l2reg = 0.0
self.batch_size = 64
self.maxepoch = 1000
self.early_stop = True
self.model = nn.Sequential(nn.Linear(self.inputdim, self.nclasses), nn.Softmax(dim=(- 1)))
self.loss_fn = nn.MSELoss()
if torch.cuda.is_available():
self.model = self.model.cuda()
self.loss_fn = self.loss_fn.cuda()
self.loss_fn.size_average = False
self.optimizer = optim.Adam(self.model.parameters(), weight_decay=self.l2reg)
def prepare_data(self, trainX, trainy, devX, devy, testX, testy):
trainX = torch.from_numpy(trainX).float().cuda()
trainy = torch.from_numpy(trainy).float().cuda()
devX = torch.from_numpy(devX).float().cuda()
devy = torch.from_numpy(devy).float().cuda()
testX = torch.from_numpy(testX).float().cuda()
        testy = torch.from_numpy(testy).float().cuda()  # was mistyped as "testY", so the raw numpy array was returned
        return (trainX, trainy, devX, devy, testX, testy)
def run(self):
self.nepoch = 0
bestpr = (- 1)
early_stop_count = 0
r = np.arange(1, 6)
stop_train = False
(trainX, trainy, devX, devy, testX, testy) = self.prepare_data(self.train['X'], self.train['y'], self.valid['X'], self.valid['y'], self.test['X'], self.test['y'])
while ((not stop_train) and (self.nepoch <= self.maxepoch)):
self.trainepoch(trainX, trainy, nepoches=50)
yhat = np.dot(self.predict_proba(devX), r)
pr = spearmanr(yhat, self.devscores)[0]
pr = (0 if (pr != pr) else pr)
if (pr > bestpr):
bestpr = pr
bestmodel = copy.deepcopy(self.model)
elif self.early_stop:
if (early_stop_count >= 3):
stop_train = True
early_stop_count += 1
self.model = bestmodel
yhat = np.dot(self.predict_proba(testX), r)
return (bestpr, yhat)
def trainepoch(self, X, y, nepoches=1):
self.model.train()
for _ in range(self.nepoch, (self.nepoch + nepoches)):
permutation = np.random.permutation(len(X))
all_costs = []
for i in range(0, len(X), self.batch_size):
idx = torch.from_numpy(permutation[i:(i + self.batch_size)]).long().cuda()
Xbatch = X[idx]
ybatch = y[idx]
output = self.model(Xbatch)
loss = self.loss_fn(output, ybatch)
all_costs.append(loss.item())
self.optimizer.zero_grad()
loss.backward()
self.optimizer.step()
self.nepoch += nepoches
def predict_proba(self, devX):
self.model.eval()
probas = []
with torch.no_grad():
for i in range(0, len(devX), self.batch_size):
Xbatch = devX[i:(i + self.batch_size)]
if (len(probas) == 0):
probas = self.model(Xbatch).data.cpu().numpy()
else:
probas = np.concatenate((probas, self.model(Xbatch).data.cpu().numpy()), axis=0)
        return probas
def get_inverse_hvp_lissa(v, model, device, param_influence, train_loader, damping, num_samples, recursion_depth, scale=10000.0):
ihvp = None
for i in range(num_samples):
cur_estimate = v
lissa_data_iterator = iter(train_loader)
for j in range(recursion_depth):
try:
(input_ids, input_mask, segment_ids, label_ids, guids) = next(lissa_data_iterator)
except StopIteration:
lissa_data_iterator = iter(train_loader)
(input_ids, input_mask, segment_ids, label_ids, guids) = next(lissa_data_iterator)
input_ids = input_ids.to(device)
input_mask = input_mask.to(device)
segment_ids = segment_ids.to(device)
label_ids = label_ids.to(device)
model.zero_grad()
train_loss = model(input_ids, segment_ids, input_mask, label_ids)
hvp = hv(train_loss, param_influence, cur_estimate)
cur_estimate = [((_a + ((1 - damping) * _b)) - (_c / scale)) for (_a, _b, _c) in zip(v, cur_estimate, hvp)]
if (((j % 200) == 0) or (j == (recursion_depth - 1))):
print(('Recursion at depth %s: norm is %f' % (j, np.linalg.norm(gather_flat_grad(cur_estimate).cpu().numpy()))))
        if ihvp is None:
ihvp = [(_a / scale) for _a in cur_estimate]
else:
ihvp = [(_a + (_b / scale)) for (_a, _b) in zip(ihvp, cur_estimate)]
return_ihvp = gather_flat_grad(ihvp)
return_ihvp /= num_samples
    return return_ihvp
def _intensity_validator(value, values):
if (not isinstance(value, tuple)):
raise ValueError('Input value {} of trigger_select should be a tuple'.format(value))
if (len(value) != 2):
raise ValueError('Number of parameters {} different from 2'.format(len(value)))
for i in range(2):
strict_discrete_range(value=value[i], values=values[i], step=1)
    return value
def table_to_file(table: pa.Table, base_path: str, file_system: AbstractFileSystem, block_path_provider: BlockWritePathProvider, content_type: str=ContentType.PARQUET.value, **kwargs) -> None:
writer = CONTENT_TYPE_TO_PA_WRITE_FUNC.get(content_type)
if (not writer):
        raise NotImplementedError(f"Pyarrow writer for content type '{content_type}' not implemented. Known content types: {CONTENT_TYPE_TO_PA_WRITE_FUNC.keys()}")
path = block_path_provider(base_path)
logger.debug(f'Writing table: {table} with kwargs: {kwargs} to path: {path}')
    writer(table, path, filesystem=file_system, **kwargs)
class Rumor_Data(Dataset):
def __init__(self, dataset):
self.text = torch.from_numpy(np.array(dataset['post_text']))
self.image = list(dataset['image'])
self.mask = torch.from_numpy(np.array(dataset['mask']))
self.label = torch.from_numpy(np.array(dataset['label']))
self.event_label = torch.from_numpy(np.array(dataset['event_label']))
        print('TEXT: %d, Image: %d, Label: %d, Event: %d' % (len(self.text), len(self.image), len(self.label), len(self.event_label)))
def __len__(self):
return len(self.label)
def __getitem__(self, idx):
        return ((self.text[idx], self.image[idx], self.mask[idx]), self.label[idx], self.event_label[idx])
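# Hedged usage sketch: feed Rumor_Data to a DataLoader. The field layout of
# the dataset dict below is an assumption inferred from the constructor.
import numpy as np
import torch
from torch.utils.data import DataLoader

dataset = {
    'post_text': np.zeros((4, 20), dtype=np.int64),
    'image': [torch.zeros(3, 8, 8) for _ in range(4)],
    'mask': np.ones((4, 20), dtype=np.float32),
    'label': np.zeros(4, dtype=np.int64),
    'event_label': np.zeros(4, dtype=np.int64),
}
loader = DataLoader(Rumor_Data(dataset), batch_size=2)
(text, image, mask), label, event_label = next(iter(loader))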
class SNDCGAN_Discrminator(object):
def __init__(self, batch_size=64, hidden_activation=lrelu, output_dim=1, scope='critic', **kwargs):
self.batch_size = batch_size
self.hidden_activation = hidden_activation
self.output_dim = output_dim
self.scope = scope
def __call__(self, x, update_collection=tf.GraphKeys.UPDATE_OPS, **kwargs):
with tf.variable_scope(self.scope):
c0_0 = self.hidden_activation(conv2d(x, 64, 3, 3, 1, 1, mhe=True, net_type='d', spectral_normed=False, update_collection=update_collection, stddev=0.02, name='c0_0'))
c0_1 = self.hidden_activation(conv2d(c0_0, 128, 4, 4, 2, 2, mhe=True, net_type='d', spectral_normed=False, update_collection=update_collection, stddev=0.02, name='c0_1'))
c1_0 = self.hidden_activation(conv2d(c0_1, 128, 3, 3, 1, 1, mhe=True, net_type='d', spectral_normed=False, update_collection=update_collection, stddev=0.02, name='c1_0'))
c1_1 = self.hidden_activation(conv2d(c1_0, 256, 4, 4, 2, 2, mhe=True, net_type='d', spectral_normed=False, update_collection=update_collection, stddev=0.02, name='c1_1'))
c2_0 = self.hidden_activation(conv2d(c1_1, 256, 3, 3, 1, 1, mhe=True, net_type='d', spectral_normed=False, update_collection=update_collection, stddev=0.02, name='c2_0'))
c2_1 = self.hidden_activation(conv2d(c2_0, 512, 4, 4, 2, 2, mhe=True, net_type='d', spectral_normed=False, update_collection=update_collection, stddev=0.02, name='c2_1'))
c3_0 = self.hidden_activation(conv2d(c2_1, 512, 3, 3, 1, 1, mhe=True, net_type='d', spectral_normed=False, update_collection=update_collection, stddev=0.02, name='c3_0'))
c3_0 = tf.reshape(c3_0, [self.batch_size, (- 1)])
l4 = linear(c3_0, self.output_dim, mhe=True, net_type='d', spectral_normed=False, update_collection=update_collection, stddev=0.02, name='l4')
            return tf.reshape(l4, [(- 1)])
def plot_time_cost(title, yrange, fed_async, fed_avg, fed_sync, fed_localA, local_train, fed_asofed, fed_bdfl, save_path=None, plot_size='L'):
font_settings = get_font_settings(plot_size)
x = range(1, (len(fed_async) + 1))
(fig, axes) = plt.subplots()
axes.plot(x, fed_async, label='DBAFL', linewidth=3, zorder=10, marker='o', markevery=5, markersize=8, mfc='none')
axes.plot(x, fed_sync, label='BSFL', marker='D', markevery=5, markersize=8, mfc='none', alpha=0.8)
axes.plot(x, fed_asofed, label='ASOFED', marker='v', markevery=5, markersize=8, mfc='none', alpha=0.8)
axes.plot(x, fed_bdfl, label='BDFL', marker='>', markevery=5, markersize=8, mfc='none', alpha=0.8)
axes.plot(x, fed_localA, label='APFL', marker='x', markevery=5, markersize=8, mfc='none', alpha=0.8)
axes.plot(x, fed_avg, label='FedAVG', marker='|', markevery=5, markersize=8, mfc='none', alpha=0.8)
axes.plot(x, local_train, label='Local', marker='<', markevery=5, markersize=8, mfc='none', alpha=0.8)
axes.set_xlabel('Training Round', **font_settings.get('cs_xy_label_font'))
axes.set_ylabel('Average Time (s)', **font_settings.get('cs_xy_label_font'))
plt.title(title, **font_settings.get('cs_title_font'))
plt.xticks(**font_settings.get('cs_xy_ticks_font'))
plt.yticks(**font_settings.get('cs_xy_ticks_font'))
plt.tight_layout()
plt.ylim(0, yrange)
plt.legend(prop=font_settings.get('legend_font'), loc='upper right').set_zorder(11)
plt.grid()
if save_path:
plt.savefig(save_path)
else:
        plt.show()
def channel_shuffle(x, groups):
(batchsize, num_channels, height, width) = x.data.size()
channels_per_group = (num_channels // groups)
x = x.view(batchsize, groups, channels_per_group, height, width)
x = torch.transpose(x, 1, 2).contiguous()
x = x.view(batchsize, (- 1), height, width)
    return x
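# Sanity check for channel_shuffle (torch assumed imported): with 2 groups
# the channel order (0, 1, 2, 3) becomes (0, 2, 1, 3).
x = torch.arange(4, dtype=torch.float32).view(1, 4, 1, 1)
print(channel_shuffle(x, groups=2).view(-1).tolist())  # [0.0, 2.0, 1.0, 3.0]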
def pick_slices(img, view_set, num_slices):
slices = list()
for view in view_set:
dim_size = img.shape[view]
non_empty_slices = np.array([sl for sl in range(dim_size) if (np.count_nonzero(get_axis(img, view, sl)) > 0)])
num_non_empty = len(non_empty_slices)
skip_count = max(0, np.around((num_non_empty * 0.05)).astype('int16'))
if ((skip_count > 0) and ((num_non_empty - (2 * skip_count)) > num_slices)):
non_empty_slices = non_empty_slices[skip_count:(- skip_count)]
num_non_empty = len(non_empty_slices)
sampled_indices = np.linspace(0, num_non_empty, num=min(num_non_empty, num_slices), endpoint=False)
slices_in_dim = non_empty_slices[np.around(sampled_indices).astype('int64')]
        slices_in_dim = [sn for sn in slices_in_dim if (sn >= 0) and (sn <= num_non_empty)]  # the original "or" made this filter a no-op
slices.extend([(view, slice) for slice in slices_in_dim])
    return slices
def main():
ops.survey.print_header('Uptime')
uptime = ops.system.get_uptime()
if (uptime is None):
dsz.Sleep(5000)
uptime = ops.system.get_uptime()
if (uptime is None):
ops.error('Could not properly find process list to calculate uptime, you might have to do the math on your own')
return
    print('Uptime: %d days, %d:%02d:%02d' % (uptime.days, uptime.seconds // 3600, (uptime.seconds // 60) % 60, uptime.seconds % 60))
def catalyze_one_step_reversible(enzyme, substrate, product, klist):
if isinstance(enzyme, Monomer):
enzyme = enzyme()
if isinstance(substrate, Monomer):
substrate = substrate()
if isinstance(product, Monomer):
product = product()
components = catalyze_one_step(enzyme, substrate, product, klist[0])
components |= _macro_rule('reverse', (product >> substrate), [klist[1]], ['kr'])
    return components
def main(argv):
mutate_sys_path()
assert ('doctest' not in sys.modules)
import testprogram
this_dir = os.path.dirname(__file__)
prog = testprogram.TestProgram(argv=argv, default_discovery_args=(this_dir, '*.py', None), module=None)
result = prog.runTests()
success = result.wasSuccessful()
    sys.exit(int((not success)))
@document_common_args  # reconstructed from the stripped residue "_common_args"; exact decorator name uncertain
def get_observation_taxon_summary(observation_id: int, **params) -> JsonResponse:
results = get(f'{API_V1}/observations/{observation_id}/taxon_summary', **params).json()
results['conservation_status'] = convert_generic_timestamps(results['conservation_status'])
results['listed_taxon'] = convert_generic_timestamps(results['listed_taxon'])
    return results
def test_infer_norm_abbr():
with pytest.raises(TypeError):
infer_norm_abbr(0)
class MyNorm():
_abbr_ = 'mn'
assert (infer_norm_abbr(MyNorm) == 'mn')
class FancyBatchNorm():
pass
assert (infer_norm_abbr(FancyBatchNorm) == 'bn')
class FancyInstanceNorm():
pass
assert (infer_norm_abbr(FancyInstanceNorm) == 'in')
class FancyLayerNorm():
pass
assert (infer_norm_abbr(FancyLayerNorm) == 'ln')
class FancyGroupNorm():
pass
assert (infer_norm_abbr(FancyGroupNorm) == 'gn')
class FancyNorm():
pass
    assert (infer_norm_abbr(FancyNorm) == 'norm_layer')
class SortFilterProxyModel(QSortFilterProxyModel):
def filterAcceptsRow(self, sourceRow, sourceParent):
if (self.filterKeyColumn() == DATE):
index = self.sourceModel().index(sourceRow, DATE, sourceParent)
data = self.sourceModel().data(index)
return (self.filterRegExp().indexIn(data.toString(Qt.DefaultLocaleShortDate)) >= 0)
        return super(SortFilterProxyModel, self).filterAcceptsRow(sourceRow, sourceParent)
class OperatorBase(ABC):
INDENTATION = ' '
ENABLE_DEPRECATION = True
def __init__(self) -> None:
super().__init__()
if OperatorBase.ENABLE_DEPRECATION:
warn_package('aqua.operators', 'qiskit.opflow', 'qiskit-terra')
def num_qubits(self) -> int:
raise NotImplementedError
def primitive_strings(self) -> Set[str]:
raise NotImplementedError
def eval(self, front: Optional[Union[(str, Dict[(str, complex)], 'OperatorBase')]]=None) -> Union[('OperatorBase', float, complex, list)]:
raise NotImplementedError
def reduce(self):
raise NotImplementedError
def to_matrix(self, massive: bool=False) -> np.ndarray:
raise NotImplementedError
def to_legacy_op(self, massive: bool=False) -> LegacyBaseOperator:
raise NotImplementedError
    @staticmethod  # no self parameter: a staticmethod decorator was stripped
    def _indent(lines: str, indentation: str=INDENTATION) -> str:
indented_str = (indentation + lines.replace('\n', '\n{}'.format(indentation)))
if indented_str.endswith('\n{}'.format(indentation)):
indented_str = indented_str[:(- len(indentation))]
return indented_str
def __add__(self, other: 'OperatorBase') -> 'OperatorBase':
if (other == 0):
return self
return self.add(other)
def __radd__(self, other: 'OperatorBase') -> 'OperatorBase':
if (other == 0):
return self
return self.add(other)
def add(self, other: 'OperatorBase') -> 'OperatorBase':
raise NotImplementedError
def __sub__(self, other: 'OperatorBase') -> 'OperatorBase':
return self.add((- other))
def __rsub__(self, other: 'OperatorBase') -> 'OperatorBase':
return self.neg().add(other)
def __neg__(self) -> 'OperatorBase':
return self.neg()
def neg(self) -> 'OperatorBase':
return self.mul((- 1.0))
def __invert__(self) -> 'OperatorBase':
return self.adjoint()
def adjoint(self) -> 'OperatorBase':
raise NotImplementedError
def __eq__(self, other: object) -> bool:
if (not isinstance(other, OperatorBase)):
return NotImplemented
return self.equals(cast(OperatorBase, other))
def equals(self, other: 'OperatorBase') -> bool:
raise NotImplementedError
def mul(self, scalar: Union[(Number, ParameterExpression)]) -> 'OperatorBase':
raise NotImplementedError
def __mul__(self, other: Number) -> 'OperatorBase':
return self.mul(other)
def __rmul__(self, other: Number) -> 'OperatorBase':
return self.mul(other)
def __truediv__(self, other: Union[(int, float, complex)]) -> 'OperatorBase':
return self.mul((1 / other))
def __xor__(self, other: Union[('OperatorBase', int)]) -> 'OperatorBase':
if isinstance(other, int):
return cast(OperatorBase, self.tensorpower(other))
else:
return self.tensor(other)
def __rxor__(self, other: Union[('OperatorBase', int)]) -> 'OperatorBase':
if (other == 1):
return self
else:
return cast(OperatorBase, other).tensor(self)
def tensor(self, other: 'OperatorBase') -> 'OperatorBase':
raise NotImplementedError
def tensorpower(self, other: int) -> Union[('OperatorBase', int)]:
raise NotImplementedError
def parameters(self):
raise NotImplementedError
def assign_parameters(self, param_dict: Dict[(ParameterExpression, Union[(Number, ParameterExpression, List[Union[(Number, ParameterExpression)]])])]) -> 'OperatorBase':
raise NotImplementedError
def _expand_dim(self, num_qubits: int) -> 'OperatorBase':
raise NotImplementedError
def permute(self, permutation: List[int]) -> 'OperatorBase':
raise NotImplementedError
def bind_parameters(self, param_dict: Dict[(ParameterExpression, Union[(Number, ParameterExpression, List[Union[(Number, ParameterExpression)]])])]) -> 'OperatorBase':
return self.assign_parameters(param_dict)
    @staticmethod
    def _unroll_param_dict(value_dict: Dict[(Union[(ParameterExpression, ParameterVector)], Union[(Number, List[Number])])]) -> Union[(Dict[(ParameterExpression, Number)], List[Dict[(ParameterExpression, Number)]])]:
unrolled_value_dict = {}
for (param, value) in value_dict.items():
if isinstance(param, ParameterExpression):
unrolled_value_dict[param] = value
if isinstance(param, ParameterVector):
if (not (len(param) == len(value))):
raise ValueError('ParameterVector {} has length {}, which differs from value list {} of len {}'.format(param, len(param), value, len(value)))
unrolled_value_dict.update(zip(param, value))
if isinstance(list(unrolled_value_dict.values())[0], list):
unrolled_value_dict_list = []
try:
for i in range(len(list(unrolled_value_dict.values())[0])):
unrolled_value_dict_list.append(OperatorBase._get_param_dict_for_index(unrolled_value_dict, i))
return unrolled_value_dict_list
except IndexError as ex:
raise AquaError('Parameter binding lists must all be the same length.') from ex
return unrolled_value_dict
    @staticmethod
    def _get_param_dict_for_index(unrolled_dict: Dict[(ParameterExpression, List[Number])], i: int):
return {k: v[i] for (k, v) in unrolled_dict.items()}
def _expand_shorter_operator_and_permute(self, other: 'OperatorBase', permutation: Optional[List[int]]=None) -> Tuple[('OperatorBase', 'OperatorBase')]:
if (permutation is not None):
other = other.permute(permutation)
new_self = self
if (not (self.num_qubits == other.num_qubits)):
from .operator_globals import Zero
if (other == Zero):
other = Zero.__class__(('0' * self.num_qubits))
elif (other.num_qubits < self.num_qubits):
other = other._expand_dim((self.num_qubits - other.num_qubits))
elif (other.num_qubits > self.num_qubits):
new_self = self._expand_dim((other.num_qubits - self.num_qubits))
return (new_self, other)
def __matmul__(self, other: 'OperatorBase') -> 'OperatorBase':
return self.compose(other)
def compose(self, other: 'OperatorBase', permutation: Optional[List[int]]=None, front: bool=False) -> 'OperatorBase':
raise NotImplementedError
def power(self, exponent: int) -> 'OperatorBase':
raise NotImplementedError
def __pow__(self, exponent: int) -> 'OperatorBase':
return self.power(exponent)
    @staticmethod
    def _check_massive(method: str, matrix: bool, num_qubits: int, massive: bool) -> None:
if ((num_qubits > 16) and (not massive) and (not aqua_globals.massive)):
dim = (2 ** num_qubits)
if matrix:
obj_type = 'matrix'
dimensions = f'{dim}x{dim}'
else:
obj_type = 'vector'
dimensions = f'{dim}'
raise ValueError(f"'{method}' will return an exponentially large {obj_type}, in this case '{dimensions}' elements. Set aqua_globals.massive=True or the method argument massive=True if you want to proceed.")
def __str__(self) -> str:
        raise NotImplementedError
class ValidatingRequestsSession(requests.Session):
def __init__(self, *args, checksum_algorithm=hashlib.sha256, **kwargs):
super().__init__(*args, **kwargs)
self._algorithm = checksum_algorithm
def get(self, url, checksum, **kwargs):
kwargs.setdefault('allow_redirects', True)
return self.request('GET', url, checksum, **kwargs)
def post(self, url, checksum, **kwargs):
return self.request('POST', url, checksum, **kwargs)
def request(self, method, url, checksum, **kwargs):
response = super().request(method, url, **kwargs)
digest = self._algorithm(response.content).hexdigest()
if (digest != checksum):
raise ChecksumFailed(f'Checksum failed for {url}, expected {checksum}, got {digest}')
        return response
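# Hedged usage sketch: the checksum argument is the hex digest of the exact
# response body. URL and payload here are placeholders, not real values.
import hashlib

expected = hashlib.sha256(b'known payload').hexdigest()
session = ValidatingRequestsSession()
# session.get('https://example.com/artifact', expected)  # raises ChecksumFailed on mismatch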
def plot_unions_HTS(results, size, metric: str='greedy'):
(fig, axs) = plt.subplots(1, 3, sharey=True, figsize=(((4 / 1.5) * 3), 4))
fmt = 'o-'
ms = 5
for (i, (split, ax)) in enumerate(zip(SPLITS, axs)):
xs = [int(((size * split) * i)) for i in range(1, 7)]
for model in MODELS:
if (model not in results['retrain'][split]):
continue
ys = results['retrain'][split][model][metric]
ax.plot(xs, ys, fmt, color=MODEL_COLORS[model], label=model.upper(), ms=ms, mec='black')
add_bounds(ax, xs)
add_random(ax, xs, results, split, fmt, ms)
ax.set_title(f'{(split * 100):0.1f}%')
if (i == 0):
ax.set_ylabel(f'Total Number of Unique SMILES')
ax.legend(loc='upper left', title='Model')
ax.set_xlabel(f'Molecules explored')
ax.set_xlim(left=0)
ax.xaxis.set_major_locator(ticker.MaxNLocator(7))
ax.xaxis.set_tick_params(rotation=30)
formatter = ticker.FuncFormatter(abbreviate_k_or_M)
ax.xaxis.set_major_formatter(formatter)
ax.yaxis.set_major_formatter(formatter)
ax.grid(True)
fig.tight_layout()
    return fig
@dataclass  # reconstructed: field(default_factory=...) below requires the dataclass decorator
class ConversationsGeneratorConfig:
openai_api_key: str
agent1: str
agent2: str
initial_utterances: List[str] = field(default_factory=(lambda : ['Hello.']))
num_samples: int = 1
interruption: str = 'length'
end_phrase: str = 'Goodbye!'
end_agent: str = 'both'
lengths: List[int] = field(default_factory=(lambda : [5]))
temperatures: List[float] = field(default_factory=(lambda : [0]))
options: List[Tuple[(str, str)]] = field(default_factory=(lambda : []))
model: str = 'gpt-3.5-turbo'
model_agent_one: str = 'gpt-3.5-turbo'
    model_agent_two: str = 'gpt-3.5-turbo'
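# With the @dataclass decorator reconstructed above, only the fields without
# defaults are required; the key and prompts below are placeholders.
config = ConversationsGeneratorConfig(
    openai_api_key='sk-...',
    agent1='You are a helpful assistant.',
    agent2='You are a curious user.',
)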
@dataclass  # reconstructed: annotated fields plus keyword construction in reverse() imply a dataclass
class ConditionReturn:
condition: Condition
left_varmap: Optional[VarMap] = None
right_varmap: Optional[VarMap] = None
def reverse(self) -> 'ConditionReturn':
        return ConditionReturn(left_varmap=self.right_varmap, right_varmap=self.left_varmap, condition=NotCondition(self.condition))
def test_exclude_from_history(base_app):
run_cmd(base_app, 'history')
verify_hi_last_result(base_app, 0)
(out, err) = run_cmd(base_app, 'history')
assert (out == [])
verify_hi_last_result(base_app, 0)
run_cmd(base_app, 'help')
(out, err) = run_cmd(base_app, 'history')
expected = normalize(' 1 help')
assert (out == expected)
    verify_hi_last_result(base_app, 1)
def test_receive_withdraw_request():
pseudo_random_generator = random.Random()
(our_model1, _) = create_model(balance=70)
(partner_model1, privkey2) = create_model(balance=100)
signer = LocalSigner(privkey2)
channel_state = create_channel_from_models(our_model1, partner_model1, privkey2)
block_hash = make_block_hash()
block_number = 1
expiration = 10
withdraw_request = ReceiveWithdrawRequest(message_identifier=message_identifier_from_prng(pseudo_random_generator), canonical_identifier=channel_state.canonical_identifier, total_withdraw=120, signature=make_32bytes(), sender=channel_state.partner_state.address, participant=channel_state.partner_state.address, nonce=1, expiration=expiration)
iteration = channel._handle_receive_withdraw_request(action=withdraw_request, channel_state=channel_state, block_hash=block_hash, pseudo_random_generator=pseudo_random_generator, block_number=block_number)
assert (search_for_item(iteration.events, EventInvalidReceivedWithdrawRequest, {'attempted_withdraw': 120}) is not None)
packed = pack_withdraw(canonical_identifier=channel_state.canonical_identifier, participant=channel_state.partner_state.address, total_withdraw=20, expiration_block=expiration)
signature = signer.sign(packed)
withdraw_request = ReceiveWithdrawRequest(message_identifier=message_identifier_from_prng(pseudo_random_generator), canonical_identifier=channel_state.canonical_identifier, total_withdraw=20, signature=signature, sender=channel_state.partner_state.address, participant=channel_state.partner_state.address, nonce=1, expiration=expiration)
iteration = channel._handle_receive_withdraw_request(action=withdraw_request, channel_state=channel_state, block_hash=block_hash, pseudo_random_generator=pseudo_random_generator, block_number=block_number)
assert (iteration.new_state.partner_state.offchain_total_withdraw == 20)
assert (search_for_item(iteration.events, SendWithdrawConfirmation, {'total_withdraw': 20}) is not None)
withdraw_request = ReceiveWithdrawRequest(message_identifier=message_identifier_from_prng(pseudo_random_generator), canonical_identifier=channel_state.canonical_identifier, total_withdraw=20, signature=make_32bytes(), sender=channel_state.partner_state.address, participant=channel_state.partner_state.address, nonce=1, expiration=10)
iteration = channel._handle_receive_withdraw_request(action=withdraw_request, channel_state=iteration.new_state, block_hash=block_hash, pseudo_random_generator=pseudo_random_generator, block_number=block_number)
assert (search_for_item(iteration.events, EventInvalidReceivedWithdrawRequest, {'attempted_withdraw': 20}) is not None)
withdraw_request = ReceiveWithdrawRequest(message_identifier=message_identifier_from_prng(pseudo_random_generator), canonical_identifier=channel_state.canonical_identifier, total_withdraw=40, signature=make_32bytes(), sender=channel_state.partner_state.address, participant=channel_state.partner_state.address, nonce=1, expiration=10)
iteration = channel._handle_receive_withdraw_request(action=withdraw_request, channel_state=iteration.new_state, block_hash=block_hash, pseudo_random_generator=pseudo_random_generator, block_number=block_number)
    assert (search_for_item(iteration.events, EventInvalidReceivedWithdrawRequest, {'attempted_withdraw': 40}) is not None)
def sdn_get_confusion(model, loader, confusion_stats, device='cpu'):
model.eval()
layer_correct = {}
layer_wrong = {}
instance_confusion = {}
outputs = list(range(model.num_output))
for output_id in outputs:
layer_correct[output_id] = set()
layer_wrong[output_id] = set()
with torch.no_grad():
for (cur_batch_id, batch) in enumerate(loader):
b_x = batch[0].to(device)
b_y = batch[1].to(device)
output = model(b_x)
output = [nn.functional.softmax(out, dim=1) for out in output]
cur_confusion = af.get_confusion_scores(output, confusion_stats, device)
for test_id in range(len(b_x)):
cur_instance_id = (test_id + (cur_batch_id * loader.batch_size))
instance_confusion[cur_instance_id] = cur_confusion[test_id].cpu().numpy()
for output_id in outputs:
cur_output = output[output_id]
pred = cur_output.max(1, keepdim=True)[1]
is_correct = pred.eq(b_y.view_as(pred))
correct = is_correct[test_id]
if (correct == 1):
layer_correct[output_id].add(cur_instance_id)
else:
layer_wrong[output_id].add(cur_instance_id)
    return (layer_correct, layer_wrong, instance_confusion)
class ThrowerEnv(mujoco_env.MujocoEnv, utils.EzPickle):
def __init__(self):
utils.EzPickle.__init__(self)
self._ball_hit_ground = False
self._ball_hit_location = None
mujoco_env.MujocoEnv.__init__(self, 'thrower.xml', 5)
def _step(self, a):
ball_xy = self.get_body_com('ball')[:2]
goal_xy = self.get_body_com('goal')[:2]
if ((not self._ball_hit_ground) and (self.get_body_com('ball')[2] < (- 0.25))):
self._ball_hit_ground = True
self._ball_hit_location = self.get_body_com('ball')
if self._ball_hit_ground:
ball_hit_xy = self._ball_hit_location[:2]
reward_dist = (- np.linalg.norm((ball_hit_xy - goal_xy)))
else:
reward_dist = (- np.linalg.norm((ball_xy - goal_xy)))
reward_ctrl = (- np.square(a).sum())
reward = (reward_dist + (0.002 * reward_ctrl))
self.do_simulation(a, self.frame_skip)
ob = self._get_obs()
done = False
reward_true = 0
reward_actual = (- np.linalg.norm((ball_xy - goal_xy)))
if (not hasattr(self, 'itr')):
self.itr = 0
if (self.itr == 0):
self.reward_orig = (- reward_actual)
if (self.itr == 49):
reward_true = (reward_actual / self.reward_orig)
img = None
if (((self.itr % 2) == 1) and hasattr(self, '_kwargs') and ('imsize' in self._kwargs) and (self._kwargs['mode'] != 'oracle')):
img = self.render('rgb_array')
idims = self._kwargs['imsize']
img = scipy.misc.imresize(img, idims)
self.itr += 1
return (ob, 0, done, dict(reward_dist=reward_dist, reward_ctrl=reward_ctrl, reward_true=reward_true, img=img))
def viewer_setup(self):
self.viewer.cam.trackbodyid = 0
rotation_angle = np.random.uniform(low=(- 0), high=360)
if (hasattr(self, '_kwargs') and ('vp' in self._kwargs)):
rotation_angle = self._kwargs['vp']
cam_dist = 2.5
cam_pos = np.array([0, 0.2, 0, cam_dist, (- 45), rotation_angle])
for i in range(3):
self.viewer.cam.lookat[i] = cam_pos[i]
self.viewer.cam.distance = cam_pos[3]
self.viewer.cam.elevation = cam_pos[4]
self.viewer.cam.azimuth = cam_pos[5]
self.viewer.cam.trackbodyid = (- 1)
def reset_model(self):
self.itr = 0
self._ball_hit_ground = False
self._ball_hit_location = None
qpos = self.init_qpos
self.goal = np.array([self.np_random.uniform(low=(- 0.3), high=0.3), self.np_random.uniform(low=(- 0.3), high=0.3)])
if (hasattr(self, '_kwargs') and ('goal' in self._kwargs)):
self.goal = np.array(self._kwargs['goal'])
qpos[(- 9):(- 7)] = self.goal
qvel = (self.init_qvel + self.np_random.uniform(low=(- 0.005), high=0.005, size=self.model.nv))
qvel[7:] = 0
self.set_state(qpos, qvel)
return self._get_obs()
def _get_obs(self):
        return np.concatenate([self.model.data.qpos.flat[:7], self.model.data.qvel.flat[:7], self.get_body_com('r_wrist_roll_link'), self.get_body_com('ball'), self.get_body_com('goal')])
class InvertDict(dict):
def __init__(self, *args, **kwargs):
super().__init__(*args, **kwargs)
self._inverted_dict = dict()
for (k, v) in self.items():
if (v in self._inverted_dict):
raise GinoException('Column name {} already maps to {}'.format(v, self._inverted_dict[v]))
self._inverted_dict[v] = k
def __setitem__(self, key, value):
if ((value in self._inverted_dict) and (self._inverted_dict[value] != key)):
raise GinoException('Column name {} already maps to {}'.format(value, self._inverted_dict[value]))
super().__setitem__(key, value)
self._inverted_dict[value] = key
def invert_get(self, value, default=None):
        return self._inverted_dict.get(value, default)
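# Usage sketch for InvertDict: lookups work in both directions, and mapping a
# second key to an existing value raises GinoException (from gino, assumed
# imported alongside the snippet).
d = InvertDict(a='x')
d['b'] = 'y'
assert d.invert_get('y') == 'b'
try:
    d['c'] = 'x'  # 'x' already maps back to 'a'
except GinoException as exc:
    print(exc)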
def inside_not_trans(graph):
id2node = {node['id']: node for node in graph['nodes']}
parents = {}
grabbed_objs = []
for edge in graph['edges']:
if (edge['relation_type'] == 'INSIDE'):
if (edge['from_id'] not in parents):
parents[edge['from_id']] = [edge['to_id']]
else:
parents[edge['from_id']] += [edge['to_id']]
elif edge['relation_type'].startswith('HOLDS'):
grabbed_objs.append(edge['to_id'])
edges = []
for edge in graph['edges']:
if ((edge['relation_type'] == 'INSIDE') and (id2node[edge['to_id']]['category'] == 'Rooms')):
if (len(parents[edge['from_id']]) == 1):
edges.append(edge)
else:
edges.append(edge)
graph['edges'] = edges
parent_for_node = {}
char_close = {1: [], 2: []}
for char_id in range(1, 3):
for edge in graph['edges']:
if (edge['relation_type'] == 'CLOSE'):
if ((edge['from_id'] == char_id) and (edge['to_id'] not in char_close[char_id])):
char_close[char_id].append(edge['to_id'])
elif ((edge['to_id'] == char_id) and (edge['from_id'] not in char_close[char_id])):
char_close[char_id].append(edge['from_id'])
objects_to_check = []
for edge in graph['edges']:
if (edge['relation_type'] == 'INSIDE'):
if ((edge['from_id'] in parent_for_node) and (not id2node[edge['from_id']]['class_name'].startswith('closet'))):
print('{} has > 1 parent'.format(edge['from_id']))
ipdb.set_trace()
raise Exception
parent_for_node[edge['from_id']] = edge['to_id']
if (id2node[edge['to_id']]['class_name'] in ['fridge', 'kitchencabinet', 'cabinet', 'microwave', 'dishwasher', 'stove']):
objects_to_check.append(edge['from_id'])
for char_id in range(1, 3):
if ((edge['to_id'] in char_close[char_id]) and (edge['from_id'] not in char_close[char_id])):
graph['edges'].append({'from_id': edge['from_id'], 'relation_type': 'CLOSE', 'to_id': char_id})
graph['edges'].append({'from_id': char_id, 'relation_type': 'CLOSE', 'to_id': edge['from_id']})
nodes_not_rooms = [node['id'] for node in graph['nodes'] if (node['category'] not in ['Rooms', 'Doors'])]
nodes_without_parent = list((set(nodes_not_rooms) - set(parent_for_node.keys())))
nodes_without_parent = [node for node in nodes_without_parent if (node not in grabbed_objs)]
graph['edges'] = [edge for edge in graph['edges'] if (not ((edge['from_id'] in objects_to_check) and (edge['relation_type'] == 'ON')))]
if (len(nodes_without_parent) > 0):
for nd in nodes_without_parent:
print(id2node[nd])
ipdb.set_trace()
raise Exception
    return graph
@pf.register_dataframe_method  # reconstructed from the stripped residue "_dataframe_method"
@deprecated_alias(smiles_col='smiles_column_name', mols_col='mols_column_name')  # reconstructed from the stripped residue "_alias(...)"
def smiles2mol(df: pd.DataFrame, smiles_column_name: Hashable, mols_column_name: Hashable, drop_nulls: bool=True, progressbar: Optional[str]=None) -> pd.DataFrame:
valid_progress = ['notebook', 'terminal', None]
if (progressbar not in valid_progress):
raise ValueError(f'progressbar kwarg must be one of {valid_progress}')
if (progressbar is None):
df[mols_column_name] = df[smiles_column_name].apply((lambda x: Chem.MolFromSmiles(x)))
else:
if (progressbar == 'notebook'):
tqdmn().pandas(desc='mols')
elif (progressbar == 'terminal'):
tqdm.pandas(desc='mols')
df[mols_column_name] = df[smiles_column_name].progress_apply((lambda x: Chem.MolFromSmiles(x)))
if drop_nulls:
df = df.dropna(subset=[mols_column_name])
df = df.reset_index(drop=True)
    return df
def download_scan_id(scan_id):
command = ('python download-scannet.py -o . --id %s' % scan_id)
to_download = ['.aggregation.json', '.txt', '_vh_clean_2.0.010000.segs.json', '_vh_clean_2.ply', '_vh_clean_2.labels.ply']
for filetype in to_download:
        os.system(((command + ' --type ') + filetype))
def test_next_transfer_pair():
block_number = BlockNumber(3)
balance = TokenAmount(10)
pseudo_random_generator = random.Random()
payer_transfer = create(LockedTransferSignedStateProperties(amount=balance, initiator=HOP1, target=ADDR, expiration=BlockExpiration(50)))
channels = make_channel_set([NettingChannelStateProperties(our_state=NettingChannelEndStateProperties(balance=balance)), NettingChannelStateProperties(our_state=NettingChannelEndStateProperties(balance=balance))])
(pair, events) = mediator.forward_transfer_pair(payer_transfer=payer_transfer, payer_channel=channels[0], payee_channel=channels[1], pseudo_random_generator=pseudo_random_generator, block_number=block_number)
assert pair
assert (pair.payer_transfer == payer_transfer)
assert (pair.payee_address == channels[1].partner_state.address)
assert (pair.payee_transfer.lock.expiration == pair.payer_transfer.lock.expiration)
assert search_for_item(events, SendLockedTransfer, {'recipient': pair.payee_address, 'transfer': {'payment_identifier': payer_transfer.payment_identifier, 'token': payer_transfer.token, 'initiator': payer_transfer.initiator, 'target': payer_transfer.target, 'lock': {'amount': payer_transfer.lock.amount, 'secrethash': payer_transfer.lock.secrethash, 'expiration': payer_transfer.lock.expiration}}}) |
@pytest.mark.django_db
def test_scope_keep_filter(site1, site2, post1, post2):
with pytest.raises(ScopeError):
Post.objects.all()
with scope(site=site1):
assert (list(Post.objects.annotate(c=Value(3, output_field=IntegerField())).distinct().all()) == [post1])
with scope(site=site2):
assert (list(Post.objects.annotate(c=Value(3, output_field=IntegerField())).distinct().all()) == [post2]) |
@document_request_params(docs._observation_id, docs._access_token)
def delete_observation(observation_id: int, **params):
response = delete(url=f'{API_V0}/observations/{observation_id}.json', raise_for_status=False, **params)
if (response.status_code == 404):
raise ObservationNotFound(response=response)
response.raise_for_status() |
def test_relative_outdir(mocker, tmp_dir, package_test_flit):
mocker.patch('pyproject_hooks.BuildBackendHookCaller', autospec=True)
builder = build.ProjectBuilder(package_test_flit)
builder._hook.build_sdist.return_value = 'dist.tar.gz'
builder.build('sdist', '.')
builder._hook.build_sdist.assert_called_with(os.path.abspath('.'), None) |
class RandomCrop1dReturnCoordinates(RandomCrop):
def forward(self, img: Image) -> (BoundingBox, Image):
if (self.padding is not None):
img = F.pad(img, self.padding, self.fill, self.padding_mode)
(width, height) = get_image_size(img)
if (self.pad_if_needed and (width < self.size[1])):
padding = [(self.size[1] - width), 0]
img = F.pad(img, padding, self.fill, self.padding_mode)
if (self.pad_if_needed and (height < self.size[0])):
padding = [0, (self.size[0] - height)]
img = F.pad(img, padding, self.fill, self.padding_mode)
(i, j, h, w) = self.get_params(img, self.size)
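        # crop box normalised to [0, 1] as (left, top, width, height) fractions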
bbox = ((j / width), (i / height), (w / width), (h / height))
return (bbox, F.crop(img, i, j, h, w)) |
def onrun_antlr4(unit, *args):
unit.onexternal_resource(['ANTLR4', ('sbr:' + ANTLR4_RESOURCE_ID)])
if (len(args) < 1):
raise Exception('Not enough arguments for RUN_ANTLR4 macro')
arg_list = ['-jar', ('${ANTLR4}/' + ANTLR4_JAR_PATH)]
arg_list += list(args)
unit.set(['ANTLR4', '$(ANTLR4)'])
unit.onrun_java(arg_list) |
class SawyerPickOutOfHoleV2Policy(Policy):
    @staticmethod
    @assert_fully_parsed
def _parse_obs(obs):
return {'hand_pos': obs[:3], 'gripper': obs[3], 'puck_pos': obs[4:7], 'goal_pos': obs[(- 3):], 'unused_info': obs[7:(- 3)]}
def get_action(self, obs):
o_d = self._parse_obs(obs)
action = Action({'delta_pos': np.arange(3), 'grab_effort': 3})
action['delta_pos'] = move(o_d['hand_pos'], to_xyz=self._desired_pos(o_d), p=25.0)
action['grab_effort'] = self._grab_effort(o_d)
return action.array
    @staticmethod
    def _desired_pos(o_d):
pos_curr = o_d['hand_pos']
pos_puck = (o_d['puck_pos'] + np.array([0.0, 0.0, 0.02]))
pos_goal = o_d['goal_pos']
if (np.linalg.norm((pos_curr[:2] - pos_puck[:2])) > 0.02):
return (pos_puck + np.array([0.0, 0.0, 0.15]))
elif (abs((pos_curr[2] - pos_puck[2])) > 0.01):
return pos_puck
elif (abs((pos_curr[2] - pos_goal[2])) > 0.04):
return np.array([*pos_curr[:2], pos_goal[2]])
else:
return pos_goal
    @staticmethod
    def _grab_effort(o_d):
pos_curr = o_d['hand_pos']
pos_puck = (o_d['puck_pos'] + np.array([0.0, 0.0, 0.02]))
if ((np.linalg.norm((pos_curr[:2] - pos_puck[:2])) > 0.02) or (abs((pos_curr[2] - pos_puck[2])) > 0.15)):
return 0.0
else:
return 0.1 |
def test__loss_function():
data = pd.DataFrame({'1': [float(i) for i in range(1000)], '2': [float((2 * i)) for i in range(1000)]})
tvae = TVAESynthesizer(epochs=300)
tvae.fit(data)
num_samples = 1000
sampled = tvae.sample(num_samples)
error = 0
for (_, row) in sampled.iterrows():
error += abs(((2 * row['1']) - row['2']))
avg_error = (error / num_samples)
assert (avg_error < 400) |
class RightPoolFunction(Function):
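    # autograd wrapper around the compiled right_pool extension (corner pooling)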
    @staticmethod
    def forward(ctx, input):
output = right_pool.forward(input)[0]
ctx.save_for_backward(input)
return output
    @staticmethod
    def backward(ctx, grad_output):
input = ctx.saved_variables[0]
output = right_pool.backward(input, grad_output)[0]
return output |
class ExtractPythonTestCase(unittest.TestCase):
def test_nested_calls(self):
buf = BytesIO(b'msg1 = _(i18n_arg.replace(r\'"\', \'"\'))\nmsg2 = ungettext(i18n_arg.replace(r\'"\', \'"\'), multi_arg.replace(r\'"\', \'"\'), 2)\nmsg3 = ungettext("Babel", multi_arg.replace(r\'"\', \'"\'), 2)\nmsg4 = ungettext(i18n_arg.replace(r\'"\', \'"\'), "Babels", 2)\nmsg5 = ungettext(\'bunny\', \'bunnies\', random.randint(1, 2))\nmsg6 = ungettext(arg0, \'bunnies\', random.randint(1, 2))\nmsg7 = _(hello.there)\nmsg8 = gettext(\'Rabbit\')\nmsg9 = dgettext(\'wiki\', model.addPage())\nmsg10 = dngettext(getDomain(), \'Page\', \'Pages\', 3)\n')
messages = list(extract.extract_python(buf, extract.DEFAULT_KEYWORDS.keys(), [], {}))
assert (messages == [(1, '_', None, []), (2, 'ungettext', (None, None, None), []), (3, 'ungettext', ('Babel', None, None), []), (4, 'ungettext', (None, 'Babels', None), []), (5, 'ungettext', ('bunny', 'bunnies', None), []), (6, 'ungettext', (None, 'bunnies', None), []), (7, '_', None, []), (8, 'gettext', 'Rabbit', []), (9, 'dgettext', ('wiki', None), []), (10, 'dngettext', (None, 'Page', 'Pages', None), [])])
def test_extract_default_encoding_ascii(self):
buf = BytesIO(b'_("a")')
messages = list(extract.extract_python(buf, list(extract.DEFAULT_KEYWORDS), [], {}))
assert (messages == [(1, '_', 'a', [])])
def test_extract_default_encoding_utf8(self):
buf = BytesIO('_("")'.encode('UTF-8'))
messages = list(extract.extract_python(buf, list(extract.DEFAULT_KEYWORDS), [], {}))
assert (messages == [(1, '_', '', [])])
def test_nested_comments(self):
buf = BytesIO(b"msg = ngettext('pylon', # TRANSLATORS: shouldn't be\n 'pylons', # TRANSLATORS: seeing this\n count)\n")
messages = list(extract.extract_python(buf, ('ngettext',), ['TRANSLATORS:'], {}))
assert (messages == [(1, 'ngettext', ('pylon', 'pylons', None), [])])
def test_comments_with_calls_that_spawn_multiple_lines(self):
buf = BytesIO(b'# NOTE: This Comment SHOULD Be Extracted\nadd_notice(req, ngettext("Catalog deleted.",\n "Catalogs deleted.", len(selected)))\n\n# NOTE: This Comment SHOULD Be Extracted\nadd_notice(req, _("Locale deleted."))\n\n\n# NOTE: This Comment SHOULD Be Extracted\nadd_notice(req, ngettext("Foo deleted.", "Foos deleted.", len(selected)))\n\n# NOTE: This Comment SHOULD Be Extracted\n# NOTE: And This One Too\nadd_notice(req, ngettext("Bar deleted.",\n "Bars deleted.", len(selected)))\n')
messages = list(extract.extract_python(buf, ('ngettext', '_'), ['NOTE:'], {'strip_comment_tags': False}))
assert (messages[0] == (3, 'ngettext', ('Catalog deleted.', 'Catalogs deleted.', None), ['NOTE: This Comment SHOULD Be Extracted']))
assert (messages[1] == (6, '_', 'Locale deleted.', ['NOTE: This Comment SHOULD Be Extracted']))
assert (messages[2] == (10, 'ngettext', ('Foo deleted.', 'Foos deleted.', None), ['NOTE: This Comment SHOULD Be Extracted']))
assert (messages[3] == (15, 'ngettext', ('Bar deleted.', 'Bars deleted.', None), ['NOTE: This Comment SHOULD Be Extracted', 'NOTE: And This One Too']))
def test_declarations(self):
buf = BytesIO(b"class gettext(object):\n pass\ndef render_body(context,x,y=_('Page arg 1'),z=_('Page arg 2'),**pageargs):\n pass\ndef ngettext(y='arg 1',z='arg 2',**pageargs):\n pass\nclass Meta:\n verbose_name = _('log entry')\n")
messages = list(extract.extract_python(buf, extract.DEFAULT_KEYWORDS.keys(), [], {}))
assert (messages == [(3, '_', 'Page arg 1', []), (3, '_', 'Page arg 2', []), (8, '_', 'log entry', [])])
def test_multiline(self):
buf = BytesIO(b"msg1 = ngettext('pylon',\n 'pylons', count)\nmsg2 = ngettext('elvis',\n 'elvises',\n count)\n")
messages = list(extract.extract_python(buf, ('ngettext',), [], {}))
assert (messages == [(1, 'ngettext', ('pylon', 'pylons', None), []), (3, 'ngettext', ('elvis', 'elvises', None), [])])
def test_npgettext(self):
buf = BytesIO(b"msg1 = npgettext('Strings','pylon',\n 'pylons', count)\nmsg2 = npgettext('Strings','elvis',\n 'elvises',\n count)\n")
messages = list(extract.extract_python(buf, ('npgettext',), [], {}))
assert (messages == [(1, 'npgettext', ('Strings', 'pylon', 'pylons', None), []), (3, 'npgettext', ('Strings', 'elvis', 'elvises', None), [])])
buf = BytesIO(b"msg = npgettext('Strings', 'pylon', # TRANSLATORS: shouldn't be\n 'pylons', # TRANSLATORS: seeing this\n count)\n")
messages = list(extract.extract_python(buf, ('npgettext',), ['TRANSLATORS:'], {}))
assert (messages == [(1, 'npgettext', ('Strings', 'pylon', 'pylons', None), [])])
def test_triple_quoted_strings(self):
buf = BytesIO(b'msg1 = _(\'\'\'pylons\'\'\')\nmsg2 = ngettext(r\'\'\'elvis\'\'\', """elvises""", count)\nmsg2 = ngettext("""elvis""", \'elvises\', count)\n')
messages = list(extract.extract_python(buf, extract.DEFAULT_KEYWORDS.keys(), [], {}))
assert (messages == [(1, '_', 'pylons', []), (2, 'ngettext', ('elvis', 'elvises', None), []), (3, 'ngettext', ('elvis', 'elvises', None), [])])
def test_multiline_strings(self):
buf = BytesIO(b"_('''This module provides internationalization and localization\nsupport for your Python programs by providing an interface to the GNU\ngettext message catalog library.''')\n")
messages = list(extract.extract_python(buf, extract.DEFAULT_KEYWORDS.keys(), [], {}))
assert (messages == [(1, '_', 'This module provides internationalization and localization\nsupport for your Python programs by providing an interface to the GNU\ngettext message catalog library.', [])])
def test_concatenated_strings(self):
buf = BytesIO(b"foobar = _('foo' 'bar')\n")
messages = list(extract.extract_python(buf, extract.DEFAULT_KEYWORDS.keys(), [], {}))
assert (messages[0][2] == 'foobar')
def test_unicode_string_arg(self):
buf = BytesIO(b"msg = _(u'Foo Bar')")
messages = list(extract.extract_python(buf, ('_',), [], {}))
assert (messages[0][2] == 'Foo Bar')
def test_comment_tag(self):
buf = BytesIO(b"\n# NOTE: A translation comment\nmsg = _(u'Foo Bar')\n")
messages = list(extract.extract_python(buf, ('_',), ['NOTE:'], {}))
assert (messages[0][2] == 'Foo Bar')
assert (messages[0][3] == ['NOTE: A translation comment'])
def test_comment_tag_multiline(self):
buf = BytesIO(b"\n# NOTE: A translation comment\n# with a second line\nmsg = _(u'Foo Bar')\n")
messages = list(extract.extract_python(buf, ('_',), ['NOTE:'], {}))
assert (messages[0][2] == 'Foo Bar')
assert (messages[0][3] == ['NOTE: A translation comment', 'with a second line'])
def test_translator_comments_with_previous_non_translator_comments(self):
buf = BytesIO(b"\n# This shouldn't be in the output\n# because it didn't start with a comment tag\n# NOTE: A translation comment\n# with a second line\nmsg = _(u'Foo Bar')\n")
messages = list(extract.extract_python(buf, ('_',), ['NOTE:'], {}))
assert (messages[0][2] == 'Foo Bar')
assert (messages[0][3] == ['NOTE: A translation comment', 'with a second line'])
def test_comment_tags_not_on_start_of_comment(self):
buf = BytesIO(b"\n# This shouldn't be in the output\n# because it didn't start with a comment tag\n# do NOTE: this will not be a translation comment\n# NOTE: This one will be\nmsg = _(u'Foo Bar')\n")
messages = list(extract.extract_python(buf, ('_',), ['NOTE:'], {}))
assert (messages[0][2] == 'Foo Bar')
assert (messages[0][3] == ['NOTE: This one will be'])
def test_multiple_comment_tags(self):
buf = BytesIO(b"\n# NOTE1: A translation comment for tag1\n# with a second line\nmsg = _(u'Foo Bar1')\n\n# NOTE2: A translation comment for tag2\nmsg = _(u'Foo Bar2')\n")
messages = list(extract.extract_python(buf, ('_',), ['NOTE1:', 'NOTE2:'], {}))
assert (messages[0][2] == 'Foo Bar1')
assert (messages[0][3] == ['NOTE1: A translation comment for tag1', 'with a second line'])
assert (messages[1][2] == 'Foo Bar2')
assert (messages[1][3] == ['NOTE2: A translation comment for tag2'])
def test_two_succeeding_comments(self):
buf = BytesIO(b"\n# NOTE: one\n# NOTE: two\nmsg = _(u'Foo Bar')\n")
messages = list(extract.extract_python(buf, ('_',), ['NOTE:'], {}))
assert (messages[0][2] == 'Foo Bar')
assert (messages[0][3] == ['NOTE: one', 'NOTE: two'])
def test_invalid_translator_comments(self):
buf = BytesIO(b"\n# NOTE: this shouldn't apply to any messages\nhello = 'there'\n\nmsg = _(u'Foo Bar')\n")
messages = list(extract.extract_python(buf, ('_',), ['NOTE:'], {}))
assert (messages[0][2] == 'Foo Bar')
assert (messages[0][3] == [])
def test_invalid_translator_comments2(self):
buf = BytesIO(b"\n# NOTE: Hi!\nhithere = _('Hi there!')\n\n# NOTE: you should not be seeing this in the .po\nrows = [[v for v in range(0,10)] for row in range(0,10)]\n\n# this (NOTE:) should not show up either\nhello = _('Hello')\n")
messages = list(extract.extract_python(buf, ('_',), ['NOTE:'], {}))
assert (messages[0][2] == 'Hi there!')
assert (messages[0][3] == ['NOTE: Hi!'])
assert (messages[1][2] == 'Hello')
assert (messages[1][3] == [])
def test_invalid_translator_comments3(self):
buf = BytesIO(b"\n# NOTE: Hi,\n\n# there!\nhithere = _('Hi there!')\n")
messages = list(extract.extract_python(buf, ('_',), ['NOTE:'], {}))
assert (messages[0][2] == 'Hi there!')
assert (messages[0][3] == [])
def test_comment_tag_with_leading_space(self):
buf = BytesIO(b"\n #: A translation comment\n #: with leading spaces\nmsg = _(u'Foo Bar')\n")
messages = list(extract.extract_python(buf, ('_',), [':'], {}))
assert (messages[0][2] == 'Foo Bar')
assert (messages[0][3] == [': A translation comment', ': with leading spaces'])
def test_different_signatures(self):
buf = BytesIO(b"\nfoo = _('foo', 'bar')\nn = ngettext('hello', 'there', n=3)\nn = ngettext(n=3, 'hello', 'there')\nn = ngettext(n=3, *messages)\nn = ngettext()\nn = ngettext('foo')\n")
messages = list(extract.extract_python(buf, ('_', 'ngettext'), [], {}))
assert (messages[0][2] == ('foo', 'bar'))
assert (messages[1][2] == ('hello', 'there', None))
assert (messages[2][2] == (None, 'hello', 'there'))
assert (messages[3][2] == (None, None))
assert (messages[4][2] is None)
assert (messages[5][2] == 'foo')
def test_utf8_message(self):
buf = BytesIO("\n# NOTE: hello\nmsg = _('Bonjour a tous')\n".encode('utf-8'))
messages = list(extract.extract_python(buf, ('_',), ['NOTE:'], {'encoding': 'utf-8'}))
assert (messages[0][2] == 'Bonjour a tous')
assert (messages[0][3] == ['NOTE: hello'])
def test_utf8_message_with_magic_comment(self):
buf = BytesIO("# -*- coding: utf-8 -*-\n# NOTE: hello\nmsg = _('Bonjour a tous')\n".encode('utf-8'))
messages = list(extract.extract_python(buf, ('_',), ['NOTE:'], {}))
assert (messages[0][2] == 'Bonjour a tous')
assert (messages[0][3] == ['NOTE: hello'])
def test_utf8_message_with_utf8_bom(self):
buf = BytesIO((codecs.BOM_UTF8 + "\n# NOTE: hello\nmsg = _('Bonjour a tous')\n".encode('utf-8')))
messages = list(extract.extract_python(buf, ('_',), ['NOTE:'], {}))
assert (messages[0][2] == 'Bonjour a tous')
assert (messages[0][3] == ['NOTE: hello'])
def test_utf8_message_with_utf8_bom_and_magic_comment(self):
buf = BytesIO((codecs.BOM_UTF8 + "# -*- coding: utf-8 -*-\n# NOTE: hello\nmsg = _('Bonjour a tous')\n".encode('utf-8')))
messages = list(extract.extract_python(buf, ('_',), ['NOTE:'], {}))
assert (messages[0][2] == 'Bonjour a tous')
assert (messages[0][3] == ['NOTE: hello'])
def test_utf8_bom_with_latin_magic_comment_fails(self):
buf = BytesIO((codecs.BOM_UTF8 + "# -*- coding: latin-1 -*-\n# NOTE: hello\nmsg = _('Bonjour a tous')\n".encode('utf-8')))
with pytest.raises(SyntaxError):
list(extract.extract_python(buf, ('_',), ['NOTE:'], {}))
def test_utf8_raw_strings_match_unicode_strings(self):
buf = BytesIO((codecs.BOM_UTF8 + "\nmsg = _('Bonjour a tous')\nmsgu = _(u'Bonjour a tous')\n".encode('utf-8')))
messages = list(extract.extract_python(buf, ('_',), ['NOTE:'], {}))
assert (messages[0][2] == 'Bonjour a tous')
assert (messages[0][2] == messages[1][2])
def test_extract_strip_comment_tags(self):
buf = BytesIO(b"#: This is a comment with a very simple\n#: prefix specified\n_('Servus')\n\n# NOTE: This is a multiline comment with\n# a prefix too\n_('Babatschi')")
messages = list(extract.extract('python', buf, comment_tags=['NOTE:', ':'], strip_comment_tags=True))
assert (messages[0][1] == 'Servus')
assert (messages[0][2] == ['This is a comment with a very simple', 'prefix specified'])
assert (messages[1][1] == 'Babatschi')
assert (messages[1][2] == ['This is a multiline comment with', 'a prefix too'])
def test_nested_messages(self):
buf = BytesIO(b"\n# NOTE: First\n_(u'Hello, {name}!', name=_(u'Foo Bar'))\n\n# NOTE: Second\n_(u'Hello, {name1} and {name2}!', name1=_(u'Heungsub'),\n name2=_(u'Armin'))\n\n# NOTE: Third\n_(u'Hello, {0} and {1}!', _(u'Heungsub'),\n _(u'Armin'))\n")
messages = list(extract.extract_python(buf, ('_',), ['NOTE:'], {}))
assert (messages[0][2] == ('Hello, {name}!', None))
assert (messages[0][3] == ['NOTE: First'])
assert (messages[1][2] == 'Foo Bar')
assert (messages[1][3] == [])
assert (messages[2][2] == ('Hello, {name1} and {name2}!', None))
assert (messages[2][3] == ['NOTE: Second'])
assert (messages[3][2] == 'Heungsub')
assert (messages[3][3] == [])
assert (messages[4][2] == 'Armin')
assert (messages[4][3] == [])
assert (messages[5][2] == ('Hello, {0} and {1}!', None))
assert (messages[5][3] == ['NOTE: Third'])
assert (messages[6][2] == 'Heungsub')
assert (messages[6][3] == [])
assert (messages[7][2] == 'Armin')
assert (messages[7][3] == []) |
def test_direct_junction_offsets_suc_suc_1_right_wrong_input(direct_junction_right_multi_lane_fixture):
(main_road, small_road, junction_creator) = direct_junction_right_multi_lane_fixture
main_road.add_predecessor(xodr.ElementType.junction, junction_creator.id)
small_road.add_successor(xodr.ElementType.junction, junction_creator.id)
with pytest.raises(xodr.exceptions.MixingDrivingDirection) as e:
junction_creator.add_connection(small_road, main_road, [(- 1), (- 2)], [2, 3]) |
def runAllModulesOnEachHost(args):
if (args['nmap-file'] != None):
nmapReport = NmapParser.parse_fromfile(args['nmap-file'])
for aHost in nmapReport.hosts:
hostAdress = aHost.address
for aService in aHost.services:
serviceName = aService.service.lower()
if ((aService.state == 'open') and (aService.protocol == 'tcp') and (serviceName == 'oracle-tns')):
hostPort = aService.port
logging.info('Server {0} is running a TNS Listener on {1}: {2}'.format(hostAdress, hostPort, repr(aService)))
(args['server'], args['port']) = (hostAdress, hostPort)
(args['user'], args['password']) = (None, None)
runAllModules(args)
elif (args['hostlist'] != None):
hosts = getHostsFromFile(args['hostlist'])
for aHost in hosts:
(args['server'], args['port']) = (aHost[0], aHost[1])
(args['user'], args['password']) = (None, None)
(args['sid'], args['serviceName']) = (None, None)
runAllModules(args)
else:
runAllModules(args) |
def _get_tune_resources(num_actors: int, cpus_per_actor: int, gpus_per_actor: int, resources_per_actor: Optional[Dict], placement_options: Optional[Dict]):
if TUNE_INSTALLED:
from ray.tune import PlacementGroupFactory
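        # reserve an empty head bundle for the Tune trial driver; each actor gets its own resource bundle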
head_bundle = {}
child_bundle = {'CPU': cpus_per_actor, 'GPU': gpus_per_actor}
child_bundle_extra = ({} if (resources_per_actor is None) else resources_per_actor)
child_bundles = [{**child_bundle, **child_bundle_extra} for _ in range(num_actors)]
bundles = ([head_bundle] + child_bundles)
placement_options = (placement_options or {})
placement_options.setdefault('strategy', 'PACK')
if (placement_options.get('_max_cpu_fraction_per_node', None) is None):
placement_options.pop('_max_cpu_fraction_per_node', None)
placement_group_factory = PlacementGroupFactory(bundles, **placement_options)
return placement_group_factory
else:
raise RuntimeError('Tune is not installed, so `get_tune_resources` is not supported. You can install Ray Tune via `pip install ray[tune]`.') |
class CharDropout(nn.Module):
def __init__(self, p: float=0.0) -> None:
super(CharDropout, self).__init__()
self.p: float = p
def forward(self, input: Tensor) -> Tensor:
if ((self.p == 0.0) or (not self.training)):
return input
(batch, length, char_length, _) = input.size()
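        # Bernoulli keep-mask per character position, broadcast over the embedding dim (no 1/(1-p) rescaling)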
m = input.new_empty((batch, length, char_length, 1)).bernoulli_((1.0 - self.p))
return (m * input) |
def add_file_handler(logger: logging.Logger, logging_level, log_dir: str, log_file_base_name: Optional[str]=''):
abs_log_dir = (Path(get_starting_dir_abs_path()) / log_dir)
abs_log_dir.mkdir(parents=True, exist_ok=True)
log_file = get_formatted_filename(log_file_base_name, datetime.now(), 'txt')
formatter = logging.Formatter(fmt='%(asctime)s %(levelname)s [%(name)s]: %(message)s', datefmt=str(DateFormat.ISO_SECONDS))
if (not any((isinstance(handle, logging.FileHandler) for handle in logger.handlers))):
file_logger = logging.FileHandler((abs_log_dir / log_file))
file_logger.setFormatter(formatter)
file_logger.setLevel(logging_level)
logger.addHandler(file_logger)
logger.propagate = False |
class Protonet(nn.Module):
def __init__(self, encoder):
super(Protonet, self).__init__()
self.encoder = encoder
self.slf_attn = MultiHeadAttention(1, 512, 512, 512, dropout=0)
def loss(self, sample, stage, eval=False):
xs = Variable(sample['xs'])
xq = Variable(sample['xq'])
classes = sample['class']
n_class = xs.size(0)
assert (xq.size(0) == n_class)
n_support = xs.size(1)
n_query = xq.size(1)
target_inds = torch.arange(0, n_class).view(n_class, 1, 1).expand(n_class, n_query, 1).long()
target_inds = Variable(target_inds, requires_grad=False)
if xq.is_cuda:
target_inds = target_inds.cuda()
x = torch.cat([xs.view((n_class * n_support), *xs.size()[2:]), xq.view((n_class * n_query), *xq.size()[2:])], 0)
z = self.encoder.forward(x)
z_dim = z.size((- 1))
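        # class prototypes: mean of each class's support embeddings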
z_proto = z[:(n_class * n_support)].view(n_class, n_support, z_dim).mean(1)
zq = z[(n_class * n_support):]
dists = euclidean_dist(zq, z_proto)
if (stage == 'feat'):
proto = z_proto.detach()
num_proto = proto.shape[0]
query = zq.detach()
num_query = query.shape[0]
proto = proto.unsqueeze(0).repeat([num_query, 1, 1])
query = query.unsqueeze(1)
combined = torch.cat([proto, query], 1)
(combined, enc_slf_attn, enc_slf_log_attn) = self.slf_attn(combined, combined, combined)
(refined_support, refined_query) = combined.split(n_class, 1)
logitis = (- torch.sum(((refined_support - refined_query) ** 2), 2))
dists = (- dists)
            log_p_y = F.log_softmax((logitis + dists), dim=1).view(n_class, n_query, (- 1))
loss_val = (- log_p_y.gather(2, target_inds).squeeze().view((- 1)).mean())
(_, y_hat) = log_p_y.max(2)
acc_val = torch.eq(y_hat, target_inds.squeeze()).float().mean()
y_hat = y_hat.numpy()
target_inds = target_inds.squeeze().numpy()
class_acc = {}
class_prec = {}
class_count = {}
for i in range(len(target_inds)):
ind = classes[i]
class_acc[ind] = ((y_hat[i] == target_inds[i]).sum() / len(y_hat[i]))
dest = np.where((y_hat == i))
class_count[ind] = len(dest[0])
class_prec[ind] = (y_hat[dest] == target_inds[dest]).sum()
prec_macro = 0
prec_micro = 0
count_micro = 0
for k in class_prec.keys():
if (class_count[k] == 0):
prec_macro += 0
else:
prec_macro += (class_prec[k] / class_count[k])
prec_micro += class_prec[k]
count_micro += class_count[k]
prec_macro = (prec_macro / len(class_prec.keys()))
prec_micro = (prec_micro / count_micro)
return (loss_val, {'loss': loss_val.item(), 'acc': acc_val.item(), 'prec_macro': prec_macro, 'prec_micro': prec_micro}, enc_slf_attn, class_acc, class_count, class_prec)
elif (stage == 'protonet'):
dists = (- dists)
log_p_y = F.log_softmax(dists, dim=1).view(n_class, n_query, (- 1))
loss_val = (- log_p_y.gather(2, target_inds).squeeze().view((- 1)).mean())
(_, y_hat) = log_p_y.max(2)
acc_val = torch.eq(y_hat, target_inds.squeeze()).float().mean()
y_hat = y_hat.numpy()
target_inds = target_inds.squeeze().numpy()
class_acc = {}
class_prec = {}
class_count = {}
for i in range(len(target_inds)):
ind = classes[i]
class_acc[ind] = ((y_hat[i] == target_inds[i]).sum() / len(y_hat[i]))
dest = np.where((y_hat == i))
class_count[ind] = len(dest[0])
class_prec[ind] = (y_hat[dest] == target_inds[dest]).sum()
prec_macro = 0
prec_micro = 0
count_micro = 0
for k in class_prec.keys():
if (class_count[k] == 0):
prec_macro += 0
else:
prec_macro += (class_prec[k] / class_count[k])
prec_micro += class_prec[k]
count_micro += class_count[k]
prec_macro = (prec_macro / len(class_prec.keys()))
prec_micro = (prec_micro / count_micro)
return (loss_val, {'loss': loss_val.item(), 'acc': acc_val.item(), 'prec_macro': prec_macro, 'prec_micro': prec_micro}, class_acc, class_count, class_prec) |
def darknet53_body(inputs):
def res_block(inputs, filters):
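        # residual block: 1x1 bottleneck then 3x3 conv, summed with the identity shortcut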
shortcut = inputs
net = conv2d(inputs, (filters * 1), 1)
net = conv2d(net, (filters * 2), 3)
net = (net + shortcut)
return net
net = conv2d(inputs, 32, 3, strides=1)
net = conv2d(net, 64, 3, strides=2)
net = res_block(net, 32)
net = conv2d(net, 128, 3, strides=2)
for i in range(2):
net = res_block(net, 64)
net = conv2d(net, 256, 3, strides=2)
for i in range(8):
net = res_block(net, 128)
route_1 = net
net = conv2d(net, 512, 3, strides=2)
for i in range(8):
net = res_block(net, 256)
route_2 = net
net = conv2d(net, 1024, 3, strides=2)
for i in range(4):
net = res_block(net, 512)
route_3 = net
return (route_1, route_2, route_3) |
def test_install_logs_output(tester: CommandTester, mocker: MockerFixture) -> None:
assert isinstance(tester.command, InstallerCommand)
mocker.patch.object(tester.command.installer, 'run', return_value=0)
mocker.patch('poetry.masonry.builders.editable.EditableBuilder')
tester.execute()
assert (tester.status_code == 0)
assert (tester.io.fetch_output() == '\nInstalling the current project: simple-project (1.2.3)\n') |
def test_families():
families = table.families()
for (name, fdesc) in six.iteritems(families):
assert isinstance(name, bytes)
assert isinstance(fdesc, dict)
assert ('name' in fdesc)
assert isinstance(fdesc['name'], six.binary_type)
assert ('max_versions' in fdesc) |
def dataloader_impl(dataset: Dataset, batch_size: int, return_idx: bool=False, return_jnp_array: bool=False):
batch_idx = np.arange(len(dataset))
steps_per_epoch = math.ceil((len(dataset) / batch_size))
batch_idx = np.array_split(batch_idx, steps_per_epoch)
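    # iterate over contiguous index chunks; no shuffling is done here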
for idx in batch_idx:
batch = dataset[idx]
batch = {k: (jnp.array(v) if return_jnp_array else np.array(v)) for (k, v) in batch.items()}
if return_idx:
(yield (idx, batch))
else:
(yield batch) |
def hexLat2W(nrows=5, ncols=5, **kwargs):
if ((nrows == 1) or (ncols == 1)):
print('Hexagon lattice requires at least 2 rows and columns')
print('Returning a linear contiguity structure')
return lat2W(nrows, ncols)
n = (nrows * ncols)
rid = [(i // ncols) for i in range(n)]
cid = [(i % ncols) for i in range(n)]
r1 = (nrows - 1)
c1 = (ncols - 1)
w = lat2W(nrows, ncols).neighbors
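    # start from rook contiguity on the square lattice, then add the two diagonal
    # neighbours that make each cell hexagonal; which diagonals depends on column parity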
for i in range(n):
odd = (cid[i] % 2)
if odd:
if (rid[i] < r1):
if (cid[i] > 0):
j = ((i + ncols) - 1)
w[i] = (w.get(i, []) + [j])
if (cid[i] < c1):
j = ((i + ncols) + 1)
w[i] = (w.get(i, []) + [j])
else:
jnw = [((i - ncols) - 1)]
jne = [((i - ncols) + 1)]
if (rid[i] > 0):
if (cid[i] == 0):
w[i] = (w.get(i, []) + jne)
elif (cid[i] == c1):
w[i] = (w.get(i, []) + jnw)
else:
w[i] = (w.get(i, []) + jne)
w[i] = (w.get(i, []) + jnw)
return W(w, **kwargs) |
class VGG(nn.Module):
def __init__(self, features, num_classes=1000):
super(VGG, self).__init__()
self.features = features
self.classifier = nn.Sequential(nn.Linear(((512 * 7) * 7), 4096), nn.ReLU(True), nn.Dropout(), nn.Linear(4096, 4096), nn.ReLU(True), nn.Dropout(), nn.Linear(4096, num_classes))
self._initialize_weights()
def forward(self, x):
x = self.features(x)
x = x.view(x.size(0), (- 1))
x = self.classifier(x)
return x
def _initialize_weights(self):
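        # He (Kaiming) initialisation: conv weights ~ N(0, sqrt(2 / fan_out))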
for m in self.modules():
if isinstance(m, nn.Conv2d):
n = ((m.kernel_size[0] * m.kernel_size[1]) * m.out_channels)
m.weight.data.normal_(0, math.sqrt((2.0 / n)))
if (m.bias is not None):
m.bias.data.zero_()
elif isinstance(m, nn.BatchNorm2d):
m.weight.data.fill_(1)
m.bias.data.zero_()
elif isinstance(m, nn.Linear):
m.weight.data.normal_(0, 0.01)
m.bias.data.zero_() |
def check_installed(required_solvers, install_dir, bindings_dir, mirror_link):
pypath_solvers = get_env().factory.all_solvers()
global_solvers_status = []
print('Installed Solvers:')
for i in INSTALLERS:
installer_ = i.InstallerClass(install_dir=install_dir, bindings_dir=bindings_dir, solver_version=i.version, mirror_link=mirror_link, **i.extra_params)
solver = installer_.SOLVER
version = installer_.get_installed_version()
is_installed = (version is not None)
global_solvers_status.append((solver, is_installed, version))
del installer_
for solver in required_solvers:
if (solver not in pypath_solvers):
raise PysmtException(('Was expecting to find %s installed' % solver))
for (solver, is_installed, version) in global_solvers_status:
msg = (' %s%s ' % (solver.ljust(10), is_installed))
msg += ('(%s)' % version).ljust(20)
if (solver not in pypath_solvers):
msg += "Not in Python's path!"
print(msg)
print('')
print(('Solvers: %s' % ', '.join((name for name in pypath_solvers))))
qes = get_env().factory.all_quantifier_eliminators()
print(('Quantifier Eliminators: %s' % ', '.join((name for name in qes))))
ucs = get_env().factory.all_unsat_core_solvers()
print(('UNSAT-Cores: %s' % ', '.join((name for name in ucs))))
interps = get_env().factory.all_interpolators()
print(('Interpolators: %s' % ', '.join((name for name in interps)))) |
class WallFillProperty(bpy.types.PropertyGroup):
width: FloatProperty(name='Wall Width', min=get_scaled_unit(0.0), max=get_scaled_unit(100.0), default=get_scaled_unit(0.075), unit='LENGTH', description='Width of each wall')
def draw(self, context, layout):
row = layout.row(align=True)
row.prop(self, 'width') |
class Task2Dataset(BaseDataset):
def __getitem__(self, index) -> Tuple:
(query_id, idx) = self.samples[index]
product_id = self.database[self.split_dataset][query_id]['product_id'][idx]
example_id = self.database[self.split_dataset][query_id]['example_id'][idx]
dataset = torch.tensor([self.database[self.split_dataset][query_id]['dataset'][idx]], dtype=torch.long)
esci_label = torch.tensor([self.database[self.split_dataset][query_id]['esci_label'][idx]], dtype=torch.long)
query_encode = _process_encoding(self.database[self.split_dataset][query_id]['query'], encode_map=self.cfg.model.encode)
input_ids = [query_encode]
for name in self.used_col:
if (name == 'product_id'):
input_ids.append(_process_encoding(product_id, self.cfg.model.encode, name, self.token_map))
else:
arr = self.database['product_catalogue'][product_id][name]
input_ids.append(_process_encoding(arr, self.cfg.model.encode, name, self.token_map))
input_ids = torch.cat(input_ids)
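        # truncate to max_length while keeping the original final token in place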
if (len(input_ids) > self.max_length):
tail = input_ids[(- 1)]
input_ids = input_ids[:self.max_length]
input_ids[(- 1)] = tail
token_type_ids = torch.zeros_like(input_ids)
attention_mask = torch.ones_like(input_ids)
feature = {'input_ids': input_ids, 'token_type_ids': token_type_ids, 'attention_mask': attention_mask, 'extra': dataset}
meta = {'product_id': product_id, 'query_id': query_id, 'example_id': example_id, 'pad_token_id': self.cfg.model.pad_token_id, 'sample_length': self.sample_length[query_id]}
return (feature, esci_label, meta)
    @staticmethod
    def collate_fn(batch: List) -> dict:
features = {}
pad_token_id = batch[0][2]['pad_token_id']
features['input_ids'] = pad_sequence([x[0]['input_ids'] for x in batch], batch_first=True, padding_value=pad_token_id)
features['token_type_ids'] = pad_sequence([x[0]['token_type_ids'] for x in batch], batch_first=True)
features['attention_mask'] = pad_sequence([x[0]['attention_mask'] for x in batch], batch_first=True)
features['extra'] = torch.cat([x[0]['extra'] for x in batch])
label = torch.cat([x[1] for x in batch])
meta = {}
meta['product_id'] = [x[2]['product_id'] for x in batch]
meta['example_id'] = [x[2]['example_id'] for x in batch]
meta['query_id'] = [x[2]['query_id'] for x in batch]
meta['sample_length'] = torch.tensor([x[2]['sample_length'] for x in batch], dtype=torch.float)
return {'features': features, 'label': label, 'meta': meta} |
@require_bitsandbytes
@require_accelerate
@require_torch
@require_torch_gpu
class MixedInt8T5Test(unittest.TestCase):
    @classmethod
    def setUpClass(cls):
cls.model_name = 't5-small'
cls.dense_act_model_name = 'google/flan-t5-small'
cls.tokenizer = AutoTokenizer.from_pretrained(cls.model_name)
cls.input_text = 'Translate in German: Hello, my dog is cute'
def tearDown(self):
gc.collect()
torch.cuda.empty_cache()
def test_inference_without_keep_in_fp32(self):
from transformers import T5ForConditionalGeneration
T5ForConditionalGeneration._keep_in_fp32_modules = None
model = T5ForConditionalGeneration.from_pretrained(self.model_name, load_in_8bit=True, device_map='auto')
encoded_input = self.tokenizer(self.input_text, return_tensors='pt').to(0)
_ = model.generate(**encoded_input)
model = T5ForConditionalGeneration.from_pretrained(self.dense_act_model_name, load_in_8bit=True, device_map='auto')
encoded_input = self.tokenizer(self.input_text, return_tensors='pt').to(0)
_ = model.generate(**encoded_input)
def test_inference_with_keep_in_fp32(self):
import bitsandbytes as bnb
from transformers import T5ForConditionalGeneration
model = T5ForConditionalGeneration.from_pretrained(self.model_name, load_in_8bit=True, device_map='auto')
self.assertTrue(isinstance(model.decoder.block[0].layer[0].SelfAttention.q, bnb.nn.Linear8bitLt))
encoded_input = self.tokenizer(self.input_text, return_tensors='pt').to(0)
_ = model.generate(**encoded_input)
model = T5ForConditionalGeneration.from_pretrained(self.dense_act_model_name, load_in_8bit=True, device_map='auto')
encoded_input = self.tokenizer(self.input_text, return_tensors='pt').to(0)
_ = model.generate(**encoded_input) |
class DiscriminatorSTFT(nn.Module):
def __init__(self, filters: int, in_channels: int=1, out_channels: int=1, n_fft: int=1024, hop_length: int=256, win_length: int=1024, max_filters: int=1024, filters_scale: int=1, kernel_size: tp.Tuple[(int, int)]=(3, 9), dilations: tp.List=[1, 2, 4], stride: tp.Tuple[(int, int)]=(1, 2), normalized: bool=True, norm: str='weight_norm', activation: str='LeakyReLU', activation_params: dict={'negative_slope': 0.2}):
super().__init__()
assert (len(kernel_size) == 2)
assert (len(stride) == 2)
self.filters = filters
self.in_channels = in_channels
self.out_channels = out_channels
self.n_fft = n_fft
self.hop_length = hop_length
self.win_length = win_length
self.normalized = normalized
self.activation = getattr(torch.nn, activation)(**activation_params)
self.spec_transform = torchaudio.transforms.Spectrogram(n_fft=self.n_fft, hop_length=self.hop_length, win_length=self.win_length, window_fn=torch.hann_window, normalized=self.normalized, center=False, pad_mode=None, power=None)
spec_channels = (2 * self.in_channels)
self.convs = nn.ModuleList()
self.convs.append(NormConv2d(spec_channels, self.filters, kernel_size=kernel_size, padding=get_2d_padding(kernel_size)))
in_chs = min((filters_scale * self.filters), max_filters)
for (i, dilation) in enumerate(dilations):
out_chs = min(((filters_scale ** (i + 1)) * self.filters), max_filters)
self.convs.append(NormConv2d(in_chs, out_chs, kernel_size=kernel_size, stride=stride, dilation=(dilation, 1), padding=get_2d_padding(kernel_size, (dilation, 1)), norm=norm))
in_chs = out_chs
out_chs = min(((filters_scale ** (len(dilations) + 1)) * self.filters), max_filters)
self.convs.append(NormConv2d(in_chs, out_chs, kernel_size=(kernel_size[0], kernel_size[0]), padding=get_2d_padding((kernel_size[0], kernel_size[0])), norm=norm))
self.conv_post = NormConv2d(out_chs, self.out_channels, kernel_size=(kernel_size[0], kernel_size[0]), padding=get_2d_padding((kernel_size[0], kernel_size[0])), norm=norm)
def forward(self, x: torch.Tensor):
fmap = []
z = self.spec_transform(x)
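        # split the complex spectrogram into 2 * in_channels real-valued channels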
z = torch.cat([z.real, z.imag], dim=1)
z = rearrange(z, 'b c w t -> b c t w')
for (i, layer) in enumerate(self.convs):
z = layer(z)
z = self.activation(z)
fmap.append(z)
z = self.conv_post(z)
return (z, fmap) |
def path_deploy(base, port=0, host='', index=True, static_dir=None, reconnect_timeout=0, cdn=True, debug=False, allowed_origins=None, check_origin=None, max_payload_size='200M', **tornado_app_settings):
debug = Session.debug = os.environ.get('PYWEBIO_DEBUG', debug)
page.MAX_PAYLOAD_SIZE = max_payload_size = parse_file_size(max_payload_size)
tornado_app_settings.setdefault('websocket_ping_interval', 30)
tornado_app_settings.setdefault('websocket_max_message_size', max_payload_size)
tornado_app_settings['websocket_max_message_size'] = parse_file_size(tornado_app_settings['websocket_max_message_size'])
gen = _path_deploy(base, port=port, host=host, static_dir=static_dir, debug=debug, max_payload_size=max_payload_size, **tornado_app_settings)
cdn = cdn_validation(cdn, 'warn', stacklevel=3)
abs_base = next(gen)
index_func = {True: partial(default_index_page, base=abs_base), False: (lambda p: '403 Forbidden')}.get(index, index)
Handler = webio_handler((lambda : None), cdn=cdn, allowed_origins=allowed_origins, check_origin=check_origin, reconnect_timeout=reconnect_timeout)
class WSHandler(Handler):
def get_cdn(self):
_cdn = super().get_cdn()
if (not _cdn):
return LOCAL_STATIC_URL
return _cdn
def get_app(self):
reload = (self.get_query_argument('reload', None) is not None)
(type, res) = get_app_from_path(self.request.path, abs_base, index=index_func, reload=reload)
if (type == 'error'):
raise tornado.web.HTTPError(status_code=res)
elif (type == 'html'):
raise tornado.web.Finish(res)
app_name = self.get_query_argument('app', 'index')
app = (res.get(app_name) or res['index'])
return app
gen.send(WSHandler)
gen.close() |
class TestCopyArea(EndianTest):
def setUp(self):
        # drawable and gc resource IDs below are decoded from req_bin_0 (little-endian CopyArea request)
        self.req_args_0 = {'dst_drawable': 1578589574, 'dst_x': (- 27552), 'dst_y': (- 6968), 'gc': 1741136437, 'height': 7340, 'src_drawable': 1855628899, 'src_x': (- 24637), 'src_y': (- 24026), 'width': 46214}
self.req_bin_0 = b'>\x00\x07\x00c\xa6\x9an\x86]\x17^5\xa2\xc7g\xc3\x9f&\xa2`\x94\xc8\xe4\x86\xb4\xac\x1c'
def testPackRequest0(self):
bin = request.CopyArea._request.to_binary(*(), **self.req_args_0)
self.assertBinaryEqual(bin, self.req_bin_0)
def testUnpackRequest0(self):
(args, remain) = request.CopyArea._request.parse_binary(self.req_bin_0, dummy_display, 1)
self.assertBinaryEmpty(remain)
self.assertEqual(args, self.req_args_0) |
def main(client, config):
(store_sales, date_dim, store, product_reviews) = benchmark(read_tables, config=config, compute_result=config['get_read_time'])
q18_startDate_int = np.datetime64(q18_startDate, 'ms').astype(int)
q18_endDate_int = np.datetime64(q18_endDate, 'ms').astype(int)
date_dim_filtered = date_dim.loc[((date_dim.d_date.astype('datetime64[ms]').astype('int') >= q18_startDate_int) & (date_dim.d_date.astype('datetime64[ms]').astype('int') <= q18_endDate_int))].reset_index(drop=True)
ss_date_dim_join = left_semi_join(store_sales, date_dim_filtered, left_on=['ss_sold_date_sk'], right_on=['d_date_sk'])
temp = ss_date_dim_join.groupby(['ss_store_sk', 'ss_sold_date_sk']).agg({'ss_net_paid': 'sum'}).reset_index()
temp['xx'] = (temp.ss_sold_date_sk * temp.ss_sold_date_sk)
temp['xy'] = (temp.ss_sold_date_sk * temp.ss_net_paid)
temp.columns = ['ss_store_sk', 'x', 'y', 'xx', 'xy']
regression_analysis = temp.groupby(['ss_store_sk']).agg({'x': ['count', 'sum'], 'xy': 'sum', 'y': 'sum', 'xx': 'sum'}).reset_index(drop=False)
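    # per-store OLS slope: (n*sum(xy) - sum(x)*sum(y)) / (n*sum(xx) - sum(x)^2)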
regression_analysis['slope'] = (((regression_analysis[('x', 'count')] * regression_analysis[('xy', 'sum')]) - (regression_analysis[('x', 'sum')] * regression_analysis[('y', 'sum')])) / ((regression_analysis[('x', 'count')] * regression_analysis[('xx', 'sum')]) - (regression_analysis[('x', 'sum')] * regression_analysis[('x', 'sum')])))
regression_analysis = regression_analysis[['ss_store_sk', 'slope']]
regression_analysis.columns = ['ss_store_sk', 'slope']
regression_analysis['ss_store_sk'] = regression_analysis['ss_store_sk'].astype('int32')
store['s_store_sk'] = store['s_store_sk'].astype('int32')
temp_table1 = store.merge(regression_analysis[['ss_store_sk', 'slope']].query('slope <= 0').reset_index(drop=True), left_on='s_store_sk', right_on='ss_store_sk')
temp_table1 = temp_table1[['s_store_sk', 's_store_name']]
temp_table1 = temp_table1.repartition(npartitions=1)
temp_table1 = temp_table1.persist()
stores_with_regression = temp_table1
pr = product_reviews
targets = stores_with_regression.s_store_name.str.lower().unique().compute().to_arrow().to_pylist()
no_nulls = pr[(~ pr.pr_review_content.isnull())].reset_index(drop=True)
no_nulls['pr_review_sk'] = no_nulls['pr_review_sk'].astype('int32')
no_nulls = no_nulls.reset_index(drop=True).persist()
temp_table2_meta_empty_df = cudf.DataFrame({'word': ['a'], 'pr_review_sk': np.ones(1, dtype=np.int64), 'pr_review_date': ['a']}).head(0)
combined = no_nulls.map_partitions(find_relevant_reviews, targets, meta=temp_table2_meta_empty_df)
stores_with_regression['store_ID'] = stores_with_regression.s_store_sk.astype('str').str.cat(stores_with_regression.s_store_name, sep='_')
stores_with_regression['s_store_name'] = stores_with_regression.s_store_name.str.lower()
temp_table2 = combined.merge(stores_with_regression, how='inner', left_on=['word'], right_on=['s_store_name'])
temp_table2 = temp_table2[['store_ID', 'pr_review_date', 'pr_review_sk']]
temp_table2 = temp_table2.persist()
no_nulls['pr_review_content'] = no_nulls.pr_review_content.str.replace(['. ', '? ', '! '], [EOL_CHAR], regex=False)
sentences = no_nulls.map_partitions(create_sentences_from_reviews)
sentences['x'] = 1
sentences['sentence_tokenized_global_pos'] = sentences.x.cumsum()
del sentences['x']
sentiment_dir = os.path.join(config['data_dir'], 'sentiment_files')
with open(os.path.join(sentiment_dir, 'negativeSentiment.txt')) as fh:
negativeSentiment = list(map(str.strip, fh.readlines()))
negativeSentiment = list(set(negativeSentiment))
word_df = sentences.map_partitions(create_words_from_sentences, global_position_column='sentence_tokenized_global_pos')
sent_df = cudf.DataFrame({'word': negativeSentiment})
sent_df['sentiment'] = 'NEG'
sent_df = dask_cudf.from_cudf(sent_df, npartitions=1)
word_sentence_sentiment = word_df.merge(sent_df, how='inner', on='word')
word_sentence_sentiment['sentence_idx_global_pos'] = word_sentence_sentiment['sentence_idx_global_pos'].astype('int64')
sentences['sentence_tokenized_global_pos'] = sentences['sentence_tokenized_global_pos'].astype('int64')
word_sentence_sentiment_with_sentence_info = word_sentence_sentiment.merge(sentences, how='left', left_on='sentence_idx_global_pos', right_on='sentence_tokenized_global_pos')
temp_table2['pr_review_sk'] = temp_table2['pr_review_sk'].astype('int32')
final = word_sentence_sentiment_with_sentence_info.merge(temp_table2[['store_ID', 'pr_review_date', 'pr_review_sk']], how='inner', left_on='review_idx_global_pos', right_on='pr_review_sk')
keepcols = ['store_ID', 'pr_review_date', 'sentence', 'sentiment', 'word']
final = final[keepcols]
final.columns = ['s_name', 'r_date', 'r_sentence', 'sentiment', 'sentiment_word']
final = final.persist()
wait(final)
final = final.sort_values(['s_name', 'r_date', 'r_sentence', 'sentiment_word'])
final = final.persist()
wait(final)
return final |
class DescribeZeroOrOne():
def it_adds_a_getter_property_for_the_child_element(self, getter_fixture):
(parent, zooChild) = getter_fixture
assert (parent.zooChild is zooChild)
def it_adds_an_add_method_for_the_child_element(self, add_fixture):
(parent, expected_xml) = add_fixture
zooChild = parent._add_zooChild()
assert (parent.xml == expected_xml)
assert isinstance(zooChild, CT_ZooChild)
assert parent._add_zooChild.__doc__.startswith('Add a new ``<w:zooChild>`` child element ')
def it_adds_an_insert_method_for_the_child_element(self, insert_fixture):
(parent, zooChild, expected_xml) = insert_fixture
parent._insert_zooChild(zooChild)
assert (parent.xml == expected_xml)
assert parent._insert_zooChild.__doc__.startswith('Return the passed ``<w:zooChild>`` ')
def it_adds_a_get_or_add_method_for_the_child_element(self, get_or_add_fixture):
(parent, expected_xml) = get_or_add_fixture
zooChild = parent.get_or_add_zooChild()
assert isinstance(zooChild, CT_ZooChild)
assert (parent.xml == expected_xml)
def it_adds_a_remover_method_for_the_child_element(self, remove_fixture):
(parent, expected_xml) = remove_fixture
parent._remove_zooChild()
assert (parent.xml == expected_xml)
    @pytest.fixture
    def add_fixture(self):
parent = self.parent_bldr(False).element
expected_xml = self.parent_bldr(True).xml()
return (parent, expected_xml)
    @pytest.fixture(params=[True, False])
def getter_fixture(self, request):
zooChild_is_present = request.param
parent = self.parent_bldr(zooChild_is_present).element
zooChild = parent.find(qn('w:zooChild'))
return (parent, zooChild)
    @pytest.fixture(params=[True, False])
def get_or_add_fixture(self, request):
zooChild_is_present = request.param
parent = self.parent_bldr(zooChild_is_present).element
expected_xml = self.parent_bldr(True).xml()
return (parent, expected_xml)
    @pytest.fixture
    def insert_fixture(self):
parent = a_parent().with_nsdecls().with_child(an_oomChild()).with_child(an_oooChild()).with_child(a_zomChild()).element
zooChild = a_zooChild().with_nsdecls().element
expected_xml = a_parent().with_nsdecls().with_child(an_oomChild()).with_child(an_oooChild()).with_child(a_zomChild()).with_child(a_zooChild()).xml()
return (parent, zooChild, expected_xml)
    @pytest.fixture(params=[True, False])
def remove_fixture(self, request):
zooChild_is_present = request.param
parent = self.parent_bldr(zooChild_is_present).element
expected_xml = self.parent_bldr(False).xml()
return (parent, expected_xml)
def parent_bldr(self, zooChild_is_present):
parent_bldr = a_parent().with_nsdecls()
if zooChild_is_present:
parent_bldr.with_child(a_zooChild())
return parent_bldr |
class FittingViewDrop(wx.DropTarget):
def __init__(self, dropFn, *args, **kwargs):
super(FittingViewDrop, self).__init__(*args, **kwargs)
self.dropFn = dropFn
self.dropData = wx.TextDataObject()
self.SetDataObject(self.dropData)
def OnData(self, x, y, t):
if self.GetData():
dragged_data = DragDropHelper.data
data = dragged_data.split(':')
self.dropFn(x, y, data)
return t |
def test_variables__validate_dims_optional():
spec = ArrayLikeSpec('foo', 'foo doc', kind='i', dims=({None, 'windows', 'variants'}, 'samples', 'ploidy'))
ds = xr.Dataset()
ds['valid_0'] = (('samples', 'ploidy'), np.ones((2, 3), int))
ds['valid_1'] = (('windows', 'samples', 'ploidy'), np.ones((1, 2, 3), int))
ds['valid_2'] = (('variants', 'samples', 'ploidy'), np.ones((2, 2, 3), int))
ds['invalid_0'] = (('ploidy', 'samples'), np.ones((3, 2), int))
ds['invalid_1'] = (('windows', 'samples'), np.ones((1, 2), int))
ds['invalid_2'] = (('genome', 'samples', 'ploidy'), np.ones((1, 2, 3), int))
variables.validate(ds, {'valid_0': spec, 'valid_1': spec, 'valid_2': spec})
for variable in ['invalid_0', 'invalid_1', 'invalid_2']:
with pytest.warns(DimensionWarning):
variables.validate(ds, {variable: spec}) |
class Index(Op):
__props__ = ()
def make_node(self, x, elem):
assert isinstance(x.type, TypedListType)
assert (x.ttype == elem.type)
return Apply(self, [x, elem], [scalar()])
def perform(self, node, inputs, outputs):
(x, elem) = inputs
(out,) = outputs
for y in range(len(x)):
if node.inputs[0].ttype.values_eq(x[y], elem):
out[0] = np.asarray(y, dtype=config.floatX)
break
def __str__(self):
return self.__class__.__name__ |
def test_pipeline(root_path):
opt = parse_options(root_path, is_train=False)
torch.backends.cudnn.benchmark = True
make_exp_dirs(opt)
log_file = osp.join(opt['path']['log'], f"test_{opt['name']}_{get_time_str()}.log")
logger = get_root_logger(logger_name='basicsr', log_level=logging.INFO, log_file=log_file)
logger.info(get_env_info())
logger.info(dict2str(opt))
test_loaders = []
for (phase, dataset_opt) in sorted(opt['datasets'].items()):
test_set = build_dataset(dataset_opt)
test_loader = build_dataloader(test_set, dataset_opt, num_gpu=opt['num_gpu'], dist=opt['dist'], sampler=None, seed=opt['manual_seed'])
logger.info(f"Number of test images in {dataset_opt['name']}: {len(test_set)}")
test_loaders.append(test_loader)
model = build_model(opt)
for test_loader in test_loaders:
test_set_name = test_loader.dataset.opt['name']
logger.info(f'Testing {test_set_name}...')
model.validation(test_loader, current_iter=opt['name'], tb_logger=None, save_img=opt['val']['save_img']) |
class GeneralizedRCNN(nn.Module):
def __init__(self, cfg):
super(GeneralizedRCNN, self).__init__()
self.backbone = build_backbone(cfg)
self.rpn = build_rpn(cfg)
self.roi_heads = build_roi_heads(cfg)
def forward(self, images, targets=None):
if (self.training and (targets is None)):
raise ValueError('In training mode, targets should be passed')
images = to_image_list(images)
features = self.backbone(images.tensors)
(proposals, proposal_losses) = self.rpn(images, features, targets)
if self.roi_heads:
(x, result, detector_losses) = self.roi_heads(features, proposals, targets)
else:
x = features
result = proposals
detector_losses = {}
if self.training:
losses = {}
losses.update(detector_losses)
losses.update(proposal_losses)
return losses
return result |
def infixNotation(baseExpr, opList, lpar=Suppress('('), rpar=Suppress(')')):
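    # lookahead-only variant of FollowedBy: tries the expression at loc but emits no tokens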
class _FB(FollowedBy):
def parseImpl(self, instring, loc, doActions=True):
self.expr.tryParse(instring, loc)
return (loc, [])
ret = Forward()
lastExpr = (baseExpr | ((lpar + ret) + rpar))
for (i, operDef) in enumerate(opList):
(opExpr, arity, rightLeftAssoc, pa) = (operDef + (None,))[:4]
termName = (('%s term' % opExpr) if (arity < 3) else ('%s%s term' % opExpr))
if (arity == 3):
if ((opExpr is None) or (len(opExpr) != 2)):
raise ValueError('if numterms=3, opExpr must be a tuple or list of two expressions')
(opExpr1, opExpr2) = opExpr
thisExpr = Forward().setName(termName)
if (rightLeftAssoc == opAssoc.LEFT):
if (arity == 1):
matchExpr = (_FB((lastExpr + opExpr)) + Group((lastExpr + OneOrMore(opExpr))))
elif (arity == 2):
if (opExpr is not None):
matchExpr = (_FB(((lastExpr + opExpr) + lastExpr)) + Group((lastExpr + OneOrMore((opExpr + lastExpr)))))
else:
matchExpr = (_FB((lastExpr + lastExpr)) + Group((lastExpr + OneOrMore(lastExpr))))
elif (arity == 3):
matchExpr = (_FB(((((lastExpr + opExpr1) + lastExpr) + opExpr2) + lastExpr)) + Group(((((lastExpr + opExpr1) + lastExpr) + opExpr2) + lastExpr)))
else:
raise ValueError('operator must be unary (1), binary (2), or ternary (3)')
elif (rightLeftAssoc == opAssoc.RIGHT):
if (arity == 1):
if (not isinstance(opExpr, Optional)):
opExpr = Optional(opExpr)
matchExpr = (_FB((opExpr.expr + thisExpr)) + Group((opExpr + thisExpr)))
elif (arity == 2):
if (opExpr is not None):
matchExpr = (_FB(((lastExpr + opExpr) + thisExpr)) + Group((lastExpr + OneOrMore((opExpr + thisExpr)))))
else:
matchExpr = (_FB((lastExpr + thisExpr)) + Group((lastExpr + OneOrMore(thisExpr))))
elif (arity == 3):
matchExpr = (_FB(((((lastExpr + opExpr1) + thisExpr) + opExpr2) + thisExpr)) + Group(((((lastExpr + opExpr1) + thisExpr) + opExpr2) + thisExpr)))
else:
raise ValueError('operator must be unary (1), binary (2), or ternary (3)')
else:
raise ValueError('operator must indicate right or left associativity')
if pa:
if isinstance(pa, (tuple, list)):
matchExpr.setParseAction(*pa)
else:
matchExpr.setParseAction(pa)
thisExpr <<= (matchExpr.setName(termName) | lastExpr)
lastExpr = thisExpr
ret <<= lastExpr
return ret |
def save_images(webpage, visuals, image_path, aspect_ratio=1.0, width=256):
image_dir = webpage.get_image_dir()
short_path = ntpath.basename(image_path[0])
name = os.path.splitext(short_path)[0]
webpage.add_header(name)
(ims, txts, links) = ([], [], [])
for (label, im_data) in visuals.items():
im = util.tensor2im(im_data)
image_name = ('%s_%s.png' % (name, label))
save_path = os.path.join(image_dir, image_name)
(h, w, _) = im.shape
if (aspect_ratio > 1.0):
im = imresize(im, (h, int((w * aspect_ratio))), interp='bicubic')
if (aspect_ratio < 1.0):
im = imresize(im, (int((h / aspect_ratio)), w), interp='bicubic')
util.save_image(im, save_path)
ims.append(image_name)
txts.append(label)
links.append(image_name)
webpage.add_images(ims, txts, links, width=width) |
def get_criterion(opt, summarywriter=None):
assert isinstance(opt['crit'], list)
crit_objects = []
for item in opt['crit']:
crit_name = item.lower()
if (crit_name == 'lang'):
this_crit_object = LanguageGeneration(opt, crit_name)
elif (crit_name == 'length'):
this_crit_object = nn.KLDivLoss()
else:
raise NotImplementedError('Please make sure that:\n\n 1) the criterion name \'{}\' can be found in config.Constants.mapping.keys();\n\n 2) the coressponding criterion for \'{}\' has been implemented in misc.crit;\n\n 3) add "elif crit_name == \'{}\': this_crit_object = xxx" in misc.crit.get_criterion().\n\n '.format(crit_name, crit_name, crit_name))
crit_objects.append(this_crit_object)
return Criterion(crit_objects=crit_objects, keys=opt['crit_key'], names=opt['crit_name'], scales=opt['crit_scale'], summarywriter=summarywriter) |
def usage():
printerr('Usage is: export-to-postgresql.py <database name> [<columns>] [<calls>] [<callchains>] [<pyside-version-1>]')
printerr("where: columns 'all' or 'branches'")
printerr(" calls 'calls' => create calls and call_paths table")
printerr(" callchains 'callchains' => create call_paths table")
printerr(" pyside-version-1 'pyside-version-1' => use pyside version 1")
raise Exception('Too few or bad arguments') |
class SIGN(Frame):
_framespec = [ByteSpec('group', default=128), BinaryDataSpec('sig')]
    @property
    def HashKey(self):
return ('%s:%s:%s' % (self.FrameID, self.group, _bytes2key(self.sig)))
def __bytes__(self):
return self.sig
def __eq__(self, other):
return (self.sig == other)
__hash__ = Frame.__hash__ |
class F9_TestCase(FC6_TestCase):
def runTest(self):
FC6_TestCase.runTest(self)
self.assert_removed('vnc', 'connect')
        self.assert_parse_error('vnc --host=HOSTNAME --connect=HOSTNAME --password=PASSWORD')
self.assert_parse_error('vnc --connect=HOSTNAME --password=PASSWORD')
self.assert_parse_error('vnc --connect=HOSTNAME')
self.assert_parse_error('vnc --connect')
self.assert_parse_error('vnc --password') |
class Carousel(Widget):
def __init__(self, view, css_id, show_indicators=True, interval=5000, pause='hover', wrap=True, keyboard=True, min_height=None):
super().__init__(view)
self.carousel_panel = self.add_child(Div(view, css_id=css_id))
self.carousel_panel.append_class('carousel')
self.carousel_panel.append_class('slide')
self.carousel_panel.set_attribute('data-ride', 'carousel')
self.carousel_panel.set_attribute('data-interval', str(interval))
pause_option = HTMLAttributeValueOption((pause or 'false'), True, constrain_value_to=['hover', 'false'])
self.carousel_panel.set_attribute('data-pause', pause_option.as_html_snippet())
self.carousel_panel.set_attribute('data-wrap', ('true' if wrap else 'false'))
self.carousel_panel.set_attribute('data-keyboard', ('true' if keyboard else 'false'))
if min_height:
style = self.carousel_panel.add_child(HTMLElement(self.view, 'style', children_allowed=True))
css_id = self.carousel_panel.css_id
style.add_child(TextNode(self.view, ('#%s .carousel-item { min-height: %sem; }' % (css_id, min_height))))
self.show_indicators = show_indicators
if self.show_indicators:
self.indicator_list = self.carousel_panel.add_child(self.create_indicator_list())
self.inner = self.carousel_panel.add_child(self.create_inner())
self.slides = []
self.add_control(previous=True)
self.add_control()
def create_inner(self):
inner = Div(self.view)
inner.append_class('carousel-inner')
return inner
def add_slide(self, widget, caption_widget=None):
slide = self.inner.add_child(Slide(self.view, widget, caption_widget, len(self.slides)))
self.slides.append(slide)
if self.show_indicators:
self.add_indicator_for(slide)
return slide
    @property
    def url(self):
return Url(('#%s' % self.carousel_panel.css_id))
def add_control(self, previous=False):
control_a = self.carousel_panel.add_child(A(self.view, self.url))
control_a.append_class(('carousel-control-prev' if previous else 'carousel-control-next'))
control_a.set_attribute('role', 'button')
control_a.set_attribute('data-slide', ('prev' if previous else 'next'))
span_icon = control_a.add_child(Span(self.view))
span_icon.append_class(('carousel-control-%s-icon' % ('prev' if previous else 'next')))
span_icon.set_attribute('aria-hidden', 'true')
span_text = control_a.add_child(Span(self.view, text=(_('Previous') if previous else _('Next'))))
span_text.append_class('sr-only')
return control_a
def create_indicator_list(self):
indicators = Ol(self.view)
indicators.append_class('carousel-indicators')
return indicators
def add_indicator_for(self, item):
li = self.indicator_list.add_child(Li(self.view))
li.set_attribute('data-target', str(self.url))
li.set_attribute('data-slide-to', ('%s' % item.index))
if item.is_active:
li.append_class('active')
return li |
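# Usage sketch under the assumption of a Reahl-style widget tree; P is
# a stand-in for whatever child widgets the application already has.
# Inside some containing Widget's __init__(self, view):
carousel = self.add_child(Carousel(view, 'home-carousel',
                                   interval=3000,    # ms between slides
                                   pause='hover',    # stop cycling on hover
                                   min_height=20))   # em, via the injected <style>
carousel.add_slide(P(view, text='First slide'))
carousel.add_slide(P(view, text='Second slide'),
                   caption_widget=P(view, text='A caption')) |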
def violet(N, state=None):
    # Violet noise: power spectral density rises as f**2 (the spectral
    # opposite of brown noise). Draw a white complex half-spectrum,
    # weight each bin by its frequency index, then inverse rFFT.
    state = (np.random.RandomState() if (state is None) else state)
    uneven = (N % 2)
    X = (state.randn((((N // 2) + 1) + uneven)) + (1j * state.randn((((N // 2) + 1) + uneven))))
    S = np.arange(len(X))  # per-bin amplitude weight, proportional to frequency
    y = irfft((X * S)).real
    if uneven:
        y = y[:(- 1)]  # irfft yields an even-length signal; trim back to N samples
    return normalize(y) |
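# Usage sketch: violet() depends on two names from its module that are
# not shown above; plausible definitions are assumed here.
import numpy as np
from scipy.fft import irfft

def normalize(y):
    return (y / np.max(np.abs(y)))   # assumed helper: scale to unit peak

y = violet(1024, state=np.random.RandomState(0))   # reproducible draw
assert (len(y) == 1024)
# spectral check: power should be concentrated at high frequencies
psd = (np.abs(np.fft.rfft(y)) ** 2)
assert (psd[(len(psd) // 2):].sum() > psd[:(len(psd) // 2)].sum()) |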
def test_remove_overridden_styles():
from typing import List
from cmd2 import Bg, EightBitBg, EightBitFg, Fg, RgbBg, RgbFg, TextStyle
def make_strs(styles_list: List[ansi.AnsiSequence]) -> List[str]:
return [str(s) for s in styles_list]
styles_to_parse = make_strs([Fg.BLUE, TextStyle.UNDERLINE_DISABLE, TextStyle.INTENSITY_DIM, TextStyle.RESET_ALL])
expected = make_strs([TextStyle.RESET_ALL])
assert (cu._remove_overridden_styles(styles_to_parse) == expected)
styles_to_parse = make_strs([Fg.BLUE, TextStyle.UNDERLINE_DISABLE, TextStyle.INTENSITY_DIM, TextStyle.ALT_RESET_ALL])
expected = make_strs([TextStyle.ALT_RESET_ALL])
assert (cu._remove_overridden_styles(styles_to_parse) == expected)
styles_to_parse = make_strs([Fg.BLUE, Fg.RED, Fg.GREEN, Bg.BLUE, Bg.RED, Bg.GREEN])
expected = make_strs([Fg.GREEN, Bg.GREEN])
assert (cu._remove_overridden_styles(styles_to_parse) == expected)
styles_to_parse = make_strs([EightBitFg.BLUE, EightBitFg.RED, EightBitBg.BLUE, EightBitBg.RED])
expected = make_strs([EightBitFg.RED, EightBitBg.RED])
assert (cu._remove_overridden_styles(styles_to_parse) == expected)
styles_to_parse = make_strs([RgbFg(0, 3, 4), RgbFg(5, 6, 7), RgbBg(8, 9, 10), RgbBg(11, 12, 13)])
expected = make_strs([RgbFg(5, 6, 7), RgbBg(11, 12, 13)])
assert (cu._remove_overridden_styles(styles_to_parse) == expected)
styles_to_parse = make_strs([TextStyle.INTENSITY_DIM, TextStyle.INTENSITY_NORMAL, TextStyle.ITALIC_ENABLE])
expected = make_strs([TextStyle.INTENSITY_NORMAL, TextStyle.ITALIC_ENABLE])
assert (cu._remove_overridden_styles(styles_to_parse) == expected)
styles_to_parse = make_strs([TextStyle.INTENSITY_DIM, TextStyle.ITALIC_ENABLE, TextStyle.ITALIC_DISABLE])
expected = make_strs([TextStyle.INTENSITY_DIM, TextStyle.ITALIC_DISABLE])
assert (cu._remove_overridden_styles(styles_to_parse) == expected)
styles_to_parse = make_strs([TextStyle.INTENSITY_BOLD, TextStyle.OVERLINE_DISABLE, TextStyle.OVERLINE_ENABLE])
expected = make_strs([TextStyle.INTENSITY_BOLD, TextStyle.OVERLINE_ENABLE])
assert (cu._remove_overridden_styles(styles_to_parse) == expected)
styles_to_parse = make_strs([TextStyle.OVERLINE_DISABLE, TextStyle.STRIKETHROUGH_DISABLE, TextStyle.STRIKETHROUGH_ENABLE])
expected = make_strs([TextStyle.OVERLINE_DISABLE, TextStyle.STRIKETHROUGH_ENABLE])
assert (cu._remove_overridden_styles(styles_to_parse) == expected)
styles_to_parse = make_strs([TextStyle.STRIKETHROUGH_DISABLE, TextStyle.UNDERLINE_DISABLE, TextStyle.UNDERLINE_ENABLE])
expected = make_strs([TextStyle.STRIKETHROUGH_DISABLE, TextStyle.UNDERLINE_ENABLE])
assert (cu._remove_overridden_styles(styles_to_parse) == expected)
styles_to_parse = make_strs([TextStyle.UNDERLINE_DISABLE])
expected = make_strs([TextStyle.UNDERLINE_DISABLE])
assert (cu._remove_overridden_styles(styles_to_parse) == expected)
slow_blink = (ansi.CSI + str(5))
rapid_blink = (ansi.CSI + str(6))
styles_to_parse = [slow_blink, rapid_blink]
expected = styles_to_parse
assert (cu._remove_overridden_styles(styles_to_parse) == expected) |
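# A simplified, self-contained sketch of the rule the test exercises
# (not cmd2's actual implementation): within each group of mutually
# exclusive styles only the last one wins, and a reset-all discards
# everything that came before it.
def remove_overridden(styles, groups, resets):
    kept = []
    seen = set()
    for style in reversed(styles):      # walk back to front
        if (style in resets):
            kept.append(style)          # keep the reset itself...
            break                       # ...and drop all earlier styles
        group = groups.get(style)
        if (group is None):
            kept.append(style)          # unrecognized sequences always survive
        elif (group not in seen):
            seen.add(group)             # last-applied style in a group wins
            kept.append(style)
    return list(reversed(kept))

groups = {'fg_blue': 'fg', 'fg_green': 'fg', 'bg_red': 'bg'}
assert (remove_overridden(['fg_blue', 'fg_green', 'bg_red'], groups, {'reset'})
        == ['fg_green', 'bg_red'])
assert (remove_overridden(['fg_blue', 'reset'], groups, {'reset'}) == ['reset']) |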