code stringlengths 281 23.7M |
|---|
class PyMTLTypeError(Exception):
    """Type error raised during PyMTL AST analysis, annotated with the
    source file, line, column, and a caret pointing at the offending code."""

    def __init__(self, blk, ast, msg):
        # Resolve the file and first source line of the analyzed block.
        fname = os.path.abspath(inspect.getsourcefile(blk))
        line = inspect.getsourcelines(blk)[1]
        col, code = 0, ''
        try:
            # Offset the block's starting line by the AST node's position.
            line += ast.lineno - 1
            col = ast.col_offset
            src_line = inspect.getsourcelines(blk)[0][ast.lineno - 1]
            # Caret column = col_offset minus the stripped leading whitespace.
            caret_pad = ' ' * (col - len(src_line) + len(src_line.lstrip()))
            code = '\n ' + src_line.strip() + '\n ' + caret_pad + '^'
        except AttributeError:
            # The AST node carries no location info; report file/line only.
            pass
        return super().__init__(f'''
In file {fname}, Line {line}, Col {col}:{code}
- {msg}''')
class SetupCallback(Callback):
    """Lightning callback that prepares the log/checkpoint/config directories
    and dumps the run configuration when training starts."""

    def __init__(self, resume, now, logdir, ckptdir, cfgdir, config, lightning_config):
        super().__init__()
        self.resume = resume
        self.now = now
        self.logdir = logdir
        self.ckptdir = ckptdir
        self.cfgdir = cfgdir
        self.config = config
        self.lightning_config = lightning_config

    def on_keyboard_interrupt(self, trainer, pl_module):
        # Only rank 0 persists a checkpoint on Ctrl-C.
        if trainer.global_rank != 0:
            return
        print('Summoning checkpoint.')
        trainer.save_checkpoint(os.path.join(self.ckptdir, 'last.ckpt'))

    def on_pretrain_routine_start(self, trainer, pl_module):
        if trainer.global_rank == 0:
            # Create every output directory up front.
            for directory in (self.logdir, self.ckptdir, self.cfgdir):
                os.makedirs(directory, exist_ok=True)
            if 'callbacks' in self.lightning_config:
                if 'metrics_over_trainsteps_checkpoint' in self.lightning_config['callbacks']:
                    os.makedirs(os.path.join(self.ckptdir, 'trainstep_checkpoints'), exist_ok=True)
            # Dump both configs for reproducibility.
            print('Project config')
            print(OmegaConf.to_yaml(self.config))
            OmegaConf.save(self.config, os.path.join(self.cfgdir, f'{self.now}-project.yaml'))
            print('Lightning config')
            print(OmegaConf.to_yaml(self.lightning_config))
            OmegaConf.save(OmegaConf.create({'lightning': self.lightning_config}),
                           os.path.join(self.cfgdir, f'{self.now}-lightning.yaml'))
        elif not self.resume and os.path.exists(self.logdir):
            # Fresh run on a non-zero rank: move a leftover logdir aside.
            dst, name = os.path.split(self.logdir)
            dst = os.path.join(dst, 'child_runs', name)
            os.makedirs(os.path.split(dst)[0], exist_ok=True)
            try:
                os.rename(self.logdir, dst)
            except FileNotFoundError:
                pass
class HwndMeta(BaseMeta):
    """Metaclass that registers wrapper classes by their window class names."""

    # Compiled window-class regex -> wrapper class.
    re_wrappers = {}
    # Literal window-class name -> wrapper class (also a cache for regex hits).
    str_wrappers = {}

    def __init__(cls, name, bases, attrs):
        """Register the freshly created wrapper class for each windowclass."""
        BaseMeta.__init__(cls, name, bases, attrs)
        for win_class in cls.windowclasses:
            HwndMeta.re_wrappers[re.compile(win_class)] = cls
            HwndMeta.str_wrappers[win_class] = cls

    # NOTE(review): takes no self/cls -- this looks like a @staticmethod whose
    # decorator was stripped; confirm against the original source.
    def find_wrapper(element):
        """Return the wrapper class registered for *element*'s window class,
        falling back to DialogWrapper for top-level windows and HwndWrapper
        otherwise."""
        if isinstance(element, six.integer_types):
            # A raw window handle was passed; wrap it in element info first.
            element = HwndElementInfo(element)
        class_name = element.class_name
        try:
            # Fast path: exact class-name registration (or cached regex hit).
            return HwndMeta.str_wrappers[class_name]
        except KeyError:
            wrapper_match = None
            for (regex, wrapper) in HwndMeta.re_wrappers.items():
                if regex.match(class_name):
                    wrapper_match = wrapper
                    # Cache the regex match so later lookups are O(1).
                    HwndMeta.str_wrappers[class_name] = wrapper
                    return wrapper
        # No registered wrapper matched: choose a generic fallback.
        if handleprops.is_toplevel_window(element.handle):
            wrapper_match = DialogWrapper
        if (wrapper_match is None):
            wrapper_match = HwndWrapper
        return wrapper_match
class TestGivensMatrix(QiskitNatureTestCase):
    """Tests for the Givens rotation matrix builder."""

    # NOTE(review): this bare tuple is the argument list of a stripped
    # parameterization decorator (e.g. @unpack/@data) -- restore it upstream.
    ((0, (1 + 1j)), ((1 + 1j), 0), ((1 + 2j), (3 - 4j)))

    def test_givens_matrix(self, a: complex, b: complex):
        """Applying the Givens rotation to (a, b) must zero the second entry."""
        givens_mat = givens_matrix(a, b)
        # Fix: the original line was missing the matrix-multiplication
        # operator ('givens_mat np.array([a, b])' is a syntax error).
        product = givens_mat @ np.array([a, b])
        np.testing.assert_allclose(product[1], 0.0, atol=1e-08)
class ProbabilisticTensorDictModule(TensorDictModuleBase):
    """Module that builds a distribution from tensordict entries and samples it.

    ``in_keys`` name the tensordict entries handed to ``distribution_class``
    as keyword arguments (a dict maps distribution kwarg -> tensordict key);
    ``out_keys`` name where samples are written.  When ``return_log_prob`` is
    True, the sample log-probability is also written under ``log_prob_key``.
    """

    def __init__(self, in_keys: ((NestedKey | List[NestedKey]) | Dict[(str, NestedKey)]), out_keys: ((NestedKey | List[NestedKey]) | None)=None, *, default_interaction_mode: (str | None)=None, default_interaction_type: InteractionType=InteractionType.MODE, distribution_class: type=Delta, distribution_kwargs: (dict | None)=None, return_log_prob: bool=False, log_prob_key: Optional[NestedKey]='sample_log_prob', cache_dist: bool=False, n_empirical_estimate: int=1000) -> None:
        super().__init__()
        # Normalize single keys to one-element lists.
        if isinstance(in_keys, (str, tuple)):
            in_keys = [in_keys]
        if isinstance(out_keys, (str, tuple)):
            out_keys = [out_keys]
        elif (out_keys is None):
            out_keys = ['_']
        if isinstance(in_keys, dict):
            # dict form: keys are distribution kwarg names, values are td keys.
            (dist_keys, in_keys) = zip(*in_keys.items())
            if (set(map(type, dist_keys)) != {str}):
                raise ValueError(f'If in_keys is dict, its keys must be strings matching to the distribution kwargs.{self.__class__.__name__} got {dist_keys}')
        else:
            dist_keys = in_keys
        self.out_keys = out_keys
        self.in_keys = in_keys
        self.dist_keys = dist_keys
        if (log_prob_key is None):
            log_prob_key = 'sample_log_prob'
        self.log_prob_key = log_prob_key
        if (default_interaction_mode is not None):
            # Deprecated alias for default_interaction_type.
            _insert_interaction_mode_deprecation_warning('default_')
            self.default_interaction_type = InteractionType.from_str(default_interaction_mode)
        else:
            self.default_interaction_type = default_interaction_type
        if isinstance(distribution_class, str):
            distribution_class = distributions_maps.get(distribution_class.lower())
        self.distribution_class = distribution_class
        self.distribution_kwargs = (distribution_kwargs if (distribution_kwargs is not None) else {})
        self.n_empirical_estimate = n_empirical_estimate
        self._dist = None
        # Caching only makes sense for distribution classes with an update().
        self.cache_dist = (cache_dist if hasattr(distribution_class, 'update') else False)
        self.return_log_prob = return_log_prob
        if (self.return_log_prob and (self.log_prob_key not in self.out_keys)):
            self.out_keys.append(self.log_prob_key)

    def get_dist(self, tensordict: TensorDictBase) -> D.Distribution:
        """Build the distribution from the tensordict entries named by in_keys."""
        try:
            dist_kwargs = {}
            for (dist_key, td_key) in zip(self.dist_keys, self.in_keys):
                if isinstance(dist_key, tuple):
                    # Nested keys expose only their leaf name to the distribution.
                    dist_key = dist_key[(- 1)]
                dist_kwargs[dist_key] = tensordict.get(td_key)
            dist = self.distribution_class(**dist_kwargs, **self.distribution_kwargs)
        except TypeError as err:
            if ('an unexpected keyword argument' in str(err)):
                raise TypeError(f'''distribution keywords and tensordict keys indicated by ProbabilisticTensorDictModule.dist_keys must match.Got this error message:
{indent(str(err), (4 * ' '))}
with dist_keys={self.dist_keys}''')
            elif re.search('missing.*required positional arguments', str(err)):
                raise TypeError(f'TensorDict with keys {tensordict.keys()} does not match the distribution {self.distribution_class} keywords.')
            else:
                raise err
        return dist

    def log_prob(self, tensordict):
        """Return the log-probability of the sample currently in *tensordict*."""
        dist = self.get_dist(tensordict)
        if isinstance(dist, CompositeDistribution):
            tensordict = dist.log_prob(tensordict)
            return tensordict.get('sample_log_prob')
        else:
            return dist.log_prob(tensordict.get(self.out_keys[0]))

    @property
    def SAMPLE_LOG_PROB_KEY(self):
        # Fix: restored @property -- the deprecation message below instructs
        # callers to use attribute-style access ('obj.log_prob_key').
        warnings.warn("SAMPLE_LOG_PROB_KEY will be deprecated soon.Use 'obj.log_prob_key' instead", category=DeprecationWarning)
        return self.log_prob_key

    # NOTE(review): the two commented lines below were '(auto_batch_size=False)'
    # and '_skip_existing(None)' -- residue of stripped decorators (the first is
    # not even valid syntax).  Restore the original tensordict decorators
    # (dispatch / skip-existing handling) from upstream.
    # (auto_batch_size=False)
    # _skip_existing(None)
    def forward(self, tensordict: TensorDictBase, tensordict_out: (TensorDictBase | None)=None, _requires_sample: bool=True) -> TensorDictBase:
        """Sample the distribution and write the results into the tensordict."""
        if (tensordict_out is None):
            tensordict_out = tensordict
        dist = self.get_dist(tensordict)
        if _requires_sample:
            out_tensors = self._dist_sample(dist, interaction_type=interaction_type())
            if isinstance(out_tensors, TensorDictBase):
                tensordict_out.update(out_tensors)
                if self.return_log_prob:
                    tensordict_out = dist.log_prob(tensordict_out)
            else:
                if isinstance(out_tensors, Tensor):
                    out_tensors = (out_tensors,)
                tensordict_out.update({key: value for (key, value) in zip(self.out_keys, out_tensors)})
                if self.return_log_prob:
                    log_prob = dist.log_prob(*out_tensors)
                    tensordict_out.set(self.log_prob_key, log_prob)
        elif self.return_log_prob:
            # Not sampling: compute the log-prob of the values already present.
            out_tensors = [tensordict.get(key) for key in self.out_keys if (key != self.log_prob_key)]
            log_prob = dist.log_prob(*out_tensors)
            tensordict_out.set(self.log_prob_key, log_prob)
        return tensordict_out

    def _dist_sample(self, dist: D.Distribution, interaction_type: (InteractionType | None)=None) -> (tuple[(Tensor, ...)] | Tensor):
        """Draw a value from *dist* according to the interaction type."""
        if (interaction_type is None):
            interaction_type = self.default_interaction_type
        if (interaction_type is InteractionType.MODE):
            try:
                return dist.mode
            except AttributeError:
                raise NotImplementedError(f'method {type(dist)}.mode is not implemented')
        elif (interaction_type is InteractionType.MEDIAN):
            try:
                return dist.median
            except AttributeError:
                raise NotImplementedError(f'method {type(dist)}.median is not implemented')
        elif (interaction_type is InteractionType.MEAN):
            try:
                return dist.mean
            except (AttributeError, NotImplementedError):
                # No analytic mean: fall back to an empirical Monte-Carlo estimate.
                if dist.has_rsample:
                    return dist.rsample((self.n_empirical_estimate,)).mean(0)
                else:
                    return dist.sample((self.n_empirical_estimate,)).mean(0)
        elif (interaction_type is InteractionType.RANDOM):
            if dist.has_rsample:
                return dist.rsample()
            else:
                return dist.sample()
        else:
            raise NotImplementedError(f'unknown interaction_type {interaction_type}')
# NOTE(review): the leading '.parametrize' was a stripped decorator (invalid
# syntax as written); restored as @pytest.mark.parametrize, which matches the
# pytest.param argument shape already present.
@pytest.mark.parametrize('unary_op', [
    pytest.param((lambda a: a.conj()), id='conj'),
    pytest.param((lambda a: a.dag()), id='dag'),
    pytest.param((lambda a: a.trans()), id='trans'),
    pytest.param((lambda a: (- a)), id='neg'),
])
def test_unary_ket(unary_op):
    """A unary op applied to a QobjEvo must commute with evaluation at time t."""
    obj = QobjEvo(rand_ket(5))
    for t in TESTTIMES:
        transformed = unary_op(obj)
        as_qevo = transformed(t)
        as_qobj = unary_op(obj(t))
        assert (transformed._dims == as_qevo._dims)
        _assert_qobj_almost_eq(as_qevo, as_qobj)
def simple_eval(dataset, prompts, eval_template='Instruction: [PROMPT]\nInput: [INPUT]\nOutput: [OUTPUT]', demos_template='Input: [INPUT]\nOutput: [OUTPUT]', eval_model='text-davinci-002', num_samples=50):
    """Evaluate *prompts* on *dataset* using the default evaluation config.

    The dataset doubles as the few-shot demo source.  num_samples is capped
    at the dataset size.
    """
    eval_tpl = template.EvalTemplate(eval_template)
    demos_tpl = template.DemosTemplate(demos_template)
    conf = config.update_config({}, 'configs/default.yaml')
    eval_conf = conf['evaluation']
    eval_conf['model']['gpt_config']['model'] = eval_model
    eval_conf['num_samples'] = min(len(dataset[0]), num_samples)
    # 'evalute_prompts' is the upstream API's spelling; do not "fix" it here.
    return evaluate.evalute_prompts(prompts, eval_tpl, dataset, demos_tpl,
                                    dataset, eval_conf['method'], eval_conf)
class Boundary():
    """Ground boundary model: impedance and reflection factor over frequency.

    NOTE(review): ``wavenumber``, ``impedance`` and ``reflection_factor`` are
    read attribute-style throughout this class (e.g. ``np.abs(self.impedance)``),
    so the stripped ``@property`` decorators have been restored.
    """

    def __init__(self, frequency, flow_resistivity, density=DENSITY, soundspeed=SOUNDSPEED, porosity_decrease=POROSITY_DECREASE, specific_heat_ratio=SPECIFIC_HEAT_RATIO, angle=None, distance=None, impedance_model='db', reflection_model='plane'):
        self.frequency = frequency                # frequency or array of frequencies in Hz
        self.flow_resistivity = flow_resistivity  # ground flow resistivity
        self.density = density
        self.soundspeed = soundspeed
        self.porosity_decrease = porosity_decrease
        self.specific_heat_ratio = specific_heat_ratio
        self.angle = angle        # angle(s) of incidence (or None)
        self.distance = distance  # source-receiver distance for the spherical model
        self.impedance_model = impedance_model    # 'db' (Delany & Bazley) or 'att' (Attenborough)
        self.reflection_model = reflection_model  # 'plane' or 'spherical'

    @property
    def wavenumber(self):
        """Acoustic wavenumber k = 2*pi*f / c."""
        return (((2.0 * np.pi) * self.frequency) / self.soundspeed)

    @property
    def impedance(self):
        """Ground impedance according to the selected impedance model."""
        if (self.impedance_model == 'db'):
            return impedance_delany_and_bazley(self.frequency, self.flow_resistivity)
        if (self.impedance_model == 'att'):
            return impedance_attenborough(self.frequency, self.flow_resistivity, self.density, self.soundspeed, self.porosity_decrease, self.specific_heat_ratio)
        else:
            raise ValueError('Incorrect impedance model.')

    @property
    def reflection_factor(self):
        """Reflection factor over a (frequency, angle) mesh.

        Raises AttributeError when the needed geometry (angle, and distance
        for the spherical model) has not been set.
        """
        if (self.angle is None):
            raise AttributeError('Cannot calculate reflection factor. self.angle has not been specified.')
        if (self.reflection_model == 'plane'):
            return reflection_factor_plane_wave(*np.meshgrid(self.impedance, self.angle))
        elif (self.reflection_model == 'spherical'):
            if (self.distance is None):
                raise AttributeError('Cannot calculate reflection factor. self.distance has not been specified.')
            else:
                return reflection_factor_spherical_wave(*np.meshgrid(self.impedance, self.angle), distance=self.distance, wavenumber=self.wavenumber)
        else:
            raise RuntimeError('Oops...')

    def plot_impedance(self, filename=None):
        """Plot impedance magnitude and angle versus frequency; return the figure."""
        fig = plt.figure()
        ax0 = fig.add_subplot(211)
        ax0.set_title('Magnitude of impedance')
        ax0.semilogx(self.frequency, np.abs(self.impedance))
        ax0.set_xlabel('$f$ in Hz')
        ax0.set_ylabel('$\\left|Z\\right|$')
        ax0.grid()
        ax0 = fig.add_subplot(212)
        ax0.set_title('Angle of impedance')
        ax0.semilogx(self.frequency, np.angle(self.impedance))
        ax0.set_xlabel('$f$ in Hz')
        ax0.set_ylabel('$\\angle Z$')
        ax0.grid()
        plt.tight_layout()
        if filename:
            # Fix: keyword was misspelled 'transparant'; 'transparent' is the
            # documented matplotlib savefig option.
            fig.savefig(filename, transparent=True)
        return fig

    def plot_reflection_factor(self, filename=None):
        """Plot the reflection factor versus frequency and/or angle.

        Returns the figure when no *filename* is given; otherwise saves it.
        """
        if (self.frequency is None):
            raise ValueError('No frequency specified.')
        if (self.angle is None):
            raise ValueError('No angle specified.')
        # Treat scalars as length-1 vectors to pick the plot layout.
        try:
            n_f = len(self.frequency)
        except TypeError:
            n_f = 1
        try:
            n_a = len(self.angle)
        except TypeError:
            n_a = 1
        if ((n_f == 1) and (n_a == 1)):
            raise ValueError('Either frequency or angle needs to be a vector.')
        elif ((n_f == 1) or (n_a == 1)):
            if ((n_f == 1) and (n_a > 1)):
                xlabel = '$\\theta$ in degrees'
            elif ((n_f > 1) and (n_a == 1)):
                xlabel = '$f$ in Hz'
            R = self.reflection_factor
            fig = plt.figure()
            ax0 = fig.add_subplot(211)
            ax0.set_title('Magnitude of reflection factor')
            # NOTE(review): x data is always self.frequency even when the
            # varying quantity is the angle -- confirm against upstream.
            ax0.semilogx(self.frequency, np.abs(R))
            ax0.set_xlabel(xlabel)
            ax0.set_ylabel('$\\left|R\\right|$')
            ax0.grid()
            ax1 = fig.add_subplot(212)
            ax1.set_title('Phase of reflection factor')
            ax1.semilogx(self.frequency, np.angle(R))
            ax1.set_xlabel(xlabel)
            ax1.set_ylabel('$\\angle R$')
            ax1.grid()
        elif ((n_f > 1) and (n_a > 1)):
            # Both vary: draw 2-D maps over the frequency/angle grid.
            R = self.reflection_factor
            fig = plt.figure()
            ax0 = fig.add_subplot(211)
            ax0.set_title('Magnitude of reflection factor')
            ax0.pcolormesh(self.frequency, ((self.angle * 180.0) / np.pi), np.abs(R))
            ax0.grid()
            ax1 = fig.add_subplot(212)
            ax1.set_title('Phase of reflection factor')
            ax1.pcolormesh(self.frequency, ((self.angle * 180.0) / np.pi), np.angle(R))
            ax1.grid()
        else:
            raise RuntimeError('Oops...')
        if filename:
            # Fix: 'transparant' -> 'transparent' (matplotlib savefig keyword).
            fig.savefig(filename, transparent=True)
        else:
            return fig
def trapping_instance(layout: QubitsLayout, u: float, dt: float=0.3, up_particles: int=2, down_particles: int=2) -> FermiHubbardParameters:
    """Build Fermi-Hubbard problem parameters with a Gaussian trap for the
    up chain and a uniform trap for the down chain."""
    up_potential = GaussianTrappingPotential(
        particles=up_particles, center=0.5, sigma=(1 / 7), scale=(- 4))
    down_potential = UniformTrappingPotential(particles=down_particles)
    return FermiHubbardParameters(
        hamiltonian=Hamiltonian(sites_count=layout.size, j=1.0, u=u),
        initial_state=IndependentChainsInitialState(up=up_potential, down=down_potential),
        layout=layout,
        dt=dt,
    )
# NOTE(review): the leading '.skipif(...)' and '.slow' lines were stripped
# decorators (invalid syntax as written); restored as pytest marks, which
# match their argument shapes.
@pytest.mark.skipif((not HAVE_DEPS_FOR_RESOURCE_ESTIMATES), reason='pyscf and/or jax not installed.')
@pytest.mark.slow
def test_sf_helper_trunc():
    """Truncated single-factorization ERIs must perturb the MP2 energy, and
    the full-rank factorization must recover the exact value."""
    mf = make_diamond_113_szv()
    exact_cc = cc.KRCCSD(mf)
    eris = exact_cc.ao2mo()
    (exact_emp2, _, _) = exact_cc.init_amps(eris)
    mymp = mp.KMP2(mf)
    Luv = cholesky_from_df_ints(mymp)
    naux = Luv[(0, 0)].shape[0]
    print(' naux error (Eh)')
    approx_cc = cc.KRCCSD(mf)
    approx_cc.verbose = 0
    # naux=10: a heavily truncated factorization must NOT reproduce exact MP2.
    helper = SingleFactorization(cholesky_factor=Luv, kmf=mf, naux=10)
    eris = build_approximate_eris(approx_cc, helper)
    (emp2, _, _) = approx_cc.init_amps(eris)
    assert (not np.isclose(emp2, exact_emp2))
    # Rebuilding with the same helper must be deterministic.
    out_eris = build_approximate_eris(approx_cc, helper)
    (emp2_2, _, _) = approx_cc.init_amps(out_eris)
    assert (not np.isclose(emp2, exact_emp2))
    assert np.isclose(emp2, emp2_2)
    # naux=5: a different truncation gives a different (still inexact) energy.
    helper = SingleFactorization(cholesky_factor=Luv, kmf=mf, naux=5)
    out_eris = build_approximate_eris(approx_cc, helper)
    (emp2_2, _, _) = approx_cc.init_amps(out_eris)
    assert (not np.isclose(emp2, exact_emp2))
    assert (not np.isclose(emp2, emp2_2))
    # Reusing preallocated eris buffers must not change the result.
    out_eris = build_approximate_eris(approx_cc, helper, eris=eris)
    (emp2_3, _, _) = approx_cc.init_amps(out_eris)
    assert (not np.isclose(emp2, exact_emp2))
    assert np.isclose(emp2_2, emp2_3)
    # Full naux recovers the exact MP2 energy.
    helper = SingleFactorization(cholesky_factor=Luv, kmf=mf, naux=naux)
    out_eris = build_approximate_eris(approx_cc, helper)
    (emp2, _, _) = approx_cc.init_amps(out_eris)
    assert np.isclose(emp2, exact_emp2)
def netmf_large(args):
    """Compute a NetMF embedding for a large window size and save it to disk."""
    logger.info('Running NetMF for a large window size...')
    logger.info('Window size is set to be %d', args.window)
    # Load the graph and approximate its normalized Laplacian spectrum.
    adjacency = load_adjacency_matrix(args.input, variable_name=args.matfile_variable_name)
    volume = float(adjacency.sum())
    evals, D_rt_invU = approximate_normalized_graph_laplacian(adjacency, rank=args.rank, which='LA')
    # Approximate the DeepWalk matrix, then factor it with truncated SVD.
    dw_matrix = approximate_deepwalk_matrix(evals, D_rt_invU, window=args.window, vol=volume, b=args.negative)
    embedding = svd_deepwalk_matrix(dw_matrix, dim=args.dim)
    logger.info('Save embedding to %s', args.output)
    np.save(args.output, embedding, allow_pickle=False)
def cached_path(url_or_filename, cache_dir=None, force_download=False, proxies=None, resume_download=False, user_agent=None):
    """Resolve a URL or local path to a local file path.

    Remote URLs are fetched through the cache; existing local paths are
    returned as-is.  Raises EnvironmentError for a missing local file and
    ValueError for anything that is neither a URL nor a path.
    """
    cache_dir = TRANSFORMERS_CACHE if cache_dir is None else cache_dir
    # Normalize pathlib inputs to plain strings.
    if isinstance(url_or_filename, Path):
        url_or_filename = str(url_or_filename)
    if isinstance(cache_dir, Path):
        cache_dir = str(cache_dir)
    if is_remote_url(url_or_filename):
        return get_from_cache(url_or_filename, cache_dir=cache_dir,
                              force_download=force_download, proxies=proxies,
                              resume_download=resume_download, user_agent=user_agent)
    if os.path.exists(url_or_filename):
        return url_or_filename
    if urlparse(url_or_filename).scheme == '':
        # Looks like a local path, but nothing is there.
        raise EnvironmentError('file {} not found'.format(url_or_filename))
    raise ValueError('unable to parse {} as a URL or as a local path'.format(url_or_filename))
class TestS3PartialParquetFileToTable(TestCase):
    """Tests for s3_partial_parquet_file_to_table row-group and column handling."""

    def test_s3_partial_parquet_file_to_table_sanity(self):
        """Dropping one of two row groups halves the rows; include_columns filters."""
        parquet_file = ParquetFile(PARQUET_FILE_PATH)
        partial_params = PartialParquetParameters.of(pq_metadata=parquet_file.metadata)
        self.assertEqual(partial_params.num_row_groups, 2, 'test_file.parquet has changed.')
        partial_params.row_groups_to_download.pop()
        table = s3_partial_parquet_file_to_table(
            PARQUET_FILE_PATH,
            include_columns=['n_legs'],
            content_encoding=ContentEncoding.IDENTITY.value,
            content_type=ContentType.PARQUET.value,
            partial_file_download_params=partial_params,
        )
        self.assertEqual(len(table), 3)
        self.assertEqual(len(table.columns), 1)

    def test_s3_partial_parquet_file_to_table_when_schema_passed(self):
        """A caller-provided schema controls the column types of the result."""
        parquet_file = ParquetFile(PARQUET_FILE_PATH)
        partial_params = PartialParquetParameters.of(pq_metadata=parquet_file.metadata)
        partial_params.row_groups_to_download.pop()
        schema = pa.schema([
            pa.field('n_legs', pa.string()),
            pa.field('animal', pa.string()),
            pa.field('MISSING', pa.int64()),
        ])
        kwargs_provider = (lambda content_type, kwargs: {'schema': schema})
        table = s3_partial_parquet_file_to_table(
            PARQUET_FILE_PATH,
            ContentType.PARQUET.value,
            ContentEncoding.IDENTITY.value,
            pa_read_func_kwargs_provider=kwargs_provider,
            partial_file_download_params=partial_params,
        )
        self.assertEqual(len(table), 3)
        self.assertEqual(len(table.column_names), 3)
        out_schema = table.schema
        self.assertEqual(out_schema.field(0).type, 'string')
        self.assertEqual(out_schema.field(0).name, 'n_legs')
        self.assertEqual(out_schema.field(1).type, 'string')
        self.assertEqual(out_schema.field(1).name, 'animal')
        self.assertEqual(out_schema.field(2).type, 'int64')
        self.assertEqual(out_schema.field(2).name, 'MISSING')

    def test_s3_partial_parquet_file_to_table_when_schema_passed_with_include_columns(self):
        """include_columns selects a subset while the schema supplies types."""
        parquet_file = ParquetFile(PARQUET_FILE_PATH)
        partial_params = PartialParquetParameters.of(pq_metadata=parquet_file.metadata)
        partial_params.row_groups_to_download.pop()
        schema = pa.schema([
            pa.field('animal', pa.string()),
            pa.field('n_legs', pa.string()),
        ])
        kwargs_provider = (lambda content_type, kwargs: {'schema': schema})
        table = s3_partial_parquet_file_to_table(
            PARQUET_FILE_PATH,
            ContentType.PARQUET.value,
            ContentEncoding.IDENTITY.value,
            ['n_legs', 'animal'],
            pa_read_func_kwargs_provider=kwargs_provider,
            partial_file_download_params=partial_params,
        )
        self.assertEqual(len(table), 3)
        self.assertEqual(len(table.column_names), 2)
        out_schema = table.schema
        self.assertEqual(out_schema.field(0).type, 'string')
        self.assertEqual(out_schema.field(0).name, 'n_legs')

    def test_s3_partial_parquet_file_to_table_when_multiple_row_groups(self):
        """With all row groups requested, every row is returned."""
        parquet_file = ParquetFile(PARQUET_FILE_PATH)
        partial_params = PartialParquetParameters.of(pq_metadata=parquet_file.metadata)
        self.assertEqual(partial_params.num_row_groups, 2, 'test_file.parquet has changed.')
        table = s3_partial_parquet_file_to_table(
            PARQUET_FILE_PATH,
            content_encoding=ContentEncoding.IDENTITY.value,
            content_type=ContentType.PARQUET.value,
            partial_file_download_params=partial_params,
        )
        self.assertEqual(len(table), 6)
        self.assertEqual(len(table.columns), 2)
def main():
    """Poll the VK bot long-poll server and log incoming group events.

    NOTE(review): the literal label strings appear to have lost their original
    non-ASCII (likely Russian) text during extraction; they are reproduced
    exactly as found.
    """
    vk_session = vk_api.VkApi(token='your_group_token')
    longpoll = VkBotLongPoll(vk_session, 'your_group_id')
    for event in longpoll.listen():
        kind = event.type
        if kind == VkBotEventType.MESSAGE_NEW:
            print(' :')
            print(f' : {event.obj.from_id}')
            print(f': {event.obj.text}')
        elif kind == VkBotEventType.MESSAGE_REPLY:
            print(' :')
            print(f' : {event.obj.peer_id}')
            print(f': {event.obj.text}')
        elif kind == VkBotEventType.MESSAGE_TYPING_STATE:
            print(f' {event.obj.from_id}  {event.obj.to_id}')
        elif kind == VkBotEventType.GROUP_JOIN:
            print(f'{event.obj.user_id}  !')
        elif kind == VkBotEventType.GROUP_LEAVE:
            print(f'{event.obj.user_id}  !')
        else:
            # Unknown event type: dump it followed by a blank line.
            print(kind, end='\n\n')
class TestCase(unittest.TestCase):
    """Base test case providing host-platform flags and OSError/mode helpers."""

    # Host-platform flags, evaluated once at class-creation time.
    is_windows = (sys.platform == 'win32')
    is_cygwin = (sys.platform == 'cygwin')
    is_macos = (sys.platform == 'darwin')
    # Determined lazily elsewhere; None means "not checked yet".
    symlinks_can_be_tested = None

    def assert_mode_equal(self, expected, actual):
        # Compare only the permission bits of the two mode words.
        return self.assertEqual(stat.S_IMODE(expected), stat.S_IMODE(actual))

    # NOTE(review): this generator is clearly used as a context manager --
    # it looks like a stripped @contextlib.contextmanager decorator; confirm
    # against the original source before relying on it.
    def raises_os_error(self, subtype):
        """Context manager asserting the body raises OSError whose errno is
        *subtype* (or is contained in *subtype* when a list is given)."""
        try:
            (yield)
            self.fail('No exception was raised, OSError expected')
        except OSError as exc:
            if isinstance(subtype, list):
                self.assertIn(exc.errno, subtype)
            else:
                self.assertEqual(subtype, exc.errno)
class Mirror(_Widget):
    """Widget that mirrors another widget's drawing and input handling on a
    different bar."""

    def __init__(self, reflection, **config):
        _Widget.__init__(self, reflection.length, **config)
        self.reflects = reflection
        self._length = 0
        self.length_type = self.reflects.length_type

    def _configure(self, qtile, bar):
        _Widget._configure(self, qtile, bar)
        # Register with the reflected widget so it can push redraws to us.
        self.reflects.add_mirror(self)
        self.drawer.clear((self.background or self.bar.background))

    def calculate_length(self):
        return self.reflects.calculate_length()

    # Fix: the two 'length' defs silently shadowed each other; they are a
    # property/setter pair (the second takes a value), so the stripped
    # decorators are restored.
    @property
    def length(self):
        """Mirror the reflected widget's length, except when stretching."""
        if (self.length_type != bar.STRETCH):
            return self.reflects.length
        return self._length

    @length.setter
    def length(self, value):
        self._length = value

    def draw(self):
        # Repaint from the reflected widget's drawer onto our own.
        self.drawer.clear((self.reflects.background or self.bar.background))
        self.reflects.drawer.paint_to(self.drawer)
        self.drawer.draw(offsetx=self.offset, offsety=self.offsety, width=self.width)

    def button_press(self, x, y, button):
        self.reflects.button_press(x, y, button)

    def mouse_enter(self, x, y):
        self.reflects.mouse_enter(x, y)

    def mouse_leave(self, x, y):
        self.reflects.mouse_leave(x, y)

    def finalize(self):
        # Deregister before tearing down the widget itself.
        self.reflects.remove_mirror(self)
        _Widget.finalize(self)
class TestLogTime():
    """Tests for debug.log_time used as a context manager and as a decorator."""

    def test_duration(self, caplog):
        """The logged message reports a plausible elapsed duration."""
        logger_name = 'qt-tests'
        with caplog.at_level(logging.DEBUG, logger_name):
            with debug.log_time(logger_name, action='foobar'):
                time.sleep(0.1)
        assert (len(caplog.records) == 1)
        pattern = re.compile('Foobar took ([\\d.]*) seconds\\.')
        match = pattern.fullmatch(caplog.messages[0])
        assert match
        duration = float(match.group(1))
        assert (0 < duration < 30)

    def test_logger(self, caplog):
        """log_time accepts a Logger instance as well as a logger name."""
        logger_name = 'qt-tests'
        with caplog.at_level(logging.DEBUG, logger_name):
            with debug.log_time(logging.getLogger(logger_name)):
                pass
        assert (len(caplog.records) == 1)

    def test_decorator(self, caplog):
        logger_name = 'qt-tests'
        # Fix: the bare '_time(logger_name, action='foo')' call was a stripped
        # decorator; restored as @debug.log_time(...), which the assertions
        # below ('Foo took', one record) depend on.
        @debug.log_time(logger_name, action='foo')
        def func(arg, *, kwarg):
            assert (arg == 1)
            assert (kwarg == 2)
        with caplog.at_level(logging.DEBUG, logger_name):
            func(1, kwarg=2)
        assert (len(caplog.records) == 1)
        assert caplog.messages[0].startswith('Foo took')
def create_line_chart(data_list: List[Union[(QFSeries, DataElementDecorator)]], names_list, title: str=None, recession_series: QFSeries=None, horizontal_lines_list: List[float]=None, vertical_lines_list: List[float]=None, disable_dot: bool=False, start_x: datetime=None, end_x: datetime=None, upper_y: float=None, lower_y: float=None, dot_decimal_points: int=2, recession_name: str=None) -> LineChart:
    """Create a LineChart from series/decorators, with optional recession
    shading, horizontal/vertical reference lines and end-of-line dots.

    names_list entries are consumed in order: one per data series, then one
    per horizontal line, then one per vertical line; a None entry suppresses
    the legend label for that item.
    """
    # If only the start is given, derive the chart end from the data.
    if ((end_x is None) and (start_x is not None)):
        end_x = LineChart.determine_end_x(start_x, data_list)
    line_chart = LineChart(start_x=start_x, end_x=end_x, upper_y=upper_y, lower_y=lower_y)
    line_chart.tick_fontweight = 'bold'
    line_chart.tick_color = 'black'
    names_index = 0
    legend_decorator = LegendDecorator(key='legend')
    for data in data_list:
        assert isinstance(data, (pandas.Series, DataElementDecorator))
        data_element = data
        # Wrap plain series so the chart treats every entry uniformly.
        if isinstance(data_element, pandas.Series):
            data_element = DataElementDecorator(data)
        line_id = data_element.key
        line_chart.add_decorator(data_element)
        # Last valid (x, y) pair; used for the end-of-line dot and legend date.
        point_to_emphasise = (_get_last_valid_value(data_element.data.index), _get_last_valid_value(data_element.data.values))
        series_label = _get_name(names_list, names_index)
        if (series_label is not None):
            # Legend shows the label plus the month/year of the last point.
            legend_decorator.add_entry(data_element, (series_label + ' [{}]'.format(point_to_emphasise[0].strftime('%b %y'))))
        names_index += 1
        if (not disable_dot):
            point_emphasis = PointEmphasisDecorator(data_element, point_to_emphasise, decimal_points=dot_decimal_points, key='point_emphasis_{}'.format(line_id), use_secondary_axes=data_element.use_secondary_axes)
            line_chart.add_decorator(point_emphasis)
    if (title is not None):
        title_decorator = TitleDecorator(title, 'title')
        line_chart.add_decorator(title_decorator)
    if (recession_series is not None):
        # Shade recession periods encoded as a 0/1 series.
        span_decorator = SpanDecorator.from_int_list(recession_series, 'span')
        line_chart.add_decorator(span_decorator)
        if (recession_name is not None):
            legend_decorator.add_entry(span_decorator, recession_name)
    if (horizontal_lines_list is not None):
        for hline in horizontal_lines_list:
            line_decorator = HorizontalLineDecorator(hline, key=('hline' + str(hline)))
            line_chart.add_decorator(line_decorator)
            series_label = _get_name(names_list, names_index)
            if (series_label is not None):
                legend_decorator.add_entry(line_decorator, series_label)
            names_index += 1
    if (vertical_lines_list is not None):
        for vline in vertical_lines_list:
            line_decorator = VerticalLineDecorator(vline, key=('vline' + str(vline)))
            line_chart.add_decorator(line_decorator)
            series_label = _get_name(names_list, names_index)
            if (series_label is not None):
                legend_decorator.add_entry(line_decorator, series_label)
            names_index += 1
    line_chart.add_decorator(legend_decorator)
    return line_chart
class HDFEOSBaseFileReader(BaseFileHandler):
    """Base file handler for HDF-EOS (MODIS-style) files.

    NOTE(review): several methods here are consumed as properties, class
    methods, or static methods elsewhere in this class (e.g. ``self.start_time``
    and ``self.metadata_platform_name`` are read without calling, the ``read_mda``
    family takes ``cls``, and ``_get_res_multiplier`` takes no self), so the
    stripped decorators have been restored.
    """

    def __init__(self, filename, filename_info, filetype_info, **kwargs):
        """Open the HDF4 file and parse its EOS metadata."""
        BaseFileHandler.__init__(self, filename, filename_info, filetype_info)
        try:
            self.sd = SD(self.filename)
        except HDF4Error as err:
            error_message = 'Could not load data from file {}: {}'.format(self.filename, err)
            raise ValueError(error_message)
        self.metadata = self._load_all_metadata_attributes()

    def _load_all_metadata_attributes(self):
        """Merge every known EOS metadata attribute into one dictionary."""
        metadata = {}
        attrs = self.sd.attributes()
        for md_key in ('CoreMetadata.0', 'StructMetadata.0', 'ArchiveMetadata.0'):
            try:
                str_val = attrs[md_key]
            except KeyError:
                continue
            else:
                metadata.update(self.read_mda(str_val))
        return metadata

    @classmethod
    def read_mda(cls, attribute):
        """Parse an EOS metadata string into a nested dictionary."""
        line_iterator = iter(attribute.split('\n'))
        return cls._read_mda(line_iterator)

    @classmethod
    def _read_mda(cls, lines, element=None):
        """Recursively read GROUP/OBJECT structures from the line iterator."""
        current_dict = {}
        for line in lines:
            if (not line):
                continue
            if (line == 'END'):
                return current_dict
            (key, val) = cls._split_line(line, lines)
            if (key in ['GROUP', 'OBJECT']):
                # Descend into a nested structure named by its value.
                current_dict[val] = cls._read_mda(lines, val)
            elif (key in ['END_GROUP', 'END_OBJECT']):
                if (val != element):
                    raise SyntaxError('Non-matching end-tag')
                return current_dict
            elif (key in ['CLASS', 'NUM_VAL']):
                # Structural attributes carrying no data of interest.
                pass
            else:
                current_dict[key] = val
        logger.warning('Malformed EOS metadata, missing an END.')
        return current_dict

    @classmethod
    def _split_line(cls, line, lines):
        """Split one 'KEY = VALUE' line, merging continuation lines on demand."""
        (key, val) = line.split('=', maxsplit=1)
        key = key.strip()
        val = val.strip()
        try:
            with suppress(ValueError):
                val = literal_eval(val)
        except SyntaxError:
            # The value continues on the next physical line; retry merged.
            (key, val) = cls._split_line((line + next(lines)), lines)
        return (key, val)

    @property
    def metadata_platform_name(self):
        """Platform name from the metadata, falling back to the filename."""
        try:
            return self.metadata['INVENTORYMETADATA']['ASSOCIATEDPLATFORMINSTRUMENTSENSOR']['ASSOCIATEDPLATFORMINSTRUMENTSENSORCONTAINER']['ASSOCIATEDPLATFORMSHORTNAME']['VALUE']
        except KeyError:
            return self._platform_name_from_filename()

    def _platform_name_from_filename(self):
        """Guess Terra/Aqua from the filename's platform indicator letter."""
        platform_indicator = self.filename_info['platform_indicator']
        if (platform_indicator in ('t', 'O')):
            return 'Terra'
        return 'Aqua'

    @property
    def start_time(self):
        """Observation start time from the metadata, or from the filename."""
        try:
            date = ((self.metadata['INVENTORYMETADATA']['RANGEDATETIME']['RANGEBEGINNINGDATE']['VALUE'] + ' ') + self.metadata['INVENTORYMETADATA']['RANGEDATETIME']['RANGEBEGINNINGTIME']['VALUE'])
            return datetime.strptime(date, '%Y-%m-%d %H:%M:%S.%f')
        except KeyError:
            return self._start_time_from_filename()

    def _start_time_from_filename(self):
        return self.filename_info['start_time']

    @property
    def end_time(self):
        """Observation end time from the metadata, or the start time."""
        try:
            date = ((self.metadata['INVENTORYMETADATA']['RANGEDATETIME']['RANGEENDINGDATE']['VALUE'] + ' ') + self.metadata['INVENTORYMETADATA']['RANGEDATETIME']['RANGEENDINGTIME']['VALUE'])
            return datetime.strptime(date, '%Y-%m-%d %H:%M:%S.%f')
        except KeyError:
            return self.start_time

    def _read_dataset_in_file(self, dataset_name):
        """Select a dataset by name, raising KeyError if it is absent."""
        if (dataset_name not in self.sd.datasets()):
            error_message = 'Dataset name {} not included in available datasets {}'.format(dataset_name, self.sd.datasets())
            raise KeyError(error_message)
        dataset = self.sd.select(dataset_name)
        return dataset

    def load_dataset(self, dataset_name, is_category=False):
        """Load a dataset as a scaled and masked xarray DataArray."""
        from satpy.readers.hdf4_utils import from_sds
        dataset = self._read_dataset_in_file(dataset_name)
        chunks = self._chunks_for_variable(dataset)
        dask_arr = from_sds(dataset, chunks=chunks)
        dims = (('y', 'x') if (dask_arr.ndim == 2) else None)
        data = xr.DataArray(dask_arr, dims=dims, attrs=dataset.attributes())
        data = self._scale_and_mask_data_array(data, is_category=is_category)
        return data

    def _chunks_for_variable(self, hdf_dataset):
        """Choose dask chunks aligned to whole scans for this variable."""
        scan_length_250m = 40
        var_shape = hdf_dataset.info()[2]
        res_multiplier = self._get_res_multiplier(var_shape)
        num_nonyx_dims = (len(var_shape) - 2)
        return normalize_low_res_chunks((((1,) * num_nonyx_dims) + ('auto', (- 1))), var_shape, (((1,) * num_nonyx_dims) + (scan_length_250m, (- 1))), (((1,) * num_nonyx_dims) + (res_multiplier, res_multiplier)), np.float32)

    @staticmethod
    def _get_res_multiplier(var_shape):
        """Map the number of columns to a resolution multiplier (vs 250m)."""
        num_columns_to_multiplier = {271: 20, 1354: 4, 2708: 2, 5416: 1}
        for (max_columns, res_multiplier) in num_columns_to_multiplier.items():
            if (var_shape[(- 1)] <= max_columns):
                return res_multiplier
        return 1

    def _scale_and_mask_data_array(self, data, is_category=False):
        """Apply scale/offset attributes and replace fill values with NaN."""
        (good_mask, new_fill) = self._get_good_data_mask(data, is_category=is_category)
        scale_factor = data.attrs.pop('scale_factor', None)
        add_offset = data.attrs.pop('add_offset', None)
        if ((scale_factor is not None) and (not is_category)):
            if ((add_offset is not None) and (add_offset != 0)):
                data = (data - np.float32(add_offset))
            data = (data * np.float32(scale_factor))
        if (good_mask is not None):
            data = data.where(good_mask, new_fill)
        return data

    def _get_good_data_mask(self, data_arr, is_category=False):
        """Return (mask of valid pixels, replacement fill value) or (None, None)."""
        try:
            fill_value = data_arr.attrs['_FillValue']
        except KeyError:
            return (None, None)
        if (is_category and np.issubdtype(data_arr.dtype, np.integer)):
            # Integer categories keep their fill value as a regular category.
            return (None, None)
        fill_type = (data_arr.dtype.type if np.issubdtype(data_arr.dtype, np.floating) else np.float32)
        new_fill = fill_type(np.nan)
        data_arr.attrs.pop('_FillValue', None)
        good_mask = (data_arr != fill_value)
        return (good_mask, new_fill)

    def _add_satpy_metadata(self, data_id: DataID, data_arr: xr.DataArray):
        """Attach platform/sensor/rows_per_scan attributes expected by Satpy."""
        new_attrs = {'platform_name': ('EOS-' + self.metadata_platform_name), 'sensor': 'modis'}
        res = data_id['resolution']
        rps = self._resolution_to_rows_per_scan(res)
        new_attrs['rows_per_scan'] = rps
        data_arr.attrs.update(new_attrs)

    def _resolution_to_rows_per_scan(self, resolution: int) -> int:
        """Rows per scan for a MODIS resolution in meters (default 10)."""
        known_rps = {5000: 2, 1000: 10, 500: 20, 250: 40}
        return known_rps.get(resolution, 10)
class TestImportModelCreate():
    """Tests for ModelPathFactory creating models via x-model / x-model-path."""

    # NOTE(review): yields a value and tears down sys.modules afterwards --
    # this looks like a stripped @pytest.fixture; confirm against the original
    # test module.
    def loaded_model_class(self):
        # Minimal stand-in model class exposed through a fake module 'foo'.
        class BarModel():
            a: str
            b: int
        foo_module = ModuleType('foo')
        foo_module.BarModel = BarModel
        modules['foo'] = foo_module
        (yield BarModel)
        # Teardown: remove the fake module so other tests are unaffected.
        del modules['foo']

    def test_dynamic_model(self):
        """x-model creates a dataclass named by the extension's value."""
        factory = ModelPathFactory()
        schema = SchemaPath.from_dict({'x-model': 'TestModel'})
        test_model_class = factory.create(schema, ['name'])
        assert is_dataclass(test_model_class)
        assert (test_model_class.__name__ == 'TestModel')
        assert (list(test_model_class.__dataclass_fields__.keys()) == ['name'])
        assert (test_model_class.__dataclass_fields__['name'].type == str(Any))

    def test_model_path(self, loaded_model_class):
        """x-model-path resolves to the importable class instead of a new one."""
        factory = ModelPathFactory()
        schema = SchemaPath.from_dict({'x-model-path': 'foo.BarModel'})
        test_model_class = factory.create(schema, ['a', 'b'])
        assert (test_model_class == loaded_model_class)
def _form_datetimes(days, msecs):
    """Expand per-scan (day, millisecond) pairs into per-sample scan times.

    Each scan line holds VALUES_PER_SCAN_LINE samples; every consecutive
    group of 4 samples shares one timestamp, offset from the scan start by
    VIEW_TIME_ADJUSTMENT milliseconds per group.  Returns a float64 array of
    shape (n_scans, VALUES_PER_SCAN_LINE) holding total seconds.
    """
    all_datetimes = []
    for day_value, msec in zip(days, msecs):
        day_value = int(day_value)
        scanline = []
        for group in range(int(VALUES_PER_SCAN_LINE / 4)):
            usec = 1000 * (group * VIEW_TIME_ADJUSTMENT + msec)
            seconds = dt.timedelta(days=day_value, microseconds=usec).total_seconds()
            # four consecutive samples share the same timestamp
            scanline.extend([seconds] * 4)
        all_datetimes.append(scanline)
    return np.array(all_datetimes, dtype=np.float64)
def create_stairs(bm, faces, prop):
    """Build stairs (and optional railing) from each selected face.

    Aborts with a popup and returns False as soon as a non-rectangular
    n-gon is encountered; returns True when all faces were processed.
    """
    for face in faces:
        face.select = False
        if not valid_ngon(face):
            popup_message('Stairs creation not supported for non-rectangular n-gon!', 'Ngon Error')
            return False
        # split off the region the stairs grow from, tag it for materials
        face = create_stairs_split(bm, face, prop)
        add_faces_to_group(bm, [face], MaterialGroup.STAIRS)
        normal = face.normal.copy()
        top_faces = create_steps(bm, face, prop)
        if prop.has_railing:
            add_railing_to_stairs(bm, top_faces, normal, prop)
    return True
# NOTE(review): the two `.parametrize(...)` lines below look like stripped
# `@pytest.mark.parametrize` decorators — confirm against upstream.
.parametrize('chunk', [False, True])
.parametrize('genotypes', [[[0, 0], [0, 1], [1, 1]], [[0, 0], [0, 1], [1, 1], [0, 2], [1, 2], [2, 2]], [[0, 0, 0], [0, 0, 1], [0, 1, 1], [1, 1, 1], [0, 0, 2], [0, 1, 2], [1, 1, 2], [0, 2, 2], [1, 2, 2]], [[0, 0, 0, 0], [0, 0, 0, 1], [0, 0, 1, 1], [0, 1, 1, 1], [1, 1, 1, 1]], [[0, 0, 0, 0], [0, 0, 0, 1], [0, 0, 1, 1], [0, 1, 1, 1], [1, 1, 1, 1], [0, 0, 0, 2], [0, 0, 1, 2], [0, 1, 1, 2], [1, 1, 1, 2], [0, 0, 2, 2], [0, 1, 2, 2], [1, 1, 2, 2], [0, 2, 2, 2], [1, 2, 2, 2], [2, 2, 2, 2], [0, 0, 0, 3], [0, 0, 1, 3], [0, 1, 1, 3], [1, 1, 1, 3], [0, 0, 2, 3], [0, 1, 2, 3], [1, 1, 2, 3], [0, 2, 2, 3], [1, 2, 2, 3], [2, 2, 2, 3], [0, 0, 3, 3], [0, 1, 3, 3], [1, 1, 3, 3], [0, 2, 3, 3], [1, 2, 3, 3], [2, 2, 3, 3], [0, 3, 3, 3], [1, 3, 3, 3], [2, 3, 3, 3], [3, 3, 3, 3]]])
def test_convert_call_to_index(genotypes, chunk):
    """convert_call_to_index recovers the genotype index even when calls are
    allele-permuted, and flags partial calls (containing -1) with -1."""
    np.random.seed(0)
    genotypes = np.array(genotypes)
    (n_index, ploidy) = genotypes.shape
    n_variant = 1000
    n_sample = 100
    n_allele = (genotypes.max() + 1)
    # draw a random genotype index for each (variant, sample) pair and
    # materialize the corresponding calls
    expect = np.random.randint(n_index, size=(n_variant * n_sample))
    gt = genotypes[expect].reshape(n_variant, n_sample, ploidy)
    expect = expect.reshape(n_variant, n_sample)
    # knock out 1000 random alleles; those calls must map to index -1
    for _ in range(1000):
        i = np.random.randint(n_variant)
        j = np.random.randint(n_sample)
        a = np.random.randint(ploidy)
        gt[(i, j, a)] = (- 1)
        expect[(i, j)] = (- 1)
    # shuffle allele order within each call; index must be order-invariant
    gt = np.random.default_rng(0).permutation(gt, axis=(- 1))
    ds = simulate_genotype_call_dataset(n_variant=n_variant, n_sample=n_sample, n_allele=n_allele, n_ploidy=ploidy)
    ds.call_genotype.data = gt
    if chunk:
        ds = ds.chunk(dict(variants=100, samples=50))
    ds = convert_call_to_index(ds).compute()
    actual = ds.call_genotype_index
    actual_mask = ds.call_genotype_index_mask
    np.testing.assert_array_equal(expect, actual)
    np.testing.assert_array_equal((expect < 0), actual_mask)
class TextureArrayBin():
    """Packs images into GL texture arrays, allocating a new array when the
    current one runs out of layers.

    Requested dimensions are clamped to the GL maximum texture size; the
    layer count defaults to the GL maximum array-texture depth.
    """

    def __init__(self, texture_width: int = 2048, texture_height: int = 2048,
                 max_depth: Optional[int] = None) -> None:
        size_limit = pyglet.image.get_max_texture_size()
        self.max_depth = max_depth or pyglet.image.get_max_array_texture_layers()
        self.texture_width = min(texture_width, size_limit)
        self.texture_height = min(texture_height, size_limit)
        self.arrays = []

    def add(self, img: 'AbstractImage') -> 'TextureArrayRegion':
        """Place ``img`` in the newest array, growing a fresh one if needed."""
        # Try the most recent array first; fall through to allocation when
        # there is no array yet or the current one has no free layers.
        if self.arrays:
            try:
                return self.arrays[-1].add(img)
            except pyglet.image.TextureArrayDepthExceeded:
                pass
        new_array = pyglet.image.TextureArray.create(
            self.texture_width, self.texture_height, max_depth=self.max_depth)
        self.arrays.append(new_array)
        return new_array.add(img)
def average_it_results(it_rep_results: Sequence[Sequence[Tuple]]):
    """Aggregate per-iteration replicate tuples into mean/sd per metric.

    Each inner sequence holds ``(avg, smis, scores)`` tuples, one per
    replicate.  The result maps each metric name to a list with one
    ``mean_and_sd`` entry per iteration.
    """
    aggregated = {'avg': [], 'smis': [], 'scores': []}
    for replicates in it_rep_results:
        avgs, smiss, scoress = zip(*replicates)
        for key, values in (('avg', avgs), ('smis', smiss), ('scores', scoress)):
            aggregated[key].append(mean_and_sd(values))
    return aggregated
class TxOutputColoring():
    """Holds the color, legend widget and text format used to highlight one
    category of transaction outputs in the transaction dialog."""

    def __init__(self, *, legend: str, color: ColorSchemeItem, tooltip: str):
        self.color = color.as_color(background=True)
        # Legend entry: a colored glyph followed by "= <description>".
        # NOTE(review): box_char is an empty string here — possibly a lost
        # non-ASCII glyph (e.g. a colored box character); confirm upstream.
        self.legend_label = QLabel('<font color={color}>{box_char}</font> = {label}'.format(color=self.color.name(), box_char='', label=legend))
        font = self.legend_label.font()
        # render the legend slightly smaller than surrounding UI text
        font.setPointSize((font.pointSize() - 1))
        self.legend_label.setFont(font)
        # hidden until the dialog decides this category is present
        self.legend_label.setVisible(False)
        # char format applied to matching output text in the QTextEdit
        self.text_char_format = QTextCharFormat()
        self.text_char_format.setBackground(QBrush(self.color))
        self.text_char_format.setToolTip(tooltip)
class BaseLegacyTest(BaseBackendTest):
    """Base test harness for legacy (form-post) auth backends.

    Subclasses supply ``form`` (an HTML template with one format slot for
    the completion URL) and ``response_body`` (urlencoded backend response).
    """
    form = ''
    response_body = ''

    def setUp(self):
        super().setUp()
        # point the backend's form URL at this test's fake login endpoint
        self.strategy.set_settings({f'SOCIAL_AUTH_{self.name}_FORM_URL': self.strategy.build_absolute_uri(f'/login/{self.backend.name}')})

    def extra_settings(self):
        return {f'SOCIAL_AUTH_{self.name}_FORM_URL': f'/login/{self.backend.name}'}

    def do_start(self):
        """Drive a full start -> form-post -> complete round trip over
        HTTPretty-mocked endpoints and return the auth result."""
        start_url = self.strategy.build_absolute_uri(self.backend.start().url)
        # mock the login form page and the completion endpoint
        HTTPretty.register_uri(HTTPretty.GET, start_url, status=200, body=self.form.format(self.complete_url))
        HTTPretty.register_uri(HTTPretty.POST, self.complete_url, status=200, body=self.response_body, content_type='application/x-www-form-urlencoded')
        response = requests.get(start_url)
        self.assertEqual(response.text, self.form.format(self.complete_url))
        response = requests.post(self.complete_url, data=parse_qs(self.response_body))
        # feed the mocked backend response into the strategy and complete
        self.strategy.set_request_data(parse_qs(response.text), self.backend)
        return self.backend.complete()
class TestSuggestedType(TestNameCheckVisitorBase):
    """Checks for the suggested_return_type / suggested_parameter_type codes.

    NOTE(review): the bare `_passes(settings=...)` lines below look like
    stripped decorators (e.g. `@assert_passes(settings=...)`) — confirm
    against upstream; as written they are plain expression statements.
    """

    _passes(settings={ErrorCode.suggested_return_type: True})
    def test_return(self):
        # functions whose returns allow a type suggestion
        def capybara():
            return 1

        def kerodon(cond):
            if cond:
                return 1
            else:
                return 2

    _passes(settings={ErrorCode.suggested_parameter_type: True})
    def test_parameter(self):
        # unannotated parameter whose call sites imply a type
        def capybara(a):
            pass

        def annotated(b: int):
            pass

        class Mammalia():
            def method(self):
                pass

        def kerodon(unannotated):
            capybara(1)
            annotated(2)
            m = Mammalia()
            m.method()
            Mammalia.method(unannotated)
class ResNetShard2(ResNetBase):
    """Second half of a pipeline-split ResNet for RPC model parallelism:
    the two deepest bottleneck stages, global average pooling and the final
    classifier, all placed on ``device``."""

    def __init__(self, device, *args, **kwargs):
        # NOTE(review): num_classes is read from module scope here but also
        # passed on as a keyword — confirm both refer to the same constant.
        super(ResNetShard2, self).__init__(Bottleneck, 512, *args, num_classes=num_classes, **kwargs)
        self.device = device
        # remaining bottleneck stages followed by global average pooling
        self.seq = nn.Sequential(self._make_layer(256, 6, stride=2), self._make_layer(512, 3, stride=2), nn.AdaptiveAvgPool2d((1, 1))).to(self.device)
        self.fc = nn.Linear((512 * self._block.expansion), num_classes).to(self.device)

    def forward(self, x_rref):
        # input arrives as an RRef from the previous pipeline shard
        x = x_rref.to_here().to(self.device)
        # _lock (from the base class) serializes forwards across RPC threads
        with self._lock:
            out = self.fc(torch.flatten(self.seq(x), 1))
        # hand the result back over RPC on CPU
        return out.cpu()
class LabelContextAttentionBlock(nn.Module):
    """Cross-task attention where keys/values come from a label-pooled
    context of the source task's features and queries from the target task.

    ``context_type`` selects which auxiliary probability map pools the
    source features: 'tlabel' uses the target task's probabilities,
    'slabel' the source task's.
    """

    def __init__(self, in_channels, out_channels, context_type, last_affine=True):
        super().__init__()
        self.context_type = context_type
        # two-layer 1x1 conv projections for queries and keys
        self.query_project = nn.Sequential(utils_heads.ConvBNReLU(in_channels, out_channels, kernel_size=1, norm_layer=nn.BatchNorm2d, activation_layer=nn.ReLU), utils_heads.ConvBNReLU(out_channels, out_channels, kernel_size=1, norm_layer=nn.BatchNorm2d, activation_layer=nn.ReLU))
        self.key_project = nn.Sequential(utils_heads.ConvBNReLU(in_channels, out_channels, kernel_size=1, norm_layer=nn.BatchNorm2d, activation_layer=nn.ReLU), utils_heads.ConvBNReLU(out_channels, out_channels, kernel_size=1, norm_layer=nn.BatchNorm2d, activation_layer=nn.ReLU))
        # value projection optionally without affine BN params on the last BN
        self.value_project = utils_heads.ConvBNReLU(in_channels, out_channels, kernel_size=1, norm_layer=nn.BatchNorm2d, activation_layer=nn.ReLU, affine=last_affine)
        self.init_weights()

    def init_weights(self):
        # Xavier-uniform init for every conv, zero bias
        for m in self.modules():
            if isinstance(m, nn.Conv2d):
                nn.init.xavier_uniform_(m.weight)
                if (hasattr(m, 'bias') and (m.bias is not None)):
                    nn.init.constant_(m.bias, 0)

    def forward(self, target_task_feats, source_task_feats, target_aux_prob, source_aux_prob):
        """Scaled dot-product attention from target pixels onto the pooled
        label context; returns a tensor shaped like target_task_feats
        (with out_channels channels)."""
        context = self.gather_context(source_task_feats, target_aux_prob, source_aux_prob)
        batch_size = target_task_feats.size(0)
        key = self.key_project(context)
        value = self.value_project(context)
        # flatten spatial dims: key (B, C, L); value (B, L, C)
        key = key.view(*key.shape[:2], (- 1))
        value = value.view(*value.shape[:2], (- 1)).permute(0, 2, 1)
        query = self.query_project(target_task_feats)
        query = query.view(*query.shape[:2], (- 1)).permute(0, 2, 1)
        sim_map = torch.matmul(query, key)
        # scale by sqrt(d_k) before the softmax
        sim_map /= (query.shape[(- 1)] ** 0.5)
        sim_map = sim_map.softmax(dim=(- 1))
        context = torch.matmul(sim_map, value)
        # restore (B, C, H, W) layout of the target features
        context = context.permute(0, 2, 1).contiguous()
        context = context.reshape(batch_size, (- 1), *target_task_feats.shape[2:])
        return context

    def gather_context(self, source_feats, target_aux_prob, source_aux_prob):
        """Pool source features per class label: (B, C, n_classes, 1).

        NOTE(review): an unknown context_type leaves ``context`` unbound and
        raises NameError — presumably unreachable by construction; confirm.
        """
        if (self.context_type == 'tlabel'):
            (batch_size, channels) = source_feats.shape[:2]
            source_feats = source_feats.view(batch_size, channels, (- 1))
            source_feats = source_feats.permute(0, 2, 1)
            # weight pixels by the target task's class probabilities
            cxt = torch.matmul(target_aux_prob, source_feats)
            context = cxt.permute(0, 2, 1).contiguous().unsqueeze(3)
        elif (self.context_type == 'slabel'):
            (batch_size, channels) = source_feats.shape[:2]
            source_feats = source_feats.view(batch_size, channels, (- 1))
            source_feats = source_feats.permute(0, 2, 1)
            # weight pixels by the source task's class probabilities
            cxt = torch.matmul(source_aux_prob, source_feats)
            context = cxt.permute(0, 2, 1).contiguous().unsqueeze(3)
        return context
class PassportElementErrorReverseSide(PassportElementError):
    """Telegram Passport error for the reverse side of a document.

    The issue is resolved when the file identified by ``file_hash`` changes.
    """
    __slots__ = ('file_hash',)

    def __init__(self, type: str, file_hash: str, message: str, *, api_kwargs: Optional[JSONDict]=None):
        # source is fixed to 'reverse_side' for this error class
        super().__init__('reverse_side', type, message, api_kwargs=api_kwargs)
        # objects are frozen after construction; unfreeze to set attributes
        with self._unfrozen():
            self.file_hash: str = file_hash
            # attributes participating in equality/hash comparisons
            self._id_attrs = (self.source, self.type, self.file_hash, self.message)
class EchoesGameExportDialog(GameExportDialog, Ui_EchoesGameExportDialog):
    """Export dialog for Metroid Prime 2: Echoes randomized ISOs.

    Gathers the vanilla input ISO (only when no extracted copy exists yet),
    the output ISO path, and optionally a Prime 1 ISO used as a source of
    external models, then produces EchoesGameExportParams.

    NOTE(review): several decorators appear stripped from this excerpt —
    game_enum is presumably a @classmethod, and input_file / output_file /
    prime_file / auto_save_spoiler / _contents_file_path presumably
    @property; confirm against upstream before relying on call forms.
    """
    _prompt_input_file: bool  # True while the vanilla ISO still must be provided
    _use_prime_models: bool   # True when Prime 1 assets should supply models

    def game_enum(cls):
        return RandovaniaGame.METROID_PRIME_ECHOES

    def __init__(self, options: Options, patch_data: dict, word_hash: str, spoiler: bool, games: list[RandovaniaGame]):
        super().__init__(options, patch_data, word_hash, spoiler, games)
        self.default_output_name = f'Echoes Randomizer - {word_hash}'
        # only prompt for an input ISO when no extracted copy exists yet
        self._prompt_input_file = check_extracted_game(self.input_file_edit, self.input_file_button, self._contents_file_path)
        per_game = options.options_for_game(self.game_enum())
        assert isinstance(per_game, EchoesPerGameOptions)
        self.input_file_button.clicked.connect(self._on_input_file_button)
        self.output_file_button.clicked.connect(self._on_output_file_button)
        self.prime_file_button.clicked.connect(self._on_prime_file_button)
        if (RandovaniaGame.METROID_PRIME in games):
            # Prime 1 is involved: offer its ISO as an external model source
            self._use_prime_models = (RandovaniaGame.METROID_PRIME in per_game.use_external_models)
            self.prime_models_check.setChecked(self._use_prime_models)
            self._on_prime_models_check()
            self.prime_models_check.clicked.connect(self._on_prime_models_check)
            prime_options = options.options_for_game(RandovaniaGame.METROID_PRIME)
            assert isinstance(prime_options, PrimePerGameOptions)
            if (prime_options.input_path is not None):
                self.prime_file_edit.setText(str(prime_options.input_path))
        else:
            # hide all Prime-related widgets when Prime 1 is not involved
            self._use_prime_models = False
            self.prime_models_check.hide()
            self.prime_file_edit.hide()
            self.prime_file_label.hide()
            self.prime_file_button.hide()
        # pre-fill previously used paths where available
        if (self._prompt_input_file and (per_game.input_path is not None)):
            self.input_file_edit.setText(str(per_game.input_path))
        if (per_game.output_directory is not None):
            output_path = per_game.output_directory.joinpath(f'{self.default_output_name}.iso')
            self.output_file_edit.setText(str(output_path))
        # accept button stays disabled until every visible field validates
        add_field_validation(accept_button=self.accept_button, fields={self.input_file_edit: (lambda : echoes_input_validator(self.input_file, self._prompt_input_file, self.input_file_edit)), self.output_file_edit: (lambda : output_file_validator(self.output_file)), self.prime_file_edit: (lambda : (self._use_prime_models and is_prime1_iso_validator(self.prime_file, iso_required=True)))})

    def update_per_game_options(self, per_game: EchoesPerGameOptions) -> EchoesPerGameOptions:
        """Fold the dialog's current state back into the per-game options."""
        per_game_changes = {}
        if self._prompt_input_file:
            per_game_changes['input_path'] = self.input_file
        use_external_models = per_game.use_external_models.copy()
        if (not self.prime_models_check.isHidden()):
            if self._use_prime_models:
                use_external_models.add(RandovaniaGame.METROID_PRIME)
            else:
                use_external_models.discard(RandovaniaGame.METROID_PRIME)
        return dataclasses.replace(per_game, output_directory=self.output_file.parent, use_external_models=use_external_models, **per_game_changes)

    def save_options(self):
        super().save_options()
        if (not self._use_prime_models):
            return
        # also remember the Prime 1 ISO path in Prime 1's own options
        with self._options as options:
            from randovania.games.prime1.exporter.options import PrimePerGameOptions
            prime_options = options.options_for_game(RandovaniaGame.METROID_PRIME)
            assert isinstance(prime_options, PrimePerGameOptions)
            options.set_options_for_game(RandovaniaGame.METROID_PRIME, dataclasses.replace(prime_options, input_path=self.prime_file))

    def input_file(self) -> (Path | None):
        # None when an extracted copy already exists and no ISO is needed
        if self._prompt_input_file:
            return Path(self.input_file_edit.text())

    def output_file(self) -> Path:
        return Path(self.output_file_edit.text())

    def prime_file(self) -> (Path | None):
        return (Path(self.prime_file_edit.text()) if self.prime_file_edit.text() else None)

    def auto_save_spoiler(self) -> bool:
        return self.auto_save_spoiler_check.isChecked()

    def _on_input_file_button(self):
        if self._prompt_input_file:
            input_file = prompt_for_input_file(self, self.input_file_edit, ['iso'])
            if (input_file is not None):
                self.input_file_edit.setText(str(input_file.absolute()))
        else:
            # clicking while a copy exists deletes it and re-enables the prompt
            delete_internal_copy(self._options.internal_copies_path)
            self.input_file_edit.setText('')
            self._prompt_input_file = check_extracted_game(self.input_file_edit, self.input_file_button, self._contents_file_path)

    def _on_output_file_button(self):
        output_file = prompt_for_output_file(self, ['iso'], f'{self.default_output_name}.iso', self.output_file_edit)
        if (output_file is not None):
            self.output_file_edit.setText(str(output_file))

    def _on_prime_file_button(self):
        prime_file = prompt_for_input_file(self, self.prime_file_edit, ['iso'])
        if (prime_file is not None):
            self.prime_file_edit.setText(str(prime_file.absolute()))

    def _on_prime_models_check(self):
        # enable/disable the Prime ISO widgets to match the checkbox
        use_prime_models = self.prime_models_check.isChecked()
        self._use_prime_models = use_prime_models
        self.prime_file_edit.setEnabled(use_prime_models)
        self.prime_file_label.setEnabled(use_prime_models)
        self.prime_file_button.setEnabled(use_prime_models)
        update_validation(self.prime_file_edit)

    def _contents_file_path(self):
        return self._options.internal_copies_path.joinpath('prime2', 'contents')

    def get_game_export_params(self) -> GameExportParams:
        """Assemble the parameter object consumed by the exporter."""
        spoiler_output = spoiler_path_for(self.auto_save_spoiler, self.output_file)
        backup_files_path = self._options.internal_copies_path.joinpath('prime2', 'vanilla')
        asset_cache_path = self._options.internal_copies_path.joinpath('prime2', 'prime1_models')
        return EchoesGameExportParams(spoiler_output=spoiler_output, input_path=self.input_file, output_path=self.output_file, contents_files_path=self._contents_file_path, backup_files_path=backup_files_path, asset_cache_path=asset_cache_path, prime_path=self.prime_file, use_prime_models=self._use_prime_models)

    async def handle_unable_to_export(self, error: UnableToExportError):
        # a failed export may have corrupted the extracted copy; discard it
        delete_internal_copy(self._options.internal_copies_path)
        return (await super().handle_unable_to_export(error))
def get_xpubs_and_der_suffixes_from_txinout(tx: PartialTransaction, txinout: Union[(PartialTxInput, PartialTxOutput)]) -> List[Tuple[(str, List[int])]]:
    """For each pubkey in ``txinout``, return ``(xpub, derivation_suffix)``.

    The suffix is the tail of the pubkey's BIP32 path below the depth of
    the PSBT global xpub that carries the matching fingerprint.  Raises if
    the transaction lacks a global xpub for some fingerprint.
    """
    fingerprint_to_node = {fp: node for node, (fp, _path) in tx.xpubs.items()}
    result = []
    for pubkey in txinout.pubkeys:
        fingerprint, path = txinout.bip32_paths[pubkey]
        try:
            node = fingerprint_to_node[fingerprint]
        except KeyError as e:
            raise Exception(f'Partial transaction is missing global xpub for fingerprint ({str(e)}) in input/output') from e
        # keep only the path components below the xpub's own depth
        result.append((node.to_xpub(), list(path)[node.depth:]))
    return result
def log_mid_epoch_stats(trainer, progress, extra_meters, log_output):
    """Fold one training step's log_output into the extra meters and log.

    Keys the trainer already tracks are skipped; loss-like keys are averaged
    weighted by sample_size, everything else uniformly.  Returns the stats
    dict that was logged.
    """
    core_keys = {'loss', 'nll_loss', 'ntokens', 'nsentences', 'sample_size'}
    stats = get_training_stats(trainer)
    for key, value in log_output.items():
        if key in core_keys:
            continue
        if 'loss' in key:
            extra_meters[key].update(value, log_output['sample_size'])
        else:
            extra_meters[key].update(value)
        stats[key] = extra_meters[key].avg
    progress.log(stats)
    return stats
class VideoTester():
    """Runs a super-resolution model frame-by-frame over a demo video and
    writes the upscaled result next to the checkpoint outputs."""

    def __init__(self, args, my_model, ckp):
        self.args = args
        self.scale = args.scale
        self.ckp = ckp
        self.model = my_model
        # base name of the input video, used for the output file name
        (self.filename, _) = os.path.splitext(os.path.basename(args.dir_demo))

    def test(self):
        """Upscale the whole video once per configured scale factor."""
        torch.set_grad_enabled(False)
        self.ckp.write_log('\nEvaluation on video:')
        self.model.eval()
        timer_test = utility.timer()
        for (idx_scale, scale) in enumerate(self.scale):
            vidcap = cv2.VideoCapture(self.args.dir_demo)
            total_frames = int(vidcap.get(cv2.CAP_PROP_FRAME_COUNT))
            # output video keeps the source fps, scaled dimensions
            vidwri = cv2.VideoWriter(self.ckp.get_path('{}_x{}.avi'.format(self.filename, scale)), cv2.VideoWriter_fourcc(*'XVID'), vidcap.get(cv2.CAP_PROP_FPS), (int((scale * vidcap.get(cv2.CAP_PROP_FRAME_WIDTH))), int((scale * vidcap.get(cv2.CAP_PROP_FRAME_HEIGHT)))))
            tqdm_test = tqdm(range(total_frames), ncols=80)
            for _ in tqdm_test:
                (success, lr) = vidcap.read()
                if (not success):
                    break
                # numpy frame -> normalized tensor batch on the target device
                (lr,) = common.set_channel(lr, n_channels=self.args.n_colors)
                (lr,) = common.np2Tensor(lr, rgb_range=self.args.rgb_range)
                (lr,) = self.prepare(lr.unsqueeze(0))
                sr = self.model(lr, idx_scale)
                sr = utility.quantize(sr, self.args.rgb_range).squeeze(0)
                # back to uint8 HWC for the video writer
                normalized = ((sr * 255) / self.args.rgb_range)
                ndarr = normalized.byte().permute(1, 2, 0).cpu().numpy()
                vidwri.write(ndarr)
            vidcap.release()
            vidwri.release()
        self.ckp.write_log('Total: {:.2f}s\n'.format(timer_test.toc()), refresh=True)
        torch.set_grad_enabled(True)

    def prepare(self, *args):
        """Move tensors to the configured device, optionally as half floats."""
        device = torch.device(('cpu' if self.args.cpu else 'cuda'))
        def _prepare(tensor):
            if (self.args.precision == 'half'):
                tensor = tensor.half()
            return tensor.to(device)
        return [_prepare(a) for a in args]
class Object(ValueField):
    """Composite protocol field whose value is a nested structure.

    Parsing and packing are delegated to ``type`` (a struct definition);
    check_value flattens a tuple, dict or DictWrapper value into the flat
    list of struct values the wire format expects.
    """

    def __init__(self, name, type, default=None):
        ValueField.__init__(self, name, default)
        self.type = type
        self.structcode = self.type.structcode
        self.structvalues = self.type.structvalues

    def parse_binary_value(self, data, display, length, format):
        return self.type.parse_binary(data, display)

    def parse_value(self, val, display):
        return self.type.parse_value(val, display)

    def pack_value(self, val):
        return self.type.pack_value(val)

    def check_value(self, val):
        """Flatten ``val`` into a list of checked struct values."""
        if isinstance(val, tuple):
            return self._flatten_sequence(val)
        if isinstance(val, dict):
            mapping = val
        elif isinstance(val, DictWrapper):
            mapping = val._data
        else:
            raise TypeError('Object value must be tuple, dictionary or DictWrapper: %s' % val)
        return self._flatten_mapping(mapping)

    def _flatten_sequence(self, val):
        # positional values: consume one tuple element per *named* field
        flat = []
        position = 0
        for field in self.type.fields:
            if not field.name:
                continue
            value = val[position]
            position += 1
            if field.check_value is not None:
                value = field.check_value(value)
            if field.structvalues == 1:
                flat.append(value)
            else:
                flat.extend(value)
        return flat

    def _flatten_mapping(self, mapping):
        # keyed values: look each named field up by name
        flat = []
        for field in self.type.fields:
            if not field.name:
                continue
            value = mapping[field.name]
            if field.check_value is not None:
                value = field.check_value(value)
            if field.structvalues == 1:
                flat.append(value)
            else:
                flat.extend(value)
        return flat
class DLA(nn.Module):
    """Depthwise-separable refinement block with optional expansion.

    refine_mode 'conv' uses a single grouped-less conv; 'conv_exapnd'
    (sic — the typo is part of the public default and kept for caller
    compatibility) uses an inverted-residual style expand / depthwise /
    project pipeline with an identity shortcut when shapes allow.
    """

    def __init__(self, inp, oup, kernel_size=3, stride=1, expand_ratio=3, refine_mode='conv_exapnd'):
        super(DLA, self).__init__()
        hidden_dim = round((inp * expand_ratio))
        self.expand_ratio = expand_ratio
        # identity shortcut only when spatial size and channels are preserved
        self.identity = ((stride == 1) and (inp == oup))
        (self.inp, self.oup) = (inp, oup)
        self.high_dim_id = False
        self.refine_mode = refine_mode
        if (refine_mode == 'conv'):
            self.conv = Conv2dSamePadding(hidden_dim, hidden_dim, (kernel_size, kernel_size), stride, (1, 1), groups=1, bias=False)
        elif (refine_mode == 'conv_exapnd'):
            if (self.expand_ratio != 1):
                # 1x1 expansion to hidden_dim channels
                self.conv_exp = Conv2dSamePadding(inp, hidden_dim, 1, 1, bias=False)
                self.bn1 = nn.BatchNorm2d(hidden_dim)
            # depthwise conv (groups == channels)
            self.depth_sep_conv = Conv2dSamePadding(hidden_dim, hidden_dim, (kernel_size, kernel_size), stride, (1, 1), groups=hidden_dim, bias=False)
            self.bn2 = nn.BatchNorm2d(hidden_dim)
            # 1x1 linear projection back to oup channels (no activation after bn3)
            self.conv_pro = Conv2dSamePadding(hidden_dim, oup, 1, 1, bias=False)
            self.bn3 = nn.BatchNorm2d(oup)
            self.relu = nn.ReLU6(inplace=True)

    def forward(self, input):
        x = input
        if (self.refine_mode == 'conv'):
            return self.conv(x)
        else:
            if (self.expand_ratio != 1):
                x = self.relu(self.bn1(self.conv_exp(x)))
            x = self.relu(self.bn2(self.depth_sep_conv(x)))
            x = self.bn3(self.conv_pro(x))
            # residual connection when input/output shapes match
            if self.identity:
                return (x + input)
            else:
                return x
# NOTE(review): the two `.parametrize(...)` lines below look like stripped
# `@pytest.mark.parametrize` decorators — confirm against upstream.
.parametrize('username,password', users)
.parametrize('url_name', url_names)
def test_next(db, client, username, password, url_name):
    """POSTing the 'next' endpoint advances the user's overlay; anonymous
    users (empty password in the fixtures) get a 401."""
    client.login(username=username, password=password)
    url = reverse(urlnames['next'], args=[url_name])
    response = client.post(url)
    if password:
        assert (response.status_code == 200)
        # the response reports the overlay the user is now on
        assert (response.json().get('overlay') == Overlay.objects.get(user__username=username, url_name=url_name).current)
    else:
        assert (response.status_code == 401)
class NormalDistribution(QuantumCircuit):
    """Quantum circuit loading a (multivariate) normal distribution.

    The distribution's pdf is sampled on an equidistant grid over ``bounds``
    with ``2**num_qubits`` points per dimension; the normalized square roots
    of the probabilities become the state's amplitudes.

    NOTE(review): values/probabilities/bounds read like @property accessors
    whose decorators were stripped from this excerpt — confirm upstream.
    """

    def __init__(self, num_qubits: Union[(int, List[int])], mu: Optional[Union[(float, List[float])]]=None, sigma: Optional[Union[(float, List[float])]]=None, bounds: Optional[Union[(Tuple[(float, float)], List[Tuple[(float, float)]])]]=None, upto_diag: bool=False, name: str='P(X)') -> None:
        _check_dimensions_match(num_qubits, mu, sigma, bounds)
        _check_bounds_valid(bounds)
        # dimensionality: scalar num_qubits means a univariate distribution
        dim = (1 if isinstance(num_qubits, int) else len(num_qubits))
        # defaults: standard normal on [-1, 1] per dimension
        if (mu is None):
            mu = (0 if (dim == 1) else ([0] * dim))
        if (sigma is None):
            sigma = (1 if (dim == 1) else np.eye(dim))
        if (bounds is None):
            bounds = (((- 1), 1) if (dim == 1) else ([((- 1), 1)] * dim))
        if isinstance(num_qubits, int):
            inner = QuantumCircuit(num_qubits, name=name)
            x = np.linspace(bounds[0], bounds[1], num=(2 ** num_qubits))
        else:
            inner = QuantumCircuit(sum(num_qubits), name=name)
            # cartesian grid over all dimensions, one axis per sub-register
            meshgrid = np.meshgrid(*[np.linspace(bound[0], bound[1], num=(2 ** num_qubits[i])) for (i, bound) in enumerate(bounds)], indexing='ij')
            x = list(zip(*[grid.flatten() for grid in meshgrid]))
        from scipy.stats import multivariate_normal
        probabilities = multivariate_normal.pdf(x, mu, sigma)
        normalized_probabilities = (probabilities / np.sum(probabilities))
        self._values = x
        self._probabilities = normalized_probabilities
        self._bounds = bounds
        super().__init__(*inner.qregs, name=name)
        if upto_diag:
            # isometry-based load: correct up to a diagonal, but cheaper
            inner.isometry(np.sqrt(normalized_probabilities), inner.qubits, None)
            self.append(inner.to_instruction(), inner.qubits)
        else:
            from qiskit.extensions import Initialize
            initialize = Initialize(np.sqrt(normalized_probabilities))
            # drop the initial reset so the gate is a pure unitary
            circuit = initialize.gates_to_uncompute().inverse()
            inner.compose(circuit, inplace=True)
            self.append(inner.to_gate(), inner.qubits)

    def values(self) -> np.ndarray:
        """Grid points the pdf was sampled at."""
        return self._values

    def probabilities(self) -> np.ndarray:
        """Normalized sampled probabilities (sum to 1)."""
        return self._probabilities

    def bounds(self) -> Union[(Tuple[(float, float)], List[Tuple[(float, float)]])]:
        """Per-dimension (low, high) truncation bounds."""
        return self._bounds
def group_score_lama_eval(lm_results: Dict):
    """Consistency accuracy over paraphrase patterns.

    ``lm_results`` maps pattern -> {'data': [...], 'predictions': [...]};
    all patterns are assumed to share the same data order.  A datum counts
    as a hit only when the top-1 prediction equals its gold ``obj_label``
    under *every* pattern.  Returns hits / number of data points.
    """
    patterns = list(lm_results.keys())
    data = lm_results[patterns[0]]['data']
    hits = 0
    for index, datum in enumerate(data):
        gold = datum['obj_label']
        if all(lm_results[p]['predictions'][index][0]['token_str'] == gold
               for p in patterns):
            hits += 1
    return hits / len(data)
def process_game(rand, moves):
    """Simulate one Wumpus battle from a list of player moves.

    Moves are 'ATK', 'DEF' or 'HUG'.  The fight ends in state 'good'
    (creature defeated), 'best' (creature won over by hugs), 'bad' (wumpus
    dead) or None (moves exhausted); the rendered image is returned.
    """
    wumpus = Wumpus(rand)
    # random opponent for this game
    creature = rand.choice([Dog, Bear, Horse, Skeleton, Snake, Dragon])(rand)
    messages = [f'You are fighting a {creature.name}!']
    state = None
    for move in moves:
        # messages hold only the current turn's events
        messages.clear()
        wumpus.defending = (move == 'DEF')
        if wumpus.defending:
            msg = rand.choice(you_defend_msgs).replace('CREATURE', creature.name)
            messages.append(msg)
            wumpus.action_def(msg)
        if (move == 'ATK'):
            # damage rolled within the wumpus's attack range
            dmg = rand.randint(*wumpus.attack)
            creature.deal_attack(dmg)
            msg = rand.choice(you_attack_msgs).replace('CREATURE', creature.name).replace('DAMAGE', str(dmg))
            messages.append(msg)
            wumpus.action_atk(msg)
        if (move == 'HUG'):
            msg = rand.choice(you_hug_msgs).replace('CREATURE', creature.name)
            messages.append(msg)
            creature.give_hug()
            wumpus.action_hug(msg)
        # win conditions checked before the creature's counter-move
        if (creature.hp <= 0):
            state = 'good'
            break
        if (creature.love <= 0):
            state = 'best'
            break
        creature_move = creature.turn()
        if wumpus.defending:
            # defending negates the damage; rewrite the message to show 0
            creature_move = (creature_move[0].replace(str(creature_move[1]), '0'), creature_move[1])
        messages.append(creature_move[0])
        if (not wumpus.defending):
            wumpus.hp -= creature_move[1]
        if (wumpus.hp <= 0):
            state = 'bad'
            break
    return render_image(state, messages, wumpus, creature)
def get_param_shape_using_connected_graph(connected_graph: ConnectedGraph, param_name: str):
    """Search every op in the graph for a parameter named ``param_name``.

    Returns the parameter's shape, or None when no op carries it.
    """
    for op in connected_graph.get_all_ops().values():
        if not op.parameters:
            continue
        for param, _ in op.parameters.values():
            if param.name == param_name:
                return param.shape
    return None
def sinkhorn(C, epsilon, niter=50, device='cuda'):
    """Sinkhorn-Knopp transport plan for cost matrix ``C`` (log-domain).

    Solves entropy-regularized optimal transport between uniform marginals
    over the m rows and n columns of ``C``, iterating until the dual
    variable u stops moving (L1 change below 0.1) or ``niter`` iterations.

    Args:
        C: (m, n) cost matrix tensor (assumed to live on ``device``).
        epsilon: entropic regularization strength (> 0).
        niter: maximum number of Sinkhorn iterations.
        device: torch device for the marginals and result.  Bug fix: the
            previous implementation ignored this parameter and hard-coded
            'cuda', making the function unusable on CPU-only machines.

    Returns:
        (m, n) float tensor ``pi`` with total mass ~1.
    """
    m, n = C.size(0), C.size(1)
    # uniform marginals on the requested device (was: .to('cuda'))
    mu = torch.full((m,), 1.0 / m, device=device)
    nu = torch.full((n,), 1.0 / n, device=device)
    thresh = 10 ** (-1)  # early-stopping tolerance on the change of u

    def M(u, v):
        # modified cost matrix: (-C_ij + u_i + v_j) / epsilon
        return ((-C) + u.unsqueeze(1) + v.unsqueeze(0)) / epsilon

    def lse(A):
        # row-wise log-sum-exp; the small constant avoids log(0)
        return torch.log(torch.exp(A).sum(1, keepdim=True) + 1e-06)

    u = 0.0 * mu
    v = 0.0 * nu
    for _ in range(niter):
        u_prev = u
        u = epsilon * (torch.log(mu) - lse(M(u, v)).squeeze()) + u
        v = epsilon * (torch.log(nu) - lse(M(u, v).t()).squeeze()) + v
        if (u - u_prev).abs().sum().item() < thresh:
            break
    # transport plan from the converged duals (was: .to('cuda').float())
    pi = torch.exp(M(u, v)).to(device).float()
    return pi
def bench_regex_effbot(loops):
    """Benchmark re.search over the effbot pattern/string corpus.

    Returns the wall time for ``loops`` passes; each pass runs every
    (regex, string) pair 10 times (the calls are deliberately unrolled so
    the loop overhead does not dominate the measurement).

    NOTE(review): ``bench_regex_effbot.data`` is a lazily-filled function
    attribute — it must be initialized (to None) outside this block before
    the first call; confirm against the surrounding module.
    """
    if (bench_regex_effbot.data is None):
        bench_regex_effbot.data = init_benchmarks()
    data = bench_regex_effbot.data
    range_it = range(loops)
    # bind the method locally so the hot loop uses a fast local lookup
    search = re.search
    t0 = pyperf.perf_counter()
    for _ in range_it:
        for (regex, string) in data:
            # intentionally unrolled x10
            search(regex, string)
            search(regex, string)
            search(regex, string)
            search(regex, string)
            search(regex, string)
            search(regex, string)
            search(regex, string)
            search(regex, string)
            search(regex, string)
            search(regex, string)
    return (pyperf.perf_counter() - t0)
def find_fonts_paths(directory, recursive, extensions=('.ttf', '.otf')):
    """Collect font files under ``directory``.

    Args:
        directory: root directory to scan.
        recursive: walk the whole tree when True, else only the top level.
        extensions: lowercase filename suffixes to accept.  Generalized from
            the previously hard-coded ('.ttf', '.otf'); the default keeps
            the old behavior, so existing callers are unaffected.

    Returns:
        ``(dir_paths, file_paths)``: the set of directories visited and the
        set of matching file paths.  Extension matching is case-insensitive.

    Raises:
        OSError: if ``directory`` is not an existing directory.
    """
    if not os.path.isdir(directory):
        raise OSError(f'Not a directory: {directory}')
    suffixes = tuple(extensions)  # endswith needs a tuple; hoisted once
    dir_paths = set()
    file_paths = set()
    if recursive:
        for dirpath, _, filenames in os.walk(directory):
            dir_paths.add(dirpath)
            file_paths.update(
                os.path.join(dirpath, fname)
                for fname in filenames
                if fname.lower().endswith(suffixes)
            )
    else:
        dir_paths.add(directory)
        file_paths.update(
            os.path.join(directory, fname)
            for fname in os.listdir(directory)
            if fname.lower().endswith(suffixes)
        )
    return (dir_paths, file_paths)
class BaseLowdimDataset(torch.utils.data.Dataset):
    """Abstract base for low-dimensional (state-based) policy datasets.

    The base class behaves as an empty dataset; subclasses provide
    normalization, the full action tensor, and item access.
    """

    def get_validation_dataset(self) -> 'BaseLowdimDataset':
        # base implementation: an empty held-out split
        return BaseLowdimDataset()

    def get_normalizer(self, **kwargs) -> LinearNormalizer:
        """Fit and return a normalizer over the dataset's fields."""
        raise NotImplementedError()

    def get_all_actions(self) -> torch.Tensor:
        """Return every action in the dataset as a single tensor."""
        raise NotImplementedError()

    def __len__(self) -> int:
        return 0

    def __getitem__(self, idx: int) -> Dict[(str, torch.Tensor)]:
        """Return one sample as a name -> tensor mapping."""
        raise NotImplementedError()
def packageSingleFile(path):
    """Prepare one source file for packaging under ``src/``.

    Eligible plain modules (``.py`` files that are not dunder files, not
    ``_version.py`` and not inside the window providers subpackage) are
    cythonized and their generated ``.c`` file is *moved*; every other file
    is *copied* verbatim.  Bytecode files are skipped entirely.

    Raises:
        Exception: when cythonization of an eligible file fails.
    """
    from Cython.Build import cythonize
    directives = {'language_level': '3'}
    if path.endswith(('.pyc', '.pyo')):
        return  # bytecode is never packaged
    current = multiprocessing.current_process()
    print(f'Worker-{current.pid}: cythonizing', path)
    dirpath, file = os.path.split(path)
    if (file.endswith('.py') and not file.startswith('__')
            and file != '_version.py'
            and not path.startswith(os.path.join('pyunity', 'window', 'providers'))):
        srcPath = path[:-2] + 'c'  # foo.py -> foo.c
        try:
            cythonize(path, quiet=True, compiler_directives=directives, show_all_warnings=True)
        except Exception:
            # Bug fix: this was a bare `except:` (which also swallowed
            # KeyboardInterrupt), and os.remove could itself raise when
            # cythonize failed before creating the .c file, masking the
            # real error — so only remove a partial output if it exists.
            if os.path.exists(srcPath):
                os.remove(srcPath)
            raise Exception(f'Cythonization of `{path}` failed.') from None
        op = shutil.move
    else:
        srcPath = os.path.join(dirpath, file)
        op = shutil.copy
    # mirror the tree under src/ (dropping the leading 8-char prefix,
    # presumably a root directory name — confirm against the caller)
    destPath = os.path.join('src', os.path.dirname(srcPath[8:]))
    os.makedirs(destPath, exist_ok=True)
    op(srcPath, destPath)
def get_coco_imgs_labels_info(split, data_source_dir, args):
    """Build multi-label classification data for one COCO-2014 split.

    Reads the instance annotations, maps COCO category ids to a dense
    0..K-1 range, derives a K-dim binary label vector per image, and saves
    the image paths and label matrix as .npy files in the output directory.

    Returns:
        [img_info_out_path, label_info_out_path]: the two saved file paths.
    """
    from pycocotools.coco import COCO
    json_file = f'{data_source_dir}/annotations/instances_{split}2014.json'
    assert PathManager.exists(json_file), 'Annotations file does not exist. Abort'
    json_data = json.load(PathManager.open(json_file, 'r'))
    image_index = [x['id'] for x in json_data['images']]
    coco = COCO(json_file)
    num_cats = len(json_data['categories'])
    logging.info('partition: {} num_cats: {} num_images: {}'.format(split, num_cats, len(image_index)))
    # COCO category ids are sparse; remap them to contiguous 0..K-1
    cat_ids = [x['id'] for x in json_data['categories']]
    coco_to_me = {val: ind for (ind, val) in enumerate(cat_ids)}
    cat_names = [str(x['name']) for x in json_data['categories']]
    (cat_name_to_id, cat_id_to_name) = ({}, {})
    for (ind, name) in enumerate(cat_names):
        cat_name_to_id[name] = ind
        cat_id_to_name[ind] = name
    class_ids = cat_id_to_name.keys()
    # sanity-check the remapped ids are exactly 0..K-1 with no gaps
    assert (len(list(class_ids)) == num_cats)
    assert (min(class_ids) == 0)
    assert (max(class_ids) == (len(class_ids) - 1))
    assert (len(set(class_ids)) == len(class_ids))
    img_labels_map = {}
    num_classes = len(cat_names)
    for (_, im_id) in enumerate(image_index):
        ann_ids = coco.getAnnIds(imgIds=im_id)
        entry = coco.imgs[im_id]
        img_name = entry['file_name']
        objs = coco.loadAnns(ann_ids)
        # filter out degenerate/crowd boxes etc. (see get_valid_objs)
        valid_objs = get_valid_objs(entry, objs)
        if (img_name not in img_labels_map):
            img_labels_map[img_name] = np.zeros(num_classes, dtype=np.int32)
        for (_, obj) in enumerate(valid_objs):
            cocoCatId = obj['category_id']
            myId = coco_to_me[cocoCatId]
            img_labels_map[img_name][myId] = 1.0
    (img_paths, img_labels) = ([], [])
    train_imgs_path = f'{data_source_dir}/train2014'
    val_imgs_path = f'{data_source_dir}/val2014'
    prefix = (train_imgs_path if (split == 'train') else val_imgs_path)
    # deterministic ordering: sort by file name
    for item in sorted(img_labels_map.keys()):
        img_paths.append(f'{prefix}/{item}')
        img_labels.append(img_labels_map[item])
    output_dir = get_output_dir()
    img_info_out_path = f'{output_dir}/{split}_images.npy'
    label_info_out_path = f'{output_dir}/{split}_labels.npy'
    save_file(np.array(img_paths), img_info_out_path)
    save_file(np.array(img_labels), label_info_out_path)
    return [img_info_out_path, label_info_out_path]
def main(args):
    """Entry point: run evaluation only, or a full (resumable) training run."""
    cfg = setup(args)
    if not args.eval_only:
        trainer = Trainer(cfg)
        trainer.resume_or_load(resume=args.resume)
        return trainer.train()
    # evaluation-only path: build the model, load weights, run the test set
    model = Trainer.build_model(cfg)
    DetectionCheckpointer(model, save_dir=cfg.OUTPUT_DIR).resume_or_load(cfg.MODEL.WEIGHTS, resume=args.resume)
    results = Trainer.test(cfg, model)
    if comm.is_main_process():
        verify_results(cfg, results)
    return results
def router_application(num_mock_deployments: int=1, hooks=None):
    """Build a single-replica RouterDeployment bound to mock model deployments."""
    deployment_map = {
        f'model_{i}': MockDeployment.options(name=f'MockDeployment:model_{i}').bind(VLLMApp.parse_yaml(vllm_app_def))
        for i in range(num_mock_deployments)
    }
    query_client = MockRouterQueryClient(deployment_map, hooks=hooks)
    # pin the router to exactly one replica for deterministic tests
    autoscaling = {'min_replicas': 1, 'initial_replicas': 1, 'max_replicas': 1, 'target_num_ongoing_requests_per_replica': 1}
    return RouterDeployment.options(autoscaling_config=autoscaling).bind(query_client)
def get_locale_identifier(tup: ((((tuple[str] | tuple[(str, (str | None))]) | tuple[(str, (str | None), (str | None))]) | tuple[(str, (str | None), (str | None), (str | None))]) | tuple[(str, (str | None), (str | None), (str | None), (str | None))]), sep: str='_') -> str:
tup = tuple(tup[:5])
(lang, territory, script, variant, modifier) = (tup + ((None,) * (5 - len(tup))))
ret = sep.join(filter(None, (lang, script, territory, variant)))
return (f'{ret}{modifier}' if modifier else ret) |
# NOTE(review): the `.skipif(...)` line below looks like a stripped
# `@pytest.mark.skipif` decorator — confirm against upstream.
.skipif((sys.platform.startswith('linux') and (pg.Qt.QT_LIB == 'PySide6') and ((6, 0) < pg.Qt.PySide6.__version_info__ < (6, 4, 3))), reason='taking gui thread causes segfault')
def test_nested_busy_cursors_clear_after_all_exit():
    """Nested BusyCursor contexts keep the wait cursor until the outermost
    one exits, then the override cursor is fully cleared."""
    with pg.BusyCursor():
        wait_cursor = pg.Qt.QtCore.Qt.CursorShape.WaitCursor
        with pg.BusyCursor():
            assert (pg.Qt.QtWidgets.QApplication.overrideCursor().shape() == wait_cursor), 'Cursor should be waiting'
        # still waiting: only the inner context exited
        assert (pg.Qt.QtWidgets.QApplication.overrideCursor().shape() == wait_cursor), 'Cursor should be waiting'
    assert (pg.Qt.QtWidgets.QApplication.overrideCursor() is None), 'No override cursor should be set'
class UserInputProtocol(SessionDataProtocol, metaclass=ABCMeta):
    """Session-data interface for remembering user input across requests.

    NOTE(review): the method bodies and decorators appear stripped by
    extraction — these read as abstract interface methods (likely
    @abstractmethod with 'cls' suggesting @classmethod too); confirm
    against the original file.
    """
    def get_previously_entered_for_form(cls, form, input_name, entered_input_type):
        """Return the value previously entered for *input_name* on *form* (interface only)."""
    def save_input_value_for_form(cls, form, input_name, value, entered_input_type):
        """Persist *value* as the entered input for *input_name* on *form* (interface only)."""
    def get_persisted_for_view(cls, view, key, value_type):
        """Return the value persisted under *key* for *view* (interface only)."""
    def add_persisted_for_view(cls, view, key, value, value_type):
        """Persist *value* under *key* for *view* (interface only)."""
    def remove_persisted_for_view(cls, view, key):
        """Remove the value persisted under *key* for *view* (interface only)."""
def test_type_param() -> None:
    """PEP 695 type parameters on functions and classes parse into TypeVar nodes."""
    for node in (extract_node('def func[T]() -> T: ...'), extract_node('class MyClass[T]: ...')):
        first_param = node.type_params[0]
        assert isinstance(first_param, TypeVar)
        assert first_param.name.name == 'T'
        assert first_param.bound is None
class Conv1d(_ConvBase):
    """1D convolution block (conv + optional batch-norm + activation).

    Thin specialization of _ConvBase that fixes the convolution and
    batch-norm types to their 1D variants.
    """

    def __init__(self, in_size: int, out_size: int, *, kernel_size: int=1, stride: int=1, padding: int=0, activation=nn.ReLU(inplace=True), bn: bool=False, init=nn.init.kaiming_normal_, bias: bool=True, preact: bool=False, name: str=''):
        # Delegate everything to the shared base; only conv/batch_norm differ
        # between the Conv1d/Conv2d/Conv3d siblings.
        super().__init__(
            in_size,
            out_size,
            kernel_size,
            stride,
            padding,
            activation,
            bn,
            init,
            conv=nn.Conv1d,
            batch_norm=BatchNorm1d,
            bias=bias,
            preact=preact,
            name=name,
        )
def disable_all_quantizers(model: torch.nn.Module) -> Handle:
    """Temporarily disable every enabled quantizer in *model*.

    Collects all parameter/input/output quantizers, disables the ones that
    are currently enabled, and returns a Handle whose cleanup re-enables
    exactly those quantizers (ones that were already disabled are untouched).

    :param model: Model whose quantizers should be disabled.
    :return: Handle that restores the previous enabled state when released.
    """
    (param_quantizers, input_quantizers, output_quantizers) = get_all_quantizers(model)
    all_quantizers = ((param_quantizers + input_quantizers) + output_quantizers)
    # Remember only the quantizers we actually flip so cleanup is a no-op
    # for quantizers that were disabled before this call.
    active_quantizers = {quantizer for quantizer in all_quantizers if quantizer.enabled}

    def cleanup():
        """Re-enable the quantizers this call disabled."""
        for quantizer in active_quantizers:
            quantizer.enabled = True

    try:
        for quantizer in active_quantizers:
            quantizer.enabled = False
        return Handle(cleanup)
    except BaseException:  # was a bare 'except:': same semantics, explicit now
        # Undo any partial disabling before propagating the error.
        cleanup()
        raise
def demo():
    """Demonstrate the source transformer on an inline snippet and a stdlib module."""
    import textwrap
    # Inline example: a tiny script whose source is transformed, printed,
    # compiled and executed.
    hello_source = textwrap.dedent('\n    def hello():\n        try:\n            hello_ = "Hello"\n            world_ = "World"\n            print(f"{hello_}, {world_}!")\n        except TypeError as exc:\n            print("failed: {}".format(exc))\n    \n    if __name__ == "__main__":\n        hello()\n    ')
    transformed = transformer.transform_string(hello_source)
    print(transformed)
    code = compile(transformed, 'inline source', mode='exec')
    exec(code)
    if 1:
        # Second example: transform a real stdlib module's source and print
        # the result (printed only, not executed).
        import unittest.util as lib_module
        import inspect
        source = inspect.getsource(lib_module)
        transformed = transformer.transform_string(source)
        print()
        print(transformed)
# NOTE(review): the next line looks truncated by extraction — in mypy this is
# the '@mypyc_attr(allow_interpreted_subclasses=True)' class decorator;
# confirm against the original file.
_attr(allow_interpreted_subclasses=True)
class StatementVisitor(Generic[T]):
    """Visitor interface over mypy statement nodes.

    Each visit_* method receives one statement node kind and returns a T.
    All methods here are no-op stubs (they return None); concrete visitors
    override the ones they care about.
    """
    def visit_assignment_stmt(self, o: mypy.nodes.AssignmentStmt) -> T:
        pass
    def visit_for_stmt(self, o: mypy.nodes.ForStmt) -> T:
        pass
    def visit_with_stmt(self, o: mypy.nodes.WithStmt) -> T:
        pass
    def visit_del_stmt(self, o: mypy.nodes.DelStmt) -> T:
        pass
    def visit_func_def(self, o: mypy.nodes.FuncDef) -> T:
        pass
    def visit_overloaded_func_def(self, o: mypy.nodes.OverloadedFuncDef) -> T:
        pass
    def visit_class_def(self, o: mypy.nodes.ClassDef) -> T:
        pass
    def visit_global_decl(self, o: mypy.nodes.GlobalDecl) -> T:
        pass
    def visit_nonlocal_decl(self, o: mypy.nodes.NonlocalDecl) -> T:
        pass
    def visit_decorator(self, o: mypy.nodes.Decorator) -> T:
        pass
    def visit_import(self, o: mypy.nodes.Import) -> T:
        pass
    def visit_import_from(self, o: mypy.nodes.ImportFrom) -> T:
        pass
    def visit_import_all(self, o: mypy.nodes.ImportAll) -> T:
        pass
    def visit_block(self, o: mypy.nodes.Block) -> T:
        pass
    def visit_expression_stmt(self, o: mypy.nodes.ExpressionStmt) -> T:
        pass
    def visit_operator_assignment_stmt(self, o: mypy.nodes.OperatorAssignmentStmt) -> T:
        pass
    def visit_while_stmt(self, o: mypy.nodes.WhileStmt) -> T:
        pass
    def visit_return_stmt(self, o: mypy.nodes.ReturnStmt) -> T:
        pass
    def visit_assert_stmt(self, o: mypy.nodes.AssertStmt) -> T:
        pass
    def visit_if_stmt(self, o: mypy.nodes.IfStmt) -> T:
        pass
    def visit_break_stmt(self, o: mypy.nodes.BreakStmt) -> T:
        pass
    def visit_continue_stmt(self, o: mypy.nodes.ContinueStmt) -> T:
        pass
    def visit_pass_stmt(self, o: mypy.nodes.PassStmt) -> T:
        pass
    def visit_raise_stmt(self, o: mypy.nodes.RaiseStmt) -> T:
        pass
    def visit_try_stmt(self, o: mypy.nodes.TryStmt) -> T:
        pass
    def visit_match_stmt(self, o: mypy.nodes.MatchStmt) -> T:
        pass
class KannelBackendView(BaseHttpBackendView):
    """HTTP backend view for the Kannel SMS gateway.

    Kannel delivers incoming messages via GET, so GET requests are routed to
    the POST handler and the form is bound to request.GET.
    """
    # NOTE(review): the attribute name on the next line was lost in
    # extraction — given the ['get'] value and the Django CBV convention this
    # is almost certainly 'http_method_names'; confirm against the original.
    = ['get']
    form_class = KannelForm
    def get(self, *args, **kwargs):
        # Kannel uses GET for message delivery; reuse the POST handling path.
        return self.post(*args, **kwargs)
    def get_form_kwargs(self):
        # Bind the form to the GET querystring instead of POST data.
        kwargs = super(KannelBackendView, self).get_form_kwargs()
        kwargs['data'] = self.request.GET
        return kwargs
    def form_valid(self, form):
        # Let the base class persist the message, then ACK with an empty body.
        super(KannelBackendView, self).form_valid(form)
        return HttpResponse('')
def test_std_color_re():
    """Foreground/background SGR regexes match only their own color family."""
    # Every standard foreground sequence matches FG and never BG.
    for fg in ansi.Fg:
        sequence = str(fg)
        assert ansi.STD_FG_RE.match(sequence)
        assert not ansi.STD_BG_RE.match(sequence)
    # And symmetrically for the background colors.
    for bg in ansi.Bg:
        sequence = str(bg)
        assert ansi.STD_BG_RE.match(sequence)
        assert not ansi.STD_FG_RE.match(sequence)
    # Extended-color introducers (38/48) must not match the standard patterns.
    assert not ansi.STD_FG_RE.match(f'{ansi.CSI}38m')
    assert not ansi.STD_BG_RE.match(f'{ansi.CSI}48m')
class TestLogger(logging.Logger):
    """Logger subclass adding a TRACE level and argv-driven verbosity setup."""
    # NOTE(review): 'initialize' takes 'cls' but no @classmethod decorator is
    # visible — the decorator was likely lost in extraction; confirm.
    def initialize(cls):
        # Register the custom TRACE level name and install this class as the
        # factory for subsequent logging.getLogger() calls.
        logging.addLevelName(TRACE, 'TRACE')
        logging.setLoggerClass(cls)
        # Root-logger level from the command line:
        # -v/--verbose -> TRACE, -q/--quiet -> INFO, otherwise untouched.
        if any(((i in sys.argv) for i in ('-v', '--verbose'))):
            logging.getLogger().setLevel(TRACE)
        elif any(((i in sys.argv) for i in ('-q', '--quiet'))):
            logging.getLogger().setLevel(logging.INFO)
    def trace(self, msg, *args, **kwargs):
        """Log *msg* at the custom TRACE level."""
        return self.log(TRACE, msg, *args, **kwargs)
    # Aliases so callers can use familiar method names.
    debugall = trace
    verbose = logging.Logger.info
def create_cityscapes_label_colormap():
    """Return the Cityscapes label colormap as a (256, 3) uint8 array.

    Rows 0-18 hold the 19 Cityscapes evaluation-class colors; all remaining
    rows stay zero (black).
    """
    class_colors = (
        (128, 64, 128),   # 0
        (244, 35, 232),   # 1
        (70, 70, 70),     # 2
        (102, 102, 156),  # 3
        (190, 153, 153),  # 4
        (153, 153, 153),  # 5
        (250, 170, 30),   # 6
        (220, 220, 0),    # 7
        (107, 142, 35),   # 8
        (152, 251, 152),  # 9
        (70, 130, 180),   # 10
        (220, 20, 60),    # 11
        (255, 0, 0),      # 12
        (0, 0, 142),      # 13
        (0, 0, 70),       # 14
        (0, 60, 100),     # 15
        (0, 80, 100),     # 16
        (0, 0, 230),      # 17
        (119, 11, 32),    # 18
    )
    colormap = np.zeros((256, 3), dtype=np.uint8)
    colormap[:len(class_colors)] = class_colors
    return colormap
class ProjectCommitDiscussionNoteManager(GetMixin, CreateMixin, UpdateMixin, DeleteMixin, RESTManager):
    """Manager for notes on a project commit discussion (GitLab REST API)."""
    # URL template; placeholders are filled from the parent objects via
    # _from_parent_attrs below.
    _path = '/projects/{project_id}/repository/commits/{commit_id}/discussions/{discussion_id}/notes'
    _obj_cls = ProjectCommitDiscussionNote
    _from_parent_attrs = {'project_id': 'project_id', 'commit_id': 'commit_id', 'discussion_id': 'id'}
    # Attributes accepted by create()/update() calls.
    _create_attrs = RequiredOptional(required=('body',), optional=('created_at', 'position'))
    _update_attrs = RequiredOptional(required=('body',))
    def get(self, id: Union[(str, int)], lazy: bool=False, **kwargs: Any) -> ProjectCommitDiscussionNote:
        """Retrieve one note; narrows the mixin's return type for type checkers."""
        return cast(ProjectCommitDiscussionNote, super().get(id=id, lazy=lazy, **kwargs))
def _maybe_typed_value(val: Union[(type, str)]) -> Value:
    """Wrap *val* in the appropriate Value subclass, special-casing a few types."""
    if val is type(None):
        # NoneType is modeled as the concrete None value.
        return KnownValue(None)
    if val is Hashable:
        return _HashableValue(val)
    if (val is Callable) or is_typing_name(val, 'Callable'):
        # A bare Callable means "any callable signature".
        return CallableValue(ANY_SIGNATURE)
    return TypedValue(val)
def test_multiple_macros(base_app):
    """Two macros can coexist; the verbose one prints more help text."""
    first, second = 'h1', 'h2'
    run_cmd(base_app, 'macro create {} help'.format(first))
    run_cmd(base_app, 'macro create {} help -v'.format(second))
    (out_first, _) = run_cmd(base_app, first)
    verify_help_text(base_app, out_first)
    (out_second, _) = run_cmd(base_app, second)
    verify_help_text(base_app, out_second)
    # The verbose macro's output must be strictly longer.
    assert len(out_second) > len(out_first)
class DistillDiffPruningLoss_dynamic(torch.nn.Module):
def __init__(self, teacher_model, base_criterion: torch.nn.Module, ratio_weight=2.0, distill_weight=0.5, dynamic=False, pruning_loc=[3, 6, 9], keep_ratio=[0.75, 0.5, 0.25], clf_weight=0, mse_token=False, print_mode=True):
super().__init__()
self.teacher_model = teacher_model
self.base_criterion = base_criterion
self.clf_weight = clf_weight
self.pruning_loc = pruning_loc
self.keep_ratio = keep_ratio
self.count = 0
self.print_mode = print_mode
self.cls_loss = 0
self.ratio_loss = 0
self.cls_distill_loss = 0
self.token_distill_loss = 0
self.mse_token = mse_token
self.dynamic = dynamic
self.ratio_weight = ratio_weight
self.distill_weight = distill_weight
print('ratio_weight=', ratio_weight, 'distill_weight', distill_weight)
if dynamic:
print('using dynamic loss')
def forward(self, inputs, outputs, labels):
(pred, token_pred, mask, out_pred_score) = outputs
pred_loss = 0.0
ratio = self.keep_ratio
for (i, score) in enumerate(out_pred_score):
if self.dynamic:
pos_ratio = score.mean()
else:
pos_ratio = score.mean(1)
pred_loss = (pred_loss + ((pos_ratio - ratio[i]) ** 2).mean())
cls_loss = self.base_criterion(pred, labels)
with torch.no_grad():
(cls_t, token_t) = self.teacher_model(inputs)
cls_kl_loss = F.kl_div(F.log_softmax(pred, dim=(- 1)), F.log_softmax(cls_t, dim=(- 1)), reduction='batchmean', log_target=True)
(B, N, C) = token_pred.size()
assert (mask.numel() == (B * N))
bool_mask = (mask.reshape((B * N)) > 0.5)
loss_part = []
token_pred = token_pred.reshape((B * N), C)
token_t = token_t.reshape((B * N), C)
if (mask.sum() < 0.1):
token_kl_loss = token_pred.new(1).fill_(0.0)
else:
token_t = token_t[bool_mask]
token_pred = token_pred[bool_mask]
if self.mse_token:
token_kl_loss = torch.pow((token_pred - token_t), 2).mean()
else:
token_kl_loss = F.kl_div(F.log_softmax(token_pred, dim=(- 1)), F.log_softmax(token_t, dim=(- 1)), reduction='batchmean', log_target=True)
loss = ((((self.clf_weight * cls_loss) + ((self.ratio_weight * pred_loss) / len(self.pruning_loc))) + (self.distill_weight * cls_kl_loss)) + (self.distill_weight * token_kl_loss))
if self.print_mode:
self.cls_loss += cls_loss.item()
self.ratio_loss += pred_loss.item()
self.cls_distill_loss += cls_kl_loss.item()
self.token_distill_loss += token_kl_loss.item()
loss_part.append(cls_loss)
loss_part.append(pred_loss)
loss_part.append(cls_kl_loss)
loss_part.append(token_kl_loss)
self.count += 1
if (self.count == 100):
print(('loss info: cls_loss=%.4f, ratio_loss=%.4f, cls_kl=%.4f, token_kl=%.4f' % ((self.cls_loss / 100), (self.ratio_loss / 100), (self.cls_distill_loss / 100), (self.token_distill_loss / 100))))
self.count = 0
self.cls_loss = 0
self.ratio_loss = 0
self.cls_distill_loss = 0
self.token_distill_loss = 0
return (loss, loss_part) |
def test_regress(ansi_bar: ProgressBar, ansi_io: BufferedIO) -> None:
    """advance() with a negative step moves the bar backwards."""
    ansi_bar.start()
    ansi_bar.advance()
    ansi_bar.advance()
    ansi_bar.advance(-1)
    # Four frames: start, two forward steps, then one step back.
    frames = [' 0 [>]', ' 1 [->]', ' 2 [-->]', ' 1 [->]']
    assert generate_output(frames) == ansi_io.fetch_error()
class WarmupStepLR(torch.optim.lr_scheduler._LRScheduler):
    """Step LR decay with an initial warmup phase.

    For epochs below ``warmup_iters`` the base LR is scaled by a warmup
    factor: 'constant' keeps it at ``warmup_factor``; 'linear' ramps it from
    ``warmup_factor`` up to 1.  Independently, the LR is multiplied by
    ``gamma ** (last_epoch // step_size)``.

    Raises ValueError for an unknown ``warmup_method``.
    """

    def __init__(self, optimizer, step_size=2, gamma=0.9, warmup_factor=(1.0 / 3), warmup_iters=500, warmup_method='linear', last_epoch=(- 1)):
        if (warmup_method not in ('constant', 'linear')):
            # Bug fix: the two message fragments were concatenated without a
            # separator ("...acceptedgot {}").
            raise ValueError("Only 'constant' or 'linear' warmup_method accepted, got {}".format(warmup_method))
        self.step_size = step_size
        self.gamma = gamma
        self.warmup_factor = warmup_factor
        self.warmup_iters = warmup_iters
        self.warmup_method = warmup_method
        super(WarmupStepLR, self).__init__(optimizer, last_epoch)

    def get_lr(self):
        warmup_factor = 1
        if (self.last_epoch < self.warmup_iters):
            if (self.warmup_method == 'constant'):
                warmup_factor = self.warmup_factor
            elif (self.warmup_method == 'linear'):
                # Linear ramp: warmup_factor -> 1 over warmup_iters epochs.
                alpha = (self.last_epoch / self.warmup_iters)
                warmup_factor = ((self.warmup_factor * (1 - alpha)) + alpha)
        return [((base_lr * warmup_factor) * (self.gamma ** (self.last_epoch // self.step_size))) for base_lr in self.base_lrs]
def get_last_epoch() -> str:
    """Return the epoch key describing the most recent convergence-log entry."""
    job = constants.job_type
    if job == 'fine-tune':
        # Fine-tuning: the key is the first two whitespace-separated tokens
        # of the last row (e.g. 'Epoch 12'); fall back to 'Step init' when
        # the log is empty or malformed.
        log_path = constants.job_dir + 'convergence.log'
        try:
            (raw_key, _, _) = read_row(path=log_path, row=(- 1), col=(0, 1, 2))
            tokens = raw_key.split(' ')
            epoch_key = ' '.join(tokens[:2])
        except (ValueError, IndexError):
            epoch_key = 'Step init'
        return epoch_key
    # All other job types: read the last logged key (default 'Epoch 1'),
    # then override it with a synthetic key for generation/eval runs.
    log_path = constants.job_dir + 'convergence.log'
    try:
        (epoch_key, _, _) = read_row(path=log_path, row=(- 1), col=(0, 1, 2))
    except ValueError:
        epoch_key = 'Epoch 1'
    generation_epoch = constants.generation_epoch
    if job == 'generate':
        epoch_key = f'Epoch GEN{generation_epoch}'
    elif job == 'test':
        epoch_key = f'Epoch EVAL{generation_epoch}'
    return epoch_key
def test(args):
    """Run segmentation inference over a dataset split.

    In a 'val' mode, accumulates pixAcc/mIoU metrics; otherwise writes one
    predicted palette PNG per input image into args.save_folder.
    """
    outdir = args.save_folder
    if (not os.path.exists(outdir)):
        os.makedirs(outdir)
    # ImageNet mean/std normalization for the input images.
    input_transform = transform.Compose([transform.ToTensor(), transform.Normalize([0.485, 0.456, 0.406], [0.229, 0.224, 0.225])])
    testset = get_segmentation_dataset(args.dataset, split=args.split, mode=args.mode, transform=input_transform)
    loader_kwargs = ({'num_workers': args.workers, 'pin_memory': True} if args.cuda else {})
    test_data = data.DataLoader(testset, batch_size=args.test_batch_size, drop_last=False, shuffle=False, collate_fn=test_batchify_fn, **loader_kwargs)
    # Either a pretrained model-zoo model or one built from the CLI options.
    if (args.model_zoo is not None):
        model = get_model(args.model_zoo, pretrained=True)
    else:
        model = get_segmentation_model(args.model, dataset=args.dataset, backbone=args.backbone, dilated=args.dilated, lateral=args.lateral, jpu=args.jpu, aux=args.aux, se_loss=args.se_loss, norm_layer=BatchNorm, base_size=args.base_size, crop_size=args.crop_size)
        # A resume checkpoint is mandatory when not using the model zoo.
        if ((args.resume is None) or (not os.path.isfile(args.resume))):
            raise RuntimeError("=> no checkpoint found at '{}'".format(args.resume))
        checkpoint = torch.load(args.resume)
        model.load_state_dict(checkpoint['state_dict'])
        print("=> loaded checkpoint '{}' (epoch {})".format(args.resume, checkpoint['epoch']))
    print(model)
    # Multi-scale evaluation scales ('citys' gets an extra 2.25 scale);
    # single-scale [1.0] unless --ms was requested.
    scales = ([0.5, 0.75, 1.0, 1.25, 1.5, 1.75, 2.0, 2.25] if (args.dataset == 'citys') else [0.5, 0.75, 1.0, 1.25, 1.5, 1.75])
    if (not args.ms):
        scales = [1.0]
    evaluator = MultiEvalModule(model, testset.num_class, scales=scales, flip=args.ms).cuda()
    evaluator.eval()
    metric = utils.SegmentationMetric(testset.num_class)
    tbar = tqdm(test_data)
    for (i, (image, dst)) in enumerate(tbar):
        if ('val' in args.mode):
            # Validation: dst holds ground-truth masks; update running metrics.
            with torch.no_grad():
                predicts = evaluator.parallel_forward(image)
                metric.update(dst, predicts)
                (pixAcc, mIoU) = metric.get()
                tbar.set_description(('pixAcc: %.4f, mIoU: %.4f' % (pixAcc, mIoU)))
        else:
            # Test: dst holds the image paths; save one palette PNG per input.
            with torch.no_grad():
                outputs = evaluator.parallel_forward(image)
                predicts = [testset.make_pred(torch.max(output, 1)[1].cpu().numpy()) for output in outputs]
            for (predict, impath) in zip(predicts, dst):
                mask = utils.get_mask_pallete(predict, args.dataset)
                outname = (os.path.splitext(impath)[0] + '.png')
                mask.save(os.path.join(outdir, outname))
class STM32F1xxSpi(QlConnectivityPeripheral):
    """STM32F1xx SPI peripheral model: register block plus user-I/O bridging."""
    class Type(ctypes.Structure):
        # Memory-mapped SPI register layout; field order defines the offsets.
        _fields_ = [('CR1', ctypes.c_uint32), ('CR2', ctypes.c_uint32), ('SR', ctypes.c_uint32), ('DR', ctypes.c_uint32), ('CRCPR', ctypes.c_uint32), ('RXCRCR', ctypes.c_uint32), ('TXCRCR', ctypes.c_uint32), ('I2SCFGR', ctypes.c_uint32)]
    def __init__(self, ql, label, intn=None):
        """Create the peripheral with initial register values and an optional IRQ number."""
        super().__init__(ql, label)
        self.instance = self.struct(CR1=0, CR2=0, SR=11, DR=12, CRCPR=7, RXCRCR=0, TXCRCR=0, I2SCFGR=0)
        self.intn = intn
    # NOTE(review): this bare '()' looks like the residue of a stripped
    # decorator (e.g. '@QlPeripheral.monitor()'); confirm against the original.
    ()
    def read(self, offset: int, size: int) -> int:
        """Read *size* bytes at *offset*; DR reads drain pending user input first."""
        if self.contain(self.struct.DR, offset, size):
            if self.has_input():
                return self.recv_from_user()
        # Default path: raw little-endian read from the backing register struct.
        buf = ctypes.create_string_buffer(size)
        ctypes.memmove(buf, (ctypes.addressof(self.instance) + offset), size)
        data = int.from_bytes(buf.raw, byteorder='little')
        return data
    # NOTE(review): bare '()' — likely a stripped decorator; confirm.
    ()
    def write(self, offset: int, size: int, value: int):
        """Write *value* at *offset*; DR writes are forwarded to the user side."""
        if self.contain(self.struct.DR, offset, size):
            self.send_to_user(value)
        else:
            data = value.to_bytes(size, 'little')
            ctypes.memmove((ctypes.addressof(self.instance) + offset), data, size)
    def send_interrupt(self):
        # Raise this peripheral's interrupt line via the NVIC.
        self.ql.hw.nvic.set_pending(self.intn)
class Linear(nn.Linear, DiffEqModule):
    """nn.Linear adapted to the (t, y, params) differential-equation interface."""

    def __init__(self, in_features: int, out_features: int):
        super(Linear, self).__init__(in_features=in_features, out_features=out_features)

    def forward(self, t, y, params: Optional[List]=None):
        # Use the module's own weights unless explicit (w, b) params are given.
        if params is None:
            weight, bias = self.weight, self.bias
        else:
            weight, bias = params
        return F.linear(y, weight, bias)
class TestDataFrame(unittest.TestCase):
def base_test_internals_empty(self):
empty = ta.dataframe(device=self.device)
self.assertTrue(isinstance(empty, DataFrame))
self.assertEqual(empty.length, 0)
self.assertEqual(empty.null_count, 0)
self.assertEqual(empty.columns, [])
def base_test_internals_full(self):
df = ta.dataframe(dt.Struct([dt.Field('a', dt.int64)]), device=self.device)
for i in range(4):
df = df.append([(i,)])
for i in range(4):
self.assertEqual(df[i], (i,))
self.assertEqual(df.length, 4)
self.assertEqual(df.null_count, 0)
self.assertEqual(list(df), list(((i,) for i in range(4))))
m = df[0:len(df)]
self.assertEqual(list(df[0:len(df)]), list(((i,) for i in range(4))))
    def base_test_internals_full_nullable(self):
        """Null rows, per-row validity, and appending to a nullable struct frame."""
        # Nullable struct requires nullable field dtypes: plain int64 fields reject.
        with self.assertRaises(TypeError):
            df = ta.dataframe(dt.Struct([dt.Field('a', dt.int64), dt.Field('b', dt.int64)], nullable=True), device=self.device)
        df = ta.dataframe(dt.Struct([dt.Field('a', dt.int64.with_null()), dt.Field('b', dt.Int64(True))], nullable=True), device=self.device)
        # Appending None rows: each shows as None, invalid, and bumps null_count.
        for i in [0, 1, 2]:
            df = df.append([None])
            self.assertEqual(df[i], None)
            self.assertEqual(df.is_valid_at(i), False)
            self.assertEqual(df.null_count, (i + 1))
        # A real row is valid and readable back.
        for i in [3]:
            df = df.append([(i, (i * i))])
            self.assertEqual(df[i], (i, (i * i)))
            self.assertEqual(df.is_valid_at(i), True)
        self.assertEqual(df.length, 4)
        self.assertEqual(df.null_count, 3)
        # Columns and the validity mask all track the frame's length.
        self.assertEqual(len(df['a']), 4)
        self.assertEqual(len(df['b']), 4)
        self.assertEqual(len(df._mask), 4)
        self.assertEqual(list(df), [None, None, None, (3, 9)])
        # Batch append of multiple rows.
        df = df.append([(4, (4 * 4)), (5, (5 * 5))])
        self.assertEqual(list(df), [None, None, None, (3, 9), (4, 16), (5, 25)])
        self.assertEqual(len(df), 6)
def base_test_internals_column_indexing(self):
df = ta.dataframe()
df['a'] = ta.column(([None] * 3), dtype=dt.Int64(nullable=True), device=self.device)
df['b'] = ta.column([1, 2, 3], device=self.device)
df['c'] = ta.column([1.1, 2.2, 3.3], device=self.device)
self.assertEqual(list(df['a']), ([None] * 3))
self.assertEqual(df[['a', 'c']].columns, ['a', 'c'])
self.assertEqual(list(df[['a', 'c']]['a']), ([None] * 3))
numpy.testing.assert_almost_equal(list(df[['a', 'c']]['c']), [1.1, 2.2, 3.3])
def base_test_construction(self):
data1 = [[(1, 'a'), (2, 'b')], [(3, 'c'), (4, 'd'), (5, 'e')]]
with self.assertRaises(TypeError) as ex:
a = ta.column(data1, device=self.device)
self.assertTrue(('Cannot infer type from nested Python tuple' in str(ex.exception)), f'Exception message is not as expected: {str(ex.exception)}')
a = ta.column(data1, dtype=dt.List(dt.Struct([dt.Field('col1', dt.int64), dt.Field('col2', dt.string)])), device=self.device)
self.assertEqual(list(a), data1)
data2 = {'a': list(range(10)), 'b': list(range(10, 20))}
dtype2 = dt.Struct([dt.Field('a', dt.int32), dt.Field('b', dt.int16)])
expected2 = list(zip(*data2.values()))
df = ta.dataframe(data2, device=self.device)
self.assertEqual(list(df), expected2)
self.assertEqual(df.dtype, dt.Struct([dt.Field('a', dt.int64), dt.Field('b', dt.int64)]))
df = ta.dataframe(data2, dtype2, device=self.device)
self.assertEqual(list(df), expected2)
self.assertEqual(df.dtype, dtype2)
data3 = {'a': [1, 2, 3], 'b': [(1, 'a'), (2, 'b'), (3, 'c')]}
dtype3 = dt.Struct([dt.Field('a', dt.int64), dt.Field('b', dt.Struct([dt.Field('b1', dt.int64), dt.Field('b2', dt.string)]))])
with self.assertRaises(TypeError) as ex:
df = ta.dataframe(data3, device=self.device)
self.assertTrue(('Cannot infer type from nested Python tuple' in str(ex.exception)), f'Excpeion message is not as expected: {str(ex.exception)}')
df = ta.dataframe(data3, dtype3, device=self.device)
self.assertEqual(list(df), list(zip(*data3.values())))
self.assertEqual(df.dtype, dtype3)
data4 = [(1, 'a'), (2, 'b'), (3, 'c')]
columns4 = ['t1', 't2']
dtype4 = dt.Struct([dt.Field('t1', dt.int64), dt.Field('t2', dt.string)])
with self.assertRaises(TypeError) as ex:
df = ta.dataframe(data4, device=self.device)
self.assertTrue(('DataFrame construction from tuples requires' in str(ex.exception)), f'Excpeion message is not as expected: {str(ex.exception)}')
df4 = ta.dataframe(data4, columns=columns4)
self.assertEqual(list(df4), data4)
self.assertEqual(df4.dtype, dtype4)
df4 = ta.dataframe(data4, dtype=dtype4)
self.assertEqual(list(df4), data4)
self.assertEqual(df4.dtype, dtype4)
IntAndStrType = NamedTuple('IntAndStr', [('t1', int), ('t2', str)])
data5 = [IntAndStrType(t1=1, t2='a'), IntAndStrType(t1=2, t2='b'), IntAndStrType(t1=3, t2='c')]
dtype = dt.Struct([dt.Field('t1', dt.int64), dt.Field('t2', dt.string)])
df5 = ta.dataframe(data5, device=self.device)
self.assertEqual(list(df5), data5)
self.assertEqual(df5.dtype, dtype)
data6 = [None, IntAndStrType(t1=2, t2='b'), IntAndStrType(t1=3, t2='c')]
with self.assertRaises(TypeError) as ex:
df = ta.dataframe(data6, device=self.device)
self.assertTrue((f'a tuple of type {str(dtype)} is required, got None' in str(ex.exception)), f'Excpeion message is not as expected: {str(ex.exception)}')
    def base_test_infer(self):
        """Type inference for dict/list construction and column assignment."""
        # ints infer to int64; a float list containing None to nullable float32.
        df = ta.dataframe({'a': [1, 2, 3], 'b': [1.0, None, 3]}, device=self.device)
        self.assertEqual(df.columns, ['a', 'b'])
        self.assertEqual(df.dtype, dt.Struct([dt.Field('a', dt.int64), dt.Field('b', dt.Float32(nullable=True))]))
        self.assertEqual(df.dtype.get('a'), dt.int64)
        self.assertEqual(list(df), list(zip([1, 2, 3], [1.0, None, 3])))
        # Column assignment onto an empty frame keeps the explicit dtype.
        df = ta.dataframe(device=self.device)
        self.assertEqual(len(df), 0)
        df['a'] = ta.column([1, 2, 3], dtype=dt.int32, device=self.device)
        self.assertEqual(df._dtype.get('a'), dt.int32)
        self.assertEqual(len(df), 3)
        # Assigning a plain Python list infers its dtype too.
        df['b'] = [1.0, None, 3]
        self.assertEqual(len(df), 3)
        # Tuple rows with explicit column names.
        df = ta.dataframe([(1, 2), (2, 3), (4, 5)], columns=['a', 'b'], device=self.device)
        self.assertEqual(list(df), [(1, 2), (2, 3), (4, 5)])
        # Nested struct dtype passed explicitly.
        B = dt.Struct([dt.Field('b1', dt.int64), dt.Field('b2', dt.int64)])
        A = dt.Struct([dt.Field('a', dt.int64), dt.Field('b', B)])
        df = ta.dataframe([(1, (2, 22)), (2, (3, 33)), (4, (5, 55))], dtype=A, device=self.device)
        self.assertEqual(list(df), [(1, (2, 22)), (2, (3, 33)), (4, (5, 55))])
def _identity(*args):
return [*args]
    def base_test_map_where_filter(self):
        """map() with dict/function mappers, na_action handling, and filter()."""
        df = ta.dataframe(device=self.device)
        df['a'] = [1, 2, 3]
        df['b'] = [11, 22, 33]
        df['c'] = ['a', 'b', 'C']
        df['d'] = [100, 200, None]
        # Dict mapper: unmapped values become None; None can itself be a key.
        self.assertEqual(list(df.map({100: 1000}, columns=['d'], dtype=dt.Int64(nullable=True))), [1000, None, None])
        self.assertEqual(list(df.map({None: 1, 100: 1000}, columns=['d'], dtype=dt.Int64(nullable=True))), [1000, None, 1])
        # Function mapper over one or several (possibly repeated) columns.
        self.assertEqual(list(df.map(TestDataFrame._identity, columns=['a', 'a'], dtype=dt.List(dt.Int64(nullable=True)))), [[1, 1], [2, 2], [3, 3]])
        self.assertEqual(list(df.map(TestDataFrame._identity, columns=['a', 'a', 'b'], dtype=dt.List(dt.Int64(nullable=True)))), [[1, 1, 11], [2, 2, 22], [3, 3, 33]])
        self.assertEqual(list(df.map(TestDataFrame._identity, columns=['a', 'a', 'b', 'b'], dtype=dt.List(dt.Int64(nullable=True)))), [[1, 1, 11, 11], [2, 2, 22, 22], [3, 3, 33, 33]])
        self.assertEqual(list(df.map(TestDataFrame._identity, columns=['a', 'd'], dtype=dt.List(dt.Int64(nullable=True)))), [[1, 100], [2, 200], [3, None]])
        # na_action='ignore' yields None for rows containing nulls;
        # any other na_action value is rejected.
        self.assertEqual(list(df.map(TestDataFrame._identity, columns=['a', 'd'], na_action='ignore', dtype=dt.List(dt.Int64(nullable=True)))), [[1, 100], [2, 200], None])
        with self.assertRaises(TypeError):
            list(df.map(TestDataFrame._identity, columns=['a', 'd'], na_action='foobar', dtype=dt.List(dt.Int64(nullable=True))))
        # filter() keeps whole rows where the predicate on column 'c' holds.
        self.assertEqual(list(df.filter(str.islower, columns=['c'])), [(1, 11, 'a', 100), (2, 22, 'b', 200)])
    def base_test_transform(self):
        """transform() over batches: length checks, dtype/annotation handling."""
        df = ta.dataframe(device=self.device)
        df['a'] = [1, 2, 3]
        df['b'] = [11, 22, 33]
        self.assertEqual(list(df['a'].transform((lambda l: [(x + 1) for x in l]))), [2, 3, 4])
        # A batch function returning a different length is rejected.
        with self.assertRaises(ValueError):
            df['a'].transform((lambda l: ([(- 1)] + [(x + 1) for x in l])))
        def batch_str(a):
            return list(map(str, a))
        # Explicit result dtype.
        self.assertEqual(list(df['a'].transform(batch_str, dtype=dt.string)), ['1', '2', '3'])
        def batch_str_ann(a) -> List[List[str]]:
            assert isinstance(a, list)
            return [([str(x)] * x) for x in a]
        # Result dtype taken from the return annotation; format='python'
        # passes the batch as a plain Python list.
        self.assertEqual(list(df['a'].transform(batch_str_ann, format='python')), [['1'], ['2', '2'], ['3', '3', '3']])
        # Without format='python' the batch is not a list, so the assert fires.
        with self.assertRaises(AssertionError):
            df['a'].transform(batch_str_ann)
        def myadd(a, b):
            return [(x + y) for (x, y) in zip(a, b)]
        # Multi-column transform with explicit dtype or a type hint.
        self.assertEqual(list(df.transform(myadd, columns=['a', 'b'], dtype=dt.int64)), [12, 24, 36])
        def myadd_hint(a, b) -> List[int]:
            return [(x + y) for (x, y) in zip(a, b)]
        self.assertEqual(list(df.transform(myadd_hint, columns=['a', 'b'])), [12, 24, 36])
def base_test_sort_stuff(self):
df = ta.dataframe({'a': [1, 2, 3], 'b': [1.0, None, 3]}, device=self.device)
self.assertEqual(list(df.sort(by='a', ascending=False)), list(zip([3, 2, 1], [3, None, 1.0])))
df = ta.dataframe({'a': [1, 2, 3], 'b': [1.0, None, 3], 'c': [4, 4, 1]}, device=self.device)
self.assertEqual(list(df.sort(by=['c', 'a'], ascending=False)), list([(2, None, 4), (1, 1.0, 4), (3, 3.0, 1)]))
    def base_test_operators(self):
        """Comparison, arithmetic, boolean and bitwise operators on frames/columns."""
        c = ta.dataframe({'a': [0, 1, 3]}, device=self.device)
        d = ta.dataframe({'a': [5, 5, 6]}, device=self.device)
        e = ta.dataframe({'a': [1.0, 1, 7]}, device=self.device)
        # Equality is element-wise and returns a frame of booleans.
        self.assertEqual(list((c == c)), ([(True,)] * 3))
        self.assertEqual(list((c == d)), ([(False,)] * 3))
        self.assertEqual(list((c == 1)), [(i,) for i in [False, True, False]])
        self.assertTrue(((c == 1) == ta.dataframe({'a': [False, True, False]}, device=self.device))['a'].all())
        # Truth-testing a whole frame/column is ambiguous and raises.
        with self.assertRaises(ValueError) as ex:
            assert (not (c == c))
        self.assertTrue(('The truth value of a DataFrameCpu is ambiguous.' in str(ex.exception)), f'Exception message is not as expected: {str(ex.exception)}')
        with self.assertRaises(ValueError) as ex:
            assert (not (c['a'] == c['a']))
        self.assertTrue(('The truth value of a NumericalColumnCpu is ambiguous.' in str(ex.exception)), f'Exception message is not as expected: {str(ex.exception)}')
        # Ordering comparisons against scalars and frames.
        self.assertEqual(list((c <= 2)), [(i,) for i in [True, True, False]])
        self.assertEqual(list((c < d)), [(i,) for i in [True, True, True]])
        self.assertEqual(list((c >= d)), [(i,) for i in [False, False, False]])
        self.assertEqual(list((c > 2)), [(i,) for i in [False, False, True]])
        # Unary and binary arithmetic, including reflected operands.
        self.assertEqual(list((- c)), [(i,) for i in [0, (- 1), (- 3)]])
        self.assertEqual(list((+ (- c))), [(i,) for i in [0, (- 1), (- 3)]])
        self.assertEqual(list((c + 1)), [(i,) for i in [1, 2, 4]])
        self.assertEqual(list((1 + c)), [(i,) for i in [1, 2, 4]])
        self.assertEqual(list((c + d)), [(i,) for i in [5, 6, 9]])
        self.assertEqual(list((c + 1)), [(i,) for i in [1, 2, 4]])
        self.assertEqual(list((1 + c)), [(i,) for i in [1, 2, 4]])
        self.assertEqual(list((c + d)), [(i,) for i in [5, 6, 9]])
        self.assertEqual(list((c - 1)), [(i,) for i in [(- 1), 0, 2]])
        self.assertEqual(list((1 - c)), [(i,) for i in [1, 0, (- 2)]])
        self.assertEqual(list((d - c)), [(i,) for i in [5, 4, 3]])
        self.assertEqual(list((c * 2)), [(i,) for i in [0, 2, 6]])
        self.assertEqual(list((2 * c)), [(i,) for i in [0, 2, 6]])
        self.assertEqual(list((c * d)), [(i,) for i in [0, 5, 18]])
        self.assertEqual(list((c * 2)), [(i,) for i in [0, 2, 6]])
        self.assertEqual(list((2 * c)), [(i,) for i in [0, 2, 6]])
        self.assertEqual(list((c * d)), [(i,) for i in [0, 5, 18]])
        # True/floor division and power.
        self.assertEqual(list((c / 2)), [(i,) for i in [0.0, 0.5, 1.5]])
        self.assertEqual(list((c / d)), [(i,) for i in [0.0, 0., 0.5]])
        self.assertEqual(list((d // 2)), [(i,) for i in [2, 2, 3]])
        self.assertEqual(list((2 // d)), [(i,) for i in [0, 0, 0]])
        self.assertEqual(list((c // d)), [(i,) for i in [0, 0, 0]])
        self.assertEqual(list((e // d)), [(i,) for i in [0.0, 0.0, 1.0]])
        self.assertEqual(list((c ** 2)), [(i,) for i in [0, 1, 9]])
        self.assertEqual(list((2 ** c)), [(i,) for i in [1, 2, 8]])
        self.assertEqual(list((c ** d)), [(i,) for i in [0, 1, 729]])
        # Null propagation through arithmetic, and fill_null afterwards.
        c = ta.dataframe({'a': [0, 1, 3, None]}, device=self.device)
        self.assertEqual(list((c + 1)), [(i,) for i in [1, 2, 4, None]])
        f = ta.column([None, 1, 3, None], device=self.device)
        self.assertEqual(list((c + f).fill_null(100)), [(i,) for i in [100, 2, 6, 100]])
        # Boolean columns: logical and/or/xor/not, including scalar operands.
        g = ta.column([True, False, True, False], device=self.device)
        h = ta.column([False, False, True, True], device=self.device)
        self.assertEqual(list((g & h)), [False, False, True, False])
        self.assertEqual(list((g | h)), [True, False, True, True])
        self.assertEqual(list((g ^ h)), [True, False, False, True])
        self.assertEqual(list((True & g)), [True, False, True, False])
        self.assertEqual(list((True | g)), [True, True, True, True])
        self.assertEqual(list((True ^ g)), [False, True, False, True])
        self.assertEqual(list((~ g)), [False, True, False, True])
        # Integer columns: bitwise and/or/xor/invert.
        i = ta.column([1, 2, 0], device=self.device)
        j = ta.column([3, 2, 3], device=self.device)
        self.assertEqual(list((i & j)), [1, 2, 0])
        self.assertEqual(list((i | j)), [3, 2, 3])
        self.assertEqual(list((i ^ j)), [2, 0, 3])
        self.assertEqual(list((2 & i)), [0, 2, 0])
        self.assertEqual(list((2 | i)), [3, 2, 2])
        self.assertEqual(list((2 ^ i)), [3, 0, 2])
        self.assertEqual(list((~ i)), [(- 2), (- 3), (- 1)])
        # Mixing frames and columns in comparisons.
        u = ta.column(list(range(5)), device=self.device)
        v = (- u)
        uv = ta.dataframe({'a': u, 'b': v}, device=self.device)
        uu = ta.dataframe({'a': u, 'b': u}, device=self.device)
        x = (uv == 1)
        y = (uu['a'] == uv['a'])
        z = (uv == uu)
        z['a']
        (z | x['a'])
        # Column-vs-frame arithmetic broadcasts the column over every frame column.
        k = ta.dataframe({'a': [0, 1, 3, 4], 'b': [0.0, 10.0, 20.0, 30.0]}, device=self.device)
        l = ta.column(list(range(4)), device=self.device)
        self.assertEqual(list((k['a'] + k)), [(0, 0.0), (2, 11.0), (6, 23.0), (8, 34.0)])
        self.assertEqual(list((l + k)), [(0, 0.0), (2, 11.0), (5, 22.0), (7, 33.0)])
        dfa = ta.dataframe({'a': [1.0, 2.0, 3.0], 'b': [11.0, 22.0, 33.0]}, device=self.device)
        dfb = (dfa['a'] * dfa)
        self.assertEqual(list(dfb), [(1.0, 11.0), (4.0, 44.0), (9.0, 99.0)])
        self.assertTrue(isinstance(dfb, DataFrameCpu))
        dfd = ta.dataframe({'a': [1, 3, 7]})
        dfe = (dfd['a'] ** dfd)
        self.assertEqual(list(dfe), [(1,), (27,), (823543,)])
        self.assertTrue(isinstance(dfe, DataFrameCpu))
        cola = ta.column([3, 4, 5], device=self.device)
        self.assertEqual(list((dfa * cola)), [(3.0, 33.0), (8.0, 88.0), (15.0, 165.0)])
        self.assertEqual(list((k['a'] - k)), [(0, 0.0), (0, (- 9.0)), (0, (- 17.0)), (0, (- 26.0))])
        self.assertEqual(list((l - k)), [(0, 0.0), (0, (- 9.0)), ((- 1), (- 18.0)), ((- 1), (- 27.0))])
        # Modulo and floor-division broadcasting.
        dfx = ta.dataframe({'a': [3.0, 31.0, 94.0], 'b': [5.0, 7.0, 33.0]}, device=self.device)
        dfy = (dfx['a'] % dfx)
        self.assertEqual(list(dfy), [(0.0, 3.0), (0.0, 3.0), (0.0, 28.0)])
        self.assertTrue(isinstance(dfy, DataFrameCpu))
        colx = ta.column([3, 4, 5], device=self.device)
        self.assertEqual(list((dfx % colx)), [(0.0, 2.0), (3.0, 3.0), (4.0, 3.0)])
        dfx = ta.dataframe({'a': [3, 4, 6], 'b': [6, 8, 7]}, device=self.device)
        self.assertEqual(list((dfx['a'] // dfx)), [(1, 0), (1, 0), (1, 0)])
        self.assertEqual(list((dfx['b'] // dfx)), [(2, 1), (2, 1), (1, 1)])
        self.assertEqual(list((dfx // dfx['a'])), [(1, 2), (1, 2), (1, 1)])
        self.assertEqual(list((dfx // dfx['b'])), [(0, 1), (0, 1), (0, 1)])
        # Bitwise broadcasting between frames, columns, and scalars.
        dfx = ta.dataframe({'a': [1, 2, 3], 'b': [11, 22, 33]}, device=self.device)
        dfy = (dfx['a'] & dfx)
        self.assertEqual(list(dfy), [(1, 1), (2, 2), (3, 1)])
        self.assertTrue(isinstance(dfy, DataFrameCpu))
        colx = ta.column([1, 2, 3], device=self.device)
        self.assertEqual(list((dfx & colx)), [(1, 1), (2, 2), (3, 1)])
        self.assertEqual(list((1 & colx)), [1, 0, 1])
        dfx = ta.dataframe({'a': [1, 2, 3], 'b': [11, 22, 33]}, device=self.device)
        dfy = (dfx['a'] | dfx)
        self.assertEqual(list(dfy), [(1, 11), (2, 22), (3, 35)])
        self.assertTrue(isinstance(dfy, DataFrameCpu))
        colx = ta.column([1, 2, 3], device=self.device)
        self.assertEqual(list((dfx | colx)), [(1, 11), (2, 22), (3, 35)])
        self.assertEqual(list((1 | colx)), [1, 3, 3])
    def base_test_python_comparison_ops(self):
        """Element-wise comparisons on list-typed columns, vs columns and literals."""
        c = ta.column([[1, 2], [3, 4]])
        d = ta.column([[0, 1], [3, 4]])
        # Equality/inequality against another column and a Python list literal.
        self.assertEqual(list((c == c)), [True, True])
        self.assertEqual(list((c == d)), [False, True])
        self.assertEqual(list((c != c)), [False, False])
        self.assertEqual(list((c != d)), [True, False])
        self.assertEqual(list((c == [3, 4])), [False, True])
        self.assertEqual(list((c != [3, 4])), [True, False])
        # Orderings follow Python's list comparison semantics.
        self.assertEqual(list((c < c)), [False, False])
        self.assertEqual(list((c <= c)), [True, True])
        self.assertEqual(list((c < [3, 4])), [True, False])
        self.assertEqual(list((c <= [3, 4])), [True, True])
        self.assertEqual(list((c > c)), [False, False])
        self.assertEqual(list((c >= c)), [True, True])
        self.assertEqual(list((c > [3, 4])), [False, False])
        self.assertEqual(list((c >= [3, 4])), [False, True])
        # Comparing columns of different lengths is an error.
        with self.assertRaises(TypeError):
            assert (c == c.append([None]))
def base_test_na_handling(self):
    """fill_null / drop_null / drop_duplicates semantics with None entries."""
    c = ta.dataframe({'a': [None, 2, 17.0]}, device=self.device)
    self.assertEqual(list(c.fill_null(99.0)), [(i,) for i in [99.0, 2, 17.0]])
    self.assertEqual(list(c.drop_null()), [(i,) for i in [2, 17.0]])
    c = c.append([(2,)])
    # drop_duplicates keeps the first occurrence; None counts as a value.
    self.assertEqual(list(c.drop_duplicates()), [(i,) for i in [None, 2, 17.0]])
    d = ta.dataframe({'a': [None, 2, 17.0, 7, 2], 'b': [1, 2, 17.0, 2, 1]}, device=self.device)
    # `subset` restricts which columns define duplicate keys.
    self.assertEqual(list(d.drop_duplicates(subset='a')), [(None, 1.0), (2.0, 2.0), (17.0, 17.0), (7.0, 2.0)])
    self.assertEqual(list(d.drop_duplicates(subset='b')), [(None, 1.0), (2.0, 2.0), (17.0, 17.0)])
    self.assertEqual(list(d.drop_duplicates(subset=['b', 'a'])), [(None, 1.0), (2.0, 2.0), (17.0, 17.0), (7.0, 2.0), (2.0, 1.0)])
    self.assertEqual(list(d.drop_duplicates()), [(None, 1.0), (2.0, 2.0), (17.0, 17.0), (7.0, 2.0), (2.0, 1.0)])
def base_test_agg_handling(self):
    """Aggregations (min/max/sum, cumulative ops, all/any) with a trailing null.

    `c` mirrors the non-null contents of column 'a'; the cumulative ops are
    expected to propagate the final None.
    """
    import functools
    import operator
    c = [1, 4, 2, 7, 9, 0]
    C = ta.dataframe({'a': [1, 4, 2, 7, 9, 0, None]}, device=self.device)
    # Scalar aggregations return a one-row column and ignore the null.
    self.assertEqual(len(C.min()['a']), 1)
    self.assertEqual(C.min()['a'][0], min(c))
    self.assertEqual(len(C.max()['a']), 1)
    self.assertEqual(C.max()['a'][0], max(c))
    self.assertEqual(len(C.sum()['a']), 1)
    self.assertEqual(C.sum()['a'][0], sum(c))
    # Cumulative ops keep one value per row; the null stays null at the end.
    self.assertEqual(list(C._cummin()), [(i,) for i in ([min(c[:i]) for i in range(1, (len(c) + 1))] + [None])])
    self.assertEqual(list(C._cummax()), [(i,) for i in ([max(c[:i]) for i in range(1, (len(c) + 1))] + [None])])
    self.assertEqual(list(C.cumsum()), [(i,) for i in ([sum(c[:i]) for i in range(1, (len(c) + 1))] + [None])])
    self.assertEqual(list(C._cumprod()), [(i,) for i in ([functools.reduce(operator.mul, c[:i], 1) for i in range(1, (len(c) + 1))] + [None])])
    # all/any over the derived boolean column, excluding the null row.
    self.assertEqual(((C['a'] % 2) == 0)[:(- 1)].all(), all((((i % 2) == 0) for i in c)))
    self.assertEqual(((C['a'] % 2) == 0)[:(- 1)].any(), any((((i % 2) == 0) for i in c)))
def base_test_isin(self):
    """isin() flags membership row-wise; a null entry is never a member."""
    values = [1, 4, 2, 7]
    frame = ta.dataframe({'a': (values + [None])}, device=self.device)
    expected = [True, False, True, False, False]
    self.assertEqual(list(frame.isin([1, 2, 3])), [(flag,) for flag in expected])
def base_test_isin2(self):
    """nunique() reports one (column-name, distinct-count) pair per column."""
    frame = ta.dataframe({'A': [1, 2, 3], 'B': [1, 1, 1]}, device=self.device)
    self.assertEqual(list(frame.nunique()), [('A', 3), ('B', 1)])
def base_test_describe_dataframe(self):
    """describe() summary rows, with include/exclude dtype filters.

    Column order in the filtered output follows the dataframe's column order
    (note the exclude case keeps b, c — in that order).
    """
    c = ta.dataframe({'a': ta.column([1, 2, 3], dtype=dt.int32), 'b': ta.column([10, 20, 30], dtype=dt.int64), 'c': ta.column([1.0, 2.0, 3.0], dtype=dt.float32), 'd': ta.column([10.0, 20.0, 30.0], dtype=dt.float64)}, device=self.device)
    self.assertEqual(list(c.describe()), [('count', 3.0, 3.0, 3.0, 3.0), ('mean', 2.0, 20.0, 2.0, 20.0), ('std', 1.0, 10.0, 1.0, 10.0), ('min', 1.0, 10.0, 1.0, 10.0), ('25%', 1.5, 15.0, 1.5, 15.0), ('50%', 2.0, 20.0, 2.0, 20.0), ('75%', 2.5, 25.0, 2.5, 25.0), ('max', 3.0, 30.0, 3.0, 30.0)])
    self.assertEqual(list(c.describe(include=[dt.int32, dt.float64])), [('count', 3.0, 3.0), ('mean', 2.0, 20.0), ('std', 1.0, 10.0), ('min', 1.0, 10.0), ('25%', 1.5, 15.0), ('50%', 2.0, 20.0), ('75%', 2.5, 25.0), ('max', 3.0, 30.0)])
    self.assertEqual(list(c.describe(exclude=[dt.int32, dt.float64])), [('count', 3.0, 3.0), ('mean', 20.0, 2.0), ('std', 10.0, 1.0), ('min', 10.0, 1.0), ('25%', 15.0, 1.5), ('50%', 20.0, 2.0), ('75%', 25.0, 2.5), ('max', 30.0, 3.0)])
def base_test_drop_by_str_as_columns(self):
    """drop() with a single column name removes exactly that column."""
    frame = ta.dataframe(device=self.device)
    frame['aa'] = [1, 2, 3]
    frame['ab'] = [11, 22, 33]
    frame['ac'] = [111, 222, 333]
    # Dropping each column in turn leaves the other two, in original order.
    cases = {
        'aa': [(11, 111), (22, 222), (33, 333)],
        'ab': [(1, 111), (2, 222), (3, 333)],
        'ac': [(1, 11), (2, 22), (3, 33)],
    }
    for dropped, remaining_rows in cases.items():
        self.assertEqual(list(frame.drop(dropped)), remaining_rows)
def base_test_drop_by_list_of_str_as_columns(self):
    """drop() with a list of column names removes all of them at once."""
    frame = ta.dataframe(device=self.device)
    frame['aa'] = [1, 2, 3]
    frame['ab'] = [11, 22, 33]
    frame['ac'] = [111, 222, 333]
    cases = [
        (['aa', 'ab'], [(111,), (222,), (333,)]),
        (['aa', 'ac'], [(11,), (22,), (33,)]),
        (['ab', 'ac'], [(1,), (2,), (3,)]),
    ]
    for dropped, remaining_rows in cases:
        self.assertEqual(list(frame.drop(dropped)), remaining_rows)
def base_test_drop_keep_rename_reorder_pipe(self):
    """drop / column-select / rename / reorder / pipe round-trips."""
    df = ta.dataframe(device=self.device)
    df['a'] = [1, 2, 3]
    df['b'] = [11, 22, 33]
    df['c'] = [111, 222, 333]
    # Dropping nothing is a no-op; dropping out-of-order names still works.
    self.assertEqual(list(df.drop([])), [(1, 11, 111), (2, 22, 222), (3, 33, 333)])
    self.assertEqual(list(df.drop(['c', 'a'])), [(11,), (22,), (33,)])
    # Selecting no columns yields no rows; selection preserves given order.
    self.assertEqual(list(df[[]]), [])
    self.assertEqual(list(df[['a', 'c']]), [(1, 111), (2, 222), (3, 333)])
    # rename swaps labels only — the data layout is unchanged.
    self.assertEqual(list(df.rename({'a': 'c', 'c': 'a'})), [(1, 11, 111), (2, 22, 222), (3, 33, 333)])
    self.assertEqual(list(df.reorder(list(reversed(df.columns)))), [(111, 11, 1), (222, 22, 2), (333, 33, 3)])
    # pipe(f, *args) == f(df, *args)
    def f(df):
        return df
    self.assertEqual(list(df), list(df.pipe(f)))
    def g(df, num):
        return (df + num)
    self.assertEqual(list((df + 13)), list(df.pipe(g, 13)))
def base_test_me_on_str(self):
    """where() with a `me` expression using the string accessor (.str)."""
    frame = ta.dataframe(device=self.device)
    frame['a'] = [1, 2, 3]
    frame['b'] = [11, 22, 33]
    frame['c'] = ['a', 'b', 'C']
    # Only the row whose string is already upper-case passes the filter.
    kept = frame.where((me['c'].str.upper() == me['c']))
    self.assertEqual(list(kept), [(3, 33, 'C')])
def base_test_locals_and_me_equivalence(self):
    """`me`-based expressions are equivalent to direct column references."""
    df = ta.dataframe(device=self.device)
    df['a'] = [1, 2, 3]
    df['b'] = [11, 22, 33]
    self.assertEqual(list(df.where(((me['a'] > 1) & (me['b'] == 33)))), list(df[((df['a'] > 1) & (df['b'] == 33))]))
    # select supports '*', positional names, keyword renames, and '-name' drops.
    self.assertEqual(list(df.select('*')), list(df))
    self.assertEqual(list(df.select('a')), list(df[['a']]))
    self.assertEqual(list(df.select('a', 'b')), list(df[['a', 'b']]))
    self.assertEqual(list(df.select(b=me['b'], a=me['a'])), list(df[['b', 'a']]))
    self.assertEqual(list(df.select('*', '-a')), list(df.drop(['a'])))
    # Keyword args may compute new columns from existing ones.
    gf = ta.dataframe({'a': df['a'], 'b': df['b'], 'c': (df['a'] + df['b'])}, device=self.device)
    self.assertEqual(list(df.select('*', d=(me['a'] + me['b']))), list(gf))
def base_test_groupby_str(self):
    """groupby accepts a single column name; .size counts rows per group."""
    frame = ta.dataframe({'a': [1, 1, 2], 'b': [1, 2, 3], 'c': [2, 2, 1]}, device=self.device)
    group_sizes = frame.groupby('a').size
    self.assertEqual(list(group_sizes), [(1, 2), (2, 1)])
def base_test_groupby_list_of_str(self):
    """groupby also accepts a list of column names."""
    frame = ta.dataframe({'a': [1, 1, 2], 'b': [1, 2, 3], 'c': [2, 2, 1]}, device=self.device)
    group_sizes = frame.groupby(['a']).size
    self.assertEqual(list(group_sizes), [(1, 2), (2, 1)])
def base_test_groupby_size_pipe(self):
    """groupby(...).size yields (group-key, row-count) pairs.

    The original version ended with an unused second dataframe assignment
    (dead code, its result was never read); it has been removed.
    """
    df = ta.dataframe({'a': [1, 1, 2], 'b': [1, 2, 3], 'c': [2, 2, 1]}, device=self.device)
    self.assertEqual(list(df.groupby('a').size), [(1, 2), (2, 1)])
def base_test_groupby_agg(self):
    """agg() accepts a single op name, a list of ops, or a per-column dict."""
    df = ta.dataframe({'A': ['a', 'b', 'a', 'b'], 'B': [1, 2, 3, 4]}, device=self.device)
    self.assertEqual(list(df.groupby('A').agg('sum')), [('a', 4), ('b', 6)])
    df = ta.dataframe({'a': [1, 1, 2], 'b': [1, 2, 3], 'c': [2, 2, 1]}, device=self.device)
    # Single op applies to every non-key column.
    self.assertEqual(list(df.groupby('a').agg('sum')), [(1, 3, 4), (2, 3, 1)])
    # List of ops: one output column per (column, op) pair.
    self.assertEqual(list(df.groupby('a').agg(['sum', 'min'])), [(1, 3, 4, 1, 2), (2, 3, 1, 3, 1)])
    # Dict form: per-column op selection; output follows the dict's order.
    self.assertEqual(list(df.groupby('a').agg({'c': 'max', 'b': ['min', 'mean']})), [(1, 2, 1, 1.5), (2, 1, 3, 3.0)])
def base_test_groupby_iter_get_item_ops(self):
    """Iterating a groupby yields (key-tuple, sub-frame); [] selects columns."""
    df = ta.dataframe({'A': ['a', 'b', 'a', 'b'], 'B': [1, 2, 3, 4]}, device=self.device)
    # Keys come through as tuples even for a single grouping column.
    for (g, gf) in df.groupby('A'):
        if (g == ('a',)):
            self.assertEqual(list(gf), [(1,), (3,)])
        elif (g == ('b',)):
            self.assertEqual(list(gf), [(2,), (4,)])
        else:
            # No other group keys should exist.
            self.assertTrue(False)
    self.assertEqual(list(df.groupby('A').sum()), [('a', 4), ('b', 6)])
    # Selecting a column from the groupby drops the key from the output.
    self.assertEqual(list(df.groupby('A')['B'].sum()), [4, 6])
def base_test_column_overriden(self):
    """Reassigning an existing column replaces its data and its dtype.

    (Method name keeps the historical spelling "overriden" — renaming would
    break the base-class test discovery contract.)
    """
    df = ta.dataframe({'a': [1, 2, 3], 'b': ['a', 'b', 'c']}, device=self.device)
    self.assertEqual(list(df), [(1, 'a'), (2, 'b'), (3, 'c')])
    self.assertEqual(df.dtype, dt.Struct([dt.Field('a', dt.int64), dt.Field('b', dt.string)]))
    # map with an explicit dtype changes column 'a' from int64 to string.
    df['a'] = df['a'].map((lambda x: ('str_' + str(x))), dtype=dt.string)
    self.assertEqual(list(df['a']), ['str_1', 'str_2', 'str_3'])
    self.assertEqual(list(df), [('str_1', 'a'), ('str_2', 'b'), ('str_3', 'c')])
    self.assertEqual(df.dtype, dt.Struct([dt.Field('a', dt.string), dt.Field('b', dt.string)]))
def base_test_infer_func_output_dtype(self):
    """map() infers result dtypes from function type annotations.

    Covers: plain annotated return, Optional -> nullable dtype, NamedTuple ->
    Struct dtype, and dict-based mapping (missing keys map to null).
    """
    df = ta.dataframe({'a': [1, 2, 3], 'b': [11, 22, 33]}, device=self.device)
    def myadd(a: int, b: int) -> str:
        return f'{a}_{b}'
    self.assertEqual(list(df.map(myadd, columns=['a', 'b'])), ['1_11', '2_22', '3_33'])
    def mynullable(a: int) -> Optional[int]:
        return (a if ((a % 2) == 1) else None)
    r = df['a'].map(mynullable)
    # Optional[int] annotation yields the nullable variant of int64.
    self.assertEqual(df['a'].dtype, dt.int64)
    self.assertEqual(r.dtype, dt.int64.with_null())
    self.assertEqual(list(r), [1, None, 3])
    class Ret(NamedTuple):
        plus: int
        minus: int
    def mymultiret(a: int, b: int) -> Ret:
        return Ret((a + b), (a - b))
    r = df.map(mymultiret, columns=['a', 'b'])
    # NamedTuple return annotation becomes a Struct column.
    self.assertEqual(r.dtype, dt.Struct([dt.Field('plus', dt.int64), dt.Field('minus', dt.int64)]))
    self.assertEqual(list(r), [(12, (- 10)), (24, (- 20)), (36, (- 30))])
    # Dict mapping: values without a matching key become null.
    r = df.map({None: 1, 1: 1000}, columns=['a'])
    self.assertEqual(list(r), [1000, None, None])
    self.assertEqual(r.dtype, dt.int64)
def base_test_in(self):
    """`in` on a dataframe tests column-name membership, not row values."""
    frame = ta.dataframe({'A': ['a', 'b', 'a', 'b'], 'B': [1, 2, 3, 4]}, device=self.device)
    self.assertTrue(('A' in frame))
    self.assertFalse(('X' in frame))
class GetMediaGroup():
    async def get_media_group(self: 'pyrogram.Client', chat_id: Union[(int, str)], message_id: int) -> List['types.Message']:
        """Return all messages in the same media group (album) as *message_id*.

        Fetches a 19-message window (9 before, the target, 9 after) and keeps
        the messages whose media_group_id matches the target's.

        Raises ValueError when message_id is not positive or when the target
        message is not part of a media group.
        """
        if (message_id <= 0):
            raise ValueError('Passed message_id is negative or equal to zero.')
        messages = (await self.get_messages(chat_id=chat_id, message_ids=[msg_id for msg_id in range((message_id - 9), (message_id + 10))], replies=0))
        # With a full 19-message window the target sits at index 9; a shorter
        # result means ids <= 0 were dropped, i.e. the window starts at id 1,
        # so the target is then at index message_id - 1.
        media_group_id = (messages[9].media_group_id if (len(messages) == 19) else messages[(message_id - 1)].media_group_id)
        if (media_group_id is None):
            raise ValueError("The message doesn't belong to a media group")
        return types.List((msg for msg in messages if (msg.media_group_id == media_group_id)))
def get_test_loaders(config):
    """Yield one DataLoader per test dataset described by config['loaders'].

    Falls back to 'StandardHDF5Dataset' when no dataset class is configured,
    and scales the batch size by the GPU count when running multi-GPU.
    """
    assert ('loaders' in config), 'Could not find data loaders configuration'
    cfg = config['loaders']
    logger.info('Creating test set loaders...')
    # Resolve the dataset class, defaulting when the config omits it.
    dataset_cls_str = cfg.get('dataset', None)
    if (dataset_cls_str is None):
        dataset_cls_str = 'StandardHDF5Dataset'
        logger.warning(f"Cannot find dataset class in the config. Using default '{dataset_cls_str}'.")
    dataset_class = _loader_classes(dataset_cls_str)
    datasets = dataset_class.create_datasets(cfg, phase='test')
    workers = cfg.get('num_workers', 1)
    logger.info(f'Number of workers for the dataloader: {workers}')
    batch_size = cfg.get('batch_size', 1)
    # Multiply the batch size by the GPU count for multi-GPU (non-CPU) runs.
    if ((torch.cuda.device_count() > 1) and (not (config['device'] == 'cpu'))):
        logger.info(f'{torch.cuda.device_count()} GPUs available. Using batch_size = {torch.cuda.device_count()} * {batch_size}')
        batch_size = (batch_size * torch.cuda.device_count())
    logger.info(f'Batch size for dataloader: {batch_size}')
    for ds in datasets:
        logger.info(f'Loading test set from: {ds.file_path}...')
        # Prefer a dataset-provided collate function when available.
        collate = (ds.prediction_collate if hasattr(ds, 'prediction_collate') else default_prediction_collate)
        (yield DataLoader(ds, batch_size=batch_size, num_workers=workers, pin_memory=True, collate_fn=collate))
class CondenseUnit(nn.Module):
    """CondenseNet unit: grouped 1x1 bottleneck then grouped 3x3 conv,
    with the input concatenated onto the newly produced channels."""

    def __init__(self, in_channels, out_channels, groups):
        super(CondenseUnit, self).__init__()
        expansion = 4
        growth = out_channels - in_channels
        hidden = growth * expansion
        self.conv1 = condense_complex_conv1x1(in_channels=in_channels, out_channels=hidden, groups=groups)
        self.conv2 = condense_simple_conv3x3(in_channels=hidden, out_channels=growth, groups=groups)

    def forward(self, x):
        # Dense connectivity: keep the input and append the new features.
        new_features = self.conv2(self.conv1(x))
        return torch.cat((x, new_features), dim=1)
class CutExecutor(ActionExecutor):
    def execute(self, script: Script, state: EnvironmentState, info: ExecutionInfo, char_index, modify=True, in_place=False):
        """Execute a 'cut' action for the script's current line.

        Yields the (possibly unchanged) state when the target object exists
        and is cuttable; yields nothing on failure, with the reason recorded
        in *info*.
        """
        current_line = script[0]
        info.set_current_line(current_line)
        node = state.get_state_node(current_line.object())
        if (node is None):
            info.object_found_error()
        elif self.check_cuttable(state, node, info, char_index):
            # The cut action changes no graph edges/states; change_state([])
            # is still called so downstream bookkeeping runs.
            if modify:
                (yield state.change_state([], in_place=in_place))
            else:
                (yield state)

    def check_cuttable(self, state: EnvironmentState, node: GraphNode, info: ExecutionInfo, char_index):
        """Return True when the character can cut *node*.

        Requires: a free hand, proximity to the node, the node being both
        EATABLE and CUTTABLE, and a knife held in either hand. On failure,
        records the reason in *info* and returns False.
        """
        if (_find_free_hand(state, char_index) is None):
            info.error('{} does not have a free hand', _get_character_node(state, char_index))
            return False
        if (not _is_character_close_to(state, node, char_index)):
            info.error('{} is not close to {}', _get_character_node(state, char_index), node)
            return False
        if (Property.EATABLE not in node.properties):
            info.error('{} is not eatable', node)
            return False
        if (Property.CUTTABLE not in node.properties):
            info.error('{} is not cuttable', node)
            return False
        char_node = _get_character_node(state, char_index)
        holding_nodes = _find_nodes_from(state, char_node, [Relation.HOLDS_LH, Relation.HOLDS_RH])
        # A knife is identified by substring match on the class name.
        if (not any([('knife' in node.class_name) for node in holding_nodes])):
            info.error('{} is not holding a knife', _get_character_node(state, char_index))
            return False
        return True
def test_serializer_update_missing_updated(db):
    """ValueConflictValidator must accept an update whose request carries no
    'updated' timestamp (empty request data)."""
    value = Value.objects.get(project_id=project_id, snapshot=None, attribute__path=attribute_path)
    # Minimal stand-ins for the DRF request/view the validator inspects.
    class MockedRequest():
        data = {}
    class MockedView():
        request = MockedRequest()
        project = Project.objects.get(id=project_id)
    validator = ValueConflictValidator()
    serializer = ValueSerializer()
    serializer.instance = value
    serializer.context['view'] = MockedView()
    # Must not raise: no conflict can be detected without request data.
    validator({'attribute': value.attribute, 'set_prefix': value.set_prefix, 'set_index': value.set_index, 'collection_index': value.collection_index}, serializer)
def loadAWSOrganizations(neo4j_uri, neo4j_user, neo4j_password, data_path, account_name):
    """Open a Neo4j session and load all AWS Organizations data for an account.

    Nodes (policies, root, master account, OUs, accounts) are loaded before
    the relations that connect them — the order of the tuple below matters.
    """
    driver = GraphDatabase.driver(neo4j_uri, auth=(neo4j_user, neo4j_password), encrypted=False)
    steps = (
        loadAWSServiceControlPolicy,
        loadAWSOrganizationRootNode,
        loadAWSOrganizationMasterAccount,
        loadAWSOrganizationOU,
        loadAWSOrganizationAccounts,
        loadAWSOrganizationOURelations,
        loadAWSOrganizationAccountRelations,
        loadAWSOUAccountPolicyRelations,
    )
    with driver.session() as session:
        for step in steps:
            step(session, data_path, account_name)
class Soil(object):
    """Accessor for the soil-layer parameters of one LID control.

    Every property reads or writes a single LidLayersProperty of the
    LidLayers.soil layer through the owning model's getLidCParam /
    setLidCParam API.

    Fix: the original had its ``@property`` / ``@<name>.setter`` decorators
    stripped — leaving bare ``_capacity.setter``-style expression statements
    (NameError at class creation) and duplicate getter/setter ``def``s where
    the setter silently shadowed the getter. The decorators are restored.
    """

    def __init__(self, model, lidcontrol):
        self._model = model
        self._lidcontrol = lidcontrol
        self._lidcontrolid = lidcontrol._lidcontrolid

    @property
    def thickness(self):
        """Soil layer thickness."""
        return self._model.getLidCParam(self._lidcontrolid, LidLayers.soil.value, LidLayersProperty.thickness.value)

    @thickness.setter
    def thickness(self, param):
        return self._model.setLidCParam(self._lidcontrolid, LidLayers.soil.value, LidLayersProperty.thickness.value, param)

    @property
    def porosity(self):
        """Soil porosity (void volume / total volume)."""
        return self._model.getLidCParam(self._lidcontrolid, LidLayers.soil.value, LidLayersProperty.porosity.value)

    @porosity.setter
    def porosity(self, param):
        return self._model.setLidCParam(self._lidcontrolid, LidLayers.soil.value, LidLayersProperty.porosity.value, param)

    @property
    def field_capacity(self):
        """Soil field capacity."""
        return self._model.getLidCParam(self._lidcontrolid, LidLayers.soil.value, LidLayersProperty.fieldCap.value)

    @field_capacity.setter
    def field_capacity(self, param):
        return self._model.setLidCParam(self._lidcontrolid, LidLayers.soil.value, LidLayersProperty.fieldCap.value, param)

    @property
    def wilting_point(self):
        """Soil wilting point."""
        return self._model.getLidCParam(self._lidcontrolid, LidLayers.soil.value, LidLayersProperty.wiltPoint.value)

    @wilting_point.setter
    def wilting_point(self, param):
        return self._model.setLidCParam(self._lidcontrolid, LidLayers.soil.value, LidLayersProperty.wiltPoint.value, param)

    @property
    def k_saturated(self):
        """Saturated hydraulic conductivity."""
        return self._model.getLidCParam(self._lidcontrolid, LidLayers.soil.value, LidLayersProperty.kSat.value)

    @k_saturated.setter
    def k_saturated(self, param):
        return self._model.setLidCParam(self._lidcontrolid, LidLayers.soil.value, LidLayersProperty.kSat.value, param)

    @property
    def k_slope(self):
        """Slope of the conductivity curve."""
        return self._model.getLidCParam(self._lidcontrolid, LidLayers.soil.value, LidLayersProperty.kSlope.value)

    @k_slope.setter
    def k_slope(self, param):
        return self._model.setLidCParam(self._lidcontrolid, LidLayers.soil.value, LidLayersProperty.kSlope.value, param)

    @property
    def suction_head(self):
        """Soil suction head."""
        return self._model.getLidCParam(self._lidcontrolid, LidLayers.soil.value, LidLayersProperty.suction.value)

    @suction_head.setter
    def suction_head(self, param):
        return self._model.setLidCParam(self._lidcontrolid, LidLayers.soil.value, LidLayersProperty.suction.value, param)
def get_edges(o: object) -> Iterator[tuple[(object, object)]]:
    """Yield (source, target) reference edges out of *o* for graph traversal.

    Function-like targets are expanded into their closure and bound instance
    instead of being yielded directly; targets whose type is blacklisted are
    skipped.
    """
    for (s, e) in get_edge_candidates(o):
        if isinstance(e, FUNCTION_TYPES):
            if hasattr(e, '__closure__'):
                # Source is the (s, '__closure__') pair so the edge is
                # labelled with the attribute it came through.
                (yield ((s, '__closure__'), e.__closure__))
            if hasattr(e, '__self__'):
                se = e.__self__
                # Follow a bound method to its instance, but not back to *o*
                # itself or to its type (avoids trivial cycles).
                if ((se is not o) and (se is not type(o)) and hasattr(s, '__self__')):
                    (yield (s.__self__, se))
        elif (type(e) not in TYPE_BLACKLIST):
            (yield (s, e))
class FusedEmbeddingBagCollectionSharder(BaseEmbeddingSharder[FusedEmbeddingBagCollection]):
    def shard(self, module: FusedEmbeddingBagCollection, params: Dict[(str, ParameterSharding)], env: ShardingEnv, device: Optional[torch.device]=None) -> ShardedEmbeddingBagCollection:
        """Build the sharded counterpart of the fused EBC module."""
        return ShardedFusedEmbeddingBagCollection(module, params, env, device, qcomm_codecs_registry=self.qcomm_codecs_registry)

    def shardable_parameters(self, module: FusedEmbeddingBagCollection) -> Dict[(str, nn.Parameter)]:
        """Map table name -> weight parameter for every '.weight' entry.

        The table name is taken as the second-to-last dotted component of the
        state-dict key.
        """
        params = {name.split('.')[(- 2)]: param for (name, param) in module.state_dict().items() if name.endswith('.weight')}
        return params

    # NOTE(review): in torchrec, module_type is conventionally a @property —
    # here it is a plain method; confirm against callers before changing.
    def module_type(self) -> Type[FusedEmbeddingBagCollection]:
        return FusedEmbeddingBagCollection

    def sharding_types(self, compute_device_type: str) -> List[str]:
        """Sharding strategies supported on the given device type.

        Row-wise variants are only offered on CUDA.
        """
        types = [ShardingType.DATA_PARALLEL.value, ShardingType.TABLE_WISE.value, ShardingType.COLUMN_WISE.value, ShardingType.TABLE_COLUMN_WISE.value]
        if (compute_device_type in {'cuda'}):
            types += [ShardingType.ROW_WISE.value, ShardingType.TABLE_ROW_WISE.value]
        return types

    def compute_kernels(self, sharding_type: str, compute_device_type: str) -> List[str]:
        """Compute kernels usable for the given sharding/device combination.

        Data-parallel sharding falls back to the dense kernel; all other
        sharding types use fused kernels, with UVM variants added on CUDA.
        """
        ret = []
        if (sharding_type != ShardingType.DATA_PARALLEL.value):
            ret += [EmbeddingComputeKernel.FUSED.value]
            if (compute_device_type in {'cuda'}):
                ret += [EmbeddingComputeKernel.FUSED_UVM.value, EmbeddingComputeKernel.FUSED_UVM_CACHING.value]
        else:
            ret.append(EmbeddingComputeKernel.DENSE.value)
        return ret
class InceptionBUnit(nn.Module):
    """Inception-B block: four parallel branches whose outputs are merged by
    the Concurrent container (1x1, factorized 7x7, double factorized 7x7,
    and average-pool branches)."""

    def __init__(self):
        super(InceptionBUnit, self).__init__()
        # Input channel count is fixed by the network position of this block.
        in_channels = 1024
        self.branches = Concurrent()
        self.branches.add_module('branch1', Conv1x1Branch(in_channels=in_channels, out_channels=384))
        # 7x7 convolution factorized into 1x7 and 7x1.
        self.branches.add_module('branch2', ConvSeqBranch(in_channels=in_channels, out_channels_list=(192, 224, 256), kernel_size_list=(1, (1, 7), (7, 1)), strides_list=(1, 1, 1), padding_list=(0, (0, 3), (3, 0))))
        # Two stacked factorized 7x7 convolutions.
        self.branches.add_module('branch3', ConvSeqBranch(in_channels=in_channels, out_channels_list=(192, 192, 224, 224, 256), kernel_size_list=(1, (7, 1), (1, 7), (7, 1), (1, 7)), strides_list=(1, 1, 1, 1, 1), padding_list=(0, (3, 0), (0, 3), (3, 0), (0, 3))))
        self.branches.add_module('branch4', AvgPoolBranch(in_channels=in_channels, out_channels=128))

    def forward(self, x):
        x = self.branches(x)
        return x
class KitchenLowdimWrapper(gym.Env):
    """Thin gym.Env wrapper around a KitchenBase env that can pin the initial
    qpos/qvel on reset and fixes the render resolution."""

    def __init__(self, env: KitchenBase, init_qpos: Optional[np.ndarray]=None, init_qvel: Optional[np.ndarray]=None, render_hw=(240, 360)):
        self.env = env
        self.init_qpos = init_qpos
        self.init_qvel = init_qvel
        # (height, width) used by render().
        self.render_hw = render_hw

    # NOTE(review): gym convention exposes action_space/observation_space as
    # attributes or @property; these are plain methods here — possibly a
    # stripped @property decorator. Confirm against callers before changing.
    def action_space(self):
        return self.env.action_space

    def observation_space(self):
        return self.env.observation_space

    def seed(self, seed=None):
        return self.env.seed(seed)

    def reset(self):
        """Reset the env; when init_qpos is set, force the sim to the stored
        qpos/qvel and recompute the observation from that state."""
        if (self.init_qpos is not None):
            # Discard the default reset observation; state is overridden below.
            _ = self.env.reset()
            self.env.set_state(self.init_qpos, self.init_qvel)
            obs = self.env._get_obs()
            return obs
        else:
            return self.env.reset()

    def render(self, mode='rgb_array'):
        (h, w) = self.render_hw
        return self.env.render(mode=mode, width=w, height=h)

    def step(self, a):
        return self.env.step(a)
class id_parser(object):
    """PLY-based lexer/parser that validates SPDX-License-Identifier lines.

    Fixes: PLY derives each function token's regex and each grammar rule's
    productions from the function *docstring*; those docstrings were missing
    from ``t_RPAR``/``t_LPAR``/``t_ID`` and ``p_expr``, so ``lex.lex()`` /
    ``yacc.yacc()`` could not build. They are restored below. Additionally,
    the ``pe.tok is None`` error path in ``parse_lines`` referenced ``col``,
    which is only assigned in the other branch (potential NameError); the
    column is reported as 0 there instead.
    """

    reserved = ['AND', 'OR', 'WITH']
    tokens = (['LPAR', 'RPAR', 'ID', 'EXC'] + reserved)
    precedence = (('nonassoc', 'AND', 'OR'),)
    t_ignore = ' \t'

    def __init__(self, spdx):
        self.spdx = spdx
        self.lasttok = None
        self.lastid = None
        # PLY scans this instance for t_*/p_* rules; regexes live in docstrings.
        self.lexer = lex.lex(module=self, reflags=re.UNICODE)
        self.parser = yacc.yacc(module=self, write_tables=False, debug=False)
        self.lines_checked = 0
        self.checked = 0
        self.spdx_valid = 0
        self.spdx_errors = 0
        self.curline = 0
        self.deepest = 0

    def validate(self, tok):
        """Check a license or exception ID against the SPDX data."""
        id = tok.value.upper()
        if (tok.type == 'ID'):
            if (not (id in self.spdx.licenses)):
                raise ParserException(tok, 'Invalid License ID')
            self.lastid = id
        elif (tok.type == 'EXC'):
            if (id not in self.spdx.exceptions):
                raise ParserException(tok, 'Invalid Exception ID')
            # The exception must be usable with the license seen before WITH.
            if (self.lastid not in self.spdx.exceptions[id]):
                raise ParserException(tok, ('Exception not valid for license %s' % self.lastid))
            self.lastid = None
        elif (tok.type != 'WITH'):
            self.lastid = None

    def t_RPAR(self, tok):
        r'\)'
        self.lasttok = tok.type
        return tok

    def t_LPAR(self, tok):
        r'\('
        self.lasttok = tok.type
        return tok

    def t_ID(self, tok):
        r'[A-Za-z.0-9\-+]+'
        if (self.lasttok == 'EXC'):
            print(tok)
            raise ParserException(tok, 'Missing parentheses')
        tok.value = tok.value.strip()
        val = tok.value.upper()
        # Promote reserved words to their own token type; the ID following
        # WITH is an exception identifier.
        if (val in self.reserved):
            tok.type = val
        elif (self.lasttok == 'WITH'):
            tok.type = 'EXC'
        self.lasttok = tok.type
        self.validate(tok)
        return tok

    def t_error(self, tok):
        raise ParserException(tok, 'Invalid token')

    def p_expr(self, p):
        '''expr : ID
                | ID WITH EXC
                | expr AND expr
                | expr OR expr
                | LPAR expr RPAR'''
        pass

    def p_error(self, p):
        if (not p):
            raise ParserException(None, 'Unfinished license expression')
        else:
            raise ParserException(p, 'Syntax error')

    def parse(self, expr):
        """Parse and validate one SPDX license expression."""
        self.lasttok = None
        self.lastid = None
        self.parser.parse(expr, lexer=self.lexer)

    def parse_lines(self, fd, maxlines, fname):
        """Scan up to *maxlines* of *fd* for an SPDX tag and validate it.

        Stops at the first SPDX line found; errors are reported on stdout and
        counted in spdx_errors.
        """
        self.checked += 1
        self.curline = 0
        try:
            for line in fd:
                line = line.decode(locale.getpreferredencoding(False), errors='ignore')
                self.curline += 1
                if (self.curline > maxlines):
                    break
                self.lines_checked += 1
                if (line.find('SPDX-License-Identifier:') < 0):
                    continue
                expr = line.split(':')[1].strip()
                # Strip comment terminators that may trail the expression.
                if line.strip().endswith('*/'):
                    expr = expr.rstrip('*/').strip()
                if line.strip().endswith('-->'):
                    expr = expr.rstrip('-->').strip()
                if line.startswith('LIST "'):
                    expr = expr.rstrip('"').strip()
                self.parse(expr)
                self.spdx_valid += 1
                break
        except ParserException as pe:
            if pe.tok:
                # Column of the offending token within the original line.
                col = (line.find(expr) + pe.tok.lexpos)
                tok = pe.tok.value
                sys.stdout.write(('%s: %d:%d %s: %s\n' % (fname, self.curline, col, pe.txt, tok)))
            else:
                # No token available — report column 0 (`col` is unset here).
                sys.stdout.write(('%s: %d:0 %s\n' % (fname, self.curline, pe.txt)))
            self.spdx_errors += 1
class OddLength(LengthField):
    """Length field encoding only the parity of the data length as one byte."""

    structcode = 'B'
    structvalues = 1

    def __init__(self, name):
        self.name = name

    def calc_length(self, length):
        # 1 for odd lengths, 0 for even ones.
        return length % 2

    def parse_value(self, value, display):
        # Decode the wire parity back into a human-readable label.
        return 'even' if value == 0 else 'odd'
def load_conv3d(state_dict, name_pt, sess, name_tf, bias=False, bn=True):
    """Copy a TF conv3d (and optional batch-norm) scope into a PyTorch state dict.

    Reads the TF variables under *name_tf* from *sess* and writes the tensors
    under *name_pt* keys in *state_dict*.

    NOTE(review): TF scope paths are built with os.path.join — fine on POSIX,
    but would produce backslashes on Windows; confirm '/'-joining is intended.
    """
    conv_name_tf = os.path.join(name_tf, 'conv_3d')
    conv_params = get_conv_params(sess, conv_name_tf, bias=bias)
    if bias:
        (conv_weights, kernel_shape, in_channels, out_channels, strides, padding, conv_bias) = conv_params
    else:
        (conv_weights, kernel_shape, in_channels, out_channels, strides, padding) = conv_params
    # TF stores conv3d weights as (D, H, W, in, out); PyTorch wants
    # (out, in, D, H, W).
    conv_weights_rs = np.transpose(conv_weights, (4, 3, 0, 1, 2))
    state_dict[(name_pt + '.conv3d.weight')] = torch.from_numpy(conv_weights_rs)
    if bias:
        state_dict[(name_pt + '.conv3d.bias')] = torch.from_numpy(conv_bias)
    if bn:
        conv_tf_name = os.path.join(name_tf, 'batch_norm')
        (moving_mean, moving_var, beta) = get_bn_params(sess, conv_tf_name)
        out_planes = conv_weights_rs.shape[0]
        # TF batch_norm here has no scale (gamma), so weight is all ones.
        state_dict[(name_pt + '.batch3d.weight')] = torch.ones(out_planes)
        state_dict[(name_pt + '.batch3d.bias')] = torch.from_numpy(beta.squeeze())
        state_dict[(name_pt + '.batch3d.running_mean')] = torch.from_numpy(moving_mean.squeeze())
        state_dict[(name_pt + '.batch3d.running_var')] = torch.from_numpy(moving_var.squeeze())
class F38Handler(BaseHandler):
    """Kickstart syntax handler for Fedora 38.

    commandMap maps each kickstart command keyword to the newest command
    class valid for this release; dataMap does the same for the data objects
    those commands produce.
    """
    version = F38
    # Command keyword -> handler class (versions reflect the last release in
    # which each command's syntax changed).
    commandMap = {'auth': commands.authconfig.F35_Authconfig, 'authconfig': commands.authconfig.F35_Authconfig, 'authselect': commands.authselect.F28_Authselect, 'autopart': commands.autopart.F38_AutoPart, 'autostep': commands.autostep.F34_AutoStep, 'bootloader': commands.bootloader.F34_Bootloader, 'btrfs': commands.btrfs.F23_BTRFS, 'cdrom': commands.cdrom.FC3_Cdrom, 'clearpart': commands.clearpart.F28_ClearPart, 'cmdline': commands.displaymode.F26_DisplayMode, 'device': commands.device.F34_Device, 'deviceprobe': commands.deviceprobe.F34_DeviceProbe, 'dmraid': commands.dmraid.F34_DmRaid, 'driverdisk': commands.driverdisk.F14_DriverDisk, 'module': commands.module.F31_Module, 'eula': commands.eula.F20_Eula, 'fcoe': commands.fcoe.F28_Fcoe, 'firewall': commands.firewall.F28_Firewall, 'firstboot': commands.firstboot.FC3_Firstboot, 'graphical': commands.displaymode.F26_DisplayMode, 'group': commands.group.F12_Group, 'halt': commands.reboot.F23_Reboot, 'harddrive': commands.harddrive.F33_HardDrive, 'hmc': commands.hmc.F28_Hmc, 'ignoredisk': commands.ignoredisk.F34_IgnoreDisk, 'install': commands.install.F34_Install, 'iscsi': commands.iscsi.F17_Iscsi, 'iscsiname': commands.iscsiname.FC6_IscsiName, 'keyboard': commands.keyboard.F18_Keyboard, 'lang': commands.lang.F19_Lang, 'liveimg': commands.liveimg.F19_Liveimg, 'logging': commands.logging.F34_Logging, 'logvol': commands.logvol.F29_LogVol, 'mediacheck': commands.mediacheck.FC4_MediaCheck, 'method': commands.method.F34_Method, 'mount': commands.mount.F27_Mount, 'multipath': commands.multipath.F34_MultiPath, 'network': commands.network.F27_Network, 'nfs': commands.nfs.FC6_NFS, 'nvdimm': commands.nvdimm.F28_Nvdimm, 'timesource': commands.timesource.F33_Timesource, 'ostreecontainer': commands.ostreecontainer.F38_OSTreeContainer, 'ostreesetup': commands.ostreesetup.F38_OSTreeSetup, 'part': commands.partition.F34_Partition, 'partition': commands.partition.F34_Partition, 'poweroff': commands.reboot.F23_Reboot, 'raid': 
    commands.raid.F29_Raid, 'realm': commands.realm.F19_Realm, 'reboot': commands.reboot.F23_Reboot, 'repo': commands.repo.F33_Repo, 'reqpart': commands.reqpart.F23_ReqPart, 'rescue': commands.rescue.F10_Rescue, 'rootpw': commands.rootpw.F37_RootPw, 'selinux': commands.selinux.FC3_SELinux, 'services': commands.services.FC6_Services, 'shutdown': commands.reboot.F23_Reboot, 'skipx': commands.skipx.FC3_SkipX, 'snapshot': commands.snapshot.F26_Snapshot, 'sshpw': commands.sshpw.F24_SshPw, 'sshkey': commands.sshkey.F22_SshKey, 'text': commands.displaymode.F26_DisplayMode, 'timezone': commands.timezone.F33_Timezone, 'updates': commands.updates.F34_Updates, 'url': commands.url.F30_Url, 'user': commands.user.F24_User, 'vnc': commands.vnc.F9_Vnc, 'volgroup': commands.volgroup.F21_VolGroup, 'xconfig': commands.xconfig.F14_XConfig, 'zerombr': commands.zerombr.F9_ZeroMbr, 'zfcp': commands.zfcp.F37_ZFCP, 'zipl': commands.zipl.F32_Zipl}
    # Data class name -> data class for objects produced by the commands above.
    dataMap = {'BTRFSData': commands.btrfs.F23_BTRFSData, 'DriverDiskData': commands.driverdisk.F14_DriverDiskData, 'DeviceData': commands.device.F8_DeviceData, 'DmRaidData': commands.dmraid.FC6_DmRaidData, 'ModuleData': commands.module.F31_ModuleData, 'TimesourceData': commands.timesource.F33_TimesourceData, 'FcoeData': commands.fcoe.F28_FcoeData, 'GroupData': commands.group.F12_GroupData, 'IscsiData': commands.iscsi.F17_IscsiData, 'LogVolData': commands.logvol.F29_LogVolData, 'MountData': commands.mount.F27_MountData, 'MultiPathData': commands.multipath.FC6_MultiPathData, 'NetworkData': commands.network.F27_NetworkData, 'NvdimmData': commands.nvdimm.F28_NvdimmData, 'PartData': commands.partition.F29_PartData, 'RaidData': commands.raid.F29_RaidData, 'RepoData': commands.repo.F30_RepoData, 'SnapshotData': commands.snapshot.F26_SnapshotData, 'SshPwData': commands.sshpw.F24_SshPwData, 'SshKeyData': commands.sshkey.F38_SshKeyData, 'UserData': commands.user.F19_UserData, 'VolGroupData': commands.volgroup.F21_VolGroupData, 'ZFCPData': commands.zfcp.F37_ZFCPData}
def build_detection_train_loader(cfg, mapper=None):
    """Build the training DataLoader described by a detectron2-style config.

    Validates the batch/worker relationship, constructs the (possibly
    mapped) dataset, then dispatches on cfg.DATALOADER.SAMPLER_TRAIN.
    """
    num_workers = get_world_size()
    images_per_batch = cfg.SOLVER.IMS_PER_BATCH
    assert ((images_per_batch % num_workers) == 0), 'SOLVER.IMS_PER_BATCH ({}) must be divisible by the number of workers ({}).'.format(images_per_batch, num_workers)
    assert (images_per_batch >= num_workers), 'SOLVER.IMS_PER_BATCH ({}) must be larger than the number of workers ({}).'.format(images_per_batch, num_workers)
    images_per_worker = (images_per_batch // num_workers)
    dataset_dicts = get_detection_dataset_dicts(cfg.DATASETS.TRAIN, filter_empty=cfg.DATALOADER.FILTER_EMPTY_ANNOTATIONS, min_keypoints=(cfg.MODEL.ROI_KEYPOINT_HEAD.MIN_KEYPOINTS_PER_IMAGE if cfg.MODEL.KEYPOINT_ON else 0), proposal_files=(cfg.DATASETS.PROPOSAL_FILES_TRAIN if cfg.MODEL.LOAD_PROPOSALS else None))
    dataset = DatasetFromList(dataset_dicts, copy=False)
    if (mapper is None):
        mapper = DatasetMapper(cfg, True)
    dataset = MapDataset(dataset, mapper)
    sampler_name = cfg.DATALOADER.SAMPLER_TRAIN
    logger = logging.getLogger(__name__)
    logger.info('Using training sampler {}'.format(sampler_name))
    if (sampler_name == 'TrainingSampler'):
        sampler = TrainingSampler(len(dataset))
    elif (sampler_name == 'RepeatFactorTrainingSampler'):
        repeat_factors = RepeatFactorTrainingSampler.repeat_factors_from_category_frequency(dataset_dicts, cfg.DATALOADER.REPEAT_THRESHOLD)
        sampler = RepeatFactorTrainingSampler(repeat_factors)
    elif (sampler_name == 'PairTrainingSampler'):
        # NOTE(review): this branch returns early with its own PairDataLoader
        # (PairTrainingSampler acts as the batch_sampler), bypassing
        # build_batch_data_loader below — confirm that is intentional.
        sampler = PairTrainingSampler(cfg, dataset_dicts, images_per_worker)
        data_loader = torch.utils.data.DataLoader(dataset, num_workers=cfg.DATALOADER.NUM_WORKERS, batch_sampler=sampler, collate_fn=trivial_batch_collator, worker_init_fn=worker_init_reset_seed)
        return PairDataLoader(cfg, data_loader)
    else:
        raise ValueError('Unknown training sampler: {}'.format(sampler_name))
    return build_batch_data_loader(dataset, sampler, cfg.SOLVER.IMS_PER_BATCH, aspect_ratio_grouping=cfg.DATALOADER.ASPECT_RATIO_GROUPING, num_workers=cfg.DATALOADER.NUM_WORKERS)
def parse_binary_job(ingress: Queue, egress: Queue, root_directory: Path) -> None:
    """Worker loop: pull firmware paths from *ingress* and push parse results.

    For each path, a Binary is built; on failure the exception object itself
    is shipped so the consumer can report it. An empty poll just retries;
    KeyboardInterrupt ends the loop.
    """
    while True:
        try:
            path = ingress.get(timeout=0.5)
            try:
                parsed = Binary(path, gen_fw_path(path, root_directory))
            except Exception as exc:
                # Ship the failure alongside the path instead of crashing.
                parsed = exc
            egress.put((path, parsed))
        except queue.Empty:
            # Nothing queued within the timeout — poll again.
            pass
        except KeyboardInterrupt:
            break
@pytest.mark.skip('Disable tests that requires eager execution')
def test_quantizable_mha_export_backwards_pass():
    """Encodings exported before and after QAT on a transformer-style model
    with MultiHeadAttention must differ (training updated the ranges).

    Fix: the decorator line was garbled to a bare `.skip(...)` expression
    (a syntax error); restored as @pytest.mark.skip (pytest is the test
    framework this module runs under).
    """
    vocab_size = 20000
    maxlen = 200
    embed_dim = 32
    num_heads = 2
    ff_dim = 32
    # Token + learned positional embeddings feeding a single MHA block.
    inputs = keras.layers.Input(shape=(maxlen,))
    positions = tf.range(start=0, limit=maxlen, delta=1)
    positions = keras.layers.Embedding(input_dim=maxlen, output_dim=embed_dim)(positions)
    x = keras.layers.Embedding(input_dim=vocab_size, output_dim=embed_dim)(inputs)
    x = (x + positions)
    x = keras.layers.MultiHeadAttention(num_heads=num_heads, key_dim=embed_dim)(x, x)
    x = keras.layers.Dropout(0.1)(x)
    x = keras.layers.LayerNormalization(epsilon=1e-06)(x)
    x = keras.layers.Dense(ff_dim, activation='relu')(x)
    x = keras.layers.Dense(embed_dim)(x)
    x = keras.layers.Dropout(0.1)(x)
    x = keras.layers.LayerNormalization(epsilon=1e-06)(x)
    x = keras.layers.GlobalAveragePooling1D()(x)
    x = keras.layers.Dropout(0.1)(x)
    x = keras.layers.Dense(20, activation='relu')(x)
    x = keras.layers.Dropout(0.1)(x)
    outputs = keras.layers.Dense(2, activation='softmax')(x)
    functional_model = keras.Model(inputs=inputs, outputs=outputs)
    quantized_model = QuantizationSimModel(functional_model)
    train_inputs = np.random.randint(1, 20000, (1024, 200))
    train_outputs = np.random.randint(0, 2, (1024,))
    val_inputs = np.random.randint(1, 20000, (256, 200))
    val_outputs = np.random.randint(0, 2, (256,))
    # Baseline export before any QAT.
    quantized_model.compute_encodings((lambda m, _: m(val_inputs)), None)
    quantized_model.export('./data', 'pre_qat_mha')
    for wrapper in quantized_model.quant_wrappers():
        for quantizer in wrapper.input_quantizers:
            quantizer.enable()
        for quantizer in wrapper.output_quantizers:
            quantizer.enable()
    with open('./data/pre_qat_mha.encodings', 'r') as encodings_file:
        pre_encodings = json.load(encodings_file)
    # One epoch of QAT, then re-export.
    quantized_model.model.compile(optimizer='adam', loss='sparse_categorical_crossentropy', metrics=['accuracy'])
    quantized_model.model.fit(train_inputs, train_outputs, batch_size=32, epochs=1, validation_data=(val_inputs, val_outputs))
    quantized_model.compute_encodings((lambda m, _: m(val_inputs)), None)
    quantized_model.export('./data', 'post_qat_mha')
    with open('./data/post_qat_mha.encodings', 'r') as encodings_file:
        post_encodings = json.load(encodings_file)
    assert (pre_encodings != post_encodings)
def transfer_tasks_view(transfer_tasks: Dict[(SecretHash, TransferTask)], token_address: TokenAddress=None, channel_id: ChannelID=None) -> List[Dict[(str, Any)]]:
    """Flatten the given transfer tasks into a list of serializable dicts.

    Tasks without a resolvable transfer are skipped.  When ``token_address``
    is given, only transfers for that token are kept; otherwise, when
    ``channel_id`` is given, only transfers on that channel are kept.
    """
    flattened = []
    for secrethash, task in transfer_tasks.items():
        transfer = get_transfer_from_task(secrethash, task)
        if transfer is None:
            continue
        if token_address is not None:
            if transfer.token != token_address:
                continue
        elif channel_id is not None and transfer.balance_proof.channel_identifier != channel_id:
            continue
        flattened.append(flatten_transfer(transfer, task.role))
    return flattened
class mySequential(nn.Sequential, BaseNetwork):
    """Sequential container whose forward pass threads tuple outputs through.

    Unlike ``nn.Sequential``, a module that returns (or receives) a tuple has
    that tuple unpacked as positional arguments to the next module, so
    multi-output sub-modules can be chained.
    """

    def __init__(self, *args):
        super(mySequential, self).__init__(*args)

    def forward(self, *inputs):
        for module in self._modules.values():
            # Fix: isinstance() instead of `type(inputs) == tuple` — also
            # accepts tuple subclasses (e.g. namedtuple outputs).
            if isinstance(inputs, tuple):
                inputs = module(*inputs)
            else:
                inputs = module(inputs)
        return inputs
class MVTecDataset(Dataset):
    """MVTec anomaly-detection dataset.

    Yields ``(image, label, mask)`` triples where ``label`` is 0 for defect-free
    ("good") samples and 1 for anomalous ones.  Good samples get an all-zero
    mask; anomalous ones load their ground-truth segmentation mask.
    """

    def __init__(self, dataset_path='D:/dataset/mvtec_anomaly_detection', class_name='bottle', is_train=True, resize=256, cropsize=256):
        assert (class_name in CLASS_NAMES), 'class_name: {}, should be in {}'.format(class_name, CLASS_NAMES)
        self.dataset_path = dataset_path
        self.class_name = class_name
        self.is_train = is_train
        self.resize = resize
        self.cropsize = cropsize
        self.x, self.y, self.mask = self.load_dataset_folder()
        # Image pipeline: resize, center-crop, tensor, ImageNet normalization.
        self.transform_x = T.Compose([
            T.Resize(resize, Image.ANTIALIAS),
            T.CenterCrop(cropsize),
            T.ToTensor(),
            T.Normalize(mean=[0.485, 0.456, 0.406], std=[0.229, 0.224, 0.225]),
        ])
        # Mask pipeline: nearest-neighbour resize keeps the mask binary.
        self.transform_mask = T.Compose([
            T.Resize(resize, Image.NEAREST),
            T.CenterCrop(cropsize),
            T.ToTensor(),
        ])

    def __getitem__(self, idx):
        img_path, label, mask_path = self.x[idx], self.y[idx], self.mask[idx]
        image = self.transform_x(Image.open(img_path).convert('RGB'))
        if label == 0:
            # Good sample: no ground-truth mask on disk, use an empty one.
            mask = torch.zeros([1, self.cropsize, self.cropsize])
        else:
            mask = self.transform_mask(Image.open(mask_path))
        return (image, label, mask)

    def __len__(self):
        return len(self.x)

    def load_dataset_folder(self):
        """Collect (image paths, labels, mask paths) for the current phase."""
        phase = 'train' if self.is_train else 'test'
        img_dir = os.path.join(self.dataset_path, self.class_name, phase)
        gt_dir = os.path.join(self.dataset_path, self.class_name, 'ground_truth')
        x, y, mask = [], [], []
        for img_type in sorted(os.listdir(img_dir)):
            img_type_dir = os.path.join(img_dir, img_type)
            if not os.path.isdir(img_type_dir):
                continue
            img_fpath_list = sorted(
                os.path.join(img_type_dir, f)
                for f in os.listdir(img_type_dir)
                if f.endswith('.png')
            )
            x.extend(img_fpath_list)
            if img_type == 'good':
                y.extend([0] * len(img_fpath_list))
                mask.extend([None] * len(img_fpath_list))
            else:
                y.extend([1] * len(img_fpath_list))
                gt_type_dir = os.path.join(gt_dir, img_type)
                for fpath in img_fpath_list:
                    fname = os.path.splitext(os.path.basename(fpath))[0]
                    mask.append(os.path.join(gt_type_dir, fname + '_mask.png'))
        assert len(x) == len(y), 'number of x and y should be same'
        return (list(x), list(y), list(mask))
class SimulatorProcessStateExchange(SimulatorProcessBase):
    # Simulator worker process: plays card games, pushing game state to the
    # training server over a ZMQ PUSH socket and receiving the chosen action
    # back over a DEALER socket.

    def __init__(self, idx, pipe_c2s, pipe_s2c):
        super(SimulatorProcessStateExchange, self).__init__(idx)
        self.c2s = pipe_c2s  # address of the client->server (state) pipe
        self.s2c = pipe_s2c  # address of the server->client (action) pipe

    def run(self):
        # Run forever: build a player, connect both sockets, then loop over
        # game steps, resetting the player whenever a game ends.
        player = self._build_player()
        context = zmq.Context()
        c2s_socket = context.socket(zmq.PUSH)
        c2s_socket.setsockopt(zmq.IDENTITY, self.identity)
        c2s_socket.set_hwm(10)  # bound the send queue so a slow server applies backpressure
        c2s_socket.connect(self.c2s)
        s2c_socket = context.socket(zmq.DEALER)
        s2c_socket.setsockopt(zmq.IDENTITY, self.identity)
        s2c_socket.connect(self.s2c)
        player.reset()
        player.prepare()
        (r, is_over) = (0, False)  # reward and game-over flag from the previous step
        lstm_state = np.zeros([(1024 * 2)])  # recurrent state carried across steps
        while True:
            role_id = player.get_role_ID()
            if (role_id in ROLE_IDS_TO_TRAIN):
                # Our turn: assemble the observation and ask the server for an action.
                (prob_state, all_state, curr_handcards_value, last_cards_value, last_category) = (player.get_state_prob(), player.get_state_all_cards(), player.get_curr_handcards(), player.get_last_outcards(), player.get_last_outcategory_idx())
                prob_state = np.concatenate([Card.val2onehot60(curr_handcards_value), prob_state])
                # "Active" means no cards are on the table, i.e. we lead the trick.
                is_active = (False if (last_cards_value.size > 0) else True)
                mask = get_mask(to_char(curr_handcards_value), action_space, (None if is_active else to_char(last_cards_value)))
                if is_active:
                    mask[0] = 0  # the leading player is not allowed to pass
                last_two_cards = player.get_last_two_cards()
                last_two_cards_onehot = np.concatenate([Card.val2onehot60(last_two_cards[0]), Card.val2onehot60(last_two_cards[1])])
                # Send observation (and last reward/terminal flag), then block for the action.
                c2s_socket.send(dumps((self.identity, role_id, prob_state, all_state, last_two_cards_onehot, mask, (0 if is_active else 1), lstm_state, r, is_over)), copy=False)
                (action_idx, lstm_state) = loads(s2c_socket.recv(copy=False).bytes)
                (r, is_over, _) = player.step_manual(to_value(action_space[action_idx]))
            else:
                # Opponent's turn: let the environment play automatically.
                (_, r, _) = player.step_auto()
                is_over = (r != 0)
            if is_over:
                # Game finished: start a new one and reset the recurrent state.
                player.reset()
                player.prepare()
                lstm_state = np.zeros([(1024 * 2)])
def raise_winerror(winerror: (int | None)=None, *, filename: (str | None)=None, filename2: (str | None)=None) -> NoReturn:
    """Raise an OSError for a Windows error code.

    When ``winerror`` is None, the code and message of the calling thread's
    last error are used; otherwise the message for the given code is looked
    up.  Raises RuntimeError if no error information is available.
    """
    err = ffi.getwinerror() if winerror is None else ffi.getwinerror(winerror)
    if err is None:
        raise RuntimeError('No error set?')
    code, msg = err
    if winerror is None:
        winerror = code
    raise OSError(0, msg, filename, winerror, filename2)
def dual_basis_jellium_model(grid: Grid, spinless: bool=False, kinetic: bool=True, potential: bool=True, include_constant: bool=False, non_periodic: bool=False, period_cutoff: Optional[float]=None) -> FermionOperator:
    """Build the jellium (uniform electron gas) Hamiltonian in the dual
    (position) basis.

    Args:
        grid: spatial discretization of the simulation cell.
        spinless: if True, a single spin species; else spin up/down.
        kinetic: include the kinetic-energy terms.
        potential: include the electron-electron potential terms.
        include_constant: add a constant energy shift proportional to
            2.8372 / cell length.
        non_periodic: request a distance cutoff on the potential.
        period_cutoff: cutoff distance; defaults to volume**(1/dimensions).
            NOTE(review): within this block the computed cutoff is never
            read again — verify whether the non-periodic handling lives in
            a helper not visible here.

    Returns:
        FermionOperator: the requested Hamiltonian terms.
    """
    n_points = grid.num_points
    position_prefactor = ((2.0 * numpy.pi) / grid.volume_scale())
    operator = FermionOperator()
    spins = ([None] if spinless else [0, 1])
    if (potential and non_periodic and (period_cutoff is None)):
        period_cutoff = (grid.volume_scale() ** (1.0 / grid.dimensions))
    # Precompute per-grid-point vectors and orbital ids to avoid repeated
    # grid lookups inside the double loop below.
    position_vectors = {}
    momentum_vectors = {}
    momenta_squared_dict = {}
    orbital_ids = {}
    for indices in grid.all_points_indices():
        position_vectors[indices] = grid.position_vector(indices)
        momenta = grid.momentum_vector(indices)
        momentum_vectors[indices] = momenta
        momenta_squared_dict[indices] = momenta.dot(momenta)
        orbital_ids[indices] = {}
        for spin in spins:
            orbital_ids[indices][spin] = grid.orbital_id(indices, spin)
    grid_origin = ((0,) * grid.dimensions)
    coordinates_origin = position_vectors[grid_origin]
    # Coefficients depend only on the displacement from the origin, so compute
    # them once per displacement and reuse for every translated orbital pair.
    for grid_indices_b in grid.all_points_indices():
        coordinates_b = position_vectors[grid_indices_b]
        differences = (coordinates_b - coordinates_origin)
        kinetic_coefficient = 0.0
        potential_coefficient = 0.0
        for momenta_indices in grid.all_points_indices():
            momenta = momentum_vectors[momenta_indices]
            momenta_squared = momenta_squared_dict[momenta_indices]
            if (momenta_squared == 0):
                # Skip the zero-momentum mode (divergent potential term).
                continue
            cos_difference = numpy.cos(momenta.dot(differences))
            if kinetic:
                kinetic_coefficient += ((cos_difference * momenta_squared) / (2.0 * float(n_points)))
            if potential:
                potential_coefficient += ((position_prefactor * cos_difference) / momenta_squared)
        # Translate the origin/b orbital pair across the whole grid (periodic
        # wrap via modulo on each dimension) and emit the operator terms.
        for grid_indices_shift in grid.all_points_indices():
            orbital_a = {}
            orbital_b = {}
            shifted_index_1 = tuple([((grid_origin[i] + grid_indices_shift[i]) % grid.length[i]) for i in range(grid.dimensions)])
            shifted_index_2 = tuple([((grid_indices_b[i] + grid_indices_shift[i]) % grid.length[i]) for i in range(grid.dimensions)])
            for spin in spins:
                orbital_a[spin] = orbital_ids[shifted_index_1][spin]
                orbital_b[spin] = orbital_ids[shifted_index_2][spin]
            if kinetic:
                for spin in spins:
                    # Hopping term a^dagger_a a_b with the precomputed coefficient.
                    operators = ((orbital_a[spin], 1), (orbital_b[spin], 0))
                    operator += FermionOperator(operators, kinetic_coefficient)
            if potential:
                for sa in spins:
                    for sb in spins:
                        if (orbital_a[sa] == orbital_b[sb]):
                            # No self-interaction of an orbital with itself.
                            continue
                        # Density-density term n_a n_b.
                        operators = ((orbital_a[sa], 1), (orbital_a[sa], 0), (orbital_b[sb], 1), (orbital_b[sb], 0))
                        operator += FermionOperator(operators, potential_coefficient)
    if include_constant:
        operator += (FermionOperator.identity() * (2.8372 / (grid.volume_scale() ** (1.0 / grid.dimensions))))
    return operator
class LiveSessionTimeFlowController(TimeFlowController):
    """Time-flow controller for live sessions.

    Sleeps in real (wall-clock) time until the next scheduled time event is
    due, then publishes all events scheduled for that moment.
    """

    def __init__(self, scheduler: Scheduler, event_manager: EventManager, real_timer: RealTimer, empty_queue_event_notifier: EmptyQueueEventNotifier):
        super().__init__(event_manager, empty_queue_event_notifier)
        self.scheduler = scheduler
        self.real_timer = real_timer
        self.logger = qf_logger.getChild(self.__class__.__name__)

    def generate_time_event(self):
        """Wait for the next scheduled moment and publish its time events."""
        (time_events_list, next_time_of_event) = self.scheduler.get_next_time_events()
        for time_event in time_events_list:
            self.logger.info('Next time event: {}'.format(time_event))
        self.sleep_until(next_time_of_event)
        for time_event in time_events_list:
            self.logger.info('Wake up! Current event: {}, Next event: {}'.format(time_event.__class__.__name__, next_time_of_event))
            self.event_manager.publish(time_event)

    def sleep_until(self, time_of_next_time_event: datetime):
        """Block until the real timer reaches the given datetime."""
        now = self.real_timer.now()
        waiting_time = (time_of_next_time_event - now)
        self.logger.info('Going to sleep for {} '.format(waiting_time))
        # Fix: time.sleep() raises ValueError for negative durations; if the
        # event time is already past, do not sleep at all.
        time.sleep(max(waiting_time.total_seconds(), 0.0))
class ReduceScatterV_Req(Function):
    # Autograd Function issuing the asynchronous "request" half of a
    # (possibly variable-split) reduce-scatter; the matching Wait function
    # completes the collective. A dummy tensor keeps the autograd graph
    # connected while the real result travels through `myreq`.
    # NOTE(review): forward/backward on torch.autograd.Function are normally
    # decorated with @staticmethod — decorators may have been stripped from
    # this listing; verify against the original source.

    def forward(ctx, pg: dist.ProcessGroup, myreq: Request[Tensor], rsi: ReduceScatterVInfo, input: Tensor) -> Tensor:
        my_rank = dist.get_rank(pg)
        if (rsi.codecs is not None):
            # Compress the outgoing tensor before the collective.
            input = rsi.codecs.forward.encode(input)
        # This rank receives a shard sized by its own input split.
        output = input.new_empty(rsi.input_sizes[my_rank])
        if rsi.equal_splits:
            with record_function('## reduce_scatter_base ##'):
                req = dist._reduce_scatter_base(output, input, group=pg, async_op=True)
        else:
            with record_function('## reduce_scatter_v ##'):
                req = dist.reduce_scatter(output, list(torch.split(input, rsi.input_splits)), group=pg, async_op=True)
        # Stash the async handle and result on the shared request object for
        # the Wait function to pick up.
        myreq.req = req
        myreq.tensor = output
        myreq.wait_function = ReduceScatterV_Wait
        myreq.rsi = rsi
        ctx.myreq = myreq
        ctx.pg = pg
        return myreq.dummy_tensor

    def backward(ctx, *unused: Tensor) -> Tuple[(Optional[Tensor], ...)]:
        myreq = ctx.myreq
        assert (myreq.req is not None)
        # Wait for the backward collective started by the Wait function.
        myreq.req.wait()
        myreq.req = None
        grad_input = myreq.tensor
        rsi = myreq.rsi
        if (rsi.codecs is not None):
            grad_input = rsi.codecs.backward.decode(grad_input)
        if GRADIENT_DIVISION:
            # Average (rather than sum) gradients across ranks.
            grad_input.div_(dist.get_world_size(ctx.pg))
        # Drop references so the tensors can be freed.
        myreq.tensor = None
        myreq.dummy_tensor = None
        # Gradients only for the `input` argument; pg/myreq/rsi get None.
        return (None, None, None, grad_input)
def _shared_items(list1, list2):
    """Return the items of *list2* that also appear in *list1* (order of list2)."""
    lookup = set(list1)
    return [item for item in list2 if item in lookup]


def get_share_attributes(movie1, movie2):
    """Return the genres, directors and actors shared by two movies.

    Args:
        movie1, movie2: objects exposing ``genre``, ``director`` and
            ``actor`` list attributes.

    Returns:
        Tuple of three lists: (shared_genre, shared_director, shared_actor).

    Fixes over the previous version: the triple-duplicated branch ladder is
    collapsed into one helper, and the multi-element case now returns a real
    list instead of a lazy one-shot ``filter`` object, so every path returns
    a consistent, re-iterable type.
    """
    shared_genre = _shared_items(movie1.genre, movie2.genre)
    shared_director = _shared_items(movie1.director, movie2.director)
    shared_actor = _shared_items(movie1.actor, movie2.actor)
    return (shared_genre, shared_director, shared_actor)
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.