code stringlengths 281 23.7M |
|---|
def provide_schema(overlay: Type[Overlay[Sc]], mediator: Mediator, loc_map: LocMap) -> Sc:
    """Build a schema for *loc_map* by stacking overlays found along the MRO.

    Starts from the overlay mandatorily provided for *loc_map* itself, then —
    when a TypeHintLoc holds an actual class — walks that class's MRO and
    merges every overlay a parent class can provide underneath the stack.
    Parents that cannot provide an overlay are skipped silently.
    """
    result = mediator.mandatory_provide(OverlayRequest(loc_map=loc_map, overlay_cls=overlay))
    has_class_hint = loc_map.has(TypeHintLoc) and isinstance(loc_map[TypeHintLoc].type, type)
    if has_class_hint:
        for ancestor in loc_map[TypeHintLoc].type.mro()[1:]:
            request = OverlayRequest(loc_map=loc_map.add(TypeHintLoc(type=ancestor)), overlay_cls=overlay)
            try:
                ancestor_overlay = mediator.delegating_provide(request)
            except CannotProvide:
                continue
            result = ancestor_overlay.merge(result)
    return result.to_schema()
class Node2vec(object):
    """Learn node embeddings via node2vec random walks + skip-gram Word2Vec.

    When ``dw=True`` the model degrades to DeepWalk: hierarchical softmax is
    enabled and the return/in-out parameters p and q are forced to 1.0.
    Extra ``kwargs`` are forwarded to gensim's ``Word2Vec``.
    """

    def __init__(self, graph, path_length, num_paths, dim, p=1.0, q=1.0, dw=False, **kwargs):
        kwargs.setdefault('workers', 1)
        if dw:
            # DeepWalk = node2vec with hierarchical softmax and p = q = 1.
            kwargs['hs'] = 1
            p = 1.0
            q = 1.0
        self.graph = graph
        if dw:
            self.walker = walker.BasicWalker(graph, workers=kwargs['workers'])
        else:
            self.walker = walker.Walker(graph, p=p, q=q, workers=kwargs['workers'])
        print('Preprocess transition probs...')
        self.walker.preprocess_transition_probs()
        sentences = self.walker.simulate_walks(num_walks=num_paths, walk_length=path_length)
        kwargs['sentences'] = sentences
        kwargs.setdefault('min_count', 0)
        kwargs.setdefault('size', dim)
        kwargs['sg'] = 1  # skip-gram, as required by node2vec
        self.size = kwargs['size']
        print('Learning representation...')
        word2vec = Word2Vec(**kwargs)
        self.vectors = {word: word2vec.wv[word] for word in graph.G.nodes()}
        del word2vec  # free the full model; only the vectors are kept

    def get_embeddings(self):
        """Return an items view over {node: embedding vector}."""
        return self.vectors.items()

    def save_embeddings(self, filename):
        """Write embeddings in word2vec text format.

        First line is '<node_count> <dim>'; each following line is the node
        id followed by its vector components, space-separated.
        """
        # Fixed: use a context manager so the file is closed even if a
        # write fails (the original leaked the handle on error).
        with open(filename, 'w') as fout:
            fout.write('{} {}\n'.format(len(self.vectors), self.size))
            for node, vec in self.vectors.items():
                fout.write('{} {}\n'.format(node, ' '.join(str(x) for x in vec)))
def save_images(pred, save_path):
    """Write a prediction map to *save_path* as an uncompressed PNG."""
    out = pred.squeeze() if len(pred.shape) > 3 else pred
    if isinstance(out, torch.Tensor):
        out = out.cpu().numpy().astype(np.uint8)
    if out.shape[0] < 4:
        # assumes channel-first (C, H, W) layout here; move channels last
        # for OpenCV — TODO confirm against callers
        out = np.transpose(out, (1, 2, 0))
    cv2.imwrite(save_path, out, [cv2.IMWRITE_PNG_COMPRESSION, 0])
def cov_devY_devX(x, y, sigma, l, n, m):
    """Covariance between derivative observations: d/dy_n against d/dx_m.

    The cross term is always present; when the two derivative dimensions
    coincide (m == n) an extra covariance/length-scale term is added.
    """
    cross_term = ((x[n] - y[n]) / (l[n] ** 2)) * cov_devX_y(x, y, sigma, l, m)
    if m == n:
        return covariance(x, y, sigma, l) / (l[m] ** 2) + cross_term
    return cross_term
def get_attn_bias_and_cat(x_list, branges=None):
    """Concatenate per-group token tensors into one batch-1 sequence and
    return it with a matching block-diagonal attention mask.

    x_list: list of (B_i, N_i, ...) tensors; branges: optional per-group
    batch index ranges used to select rows before flattening.
    Masks are memoized in the file-global `attn_bias_cache` keyed by the
    tuple of (batch_size, seq_len) shapes.
    """
    if branges is not None:
        batch_sizes = [b.shape[0] for b in branges]
    else:
        batch_sizes = [x.shape[0] for x in x_list]
    all_shapes = tuple((b, x.shape[1]) for b, x in zip(batch_sizes, x_list))
    # Fixed idiom: membership test directly on the dict (was `.keys()`),
    # and the seqlens list is built with a comprehension.
    if all_shapes not in attn_bias_cache:
        seqlens = [x.shape[1] for b, x in zip(batch_sizes, x_list) for _ in range(b)]
        attn_bias = fmha.BlockDiagonalMask.from_seqlens(seqlens)
        attn_bias._batch_sizes = batch_sizes
        attn_bias_cache[all_shapes] = attn_bias
    if branges is not None:
        cat_tensors = index_select_cat([x.flatten(1) for x in x_list], branges).view(1, -1, x_list[0].shape[-1])
    else:
        tensors_bs1 = tuple(x.reshape([1, -1, *x.shape[2:]]) for x in x_list)
        cat_tensors = torch.cat(tensors_bs1, dim=1)
    return (attn_bias_cache[all_shapes], cat_tensors)
class BaseUnitTestLithiumIon():
    """Shared well-posedness test battery for PyBaMM lithium-ion models.

    Subclasses are expected to set ``self.model`` to a model class; each
    test builds the model with one option combination and runs PyBaMM's
    built-in ``check_well_posedness``. The test names describe the option
    under test; no numerical results are asserted.
    """

    def check_well_posedness(self, options):
        """Instantiate ``self.model`` with *options* and run its checks."""
        model = self.model(options)
        model.check_well_posedness()

    # --- thermal / geometry options ---

    def test_well_posed(self):
        options = {'thermal': 'isothermal'}
        self.check_well_posedness(options)

    def test_well_posed_isothermal_heat_source(self):
        options = {'calculate heat source for isothermal models': 'true', 'thermal': 'isothermal'}
        self.check_well_posedness(options)

    def test_well_posed_2plus1D(self):
        options = {'current collector': 'potential pair', 'dimensionality': 1}
        self.check_well_posedness(options)
        options = {'current collector': 'potential pair', 'dimensionality': 2}
        self.check_well_posedness(options)

    def test_well_posed_lumped_thermal_model_1D(self):
        options = {'thermal': 'lumped'}
        self.check_well_posedness(options)

    def test_well_posed_x_full_thermal_model(self):
        options = {'thermal': 'x-full'}
        self.check_well_posedness(options)

    def test_well_posed_lumped_thermal_1plus1D(self):
        options = {'current collector': 'potential pair', 'dimensionality': 1, 'thermal': 'lumped'}
        self.check_well_posedness(options)

    def test_well_posed_lumped_thermal_2plus1D(self):
        options = {'current collector': 'potential pair', 'dimensionality': 2, 'thermal': 'lumped'}
        self.check_well_posedness(options)

    def test_well_posed_thermal_1plus1D(self):
        options = {'current collector': 'potential pair', 'dimensionality': 1, 'thermal': 'x-lumped'}
        self.check_well_posedness(options)

    def test_well_posed_thermal_2plus1D(self):
        options = {'current collector': 'potential pair', 'dimensionality': 2, 'thermal': 'x-lumped'}
        self.check_well_posedness(options)

    def test_well_posed_contact_resistance(self):
        options = {'contact resistance': 'true'}
        self.check_well_posedness(options)

    # --- particle concentration profile options ---

    def test_well_posed_particle_uniform(self):
        options = {'particle': 'uniform profile'}
        self.check_well_posedness(options)

    def test_well_posed_particle_quadratic(self):
        options = {'particle': 'quadratic profile'}
        self.check_well_posedness(options)

    def test_well_posed_particle_quartic(self):
        options = {'particle': 'quartic profile'}
        self.check_well_posedness(options)

    def test_well_posed_particle_mixed(self):
        # tuple options apply per-electrode: (negative, positive)
        options = {'particle': ('Fickian diffusion', 'quartic profile')}
        self.check_well_posedness(options)

    # --- interface utilisation options ---

    def test_well_posed_constant_utilisation(self):
        options = {'interface utilisation': 'constant'}
        self.check_well_posedness(options)

    def test_well_posed_current_driven_utilisation(self):
        options = {'interface utilisation': 'current-driven'}
        self.check_well_posedness(options)

    def test_well_posed_mixed_utilisation(self):
        options = {'interface utilisation': ('current-driven', 'constant')}
        self.check_well_posedness(options)

    # --- loss of active material options ---

    def test_well_posed_loss_active_material_stress_negative(self):
        options = {'loss of active material': ('stress-driven', 'none')}
        self.check_well_posedness(options)

    def test_well_posed_loss_active_material_stress_positive(self):
        options = {'loss of active material': ('none', 'stress-driven')}
        self.check_well_posedness(options)

    def test_well_posed_loss_active_material_stress_both(self):
        options = {'loss of active material': 'stress-driven'}
        self.check_well_posedness(options)

    def test_well_posed_loss_active_material_reaction(self):
        options = {'loss of active material': 'reaction-driven'}
        self.check_well_posedness(options)

    def test_well_posed_loss_active_material_stress_reaction(self):
        options = {'loss of active material': 'stress and reaction-driven'}
        self.check_well_posedness(options)

    def test_well_posed_loss_active_material_current_negative(self):
        options = {'loss of active material': ('current-driven', 'none')}
        self.check_well_posedness(options)

    def test_well_posed_loss_active_material_current_positive(self):
        options = {'loss of active material': ('none', 'current-driven')}
        self.check_well_posedness(options)

    # --- surface form / kinetics options ---

    def test_well_posed_surface_form_differential(self):
        options = {'surface form': 'differential'}
        self.check_well_posedness(options)

    def test_well_posed_surface_form_algebraic(self):
        options = {'surface form': 'algebraic'}
        self.check_well_posedness(options)

    def test_well_posed_kinetics_asymmetric_butler_volmer(self):
        options = {'intercalation kinetics': 'asymmetric Butler-Volmer'}
        self.check_well_posedness(options)

    def test_well_posed_kinetics_linear(self):
        options = {'intercalation kinetics': 'linear'}
        self.check_well_posedness(options)

    def test_well_posed_kinetics_marcus(self):
        options = {'intercalation kinetics': 'Marcus'}
        self.check_well_posedness(options)

    def test_well_posed_kinetics_mhc(self):
        options = {'intercalation kinetics': 'Marcus-Hush-Chidsey'}
        self.check_well_posedness(options)

    # --- SEI options ---

    def test_well_posed_sei_constant(self):
        options = {'SEI': 'constant'}
        self.check_well_posedness(options)

    def test_well_posed_sei_reaction_limited(self):
        options = {'SEI': 'reaction limited'}
        self.check_well_posedness(options)

    def test_well_posed_asymmetric_sei_reaction_limited(self):
        options = {'SEI': 'reaction limited (asymmetric)'}
        self.check_well_posedness(options)

    def test_well_posed_sei_reaction_limited_average_film_resistance(self):
        options = {'SEI': 'reaction limited', 'SEI film resistance': 'average'}
        self.check_well_posedness(options)

    def test_well_posed_asymmetric_sei_reaction_limited_average_film_resistance(self):
        options = {'SEI': 'reaction limited (asymmetric)', 'SEI film resistance': 'average'}
        self.check_well_posedness(options)

    def test_well_posed_sei_solvent_diffusion_limited(self):
        options = {'SEI': 'solvent-diffusion limited'}
        self.check_well_posedness(options)

    def test_well_posed_sei_electron_migration_limited(self):
        options = {'SEI': 'electron-migration limited'}
        self.check_well_posedness(options)

    def test_well_posed_sei_interstitial_diffusion_limited(self):
        options = {'SEI': 'interstitial-diffusion limited'}
        self.check_well_posedness(options)

    def test_well_posed_sei_ec_reaction_limited(self):
        options = {'SEI': 'ec reaction limited', 'SEI porosity change': 'true'}
        self.check_well_posedness(options)

    def test_well_posed_sei_asymmetric_ec_reaction_limited(self):
        options = {'SEI': 'ec reaction limited (asymmetric)', 'SEI porosity change': 'true'}
        self.check_well_posedness(options)

    # --- particle mechanics options ---

    def test_well_posed_mechanics_negative_cracking(self):
        options = {'particle mechanics': ('swelling and cracking', 'none')}
        self.check_well_posedness(options)

    def test_well_posed_mechanics_positive_cracking(self):
        options = {'particle mechanics': ('none', 'swelling and cracking')}
        self.check_well_posedness(options)

    def test_well_posed_mechanics_both_cracking(self):
        options = {'particle mechanics': 'swelling and cracking'}
        self.check_well_posedness(options)

    def test_well_posed_mechanics_both_swelling_only(self):
        options = {'particle mechanics': 'swelling only'}
        self.check_well_posedness(options)

    def test_well_posed_mechanics_stress_induced_diffusion(self):
        options = {'particle mechanics': 'swelling only', 'stress-induced diffusion': 'true'}
        self.check_well_posedness(options)

    def test_well_posed_mechanics_stress_induced_diffusion_mixed(self):
        options = {'particle mechanics': 'swelling only', 'stress-induced diffusion': ('true', 'false')}
        self.check_well_posedness(options)

    # --- SEI on cracks (requires cracking mechanics) ---

    def test_well_posed_sei_reaction_limited_on_cracks(self):
        options = {'SEI': 'reaction limited', 'SEI on cracks': 'true', 'particle mechanics': 'swelling and cracking'}
        self.check_well_posedness(options)

    def test_well_posed_sei_solvent_diffusion_limited_on_cracks(self):
        options = {'SEI': 'solvent-diffusion limited', 'SEI on cracks': 'true', 'particle mechanics': 'swelling and cracking'}
        self.check_well_posedness(options)

    def test_well_posed_sei_electron_migration_limited_on_cracks(self):
        options = {'SEI': 'electron-migration limited', 'SEI on cracks': 'true', 'particle mechanics': 'swelling and cracking'}
        self.check_well_posedness(options)

    def test_well_posed_sei_interstitial_diffusion_limited_on_cracks(self):
        options = {'SEI': 'interstitial-diffusion limited', 'SEI on cracks': 'true', 'particle mechanics': 'swelling and cracking'}
        self.check_well_posedness(options)

    def test_well_posed_sei_ec_reaction_limited_on_cracks(self):
        options = {'SEI': 'ec reaction limited', 'SEI porosity change': 'true', 'SEI on cracks': 'true', 'particle mechanics': 'swelling and cracking'}
        self.check_well_posedness(options)

    # --- lithium plating options ---

    def test_well_posed_reversible_plating(self):
        options = {'lithium plating': 'reversible'}
        self.check_well_posedness(options)

    def test_well_posed_irreversible_plating(self):
        options = {'lithium plating': 'irreversible'}
        self.check_well_posedness(options)

    def test_well_posed_partially_reversible_plating(self):
        options = {'lithium plating': 'partially reversible'}
        self.check_well_posedness(options)

    def test_well_posed_reversible_plating_with_porosity(self):
        options = {'lithium plating': 'reversible', 'lithium plating porosity change': 'true'}
        self.check_well_posedness(options)

    def test_well_posed_irreversible_plating_with_porosity(self):
        options = {'lithium plating': 'irreversible', 'lithium plating porosity change': 'true'}
        self.check_well_posedness(options)

    def test_well_posed_partially_reversible_plating_with_porosity(self):
        options = {'lithium plating': 'partially reversible', 'lithium plating porosity change': 'true'}
        self.check_well_posedness(options)

    # --- external circuit / operating mode options ---

    def test_well_posed_discharge_energy(self):
        options = {'calculate discharge energy': 'true'}
        self.check_well_posedness(options)

    def test_well_posed_external_circuit_voltage(self):
        options = {'operating mode': 'voltage'}
        self.check_well_posedness(options)

    def test_well_posed_external_circuit_power(self):
        options = {'operating mode': 'power'}
        self.check_well_posedness(options)

    def test_well_posed_external_circuit_differential_power(self):
        options = {'operating mode': 'differential power'}
        self.check_well_posedness(options)

    def test_well_posed_external_circuit_resistance(self):
        options = {'operating mode': 'resistance'}
        self.check_well_posedness(options)

    def test_well_posed_external_circuit_differential_resistance(self):
        options = {'operating mode': 'differential resistance'}
        self.check_well_posedness(options)

    def test_well_posed_external_circuit_cccv(self):
        options = {'operating mode': 'CCCV'}
        self.check_well_posedness(options)

    def test_well_posed_external_circuit_function(self):
        # user-supplied operating mode: a callable of the model variables
        def external_circuit_function(variables):
            I = variables['Current [A]']
            V = variables['Voltage [V]']
            return ((V + I) - pybamm.FunctionParameter('Function', {'Time [s]': pybamm.t}, print_name='test_fun'))
        options = {'operating mode': external_circuit_function}
        self.check_well_posedness(options)

    # --- multi-phase / OCP / misc options ---

    def test_well_posed_particle_phases(self):
        options = {'particle phases': '2'}
        self.check_well_posedness(options)
        options = {'particle phases': ('2', '1')}
        self.check_well_posedness(options)
        options = {'particle phases': ('1', '2')}
        self.check_well_posedness(options)

    def test_well_posed_particle_phases_sei(self):
        options = {'particle phases': '2', 'SEI': 'ec reaction limited'}
        self.check_well_posedness(options)

    def test_well_posed_current_sigmoid_ocp(self):
        options = {'open-circuit potential': 'current sigmoid'}
        self.check_well_posedness(options)

    def test_well_posed_msmr(self):
        options = {'open-circuit potential': 'MSMR', 'particle': 'MSMR', 'number of MSMR reactions': ('6', '4'), 'intercalation kinetics': 'MSMR', 'surface form': 'differential'}
        self.check_well_posedness(options)

    def test_well_posed_current_sigmoid_exchange_current(self):
        options = {'exchange-current density': 'current sigmoid'}
        self.check_well_posedness(options)

    def test_well_posed_current_sigmoid_diffusivity(self):
        options = {'diffusivity': 'current sigmoid'}
        self.check_well_posedness(options)

    def test_well_posed_psd(self):
        options = {'particle size': 'distribution', 'surface form': 'algebraic'}
        self.check_well_posedness(options)

    def test_well_posed_composite_kinetic_hysteresis(self):
        # nested tuples give per-phase options within each electrode
        options = {'particle phases': ('2', '1'), 'exchange-current density': (('current sigmoid', 'single'), 'current sigmoid'), 'open-circuit potential': (('current sigmoid', 'single'), 'single')}
        self.check_well_posedness(options)

    def test_well_posed_composite_diffusion_hysteresis(self):
        options = {'particle phases': ('2', '1'), 'diffusivity': (('current sigmoid', 'current sigmoid'), 'current sigmoid'), 'open-circuit potential': (('current sigmoid', 'single'), 'single')}
        self.check_well_posedness(options)
class PULSAR(FBD_view.FunctionBlockView):
    """Function block emitting a periodic boolean pulse (TON ms on, TOFF ms off)."""

    # Default on/off durations in milliseconds.
    _ton = 1000
    _toff = 1000

    # NOTE(review): the bare `_attribute_decorator(...)` expressions below look
    # like decorators whose leading `@` (plus the matching property/setter
    # decorators on the paired defs) were lost in extraction. As written they
    # are no-op expression statements, the second `def ton`/`def toff` simply
    # overwrites the first, and `self.ton + self.toff` in do() would add bound
    # methods. Confirm against the original source before relying on this.
    _attribute_decorator('WidgetSpecific', 'Defines the actual TON value', int, {'possible_values': '', 'min': 0, 'max': 65535, 'default': 0, 'step': 1})

    def ton(self):
        # getter for the TON duration
        return self._ton

    def ton(self, value):
        # setter for the TON duration (shadows the getter above as written)
        self._ton = value

    _attribute_decorator('WidgetSpecific', 'Defines the actual TOFF value', int, {'possible_values': '', 'min': 0, 'max': 65535, 'default': 0, 'step': 1})

    def toff(self):
        # getter for the TOFF duration
        return self._toff

    def toff(self, value):
        # setter for the TOFF duration (shadows the getter above as written)
        self._toff = value

    # epoch (seconds) marking the start of the pulse cycle; set in __init__
    tstart = 0

    def __init__(self, name, *args, **kwargs):
        FBD_view.FunctionBlockView.__init__(self, name, *args, **kwargs)
        self.outputs['OUT'].set_value(False)
        self.tstart = time.time()

    # NOTE(review): presumably `@_model.FunctionBlock.decorate_process(['OUT'])`
    # — another stripped decorator for do(); confirm against the original.
    _model.FunctionBlock.decorate_process(['OUT'])

    def do(self):
        # True during the first `ton` ms of each (ton + toff) ms cycle.
        OUT = ((int(((time.time() - self.tstart) * 1000)) % (self.ton + self.toff)) < self.ton)
        return OUT
# NOTE(review): this orphan tuple expression looks like the argument list of a
# stripped decorator — probably `@patch('pyinaturalist.session.REFRESH_LIMITER',
# Limiter(RequestRate(1, 2)))`, which would rate-limit refreshes to 1 per 2s
# for the test. As written it is a no-op statement; confirm against the
# original source.
('pyinaturalist.session.REFRESH_LIMITER', Limiter(RequestRate(1, 2)))

def test_get_refresh_params():
    """While the refresh rate limit is active, repeated refreshes of the same
    key gain an incrementing 'v' param; after the window expires the params
    reset to plain {'refresh': True}."""
    assert (get_refresh_params('test') == {'refresh': True})
    assert (get_refresh_params('test2') == {'refresh': True})
    assert (get_refresh_params('test') == {'refresh': True, 'v': 1})
    assert (get_refresh_params('test') == {'refresh': True, 'v': 2})
    sleep(2)  # let the 1-per-2-seconds limiter window expire
    assert (get_refresh_params('test') == {'refresh': True})
class TestMapWindow(EndianTest):
    """Pack/unpack round-trip checks for the X11 core MapWindow request."""

    def setUp(self):
        # Fixed: the window id literal was missing (syntax error). The value
        # is reconstructed from req_bin_0, which is little-endian:
        # opcode 8 (MapWindow), pad, length 2, then the 32-bit window id
        # b'\xccF\xa5T' == 0x54a546cc.
        self.req_args_0 = {'window': 0x54a546cc}
        self.req_bin_0 = b'\x08\x00\x02\x00\xccF\xa5T'

    def testPackRequest0(self):
        """Packing the args must reproduce the reference byte string."""
        packed = request.MapWindow._request.to_binary(*(), **self.req_args_0)
        self.assertBinaryEqual(packed, self.req_bin_0)

    def testUnpackRequest0(self):
        """Parsing the reference bytes must recover the args with no remainder."""
        (args, remain) = request.MapWindow._request.parse_binary(self.req_bin_0, dummy_display, 1)
        self.assertBinaryEmpty(remain)
        self.assertEqual(args, self.req_args_0)
def mobi_header_fields(mobi_content):
    """Parse identifying fields from a MOBI file's record-0 header.

    Returns (id, version, title, locations, dict_input, dict_output) where
    `locations` is the reading-location count (text_length // 150 + 1) and
    `title` is the raw bytes at the full-name offset/length.
    """
    palm = PalmDB(mobi_content)
    header = palm.readsection(0)
    ident = struct.unpack_from('4s', header, 16)[0]      # 'MOBI' magic
    version = struct.unpack_from('>L', header, 36)[0]
    dict_input = struct.unpack_from('>L', header, 96)[0]
    dict_output = struct.unpack_from('>L', header, 100)[0]
    text_length = struct.unpack('>I', header[4:8])[0]
    # Fixed: floor division — a location count must be an integer; plain '/'
    # produced a float under Python 3.
    locations = text_length // 150 + 1
    toff, tlen = struct.unpack('>II', header[84:92])     # full-name offset/length
    title = header[toff:toff + tlen]
    return (ident, version, title, locations, dict_input, dict_output)
def test_pyproject_toml_save(pyproject_toml: Path, poetry_section: str, build_system_section: str) -> None:
    """Mutations to the poetry config and build-system must survive save/reload."""
    pyproject = PyProjectTOML(pyproject_toml)
    new_name = str(uuid.uuid4())
    new_backend = str(uuid.uuid4())
    new_requirement = str(uuid.uuid4())
    pyproject.poetry_config['name'] = new_name
    pyproject.build_system.build_backend = new_backend
    pyproject.build_system.requires.append(new_requirement)
    pyproject.save()
    # Re-read from disk to prove the changes were persisted, not just cached.
    reloaded = PyProjectTOML(pyproject_toml)
    assert isinstance(reloaded.poetry_config['name'], str)
    assert reloaded.poetry_config['name'] == new_name
    assert reloaded.build_system.build_backend == new_backend
    assert new_requirement in reloaded.build_system.requires
class Base:
    """Base class for REDCap API client objects.

    Stores connection settings (URL, token, SSL verification, extra request
    kwargs) and lazily fetches/caches project metadata used by higher-level
    API methods.

    Fixed: the `@property` and `@staticmethod` decorators had been stripped.
    They are restored based on how the members are used inside this class
    (`self.metadata` is iterated as an attribute, `self.token`/`self.url`/
    `self.def_field`/`self.is_longitudinal` are read as attributes, and the
    no-`self` helpers are called through `self.`, which only works for
    staticmethods).
    """

    def __init__(self, url: str, token: str, verify_ssl: Union[bool, str] = True, **request_kwargs):
        """Validate and store connection settings; initialize lazy caches."""
        self._validate_url_and_token(url, token)
        self._url = url
        self._token = token
        self.verify_ssl = verify_ssl
        self._validate_request_kwargs(**request_kwargs)
        self._request_kwargs = request_kwargs
        # Lazily populated caches backing the read-only properties below.
        self._metadata: Optional[Json] = None
        self._forms: Optional[List[str]] = None
        self._field_names: Optional[List[str]] = None
        self._def_field: Optional[str] = None
        self._is_longitudinal: Optional[bool] = None

    @property
    def url(self) -> str:
        """API endpoint URL (always ends with '/api/')."""
        return self._url

    @property
    def token(self) -> str:
        """32-character project API token."""
        return self._token

    @property
    def metadata(self) -> Json:
        """Project data dictionary, fetched from the API once and cached."""
        if self._metadata is None:
            payload = self._initialize_payload('metadata', format_type='json')
            self._metadata = cast(Json, self._call_api(payload, return_type='json'))
        return self._metadata

    @property
    def forms(self) -> List[str]:
        """Unique form names from the metadata (order not guaranteed)."""
        if self._forms is None:
            self._forms = list(set(self._filter_metadata(key='form_name')))
        return self._forms

    @property
    def field_names(self) -> List[str]:
        """All field names from the metadata, in data-dictionary order."""
        if self._field_names is None:
            self._field_names = self._filter_metadata(key='field_name')
        return self._field_names

    @property
    def def_field(self) -> str:
        """The record-identifier field (first field in the metadata)."""
        if self._def_field is None:
            self._def_field = self.field_names[0]
        return self._def_field

    @property
    def is_longitudinal(self) -> bool:
        """Whether the project is longitudinal.

        Probed by calling the formEventMapping endpoint; a RedcapError from
        that call means the project is classic (non-longitudinal).
        """
        if self._is_longitudinal is None:
            try:
                payload = self._initialize_payload(content='formEventMapping', format_type='json')
                self._call_api(payload, return_type='json')
                self._is_longitudinal = True
            except RedcapError:
                self._is_longitudinal = False
        return self._is_longitudinal

    @staticmethod
    def _validate_url_and_token(url: str, token: str) -> None:
        """Assert that *url* ends with '/api/' and *token* is 32 chars long."""
        assert url, 'Error! REDCap URL is missing'
        assert token, 'Error! REDCap token is missing'
        url_actual_last_5 = url[-5:]
        url_expected_last_5 = '/api/'
        assert url_actual_last_5 == url_expected_last_5, (f"Incorrect url format '{url}', url must end with", f'{url_expected_last_5}')
        actual_token_len = len(token)
        expected_token_len = 32
        assert actual_token_len == expected_token_len, (f"Incorrect token format '{token}', token must must be", f'{expected_token_len} characters long')

    @staticmethod
    def _validate_request_kwargs(**request_kwargs):
        """Reject request kwargs that would collide with ones set internally."""
        hardcoded_kwargs = ['url', 'data', 'verify, verify_ssl', 'return_headers', 'files', 'file']
        unallowed_kwargs = [kwarg for kwarg in request_kwargs if kwarg in hardcoded_kwargs]
        assert len(unallowed_kwargs) == 0, f'Not allowed to define {unallowed_kwargs} when initiating object'

    @staticmethod
    def _read_csv(buf: StringIO, **df_kwargs) -> 'pd.DataFrame':
        """Read CSV text into a DataFrame, mapping empty input to an empty frame."""
        import pandas as pd
        from pandas.errors import EmptyDataError
        try:
            dataframe = pd.read_csv(buf, **df_kwargs)
        except EmptyDataError:
            dataframe = pd.DataFrame()
        return dataframe

    @staticmethod
    def _lookup_return_type(format_type: Literal['json', 'csv', 'xml', 'df'], request_type: Literal['export', 'import', 'delete'], import_records_format: Optional[Literal['count', 'ids', 'auto_ids', 'nothing']] = None) -> Literal['json', 'str', 'int', 'count_dict', 'ids_list', 'empty_json']:
        """Map a request's format/type to the parser `_call_api` should use."""
        if format_type in ['csv', 'xml', 'df']:
            return 'str'
        if format_type == 'json':
            if request_type == 'export':
                return 'json'
            if request_type in ['import', 'delete'] and not import_records_format:
                return 'int'
            if import_records_format in ['count', 'auto_ids']:
                return 'count_dict'
            if import_records_format == 'ids':
                return 'ids_list'
            if import_records_format == 'nothing':
                return 'empty_json'
        raise ValueError(f'Invalid format_type: {format_type}')

    # NOTE(review): the two stub signatures below look like typing.overload
    # declarations whose @overload decorators were stripped. They are left
    # undecorated — and are simply shadowed by the implementation that
    # follows, matching the original runtime behavior — because `overload`
    # is not visibly imported in this chunk. Restore `@overload` if the
    # import exists at the top of the file.
    def _filter_metadata(self, key: str, field_name: None = None) -> list:
        ...

    def _filter_metadata(self, key: str, field_name: str) -> str:
        ...

    def _filter_metadata(self, key: str, field_name: Optional[str] = None):
        """Extract column *key* from the metadata.

        With *field_name*: return that field's value as a string ('' and a
        printed warning if the field is absent). Without: return the column
        for every metadata row as a list.
        """
        res: Union[list, str]
        if field_name:
            try:
                res = str([row[key] for row in self.metadata if row['field_name'] == field_name][0])
            except IndexError:
                print(f'{key} not in metadata field: {field_name}')
                return ''
        else:
            res = [row[key] for row in self.metadata]
        return res

    def _initialize_payload(self, content: str, format_type: Optional[Literal['json', 'csv', 'xml', 'df']] = None, return_format_type: Optional[Literal['json', 'csv', 'xml']] = None, record_type: Literal['flat', 'eav'] = 'flat') -> Dict[str, str]:
        """Build the common POST payload (token/content plus format fields)."""
        payload = {'token': self.token, 'content': content}
        if format_type:
            # 'df' is a client-side convenience; the API itself speaks csv.
            payload['format'] = 'csv' if format_type == 'df' else format_type
        if return_format_type:
            payload['returnFormat'] = return_format_type
        if content == 'record':
            payload['type'] = record_type
        return payload

    def _initialize_import_payload(self, to_import: Union[List[dict], str, 'pd.DataFrame'], import_format: Literal['json', 'csv', 'xml', 'df'], return_format_type: Literal['json', 'csv', 'xml'], content: str) -> Dict[str, Any]:
        """Build an import payload, serializing *to_import* per *import_format*."""
        payload = self._initialize_payload(content=content, return_format_type=return_format_type)
        if import_format == 'df':
            to_import = cast('pd.DataFrame', to_import)
            buf = StringIO()
            # keep the index only when it carries a real (named) field
            has_named_index = to_import.index.name is not None
            to_import.to_csv(buf, index=has_named_index)
            payload['data'] = buf.getvalue()
            buf.close()
            import_format = 'csv'
        elif import_format == 'json':
            payload['data'] = json.dumps(to_import, separators=(',', ':'))
        else:
            # csv/xml payloads are passed through as raw strings
            to_import = cast('str', to_import)
            payload['data'] = to_import
        payload['format'] = import_format
        return payload

    def _return_data(self, response: Union[Json, str], content: Literal['arm', 'dag', 'event', 'exportFieldNames', 'formEventMapping', 'instrument', 'log', 'metadata', 'participantList', 'project', 'record', 'report', 'user', 'userDagMapping', 'userRole', 'userRoleMapping', 'repeatingFormsEvents'], format_type: Literal['json', 'csv', 'xml', 'df'], df_kwargs: Optional[Dict[str, Any]] = None, record_type: Literal['flat', 'eav'] = 'flat'):
        """Return *response* as-is, or as a DataFrame when format_type == 'df'."""
        if format_type != 'df':
            return response
        if not df_kwargs:
            df_kwargs = {}
        # Pick a sensible default index per content type unless the caller
        # specified one ('eav' records keep the default RangeIndex).
        if 'index_col' not in df_kwargs and record_type != 'eav':
            if content == 'exportFieldNames':
                df_kwargs['index_col'] = 'original_field_name'
            elif content == 'metadata':
                df_kwargs['index_col'] = 'field_name'
            elif content in ['report', 'record']:
                if self.is_longitudinal:
                    df_kwargs['index_col'] = [self.def_field, 'redcap_event_name']
                else:
                    df_kwargs['index_col'] = self.def_field
        response = cast(str, response)
        buf = StringIO(response)
        dataframe = self._read_csv(buf, **df_kwargs)
        buf.close()
        return dataframe

    def _call_api(self, payload: Dict[str, Any], return_type: Literal['file_map', 'json', 'empty_json', 'count_dict', 'ids_list', 'str', 'int'], file: Optional[FileUpload] = None) -> Union[FileMap, Json, Dict[str, int], List[dict], List[str], int, str, Literal['1']]:
        """Execute the request via _RCRequest, parsing per *return_type*."""
        config = _ContentConfig(return_empty_json=(return_type == 'empty_json'), return_bytes=(return_type == 'file_map'))
        return_headers = return_type == 'file_map'
        rcr = _RCRequest(url=self.url, payload=payload, config=config)
        return rcr.execute(verify_ssl=self.verify_ssl, return_headers=return_headers, file=file, **self._request_kwargs)
class TwoCropTransform:
    """Callable producing two transformed views of the same input.

    Used for contrastive learning: calling the instance returns a two-element
    list [transformA(x), transformB(x)]. When no second transform is given,
    the first one is applied twice.
    """

    def __init__(self, transformA, transformB=None):
        self.transformA = transformA
        # Fall back to the first transform when no second one is supplied.
        self.transformB = transformA if transformB is None else transformB

    def __call__(self, x):
        return [self.transformA(x), self.transformB(x)]
class BadPOPM(TestCase):
    """Tests for POPM frames whose play counter is longer than 4 bytes."""

    def setUp(self):
        # Work on a throwaway copy so the write test can rewrite the file.
        self.filename = get_temp_copy(os.path.join(DATA_DIR, 'bad-POPM-frame.mp3'))

    def tearDown(self):
        os.unlink(self.filename)

    def test_read_popm_long_counter(self):
        """An oversized (8-byte) counter must still be decoded as an int."""
        f = ID3(self.filename)
        self.failUnless(('POPM:Windows Media Player 9 Series' in f))
        popm = f['POPM:Windows Media Player 9 Series']
        self.assertEquals(popm.rating, 255)
        # Fixed: the expected value was missing (syntax error). This is the
        # integer the fixture's 8-byte big-endian counter decodes to, per the
        # upstream mutagen test — TODO confirm against the fixture file.
        self.assertEquals(popm.count, 2709193061904762888)

    def test_write_popm_long_counter(self):
        """Writing a counter > 2**32 must round-trip through save/load."""
        f = ID3(self.filename)
        f.add(POPM(email='', rating=125, count=((2 ** 32) + 1)))
        f.save()
        f = ID3(self.filename)
        self.failUnless(('POPM:' in f))
        self.failUnless(('POPM:Windows Media Player 9 Series' in f))
        popm = f['POPM:']
        self.assertEquals(popm.rating, 125)
        self.assertEquals(popm.count, ((2 ** 32) + 1))
def test_det_recog_show_result():
    """Smoke test: visualization doubles the width and stays 3-channel."""
    canvas = np.full((100, 100, 3), 255, dtype=np.uint8)
    det_recog_res = {'result': [{'box': [51, 88, 51, 62, 85, 62, 85, 88], 'box_score': 0.9417, 'text': 'hell', 'text_score': 0.8834}]}
    vis_img = det_recog_show_result(canvas, det_recog_res)
    h, w, c = vis_img.shape
    assert h == 100
    assert w == 200
    assert c == 3
    # An empty recognized text must not crash the renderer.
    det_recog_res['result'][0]['text'] = ''
    det_recog_show_result(canvas, det_recog_res)
# NOTE(review): the two leading lines look like stripped decorators — almost
# certainly `@pytest.mark.xfail(reason=...)` and `@pytest.mark.spark_functions`
# whose `@pytest.mark` prefix was lost in extraction. As written they are
# syntax errors; restore the decorators when merging back.
.xfail(reason='causing issues in CI, to be fixed later')
.spark_functions
def test_update_where_float(dataframe, spark_dataframe):
    """update_where on the Spark dataframe (string condition) must produce the
    same frame as pandas update_where with the equivalent boolean mask."""
    assert_frame_equal(spark_dataframe.update_where(conditions="\n        `decorated-elephant` = 1 AND `#$%^` = 'rabbit'\n        ", target_column_name='Bell__Chart', target_val=3.234789).toPandas(), dataframe.update_where(((dataframe['decorated-elephant'] == 1) & (dataframe['#$%^'] == 'rabbit')), 'Bell__Chart', 3.234789))
class TypeVarInferVarianceTests(BaseTestCase):
    """Tests for the `infer_variance` flag on typing_extensions.TypeVar."""

    def test_typevar(self):
        # Default: variance inference is off.
        plain = typing_extensions.TypeVar('T')
        self.assertFalse(plain.__infer_variance__)
        # Explicitly on.
        inferring = typing_extensions.TypeVar('T_infer', infer_variance=True)
        self.assertTrue(inferring.__infer_variance__)
        # Explicitly off.
        non_inferring = typing_extensions.TypeVar('T_noinfer', infer_variance=False)
        self.assertFalse(non_inferring.__infer_variance__)

    def test_pickle(self):
        # TypeVars pickle by qualified name, so they must live at module level.
        global U, U_infer
        U = typing_extensions.TypeVar('U')
        U_infer = typing_extensions.TypeVar('U_infer', infer_variance=True)
        checked_attrs = ('__name__', '__covariant__', '__contravariant__', '__bound__', '__infer_variance__')
        for proto in range(pickle.HIGHEST_PROTOCOL):
            for typevar in (U, U_infer):
                roundtripped = pickle.loads(pickle.dumps(typevar, proto))
                for attr in checked_attrs:
                    self.assertEqual(getattr(roundtripped, attr), getattr(typevar, attr))
def model_processing(model, src_dir, dest_dir, timeseq_len):
    """Run `model.predict` over every .npy clip under src_dir/{train,test}
    and save the outputs into dest_dir, mirroring the class/file layout.

    model: object with predict(data, batch_size); timeseq_len is passed as
    the prediction batch size. Destination directories are created as needed.

    Fixes: the loop variables `dir` (shadowed the builtin) and `dest_dir`
    (shadowed the function parameter) are renamed, np.load/np.save paths are
    joined once, and the triplicated mkdir boilerplate moved to a helper.
    """
    train_dir = os.path.join(src_dir, 'train')
    test_dir = os.path.join(src_dir, 'test')
    dest_train_dir = os.path.join(dest_dir, 'train')
    dest_test_dir = os.path.join(dest_dir, 'test')
    # dest_dir must come first: its children can only be made after it exists.
    for directory in (dest_dir, dest_train_dir, dest_test_dir):
        _ensure_dir(directory)
    dir_mapping = OrderedDict([(train_dir, dest_train_dir), (test_dir, dest_test_dir)])
    for src_split_dir, dest_split_dir in dir_mapping.items():
        print('Processing data in {}'.format(src_split_dir))
        for index, class_name in enumerate(os.listdir(src_split_dir)):
            class_dir = os.path.join(src_split_dir, class_name)
            dest_class_dir = os.path.join(dest_split_dir, class_name)
            if not os.path.exists(dest_class_dir):
                os.mkdir(dest_class_dir)
                print(dest_class_dir, 'created')
            for filename in os.listdir(class_dir):
                clip_data = np.load(os.path.join(class_dir, filename))
                processed_data = model.predict(clip_data, batch_size=timeseq_len)
                print(processed_data.shape)
                np.save(os.path.join(dest_class_dir, filename), processed_data)
            print('No.{} class {} finished, data saved in {}'.format(index, class_name, dest_class_dir))


def _ensure_dir(path):
    """Create *path* if missing (mkdir semantics: the parent must exist)."""
    if os.path.exists(path):
        print(path, 'already exists')
    else:
        os.mkdir(path)
        print(path, 'created')
def random_inj_per_layer_batched(pfi: core.FaultInjection, min_val: int=(- 1), max_val: int=1, rand_loc: bool=True, rand_val: bool=True):
    """Declare one neuron fault per (layer, batch element) across the model.

    For every layer and every batch element, records an injection target
    (layer, C, H, W) and an error value, then registers them all with *pfi*
    in a single call.

    rand_loc: if True, a fresh random location is drawn for every batch
        element; if False, one location is drawn per layer and shared across
        the batch. rand_val: analogous for the injected value, with
        min_val/max_val passed through to random_value.
    Returns whatever pfi.declare_neuron_fault_injection returns.

    NOTE: the exact order of random_neuron_location/random_value calls is
    significant for RNG reproducibility, so the branch structure below is
    deliberate.
    """
    (batch, layer_num, c_rand, h_rand, w_rand, value) = ([] for i in range(6))
    for i in range(pfi.get_total_layers()):
        if (not rand_loc):
            # one fixed location shared by every batch element of this layer
            (layer, C, H, W) = random_neuron_location(pfi, layer=i)
        if (not rand_val):
            # one fixed error value shared by every batch element of this layer
            err_val = random_value(min_val=min_val, max_val=max_val)
        for b in range(pfi.batch_size):
            if rand_loc:
                (layer, C, H, W) = random_neuron_location(pfi, layer=i)
            if rand_val:
                err_val = random_value(min_val=min_val, max_val=max_val)
            batch.append(b)
            layer_num.append(layer)
            c_rand.append(C)
            h_rand.append(H)
            w_rand.append(W)
            value.append(err_val)
    return pfi.declare_neuron_fault_injection(batch=batch, layer_num=layer_num, dim1=c_rand, dim2=h_rand, dim3=w_rand, value=value)
@_destruct_output_when_exp('contents')
def output(*contents):
    """Deprecated placeholder-scope output container; use put_scope() instead.

    Renders *contents* inside a uniquely-named div and returns a handler
    exposing reset/append/insert operations on that div.

    Fixed: the four `_destruct_output_when_exp(...)` decorator expressions
    (on this function and on reset/append/insert) had lost their leading `@`
    and were no-op statements; they are restored as decorators.
    """
    import warnings
    warnings.warn('`pywebio.output.output()` is deprecated since v1.5 and will remove in the future version, use `pywebio.output.put_scope()` instead', DeprecationWarning, stacklevel=2)

    class OutputHandler(Output):
        """Handle of the placeholder scope: reset/append/insert its contents."""

        def __del__(self):
            # Intentionally suppress Output.__del__ for the handler itself.
            pass

        def __init__(self, spec, scope):
            super().__init__(spec)
            self.scope = scope

        @_destruct_output_when_exp('outputs')
        def reset(self, *outputs):
            """Clear the scope, then append *outputs*."""
            clear_scope(scope=self.scope)
            self.append(*outputs)

        @_destruct_output_when_exp('outputs')
        def append(self, *outputs):
            """Append *outputs* at the bottom of the scope."""
            for o in outputs:
                if not isinstance(o, Output):
                    o = put_text(o)  # coerce plain values to text outputs
                o.spec['scope'] = scope2dom(self.scope)
                o.spec['position'] = OutputPosition.BOTTOM
                o.send()

        @_destruct_output_when_exp('outputs')
        def insert(self, idx, *outputs):
            """Insert *outputs* at *idx* (negative idx counts from the end)."""
            direction = 1 if idx >= 0 else -1
            for acc, o in enumerate(outputs):
                if not isinstance(o, Output):
                    o = put_text(o)
                o.spec['scope'] = scope2dom(self.scope)
                o.spec['position'] = idx + direction * acc
                o.send()

    contents = [c if isinstance(c, Output) else put_text(c) for c in contents]
    dom_name = random_str(10)
    tpl = '<div class="{{dom_class_name}}">\n {{#contents}}\n {{#.}}\n {{& pywebio_output_parse}}\n {{/.}}\n {{/contents}}\n </div>'
    out_spec = put_widget(template=tpl, data=dict(contents=contents, dom_class_name=scope2dom(dom_name, no_css_selector=True)))
    return OutputHandler(Output.dump_dict(out_spec), ('.', dom_name))
class Blur(nn.Module):
    """Fixed (non-learnable) depthwise blur layer.

    Builds a 2-D kernel as the normalized outer product of the 1-D `sfilter`,
    replicated per input channel, and applies it as a grouped conv2d after
    same-padding. The kernel is stored as a frozen Parameter so it moves with
    the module across devices without being trained.
    """

    def __init__(self, in_filters, sfilter=(1, 1), pad_mode='replicate', **kwargs):
        super(Blur, self).__init__()
        self.pad = SamePad(len(sfilter), pad_mode=pad_mode)
        kernel_1d = torch.tensor(sfilter, dtype=torch.float, requires_grad=False)
        self.filter_proto = kernel_1d
        # outer product -> 2-D kernel, normalized to sum to 1
        kernel_2d = torch.tensordot(kernel_1d, kernel_1d, dims=0)
        kernel_2d = kernel_2d / torch.sum(kernel_2d)
        # one copy of the kernel per input channel: (in_filters, 1, k, k)
        kernel_2d = kernel_2d.repeat([in_filters, 1, 1, 1])
        self.filter = torch.nn.Parameter(kernel_2d, requires_grad=False)

    def forward(self, x):
        padded = self.pad(x)
        return F.conv2d(padded, self.filter, groups=x.size()[1])

    def extra_repr(self):
        return 'pad=%s, filter_proto=%s' % (self.pad, self.filter_proto.tolist())
_dataframe_method
_alias(columns='column_names')
def label_encode(df: pd.DataFrame, column_names: Union[(str, Iterable[str], Hashable)]) -> pd.DataFrame:
    """Deprecated: integer-encode the given labeled column(s).

    Warns about the upcoming removal and delegates to ``_factorize``, which
    adds an ``_enc``-suffixed encoded column for each requested column.
    """
    warnings.warn('`label_encode` will be deprecated in a 1.x release. Please use `factorize_columns` instead.')
    return _factorize(df, column_names, '_enc')
def update_pen_val_and_weights(config, pen_val, weights):
    """Recursively push penalty values and weights from `config` into the
    `pen_val`/`weights` parameter containers.

    For sum penalties (SeparableSum/OverlappingSum), `pen_val` and `weights`
    are indexable per sub-penalty. Raises NotImplementedError for an
    unsupported penalty config type.
    """
    if isinstance(config, NoPenalty):
        # Nothing to set for an unpenalized config.
        pass
    elif isinstance(config, (Ridge, Lasso, GroupLasso, MultiTaskLasso, GeneralizedLasso, FusedLasso)):
        update_weights_and_pen_val_for_prod(pen_val=pen_val, new_pen_val=config.pen_val, weights=weights, new_weights=config.weights)
    elif isinstance(config, GeneralizedRidge):
        # BUG FIX: was `config.pen_Val` (capital V), which would raise
        # AttributeError — the attribute is `pen_val` (cf. the branch above).
        pen_val.value = config.pen_val
    elif isinstance(config, (SeparableSum, OverlappingSum)):
        # Recurse into each sub-penalty with its matching containers.
        configs = config.get_penalties().values()
        for (i, cfg) in enumerate(configs):
            update_pen_val_and_weights(cfg, pen_val=pen_val[i], weights=weights[i])
    else:
        raise NotImplementedError('{} not currently available'.format(config))
def main(config):
    """Build, optionally restore, and train/test a SceneFlow experiment.

    `config` supplies `model_params`, `exp_params`, `trainer_params`,
    `logging_params`, and a boolean `train` flag that selects fit vs. test.
    """
    neptune_logger = NeptuneLogger(api_key=None, offline_mode=config['logging_params']['offline_mode'], project_name=config['logging_params']['project_name'], experiment_name=config['logging_params']['exp_name'], params={**config['exp_params'], **config['model_params'], **config['trainer_params']}, tags=config['logging_params']['tags'])
    model = models_dict[config['model_params']['model_name']](**config['model_params'])
    experiment = SceneFlowExp(model, config['exp_params'])
    # Optionally warm-start from a checkpoint; map_location keeps tensors on CPU.
    if ('pre_trained_weights_checkpoint' in config['exp_params'].keys()):
        print(f"Loading pre-trained model: {config['exp_params']['pre_trained_weights_checkpoint']}")
        checkpoint = torch.load(config['exp_params']['pre_trained_weights_checkpoint'], map_location=(lambda storage, loc: storage))
        experiment.load_state_dict(checkpoint['state_dict'])
    # Training runs get a timestamped checkpoint dir; testing reuses 'test'.
    if config['train']:
        time_str = str(datetime.datetime.now().strftime('%Y-%m-%d_%H-%M'))
        exp_ckpt_dir = osp.join(config['logging_params']['ckpt_dir'], time_str)
    else:
        exp_ckpt_dir = osp.join(config['logging_params']['ckpt_dir'], 'test')
    os.makedirs(exp_ckpt_dir, exist_ok=True)
    ckpt_callback = ModelCheckpoint(filepath=osp.join(exp_ckpt_dir, '{epoch}'), save_last=True)
    trainer = Trainer(logger=neptune_logger, checkpoint_callback=ckpt_callback, **config['trainer_params'])
    if config['train']:
        print('Start Training!')
        trainer.fit(experiment)
    else:
        print('Start Testing')
        trainer.test(experiment)
def get_semantic_centroids(semantic_obs):
    """Return the semantic ids present in `semantic_obs` (excluding id 0)
    together with the mean pixel coordinate [row, col] of each id's mask.
    """
    ids = [sid for sid in np.unique(semantic_obs) if sid != 0]
    centroids = []
    for sid in ids:
        rows, cols = np.nonzero(semantic_obs == sid)
        centroids.append([rows.mean(), cols.mean()])
    return (ids, centroids)
def _acl_to_list(acl):
    """Flatten a posix1e ACL into [(tag_char, owner_pair, perms), ...].

    tag_char is one of U/u/G/g/M/O (owner user, named user, owning group,
    named group, mask, other). owner_pair is (id, name) for named user/group
    entries and None otherwise. perms is an rwx bitmask (r=4, w=2, x=1).
    Raises ValueError for an unrecognized tag type.
    """
    tag_chars = {
        posix1e.ACL_USER_OBJ: 'U',
        posix1e.ACL_USER: 'u',
        posix1e.ACL_GROUP_OBJ: 'G',
        posix1e.ACL_GROUP: 'g',
        posix1e.ACL_MASK: 'M',
        posix1e.ACL_OTHER: 'O',
    }

    def entry_to_tuple(entry):
        tagchar = tag_chars.get(entry.tag_type)
        if tagchar is None:
            raise ValueError('Unknown ACL tag {atag}.'.format(atag=entry.tag_type))
        if tagchar == 'u':
            # Named-user entry: resolve the uid to a user name.
            owner_pair = (entry.qualifier, usrgrp.uid2uname(entry.qualifier))
        elif tagchar == 'g':
            # Named-group entry: resolve the gid to a group name.
            owner_pair = (entry.qualifier, usrgrp.gid2gname(entry.qualifier))
        else:
            owner_pair = None
        perms = (entry.permset.read << 2) | (entry.permset.write << 1) | entry.permset.execute
        return (tagchar, owner_pair, perms)

    return [entry_to_tuple(entry) for entry in acl]
def check_reopen(r1, w):
    """Diagnostic: reopen pipe read-end `r1` via /proc/self/fd and verify that
    the two descriptors share data flow but keep independent blocking modes.

    Prints its findings; any failure is caught and reported, not raised.
    Linux-only (relies on /proc/self/fd).
    """
    try:
        print('Reopening read end')
        r2 = os.open(f'/proc/self/fd/{r1}', os.O_RDONLY)
        print(f'r1 is {r1}, r2 is {r2}')
        print('checking they both can receive from w...')
        os.write(w, b'a')
        assert (os.read(r1, 1) == b'a')
        os.write(w, b'b')
        assert (os.read(r2, 1) == b'b')
        print('...ok')
        print('setting r2 to non-blocking')
        os.set_blocking(r2, False)
        print('os.get_blocking(r1) ==', os.get_blocking(r1))
        print('os.get_blocking(r2) ==', os.get_blocking(r2))
        # r2 should now fail fast with no data available...
        try:
            os.read(r2, 1)
        except BlockingIOError:
            print('r2 definitely seems to be in non-blocking mode')
        # ...while r1 should still block until the delayed write arrives.
        def sleep_then_write():
            time.sleep(1)
            os.write(w, b'c')
        threading.Thread(target=sleep_then_write, daemon=True).start()
        assert (os.read(r1, 1) == b'c')
        print('r1 definitely seems to be in blocking mode')
    except Exception as exc:
        print(f'ERROR: {exc!r}')
.parametrize('debug_or_run', ['run', 'debug'])
def test_run_debug_step_function_mark_pending(debug_or_run, mocker, mock_utils_debugger):
    """Both Step.run() and Step.debug() must mark a pending step as PENDING."""
    step = Step(1, 'I am a Step', 'foo.feature', 1, parent=None, runable=True, context_class=None)
    # Definition function flagged as pending by the step helper.
    step.definition_func = StepHelper.step_pending_func
    step.argument_match = mocker.MagicMock()
    step.argument_match.evaluate.return_value = (tuple(), {})
    # Dispatch to Step.run or Step.debug depending on the parametrization.
    method = getattr(step, debug_or_run)
    state = method()
    # The returned state and the step's stored state must both be PENDING.
    assert (state == Step.State.PENDING == step.state)
(help=__doc__)
('-r', '--run-number', help='use a specific run number (Default: highest)', type=int, default=None)
('folder', type=click.Path(exists=True, file_okay=False))
_context
def main(ctx: Any, folder: os.PathLike, run_number: Optional[int]) -> None:
    """Interactively replay a scenario-player run's WAL database.

    Locates the newest scenario-player log in `folder`, gathers token networks
    and node DBs for the chosen run, prompts the user to pick node/partner/
    token network, and invokes `replay_wal` with the selection.
    Raises ValueError when no scenario-player log can be found.
    """
    scenario = ScenarioItems()
    content: List[os.PathLike] = cast(List[os.PathLike], os.listdir(folder))
    # Newest-first scan: the first matching file is the latest scenario log.
    for fn in sorted(content, reverse=True):
        file = os.path.join(folder, fn)
        if (os.path.isfile(file) and detect_scenario_player_log(file)):
            scenario.scenario_log = file
            break
    if (scenario.scenario_log is None):
        raise ValueError('Could not find scenario player log file')
    print(scenario.scenario_log)
    scenario.token_networks = get_token_network_addresses(scenario.scenario_log)
    nodes = get_nodes(scenario.scenario_log)
    if (run_number is None):
        run_number = find_last_run(folder)
    print(f'Parsing run [{run_number}].')
    # Collect each node's address and WAL database for the selected run.
    for node_number in range(len(nodes)):
        (user_address, db_file) = parse_node_folder(folder, node_number, run_number, nodes)
        scenario.add_user(user_address, db_file)
    # Interactive selections.
    token_network = select_by_number(scenario.token_networks, 'Select token_network:')
    node = select_by_number(scenario.users, 'Select node DB:')
    partner = select_by_number(scenario.users, 'Select partner:')
    translator = scenario.write_translator()
    db_file = scenario.db_files[node]
    partner_address = scenario.users[partner]
    print(f'''Replaying WAL DB:
    {db_file}
    Node: {node} {scenario.users[node]}
    Partner: {partner} {partner_address}
    Token Network: {token_network}''')
    with open(translator) as names_translator:
        ctx.invoke(replay_wal, db_file=db_file, token_network_address=token_network, partner_address=partner_address, names_translator=names_translator)
def ensure_adjusted_array(ndarray_or_adjusted_array, missing_value):
    """Coerce the input to an AdjustedArray.

    AdjustedArray instances pass through unchanged; raw ndarrays are wrapped
    (with no adjustments and the given missing_value). Anything else raises
    TypeError.
    """
    value = ndarray_or_adjusted_array
    if isinstance(value, AdjustedArray):
        return value
    if isinstance(value, ndarray):
        return AdjustedArray(value, {}, missing_value)
    raise TypeError("Can't convert %s to AdjustedArray" % type(ndarray_or_adjusted_array).__name__)
def compute_residual(model, state_in, state_out, particle_f, residual, dt):
    # Launch one kernel thread per particle to evaluate the integration
    # residual from the velocities of state_in/state_out, applied forces,
    # particle masses and gravity over timestep dt, on the model's device.
    # NOTE(review): `residual.astype(dtype=wp.vec3)` reinterprets the output
    # buffer as vec3 for the kernel — confirm this matches the warp API
    # version in use.
    wp.launch(kernel=compute_particle_residual, dim=model.particle_count, inputs=[state_in.particle_qd, state_out.particle_qd, particle_f, model.particle_mass, model.gravity, dt, residual.astype(dtype=wp.vec3)], device=model.device)
def rgb_to_hsv(x):
    """Convert an RGB tensor (channel dim 0, values assumed in [0, 1] —
    TODO confirm) to HSV.

    Hue is in degrees [0, 360); saturation and value keep the input scale.
    Pixels with zero chroma are not guarded, so their hue may be NaN/inf.
    """
    out = th.zeros(*x.size())
    mins = x.min(0)
    maxs = x.max(0)
    max_vals = maxs[0]
    chroma = max_vals - mins[0]
    # Hue formula depends on which channel attains the per-pixel maximum.
    red_max = maxs[1].eq(0)
    out[0][red_max] = (((x[1][red_max] - x[2][red_max]) / chroma[red_max]) % 6)
    green_max = maxs[1].eq(1)
    out[0][green_max] = (2 + ((x[2][green_max] - x[0][green_max]) / chroma[green_max]))
    blue_max = maxs[1].eq(2)
    out[0][blue_max] = (4 + ((x[0][blue_max] - x[1][blue_max]) / chroma[blue_max]))
    out[0] = out[0].mul(60)
    out[1] = chroma / max_vals
    out[2] = max_vals
    return out
class ResNetDownsample(nn.Module):
    """Shortcut-path downsample for a 3-D ResNet: 1x1x1 strided convolution
    (no bias) followed by instance normalization."""

    def __init__(self, in_features, out_features, stride=1):
        super().__init__()
        # Bias is omitted because the norm layer centers activations anyway.
        self.conv = nn.Conv3d(in_features, out_features, 1, stride, bias=False)
        self.norm = nn.InstanceNorm3d(out_features)

    def forward(self, x):
        projected = self.conv(x)
        return self.norm(projected)
class ApplyGateToLthQubit(UnaryIterationGate):
    """Applies `nth_gate(l)` to the l-th qubit of the target register, where l
    is encoded in the selection register(s); controlled by `control_regs`.

    NOTE(review): `make_on` takes `cls` and looks like a classmethod whose
    decorator was lost — confirm against the original module.
    """
    # Selection register(s); a single SelectionRegister is normalized to a tuple.
    selection_regs: Tuple[(SelectionRegister, ...)] = attrs.field(converter=(lambda v: ((v,) if isinstance(v, SelectionRegister) else tuple(v))))
    # Callable mapping selection indices to the gate applied on the target qubit.
    nth_gate: Callable[(..., cirq.Gate)]
    # Control register(s); defaults to a single 1-qubit 'control' register.
    control_regs: Tuple[(Register, ...)] = attrs.field(converter=(lambda v: ((v,) if isinstance(v, Register) else tuple(v))), default=(Register('control', 1),))

    def make_on(cls, *, nth_gate: Callable[(..., cirq.Gate)], **quregs: Sequence[cirq.Qid]) -> cirq.Operation:
        """Build the gate from concrete qubit registers and apply it to them."""
        return ApplyGateToLthQubit(SelectionRegister('selection', len(quregs['selection']), len(quregs['target'])), nth_gate=nth_gate, control_regs=Register('control', len(quregs['control']))).on_registers(**quregs)

    _property
    def control_registers(self) -> Tuple[(Register, ...)]:
        return self.control_regs

    _property
    def selection_registers(self) -> Tuple[(SelectionRegister, ...)]:
        return self.selection_regs

    _property
    def target_registers(self) -> Tuple[(Register, ...)]:
        # One target qubit per point in the selection iteration space.
        total_iteration_size = np.prod(tuple((reg.iteration_length for reg in self.selection_registers)))
        return (Register('target', int(total_iteration_size)),)

    def _circuit_diagram_info_(self, args: cirq.CircuitDiagramInfoArgs) -> cirq.CircuitDiagramInfo:
        """Wire symbols: blank for controls, 'In' for selection, then one
        per-target symbol naming the gate applied at that iteration index."""
        wire_symbols = ([''] * total_bits(self.control_registers))
        wire_symbols += (['In'] * total_bits(self.selection_registers))
        for it in itertools.product(*[range(reg.iteration_length) for reg in self.selection_regs]):
            wire_symbols += [str(self.nth_gate(*it))]
        return cirq.CircuitDiagramInfo(wire_symbols=wire_symbols)

    def nth_operation(self, context: cirq.DecompositionContext, control: cirq.Qid, target: Sequence[cirq.Qid], **selection_indices: int) -> cirq.OP_TREE:
        """Controlled application of nth_gate(selection) on the target qubit
        addressed by flattening the multi-dimensional selection index."""
        selection_shape = tuple((reg.iteration_length for reg in self.selection_regs))
        selection_idx = tuple((selection_indices[reg.name] for reg in self.selection_regs))
        target_idx = int(np.ravel_multi_index(selection_idx, selection_shape))
        return self.nth_gate(*selection_idx).on(target[target_idx]).controlled_by(control)
def _collect_metrics(metrics, output_names):
if (not metrics):
return [[] for _ in output_names]
if isinstance(metrics, list):
return [copy.copy(metrics) for _ in output_names]
elif isinstance(metrics, dict):
nested_metrics = []
for name in output_names:
output_metrics = metrics.get(name, [])
if (not isinstance(output_metrics, list)):
output_metrics = [output_metrics]
nested_metrics.append(output_metrics)
return nested_metrics
else:
raise TypeError(('Type of `metrics` argument not understood. Expected a list or dictionary, found: ' + str(metrics))) |
def display_images(images: List[np.ndarray], dpi=100.0, format='html5_video', **kwargs):
    """Render a list of frames as an inline notebook animation.

    :param images: frames; all are assumed to match the first frame's size.
    :param dpi: figure resolution (figure size is derived from frame shape).
    :param format: 'html5_video' or 'jshtml'; anything else raises
        NotImplementedError.
    :param kwargs: forwarded to matplotlib.animation.FuncAnimation.
    """
    (h, w) = images[0].shape[:2]
    # NOTE(review): figsize is (h/dpi, w/dpi) while matplotlib expects
    # (width, height) — looks swapped for non-square frames; confirm intent.
    fig = plt.figure(figsize=((h / dpi), (w / dpi)), dpi=dpi)
    fig_im = plt.figimage(images[0])
    def animate(image):
        # Per-frame update: swap the displayed array in place.
        fig_im.set_array(image)
        return (fig_im,)
    anim = animation.FuncAnimation(fig, animate, frames=images, **kwargs)
    if (format == 'html5_video'):
        display(HTML(anim.to_html5_video()))
    elif (format == 'jshtml'):
        display(HTML(anim.to_jshtml()))
    else:
        raise NotImplementedError(format)
    # Close the figure so it is not rendered a second time by the notebook.
    plt.close(fig)
def get_raw_video_file_info(filename: str) -> Dict[(str, Any)]:
    """Parse metadata (size, framerate, bit depth, pixel format, extension)
    encoded in a raw-video filename such as ``foo_1920x1080_25fps_8bit.yuv``.

    Returns an empty dict when nothing recognizable is found.
    Raises ValueError if the name specifies two conflicting bit depths
    (e.g. ``..._8bit_yuv420p10LE...``).
    """
    size_pattern = '(?P<width>\\d+)x(?P<height>\\d+)'
    framerate_pattern = '(?P<framerate>[\\d\\.]+)(?:Hz|fps)'
    bitdepth_pattern = '(?P<bitdepth>\\d+)bit'
    formats = '|'.join(video_formats.keys())
    # Optional trailing "<bits><LE|BE>" after the format name, e.g. "yuv420p10LE".
    format_pattern = f'(?P<format>{formats})(?:[p_]?(?P<bitdepth2>\\d+)(?P<endianness>LE|BE))?'
    extension_pattern = f"(?P<extension>{'|'.join(file_extensions)})"
    cut_pattern = '([0-9]+)-([0-9]+)'
    patterns = (size_pattern, framerate_pattern, bitdepth_pattern, format_pattern, cut_pattern, extension_pattern)
    info: Dict[(str, Any)] = {}
    for pattern in patterns:
        match = re.search(pattern, filename)
        if match:
            # Unmatched optional groups come back as None via groupdict().
            info.update(match.groupdict())
    if (not info):
        return {}
    # BUG FIX: use .get() for keys that are absent when their pattern did not
    # match at all (direct indexing raised KeyError), and include the actual
    # filename in the error message (it previously printed a placeholder).
    bitdepth = info.get('bitdepth')
    bitdepth2 = info.get('bitdepth2')
    if (bitdepth and bitdepth2 and (bitdepth != bitdepth2)):
        raise ValueError(f'Filename "{filename}" specifies bit-depth twice.')
    if bitdepth2:
        # The format-embedded depth wins when it is the only one given.
        info['bitdepth'] = bitdepth2
        del info['bitdepth2']
    outinfo: Dict[(str, Union[(str, int, float, Fraction, VideoFormat)])] = {}
    outinfo.update(info)
    if (info.get('format') is not None):
        outinfo['format'] = video_formats.get(info['format'].lower(), info['format'])
    if (info.get('endianness') is not None):
        outinfo['endianness'] = info['endianness'].lower()
    if (info.get('framerate') is not None):
        framerate = info['framerate']
        # Prefer canonical fractions (e.g. 23.976 -> 24000/1001) when known.
        if (framerate in framerate_to_fraction):
            outinfo['framerate'] = framerate_to_fraction[framerate]
        else:
            outinfo['framerate'] = Fraction(framerate)
    for key in ('width', 'height', 'bitdepth'):
        if (info.get(key) is not None):
            outinfo[key] = int(info[key])
    return outinfo
def test_const_connect_Bits_signal_to_Bits():
    """A Bits32 wire connected to the constant Bits32(0) must register exactly
    one constant on the elaborated component, and the model must still tick."""
    class Top(ComponentLevel3):
        def construct(s):
            s.wire = Wire(Bits32)
            connect(s.wire, Bits32(0))
    x = Top()
    x.elaborate()
    print(x._dsl.consts)
    # Elaboration should have collected the single Bits32(0) constant.
    assert (len(x._dsl.consts) == 1)
    # Simulation pass + one tick verifies the const connection is schedulable.
    simple_sim_pass(x)
    x.tick()
def testParameterSetActions():
    """ParameterSetAction: equality, XML round-trip, and version validation."""
    pa = OSC.ParameterSetAction('Myparam', 3)
    pa.setVersion(minor=1)
    prettyprint(pa)
    # Same name/value -> equal; different name -> not equal.
    pa2 = OSC.ParameterSetAction('Myparam', 3)
    pa3 = OSC.ParameterSetAction('Myparam2', 3)
    assert (pa == pa2)
    assert (pa != pa3)
    # Round-trip through the XML element representation must preserve equality.
    pa4 = OSC.ParameterSetAction.parse(pa.get_element())
    assert (pa == pa4)
    # Valid for OpenSCENARIO minor versions 0 and 1; rejected for version 2.
    assert (version_validation('GlobalAction', pa, 0) == ValidationResponse.OK)
    assert (version_validation('GlobalAction', pa, 1) == ValidationResponse.OK)
    assert (version_validation('GlobalAction', pa, 2) == ValidationResponse.OSC_VERSION)
.parametrize('test_args, expected', [([1], '1'), ([None], None), ([0.0001, '{:.0%}'], '0%'), ([0.0001, '{:.0%}', 0.01], '<1%'), ([0.9999, '{:.0%}', None, 0.99], '>99%'), ([0.0001, '{:.0%}', 0.01, None, 'under ', None], 'under 1%'), ([0.9999, '{:.0%}', None, 0.99, None, 'above '], 'above 99%'), ([1, humanize.intword, 1000000.0, None, 'under '], 'under 1.0 million'), ([math.nan], 'NaN'), ([math.inf], '+Inf'), ([(- math.inf)], '-Inf')])
def test_clamp(test_args: list[typing.Any], expected: str) -> None:
    """humanize.clamp: formatting, floor/ceil clamping with custom tokens,
    and NaN/Inf passthrough, driven entirely by the parametrized cases."""
    assert (humanize.clamp(*test_args) == expected)
def getValidationCase(file, force=False):
    """Return the path of validation case `file` under the test folder.

    When the case does not exist, raises FileNotFoundError listing the
    available cases — unless force=True, in which case the (possibly
    nonexistent) path is returned anyway.
    """
    path = join(TEST_FOLDER_PATH, 'validation', file)
    if exists(path) or force:
        return path
    raise FileNotFoundError('Validation case `{0}` does not exist. Choose one of: \n- {1} or use force=True'.format(file, '\n- '.join(os.listdir(join(TEST_FOLDER_PATH, 'validation')))))
def test_cube_wcs_freqtovel():
    """Converting a frequency cube WCS to radio velocity (VRAD, km/s) must set
    ctype/crval/cunit correctly, with and without an explicit rest frequency."""
    header = fits.Header.fromtextfile(data_path('cubewcs1.hdr'))
    w1 = wcs.WCS(header)
    # Explicit rest_value taken from the header's restfrq.
    newwcs = convert_spectral_axis(w1, 'km/s', 'VRAD', rest_value=(w1.wcs.restfrq * u.Hz))
    assert (newwcs.wcs.ctype[2] == 'VRAD')
    assert (newwcs.wcs.crval[2] == 305.)
    assert (newwcs.wcs.cunit[2] == u.Unit('km/s'))
    # Without rest_value the conversion should fall back to the same result.
    newwcs = convert_spectral_axis(w1, 'km/s', 'VRAD')
    assert (newwcs.wcs.ctype[2] == 'VRAD')
    assert (newwcs.wcs.crval[2] == 305.)
    assert (newwcs.wcs.cunit[2] == u.Unit('km/s'))
def convert_hf_name_to_opus_name(hf_model_name):
    """Map a Hugging Face Marian model name back to its original OPUS name.

    Strips the org prefix, resolves known group names via GROUP_TO_OPUS_NAME
    (otherwise restores '+' separators from '_'), then drops the 'opus-mt-'
    prefix.
    """
    stripped = remove_prefix(hf_model_name, ORG_NAME)
    opus_w_prefix = (GROUP_TO_OPUS_NAME[stripped] if (stripped in GROUP_TO_OPUS_NAME) else stripped.replace('_', '+'))
    return remove_prefix(opus_w_prefix, 'opus-mt-')
class ClientSpanObserverTests(unittest.TestCase):
    """MetricsClientSpanObserver must emit timer/counter metrics on the span
    lifecycle: start/finish drive the timer, tags and errors drive counters."""

    def test_metrics(self):
        mock_timer = mock.Mock(spec=Timer)
        mock_counter = mock.Mock(spec=Counter)
        mock_batch = mock.Mock(spec=Batch)
        mock_batch.timer.return_value = mock_timer
        mock_batch.counter.return_value = mock_counter
        mock_client_span = mock.Mock(spec=Span)
        mock_client_span.name = 'example'
        observer = MetricsClientSpanObserver(mock_batch, mock_client_span)
        # Construction registers a single timer named after the client span.
        self.assertEqual(mock_batch.timer.call_count, 1)
        self.assertEqual(mock_batch.timer.call_args, mock.call('clients.example'))
        observer.on_start()
        self.assertEqual(mock_timer.start.call_count, 1)
        # Incrementing a tag goes straight to the counter.
        observer.on_incr_tag('test', delta=1)
        mock_counter.increment.assert_called()
        mock_counter.reset_mock()
        # Finishing stops the timer and bumps the success counter once.
        observer.on_finish(exc_info=None)
        self.assertEqual(mock_timer.stop.call_count, 1)
        self.assertEqual(mock_counter.increment.call_count, 1)
        mock_counter.reset_mock()
        # An error.object log increments an errors.<ExceptionName> counter.
        observer.on_log(name='error.object', payload=TestException())
        self.assertEqual(mock_counter.increment.call_count, 1)
        self.assertEqual(mock_batch.counter.call_args, mock.call('errors.TestException'))
def lr0_closure(I):
    """Compute the LR(0) closure of the item set `I` (classic PLY algorithm).

    Repeatedly adds the starting items (`x.lr_next`) of every nonterminal
    that can appear after a dot (`j.lrafter`) until a fixed point is reached.
    The module-global `_add_count` stamp (stored per nonterminal in
    `x.lr0_added`) avoids re-adding a nonterminal's items within this call.
    Returns the closed item list (the input list is not modified).
    """
    global _add_count
    _add_count += 1
    # NOTE: the original bound `prodlist = Productions` and never used it;
    # the dead local has been removed.
    J = I[:]
    didadd = 1
    while didadd:
        didadd = 0
        # Deliberate worklist pattern: J grows while being iterated, so newly
        # appended items are visited in the same pass.
        for j in J:
            for x in j.lrafter:
                if (x.lr0_added == _add_count):
                    continue
                J.append(x.lr_next)
                x.lr0_added = _add_count
                didadd = 1
    return J
.parametrize('username,password', users)
.parametrize('project_id', projects)
.parametrize('snapshot_id', snapshots)
def test_detail(db, client, username, password, project_id, snapshot_id):
client.login(username=username, password=password)
snapshot = Snapshot.objects.filter(project_id=project_id, id=snapshot_id).filter()
url = reverse(urlnames['detail'], args=[project_id, snapshot_id])
response = client.get(url)
if (snapshot and (project_id in view_snapshot_permission_map.get(username, []))):
assert (response.status_code == 200)
assert isinstance(response.json(), dict)
assert (response.json().get('id') == snapshot_id)
else:
assert (response.status_code == 404) |
def download(date_array, tag, inst_id, data_path='', user=None, password=None, test_download_kwarg=None):
    """Simulated download routine for the pysat test instrument.

    Logs the custom test kwarg; for tag 'no_download' emits a warning to
    mimic an instrument without download support, and for tag 'user_password'
    raises ValueError when neither user nor password was forwarded.
    Returns None.
    """
    pysat.logger.info('test_download_kwarg = ' + str(test_download_kwarg))
    if tag == 'no_download':
        warnings.warn('This simulates an instrument without download support')
    if tag == 'user_password':
        # Both credentials missing means the test harness failed to pass them.
        if not user and not password:
            raise ValueError('Tests are not passing user and password to test instruments')
    return
class PK(object):
    """Abstract base class for public/private key implementations.

    Concrete subclasses set `keyType` (a 16-bit type id) and implement the
    payload, signing, verification and fingerprint methods. Serialization
    prefixes the big-endian type id to the MPI-packed payload words.
    NOTE(review): `generate`/`parsePayload`/`parsePrivateKey`/`parsePublicKey`
    take `cls` and `getImplementation` takes neither self nor cls — they look
    like classmethods/staticmethods whose decorators were lost; confirm
    against the original module.
    """
    # 16-bit key-type identifier, set by concrete subclasses.
    keyType = None

    def generate(cls):
        raise NotImplementedError

    def parsePayload(cls, data, private=False):
        raise NotImplementedError

    def sign(self, data):
        raise NotImplementedError

    def verify(self, data):
        raise NotImplementedError

    def fingerprint(self):
        raise NotImplementedError

    def serializePublicKey(self):
        # Big-endian 16-bit type id followed by the MPI-packed public words.
        return (struct.pack(b'!H', self.keyType) + self.getSerializedPublicPayload())

    def getSerializedPublicPayload(self):
        buf = b''
        for x in self.getPublicPayload():
            buf += pack_mpi(x)
        return buf

    def getPublicPayload(self):
        raise NotImplementedError

    def serializePrivateKey(self):
        # Same framing as the public form, but with the private payload words.
        return (struct.pack(b'!H', self.keyType) + self.getSerializedPrivatePayload())

    def getSerializedPrivatePayload(self):
        buf = b''
        for x in self.getPrivatePayload():
            buf += pack_mpi(x)
        return buf

    def getPrivatePayload(self):
        raise NotImplementedError

    def cfingerprint(self):
        # Fingerprint as a zero-padded 40-hex-digit string.
        return '{0:040x}'.format(bytes_to_long(self.fingerprint()))

    def parsePrivateKey(cls, data):
        (implCls, data) = cls.getImplementation(data)
        logging.debug('Got privkey of type %r', implCls)
        return implCls.parsePayload(data, private=True)

    def parsePublicKey(cls, data):
        (implCls, data) = cls.getImplementation(data)
        logging.debug('Got pubkey of type %r', implCls)
        return implCls.parsePayload(data)

    def __str__(self):
        return human_hash(self.cfingerprint())

    def __repr__(self):
        return "<{cls}(fpr='{fpr}')>".format(cls=self.__class__.__name__, fpr=str(self))

    def getImplementation(data):
        # Peel off the 16-bit type id and look up the registered implementation.
        (typeid, data) = unpack(b'!H', data)
        cls = pkTypes.get(typeid, None)
        if (cls is None):
            raise NotImplementedError(('unknown typeid %r' % typeid))
        return (cls, data)
class AttrVI_ATTR_PXI_MAX_LWIDTH(ValuesAttribute):
    """VISA attribute VI_ATTR_PXI_MAX_LWIDTH (ViInt16, read-only).

    Declarative attribute descriptor for PXI INSTR resources; -1 presumably
    means "not applicable" — TODO confirm against the VISA spec.
    """
    # Applies only to PXI INSTR resources.
    resources = [(constants.InterfaceType.pxi, 'INSTR')]
    py_name = ''
    visa_name = 'VI_ATTR_PXI_MAX_LWIDTH'
    visa_type = 'ViInt16'
    default = NotAvailable
    # Readable, not writable, not locally cached.
    (read, write, local) = (True, False, False)
    values = [(- 1), 1, 2, 4, 8, 16]
def genSoftmax(embedding_anc, embedding_neg, W_fc, b_fc, label, Loss_type=FLAGS.LossType):
    """Softmax cross-entropy regularizer over anchor and generated-negative
    embeddings using a shared linear classifier (W_fc, b_fc).

    Only implemented for Loss_type == 'NpairLoss'; for other loss types the
    function falls through and returns None. Assumes the batch is stacked as
    [anchors; positives] halves — TODO confirm against the data pipeline.
    """
    if (Loss_type == 'NpairLoss'):
        # Split [anchor; positive] labels; the positive half labels the
        # generated negatives.
        label_split = tf.split(label, 2, axis=0)
        label_pos = tf.reshape(label_split[1], [int((FLAGS.batch_size / 2)), 1])
        label_neg_tile = tf.tile(label_pos, [int((FLAGS.batch_size / 2)), 1])
        # Logits from the shared fully-connected classifier.
        pull_Logits = (tf.matmul(embedding_neg, W_fc) + b_fc)
        anc_Logits = (tf.matmul(embedding_anc, W_fc) + b_fc)
        label_neg_tile_2 = tf.reshape(label_neg_tile, [(- 1)])
        label_anc_2 = tf.reshape(label_split[0], [(- 1)])
        # Weighted sum of the two cross-entropy terms.
        gen_cross_entropy = ((FLAGS.Softmax_factor * FLAGS._lambda) * (tf.reduce_mean(tf.nn.sparse_softmax_cross_entropy_with_logits(labels=label_neg_tile_2, logits=pull_Logits)) + tf.reduce_mean(tf.nn.sparse_softmax_cross_entropy_with_logits(labels=label_anc_2, logits=anc_Logits))))
        return gen_cross_entropy
_auth
def db_edit(request, pk):
    """Edit view for a DB config: GET returns the current (decrypted) form
    data as JSON; POST applies the submitted changes and re-encrypts the
    password. Responses use {'code': 200|500, 'data': ..., 'msg': ...}.
    """
    db = DBConfig.objects.select_related('db_server').get(id=pk)
    if (request.method == 'GET'):
        # Password is stored encrypted; decrypt it for the edit form.
        data = {'db_server': db.db_server_id, 'db_port': db.db_port, 'db_name': db.db_name, 'db_user': db.db_user, 'db_password': CryptPwd().decrypt_pwd(db.db_password), 'db_group': [group.id for group in db.db_group.all()], 'db_memo': db.db_memo}
        return JsonResponse({'code': 200, 'data': data})
    elif (request.method == 'POST'):
        try:
            db.db_port = int(request.POST.get('db_port'))
            db.db_name = request.POST.get('db_name')
            db.db_user = request.POST.get('db_user')
            db.db_password = CryptPwd().encrypt_pwd(request.POST.get('db_password'))
            db.db_memo = request.POST.get('db_memo')
            # M2M groups are replaced wholesale with the submitted list.
            db.db_group.set(request.POST.getlist('db_group'))
            db.save()
            return JsonResponse({'code': 200, 'data': None, 'msg': '!'})
        except Exception as e:
            return JsonResponse({'code': 500, 'data': None, 'msg': '!{}'.format(e)})
class TestSimpleTypeChecker(TestCase):
    """Tests for pySMT's simple type checker (stc) across boolean, arithmetic,
    function, and mixed-type formulas, plus the argument-assertion helpers.

    NOTE(review): the bare `_result` lines before two inner functions look
    like decorators whose '@'/name were lost — confirm against the original.
    """

    def setUp(self):
        super(TestSimpleTypeChecker, self).setUp()
        # Shared checker plus one symbol of each base sort.
        self.tc = get_env().stc
        self.x = Symbol('x', BOOL)
        self.y = Symbol('y', BOOL)
        self.p = Symbol('p', INT)
        self.q = Symbol('q', INT)
        self.r = Symbol('r', REAL)
        self.s = Symbol('s', REAL)
        self.qfo = get_env().qfo

    def test_boolean(self):
        """A compound boolean formula over INT atoms type-checks to BOOL."""
        varA = Symbol('At', INT)
        varB = Symbol('Bt', INT)
        f = And(LT(varA, Plus(varB, Int(1))), GT(varA, Minus(varB, Int(1))))
        g = Equals(varA, varB)
        h = Iff(f, g)
        tc = get_env().stc
        res = tc.walk(h)
        self.assertEqual(res, BOOL)

    def test_arith_relations(self):
        """LE/LT are BOOL on matching numeric sorts and rejected otherwise
        (mixed INT/REAL, BOOL operands, and BV operands)."""
        self.assertEqual(self.tc.walk(LE(self.p, self.q)), BOOL)
        self.assertEqual(self.tc.walk(LT(self.p, self.q)), BOOL)
        self.assertEqual(self.tc.walk(LE(self.r, self.s)), BOOL)
        self.assertEqual(self.tc.walk(LT(self.r, self.s)), BOOL)
        with self.assertRaises(PysmtTypeError):
            LE(self.p, self.r)
        with self.assertRaises(PysmtTypeError):
            LT(self.p, self.r)
        with self.assertRaises(PysmtTypeError):
            LE(self.x, self.y)
        with self.assertRaises(PysmtTypeError):
            LT(self.x, self.y)
        bv_a = Symbol('BV_A', BV8)
        bv_b = Symbol('BV_B', BV8)
        with self.assertRaises(PysmtTypeError):
            LE(bv_a, bv_b)
        with self.assertRaises(PysmtTypeError):
            LT(bv_a, bv_b)

    def test_functions(self):
        """Uninterpreted function application yields the codomain type; wrong
        argument sorts are rejected."""
        vi = Symbol('At', INT)
        vr = Symbol('Bt', REAL)
        f = Symbol('f', FunctionType(INT, [REAL]))
        g = Symbol('g', FunctionType(REAL, [INT]))
        tc = get_env().stc
        self.assertEqual(tc.walk(Function(f, [vr])), INT)
        self.assertEqual(tc.walk(Function(g, [vi])), REAL)
        self.assertEqual(tc.walk(Function(f, [Function(g, [vi])])), INT)
        self.assertEqual(tc.walk(LE(Plus(vi, Function(f, [Real(4)])), Int(8))), BOOL)
        self.assertEqual(tc.walk(LE(Plus(vr, Function(g, [Int(4)])), Real(8))), BOOL)
        with self.assertRaises(PysmtTypeError):
            LE(Plus(vr, Function(g, [Real(4)])), Real(8))
        with self.assertRaises(PysmtTypeError):
            LE(Plus(vi, Function(f, [Int(4)])), Int(8))

    def test_walk_type_to_type(self):
        """walk_type_to_type maps args of the expected type to the target type
        and returns None on any mismatch (including None args)."""
        f = self.x
        args1 = [BOOL, BOOL]
        args2 = [BOOL, REAL]
        args3 = [None, None]
        t = self.tc.walk_type_to_type(f, args1, BOOL, REAL)
        self.assertEqual(t, REAL)
        t = self.tc.walk_type_to_type(f, args2, BOOL, REAL)
        self.assertEqual(t, None)
        t = self.tc.walk_type_to_type(f, args3, BOOL, REAL)
        self.assertEqual(t, None)

    def test_misc(self):
        """Spot-check representative BOOL/REAL/INT formulas in bulk."""
        bool_list = [And(self.x, self.y), Or(self.x, self.y), Not(self.x), self.x, Equals(self.p, self.q), GE(self.p, self.q), LE(self.p, self.q), GT(self.p, self.q), LT(self.p, self.q), Bool(True), Ite(self.x, self.y, self.x)]
        real_list = [self.r, Real(4), Plus(self.r, self.s), Plus(self.r, Real(2)), Minus(self.s, self.r), Times(self.r, Real(1)), Div(self.r, Real(1)), Ite(self.x, self.r, self.s)]
        int_list = [self.p, Int(4), Plus(self.p, self.q), Plus(self.p, Int(2)), Minus(self.p, self.q), Times(self.p, Int(1)), Ite(self.x, self.p, self.q)]
        for f in bool_list:
            t = self.tc.walk(f)
            self.assertEqual(t, BOOL, f)
        for f in real_list:
            t = self.tc.walk(f)
            self.assertEqual(t, REAL, f)
        for f in int_list:
            t = self.tc.walk(f)
            self.assertEqual(t, INT, f)

    def test_assert_args(self):
        """The assert_* helpers accept matching argument sorts and raise
        PysmtTypeError for violations."""
        assert_no_boolean_in_args([self.r, self.p])
        with self.assertRaises(PysmtTypeError):
            assert_no_boolean_in_args([self.x, self.y])
        assert_boolean_args([self.x, self.y])
        with self.assertRaises(PysmtTypeError):
            assert_boolean_args([self.r, self.p])
        assert_same_type_args([self.x, self.y])
        with self.assertRaises(PysmtTypeError):
            assert_same_type_args([self.r, self.p])
        assert_args_type_in([self.x, self.p], allowed_types=[INT, BOOL])
        with self.assertRaises(PysmtTypeError):
            assert_args_type_in([self.x, self.p], allowed_types=[REAL, BOOL])

    def test_decorator_typecheck_result(self):
        """The result-typechecking decorator passes well-typed results and
        rejects a hand-built ill-typed FNode."""
        from pysmt.fnode import FNode, FNodeContent
        from pysmt.operators import AND
        _result
        def good_function():
            return self.x
        _result
        def super_bad_function():
            # Deliberately ill-typed: AND over two INT symbols.
            sb = FNode(FNodeContent(node_type=AND, args=(self.p, self.p), payload=None), (- 1))
            return sb
        good_function()
        with self.assertRaises(PysmtTypeError):
            super_bad_function()

    def test_examples(self):
        """Every example formula in the suite must type-check to BOOL."""
        for (f, _, _, _) in get_example_formulae():
            self.assertIs(f.get_type(), BOOL, f)
class TraceSpanObserver(SpanObserver):
    """Span observer that records Zipkin-style timing and annotations and
    hands the finished span to a Recorder for transport."""

    def __init__(self, service_name: str, hostname: str, span: Span, recorder: 'Recorder'):
        self.service_name = service_name
        self.hostname = hostname
        self.recorder = recorder
        self.span = span
        # Epoch-microsecond timestamps filled in by on_start/on_finish.
        self.start: Optional[int] = None
        self.end: Optional[int] = None
        self.elapsed: Optional[int] = None
        self.binary_annotations: List[Dict[(str, Any)]] = []
        # Tag counters accumulated via on_incr_tag, flushed at finish time.
        self.counters: DefaultDict[(str, float)] = collections.defaultdict(float)
        self.on_set_tag(ANNOTATIONS['COMPONENT'], 'baseplate')
        super().__init__()

    def on_start(self) -> None:
        """Stamp the span start (also kept as client_send for serialization)."""
        self.start = current_epoch_microseconds()
        self.client_send = self.start

    def on_finish(self, exc_info: Optional[_ExcInfo]) -> None:
        """Stamp the end, tag error/debug state, flush counters as binary
        annotations, and ship the span to the recorder."""
        if exc_info:
            self.on_set_tag(ANNOTATIONS['ERROR'], True)
        if (self.span.flags and (self.span.flags & FLAGS['DEBUG'])):
            self.on_set_tag(ANNOTATIONS['DEBUG'], True)
        self.end = current_epoch_microseconds()
        # start is set by on_start; cast silences the Optional for mypy.
        self.elapsed = (self.end - typing.cast(int, self.start))
        for (key, value) in self.counters.items():
            self.binary_annotations.append(self._create_binary_annotation(f'counter.{key}', value))
        self.recorder.send(self)

    def on_set_tag(self, key: str, value: Any) -> None:
        """Record a tag immediately as a binary annotation."""
        self.binary_annotations.append(self._create_binary_annotation(key, value))

    def on_incr_tag(self, key: str, delta: float) -> None:
        """Accumulate a numeric tag; emitted as 'counter.<key>' at finish."""
        self.counters[key] += delta

    def _endpoint_info(self) -> Dict[(str, str)]:
        return {'serviceName': self.service_name, 'ipv4': self.hostname}

    def _create_time_annotation(self, annotation_type: str, timestamp: int) -> Dict[(str, Any)]:
        return {'endpoint': self._endpoint_info(), 'timestamp': timestamp, 'value': annotation_type}

    def _create_binary_annotation(self, annotation_type: str, annotation_value: Any) -> Dict[(str, Any)]:
        """Binary annotation with the value normalized to a string
        (booleans become 'true'/'false')."""
        endpoint_info = self._endpoint_info()
        if isinstance(annotation_value, bool):
            annotation_value = str(annotation_value).lower()
        elif (not isinstance(annotation_value, str)):
            annotation_value = str(annotation_value)
        return {'key': annotation_type, 'value': annotation_value, 'endpoint': endpoint_info}

    def _to_span_obj(self, annotations: List[Dict[(str, Any)]], binary_annotations: List[Dict[(str, Any)]]) -> Dict[(str, Any)]:
        span = {'traceId': self.span.trace_id, 'name': self.span.name, 'id': self.span.id, 'timestamp': self.start, 'duration': self.elapsed, 'annotations': annotations, 'binaryAnnotations': binary_annotations}
        # Root spans get parentId 0.
        span['parentId'] = (self.span.parent_id or 0)
        return span

    def _serialize(self) -> Dict[(str, Any)]:
        """Build the wire-format span dict with cs/cr time annotations."""
        annotations = []
        annotations.append(self._create_time_annotation(ANNOTATIONS['CLIENT_SEND'], typing.cast(int, self.start)))
        annotations.append(self._create_time_annotation(ANNOTATIONS['CLIENT_RECEIVE'], typing.cast(int, self.end)))
        return self._to_span_obj(annotations, self.binary_annotations)
def batch_outer_sum(*tensors):
    """Broadcasted "outer sum" of the inputs' last axes.

    With k inputs, out[..., i1, ..., ik] = tensors[0][..., i1] + ... +
    tensors[k-1][..., ik]; leading (batch) axes broadcast together.
    Works for numpy arrays and torch tensors alike.
    """
    total = None
    count = len(tensors)
    for pos, t in enumerate(tensors):
        # Index that keeps batch dims (...), adds `count` trailing axes, and
        # places this tensor's last axis at trailing position `pos`.
        expander = [None] * count
        expander[pos] = slice(t.shape[-1])
        view = t[tuple([...] + expander)]
        total = view if total is None else total + view
    return total
def _create_completion(model: str, messages: list, stream: bool, **kwargs):
    """Run the bundled `helpers/you.py` script as a subprocess and yield its
    output line by line (decoded as UTF-8, stderr merged into stdout).

    NOTE(review): `model`, `stream` and **kwargs are accepted but unused here,
    and the child process is never waited on after the stream ends — confirm
    both are intentional.
    """
    path = os.path.dirname(os.path.realpath(__file__))
    # Compact JSON so the whole config fits into a single argv entry.
    config = json.dumps({'messages': messages}, separators=(',', ':'))
    cmd = ['python3', f'{path}/helpers/you.py', config]
    p = subprocess.Popen(cmd, stdout=subprocess.PIPE, stderr=subprocess.STDOUT)
    # iter(..., b'') stops at EOF of the child's stdout.
    for line in iter(p.stdout.readline, b''):
        (yield line.decode('utf-8'))
class IntradayBarEvent(PeriodicEvent):
    """Periodic event firing every minute between one bar after market open
    and one bar before market close; notifies listeners via on_new_bar."""

    def __init__(self):
        self.frequency = Frequency.MIN_1
        # First bar completes one period after open; last starts one before close.
        self.start_time = self._shift_time(MarketOpenEvent._trigger_time, self.frequency.time_delta())
        self.end_time = self._shift_time(MarketCloseEvent._trigger_time, (- self.frequency.time_delta()))
        super().__init__()

    def notify(self, listener) -> None:
        listener.on_new_bar(self)

    def _shift_time(self, time_dict: Dict, time_delta: RelativeDelta):
        """Shift the absolute time-of-day in `time_dict` by `time_delta` and
        return it in the same dict form (keys mapped to relative fields)."""
        now = datetime.datetime.now()
        # Materialize the time-of-day today, apply the shift, then measure it
        # relative to today's midnight to get back field-wise components.
        time = ((now + RelativeDelta(**time_dict)) + time_delta)
        base_time = (now + RelativeDelta(hour=0, minute=0, second=0, microsecond=0))
        delta = RelativeDelta(time, base_time)
        def absolute_to_relative(field_name):
            # e.g. 'hour' -> 'hours' (absolute field to relative field name).
            return '{}s'.format(field_name)
        result_time_dict = {key: getattr(delta, absolute_to_relative(key)) for key in time_dict.keys()}
        return result_time_dict
class UnionConstraint(BaseConstraint):
def __init__(self, *constraints: BaseConstraint) -> None:
self._constraints = constraints
def constraints(self) -> tuple[(BaseConstraint, ...)]:
return self._constraints
def allows(self, other: BaseConstraint) -> bool:
return any((constraint.allows(other) for constraint in self._constraints))
def allows_any(self, other: BaseConstraint) -> bool:
if other.is_empty():
return False
if other.is_any():
return True
if isinstance(other, (UnionConstraint, MultiConstraint)):
constraints = other.constraints
else:
constraints = (other,)
return any((our_constraint.allows_any(their_constraint) for our_constraint in self._constraints for their_constraint in constraints))
def allows_all(self, other: BaseConstraint) -> bool:
if other.is_any():
return False
if other.is_empty():
return True
if isinstance(other, (UnionConstraint, MultiConstraint)):
constraints = other.constraints
else:
constraints = (other,)
our_constraints = iter(self._constraints)
their_constraints = iter(constraints)
our_constraint = next(our_constraints, None)
their_constraint = next(their_constraints, None)
while (our_constraint and their_constraint):
if our_constraint.allows_all(their_constraint):
their_constraint = next(their_constraints, None)
else:
our_constraint = next(our_constraints, None)
return (their_constraint is None)
def invert(self) -> MultiConstraint:
inverted_constraints = [c.invert() for c in self._constraints]
if any(((not isinstance(c, Constraint)) for c in inverted_constraints)):
raise NotImplementedError('Inversion of complex union constraints not implemented')
return MultiConstraint(*inverted_constraints)
def intersect(self, other: BaseConstraint) -> BaseConstraint:
if other.is_any():
return self
if other.is_empty():
return other
if isinstance(other, Constraint):
other = UnionConstraint(other)
new_constraints = []
if isinstance(other, UnionConstraint):
for our_constraint in self._constraints:
for their_constraint in other.constraints:
intersection = our_constraint.intersect(their_constraint)
if (not (intersection.is_empty() or (intersection in new_constraints))):
new_constraints.append(intersection)
else:
assert isinstance(other, MultiConstraint)
for our_constraint in self._constraints:
intersection = our_constraint
for their_constraint in other.constraints:
intersection = intersection.intersect(their_constraint)
if (not (intersection.is_empty() or (intersection in new_constraints))):
new_constraints.append(intersection)
if (not new_constraints):
return EmptyConstraint()
if (len(new_constraints) == 1):
return new_constraints[0]
return UnionConstraint(*new_constraints)
    def union(self, other: BaseConstraint) -> BaseConstraint:
        """Return the union of this constraint set with ``other``.

        Against another union, pairwise member unions are partitioned into:
        ours that absorbed theirs, theirs that absorbed ours, and genuinely
        merged constraints; leftovers are kept verbatim.  Short-circuits to
        AnyConstraint as soon as one pairwise union covers everything.
        """
        if other.is_any():
            return other
        if other.is_empty():
            return self
        if isinstance(other, Constraint):
            # Treat a lone primitive constraint as a one-element union.
            other = UnionConstraint(other)
        new_constraints: list[BaseConstraint] = []
        if isinstance(other, UnionConstraint):
            our_new_constraints: list[BaseConstraint] = []
            their_new_constraints: list[BaseConstraint] = []
            merged_new_constraints: list[BaseConstraint] = []
            for their_constraint in other.constraints:
                for our_constraint in self._constraints:
                    union = our_constraint.union(their_constraint)
                    if union.is_any():
                        # One pair already covers every version.
                        return AnyConstraint()
                    if isinstance(union, Constraint):
                        # The pair collapsed to a single primitive constraint:
                        # record which side (if any) it is identical to.
                        if (union == our_constraint):
                            if (union not in our_new_constraints):
                                our_new_constraints.append(union)
                        elif (union == their_constraint):
                            if (union not in their_new_constraints):
                                their_new_constraints.append(their_constraint)
                        elif (union not in merged_new_constraints):
                            merged_new_constraints.append(union)
                    else:
                        # No simplification possible: keep both operands.
                        if (our_constraint not in our_new_constraints):
                            our_new_constraints.append(our_constraint)
                        if (their_constraint not in their_new_constraints):
                            their_new_constraints.append(their_constraint)
            # Ours first, then theirs and merged ones, deduplicated in order.
            new_constraints = our_new_constraints
            for constraint in itertools.chain(their_new_constraints, merged_new_constraints):
                if (constraint not in new_constraints):
                    new_constraints.append(constraint)
        else:
            assert isinstance(other, MultiConstraint)
            # A conjunction is appended as a single opaque member.
            new_constraints = [*self._constraints, other]
        if (len(new_constraints) == 1):
            return new_constraints[0]
        return UnionConstraint(*new_constraints)
def __eq__(self, other: object) -> bool:
if (not isinstance(other, UnionConstraint)):
return False
return (self._constraints == other._constraints)
def __hash__(self) -> int:
return hash(('union', *self._constraints))
def __str__(self) -> str:
constraints = [str(constraint) for constraint in self._constraints]
return ' || '.join(constraints) |
class SelectAction(argparse.Action):
    """argparse action expanding a ``--SELECT-foo`` option into paired
    ``--include-foo`` / ``--exclude-foo`` flags.

    Every occurrence is accumulated on a shared ``selections`` dest as an
    ``(option-name, values)`` tuple, in command-line order.  Boolean flags
    take no arguments and record ``True`` (their default) when present.
    """
    placeholder = 'SELECT'
    default_dest = 'selections'

    def __init__(self, option_strings, dest, type=str, nargs=None, help=None, default=None, **kwargs):
        # If dest was auto-derived from the placeholder option itself, funnel
        # all recorded selections into the shared 'selections' list.
        if ('--' + dest.replace('_', '-')) in option_strings:
            dest = self.default_dest
        # Boolean flags consume no arguments; bare presence means True.
        if type is bool and nargs is None:
            nargs = 0
            if default is None:
                default = True
        include_opts = [x.replace(self.placeholder, 'include') for x in option_strings]
        exclude_opts = [x.replace(self.placeholder, 'exclude') for x in option_strings]
        if exclude_opts != include_opts:
            # Placeholder was present: register both spellings of every option.
            option_strings = exclude_opts + include_opts
            if help:
                help = help.replace(self.placeholder, 'exclude/include')
                if default is None:
                    help += ' (no default)'
                elif default:
                    help += ' (default is include)'
                else:
                    help += ' (default is exclude)'
        super().__init__(option_strings, dest, type=type, nargs=nargs, help=help, default=default, **kwargs)

    def __call__(self, parser, namespace, values, option_string=None):
        old_list = getattr(namespace, self.dest, None)
        # BUGFIX: argparse pre-seeds the namespace with this action's default
        # (e.g. True for boolean flags), which is not a list, so the original
        # `old_list + [...]` raised TypeError on the first matched flag.
        # Treat any non-list prior value as an empty accumulator.
        if not isinstance(old_list, list):
            old_list = []
        # nargs=0 yields []; substitute the configured default value instead.
        if values == [] and self.default is not None:
            values = self.default
        setattr(namespace, self.dest, old_list + [(option_string.replace('--', ''), values)])
class MCTCTProcessor(ProcessorMixin):
    """Bundles an M-CTC-T feature extractor and a tokenizer behind one
    processor interface: audio goes to the feature extractor, text to the
    tokenizer.
    """
    feature_extractor_class = 'MCTCTFeatureExtractor'
    tokenizer_class = 'AutoTokenizer'

    def __init__(self, feature_extractor, tokenizer):
        super().__init__(feature_extractor, tokenizer)
        # Audio processing is the default until as_target_processor() flips it.
        self.current_processor = self.feature_extractor
        self._in_target_context_manager = False

    def __call__(self, *args, **kwargs):
        """Route audio to the feature extractor and/or text to the tokenizer.

        When both are given, the tokenized ids are attached to the
        feature-extractor output under the 'labels' key.
        """
        # Inside as_target_processor() everything goes to the active processor.
        if self._in_target_context_manager:
            return self.current_processor(*args, **kwargs)
        if ('raw_speech' in kwargs):
            warnings.warn('Using `raw_speech` as a keyword argument is deprecated. Use `audio` instead.')
            audio = kwargs.pop('raw_speech')
        else:
            audio = kwargs.pop('audio', None)
        text = kwargs.pop('text', None)
        # A leading positional argument is treated as the audio input.
        if (len(args) > 0):
            audio = args[0]
            args = args[1:]
        if ((audio is None) and (text is None)):
            raise ValueError('You need to specify either an `audio` or `text` input to process.')
        if (audio is not None):
            inputs = self.feature_extractor(audio, *args, **kwargs)
        if (text is not None):
            encodings = self.tokenizer(text, **kwargs)
        if (text is None):
            return inputs
        elif (audio is None):
            return encodings
        else:
            inputs['labels'] = encodings['input_ids']
            return inputs

    def batch_decode(self, *args, **kwargs):
        """Forward to the tokenizer's batch_decode."""
        return self.tokenizer.batch_decode(*args, **kwargs)

    def pad(self, *args, **kwargs):
        """Pad input features and/or labels, mirroring __call__'s routing."""
        if self._in_target_context_manager:
            return self.current_processor.pad(*args, **kwargs)
        input_features = kwargs.pop('input_features', None)
        labels = kwargs.pop('labels', None)
        # A leading positional argument is treated as the input features.
        if (len(args) > 0):
            input_features = args[0]
            args = args[1:]
        if (input_features is not None):
            input_features = self.feature_extractor.pad(input_features, *args, **kwargs)
        if (labels is not None):
            labels = self.tokenizer.pad(labels, **kwargs)
        if (labels is None):
            return input_features
        elif (input_features is None):
            return labels
        else:
            input_features['labels'] = labels['input_ids']
            return input_features

    def decode(self, *args, **kwargs):
        """Forward to the tokenizer's decode."""
        return self.tokenizer.decode(*args, **kwargs)

    def as_target_processor(self):
        """Temporarily switch the active processor to the tokenizer (deprecated).

        NOTE(review): this is a generator method — upstream transformers wraps
        it with @contextlib.contextmanager; the decorator appears to have been
        stripped in this copy.  Verify before calling it as a `with` target.
        """
        warnings.warn('`as_target_processor` is deprecated and will be removed in v5 of Transformers. You can process your labels by using the argument `text` of the regular `__call__` method (either in the same call as your audio inputs, or in a separate call.')
        self._in_target_context_manager = True
        self.current_processor = self.tokenizer
        (yield)
        self.current_processor = self.feature_extractor
        self._in_target_context_manager = False
class RedirectAfterPost(MethodResult):
    """MethodResult answering a POST with an HTTP See Other redirect.

    On success the handler's return value is the redirect target; when a
    DomainException is caught, the redirect falls back to the parent URL.
    """

    def __init__(self, mime_type='text/html', encoding='utf-8'):
        super().__init__(catch_exception=DomainException, mime_type=mime_type, encoding=encoding)

    def create_response(self, return_value):
        # The handler's return value is the target URL.
        return HTTPSeeOther(location=str(return_value))

    def create_exception_response(self, exception):
        # On a domain error, bounce back to the parent resource.
        return HTTPSeeOther(location=str(SubResource.get_parent_url()))
class DiskImageDataset(QueueDataset):
    """Dataset reading images from a filelist ('disk_filelist') or an image
    folder ('disk_folder').

    For 'disk_filelist' the loaded list is dropped right after counting so it
    is not pickled into dataloader workers; it is lazily reloaded on the first
    __getitem__ in each worker.
    """

    def __init__(self, cfg, data_source, path, split, dataset_name):
        super(DiskImageDataset, self).__init__(queue_size=cfg['DATA'][split]['BATCHSIZE_PER_REPLICA'])
        assert (data_source in ['disk_filelist', 'disk_folder']), 'data_source must be either disk_filelist or disk_folder'
        if (data_source == 'disk_filelist'):
            assert PathManager.isfile(path), f'File {path} does not exist'
        elif (data_source == 'disk_folder'):
            assert PathManager.isdir(path), f'Directory {path} does not exist'
        self.cfg = cfg
        self.split = split
        self.dataset_name = dataset_name
        self.data_source = data_source
        self._path = path
        self.image_dataset = []
        self.is_initialized = False
        # Load once to know the dataset size.
        self._load_data(path)
        self._num_samples = len(self.image_dataset)
        self._remove_prefix = cfg['DATA'][self.split]['REMOVE_IMG_PATH_PREFIX']
        self._new_prefix = cfg['DATA'][self.split]['NEW_IMG_PATH_PREFIX']
        if (self.data_source == 'disk_filelist'):
            # Drop the list so workers don't pickle it; __getitem__ reloads it.
            self.image_dataset = []
        self.enable_queue_dataset = cfg['DATA'][self.split]['ENABLE_QUEUE_DATASET']

    def _load_data(self, path):
        """(Re)load self.image_dataset from disk.

        BUGFIX: this method used to also set `self.is_initialized = True`,
        which meant the filelist cleared at the end of __init__ was never
        reloaded by __getitem__ (every access then indexed an empty list).
        The flag is now managed by __getitem__ alone.
        """
        if (self.data_source == 'disk_filelist'):
            if self.cfg['DATA'][self.split].MMAP_MODE:
                self.image_dataset = load_file(path, mmap_mode='r')
            else:
                self.image_dataset = load_file(path)
        elif (self.data_source == 'disk_folder'):
            self.image_dataset = ImageFolder(path)
            logging.info(f'Loaded {len(self.image_dataset)} samples from folder {path}')

    def num_samples(self):
        """Number of samples counted at construction time."""
        return self._num_samples

    def get_image_paths(self):
        """Reload and return the raw image list/folder dataset."""
        self._load_data(self._path)
        return self.image_dataset

    def _replace_img_path_prefix(self, img_path, replace_prefix, new_prefix):
        # Rewrite the configured path prefix, leaving other paths untouched.
        if img_path.startswith(replace_prefix):
            return img_path.replace(replace_prefix, new_prefix)
        return img_path

    def __len__(self):
        return self.num_samples()

    def __getitem__(self, idx):
        """Return (img, is_success); falls back to queue/mean image on failure."""
        # Lazy (re)load inside the worker process.
        if (not self.is_initialized):
            self._load_data(self._path)
            self.is_initialized = True
        if ((not self.queue_init) and self.enable_queue_dataset):
            self._init_queues()
        is_success = True
        image_path = self.image_dataset[idx]
        try:
            if (self.data_source == 'disk_filelist'):
                image_path = self._replace_img_path_prefix(image_path, replace_prefix=self._remove_prefix, new_prefix=self._new_prefix)
                with PathManager.open(image_path, 'rb') as fopen:
                    img = Image.open(fopen).convert('RGB')
            elif (self.data_source == 'disk_folder'):
                img = self.image_dataset[idx][0]
            # NOTE(review): `on_sucess` (sic) matches the queue-dataset base
            # class API spelling — do not "fix" the name locally.
            if (is_success and self.enable_queue_dataset):
                self.on_sucess(img)
        except Exception as e:
            logging.warning(f'''Couldn't load: {self.image_dataset[idx]}. Exception:
{e}''')
            is_success = False
            # Substitute a previously-seen image from the queue, or a gray
            # mean image when nothing is available.
            if self.enable_queue_dataset:
                (img, is_success) = self.on_failure()
                if (img is None):
                    img = get_mean_image(self.cfg['DATA'][self.split].DEFAULT_GRAY_IMG_SIZE)
            else:
                img = get_mean_image(self.cfg['DATA'][self.split].DEFAULT_GRAY_IMG_SIZE)
        return (img, is_success)
def _recursive_tuples(iterable, box_class, recreate_tuples=False, **kwargs):
out_list = []
for i in iterable:
if isinstance(i, dict):
out_list.append(box_class(i, **kwargs))
elif (isinstance(i, list) or (recreate_tuples and isinstance(i, tuple))):
out_list.append(_recursive_tuples(i, box_class, recreate_tuples, **kwargs))
else:
out_list.append(i)
return tuple(out_list) |
# Mapping from full DOTA category names to their conventional short codes.
# Hoisted to module level so the dict is not rebuilt on every call.
_DOTA_SHORT_NAMES = {'roundabout': 'RA', 'tennis-court': 'TC', 'swimming-pool': 'SP', 'storage-tank': 'ST', 'soccer-ball-field': 'SBF', 'small-vehicle': 'SV', 'ship': 'SH', 'plane': 'PL', 'large-vehicle': 'LV', 'helicopter': 'HC', 'harbor': 'HA', 'ground-track-field': 'GTF', 'bridge': 'BR', 'basketball-court': 'BC', 'baseball-diamond': 'BD', 'container-crane': 'CC', 'airport': 'AP', 'helipad': 'HP'}

def get_dota_short_names(label):
    """Return the short code for a DOTA category label.

    Raises KeyError for unknown labels (unchanged behavior).
    """
    return _DOTA_SHORT_NAMES[label]
def identify_pdfium():
    """Determine the PDFium version from its git checkout.

    Returns ``(v_short, v_info)`` where ``v_info`` carries the number of
    commits past the tagged ref and, when non-zero, a 'g'-prefixed short
    commit hash (None otherwise).
    """
    log = run_cmd(['git', 'log', '-100', '--pretty=%D'], cwd=PDFiumDir, capture=True)
    (v_short, n_commits) = _walk_refs(log)
    if n_commits:
        # Renamed local from `hash` to avoid shadowing the builtin.
        commit_hash = ('g' + run_cmd(['git', 'rev-parse', '--short', 'HEAD'], cwd=PDFiumDir, capture=True))
    else:
        commit_hash = None
    v_info = dict(n_commits=n_commits, hash=commit_hash)
    return (v_short, v_info)
def sample_generate_light(gen, dst, rows=5, cols=5, seed=0):
    """Build a chainer trainer extension that renders a rows x cols grid of
    generated samples to ``<dst>/preview/image_latest.png``.

    BUGFIX: the decorator line had been mangled to a bare `.make_extension()`
    (a SyntaxError); restored the chainer trainer-extension decorator.
    """
    @chainer.training.make_extension()
    def make_image(trainer):
        # Fixed seed so the preview shows the same latent points every call.
        np.random.seed(seed)
        n_images = (rows * cols)
        xp = gen.xp
        z = Variable(xp.asarray(gen.make_hidden(n_images)))
        with chainer.using_config('train', False), chainer.using_config('enable_backprop', False):
            x = gen(z)
        x = chainer.cuda.to_cpu(x.data)
        # Re-randomize the global seed so training randomness is unaffected.
        np.random.seed()
        # Map [-1, 1] outputs to uint8 pixel values.
        x = np.asarray(np.clip(((x * 127.5) + 127.5), 0.0, 255.0), dtype=np.uint8)
        (_, _, H, W) = x.shape
        # Tile the batch into a single (rows*H, cols*W, 3) image.
        x = x.reshape((rows, cols, 3, H, W))
        x = x.transpose(0, 3, 1, 4, 2)
        x = x.reshape(((rows * H), (cols * W), 3))
        preview_dir = '{}/preview'.format(dst)
        preview_path = (preview_dir + '/image_latest.png')
        if (not os.path.exists(preview_dir)):
            os.makedirs(preview_dir)
        Image.fromarray(x).save(preview_path)
    return make_image
class Migration(migrations.Migration):
    """Adds the TieredQuantity benefit feature and its configuration model."""

    dependencies = [
        ('sponsors', '0032_sponsorcontact_accounting'),
    ]

    operations = [
        migrations.CreateModel(
            name='TieredQuantity',
            fields=[
                ('benefitfeature_ptr', models.OneToOneField(auto_created=True, on_delete=django.db.models.deletion.CASCADE, parent_link=True, primary_key=True, serialize=False, to='sponsors.BenefitFeature')),
                ('quantity', models.PositiveIntegerField()),
                ('package', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='sponsors.SponsorshipPackage')),
            ],
            options={
                'verbose_name': 'Tiered Quantity',
                'verbose_name_plural': 'Tiered Quantities',
                'abstract': False,
                'base_manager_name': 'objects',
            },
            bases=('sponsors.benefitfeature', models.Model),
        ),
        migrations.CreateModel(
            name='TieredQuantityConfiguration',
            fields=[
                ('benefitfeatureconfiguration_ptr', models.OneToOneField(auto_created=True, on_delete=django.db.models.deletion.CASCADE, parent_link=True, primary_key=True, serialize=False, to='sponsors.BenefitFeatureConfiguration')),
                ('quantity', models.PositiveIntegerField()),
                ('package', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='sponsors.SponsorshipPackage')),
            ],
            options={
                'verbose_name': 'Tiered Benefit Configuration',
                'verbose_name_plural': 'Tiered Benefit Configurations',
                'abstract': False,
                'base_manager_name': 'objects',
            },
            bases=('sponsors.benefitfeatureconfiguration', models.Model),
        ),
    ]
class AverageMeter(Meter):
    """Tracks the latest value and running average of a scalar metric."""

    def __init__(self, name, fmt=':f', write_val=True, write_avg=True):
        self.name = name
        self.fmt = fmt
        self.write_val = write_val
        self.write_avg = write_avg
        self.reset()

    def reset(self):
        """Zero out the running statistics."""
        self.val = 0
        self.avg = 0
        self.sum = 0
        self.count = 0

    def update(self, val, n=1):
        """Record `val` observed `n` times and refresh the running average."""
        self.val = val
        self.sum += val * n
        self.count += n
        self.avg = self.sum / self.count

    def __str__(self):
        # e.g. fmt=':f' -> '{name} {val:f} ({avg:f})'
        template = f'{{name}} {{val{self.fmt}}} ({{avg{self.fmt}}})'
        return template.format(**self.__dict__)
def load_data(data_path, dataset, images):
    """Load train/val/test splits from `<data_path><split>.json` (one JSON
    object per line) and normalize them per dataset.

    For 'bird' and 'ImageEdit', records whose images are missing from
    `images` are dropped; non-train splits are exploded to one record per
    description/sentence.  Train splits of each dataset are shuffled in place.
    """
    all_datas = {}
    for split in ['train', 'val', 'test']:
        datas = []
        dropdata = 0
        with open(((data_path + split) + '.json'), 'r', encoding='utf-8') as fin:
            for line in fin:
                jterm = json.loads(line.strip())
                if (dataset == 'bird'):
                    # Keep only pairs whose both images are available.
                    if ((jterm['img1'] in images) and (jterm['img2'] in images)):
                        if (split == 'train'):
                            datas.append(jterm)
                        else:
                            # One eval record per individual description.
                            for des in jterm['description']:
                                new_jterm = {}
                                new_jterm['img1'] = jterm['img1']
                                new_jterm['img2'] = jterm['img2']
                                new_jterm['description'] = des
                                datas.append(new_jterm)
                    else:
                        dropdata += 1
                elif (dataset == 'ImageEdit'):
                    # Image keys are stored with '.jpg'; compare without it.
                    if ((jterm['img1'].replace('.jpg', '') in images) and (jterm['img2'].replace('.jpg', '') in images)):
                        if (split == 'train'):
                            jterm['img1'] = jterm['img1'].replace('.jpg', '')
                            jterm['img2'] = jterm['img2'].replace('.jpg', '')
                            jterm['sentences'] = jterm['sentences'].split(' ')
                            datas.append(jterm)
                        else:
                            # One eval record per sentence, tokenized on spaces.
                            for des in jterm['sentences']:
                                new_jterm = {}
                                new_jterm['img1'] = jterm['img1'].replace('.jpg', '')
                                new_jterm['img2'] = jterm['img2'].replace('.jpg', '')
                                new_jterm['sentences'] = des.split(' ')
                                datas.append(new_jterm)
                    else:
                        dropdata += 1
                elif (dataset == 'nlvr'):
                    # Build the two .npy image names from the NLVR ImgId.
                    new_jterm = {}
                    ids = jterm['ImgId'].split('-')
                    new_jterm['img1'] = ((((((ids[0] + '-') + ids[1]) + '-') + ids[2]) + '-') + 'img0.npy')
                    new_jterm['img2'] = ((((((ids[0] + '-') + ids[1]) + '-') + ids[2]) + '-') + 'img1.npy')
                    new_jterm['description'] = jterm['sentence']
                    new_jterm['label'] = 1
                    datas.append(new_jterm)
                # NOTE: dataset key spelled 'clver' (sic) — callers must match
                # this exact spelling.
                elif (dataset == 'clver'):
                    for des in jterm['sentences']:
                        new_jterm = {}
                        new_jterm['img1'] = jterm['img1']
                        new_jterm['img2'] = jterm['img2']
                        new_jterm['sentences'] = des.split(' ')
                        datas.append(new_jterm)
        print('dataset:', dataset, 'Total True Label datas ', len(datas), 'drop ', dropdata, ' data')
        random.shuffle(datas)
        all_datas[split] = datas
    return all_datas
class ProgBarCounter(object):
    """Thin wrapper around pyprind.ProgBar that maps `total_count` increments
    onto a fixed-resolution progress scale; disabled entirely when the logger
    is in tabular-only mode.
    """

    def __init__(self, total_count):
        self.total_count = total_count
        self.max_progress = 1000000
        self.cur_progress = 0
        self.cur_count = 0
        self.pbar = None if logger.get_log_tabular_only() else pyprind.ProgBar(self.max_progress)

    def inc(self, increment):
        """Advance the counter by `increment` items and refresh the bar."""
        if logger.get_log_tabular_only():
            return
        self.cur_count += increment
        new_progress = self.cur_count * self.max_progress / self.total_count
        if new_progress < self.max_progress:
            self.pbar.update(new_progress - self.cur_progress)
        self.cur_progress = new_progress

    def stop(self):
        """Stop the bar if it was created and is still running."""
        if self.pbar is not None and self.pbar.active:
            self.pbar.stop()
class Server(threading.Thread):
    """Hosts a set of ExpertBackends behind connection handlers and (optionally)
    announces them on a DHT.

    Runs a Runtime in this thread; connection handlers and the optional DHT /
    checkpoint saver run alongside it and are torn down by shutdown().
    """

    def __init__(self, dht: Optional[DHT], expert_backends: Dict[(str, ExpertBackend)], listen_on: Endpoint='0.0.0.0:*', num_connection_handlers: int=1, update_period: int=30, start=False, checkpoint_dir=None, **kwargs):
        super().__init__()
        (self.dht, self.experts, self.update_period) = (dht, expert_backends, update_period)
        # Pick a free port when the endpoint does not pin one.
        if (get_port(listen_on) is None):
            listen_on = replace_port(listen_on, new_port=get_free_port())
        (self.listen_on, self.port) = (listen_on, get_port(listen_on))
        self.conn_handlers = [ConnectionHandler(listen_on, self.experts) for _ in range(num_connection_handlers)]
        if (checkpoint_dir is not None):
            self.checkpoint_saver = CheckpointSaver(expert_backends, checkpoint_dir, update_period)
        else:
            self.checkpoint_saver = None
        self.runtime = Runtime(self.experts, **kwargs)
        # Periodically (re)announce experts on the DHT while running.
        if (self.dht and self.experts):
            self.dht_handler_thread = DHTHandlerThread(experts=self.experts, dht=self.dht, endpoint=self.listen_on, update_period=self.update_period, daemon=True)
        if start:
            self.run_in_background(await_ready=True)

    # NOTE(review): takes `cls` but carries no @classmethod decorator here —
    # the decorator appears to have been stripped in this copy; confirm
    # against the original module.
    def create(cls, listen_on='0.0.0.0:*', num_experts: int=None, expert_uids: str=None, expert_pattern: str=None, expert_cls='ffn', hidden_dim=1024, optim_cls=torch.optim.Adam, scheduler: str='none', num_warmup_steps=None, num_total_steps=None, clip_grad_norm=None, num_handlers=None, min_batch_size=1, max_batch_size=1, use_averaging: bool=False, averaging_target_batch_size: Optional[int]=None, averaging_target_group_size: Optional[int]=None, averaging_min_refresh_period=1, averaging_max_refresh_period=60, averaging_default_refresh_period=10, averaging_expiration=30, metadata_expiration=120, averaging_timeout=30, reuse_grad_buffers=True, device=None, fp16=False, offload=False, no_dht=False, dht_port=None, dht_listen_on=None, initial_peers=(), checkpoint_dir: Optional[Path]=None, compression=CompressionType.NONE, averaging_compression=CompressionType.FLOAT16, stats_report_interval: Optional[int]=None, custom_module_path=None, identity_path=None, *, start: bool, **kwargs) -> Server:
        """Build a Server with freshly constructed experts, optimizers and
        (unless no_dht) a running DHT node.
        """
        if (custom_module_path is not None):
            add_custom_models_from_file(custom_module_path)
        assert (expert_cls in name_to_block)
        if no_dht:
            dht = None
        else:
            dht_port = (dht_port or src.get_free_port())
            host_maddrs = []
            announce_maddrs = []
            if (dht_listen_on is not None):
                dht_maddr = f'/{dht_listen_on}/tcp/{dht_port}'
                host_maddrs.append(dht_maddr)
                announce_maddrs.append(dht_maddr)
            dht = src.DHT(initial_peers=initial_peers, start=True, identity_path=identity_path, host_maddrs=host_maddrs, announce_maddrs=announce_maddrs)
            visible_maddrs_str = [str(a) for a in dht.get_visible_maddrs()]
            logger.info(f'Running DHT node on {visible_maddrs_str}, initial peers = {initial_peers}')
        assert (((expert_pattern is None) and (num_experts is None) and (expert_uids is not None)) or ((num_experts is not None) and (expert_uids is None))), 'Please provide either expert_uids *or* num_experts (possibly with expert_pattern), but not both'
        if (expert_uids is None):
            # Resume uids from checkpoints when available, then top up from
            # the generation pattern until num_experts is reached.
            if (checkpoint_dir is not None):
                assert is_directory(checkpoint_dir)
                expert_uids = [child.name for child in checkpoint_dir.iterdir() if (child / 'checkpoint_last.pt').exists()]
                total_experts_in_checkpoint = len(expert_uids)
                logger.info(f'Located {total_experts_in_checkpoint} checkpoints for experts {expert_uids}')
                if (total_experts_in_checkpoint > num_experts):
                    raise ValueError(f'Found {total_experts_in_checkpoint} checkpoints, but num_experts is set to {num_experts}, which is smaller. Either increase num_experts or remove unneeded checkpoints.')
            else:
                expert_uids = []
            uids_to_generate = (num_experts - len(expert_uids))
            if (uids_to_generate > 0):
                logger.info(f'Generating {uids_to_generate} expert uids from pattern {expert_pattern}')
                expert_uids.extend(generate_uids_from_pattern(uids_to_generate, expert_pattern, dht))
        num_experts = len(expert_uids)
        num_handlers = (num_handlers if (num_handlers is not None) else (num_experts * 8))
        # Derive the per-argument tensor schema from a sample forward input.
        sample_input = name_to_input[expert_cls](3, hidden_dim)
        if isinstance(sample_input, tuple):
            args_schema = tuple((BatchTensorDescriptor.from_tensor(arg, compression) for arg in sample_input))
        else:
            args_schema = (BatchTensorDescriptor.from_tensor(sample_input, compression),)
        scheduler = schedule_name_to_scheduler[scheduler]
        device = (device or ('cuda' if torch.cuda.is_available() else 'cpu'))
        experts = {}
        for expert_uid in expert_uids:
            expert = name_to_block[expert_cls](hidden_dim)
            # Standard no-weight-decay split for biases and LayerNorm weights.
            no_decay = ['bias', 'LayerNorm.weight']
            optimizer_grouped_parameters = [{'params': [p for (n, p) in expert.named_parameters() if (not any(((nd in n) for nd in no_decay)))], 'weight_decay': 0.01}, {'params': [p for (n, p) in expert.named_parameters() if any(((nd in n) for nd in no_decay))], 'weight_decay': 0.0}]
            optim_kwargs = dict(lr=0., betas=(0.9, 0.999), eps=1e-06, weight_decay=0.01, max_grad_norm=1, clamp_value=10000.0, debias=True)
            if offload:
                optim = OffloadOptimizer(optimizer_grouped_parameters, optim_cls=LambWithGradientClipping, **optim_kwargs)
            else:
                optim = LambWithGradientClipping(optimizer_grouped_parameters, **optim_kwargs)
            scheduler = scheduler(optim, num_warmup_steps=num_warmup_steps, num_training_steps=num_total_steps)
            if use_averaging:
                # Wrap the optimizer for decentralized gradient averaging.
                assert (averaging_target_batch_size is not None)
                assert (averaging_target_group_size is not None)
                averaging_compression = SizeAdaptiveCompression(threshold=((2 ** 16) + 1), less=Float16Compression(), greater_equal=Uniform8BitQuantization())
                optim = CollaborativeOptimizer(optim, dht=dht, prefix=expert_uid.split(UID_DELIMITER)[0], scheduler=scheduler, compression=averaging_compression, state_compression=Float16Compression(), target_batch_size=averaging_target_batch_size, target_group_size=averaging_target_group_size, min_refresh_period=averaging_min_refresh_period, max_refresh_period=averaging_max_refresh_period, default_refresh_period=averaging_default_refresh_period, averaging_expiration=averaging_expiration, metadata_expiration=metadata_expiration, averaging_timeout=averaging_timeout, reuse_grad_buffers=reuse_grad_buffers, verbose=True, start=True)
                optim.load_state_from_peers()
                experts[expert_uid] = ExpertBackend(name=expert_uid, expert=expert, args_schema=args_schema, optimizer=optim, device=device, fp16=fp16, clip_grad_norm=clip_grad_norm, min_batch_size=min_batch_size, max_batch_size=max_batch_size)
            else:
                experts[expert_uid] = ExpertBackend(name=expert_uid, expert=expert, args_schema=args_schema, optimizer=optim, device=device, fp16=fp16, clip_grad_norm=clip_grad_norm, min_batch_size=min_batch_size, max_batch_size=max_batch_size, target_batch_size=averaging_target_batch_size)
        if (checkpoint_dir is not None):
            load_experts(experts, checkpoint_dir)
        return cls(dht, experts, listen_on=listen_on, num_connection_handlers=num_handlers, device=device, checkpoint_dir=checkpoint_dir, stats_report_interval=stats_report_interval, start=start, **kwargs)

    def run(self):
        """Start DHT announcements, checkpointing and connection handlers, then
        run the Runtime until it exits; always shuts everything down after.
        """
        logger.info(f'Server started at {self.listen_on}')
        logger.info(f'Got {len(self.experts)} experts:')
        for (expert_name, backend) in self.experts.items():
            num_parameters = sum((p.numel() for p in backend.expert.parameters() if p.requires_grad))
            logger.info(f'{expert_name}: {backend.expert.__class__.__name__}, {num_parameters} parameters')
        if self.dht:
            if (not self.dht.is_alive()):
                self.dht.run_in_background(await_ready=True)
            if self.experts:
                self.dht_handler_thread.start()
        if (self.checkpoint_saver is not None):
            self.checkpoint_saver.start()
        for process in self.conn_handlers:
            if (not process.is_alive()):
                process.start()
            process.ready.wait()
        try:
            self.runtime.run()
        finally:
            self.shutdown()

    def run_in_background(self, await_ready=True, timeout=None):
        """Start the server thread, optionally blocking until it is ready."""
        self.start()
        if (await_ready and (not self.ready.wait(timeout=timeout))):
            # BUGFIX: the message was a plain string, so {timeout} was printed
            # literally instead of being interpolated.
            raise TimeoutError(f"Server didn't notify .ready in {timeout} seconds")

    # NOTE(review): used as an attribute (`self.ready.wait(...)`, `.clear()`)
    # elsewhere in this class — an @property decorator appears stripped here.
    def ready(self) -> mp.synchronize.Event:
        return self.runtime.ready

    def shutdown(self):
        """Tear down handlers, DHT threads, checkpoint saver and the runtime."""
        self.ready.clear()
        for process in self.conn_handlers:
            process.terminate()
            process.join()
        logger.debug('Connection handlers terminated')
        if (self.dht and self.experts):
            self.dht_handler_thread.stop.set()
            self.dht_handler_thread.join()
        if (self.checkpoint_saver is not None):
            self.checkpoint_saver.stop.set()
            self.checkpoint_saver.join()
        if (self.dht is not None):
            self.dht.shutdown()
            self.dht.join()
        logger.debug(f'Shutting down runtime')
        self.runtime.shutdown()
        logger.info('Server shutdown succesfully')
class Tourney(BaseDbModel):
    """ORM model for a tournament: registration settings, channels/roles and
    slot bookkeeping (table ``tm.tourney``).
    """

    class Meta():
        table = 'tm.tourney'

    id = fields.BigIntField(pk=True, index=True)
    guild_id = fields.BigIntField()  # Discord guild that owns the tourney
    name = fields.CharField(max_length=30, default='Quotient-Tourney')
    registration_channel_id = fields.BigIntField(index=True)  # where teams register
    confirm_channel_id = fields.BigIntField()  # where confirmed slots are posted
    role_id = fields.BigIntField()  # role granted to confirmed registrants
    # How many teammate mentions a registration message must contain (0-10).
    required_mentions = fields.SmallIntField(default=4, validators=[ValueRangeValidator(range(0, 11))])
    total_slots = fields.SmallIntField()
    banned_users = ArrayField(fields.BigIntField(), default=list)
    host_id = fields.BigIntField()  # user id of the tourney host
    multiregister = fields.BooleanField(default=False)
    started_at = fields.DatetimeField(null=True)  # set while registrations are open
    closed_at = fields.DatetimeField(null=True)  # set when registrations closed
    open_role_id = fields.BigIntField(null=True)  # role allowed to register
    teamname_compulsion = fields.BooleanField(default=False)
    ping_role_id = fields.BigIntField(null=True)  # role pinged on open
    no_duplicate_name = fields.BooleanField(default=True)
    autodelete_rejected = fields.BooleanField(default=False)
    slotlist_start = fields.SmallIntField(default=2)
    group_size = fields.SmallIntField(null=True)  # slots per group (see _get_groups)
    success_message = fields.CharField(max_length=500, null=True)  # DM on confirm
    emojis = fields.JSONField(default=_dict)  # custom tick/cross emojis
    slotm_channel_id = fields.BigIntField(null=True)  # slot-manager channel
    slotm_message_id = fields.BigIntField(null=True)
    required_lines = fields.SmallIntField(default=0)
    allow_duplicate_tags = fields.BooleanField(default=True)
    assigned_slots: fields.ManyToManyRelation['TMSlot'] = fields.ManyToManyField('models.TMSlot')
    media_partners: fields.ManyToManyRelation['MediaPartner'] = fields.ManyToManyField('models.MediaPartner')
    def __str__(self):
        # Human-readable label: registration channel mention plus the row id.
        return f"{getattr(self.registration_channel, 'mention', 'deleted-channel')} [ID: `{self.id}`]"

    # NOTE(review): takes `cls` and is used as a commands converter — an
    # @classmethod decorator appears to have been stripped in this copy.
    async def convert(cls, ctx, argument: str):
        """Resolve a command argument (numeric tourney id) to a Tourney row."""
        try:
            argument = int(argument)
        except ValueError:
            pass
        else:
            try:
                return (await cls.get(pk=argument, guild_id=ctx.guild.id))
            except exceptions.DoesNotExist:
                pass
        raise BadArgument(f'''This is not a valid Tourney ID.
Get a valid ID with `{ctx.prefix}tourney config`''')

    # NOTE(review): the accessors below are used as attributes elsewhere in
    # this class (e.g. `self.registration_channel` in __str__) — their
    # @property decorators appear stripped in this copy.
    def guild(self) -> Optional[discord.Guild]:
        return self.bot.get_guild(self.guild_id)

    def logschan(self) -> Optional[discord.TextChannel]:
        # Log channel is located by its fixed name, not a stored id.
        if ((g := self.guild) is not None):
            return discord.utils.get(g.text_channels, name='quotient-tourney-logs')

    def registration_channel(self) -> Optional[discord.TextChannel]:
        if ((g := self.guild) is not None):
            return g.get_channel(self.registration_channel_id)

    def confirm_channel(self) -> Optional[discord.TextChannel]:
        if ((g := self.guild) is not None):
            return g.get_channel(self.confirm_channel_id)

    def slotm_channel(self) -> Optional[discord.TextChannel]:
        if ((g := self.guild) is not None):
            return g.get_channel(self.slotm_channel_id)

    def closed(self):
        # A tourney counts as closed once closed_at is set.
        return bool(self.closed_at)

    def role(self) -> Optional[discord.Role]:
        if ((g := self.guild) is not None):
            return g.get_role(self.role_id)

    def open_role(self):
        # Falls back to @everyone when no explicit open role is configured.
        if ((g := self.guild) is not None):
            if (self.open_role_id is not None):
                return g.get_role(self.open_role_id)
            return self.guild.default_role

    def ping_role(self):
        if ((g := self.guild) is not None):
            if (self.ping_role_id is not None):
                return g.get_role(self.ping_role_id)
            return None

    def modrole(self):
        # Moderator role is located by its fixed name (created in setup_logs).
        if ((g := self.guild) is not None):
            return discord.utils.get(g.roles, name='tourney-mod')

    def check_emoji(self):
        return self.emojis.get('tick', '')

    def cross_emoji(self):
        return self.emojis.get('cross', '')

    # NOTE(review): no `self` parameter — an @staticmethod decorator appears
    # stripped in this copy.
    def is_ignorable(member: discord.Member) -> bool:
        """True if the member holds the 'tourney-mod' role (case-insensitive)."""
        return ('tourney-mod' in (role.name.lower() for role in member.roles))

    async def _get_groups(self) -> List[List['TMSlot']]:
        """All assigned slots, ordered by slot number, chunked by group_size."""
        return split_list((await self.assigned_slots.all().order_by('num')), self.group_size)
async def get_group(self, num: int) -> List['TMSlot']:
_all = (await self._get_groups())
for group in _all:
if (_all.index(group) == (num - 1)):
return group
    async def add_assigned_slot(self, slot: 'TMSlot', message: discord.Message):
        """Announce a confirmed slot in the confirm channel and link it."""
        _e = discord.Embed(color=self.bot.color)
        _e.description = f'''**{slot.num}) NAME: [{slot.team_name.upper()}]({slot.jump_url})**
'''
        if (len(message.mentions) > 0):
            _e.description += f"Team: {', '.join([str(m) for m in message.mentions])}"
        if (_chan := self.confirm_channel):
            m = (await _chan.send(content=message.author.mention, embed=_e, allowed_mentions=discord.AllowedMentions(users=True)))
            # Remember where the confirmation was posted for later reference.
            slot.confirm_jump_url = m.jump_url
            (await slot.save())
        (await self.assigned_slots.add(slot))

    async def finalize_slot(self, ctx: Context, slot: 'TMSlot'):
        """Grant the confirmed role, react with the tick emoji and DM the
        configured success message (all best-effort)."""
        with suppress(discord.HTTPException):
            if (not ((_role := self.role) in ctx.author.roles)):
                (await ctx.author.add_roles(_role))
            (await ctx.message.add_reaction(self.check_emoji))
            if self.success_message:
                embed = discord.Embed(color=self.bot.color, description=self.success_message)
                embed.title = f'Message from {ctx.guild.name}'
                embed.url = slot.jump_url
                (await ctx.author.send(embed=embed, view=ctx.get_dm_view(f'Sent from {ctx.guild.name}')))

    async def end_process(self):
        """Close registrations: update timestamps, lock the channel, announce
        closure and dispatch the tourney-closed log event."""
        from cogs.esports.helpers.utils import toggle_channel
        closed_at = self.bot.current_time
        registration_channel = self.registration_channel
        open_role = self.open_role
        (await Tourney.filter(pk=self.id).update(started_at=None, closed_at=closed_at))
        channel_update = (await toggle_channel(registration_channel, open_role, False))
        (await registration_channel.send(embed=discord.Embed(color=self.bot.color, description='**Registration is now closed!**')))
        self.bot.dispatch('tourney_log', EsportsLog.closed, self, permission_updated=channel_update)

    async def setup_slotm(self):
        """Create a read-only slot-manager channel and post its control view."""
        from cogs.esports.views.tourney.slotm import TourneySlotManager
        _view = TourneySlotManager(self.bot, tourney=self)
        _category = (self.registration_channel.category or self.registration_channel.guild)
        overwrites = {self.guild.default_role: discord.PermissionOverwrite(read_messages=True, send_messages=False, read_message_history=True), self.guild.me: discord.PermissionOverwrite(manage_channels=True, manage_permissions=True)}
        slotm_channel = (await _category.create_text_channel(name='tourney-slotmanager', overwrites=overwrites))
        return (await slotm_channel.send(embed=TourneySlotManager.initial_embed(self), view=_view))

    async def get_csv(self):
        """Export all assigned slots as a CSV discord.File attachment."""
        guild = self.guild
        member_ids = [_.id for _ in guild.members]
        _x = 'Reg Posi,Team Name,Leader,Leader ID,Teammates,Teammates in Server,Jump URL\n'
        async for slot in self.assigned_slots.all().order_by('num'):
            _team = ' | '.join((f'{str(guild.get_member(m))} ({m})' for m in slot.members))
            _x += f'''{slot.num},{slot.team_name},{str(guild.get_member(slot.leader_id))},'{slot.leader_id}',{_team},{sum((1 for i in slot.members if (i in member_ids)))},{slot.jump_url}
'''
        fp = io.BytesIO(_x.encode())
        return discord.File(fp, filename=f'tourney_data_{self.id}_{self.bot.current_time.timestamp()}.csv')
async def full_delete(self, member: discord.Member=None) -> None:
if (self.logschan != None):
member = (member.mention if member else 'Unknown')
embed = discord.Embed(color=discord.Color.red())
embed.title = f'A tournament was completely deleted.'
embed.description = (f'Tourney name : {self.name} [{self.id}]' + f'''
Deleted by: {member}''')
(await self.logschan.send(embed=embed, file=(await self.get_csv())))
self.bot.cache.tourney_channels.discard(self.registration_channel_id)
_data = (await self.assigned_slots.all())
(await TMSlot.filter(pk__in=[_.id for _ in _data]).delete())
(await self.delete())
if self.slotm_channel_id:
with suppress(discord.HTTPException, AttributeError):
(await self.slotm_channel.delete())
async def prompt_selector(ctx: Context, *, tourneys: List['Tourney']=None, placeholder: str=None):
placeholder = (placeholder or 'Choose a tourney to contine...')
from cogs.esports.views.tourney._select import QuotientView, TourneySelector
tourneys = (tourneys or (await Tourney.filter(guild_id=ctx.guild.id).order_by('id').limit(25)))
if (not tourneys):
return None
if (len(tourneys) == 1):
return tourneys[0]
view = QuotientView(ctx)
view.add_item(TourneySelector(placeholder, tourneys))
view.message = (await ctx.send('Choose a tourney from the dropdown below...', view=view))
(await view.wait())
if view.custom_id:
(await view.message.delete())
return (await Tourney.get_or_none(id=view.custom_id))
async def setup_logs(self):
_reason = 'Created for tournament management.'
_g = self.guild
if (not (tourney_mod := self.modrole)):
tourney_mod = (await self.guild.create_role(name='tourney-mod', color=self.bot.color, reason=_reason))
overwrite = self.registration_channel.overwrites_for(_g.default_role)
overwrite.update(read_messages=True, send_messages=True, read_message_history=True)
(await self.registration_channel.set_permissions(tourney_mod, overwrite=overwrite))
if ((tourney_log_channel := self.logschan) is None):
overwrites = {_g.default_role: discord.PermissionOverwrite(read_messages=False), _g.me: discord.PermissionOverwrite(read_messages=True), tourney_mod: discord.PermissionOverwrite(read_messages=True)}
tourney_log_channel = (await _g.create_text_channel(name='quotient-tourney-logs', overwrites=overwrites, reason=_reason, topic='**DO NOT RENAME THIS CHANNEL**'))
note = (await tourney_log_channel.send(embed=discord.Embed(description=f'''If events related to tournament i.e opening registrations or adding roles, etc are triggered, then they will be logged in this channel. Also I have created {tourney_mod.mention}, you can give that role to your tourney-moderators. User with {tourney_mod.mention} can also send messages in registration channels and they won't be considered as tourney-registration.
`Note`: **Do not rename/delete this channel.**''', color=discord.Color(self.bot.color))))
(await tourney_log_channel.send(f'<{self.host_id}> **Read This Message **'))
(await note.pin())
async def toggle_registrations(self):
    """Flip the registration state of this tourney.

    Returns a ``(ok, payload)`` tuple: on a precondition failure ``ok`` is
    False and ``payload`` is a human-readable error; otherwise the result
    of opening/closing registrations is returned.
    """
    channel = self.registration_channel
    open_role = self.open_role
    # Guard clauses: every precondition failure short-circuits with a message.
    if not channel:
        return (False, f'I cannot find the registration channel. ({self.registration_channel_id})')
    if not channel.permissions_for(self.guild.me).manage_permissions:
        return (False, f"I don't have permission to manage channel permissions. ({channel.id})")
    if not open_role:
        return (False, f'I cannot find the open role. ({self.open_role_id})')
    # A non-null started_at means registrations are currently open.
    if self.started_at:
        return await self.__stop_registrations()
    return await self.__start_registrations()
async def __start_registrations(self):
    """Open registrations for this tourney.

    Marks the tourney as started, announces it in the registration channel
    (pinging the configured role, if any) and unlocks the channel for the
    open role.

    Returns:
        (True, True) on success, or (False, <error message>) when the
        slots are already full.
    """
    registration_channel = self.registration_channel
    # FIX: the slot count was awaited twice (two identical DB queries) —
    # once for the full-check and once inside the embed; query it once.
    filled_slots = (await self.assigned_slots.all().count())
    if (self.total_slots <= filled_slots):
        return (False, 'Slots are already full, Increase slots to start again.')
    (await Tourney.filter(pk=self.id).update(started_at=self.bot.current_time, closed_at=None))
    self.bot.cache.tourney_channels.add(self.registration_channel_id)
    _e = discord.Embed(color=self.bot.color)
    _e.description = f'''**Registration Open for {self.name}**
``` {self.required_mentions} mentions required.
Total slots: {self.total_slots} [{(self.total_slots - filled_slots)} slots left]```'''
    _e.set_thumbnail(url=getattr(self.guild.icon, 'url', self.bot.user.avatar.url))
    _ping = None
    if (p := self.ping_role):
        # Empty string (no explicit mention text) when the ping role is
        # @everyone; otherwise mention the role.
        if (p == self.guild.default_role):
            _ping = ''
        else:
            _ping = p.mention
    (await registration_channel.send(_ping, embed=_e, allowed_mentions=discord.AllowedMentions(roles=True, everyone=True)))
    # Unlock the channel for the open role.
    overwrite = registration_channel.overwrites_for(self.open_role)
    overwrite.update(send_messages=True)
    (await registration_channel.set_permissions(self.open_role, overwrite=overwrite, reason='Open for Registrations!'))
    return (True, True)
async def __stop_registrations(self):
    """Close registrations: lock the channel for the open role, announce
    the pause and persist closed_at. Returns (True, True)."""
    channel = self.registration_channel
    perms = channel.overwrites_for(self.open_role)
    perms.update(send_messages=False)
    await channel.set_permissions(self.open_role, overwrite=perms, reason='Closed Registrations!')
    paused_embed = discord.Embed(color=self.bot.color, description=f'**{self.name} registration paused.**')
    await channel.send(embed=paused_embed)
    await Tourney.filter(pk=self.id).update(started_at=None, closed_at=self.bot.current_time)
    return (True, True)
async def ban_user(self, user: Union[(discord.Member, discord.User)]):
    """Record `user` in this tourney's banned_users array."""
    appended = ArrayAppend('banned_users', user.id)
    await Tourney.filter(pk=self.id).update(banned_users=appended)
async def unban_user(self, user: Union[(discord.Member, discord.User)]):
    """Remove `user` from this tourney's banned_users array."""
    removed = ArrayRemove('banned_users', user.id)
    await Tourney.filter(pk=self.id).update(banned_users=removed)
async def remove_slot(self, slot: 'TMSlot'):
    """Delete a registration slot.

    Strikes through its confirmation message (in the background) and, when
    the leader holds no other slot, removes the tourney role from them.
    """
    if slot.confirm_jump_url:
        # Fire-and-forget: don't block slot deletion on message editing.
        self.bot.loop.create_task(self.update_confirmed_message(slot.confirm_jump_url))
    await slot.delete()
    leader_has_other_slots = await self.assigned_slots.filter(leader_id=slot.leader_id).exists()
    if leader_has_other_slots:
        return
    member = self.guild.get_member(slot.leader_id)
    if member:
        await member.remove_roles(discord.Object(id=self.role_id))
async def update_confirmed_message(self, link: str):
    """Strike through the slot-confirmation embed a jump-link points to.

    Best-effort: does nothing if the channel/message/embed is gone or the
    edit fails.
    """
    _ids = [int(i) for i in link.split('/')[5:]]
    with suppress(discord.HTTPException, IndexError):
        # FIX: get_channel() returns None for deleted/unknown channels; the
        # original then raised an unsuppressed AttributeError on
        # .fetch_message().
        channel = self.guild.get_channel(_ids[0])
        if (channel is None):
            return
        message = (await channel.fetch_message(_ids[1]))
        if message:
            e = message.embeds[0]
            # FIX: guard against an embed with no description (None).
            e.description = (('~~' + (e.description or '').strip()) + '~~')
            e.title = 'Cancelled Slot'
            e.color = discord.Color.red()
            (await message.edit(embed=e))
async def make_changes(self, **kwargs):
    """Persist arbitrary field updates for this tourney row."""
    row = Tourney.filter(pk=self.id)
    return await row.update(**kwargs)
async def refresh_slotlm(self):
    """Refresh the slot-manager message: edit it in place, or re-send it
    (persisting the new message id) when it is missing or uneditable.

    Returns:
        True on completion.
    """
    from cogs.esports.views.tourney import TourneySlotManager
    msg = (await self.bot.get_or_fetch_message(self.slotm_channel, self.slotm_message_id))
    _view = TourneySlotManager(self.bot, tourney=self)
    _e = TourneySlotManager.initial_embed(self)
    try:
        (await msg.edit(embed=_e, view=_view))
    except (discord.HTTPException, AttributeError):
        # AttributeError covers msg being None (message deleted).
        # FIX: the original used `finally: return True`, which silently
        # swallowed *every* exception — including failures while
        # re-sending — making errors undiagnosable.
        msg = (await self.slotm_channel.send(embed=_e, view=_view))
        (await self.make_changes(slotm_message_id=msg.id))
    return True
async def check_fake_tags(self, message: discord.Message):
    """Return assigned-slot rows whose recorded members overlap the users
    mentioned in `message` (i.e. mentions that are already registered).
    """
    # Raw SQL: join the tourney<->slot m2m table against the slot table and
    # overlap-test ($2 && ...) the mentioned ids against each slot's MEMBERS array.
    query = '\n        SELECT *\n        FROM PUBLIC."tm.tourney_tm.register" AS ASSIGNED_SLOT\n        INNER JOIN PUBLIC."tm.register" AS SLOTS ON SLOTS.ID = ASSIGNED_SLOT.TMSLOT_ID\n        WHERE ASSIGNED_SLOT."tm.tourney_id" = $1\n        AND $2 && SLOTS.MEMBERS;\n\n        '
    return (await self.bot.db.fetch(query, self.id, [i.id for i in message.mentions]))
class SingleIndexWriterMixin(object):
    """Mixin that writes matched-molecular-pair index records one row at a
    time through a DB-API connection.

    The host class must provide ``self.conn`` and the ``ADD_*_SQL``
    parameterized INSERT statements; each ``add_*`` method executes one of
    them with the given values.
    """

    def add_property_name(self, property_name_idx, property_name):
        self.conn.execute(self.ADD_PROPERTY_NAME_SQL, (property_name_idx, property_name))

    def add_rule_smiles(self, smiles_idx, smiles):
        # Heavy-atom count is derived from the SMILES and stored alongside it.
        self.conn.execute(self.ADD_RULE_SMILES_SQL, (smiles_idx, smiles, get_num_heavies_from_smiles(smiles)))

    def add_rule(self, rule_idx, from_smiles_idx, to_smiles_idx):
        self.conn.execute(self.ADD_RULE_SQL, (rule_idx, from_smiles_idx, to_smiles_idx))

    def add_environment_fingerprint(self, fp_idx, smarts, pseudosmiles, parent_smarts):
        # The schema stores a non-NULL parent; root environments use ''.
        if (parent_smarts is None):
            parent_smarts = ''
        self.conn.execute(self.ADD_ENVIRONMENT_FINGERPRINT_SQL, (fp_idx, smarts, pseudosmiles, parent_smarts))

    def add_rule_environment(self, rule_env_idx, rule_idx, env_fp_idx, radius):
        # Trailing 0: initial value for the statement's final column
        # (presumably a pair counter — confirm against ADD_RULE_ENVIRONMENT_SQL).
        self.conn.execute(self.ADD_RULE_ENVIRONMENT_SQL, (rule_env_idx, rule_idx, env_fp_idx, radius, 0))

    def add_compound(self, compound_idx, compound_id, input_smiles, normalized_smiles, num_normalized_heavies):
        self.conn.execute(self.ADD_COMPOUND_SQL, (compound_idx, compound_id, input_smiles, normalized_smiles, num_normalized_heavies))

    def add_constant_smiles(self, smiles_idx, constant_smiles):
        self.conn.execute(self.ADD_CONSTANT_SMILES_SQL, (smiles_idx, constant_smiles))

    def add_rule_environment_pair(self, pair_idx, env_idx, compound1_idx, compound2_idx, constant_idx):
        self.conn.execute(self.ADD_RULE_ENVIRONMENT_PAIR_SQL, (pair_idx, env_idx, compound1_idx, compound2_idx, constant_idx))

    def add_compound_property(self, compound_idx, property_name_idx, value):
        self.conn.execute(self.ADD_COMPOUND_PROPERTY_SQL, (compound_idx, property_name_idx, value))

    def add_rule_environment_statistics(self, rule_env_idx, property_name_idx, values):
        """Insert one aggregated-statistics row for a rule environment.

        `values` is the 12-tuple (count, avg, std, kurtosis, skewness, min,
        q1, median, q3, max, paired_t, p_value); std/kurtosis/skewness and
        the t-test fields may be None for small samples.
        """
        (count, avg, std, kurtosis, skewness, min, q1, median, q3, max, paired_t, p_value) = values
        # Internal invariants only (NOTE: asserts are stripped under `python -O`).
        assert (rule_env_idx is not None)
        assert (property_name_idx is not None)
        assert (count is not None)
        assert (avg is not None)
        assert (min is not None)
        assert (q1 is not None)
        assert (median is not None)
        assert (q3 is not None)
        assert (max is not None)
        self.conn.execute(self.ADD_RULE_ENVIRONMENT_STATISTICS_SQL, (rule_env_idx, property_name_idx, count, avg, std, kurtosis, skewness, min, q1, median, q3, max, paired_t, p_value))
class TestBloombergBeapHapiRequestProvider(unittest.TestCase):
    """Unit tests for BloombergBeapHapiRequestsProvider error handling when
    the HAPI endpoints return unexpected status codes."""

    def setUp(self):
        self.session_mock = Mock()
        self.post_response = Mock()
        self.session_mock.post.return_value = self.post_response
        self.address_url = '/eap/catalogs/address_url_id/'
        self.request_id = 'sOmwhEReOveRTHeRainBOW'
        # FIX: this string literal was truncated/unterminated in the original
        # source (syntax error); restore the Bloomberg HAPI base URL so the
        # urljoin() calls below produce absolute URLs.
        self.host = 'https://api.bloomberg.com'
        self.account_url = urljoin(self.host, self.address_url)
        self.trigger_url = urljoin(self.host, '{}triggers/ctaAdhocTrigger/'.format(self.address_url))

    def test_create_request__unknown_get_response(self):
        # GET returning 404 (request not found) must surface as BloombergError.
        self.session_mock.get.return_value.status_code = 404
        provider = BloombergBeapHapiRequestsProvider(self.host, self.session_mock, self.account_url, self.trigger_url)
        self.assertRaises(BloombergError, provider.create_request, self.request_id, 'some_universe_url', 'some_field_list_url')

    def test_create_request__unknown_post_response(self):
        # NOTE(review): identical GET setup to the test above; only the POST
        # status differs (200, i.e. not the expected "created" code) — confirm
        # the intended POST status against the provider's success codes.
        self.session_mock.get.return_value.status_code = 404
        self.post_response.status_code = 200
        provider = BloombergBeapHapiRequestsProvider(self.host, self.session_mock, self.account_url, self.trigger_url)
        self.assertRaises(BloombergError, provider.create_request, self.request_id, 'some_universe_url', 'some_field_list_url')
def main():
    """Evaluate a trained VAE (with a VGG16 feature teacher) on an MVTec-style
    test set: compute per-pixel anomaly scores, pick the F1-optimal threshold,
    report pixel ROC-AUC and save the ROC curve plus qualitative figures.
    """
    parser = argparse.ArgumentParser(description='Testing')
    parser.add_argument('--obj', type=str, default='.')
    parser.add_argument('--data_type', type=str, default='mvtec')
    parser.add_argument('--data_path', type=str, default='.')
    parser.add_argument('--checkpoint_dir', type=str, default='.')
    parser.add_argument('--grayscale', action='store_true', help='color or grayscale input image')
    parser.add_argument('--batch_size', type=int, default=16)
    parser.add_argument('--img_resize', type=int, default=128)
    parser.add_argument('--crop_size', type=int, default=128)
    parser.add_argument('--seed', type=int, default=None)
    args = parser.parse_args()
    # Same path as the original parenthesized concatenation, as an f-string.
    args.save_dir = f'./{args.data_type}/{args.obj}/vgg_feature/seed_{args.seed}/'
    # FIX: exists()+makedirs() is racy (TOCTOU); exist_ok makes it atomic-enough.
    os.makedirs(args.save_dir, exist_ok=True)
    args.input_channel = (1 if args.grayscale else 3)
    # Restore the trained VAE.
    model = VAE(input_channel=args.input_channel, z_dim=100).to(device)
    checkpoint = torch.load(args.checkpoint_dir)
    model.load_state_dict(checkpoint['model'])
    # Frozen VGG16 acts as the feature teacher.
    teacher = models.vgg16(pretrained=True).to(device)
    for param in teacher.parameters():
        param.requires_grad = False
    img_size = (args.crop_size if (args.img_resize != args.crop_size) else args.img_resize)
    kwargs = ({'num_workers': 4, 'pin_memory': True} if use_cuda else {})
    test_dataset = MVTecDataset(args.data_path, class_name=args.obj, is_train=False, resize=img_size)
    # NOTE(review): shuffle=True on a test loader is unusual — confirm.
    test_loader = torch.utils.data.DataLoader(test_dataset, batch_size=args.batch_size, shuffle=True, **kwargs)
    (scores, test_imgs, recon_imgs, gt_list, gt_mask_list) = test(model, teacher, test_loader)
    # Min-max normalize anomaly scores to [0, 1].
    scores = np.asarray(scores)
    max_anomaly_score = scores.max()
    min_anomaly_score = scores.min()
    scores = ((scores - min_anomaly_score) / (max_anomaly_score - min_anomaly_score))
    gt_mask = np.asarray(gt_mask_list)
    # Choose the threshold maximizing pixel-level F1 (0/0 guarded to 0).
    (precision, recall, thresholds) = precision_recall_curve(gt_mask.flatten(), scores.flatten())
    a = ((2 * precision) * recall)
    b = (precision + recall)
    f1 = np.divide(a, b, out=np.zeros_like(a), where=(b != 0))
    threshold = thresholds[np.argmax(f1)]
    (fpr, tpr, _) = roc_curve(gt_mask.flatten(), scores.flatten())
    per_pixel_rocauc = roc_auc_score(gt_mask.flatten(), scores.flatten())
    print(('pixel ROCAUC: %.3f' % per_pixel_rocauc))
    plt.plot(fpr, tpr, label=('%s ROCAUC: %.3f' % (args.obj, per_pixel_rocauc)))
    plt.legend(loc='lower right')
    # NOTE(review): args.save_dir already ends in 'seed_{seed}/', so the seed
    # segment is duplicated here — kept as-is to preserve output paths.
    save_dir = f'{args.save_dir}/seed_{args.seed}/pictures_{threshold:.4f}'
    os.makedirs(save_dir, exist_ok=True)
    plt.savefig(os.path.join(save_dir, (args.obj + '_roc_curve.png')), dpi=100)
    plot_fig(args, test_imgs, recon_imgs, scores, gt_mask_list, threshold, save_dir)
class TestWindow(window.Window):
    """Interactive pyglet window exercising IncrementalTextLayout with a
    configurable vertical content alignment and an editable caret."""

    def __init__(self, content_valign, *args, **kwargs):
        super(TestWindow, self).__init__(*args, **kwargs)
        self.batch = graphics.Batch()
        self.document = text.decode_text(doctext)
        self.margin = 2  # pixels of padding around the layout
        self.layout = layout.IncrementalTextLayout(self.document, (self.width - (self.margin * 2)), (self.height - (self.margin * 2)), multiline=True, batch=self.batch)
        self.layout.content_valign = content_valign
        self.caret = caret.Caret(self.layout)
        # Route keyboard/mouse events to the caret for editing.
        self.push_handlers(self.caret)
        self.set_mouse_cursor(self.get_system_mouse_cursor('text'))

    def on_resize(self, width, height):
        """Keep the layout filling the window (minus margins) on resize."""
        super(TestWindow, self).on_resize(width, height)
        # Batch position/size changes into a single layout update.
        self.layout.begin_update()
        self.layout.x = self.margin
        self.layout.y = self.margin
        self.layout.width = (width - (self.margin * 2))
        self.layout.height = (height - (self.margin * 2))
        self.layout.end_update()

    def on_mouse_scroll(self, x, y, scroll_x, scroll_y):
        # Vertical scroll is scaled (16 px per notch); horizontal is 1:1.
        self.layout.view_x -= scroll_x
        self.layout.view_y += (scroll_y * 16)

    def on_draw(self):
        gl.glClearColor(1, 1, 1, 1)  # white background
        self.clear()
        self.batch.draw()

    def on_key_press(self, symbol, modifiers):
        super(TestWindow, self).on_key_press(symbol, modifiers)
        # Insert a literal tab character on TAB.
        if (symbol == key.TAB):
            self.caret.on_text('\t')
class AoAModel3_d1_w2(AttModel):
    """Attention-on-Attention captioning model variant (2-layer decoder).

    Depending on `opt`, it replaces the context-to-attention projection and
    the fc-embedding with identity/mean-pooling, and optionally refines the
    attention features with an AoA refiner core.
    """

    def __init__(self, opt):
        super(AoAModel3_d1_w2, self).__init__(opt)
        self.num_layers = 2
        # Use mean-pooled attention features instead of fc features by default.
        self.use_mean_feats = getattr(opt, 'mean_feats', 1)
        if (opt.use_multi_head == 2):
            # Multi-head attention does its own key/value projection; make
            # ctx2att a no-op.
            del self.ctx2att
            self.ctx2att = (lambda x: x)
        if self.use_mean_feats:
            del self.fc_embed
        if opt.refine:
            self.refiner = AoA_Refiner_Core(opt)
        else:
            # FIX: the identity refiner was `lambda x, y: x`, but the call
            # site passes three arguments (att_feats, flag_feats, att_masks),
            # so refine=False raised TypeError; accept and ignore the extras.
            self.refiner = (lambda x, *rest: x)
        self.core = AoA_Decoder_Core(opt)

    def _prepare_feature(self, fc_feats, att_feats, flag_feats, att_masks):
        """Embed, refine and pool the attention features for decoding.

        Returns (mean_feats, att_feats, p_att_feats, att_masks).
        """
        (att_feats, att_masks) = self.clip_att(att_feats, att_masks)
        att_feats = pack_wrapper(self.att_embed, att_feats, att_masks)
        att_feats = self.refiner(att_feats, flag_feats, att_masks)
        if self.use_mean_feats:
            if (att_masks is None):
                mean_feats = torch.mean(att_feats, dim=1)
            else:
                # Masked mean over the attention dimension.
                mean_feats = (torch.sum((att_feats * att_masks.unsqueeze((- 1))), 1) / torch.sum(att_masks.unsqueeze((- 1)), 1))
        else:
            mean_feats = self.fc_embed(fc_feats)
        p_att_feats = self.ctx2att(att_feats)
        return (mean_feats, att_feats, p_att_feats, att_masks)
def tbb_process_pool_worker3(inqueue, outqueue, initializer=None, initargs=(), maxtasks=None, wrap_exception=False):
    """Multiprocessing pool worker that, after the stock worker loop exits,
    asks the Intel RML/TBB runtime to release its resources (when IPC mode
    is enabled)."""
    from multiprocessing.pool import worker
    worker(inqueue, outqueue, initializer, initargs, maxtasks, wrap_exception)
    if ipc_enabled:
        try:
            librml = ctypes.CDLL(libirml)
            librml.release_resources()
        # FIX: was a bare `except:` which also swallowed SystemExit and
        # KeyboardInterrupt; CDLL load raises OSError, a missing symbol
        # raises AttributeError.
        except (OSError, AttributeError):
            print('Warning: Can not load ', libirml, file=sys.stderr)
def test_bezier_to_polygon():
    """bezier_to_polygon: sampling of two cubic Bezier segments into points,
    custom num_sample, and assertion errors on invalid input."""
    # Two cubic Beziers (8 control points each, flattened x,y pairs) forming
    # a closed text-region outline; default sampling density.
    bezier_points = [37.0, 249.0, 72.5, 229.55, 95.34, 220.65, 134.0, 216.0, 132.0, 233.0, 82.11, 240.2, 72.46, 247.16, 38.0, 263.0]
    pts = bezier_to_polygon(bezier_points)
    target = np.array([[37.0, 249.0], [42., 246.], [47., 243.], [52., 240.], [58., 238.], [62., 235.], [67., 233.], [72., 231.], [77., 229.], [81., 227.], [86., 226.], [91., 224.], [96., 223.], [101., 221.], [106., 220.], [111., 219.], [116., 218.], [122., 217.], [128., 216.], [134.0, 216.0], [132.0, 233.0], [124., 234.], [117., 235.], [111., 236.], [105., 237.], [99., 238.], [94., 240.], [89., 241.], [85., 242.], [81., 244.], [77., 245.], [73., 247.], [69., 248.], [65., 250.], [61., 252.], [57., 254.], [52., 256.], [48., 258.], [43., 260.], [38.0, 263.0]])
    assert np.allclose(pts, target)
    # Degenerate straight-line control points with an explicit sample count.
    bezier_points = [0, 0, 0, 1, 0, 2, 0, 3, 1, 0, 1, 1, 1, 2, 1, 3]
    pts = bezier_to_polygon(bezier_points, num_sample=3)
    target = np.array([[0, 0], [0, 1.5], [0, 3], [1, 0], [1, 1.5], [1, 3]])
    assert np.allclose(pts, target)
    # Invalid arguments must be rejected via assertions.
    with pytest.raises(AssertionError):
        bezier_to_polygon(bezier_points, num_sample=(- 1))
    bezier_points = [0, 1]  # too few points for a Bezier segment
    with pytest.raises(AssertionError):
        bezier_to_polygon(bezier_points)
def _scan_badge_mutation(graphql_client, variables):
    """Execute the ScanBadge GraphQL mutation with the given variables
    (expects `url` and `conferenceCode`); the response is either a
    BadgeScan payload or a ScanError message."""
    return graphql_client.query('\n        mutation ScanBadge($url: String!, $conferenceCode: String!) {\n            scanBadge(input: { url: $url, conferenceCode: $conferenceCode }) {\n                __typename\n                ... on BadgeScan {\n                    id\n                    attendee {\n                        fullName\n                        email\n                    }\n                    notes\n                }\n                ... on ScanError {\n                    message\n                }\n            }\n        }\n    ', variables=variables)
def read_and_resize_pair(path_lr, path_hr, low_res=(60, 80), high_res=(480, 640)):
    """Load a low-/high-resolution RGB image pair and resize each to its
    target (height, width).

    Returns:
        (img_lr, img_hr) as resized arrays.

    NOTE(review): scipy.misc.imread/imresize were removed in SciPy >= 1.2/1.3;
    this function requires an old SciPy with Pillow. Consider migrating to
    imageio + PIL resizing.
    """
    # FIX: `np.float` (a deprecated alias of the builtin `float`, removed in
    # NumPy 1.24) replaced with `float` — identical dtype.
    img_lr = misc.imread(path_lr, mode='RGB').astype(float)
    img_lr = misc.imresize(img_lr, low_res)
    img_hr = misc.imread(path_hr, mode='RGB').astype(float)
    img_hr = misc.imresize(img_hr, high_res)
    return (img_lr, img_hr)
def parse_option():
    """Parse command-line options for knowledge-distillation student training.

    Also derives run metadata: decayed-LR epoch list, teacher name, run name,
    and creates the tensorboard/model output folders.

    Returns:
        argparse.Namespace with all options plus the derived fields.
    """
    hostname = socket.gethostname()
    parser = argparse.ArgumentParser('argument for training')
    parser.add_argument('--print_freq', type=int, default=100, help='print frequency')
    parser.add_argument('--tb_freq', type=int, default=500, help='tb frequency')
    parser.add_argument('--save_freq', type=int, default=40, help='save frequency')
    parser.add_argument('--batch_size', type=int, default=64, help='batch_size')
    parser.add_argument('--num_workers', type=int, default=8, help='num of workers to use')
    parser.add_argument('--epochs', type=int, default=240, help='number of training epochs')
    parser.add_argument('--init_epochs', type=int, default=30, help='init training for two-stage methods')
    parser.add_argument('--learning_rate', type=float, default=0.05, help='learning rate')
    parser.add_argument('--lr_decay_epochs', type=str, default='150,180,210', help='where to decay lr, can be a list')
    parser.add_argument('--lr_decay_rate', type=float, default=0.1, help='decay rate for learning rate')
    parser.add_argument('--weight_decay', type=float, default=0.0005, help='weight decay')
    parser.add_argument('--momentum', type=float, default=0.9, help='momentum')
    parser.add_argument('--dataset', type=str, default='cifar100', choices=['cifar100'], help='dataset')
    parser.add_argument('--model_s', type=str, default='resnet8', choices=['resnet8', 'resnet14', 'resnet20', 'resnet32', 'resnet44', 'resnet56', 'resnet110', 'resnet8x4', 'resnet32x4', 'wrn_16_1', 'wrn_16_2', 'wrn_40_1', 'wrn_40_2', 'vgg8', 'vgg11', 'vgg13', 'vgg16', 'vgg19', 'ResNet50', 'MobileNetV2', 'ShuffleV1', 'ShuffleV2'])
    parser.add_argument('--path_t', type=str, default=None, help='teacher model snapshot')
    parser.add_argument('--distill', type=str, default='kd', choices=['kd', 'hint', 'attention', 'similarity', 'correlation', 'vid', 'crd', 'kdsvd', 'fsp', 'rkd', 'pkt', 'abound', 'factor', 'nst', 'hkd'])
    parser.add_argument('--trial', type=str, default='1', help='trial id')
    parser.add_argument('-r', '--gamma', type=float, default=1, help='weight for classification')
    parser.add_argument('-a', '--alpha', type=float, default=None, help='weight balance for KD')
    parser.add_argument('-b', '--beta', type=float, default=None, help='weight balance for other losses')
    parser.add_argument('--kd_T', type=float, default=4, help='temperature for KD distillation')
    parser.add_argument('--feat_dim', default=128, type=int, help='feature dimension')
    parser.add_argument('--mode', default='exact', type=str, choices=['exact', 'relax', 'hkd'])
    parser.add_argument('--nce_k', default=16384, type=int, help='number of negative samples for NCE')
    parser.add_argument('--nce_t', default=0.07, type=float, help='temperature parameter for softmax')
    parser.add_argument('--nce_m', default=0.5, type=float, help='momentum for non-parametric updates')
    parser.add_argument('--hint_layer', default=2, type=int, choices=[0, 1, 2, 3, 4])
    opt = parser.parse_args()
    # Lightweight mobile architectures train with a lower learning rate.
    if (opt.model_s in ['MobileNetV2', 'ShuffleV1', 'ShuffleV2']):
        opt.learning_rate = 0.01
    if hostname.startswith('visiongpu'):
        opt.model_path = '/path/to/my/student_model'
        opt.tb_path = '/path/to/my/student_tensorboards'
    else:
        opt.model_path = './save/student_model'
        opt.tb_path = './save/student_tensorboards'
    # Convert the comma-separated string into a list of ints
    # (comprehension replaces the original manual append loop).
    opt.lr_decay_epochs = [int(it) for it in opt.lr_decay_epochs.split(',')]
    opt.model_t = get_teacher_name(opt.path_t)
    opt.model_name = 'S:{}_T:{}_{}_{}_r:{}_a:{}_b:{}_{}'.format(opt.model_s, opt.model_t, opt.dataset, opt.distill, opt.gamma, opt.alpha, opt.beta, opt.trial)
    # FIX: isdir()+makedirs() is racy (TOCTOU); exist_ok handles both cases.
    opt.tb_folder = os.path.join(opt.tb_path, opt.model_name)
    os.makedirs(opt.tb_folder, exist_ok=True)
    opt.save_folder = os.path.join(opt.model_path, opt.model_name)
    os.makedirs(opt.save_folder, exist_ok=True)
    return opt
class tuple(Generic[T_co], Sequence[T_co], Iterable[T_co]):
    """Typing stub describing the interface of the builtin ``tuple``.

    Intentionally shadows the builtin name (stub-file style); bodies are
    placeholders.
    NOTE(review): the two ``__getitem__`` definitions presumably carried
    ``@overload`` in the original stub; as plain code the slice version
    replaces the int version — confirm against the source .pyi.
    """

    def __init__(self, i: Iterable[T_co]) -> None:
        pass

    def __getitem__(self, i: int) -> T_co:
        pass

    def __getitem__(self, i: slice) -> Tuple[(T_co, ...)]:
        pass

    def __len__(self) -> int:
        pass

    def __iter__(self) -> Iterator[T_co]:
        ...

    def __contains__(self, item: object) -> int:
        ...
class DeepLabv3(nn.Module):
    """DeepLabv3 semantic segmentation model: backbone + ASPP pooling +
    final classification block, with an optional auxiliary head.

    Args:
        backbone: feature extractor returning (main_feats, aux_feats).
        backbone_out_channels: channels of the backbone's main output.
        aux: whether to attach the auxiliary classification head.
        fixed_size: if True, output size is fixed to `in_size`; otherwise
            it follows the input's spatial size.
        in_channels: number of input channels (must be positive).
        in_size: expected (height, width) of the input.
        num_classes: number of segmentation classes.
    """

    def __init__(self, backbone, backbone_out_channels=2048, aux=False, fixed_size=True, in_channels=3, in_size=(480, 480), num_classes=21):
        super(DeepLabv3, self).__init__()
        assert (in_channels > 0)
        self.in_size = in_size
        self.num_classes = num_classes
        self.aux = aux
        self.fixed_size = fixed_size
        self.backbone = backbone
        # Backbone output stride is 8, hence the //8 upscale target.
        pool_out_size = (((self.in_size[0] // 8), (self.in_size[1] // 8)) if fixed_size else None)
        self.pool = AtrousSpatialPyramidPooling(in_channels=backbone_out_channels, upscale_out_size=pool_out_size)
        # ASPP reduces channels by 8x before the final block.
        pool_out_channels = (backbone_out_channels // 8)
        self.final_block = DeepLabv3FinalBlock(in_channels=pool_out_channels, out_channels=num_classes, bottleneck_factor=1)
        if self.aux:
            # Aux head consumes the intermediate backbone features (half channels).
            aux_out_channels = (backbone_out_channels // 2)
            self.aux_block = DeepLabv3FinalBlock(in_channels=aux_out_channels, out_channels=num_classes, bottleneck_factor=4)
        self._init_params()

    def _init_params(self):
        # Kaiming-uniform init for all conv weights; zero biases.
        for (name, module) in self.named_modules():
            if isinstance(module, nn.Conv2d):
                init.kaiming_uniform_(module.weight)
                if (module.bias is not None):
                    init.constant_(module.bias, 0)

    def forward(self, x):
        """Return segmentation logits, plus auxiliary logits when aux=True."""
        in_size = (self.in_size if self.fixed_size else x.shape[2:])
        (x, y) = self.backbone(x)
        x = self.pool(x)
        x = self.final_block(x, in_size)
        if self.aux:
            y = self.aux_block(y, in_size)
            return (x, y)
        else:
            return x
class OnionRoutingFailureMessage():
    """Failure message returned through an onion route.

    Attributes:
        code: numeric failure code; stays a plain int when not a known
            OnionFailureCode member.
        data: failure-specific payload bytes.
    """

    def __init__(self, code: int, data: bytes):
        self.code = code
        self.data = data

    def __repr__(self):
        return repr((self.code, self.data))

    def to_bytes(self) -> bytes:
        """Serialize as a 2-byte big-endian code followed by the raw data."""
        ret = self.code.to_bytes(2, byteorder='big')
        ret += self.data
        return ret

    @classmethod
    def from_bytes(cls, failure_msg: bytes):
        """Parse a wire-format failure message into an instance.

        FIX: restored the missing @classmethod decorator — the method takes
        `cls` as its first parameter but had no decorator, so calling it on
        the class (``OnionRoutingFailureMessage.from_bytes(msg)``) raised a
        TypeError; instance calls keep working unchanged.
        """
        failure_code = int.from_bytes(failure_msg[:2], byteorder='big')
        try:
            failure_code = OnionFailureCode(failure_code)
        except ValueError:
            pass  # unknown code: keep the raw int
        failure_data = failure_msg[2:]
        return OnionRoutingFailureMessage(failure_code, failure_data)

    def code_name(self) -> str:
        """Human-readable name for the failure code."""
        if isinstance(self.code, OnionFailureCode):
            return str(self.code.name)
        return f'Unknown error ({self.code!r})'
class TestPassportFileWithoutRequest(TestPassportFileBase):
    """Offline tests for PassportFile: slots, values, serialization,
    equality semantics, deprecation warnings and the get_file shortcut."""

    def test_slot_behaviour(self, passport_file):
        # Every declared slot must exist on the instance, with no duplicates
        # anywhere in the MRO.
        inst = passport_file
        for attr in inst.__slots__:
            assert (getattr(inst, attr, 'err') != 'err'), f"got extra slot '{attr}'"
        assert (len(mro_slots(inst)) == len(set(mro_slots(inst)))), 'duplicate slot'

    def test_expected_values(self, passport_file):
        assert (passport_file.file_id == self.file_id)
        assert (passport_file.file_unique_id == self.file_unique_id)
        assert (passport_file.file_size == self.file_size)
        assert (passport_file.file_date == self.file_date)

    def test_to_dict(self, passport_file):
        passport_file_dict = passport_file.to_dict()
        assert isinstance(passport_file_dict, dict)
        assert (passport_file_dict['file_id'] == passport_file.file_id)
        assert (passport_file_dict['file_unique_id'] == passport_file.file_unique_id)
        assert (passport_file_dict['file_size'] == passport_file.file_size)
        assert (passport_file_dict['file_date'] == passport_file.file_date)

    def test_equality(self):
        # Per the assertions below, equality/hash are keyed on file_unique_id:
        # a==b and a==c despite differing file_id/size/date; a!=d (different
        # unique id) and a!=e (different type).
        a = PassportFile(self.file_id, self.file_unique_id, self.file_size, self.file_date)
        b = PassportFile('', self.file_unique_id, self.file_size, self.file_date)
        c = PassportFile(self.file_id, self.file_unique_id, '', '')
        d = PassportFile('', '', self.file_size, self.file_date)
        e = PassportElementError('source', 'type', 'message')
        assert (a == b)
        assert (hash(a) == hash(b))
        assert (a is not b)
        assert (a == c)
        assert (hash(a) == hash(c))
        assert (a != d)
        assert (hash(a) != hash(d))
        assert (a != e)
        assert (hash(a) != hash(e))

    def test_file_date_deprecated(self, passport_file, recwarn):
        # Accessing file_date must emit exactly one PTBDeprecationWarning
        # attributed to this test file (correct stacklevel).
        passport_file.file_date
        assert (len(recwarn) == 1)
        assert ('The attribute `file_date` will return a datetime instead of an integer in future major versions.' in str(recwarn[0].message))
        assert (recwarn[0].category is PTBDeprecationWarning)
        assert (recwarn[0].filename == __file__)

    async def test_get_file_instance_method(self, monkeypatch, passport_file):
        # The shortcut must mirror Bot.get_file's signature, forward the
        # file_id, and handle defaults; make_assertion encodes the check in
        # the returned File's file_id.
        async def make_assertion(*_, **kwargs):
            result = (kwargs['file_id'] == passport_file.file_id)
            return File(file_id=result, file_unique_id=result)
        assert check_shortcut_signature(PassportFile.get_file, Bot.get_file, ['file_id'], [])
        assert (await check_shortcut_call(passport_file.get_file, passport_file.get_bot(), 'get_file'))
        assert (await check_defaults_handling(passport_file.get_file, passport_file.get_bot()))
        monkeypatch.setattr(passport_file.get_bot(), 'get_file', make_assertion)
        assert ((await passport_file.get_file()).file_id == 'True')
class Effect6536(BaseEffect):
    """Force Auxiliary hull bonus (shipBonusForceAuxiliaryC4, per Caldari
    Carrier skill level) to shield/information command-burst strength and
    duration.

    Refactor: the five identical filteredItemBoost calls were collapsed into
    a loop over the boosted attribute names; the original call order is kept.
    """

    type = 'passive'

    def handler(fit, src, context, projectionRange, **kwargs):
        # Modules requiring either command skill are affected.
        burst_filter = (lambda mod: (mod.item.requiresSkill('Shield Command') or mod.item.requiresSkill('Information Command')))
        bonus = src.getModifiedItemAttr('shipBonusForceAuxiliaryC4')
        for attr_name in ('warfareBuff3Value', 'warfareBuff4Value', 'warfareBuff2Value', 'buffDuration', 'warfareBuff1Value'):
            fit.modules.filteredItemBoost(burst_filter, attr_name, bonus, skill='Caldari Carrier', **kwargs)
class EightBitBg(BgColor, Enum):
    """8-bit (256-color) background colors, rendered as the SGR escape
    sequence ``ESC[48;5;<n>m``.

    Values follow the xterm-256 palette: 0-15 are the standard and bright
    colors, 16-231 the 6x6x6 RGB cube, and 232-255 the grayscale ramp.
    """
    # --- standard + bright colors (0-15) ---
    BLACK = 0
    RED = 1
    GREEN = 2
    YELLOW = 3
    BLUE = 4
    MAGENTA = 5
    CYAN = 6
    LIGHT_GRAY = 7
    DARK_GRAY = 8
    LIGHT_RED = 9
    LIGHT_GREEN = 10
    LIGHT_YELLOW = 11
    LIGHT_BLUE = 12
    LIGHT_MAGENTA = 13
    LIGHT_CYAN = 14
    WHITE = 15
    # --- 6x6x6 RGB color cube (16-231) ---
    GRAY_0 = 16
    NAVY_BLUE = 17
    DARK_BLUE = 18
    BLUE_3A = 19
    BLUE_3B = 20
    BLUE_1 = 21
    DARK_GREEN = 22
    DEEP_SKY_BLUE_4A = 23
    DEEP_SKY_BLUE_4B = 24
    DEEP_SKY_BLUE_4C = 25
    DODGER_BLUE_3 = 26
    DODGER_BLUE_2 = 27
    GREEN_4 = 28
    SPRING_GREEN_4 = 29
    TURQUOISE_4 = 30
    DEEP_SKY_BLUE_3A = 31
    DEEP_SKY_BLUE_3B = 32
    DODGER_BLUE_1 = 33
    GREEN_3A = 34
    SPRING_GREEN_3A = 35
    DARK_CYAN = 36
    LIGHT_SEA_GREEN = 37
    DEEP_SKY_BLUE_2 = 38
    DEEP_SKY_BLUE_1 = 39
    GREEN_3B = 40
    SPRING_GREEN_3B = 41
    SPRING_GREEN_2A = 42
    CYAN_3 = 43
    DARK_TURQUOISE = 44
    TURQUOISE_2 = 45
    GREEN_1 = 46
    SPRING_GREEN_2B = 47
    SPRING_GREEN_1 = 48
    MEDIUM_SPRING_GREEN = 49
    CYAN_2 = 50
    CYAN_1 = 51
    DARK_RED_1 = 52
    DEEP_PINK_4A = 53
    PURPLE_4A = 54
    PURPLE_4B = 55
    PURPLE_3 = 56
    BLUE_VIOLET = 57
    ORANGE_4A = 58
    GRAY_37 = 59
    MEDIUM_PURPLE_4 = 60
    SLATE_BLUE_3A = 61
    SLATE_BLUE_3B = 62
    ROYAL_BLUE_1 = 63
    CHARTREUSE_4 = 64
    DARK_SEA_GREEN_4A = 65
    PALE_TURQUOISE_4 = 66
    STEEL_BLUE = 67
    STEEL_BLUE_3 = 68
    CORNFLOWER_BLUE = 69
    CHARTREUSE_3A = 70
    DARK_SEA_GREEN_4B = 71
    CADET_BLUE_2 = 72
    CADET_BLUE_1 = 73
    SKY_BLUE_3 = 74
    STEEL_BLUE_1A = 75
    CHARTREUSE_3B = 76
    PALE_GREEN_3A = 77
    SEA_GREEN_3 = 78
    AQUAMARINE_3 = 79
    MEDIUM_TURQUOISE = 80
    STEEL_BLUE_1B = 81
    CHARTREUSE_2A = 82
    SEA_GREEN_2 = 83
    SEA_GREEN_1A = 84
    SEA_GREEN_1B = 85
    AQUAMARINE_1A = 86
    DARK_SLATE_GRAY_2 = 87
    DARK_RED_2 = 88
    DEEP_PINK_4B = 89
    DARK_MAGENTA_1 = 90
    DARK_MAGENTA_2 = 91
    DARK_VIOLET_1A = 92
    PURPLE_1A = 93
    ORANGE_4B = 94
    LIGHT_PINK_4 = 95
    PLUM_4 = 96
    MEDIUM_PURPLE_3A = 97
    MEDIUM_PURPLE_3B = 98
    SLATE_BLUE_1 = 99
    YELLOW_4A = 100
    WHEAT_4 = 101
    GRAY_53 = 102
    LIGHT_SLATE_GRAY = 103
    MEDIUM_PURPLE = 104
    LIGHT_SLATE_BLUE = 105
    YELLOW_4B = 106
    DARK_OLIVE_GREEN_3A = 107
    DARK_GREEN_SEA = 108
    LIGHT_SKY_BLUE_3A = 109
    LIGHT_SKY_BLUE_3B = 110
    SKY_BLUE_2 = 111
    CHARTREUSE_2B = 112
    DARK_OLIVE_GREEN_3B = 113
    PALE_GREEN_3B = 114
    DARK_SEA_GREEN_3A = 115
    DARK_SLATE_GRAY_3 = 116
    SKY_BLUE_1 = 117
    CHARTREUSE_1 = 118
    LIGHT_GREEN_2 = 119
    LIGHT_GREEN_3 = 120
    PALE_GREEN_1A = 121
    AQUAMARINE_1B = 122
    DARK_SLATE_GRAY_1 = 123
    RED_3A = 124
    DEEP_PINK_4C = 125
    MEDIUM_VIOLET_RED = 126
    MAGENTA_3A = 127
    DARK_VIOLET_1B = 128
    PURPLE_1B = 129
    DARK_ORANGE_3A = 130
    INDIAN_RED_1A = 131
    HOT_PINK_3A = 132
    MEDIUM_ORCHID_3 = 133
    MEDIUM_ORCHID = 134
    MEDIUM_PURPLE_2A = 135
    DARK_GOLDENROD = 136
    LIGHT_SALMON_3A = 137
    ROSY_BROWN = 138
    GRAY_63 = 139
    MEDIUM_PURPLE_2B = 140
    MEDIUM_PURPLE_1 = 141
    GOLD_3A = 142
    DARK_KHAKI = 143
    NAVAJO_WHITE_3 = 144
    GRAY_69 = 145
    LIGHT_STEEL_BLUE_3 = 146
    LIGHT_STEEL_BLUE = 147
    YELLOW_3A = 148
    DARK_OLIVE_GREEN_3 = 149
    DARK_SEA_GREEN_3B = 150
    DARK_SEA_GREEN_2 = 151
    LIGHT_CYAN_3 = 152
    LIGHT_SKY_BLUE_1 = 153
    GREEN_YELLOW = 154
    DARK_OLIVE_GREEN_2 = 155
    PALE_GREEN_1B = 156
    DARK_SEA_GREEN_5B = 157
    DARK_SEA_GREEN_5A = 158
    PALE_TURQUOISE_1 = 159
    RED_3B = 160
    DEEP_PINK_3A = 161
    DEEP_PINK_3B = 162
    MAGENTA_3B = 163
    MAGENTA_3C = 164
    MAGENTA_2A = 165
    DARK_ORANGE_3B = 166
    INDIAN_RED_1B = 167
    HOT_PINK_3B = 168
    HOT_PINK_2 = 169
    ORCHID = 170
    MEDIUM_ORCHID_1A = 171
    ORANGE_3 = 172
    LIGHT_SALMON_3B = 173
    LIGHT_PINK_3 = 174
    PINK_3 = 175
    PLUM_3 = 176
    VIOLET = 177
    GOLD_3B = 178
    LIGHT_GOLDENROD_3 = 179
    TAN = 180
    MISTY_ROSE_3 = 181
    THISTLE_3 = 182
    PLUM_2 = 183
    YELLOW_3B = 184
    KHAKI_3 = 185
    LIGHT_GOLDENROD_2A = 186
    LIGHT_YELLOW_3 = 187
    GRAY_84 = 188
    LIGHT_STEEL_BLUE_1 = 189
    YELLOW_2 = 190
    DARK_OLIVE_GREEN_1A = 191
    DARK_OLIVE_GREEN_1B = 192
    DARK_SEA_GREEN_1 = 193
    HONEYDEW_2 = 194
    LIGHT_CYAN_1 = 195
    RED_1 = 196
    DEEP_PINK_2 = 197
    DEEP_PINK_1A = 198
    DEEP_PINK_1B = 199
    MAGENTA_2B = 200
    MAGENTA_1 = 201
    ORANGE_RED_1 = 202
    INDIAN_RED_1C = 203
    INDIAN_RED_1D = 204
    HOT_PINK_1A = 205
    HOT_PINK_1B = 206
    MEDIUM_ORCHID_1B = 207
    DARK_ORANGE = 208
    SALMON_1 = 209
    LIGHT_CORAL = 210
    PALE_VIOLET_RED_1 = 211
    ORCHID_2 = 212
    ORCHID_1 = 213
    ORANGE_1 = 214
    SANDY_BROWN = 215
    LIGHT_SALMON_1 = 216
    LIGHT_PINK_1 = 217
    PINK_1 = 218
    PLUM_1 = 219
    GOLD_1 = 220
    LIGHT_GOLDENROD_2B = 221
    LIGHT_GOLDENROD_2C = 222
    NAVAJO_WHITE_1 = 223
    MISTY_ROSE1 = 224
    THISTLE_1 = 225
    YELLOW_1 = 226
    LIGHT_GOLDENROD_1 = 227
    KHAKI_1 = 228
    WHEAT_1 = 229
    CORNSILK_1 = 230
    GRAY_100 = 231
    # --- grayscale ramp (232-255) ---
    GRAY_3 = 232
    GRAY_7 = 233
    GRAY_11 = 234
    GRAY_15 = 235
    GRAY_19 = 236
    GRAY_23 = 237
    GRAY_27 = 238
    GRAY_30 = 239
    GRAY_35 = 240
    GRAY_39 = 241
    GRAY_42 = 242
    GRAY_46 = 243
    GRAY_50 = 244
    GRAY_54 = 245
    GRAY_58 = 246
    GRAY_62 = 247
    GRAY_66 = 248
    GRAY_70 = 249
    GRAY_74 = 250
    GRAY_78 = 251
    GRAY_82 = 252
    GRAY_85 = 253
    GRAY_89 = 254
    GRAY_93 = 255

    def __str__(self) -> str:
        """Return the SGR escape sequence selecting this background color."""
        return f'{CSI}48;5;{self.value}m'
class BaseInboundShipmentItem(MWSDataType):
    """Base datatype for items in an MWS inbound shipment.

    Subclasses must set `quantity_param` to the MWS parameter name their
    quantity maps to; `_base_params_dict` enforces this.
    """

    # MWS parameter name for `quantity`; must be overridden by subclasses.
    quantity_param = ''

    def __init__(self, sku: str, quantity: int, quantity_in_case: int=None, prep_details_list: List[PrepDetails]=None):
        # NOTE(review): quantity_in_case and prep_details_list default to
        # None, so Optional[...] annotations would be more accurate — confirm
        # typing imports before changing.
        self.sku = sku
        self.quantity = quantity
        self.quantity_in_case = quantity_in_case
        self.prep_details_list = prep_details_list

    def _base_params_dict(self) -> dict:
        """Build the parameter dict shared by all shipment-item subclasses.

        Raises:
            ValueError: if the subclass did not define `quantity_param`.
        """
        if (not self.quantity_param):
            raise ValueError(f'{self.__class__.__name__}.quantity_param must be defined.')
        data = {'SellerSKU': self.sku, self.quantity_param: self.quantity, 'QuantityInCase': self.quantity_in_case}
        if self.prep_details_list:
            # Flatten each PrepDetails into enumerated PrepDetailsList.member.N.* keys.
            parameterized_prep_details = [x.to_params() for x in self.prep_details_list]
            data.update(enumerate_keyed_param('PrepDetailsList.member', parameterized_prep_details))
        return data
.unit()
.parametrize(('name', 'extra', 'errors', 'caller', 'expectation', 'expected'), [pytest.param('python', '', 'raise', 'pytask', does_not_raise(), True, id='program exists'), pytest.param('unknown_program', '', 'raise', 'pytask', pytest.raises(RuntimeError, match='pytask requires the optional program'), None, id='program does not exist and error raised'), pytest.param('unknown_program', '', 'warn', 'pytask', pytest.warns(UserWarning, match='pytask requires the optional program'), False, id='program does not exist and warning'), pytest.param('unknown_program', 'extra included', 'warn', 'pytask', pytest.warns(UserWarning, match='extra included'), False, id='program does not exist and warning and extra'), pytest.param('unknown_program', 'extra included', 'ignore', 'pytask', does_not_raise(), False, id='program does not exist and ignore and extra'), pytest.param(None, '', 'unknown_errors', 'pytask', pytest.raises(ValueError, match="'errors' must be one of"), None, id='unknown errors')])
def test_check_for_optional_program(name, extra, errors, caller, expectation, expected):
with expectation:
program_exists = check_for_optional_program(name, extra, errors, caller)
assert (program_exists is expected) |
class CostCalculator():
def get_compressed_model_cost(cls, layer_db, layer_ratio_list, original_model_cost, cost_metric):
    """Compute the overall compression ratio (compressed cost / original cost).

    Layers not selected for compression are appended to `layer_ratio_list`
    with a ratio of None (kept uncompressed) before costing the model.

    NOTE(review): first parameter is named `cls` but no @classmethod
    decorator is visible here — likely stripped during extraction; confirm
    against upstream before calling on the class directly.
    """
    # Unselected layers keep their original cost.
    for layer in layer_db:
        if (layer not in layer_db.get_selected_layers()):
            layer_ratio_list.append(LayerCompRatioPair(layer, None))
    compressed_model_cost = cls.calculate_compressed_cost(layer_db, layer_ratio_list, cost_metric)
    # Ratio is taken on the dimension selected by cost_metric.
    if (cost_metric == CostMetric.memory):
        current_comp_ratio = Decimal((compressed_model_cost.memory / original_model_cost.memory))
    else:
        current_comp_ratio = Decimal((compressed_model_cost.mac / original_model_cost.mac))
    return current_comp_ratio
def compute_layer_cost(layer: Layer):
weight_dim = list(layer.weight_shape)
additional_act_dim = [layer.output_shape[2], layer.output_shape[3]]
mem_cost = reduce((lambda x, y: (x * y)), weight_dim)
mac_dim = (weight_dim + additional_act_dim)
mac_cost = reduce((lambda x, y: (x * y)), mac_dim)
return Cost(mem_cost, mac_cost)
def compute_network_cost(cls, layers):
network_cost = Cost(0, 0)
for layer in layers.values():
cost = cls.compute_layer_cost(layer)
network_cost += cost
return network_cost
def compute_model_cost(cls, layer_db: LayerDatabase):
network_cost_memory = 0
network_cost_mac = 0
for layer in layer_db:
cost = cls.compute_layer_cost(layer)
network_cost_memory += cost.memory
network_cost_mac += cost.mac
return Cost(network_cost_memory, network_cost_mac)
def calculate_comp_ratio_given_rank(cls, layer: Layer, rank: int, cost_metric: CostMetric):
original_cost = CostCalculator.compute_layer_cost(layer)
if (cost_metric == CostMetric.memory):
compressed_cost = cls.calculate_cost_given_rank(layer, rank).memory
updated_comp_ratio = (Decimal(compressed_cost) / Decimal(original_cost.memory))
else:
compressed_cost = cls.calculate_cost_given_rank(layer, rank).mac
updated_comp_ratio = (Decimal(compressed_cost) / Decimal(original_cost.mac))
return updated_comp_ratio
def calculate_rank_given_comp_ratio(cls, layer: Layer, comp_ratio: float, cost_metric: CostMetric) -> int:
orig_cost = CostCalculator.compute_layer_cost(layer)
if (cost_metric == CostMetric.mac):
target_cost = (orig_cost.mac * comp_ratio)
else:
target_cost = (orig_cost.memory * comp_ratio)
current_rank_candidate = cls.calculate_max_rank(layer)
if (cost_metric == CostMetric.memory):
running_cost = cls.calculate_cost_given_rank(layer, current_rank_candidate).memory
else:
running_cost = cls.calculate_cost_given_rank(layer, current_rank_candidate).mac
while ((running_cost > target_cost) and (current_rank_candidate > 0)):
current_rank_candidate -= 1
cost = cls.calculate_cost_given_rank(layer, current_rank_candidate)
if (cost_metric == CostMetric.memory):
running_cost = cost.memory
else:
running_cost = cost.mac
if (current_rank_candidate <= 0):
current_rank_candidate = 1
return current_rank_candidate
def calculate_per_layer_compressed_cost(cls, layer: Layer, comp_ratio: float, cost_metric: CostMetric) -> Cost:
rank = cls.calculate_rank_given_comp_ratio(layer, comp_ratio, cost_metric)
cost = cls.calculate_cost_given_rank(layer, rank)
return cost
def calculate_compressed_cost(cls, _layer_db: LayerDatabase, layer_ratio_list: List[LayerCompRatioPair], cost_metric: CostMetric) -> Cost:
running_cost = Cost(0, 0)
for layer_comp_ratio_pair in layer_ratio_list:
if (layer_comp_ratio_pair.comp_ratio is not None):
cost = cls.calculate_per_layer_compressed_cost(layer_comp_ratio_pair.layer, layer_comp_ratio_pair.comp_ratio, cost_metric)
else:
cost = cls.compute_layer_cost(layer_comp_ratio_pair.layer)
running_cost += cost
return running_cost
def calculate_compressed_cost_given_ranks(cls, _layer_db: LayerDatabase, layer_rank_list: List[Tuple[(Layer, int)]]) -> Cost:
running_cost = Cost(0, 0)
for (layer, rank) in layer_rank_list:
if rank:
cost = cls.calculate_cost_given_rank(layer, rank)
else:
cost = cls.compute_layer_cost(layer)
running_cost += cost
return running_cost
def calculate_cost_given_rank(layer: Layer, rank: int) -> Cost:
def calculate_max_rank(layer: Layer) -> int: |
def test_datetime_parsing():
    """_parse_datetime_header: explicit offset -> aware, no offset -> naive."""
    aware = catalog._parse_datetime_header('2006-06-28 23:24+0200')
    assert (aware.year, aware.month, aware.day) == (2006, 6, 28)
    # +0200 is represented as a fixed-offset zone 120 minutes east of UTC.
    assert aware.tzinfo.zone == 'Etc/GMT+120'
    naive = catalog._parse_datetime_header('2006-06-28 23:24')
    assert (naive.year, naive.month, naive.day) == (2006, 6, 28)
    assert naive.tzinfo is None
class UniverseAuth2Test(OAuth2Test):
    """Login and partial-pipeline tests for the Universe OAuth2 backend.

    NOTE(review): ``user_data_url`` and the e-mail literals were truncated in
    this file (``user_data_url = '`` was an unterminated string — a syntax
    error). They are restored from the upstream social-core test-suite;
    verify against it.
    """
    backend_path = 'social_core.backends.universe.UniverseOAuth2'
    user_data_url = 'https://www.universe.com/api/v2/current_user'
    expected_username = 'scott+wombats@gmail.com'
    access_token_body = json.dumps({'access_token': 'foobar', 'token_type': 'bearer'})
    user_data_body = json.dumps({'current_user': {'id': '123456', 'slug': 'foo-bar', 'first_name': 'Scott', 'last_name': 'Vitale', 'created_at': '2019-01-08T15:49:42.514Z', 'updated_at': '2019-01-17T19:41:39.711Z', 'email': 'scott+wombats@gmail.com'}})

    def test_login(self):
        self.do_login()

    def test_partial_pipeline(self):
        self.do_partial_pipeline()
class PQTuple():
    """A positional tuple with a schema for name-based access.

    *schema* maps a field name to an index into *tuple* (which may be a
    ``list`` or a ``tuple``; :meth:`copy` always produces a list-backed
    instance so that item assignment works).
    """

    def __init__(self, tuple, schema):
        # NOTE: the parameter shadows the builtin `tuple`; kept as-is for
        # interface compatibility with existing callers.
        self.tuple = tuple
        self.schema = schema

    def __getattr__(self, attr):
        # Name-based field access via the schema.
        # NOTE(review): an unknown name raises KeyError rather than
        # AttributeError — confirm no caller depends on hasattr() semantics.
        return self.tuple[self.schema[attr]]

    def __getitem__(self, item):
        # Integer -> positional access; anything else -> schema lookup.
        if isinstance(item, int):
            return self.tuple[item]
        else:
            return self.tuple[self.schema[item]]

    def __iter__(self):
        return self.tuple.__iter__()

    def getDict(self):
        """Return a plain dict {field name: value}."""
        res = {}
        for v in self.schema:
            res[v] = self.tuple[self.schema[v]]
        return res

    def __setitem__(self, item, value):
        # Only meaningful when the backing storage is mutable (a list).
        self.tuple[self.schema[item]] = value

    def copy(self):
        """Return a mutable (list-backed) copy sharing the same schema."""
        return PQTuple(list(self.tuple), self.schema)

    def __lt__(self, other):
        # Comparable only to a PQTuple with the same schema, or a raw tuple.
        if isinstance(other, self.__class__):
            if (self.schema == other.schema):
                return (self.tuple < other.tuple)
            else:
                return False
        elif isinstance(other, tuple):
            return (self.tuple < other)
        else:
            return False

    def __eq__(self, other):
        if isinstance(other, self.__class__):
            if (self.schema == other.schema):
                return (self.tuple == other.tuple)
            else:
                return False
        elif isinstance(other, tuple):
            return (self.tuple == other)
        else:
            return False

    def __gt__(self, other):
        if isinstance(other, self.__class__):
            if (self.schema == other.schema):
                return (self.tuple > other.tuple)
            else:
                return False
        elif isinstance(other, tuple):
            return (self.tuple > other)
        else:
            return False

    def __le__(self, other):
        # Fix: was `self.__gt__(other) or self.__eq__(other)` (i.e. `>=`),
        # which inverted the comparison; `<=` is `<` or `==`.
        return (self.__lt__(other) or self.__eq__(other))

    def __ge__(self, other):
        return (self.__gt__(other) or self.__eq__(other))

    def __ne__(self, other):
        return (not self.__eq__(other))

    def __hash__(self):
        # NOTE(review): raises TypeError when the backing storage is a list
        # (e.g. after copy()) — confirm only tuple-backed instances are hashed.
        return hash(self.tuple)

    def __repr__(self):
        itms = list(self.schema.items())
        itms.sort(key=(lambda x: x[1]))
        return (('{' + ','.join([('"%s":%s' % (str_encode(i[0].lstrip().rstrip()), repr(self.tuple[i[1]]))) for i in itms])) + '}')
class ToolButtonWithMenuIndication(QtWidgets.QToolButton):
    """Borderless tool button whose icon carries a small menu-arrow overlay.

    The arrow brightens on hover, and a click (press + release without a
    drag) re-emits the enclosing tab widget's context-menu signal instead of
    acting as a normal button press.
    """
    # Icon canvas size (width, height) in pixels.
    SIZE = (21, 16)

    def __init__(self):
        QtWidgets.QToolButton.__init__(self)
        self.setIconSize(QtCore.QSize(*self.SIZE))
        self.setStyleSheet('QToolButton{ border: none; }')
        # Two arrow pixmaps: normal (strength 0) and hover-highlighted (70).
        self._menuarrow1 = self._createMenuArrowPixmap(0)
        self._menuarrow2 = self._createMenuArrowPixmap(70)
        self._menuarrow = self._menuarrow1
        self._icon = None
        # Holds the press position while a click is pending; False otherwise
        # (the truthy position doubles as a "pressed" flag).
        self._menuPressed = False

    def mousePressEvent(self, event):
        # Ignore so the event also propagates to the parent widget; remember
        # where the press happened for the drag-distance check.
        event.ignore()
        self._menuPressed = event.pos()

    def mouseMoveEvent(self, event):
        QtWidgets.QToolButton.mouseMoveEvent(self, event)
        if self._menuPressed:
            # Cancel the pending menu trigger once the cursor has moved far
            # enough from the press position to count as a drag.
            dragDist = QtWidgets.QApplication.startDragDistance()
            if ((event.pos() - self._menuPressed).manhattanLength() >= dragDist):
                self._menuPressed = False

    def mouseReleaseEvent(self, event):
        event.ignore()
        if self._menuPressed:
            # Re-emit the click as a context-menu request on the tab widget.
            # NOTE(review): assumes parent().parent() is the widget defining
            # customContextMenuRequested — confirm the widget hierarchy.
            tabs = self.parent().parent()
            pos = self.mapTo(tabs, event.pos())
            tabs.customContextMenuRequested.emit(pos)

    def enterEvent(self, event):
        # Hover in: switch to the highlighted arrow and repaint the icon.
        QtWidgets.QToolButton.enterEvent(self, event)
        self._menuarrow = self._menuarrow2
        self.setIcon()
        self._menuPressed = False

    def leaveEvent(self, event):
        # Hover out: restore the normal arrow and repaint the icon.
        QtWidgets.QToolButton.leaveEvent(self, event)
        self._menuarrow = self._menuarrow1
        self.setIcon()
        self._menuPressed = False

    def setIcon(self, icon=None):
        """Compose the given (or last-set) base icon with the arrow overlay.

        Called with no argument to just refresh the arrow layer.
        """
        if (icon is not None):
            self._icon = icon
        artist = IconArtist(self.SIZE)
        if self._icon:
            # Base icon is shifted 5 px right to leave room for the arrow.
            artist.addLayer(self._icon, 5, 0)
        artist.addLayer(self._menuarrow, 0, 0)
        icon = artist.finish()
        QtWidgets.QToolButton.setIcon(self, icon)

    def _createMenuArrowPixmap(self, strength):
        # Render a standalone 16x16 arrow pixmap at the given highlight strength.
        artist = IconArtist()
        artist.addMenuArrow(strength)
        return artist.finish().pixmap(16, 16)
def get_excludes(session):
    """Return the list of excluded targets recorded for *session*.

    Queries the ``scansweep_excludes`` table; an empty result yields ``[]``.
    """
    # NOTE(review): the connection is never closed here — confirm whether
    # get_database_conn() returns a shared/cached connection before adding a
    # close.
    conn = get_database_conn()
    curs = query_execute_wrapper(conn, query_string='SELECT * FROM scansweep_excludes WHERE session=?', query_list=[session], no_return=False)
    # The original special-cased an empty result to return []; a
    # comprehension already does that naturally.
    return [row['target'] for row in curs]
def test(sess, model, users_to_test, data_generator, args, drop_flag=True, batch_test_flag=False):
    """Evaluate ranking metrics for *users_to_test* and return per-K averages.

    Runs the model's batch ratings over user batches, scores each user in a
    worker pool, and averages precision / recall / ndcg / hit_ratio over all
    test users (one numpy array entry per K in ``args.Ks``).
    """
    global _data_generator
    global _USR_NUM
    global _OUTFIT_NUM
    global _N_TRAIN
    global _N_TEST
    global Ks
    global _BATCH_SIZE
    # NOTE(review): eval() of a CLI string; args.Ks is expected to be a list
    # literal like "[20, 40]" — ast.literal_eval would be safer if callers allow.
    Ks = eval(args.Ks)
    _BATCH_SIZE = args.batch_size
    _data_generator = data_generator
    (_USR_NUM, _OUTFIT_NUM) = (_data_generator.n_users, _data_generator.n_train_outfits)
    _N_TEST = _data_generator.n_recom_tests
    result = {'precision': np.zeros(len(Ks)), 'recall': np.zeros(len(Ks)), 'ndcg': np.zeros(len(Ks)), 'hit_ratio': np.zeros(len(Ks))}
    pool = multiprocessing.Pool(_cores)
    try:
        u_batch_size = (_BATCH_SIZE * 2)
        test_users = users_to_test
        n_test_users = len(test_users)
        n_user_batchs = ((n_test_users // u_batch_size) + 1)
        count = 0
        for u_batch_id in range(n_user_batchs):
            start = (u_batch_id * u_batch_size)
            end = ((u_batch_id + 1) * u_batch_size)
            user_batch = test_users[start:end]
            # Score every training outfit for this batch of users.
            outfit_batch = range(_OUTFIT_NUM)
            if (drop_flag == False):
                rate_batch = sess.run(model.batch_ratings, {model.user_input: user_batch, model.po_input: outfit_batch})
            else:
                # Dropout is disabled (rate 0.0) at evaluation time.
                rate_batch = sess.run(model.batch_ratings, {model.user_input: user_batch, model.po_input: outfit_batch, model.node_dropout: [0.0], model.mess_dropout: [0.0]})
            user_batch_rating_uid = zip(rate_batch, user_batch)
            batch_result = pool.map(test_one_user, user_batch_rating_uid)
            count += len(batch_result)
            # Accumulate per-user metrics, pre-divided for the average.
            for re in batch_result:
                result['precision'] += (re['precision'] / n_test_users)
                result['recall'] += (re['recall'] / n_test_users)
                result['ndcg'] += (re['ndcg'] / n_test_users)
                result['hit_ratio'] += (re['hit_ratio'] / n_test_users)
        assert (count == n_test_users)
    finally:
        # Fix: release the worker pool even if sess.run or the assertion
        # above fails, and join() so workers are fully reaped.
        pool.close()
        pool.join()
    return result
_module()
class PadMultiViewImage(object):
def __init__(self, size=None, size_divisor=None, pad_val=0):
self.size = size
self.size_divisor = size_divisor
self.pad_val = pad_val
assert ((size is not None) or (size_divisor is not None))
assert ((size is None) or (size_divisor is None))
def _pad_img(self, results):
if (self.size is not None):
padded_img = [mmcv.impad(img, shape=self.size, pad_val=self.pad_val) for img in results['img']]
elif (self.size_divisor is not None):
padded_img = [mmcv.impad_to_multiple(img, self.size_divisor, pad_val=self.pad_val) for img in results['img']]
results['ori_shape'] = [img.shape for img in results['img']]
results['img'] = padded_img
results['img_shape'] = [img.shape for img in padded_img]
results['pad_shape'] = [img.shape for img in padded_img]
results['pad_fixed_size'] = self.size
results['pad_size_divisor'] = self.size_divisor
def __call__(self, results):
self._pad_img(results)
return results
def __repr__(self):
repr_str = self.__class__.__name__
repr_str += f'(size={self.size}, '
repr_str += f'size_divisor={self.size_divisor}, '
repr_str += f'pad_val={self.pad_val})'
return repr_str |
class _Dice(MessageFilter):
    """Filter for dice messages, optionally restricted to a specific emoji
    and/or a set of accepted values. ``name`` is built to mirror how the
    filter was constructed (e.g. ``filters.Dice.ALL``).
    """
    __slots__ = ('emoji', 'values')

    def __init__(self, values: Optional[SCT[int]]=None, emoji: Optional[DiceEmojiEnum]=None):
        super().__init__()
        self.emoji: Optional[DiceEmojiEnum] = emoji
        # A bare int is normalized into a one-element collection.
        self.values: Optional[Collection[int]] = ([values] if isinstance(values, int) else values)
        if emoji:
            self.name = f'filters.Dice.{emoji.name}'
        # NOTE(review): as written, an emoji given *without* values falls into
        # the final `else` below and overwrites the name just set above with
        # 'filters.Dice.ALL'. This chain looks like it was meant to be nested
        # under `if emoji:` — confirm the indentation against upstream.
        if (self.values and emoji):
            self.name = f"filters.Dice.{emoji.name.title().replace('_', '')}({self.values})"
        elif values:
            self.name = f'filters.Dice({self.values})'
        else:
            self.name = 'filters.Dice.ALL'

    def filter(self, message: Message) -> bool:
        # Messages without a dice never match.
        if (not (dice := message.dice)):
            return False
        if self.emoji:
            emoji_match = (dice.emoji == self.emoji)
            if self.values:
                # Both the emoji and the rolled value must match.
                return ((dice.value in self.values) and emoji_match)
            return emoji_match
        # No emoji restriction: match on value only, or anything if no values.
        return ((dice.value in self.values) if self.values else True)
class DownSamplerB(nn.Module):
    """Downsampling block: a strided 3x3 conv feeding a pyramid of dilated
    convolutions (dilations 1/2/4/8/16) whose outputs are hierarchically
    fused, concatenated, then passed through BatchNorm and PReLU.
    """

    def __init__(self, nIn, nOut):
        super().__init__()
        branch_ch = int(nOut / 5)
        # First branch absorbs the remainder so the channels sum to nOut.
        first_ch = (nOut - (4 * branch_ch))
        self.c1 = C(nIn, branch_ch, 3, 2)
        self.d1 = CDilated(branch_ch, first_ch, 3, 1, 1)
        self.d2 = CDilated(branch_ch, branch_ch, 3, 1, 2)
        self.d4 = CDilated(branch_ch, branch_ch, 3, 1, 4)
        self.d8 = CDilated(branch_ch, branch_ch, 3, 1, 8)
        self.d16 = CDilated(branch_ch, branch_ch, 3, 1, 16)
        self.bn = nn.BatchNorm2d(nOut, eps=0.001)
        self.act = nn.PReLU(nOut)

    def forward(self, input):
        down = self.c1(input)
        # Hierarchical feature fusion: every slot after d2 carries the running
        # sum of the dilated outputs (d2, d2+d4, d2+d4+d8, d2+d4+d8+d16).
        parts = [self.d1(down), self.d2(down)]
        running = parts[1]
        for branch in (self.d4, self.d8, self.d16):
            running = (running + branch(down))
            parts.append(running)
        fused = torch.cat(parts, 1)
        return self.act(self.bn(fused))
def test_parse_version() -> None:
    """parse_version('3.6') yields the expected interpreter/wheel tag list."""
    tags = parse_version('3.6')
    assert tags == ['-cp36-', '-pp36-', '-ip36-', '-jy36-', '-py3.6-', '-py3.6.']
    assert '-cp36-' in tags
    assert '-py3.6.' in tags
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.