code stringlengths 281 23.7M |
|---|
class Events(object):
    """Registry of every event hub published by a game ``Core``.

    Assigning any object that has a ``name`` attribute (the event hubs)
    automatically stamps it with ``"<core repr>::<attribute name>"`` via
    ``__setattr__``, which makes hubs identifiable in logs/debugging.
    """

    def __init__(self, core: Core) -> None:
        self.core = core
        # Core lifecycle
        self.core_initialized = EventHub[Core]()
        # Client / user hubs
        self.user_state_transition = EventHub[Tuple[Client, str, str]]()
        self.client_connected = EventHub[Client]()
        self.client_dropped = EventHub[Client]()
        self.client_pivot = EventHub[Client]()
        self.client_command = _ClientCommandMapping(core)
        # Game lifecycle hubs
        self.game_created = EventHub[Game]()
        self.game_data_send = EventHub[Tuple[Game, Client, Packet]]()
        self.game_data_recv = EventHub[Tuple[Game, Client, Packet]]()
        self.game_started = EventHub[Game]()
        self.game_joined = EventHub[Tuple[Game, Client]]()
        self.game_left = EventHub[Tuple[Game, Client]]()
        self.game_ended = EventHub[Game]()
        self.game_aborted = EventHub[Game]()
        self.game_crashed = EventHub[Game]()

    def __setattr__(self, name: str, value: Any) -> None:
        # Stamp named objects with a debug-friendly identity before storing.
        if hasattr(value, 'name'):
            value.name = f'{self.core!r}::{name}'
        object.__setattr__(self, name, value)
class OptionPlotoptionsParetoSonificationDefaultinstrumentoptionsMappingFrequency(Options):
    """Frequency-mapping options for Pareto sonification default instruments.

    NOTE(review): each name below was defined twice (getter then setter),
    which can only result from stripped ``@property`` / ``@<name>.setter``
    decorators — without them the setter silently shadowed the getter.
    Restored as properties.
    """

    @property
    def mapFunction(self):
        return self._config_get(None)

    @mapFunction.setter
    def mapFunction(self, value: Any):
        self._config(value, js_type=False)

    @property
    def mapTo(self):
        return self._config_get(None)

    @mapTo.setter
    def mapTo(self, text: str):
        self._config(text, js_type=False)

    @property
    def max(self):
        return self._config_get(None)

    @max.setter
    def max(self, num: float):
        self._config(num, js_type=False)

    @property
    def min(self):
        return self._config_get(None)

    @min.setter
    def min(self, num: float):
        self._config(num, js_type=False)

    @property
    def within(self):
        return self._config_get(None)

    @within.setter
    def within(self, value: Any):
        self._config(value, js_type=False)
def test_widget_refresh(lfs_manager, manager_nospawn, caplog):
    """A manual refresh must restart the timer without emitting any log records."""
    manager_nospawn.start(lfs_manager)
    matches_loaded(manager_nospawn)
    widget = manager_nospawn.c.widget['livefootballscores']
    # Arm and then fully tear down the automatic refresh timer so the
    # manual refresh below has to recreate it.
    widget.eval('self.set_refresh_timer()')
    widget.eval('self.refresh_timer.cancel()')
    widget.eval('self.refresh_timer = None')
    with caplog.at_level(logging.INFO):
        widget.refresh()
    check_timer(manager_nospawn)
    assert caplog.record_tuples == []
def test_data_transfer_from_broker(load_broker_data):
    """End-to-end check of the broker -> local assistance-record transfer.

    NOTE(review): several numeric literals in this test were lost (the
    empty right-hand sides of the min/max asserts and the missing first
    element of the expected row tuple) — they must be restored from the
    original fixture before this test can run.
    """
    # Full reload of every active record from the broker source table.
    call_command('transfer_assistance_records', '--reload-all')
    table = SourceAssistanceTransaction().table_name
    with connections[DEFAULT_DB_ALIAS].cursor() as cursor:
        # Only active records should be copied — one source record is inactive.
        cursor.execute(f'SELECT COUNT(*) FROM {table}')
        assert (cursor.fetchall()[0][0] == (NUMBER_OF_SOURCE_RECORDS - 1)), 'Inactive Record Copied!'
        id_field = 'published_fabs_id'
        cursor.execute(f'SELECT MIN({id_field}), MAX({id_field}) FROM {table}')
        (min_id, max_id) = cursor.fetchall()[0]
        # NOTE(review): expected id literals missing — restore before running.
        assert (min_id == )
        assert (max_id == )
        # Spot-check one transferred row column-for-column.
        cursor.execute(f"SELECT * FROM {table} WHERE afa_generated_unique = '9100_P033A173267_-none-_84.033_3'")
        # NOTE(review): the leading element (the record id) of this tuple is missing.
        assert (cursor.fetchall()[0] == (, '9100_P033A173267_-none-_84.033_3', '07/12/2017', 'C', None, '06', None, 'UNKNOWN TITLE', '3', 'Columbus State Community College', None, '091', 'Department of Education (ED)', None, None, '9100', 'Department of Education', None, None, 'NON', '06', None, '84.033', 'Federal Work-Study Program', None, None, datetime.datetime(2017, 9, 16, 22, 22, 42, 760993), Decimal('0'), 'P033A173267', Decimal('520000'), None, '091', 'EDUCATION, DEPARTMENT OF (9100)', None, None, None, None, None, None, None, None, None, None, None, None, None, None, True, True, '550 E Spring St', None, None, None, 'Columbus', '03', 'USA', 'UNITED STATES', '049', 'Franklin', None, None, None, None, 'OH', 'Ohio', '43215', '1722', datetime.datetime(2017, 7, 21, 0, 0), Decimal('0'), Decimal('0'), '08/31/2023', None, 'OH', 'USA', 'UNITED STATES', '041', 'Delaware', 'Ohio', '1722', 'COLUMBUS', 'OH18000', '03', None, '', '43215', 'Single ZIP Code', 2, None, None, None, '520000.0', None, None, 'ASST_NON_P033A173267_9100', datetime.datetime(2017, 9, 16, 22, 22, 42, 760993), None, 'awardee-uei', 'parent-uei', 'funding-opportunity-goals', 'funding-opportunity-number', 123456))
def extractDsnovelsArtBlog(item):
    """Build a release message for a DSnovels feed item.

    Returns None for previews / items without a chapter or volume number,
    False when no known tag matches, otherwise the built release message.
    """
    vol, chp, frag, postfix = extractVolChapterFragmentPostfix(item['title'])
    if (not (chp or vol)) or 'preview' in item['title'].lower():
        return None
    # (tag to look for, series name to report, translation type)
    tagmap = [
        ('taming a munchkin', 'taming a munchkin', 'translated'),
        ('PRC', 'PRC', 'translated'),
        ('Loiterous', 'Loiterous', 'oel'),
    ]
    for tagname, name, tl_type in tagmap:
        if tagname in item['tags']:
            return buildReleaseMessageWithType(item, name, vol, chp, frag=frag, postfix=postfix, tl_type=tl_type)
    return False
def test_api_key_auth_body():
    """api_key strategy must inject the key into the JSON request body.

    NOTE(review): the URL string literals on the Request(...) line and in
    the url assertion were lost during extraction (both strings are left
    unterminated) — restore them before this test can run.
    """
    req: PreparedRequest = Request(method='POST', url=' json={'opt_out': True}).prepare()
    api_key = 'imakeyblademaster'
    secrets = {'api_key': api_key}
    # Body template: the <api_key> placeholder is replaced from secrets and
    # merged into the existing JSON body.
    authenticated_request = AuthenticationStrategy.get_strategy('api_key', {'body': '{\n "key": "<api_key>"\n}\n'}).add_authentication(req, ConnectionConfig(secrets=secrets))
    # NOTE(review): expected URL literal missing here.
    assert (authenticated_request.url == f'
    assert (json.loads(authenticated_request.body) == {'opt_out': True, 'key': 'imakeyblademaster'})
class SNFList():
    """Accumulates statements in single-name form.

    Every added expression is bound to a fresh intermediate variable
    (``intm0``, ``intm1``, ...) and the resulting statements can be
    rendered as text or parsed into an AST.
    """

    def __init__(self):
        self.stmts = []       # rendered "intmN = <expr>" lines, in insertion order
        self.var_counter = 0  # how many intermediates have been created

    def add(self, expr):
        """Bind *expr* to a fresh intermediate variable; return its name."""
        vname = f'intm{self.var_counter}'
        self.var_counter += 1
        self.stmts.append(f'{vname} = {expr.strip()}')
        return vname

    def ast(self):
        """Parse the accumulated statements into an AST module."""
        return ast.parse(self.str())

    def last_var(self):
        """Name of the most recently created intermediate variable."""
        assert self.var_counter > 0, 'No last variable available (0 statements in SNF).'
        return f'intm{self.var_counter - 1}'

    def str(self):
        """Render all statements as newline-joined source text."""
        return '\n'.join(self.stmts)
class InferenceService(object):
    """Client helper for the ``/InferenceService/Infer`` unary-unary gRPC call."""

    # NOTE(review): grpc's code generator emits this method as a
    # @staticmethod (it takes no self/cls parameter); the decorator was
    # evidently lost — restored here.
    @staticmethod
    def Infer(request, target, options=(), channel_credentials=None, call_credentials=None, insecure=False, compression=None, wait_for_ready=None, timeout=None, metadata=None):
        """Invoke Infer on *target*, returning the deserialized InferResult."""
        return grpc.experimental.unary_unary(request, target, '/InferenceService/Infer', protos_dot_inferrequest__pb2.InferRequest.SerializeToString, protos_dot_inferresult__pb2.InferResult.FromString, options, channel_credentials, insecure, call_credentials, compression, wait_for_ready, timeout, metadata)
class OptionSeriesHeatmapSonificationContexttracksMappingGapbetweennotes(Options):
    """Gap-between-notes mapping options for heatmap sonification context tracks.

    NOTE(review): duplicated getter/setter method names indicate stripped
    ``@property`` / ``@<name>.setter`` decorators; restored as properties.
    """

    @property
    def mapFunction(self):
        return self._config_get(None)

    @mapFunction.setter
    def mapFunction(self, value: Any):
        self._config(value, js_type=False)

    @property
    def mapTo(self):
        return self._config_get(None)

    @mapTo.setter
    def mapTo(self, text: str):
        self._config(text, js_type=False)

    @property
    def max(self):
        return self._config_get(None)

    @max.setter
    def max(self, num: float):
        self._config(num, js_type=False)

    @property
    def min(self):
        return self._config_get(None)

    @min.setter
    def min(self, num: float):
        self._config(num, js_type=False)

    @property
    def within(self):
        return self._config_get(None)

    @within.setter
    def within(self, value: Any):
        self._config(value, js_type=False)
def voltlvl_idx(net, element, voltage_levels, branch_bus=None, vn_kv_limits=[145, 60, 1]):
    """Return the indices of ``net[element]`` belonging to the given voltage level(s).

    *voltage_levels* may be a single level (string or non-iterable scalar) or
    an iterable of levels; in the latter case the per-level results are
    concatenated in order.
    """
    if not net[element].shape[0]:
        return []
    # Single level: strings are iterable but still denote one level, so they
    # are tested first. Fixed the original's bitwise `|` (which always
    # evaluates both operands) to a short-circuiting boolean `or`.
    if isinstance(voltage_levels, str) or not hasattr(voltage_levels, '__iter__'):
        return _voltlvl_idx(net, element, voltage_levels, branch_bus=branch_bus, vn_kv_limits=vn_kv_limits)
    idx = []
    for voltage_level in voltage_levels:
        idx += _voltlvl_idx(net, element, voltage_level, branch_bus=branch_bus, vn_kv_limits=vn_kv_limits)
    return idx
def _patch_asyncio():
    """Monkey-patch asyncio (nest_asyncio-style) so event loops can be re-entered.

    Swaps in the pure-Python Task/Future implementations (the C versions
    cannot be made re-entrant) and replaces asyncio.run / get_event_loop.
    Idempotent: a marker attribute prevents double patching.
    """
    def run(main, *, debug=False):
        # Replacement for asyncio.run(): reuses the current loop instead of
        # creating and closing a fresh one.
        loop = asyncio.get_event_loop()
        loop.set_debug(debug)
        task = asyncio.ensure_future(main)
        try:
            return loop.run_until_complete(task)
        finally:
            # Mirror asyncio.run()'s cleanup: cancel the task if it is still
            # pending and wait for the cancellation to be processed.
            if (not task.done()):
                task.cancel()
                with suppress(asyncio.CancelledError):
                    loop.run_until_complete(task)
    def _get_event_loop(stacklevel=3):
        # Replacement for events.get_event_loop(): fall back to the policy's
        # loop when no loop is currently running (no deprecation warning).
        loop = events._get_running_loop()
        if (loop is None):
            loop = events.get_event_loop_policy().get_event_loop()
        return loop
    # Already patched — nothing to do.
    if hasattr(asyncio, '_nest_patched'):
        return
    if (sys.version_info >= (3, 6, 0)):
        # Force the pure-Python Task/Future so their step functions can be
        # re-entered; the C accelerated versions cannot.
        asyncio.Task = asyncio.tasks._CTask = asyncio.tasks.Task = asyncio.tasks._PyTask
        asyncio.Future = asyncio.futures._CFuture = asyncio.futures.Future = asyncio.futures._PyFuture
    if (sys.version_info < (3, 7, 0)):
        # Pre-3.7 the current-task bookkeeping lived on the Task class.
        asyncio.tasks._current_tasks = asyncio.tasks.Task._current_tasks
        asyncio.all_tasks = asyncio.tasks.Task.all_tasks
    if (sys.version_info >= (3, 9, 0)):
        # 3.9+ deprecates get_event_loop() without a running loop; install
        # the non-warning fallback everywhere it is referenced.
        events._get_event_loop = events.get_event_loop = asyncio.get_event_loop = _get_event_loop
    asyncio.run = run
    # Marker so a second call is a no-op.
    asyncio._nest_patched = True
class ConfigStoreResponseAllOf(ModelNormal):
    """OpenAPI-generated model for the ConfigStoreResponse allOf fragment.

    NOTE(review): the bare `_property` / `_js_args_to_python_args` lines in
    the original were mangled decorators. Restored below as
    ``@cached_property`` and ``@convert_js_args_to_python_args`` (plus
    ``@classmethod`` on ``_from_openapi_data``) per the OpenAPI python
    generator's ModelNormal templates — confirm against the generator
    version in use.
    """

    allowed_values = {}
    validations = {}

    @cached_property
    def additional_properties_type():
        # Any type is accepted for additional (undeclared) properties.
        return (bool, date, datetime, dict, float, int, list, str, none_type)

    _nullable = False

    @cached_property
    def openapi_types():
        # Declared properties and their accepted types.
        return {'id': (str,)}

    @cached_property
    def discriminator():
        return None

    attribute_map = {'id': 'id'}
    read_only_vars = {}
    _composed_schemas = {}

    @classmethod
    @convert_js_args_to_python_args
    def _from_openapi_data(cls, *args, **kwargs):
        """Instantiate from raw API data (server-side naming allowed)."""
        _check_type = kwargs.pop('_check_type', True)
        _spec_property_naming = kwargs.pop('_spec_property_naming', False)
        _path_to_item = kwargs.pop('_path_to_item', ())
        _configuration = kwargs.pop('_configuration', None)
        _visited_composed_classes = kwargs.pop('_visited_composed_classes', ())
        self = super(OpenApiModel, cls).__new__(cls)
        if args:
            raise ApiTypeError(('Invalid positional arguments=%s passed to %s. Remove those invalid positional arguments.' % (args, self.__class__.__name__)), path_to_item=_path_to_item, valid_classes=(self.__class__,))
        self._data_store = {}
        self._check_type = _check_type
        self._spec_property_naming = _spec_property_naming
        self._path_to_item = _path_to_item
        self._configuration = _configuration
        self._visited_composed_classes = (_visited_composed_classes + (self.__class__,))
        for (var_name, var_value) in kwargs.items():
            # Optionally discard keys the schema does not know about.
            if ((var_name not in self.attribute_map) and (self._configuration is not None) and self._configuration.discard_unknown_keys and (self.additional_properties_type is None)):
                continue
            setattr(self, var_name, var_value)
        return self

    required_properties = set(['_data_store', '_check_type', '_spec_property_naming', '_path_to_item', '_configuration', '_visited_composed_classes'])

    @convert_js_args_to_python_args
    def __init__(self, *args, **kwargs):
        """Instantiate from keyword arguments; read-only attributes are rejected."""
        _check_type = kwargs.pop('_check_type', True)
        _spec_property_naming = kwargs.pop('_spec_property_naming', False)
        _path_to_item = kwargs.pop('_path_to_item', ())
        _configuration = kwargs.pop('_configuration', None)
        _visited_composed_classes = kwargs.pop('_visited_composed_classes', ())
        if args:
            raise ApiTypeError(('Invalid positional arguments=%s passed to %s. Remove those invalid positional arguments.' % (args, self.__class__.__name__)), path_to_item=_path_to_item, valid_classes=(self.__class__,))
        self._data_store = {}
        self._check_type = _check_type
        self._spec_property_naming = _spec_property_naming
        self._path_to_item = _path_to_item
        self._configuration = _configuration
        self._visited_composed_classes = (_visited_composed_classes + (self.__class__,))
        for (var_name, var_value) in kwargs.items():
            if ((var_name not in self.attribute_map) and (self._configuration is not None) and self._configuration.discard_unknown_keys and (self.additional_properties_type is None)):
                continue
            setattr(self, var_name, var_value)
            # Direct construction may not set server-managed attributes.
            if (var_name in self.read_only_vars):
                raise ApiAttributeError(f'`{var_name}` is a read-only attribute. Use `from_openapi_data` to instantiate class with read only attributes.')
def jsonable(value):
    """Recursively convert *value* into plain JSON-serializable Python data.

    Dicts and sequences are converted element-wise, tensors become nested
    lists, paths become strings; raises ValueError for anything else that
    is not already a JSON scalar or an OmegaConf container.
    """
    import torch

    if isinstance(value, dict):
        return {key: jsonable(item) for key, item in value.items()}
    if isinstance(value, (list, tuple)):
        return [jsonable(item) for item in value]
    if isinstance(value, torch.Tensor):
        # Detach + move to CPU so tensors on any device serialize cleanly.
        return value.detach().cpu().tolist()
    if isinstance(value, Path):
        return str(value)
    if value is None or isinstance(value, (int, float, str, bool)):
        return value
    if isinstance(value, BaseContainer):
        return OmegaConf.to_container(value)
    raise ValueError(f'{repr(value)} is not jsonable.')
class ZenpyCache(object):
    """Wrapper around a cachetools cache that only stores BaseObject values
    and allows the cache implementation/size to be swapped at runtime.

    NOTE(review): ``impl_name``, ``maxsize`` and ``currsize`` must be
    properties — ``set_maxsize`` passes ``self.impl_name`` straight into
    ``_get_cache_impl``, which expects a class-name *string*; without
    ``@property`` that would be a bound method and always raise. The
    decorators were evidently stripped and are restored here.
    """

    # Every cachetools class ending in "Cache" (except the abstract base).
    AVAILABLE_CACHES = [c for c in dir(cachetools) if (c.endswith('Cache') and (c != 'Cache'))]

    def __init__(self, cache_impl, maxsize, **kwargs):
        self.cache = self._get_cache_impl(cache_impl, maxsize, **kwargs)
        self.purge_lock = RLock()

    def set_cache_impl(self, cache_impl, maxsize, **kwargs):
        """Swap the backing cache class, carrying existing entries over."""
        new_cache = self._get_cache_impl(cache_impl, maxsize, **kwargs)
        self._populate_new_cache(new_cache)
        self.cache = new_cache

    def pop(self, key, default=None):
        return self.cache.pop(key, default)

    def items(self):
        return self.cache.items()

    @property
    def impl_name(self):
        """Class name of the backing cache implementation."""
        return self.cache.__class__.__name__

    @property
    def maxsize(self):
        """Maximum number of entries the backing cache will hold."""
        return self.cache.maxsize

    def set_maxsize(self, maxsize, **kwargs):
        """Rebuild the cache with a new maxsize, carrying entries over."""
        new_cache = self._get_cache_impl(self.impl_name, maxsize, **kwargs)
        self._populate_new_cache(new_cache)
        self.cache = new_cache

    def purge(self):
        """Remove all entries (thread-safe)."""
        with self.purge_lock:
            self.cache.clear()

    @property
    def currsize(self):
        """Current number of cached entries."""
        return len(self.cache)

    def _populate_new_cache(self, new_cache):
        # Copy entries across; the new cache applies its own eviction rules.
        for (key, value) in self.cache.items():
            new_cache[key] = value

    def _get_cache_impl(self, cache_impl, maxsize, **kwargs):
        if (cache_impl not in self.AVAILABLE_CACHES):
            raise ZenpyCacheException(('No such cache: %s, available caches: %s' % (cache_impl, str(self.AVAILABLE_CACHES))))
        return getattr(cachetools, cache_impl)(maxsize, **kwargs)

    def __iter__(self):
        return self.cache.__iter__()

    def __getitem__(self, item):
        return self.cache[item]

    def __setitem__(self, key, value):
        # Only Zenpy API objects may be cached.
        if (not issubclass(type(value), BaseObject)):
            raise ZenpyCacheException('{} is not a subclass of BaseObject!'.format(type(value)))
        self.cache[key] = value

    def __delitem__(self, key):
        del self.cache[key]

    def __contains__(self, item):
        return (item in self.cache)

    def __len__(self):
        return len(self.cache)
def _load_contract_schema(schema_path: str) -> Optional[Dict[(Any, Any)]]:
if (not os.path.isdir(schema_path)):
return None
schema = {}
for filename in os.listdir(schema_path):
if filename.endswith('.json'):
msg_name = os.path.splitext(os.path.basename(filename))[0]
full_path = os.path.join(schema_path, filename)
with open(full_path, 'r', encoding='utf-8') as msg_schema_file:
msg_schema = json.load(msg_schema_file)
schema[msg_name] = msg_schema
return schema |
class OptionPlotoptionsWindbarbSonificationContexttracksMappingPlaydelay(Options):
    """Play-delay mapping options for windbarb sonification context tracks.

    NOTE(review): duplicated getter/setter method names indicate stripped
    ``@property`` / ``@<name>.setter`` decorators; restored as properties.
    """

    @property
    def mapFunction(self):
        return self._config_get(None)

    @mapFunction.setter
    def mapFunction(self, value: Any):
        self._config(value, js_type=False)

    @property
    def mapTo(self):
        return self._config_get(None)

    @mapTo.setter
    def mapTo(self, text: str):
        self._config(text, js_type=False)

    @property
    def max(self):
        return self._config_get(None)

    @max.setter
    def max(self, num: float):
        self._config(num, js_type=False)

    @property
    def min(self):
        return self._config_get(None)

    @min.setter
    def min(self, num: float):
        self._config(num, js_type=False)

    @property
    def within(self):
        return self._config_get(None)

    @within.setter
    def within(self, value: Any):
        self._config(value, js_type=False)
def _discover_tests_in_directory_tree(src_path, bin_path) -> Dict[pathlib.Path, List[str]]:
    """Map each test binary under *bin_path* to the test functions found in
    the matching ``*.c`` source under *src_path*.

    A binary matches a source file when the binary's name ends with the
    source file's stem.
    """
    binaries = [entry for entry in bin_path.glob('**/*') if entry.is_file()]
    discovered: Dict[pathlib.Path, List[str]] = dict()
    for code_file in src_path.glob('*.c'):
        functions = _discover_test_functions_in_sample_code(code_file)
        if not functions:
            continue
        for binary in binaries:
            if binary.name.endswith(code_file.stem):
                discovered[binary] = functions
    return discovered
def upgrade():
    """Alembic migration: add the encrypted ``config_set`` column to applicationconfig."""
    # Build the same AES-GCM string-encryption codec the application uses,
    # so the server default below is a validly-encrypted empty JSON object.
    encryptor = sqlalchemy_utils.types.encrypted.encrypted_type.StringEncryptedType(JSONTypeOverride, CONFIG.security.app_encryption_key, AesGcmEngine, 'pkcs5')
    empty_obj = encryptor.process_bind_param({}, JSON)
    # Add the column NOT NULL with the encrypted-{} default so existing rows
    # pass the constraint, then drop the default so future inserts must
    # supply a value explicitly.
    op.add_column('applicationconfig', sa.Column('config_set', StringEncryptedType(), nullable=False, server_default=empty_obj))
    op.alter_column('applicationconfig', 'config_set', nullable=False, server_default=None)
    # Tighten the pre-existing api_set column to NOT NULL as well.
    op.alter_column('applicationconfig', 'api_set', nullable=False)
class OptionPlotoptionsFunnel3dSonificationContexttracksMappingLowpassFrequency(Options):
    """Lowpass-frequency mapping options for funnel3d sonification context tracks.

    NOTE(review): duplicated getter/setter method names indicate stripped
    ``@property`` / ``@<name>.setter`` decorators; restored as properties.
    """

    @property
    def mapFunction(self):
        return self._config_get(None)

    @mapFunction.setter
    def mapFunction(self, value: Any):
        self._config(value, js_type=False)

    @property
    def mapTo(self):
        return self._config_get(None)

    @mapTo.setter
    def mapTo(self, text: str):
        self._config(text, js_type=False)

    @property
    def max(self):
        return self._config_get(None)

    @max.setter
    def max(self, num: float):
        self._config(num, js_type=False)

    @property
    def min(self):
        return self._config_get(None)

    @min.setter
    def min(self, num: float):
        self._config(num, js_type=False)

    @property
    def within(self):
        return self._config_get(None)

    @within.setter
    def within(self, value: Any):
        self._config(value, js_type=False)
def visualize_ner(doc: Union[spacy.tokens.Doc, List[Dict[str, str]]], *, labels: Sequence[str]=tuple(), attrs: List[str]=NER_ATTRS, show_table: bool=True, title: Optional[str]='Named Entities', colors: Optional[Dict[str, str]]=None, key: Optional[str]=None, manual: bool=False, displacy_options: Optional[Dict]=None):
    """Render a spaCy NER visualization (displacy) inside a Streamlit app.

    Parameters: *doc* is a Doc, or a list of dicts in displacy "manual"
    format when ``manual=True``; *labels* restricts the selectable entity
    labels; *colors* maps labels to colors; *displacy_options* is passed
    through to displacy.render.

    Fixed: ``colors`` previously defaulted to a shared mutable ``{}``;
    ``label_select`` could be unbound (NameError) in the table branch when
    ``manual=True``.
    """
    if not displacy_options:
        displacy_options = dict()
    if colors:
        displacy_options['colors'] = colors
    if title:
        st.header(title)
    # Default the label filter so the table branch below is always defined,
    # even on the manual path which skips the label selector.
    label_select: List[str] = []
    if manual:
        if show_table:
            st.warning("When the parameter 'manual' is set to True, the parameter 'show_table' must be set to False.")
        if not isinstance(doc, list):
            st.warning("When the parameter 'manual' is set to True, the parameter 'doc' must be of type 'list', not 'spacy.tokens.Doc'.")
    else:
        labels = labels or list({ent.label_ for ent in doc.ents})
        if not labels:
            st.warning("The parameter 'labels' should not be empty or None.")
        else:
            exp = st.expander('Select entity labels')
            label_select = exp.multiselect('Entity labels', options=labels, default=list(labels), key=f'{key}_ner_label_select')
            displacy_options['ents'] = label_select
    html = displacy.render(doc, style='ent', options=displacy_options, manual=manual)
    # Newlines in the rendered HTML would otherwise break the entity marks.
    style = '<style>mark.entity { display: inline-block }</style>'
    st.write(f'{style}{get_html(html)}', unsafe_allow_html=True)
    if show_table:
        data = [[str(getattr(ent, attr)) for attr in attrs] for ent in doc.ents if (ent.label_ in label_select)]
        if data:
            df = pd.DataFrame(data, columns=attrs)
            st.dataframe(df)
class OptionPlotoptionsHeatmapTooltipDatetimelabelformats(Options):
    """Datetime label formats for heatmap tooltips, one per time unit.

    NOTE(review): duplicated getter/setter method names indicate stripped
    ``@property`` / ``@<name>.setter`` decorators; restored as properties.
    """

    @property
    def day(self):
        return self._config_get('%A, %e %b %Y')

    @day.setter
    def day(self, text: str):
        self._config(text, js_type=False)

    @property
    def hour(self):
        return self._config_get('%A, %e %b, %H:%M')

    @hour.setter
    def hour(self, text: str):
        self._config(text, js_type=False)

    @property
    def millisecond(self):
        return self._config_get('%A, %e %b, %H:%M:%S.%L')

    @millisecond.setter
    def millisecond(self, text: str):
        self._config(text, js_type=False)

    @property
    def minute(self):
        return self._config_get('%A, %e %b, %H:%M')

    @minute.setter
    def minute(self, text: str):
        self._config(text, js_type=False)

    @property
    def month(self):
        return self._config_get('%B %Y')

    @month.setter
    def month(self, text: str):
        self._config(text, js_type=False)

    @property
    def second(self):
        return self._config_get('%A, %e %b, %H:%M:%S')

    @second.setter
    def second(self, text: str):
        self._config(text, js_type=False)

    @property
    def week(self):
        return self._config_get('Week from %A, %e %b %Y')

    @week.setter
    def week(self, text: str):
        self._config(text, js_type=False)

    @property
    def year(self):
        return self._config_get('%Y')

    @year.setter
    def year(self, text: str):
        self._config(text, js_type=False)
# NOTE(review): the decorator lines above this command were mangled in the
# source ("()", "('type_', ...)", "_option()", "_context", "_aea_project").
# Restored below to the standard AEA CLI decorator stack — confirm the
# helper names (password_option / check_aea_project) against the project's
# CLI utilities before merging.
@click.command()
@click.argument('type_', metavar='TYPE', type=click.Choice(list(ledger_apis_registry.supported_ids)), required=True)
@password_option()
@click.pass_context
@check_aea_project
def get_wealth(click_context: click.Context, type_: str, password: Optional[str]) -> None:
    """Print the wealth of the agent's key on the given ledger TYPE."""
    ctx = cast(Context, click_context.obj)
    wealth = _try_get_wealth(ctx, type_, password)
    click.echo(wealth)
class TestDataAvailabilityAssetBasedNotificationRuleOnIngress():
    """End-to-end check that the data-availability rule fires on asset ingress."""

    def test_data_availability_asset(self, fledge_url, add_south, skip_verify_north_interface, wait_time, retries):
        """Re-point the rule at the south asset and expect new NTFSN audit entries."""
        audit_url = '/fledge/audit?source=NTFSN'
        before = utils.get_request(fledge_url, audit_url)
        # Configure the rule by asset code only (clear the audit code).
        payload = {'auditCode': '', 'assetCode': SOUTH_ASSET_NAME}
        utils.put_request(fledge_url, urllib.parse.quote('/fledge/category/ruletest #1'), payload)
        time.sleep(wait_time)
        after = utils.get_request(fledge_url, audit_url)
        assert len(after['audit']) > len(before['audit']), 'ERROR: NTFSN not triggered properly with asset code'
def parse_tax_scope(tax_scope):
    """Resolve *tax_scope* (a file path, a named scope file, a comma list of
    tax IDs/names, None, or the literal 'none') into a list of tax IDs,
    or None when no filtering is requested.

    Raises EmapperException for unrecognized tax IDs/names.
    """
    # Guard first: os.path.exists(None) raises TypeError, so the original
    # None-handling branch further down could never be reached for None.
    if tax_scope is None:
        return None
    tax_scope_ids = None
    if os.path.exists(tax_scope) and os.path.isfile(tax_scope):
        # Explicit path to a scope file.
        tax_scope_ids = parse_tax_scope_file(tax_scope)
    else:
        # Named scope file shipped with the tool.
        tax_scope_file = os.path.join(get_tax_scopes_path(), tax_scope)
        if os.path.exists(tax_scope_file) and os.path.isfile(tax_scope_file):
            tax_scope_ids = parse_tax_scope_file(tax_scope_file)
        elif tax_scope != 'none':
            # Comma-separated list of IDs/names.
            tax_scope_ids = tax_scope.strip().split(',')
        else:
            # 'none' explicitly disables tax-scope filtering.
            tax_scope_ids = None
    if tax_scope_ids is not None and len(tax_scope_ids) > 0:
        # Normalize names to IDs, validating every entry.
        tax_scope_ids_int = []
        for tax_id in tax_scope_ids:
            if tax_id in LEVEL_NAMES:
                tax_scope_ids_int.append(tax_id)
            elif tax_id in LEVEL_DICT:
                tax_scope_ids_int.append(LEVEL_DICT[tax_id])
            else:
                raise EmapperException(f"Unrecognized tax ID, tax name or tax_scope mode: '{tax_id}'.")
        tax_scope_ids = tax_scope_ids_int
    return tax_scope_ids
def write_protect_efuse(esp, efuses, args):
    """Permanently write-protect each efuse named in args.efuse_name, burn,
    then verify every requested efuse really became read-only."""
    util.check_duplicate_name_in_list(args.efuse_name)
    for efuse_name in args.efuse_name:
        efuse = efuses[efuse_name]
        if not efuse.is_writeable():
            print('Efuse %s is already write protected' % efuse.name)
            continue
        # One write-disable bit can guard several efuses; report all of them.
        disabled_together = [e for e in efuses if e.write_disable_bit == efuse.write_disable_bit]
        names = ', '.join(e.name for e in disabled_together)
        plural = 's' if len(disabled_together) > 1 else ''
        print('Permanently write-disabling efuse%s %s' % (plural, names))
        efuse.disable_write()
    if not efuses.burn_all(check_batch_mode=True):
        return
    # Verify the burn took effect on every requested efuse.
    print('Checking efuses...')
    raise_error = False
    for efuse_name in args.efuse_name:
        efuse = efuses[efuse_name]
        if efuse.is_writeable():
            print('Efuse %s is not write-protected.' % efuse.name)
            raise_error = True
    if raise_error:
        raise esptool.FatalError('The burn was not successful.')
    print('Successful')
def test_emitter_python_only():
    """The emitter decorator accepts only Python callables and the resulting
    attribute is read-only and has a recognizable repr."""
    obj = MyObject()
    # Non-callables are rejected outright.
    with raises(TypeError):
        event.emitter(3)
    # Builtins are rejected too — except on PyPy, where builtins are plain
    # functions, so that check is skipped there.
    if '__pypy__' not in sys.builtin_module_names:
        with raises(TypeError):
            event.emitter(isinstance)
    assert isinstance(obj.foo, event._emitter.Emitter)
    # The emitter attribute can be neither reassigned nor deleted.
    with raises(AttributeError):
        obj.foo = 3
    with raises(AttributeError):
        del obj.foo
    assert 'emitter' in repr(obj.__class__.foo).lower()
    assert 'emitter' in repr(obj.foo).lower()
    assert 'foo' in repr(obj.foo)
def test_pm_get_all_package_versions(loaded_sol_registry, w3):
    """Every registered release of each package is returned, in order."""
    registry, _, _ = loaded_sol_registry
    w3.pm.registry = registry
    expected = {
        'package': (('1.0.0', 'ipfs://Qme4otpS88NV8yQi8TfTP89EsQC5bko3F5N1yhRoi6cwGV'), ('1.0.1', 'ipfs://Qme4otpS88NV8yQi8TfTP89EsQC5bko3F5N1yhRoi6cwGW'), ('1.0.2', 'ipfs://Qme4otpS88NV8yQi8TfTP89EsQC5bko3F5N1yhRoi6cwGX'), ('1.0.3', 'ipfs://Qme4otpS88NV8yQi8TfTP89EsQC5bko3F5N1yhRoi6cwGJ'), ('1.0.4', 'ipfs://Qme4otpS88NV8yQi8TfTP89EsQC5bko3F5N1yhRoi6cwGK'), ('1.0.5', 'ipfs://Qme4otpS88NV8yQi8TfTP89EsQC5bko3F5N1yhRoi6cwGH')),
        'package1': (('1.0.1', 'ipfs://Qme4otpS88NV8yQi8TfTP89EsQC5bko3F5N1yhRoi6cwGZ'),),
        'package2': (('1.0.1', 'ipfs://Qme4otpS88NV8yQi8TfTP89EsQC5bko3F5N1yhRoi6cwGT'),),
    }
    for pkg_name, releases in expected.items():
        assert w3.pm.get_all_package_releases(pkg_name) == releases
def find_elb_dns_zone_id(name='', env='dev', region='us-east-1'):
    """Return the canonical hosted-zone ID of the named classic ELB."""
    LOG.info('Find %s ELB DNS Zone ID in %s [%s].', name, env, region)
    session = boto3.Session(profile_name=env)
    elb_client = session.client('elb', region_name=region)
    descriptions = elb_client.describe_load_balancers(LoadBalancerNames=[name])
    return descriptions['LoadBalancerDescriptions'][0]['CanonicalHostedZoneNameID']
def save_composable_template(ecs_version, component_names, out_dir, mapping_settings_file, template_settings_file):
    """Render the composable index template and write it under *out_dir*."""
    mappings_section = mapping_settings(mapping_settings_file)
    template = template_settings(ecs_version, mappings_section, template_settings_file, component_names=component_names)
    save_json(join(out_dir, 'elasticsearch/composable/template.json'), template)
def read_results(filename):
    """Load a results file (plain ``.json`` or gzipped ``.json.gz``).

    Raises NotImplementedError for any other extension or for a schema
    version other than '1.0'.
    """
    if filename.endswith('.json.gz'):
        opener = gzip.open
    elif filename.endswith('.json'):
        opener = open
    else:
        raise NotImplementedError(filename)
    with opener(filename) as infile:
        results = json.load(infile)
    if results['version'] != '1.0':
        raise NotImplementedError(results['version'])
    return results
class Response(SimpleTemplateResponse):
    """DRF-style HTTP response whose content is rendered lazily by the
    negotiated renderer.

    NOTE(review): ``rendered_content`` and ``status_text`` are accessed as
    attributes by the template-response machinery (and are properties in
    upstream DRF); the ``@property`` decorators were evidently stripped
    and are restored here.
    """

    def __init__(self, data=None, status=None, template_name=None, headers=None, exception=False, content_type=None, request=None):
        """Store the unrendered *data*; actual rendering happens in rendered_content."""
        super(Response, self).__init__(None, status=status)
        self.data = data
        self.template_name = template_name
        self.exception = exception
        self.content_type = content_type
        if headers:
            for (name, value) in six.iteritems(headers):
                self[name] = value
        if request:
            self.set_response_headers(request)

    @property
    def rendered_content(self):
        """Render self.data with the negotiated renderer; sets Content-Type."""
        renderer = getattr(self, 'accepted_renderer', None)
        accepted_media_type = getattr(self, 'accepted_media_type', None)
        context = getattr(self, 'renderer_context', None)
        assert renderer, '.accepted_renderer not set on Response'
        assert accepted_media_type, '.accepted_media_type not set on Response'
        assert context, '.renderer_context not set on Response'
        context['response'] = self
        media_type = renderer.media_type
        charset = renderer.charset
        content_type = self.content_type
        if ((content_type is None) and (charset is not None)):
            content_type = '{0}; charset={1}'.format(media_type, charset)
        elif (content_type is None):
            content_type = media_type
        self['Content-Type'] = content_type
        ret = renderer.render(self.data, accepted_media_type, context)
        if isinstance(ret, six.text_type):
            assert charset, 'renderer returned unicode, and did not specify a charset value.'
            return bytes(ret.encode(charset))
        if (not ret):
            # Empty body: drop Content-Type entirely.
            del self['Content-Type']
        return ret

    @property
    def status_text(self):
        """Human-readable reason phrase for the status code ('' if unknown)."""
        return responses.get(self.status_code, '')

    def __getstate__(self):
        # Strip unpicklable / request-bound state before serialization.
        state = super(Response, self).__getstate__()
        for key in ('accepted_renderer', 'renderer_context', 'resolver_match', 'client', 'request', 'json', 'wsgi_request'):
            if (key in state):
                del state[key]
        state['_closable_objects'] = []
        return state

    def set_response_headers(self, request, **headers):
        """Attach version/user/datacenter diagnostic headers plus any extras."""
        self['es_version'] = __version__
        try:
            self['es_username'] = request.user.username
        except AttributeError:
            pass
        try:
            self['es_dc'] = request.dc.name
        except AttributeError:
            pass
        for (key, val) in headers.items():
            self[key] = val
class RequestListThread(QObject, threading.Thread):
    """Background thread that fetches parameter names from a ROS master and
    reports them through a Qt signal.
    """

    # (masteruri, result code, message, matching parameter names)
    parameter_list_signal = Signal(str, int, str, list)

    def __init__(self, masteruri, ns, parent=None):
        QObject.__init__(self)
        threading.Thread.__init__(self)
        self._masteruri = masteruri
        self._ns = ns
        # Daemonize so a hung XML-RPC call cannot block interpreter exit.
        # (Replaces setDaemon(True), deprecated since Python 3.10.)
        self.daemon = True

    def run(self):
        """Query the master for all parameter names below self._ns and emit them."""
        if not self._masteruri:
            return
        try:
            name = rospy.get_name()
            master = xmlrpcclient.ServerProxy(self._masteruri)
            (code, msg, params) = master.getParamNames(name)
            result = [p for p in params if p.startswith(self._ns)]
            self.parameter_list_signal.emit(self._masteruri, code, msg, result)
        except Exception:
            import traceback
            err_msg = ('Error while retrieve the parameter list from %s: %s' % (self._masteruri, traceback.format_exc(1)))
            rospy.logwarn(err_msg)
            # Negative code signals failure to the listeners.
            self.parameter_list_signal.emit(self._masteruri, (- 1), err_msg, [])
def field_mandatory_attributes(field: FieldDetails) -> None:
    """Raise ValueError when a (non-intermediate) field lacks a mandatory attribute."""
    if ecs_helpers.is_intermediate(field):
        return
    details = field['field_details']
    current_field_attributes: List[str] = sorted(details.keys())
    missing_attributes: List[str] = ecs_helpers.list_subtract(FIELD_MANDATORY_ATTRIBUTES, current_field_attributes)
    # Some attributes are mandatory only for specific field types.
    if details.get('type') == 'alias' and 'path' not in current_field_attributes:
        missing_attributes.append('path')
    if details.get('type') == 'scaled_float' and 'scaling_factor' not in current_field_attributes:
        missing_attributes.append('scaling_factor')
    if missing_attributes:
        msg: str = 'Field is missing the following mandatory attributes: {}.\nFound these: {}.\nField details: {}'
        raise ValueError(msg.format(', '.join(missing_attributes), current_field_attributes, field))
def process_value(setting_info, colors):
    """Encode *colors* (tuple/list, color string, rgbgradient dict/string)
    into the device's rgbgradientv2 byte payload.

    Validates stop count, ordering, and duration limits, then packs a
    header of per-stop ramp deltas followed by the start color, focal
    point, stop count and duration. Raises ValueError on invalid input.
    """
    # Device-specific layout limits from the setting descriptor.
    color_field_length = setting_info['rgbgradientv2_header']['color_field_length']
    duration_length = setting_info['rgbgradientv2_header']['duration_length']
    maxgradient = setting_info['rgbgradientv2_header']['maxgradient']
    duration = _default_duration
    gradient = []
    # Normalize the many accepted input shapes into a list of
    # {'pos': ..., 'color': ...} stops (plus an overall duration).
    if isinstance(colors, (tuple, list)):
        gradient = _handle_color_tuple(colors)
    elif (isinstance(colors, str) and is_color(colors)):
        gradient = _handle_color_string(colors)
    elif isinstance(colors, dict):
        (duration, gradient) = _handle_rgbgradient_dict(colors)
    elif is_rgbgradient(colors)[0]:
        (duration, gradient) = _handle_rgbgradient_string(colors)
    else:
        raise ValueError(('Not a valid color or rgbgradient %s' % str(colors)))
    if (len(gradient) == 0):
        raise ValueError(('no color: %s' % str(colors)))
    gradient_length = len(gradient)
    if (gradient_length > maxgradient):
        raise ValueError(('a maximum of %i color stops are allowed' % maxgradient))
    # Each stop needs at least ~33.3 ms, otherwise per-stop time rounds to 0.
    minimum_duration = int((gradient_length * 33.3))
    if (duration < minimum_duration):
        raise ValueError(('a duration of %i or above is need for %i gradient' % (minimum_duration, gradient_length)))
    if (duration > 30000):
        raise ValueError('a maximum duration of 30000ms is allowed')
    # Fixed per-LED preamble; meaning of the magic bytes is device-defined.
    start_header = [29, 1, 2, 49, 81, 255, 200, 0]
    header = merge_bytes(setting_info['led_id'], start_header)
    # The first stop supplies the starting color; the remaining stops are
    # encoded as ramps (deltas per 16ms tick) relative to the previous one.
    last_real_pos = gradient[0]['pos']
    start_color = gradient[0]['color']
    del gradient[0]
    index = 0
    stage = []
    oldcolor = list(start_color)
    for (pos, color) in [(item['pos'], item['color']) for item in gradient]:
        # Positions must be strictly increasing.
        if (pos <= last_real_pos):
            raise ValueError('Incorrect order for gradient or duplicate order found please check position order')
        stage.append(index)
        stage.append(0)
        # Time allotted to this segment, proportional to its position span.
        time = int(((duration / 100) * (pos - last_real_pos)))
        last_real_pos = pos
        if (time == 0):
            raise ValueError('Incompatble timings set, please set different timings')
        rgb_index = 0
        for rgb in color:
            # Per-channel ramp: color delta scaled to 16ms steps, low byte only.
            diff = (rgb - oldcolor[rgb_index])
            ramp = int(((diff / float(time)) * 16))
            oldcolor[rgb_index] = rgb
            stage = merge_bytes(stage, (ramp & 255))
            rgb_index = (rgb_index + 1)
        stage.append(0)
        # Segment duration, little-endian 16-bit.
        time = uint_to_little_endian_bytearray(time, 2)
        stage = merge_bytes(stage, time)
        index = (index + 1)
    header = merge_bytes(header, stage)
    # Zero-pad the stage area to the fixed color-field size.
    padding = ([0] * (color_field_length - len(header)))
    header = merge_bytes(header, padding)
    # Start color packed as split nibbles: (low<<4, high) per channel.
    split_color = []
    for i in range(len(start_color)):
        (high, low) = bytes_to_high_low_nibbles(start_color[i])
        left_byte = nibbles_to_byte(low, 0)
        right_byte = nibbles_to_byte(0, high)
        split_color.append(left_byte)
        split_color.append(right_byte)
    end_suffix = [255, 0]
    # Fixed focal point (1500, 650) — device-defined; meaning unverified.
    focal_x = uint_to_little_endian_bytearray(1500, 2)
    focal_y = uint_to_little_endian_bytearray(650, 2)
    end_suffix2 = [0, 0, 0, 0, 1, 0]
    # Stop count excludes the start color.
    num_color = uint_to_little_endian_bytearray((gradient_length - 1), 2)
    duration = uint_to_little_endian_bytearray(duration, duration_length)
    suffix = merge_bytes(split_color, end_suffix, focal_x, focal_y, end_suffix2, num_color, duration)
    return merge_bytes(header, suffix)
class OptionSeriesHistogramSonificationTracksPointgrouping(Options):
    """Point-grouping options for histogram sonification tracks.

    NOTE(review): duplicated getter/setter method names indicate stripped
    ``@property`` / ``@<name>.setter`` decorators; restored as properties.
    """

    @property
    def algorithm(self):
        return self._config_get('minmax')

    @algorithm.setter
    def algorithm(self, text: str):
        self._config(text, js_type=False)

    @property
    def enabled(self):
        return self._config_get(True)

    @enabled.setter
    def enabled(self, flag: bool):
        self._config(flag, js_type=False)

    @property
    def groupTimespan(self):
        return self._config_get(15)

    @groupTimespan.setter
    def groupTimespan(self, num: float):
        self._config(num, js_type=False)

    @property
    def prop(self):
        return self._config_get('y')

    @prop.setter
    def prop(self, text: str):
        self._config(text, js_type=False)
class TestTicketValidation(OpenEventTestCase):
    """Schema-level validation tests for tickets."""

    def test_date_db_populate(self):
        """validate_date must fall back to DB values when the payload is empty."""
        with self.app.test_request_context():
            schema = TicketSchema()
            # Persist a ticket with id 1 for validate_date to look up.
            TicketFactory()
            original_data = {'data': {'id': 1}}
            # Empty incoming data: dates must be populated from the database.
            TicketSchema.validate_date(schema, {}, original_data)
class OptionSeriesVariwideSonificationContexttracksMappingPitch(Options):
    """Pitch mapping options for variwide sonification context tracks.

    NOTE(review): duplicated getter/setter method names indicate stripped
    ``@property`` / ``@<name>.setter`` decorators; restored as properties.
    """

    @property
    def mapFunction(self):
        return self._config_get(None)

    @mapFunction.setter
    def mapFunction(self, value: Any):
        self._config(value, js_type=False)

    @property
    def mapTo(self):
        return self._config_get('y')

    @mapTo.setter
    def mapTo(self, text: str):
        self._config(text, js_type=False)

    @property
    def max(self):
        return self._config_get('c6')

    @max.setter
    def max(self, text: str):
        self._config(text, js_type=False)

    @property
    def min(self):
        return self._config_get('c2')

    @min.setter
    def min(self, text: str):
        self._config(text, js_type=False)

    @property
    def scale(self):
        return self._config_get(None)

    @scale.setter
    def scale(self, value: Any):
        self._config(value, js_type=False)

    @property
    def within(self):
        return self._config_get('yAxis')

    @within.setter
    def within(self, text: str):
        self._config(text, js_type=False)
class Remote(object):
    """Wrapper around a libgit2 ``git_remote`` exposed through CFFI.

    NOTE(review): several artifacts indicate decorators were stripped from
    this source: bare ``_url.setter`` / ``_refspecs.setter`` expressions
    (which raise NameError when the class body executes as written), bare
    string expressions such as ``('git_transfer_progress_cb')`` before the
    callback functions (presumably ``@ffi.callback(...)`` upstream), and
    duplicated method names for getter/setter pairs (presumably
    ``@property``). Confirm against upstream pygit2 before executing.
    """

    def sideband_progress(self, string):
        # Sideband progress callback hook; subclasses may override.
        pass

    def credentials(self, url, username_from_url, allowed_types):
        # Credential-request callback hook; subclasses may override.
        pass

    def transfer_progress(self, stats):
        # Transfer-progress callback hook; subclasses may override.
        pass

    def update_tips(self, refname, old, new):
        """Tip-update callback hook; original body lost in extraction."""

    def push_update_reference(self, refname, message):
        """Push-update callback hook; original body lost in extraction."""

    def __init__(self, repo, ptr):
        # Keep the owning repository and the raw git_remote pointer;
        # _stored_exception carries errors raised inside C callbacks.
        self._repo = repo
        self._remote = ptr
        self._stored_exception = None

    def __del__(self):
        C.git_remote_free(self._remote)

    def name(self):
        # Remote name, or None. NOTE(review): presumably @property upstream.
        return maybe_string(C.git_remote_name(self._remote))

    def url(self):
        # Fetch URL. Shadowed by the setter below (stripped decorator).
        return maybe_string(C.git_remote_url(self._remote))

    def url(self, value):
        err = C.git_remote_set_url(self._remote, to_bytes(value))
        check_error(err)

    def push_url(self):
        return maybe_string(C.git_remote_pushurl(self._remote))

    # NOTE(review): looks like a stripped '@push_url.setter'; as written this
    # bare expression raises NameError ('_url' is undefined) — see class note.
    _url.setter

    def push_url(self, value):
        err = C.git_remote_set_pushurl(self._remote, to_bytes(value))
        check_error(err)

    def save(self):
        # Persist remote configuration to the repository config.
        err = C.git_remote_save(self._remote)
        check_error(err)

    def fetch(self, signature=None, message=None):
        """Fetch from the remote and return a TransferProgress summary.

        Installs C-level callbacks whose payload is a handle to ``self`` so
        the static callback shims can recover this object; any Python
        exception raised inside a callback is stashed in
        ``_stored_exception`` and re-raised here.
        """
        defaultcallbacks = ffi.new('git_remote_callbacks *')
        err = C.git_remote_init_callbacks(defaultcallbacks, 1)
        check_error(err)
        callbacks = ffi.new('git_remote_callbacks *')
        callbacks.version = 1
        callbacks.sideband_progress = self._sideband_progress_cb
        callbacks.transfer_progress = self._transfer_progress_cb
        callbacks.update_tips = self._update_tips_cb
        callbacks.credentials = self._credentials_cb
        # Keep the handle alive on self for the duration of the C call.
        self._self_handle = ffi.new_handle(self)
        callbacks.payload = self._self_handle
        err = C.git_remote_set_callbacks(self._remote, callbacks)
        try:
            check_error(err)
        except:
            self._self_handle = None
            raise
        if signature:
            ptr = signature._pointer[:]
        else:
            ptr = ffi.NULL
        self._stored_exception = None
        try:
            err = C.git_remote_fetch(self._remote, ffi.NULL, ptr, to_bytes(message))
            # Prefer the Python exception captured in a callback over the C error.
            if self._stored_exception:
                raise self._stored_exception
            check_error(err)
        finally:
            # Always drop the handle and restore default callbacks.
            self._self_handle = None
            err = C.git_remote_set_callbacks(self._remote, defaultcallbacks)
            check_error(err)
        return TransferProgress(C.git_remote_stats(self._remote))

    def refspec_count(self):
        # Number of refspecs configured on this remote.
        return C.git_remote_refspec_count(self._remote)

    def get_refspec(self, n):
        # Wrap the n-th refspec in a Refspec object.
        spec = C.git_remote_get_refspec(self._remote, n)
        return Refspec(self, spec)

    def fetch_refspecs(self):
        specs = ffi.new('git_strarray *')
        err = C.git_remote_get_fetch_refspecs(specs, self._remote)
        check_error(err)
        return strarray_to_strings(specs)

    # NOTE(review): looks like a stripped '@fetch_refspecs.setter'; bare
    # expression raises NameError as written — see class note.
    _refspecs.setter

    def fetch_refspecs(self, l):
        (arr, refs) = strings_to_strarray(l)
        err = C.git_remote_set_fetch_refspecs(self._remote, arr)
        check_error(err)

    def push_refspecs(self):
        specs = ffi.new('git_strarray *')
        err = C.git_remote_get_push_refspecs(specs, self._remote)
        check_error(err)
        return strarray_to_strings(specs)

    # NOTE(review): looks like a stripped '@push_refspecs.setter' — see class note.
    _refspecs.setter

    def push_refspecs(self, l):
        (arr, refs) = strings_to_strarray(l)
        err = C.git_remote_set_push_refspecs(self._remote, arr)
        check_error(err)

    def add_fetch(self, spec):
        # Append a fetch refspec to the remote's configuration.
        err = C.git_remote_add_fetch(self._remote, to_bytes(spec))
        check_error(err)

    def add_push(self, spec):
        # Append a push refspec to the remote's configuration.
        err = C.git_remote_add_push(self._remote, to_bytes(spec))
        check_error(err)

    def push(self, specs, signature=None, message=None):
        """Push the given refspecs, mirroring fetch()'s callback handling."""
        defaultcallbacks = ffi.new('git_remote_callbacks *')
        err = C.git_remote_init_callbacks(defaultcallbacks, 1)
        check_error(err)
        (refspecs, refspecs_refs) = strings_to_strarray(specs)
        if signature:
            # Copy the signature struct into freshly allocated C memory.
            sig_cptr = ffi.new('git_signature **')
            ffi.buffer(sig_cptr)[:] = signature._pointer[:]
            sig_ptr = sig_cptr[0]
        else:
            sig_ptr = ffi.NULL
        callbacks = ffi.new('git_remote_callbacks *')
        callbacks.version = 1
        callbacks.sideband_progress = self._sideband_progress_cb
        callbacks.transfer_progress = self._transfer_progress_cb
        callbacks.update_tips = self._update_tips_cb
        callbacks.credentials = self._credentials_cb
        callbacks.push_update_reference = self._push_update_reference_cb
        self._self_handle = ffi.new_handle(self)
        callbacks.payload = self._self_handle
        try:
            err = C.git_remote_set_callbacks(self._remote, callbacks)
            check_error(err)
        except:
            self._self_handle = None
            raise
        try:
            err = C.git_remote_push(self._remote, refspecs, ffi.NULL, sig_ptr, to_bytes(message))
            check_error(err)
        finally:
            self._self_handle = None

    # NOTE(review): the bare string below is presumably a stripped
    # "@ffi.callback('git_transfer_progress_cb')"; the callback functions
    # below also lack 'self', consistent with ffi.callback static shims.
    ('git_transfer_progress_cb')

    def _transfer_progress_cb(stats_ptr, data):
        # Recover the Remote instance from the C payload handle.
        self = ffi.from_handle(data)
        if ((not hasattr(self, 'transfer_progress')) or (not self.transfer_progress)):
            return 0
        try:
            self.transfer_progress(TransferProgress(stats_ptr))
        except Exception as e:
            # Stash the exception; C code only sees an error return code.
            self._stored_exception = e
            return C.GIT_EUSER
        return 0

    # NOTE(review): presumably a stripped "@ffi.callback(...)" — see above.
    ('git_transport_message_cb')

    def _sideband_progress_cb(string, length, data):
        self = ffi.from_handle(data)
        if ((not hasattr(self, 'progress')) or (not self.progress)):
            return 0
        try:
            s = ffi.string(string, length).decode()
            self.progress(s)
        except Exception as e:
            self._stored_exception = e
            return C.GIT_EUSER
        return 0

    # NOTE(review): presumably a stripped "@ffi.callback(...)" — see above.
    ('int (*update_tips)(const char *refname, const git_oid *a,const git_oid *b, void *data)')

    def _update_tips_cb(refname, a, b, data):
        self = ffi.from_handle(data)
        if ((not hasattr(self, 'update_tips')) or (not self.update_tips)):
            return 0
        try:
            # Convert C oids to Python Oid objects before dispatch.
            s = maybe_string(refname)
            a = Oid(raw=bytes(ffi.buffer(a)[:]))
            b = Oid(raw=bytes(ffi.buffer(b)[:]))
            self.update_tips(s, a, b)
        except Exception as e:
            self._stored_exception = e
            return C.GIT_EUSER
        return 0

    # NOTE(review): presumably a stripped "@ffi.callback(...)" — see above.
    ('int (*push_update_reference)(const char *ref, const char *msg, void *data)')

    def _push_update_reference_cb(ref, msg, data):
        self = ffi.from_handle(data)
        if ((not hasattr(self, 'push_update_reference')) or (not self.push_update_reference)):
            return 0
        try:
            refname = ffi.string(ref)
            message = maybe_string(msg)
            self.push_update_reference(refname, message)
        except Exception as e:
            self._stored_exception = e
            return C.GIT_EUSER
        return 0

    # NOTE(review): presumably a stripped "@ffi.callback(...)" — see above.
    ('int (*credentials)(git_cred **cred, const char *url,const char *username_from_url, unsigned int allowed_types,void *data)')

    def _credentials_cb(cred_out, url, username, allowed, data):
        self = ffi.from_handle(data)
        if ((not hasattr(self, 'credentials')) or (not self.credentials)):
            return 0
        try:
            ccred = get_credentials(self.credentials, url, username, allowed)
            cred_out[0] = ccred[0]
        except Exception as e:
            self._stored_exception = e
            return C.GIT_EUSER
        return 0
class Segment():
    """One splittable region of a ROM: type, rom/vram ranges, symbols, output.

    NOTE(review): many methods lack ``self``/``cls`` yet are called as
    ``Segment.x(...)`` or read as attributes elsewhere (``self.parent.dir``,
    ``self.size``, ``self.seg_symbols``), which strongly suggests
    ``@staticmethod`` / ``@classmethod`` / ``@property`` decorators were
    stripped from this source — confirm against the upstream project before
    executing this file as-is.
    """

    # Whether segment names must be unique across the project.
    require_unique_name = True

    def get_class_for_type(seg_type) -> Type['Segment']:
        # A leading '.' marks a section-like variant; strip it for lookup.
        if seg_type.startswith('.'):
            seg_type = seg_type[1:]
        segment_class = Segment.get_base_segment_class(seg_type)
        if (segment_class == None):
            # Fall back to user-supplied extension segment classes.
            segment_class = Segment.get_extension_segment_class(seg_type)
        return segment_class

    def get_base_segment_class(seg_type):
        # Prefer a platform-specific implementation, then the common one.
        platform = options.opts.platform
        is_platform_seg = False
        try:
            segmodule = importlib.import_module(f'.segtypes.{platform}.{seg_type}', package=__package_name__)
            is_platform_seg = True
        except ModuleNotFoundError:
            try:
                segmodule = importlib.import_module(f'.segtypes.common.{seg_type}', package=__package_name__)
            except ModuleNotFoundError:
                return None
        # Class naming convention: <Platform|Common>Seg<Type>.
        seg_prefix = (platform.capitalize() if is_platform_seg else 'Common')
        return getattr(segmodule, f'{seg_prefix}Seg{seg_type.capitalize()}')

    def get_extension_segment_class(seg_type):
        # Load a user extension module from the configured extensions path.
        platform = options.opts.platform
        ext_path = options.opts.extensions_path
        if (not ext_path):
            log.error(f"could not load presumed extended segment type '{seg_type}' because no extensions path is configured")
        assert (ext_path is not None)
        try:
            ext_spec = importlib.util.spec_from_file_location(f'{__package_name__}.segtypes.{platform}.{seg_type}', (ext_path / f'{seg_type}.py'))
            assert (ext_spec is not None)
            ext_mod = importlib.util.module_from_spec(ext_spec)
            assert (ext_spec.loader is not None)
            ext_spec.loader.exec_module(ext_mod)
        except Exception as err:
            log.write(err, status='error')
            log.error(f'''could not load segment type '{seg_type}'
(hint: confirm your extension directory is configured correctly)''')
        return getattr(ext_mod, f'{platform.upper()}Seg{seg_type[0].upper()}{seg_type[1:]}')

    # --- yaml field parsers: accept either dict-form or list-form entries ---

    def parse_segment_start(segment: Union[(dict, list)]) -> Optional[int]:
        if isinstance(segment, dict):
            s = segment.get('start', 'auto')
        else:
            s = segment[0]
        # 'auto' and '...' both mean "no explicit start".
        if (s == 'auto'):
            return None
        elif (s == '...'):
            return None
        else:
            return int(s)

    def parse_segment_type(segment: Union[(dict, list)]) -> str:
        if isinstance(segment, dict):
            return str(segment['type'])
        else:
            # List form: [start, type, ...].
            return str(segment[1])

    def parse_segment_name(cls, rom_start, segment: Union[(dict, list)]) -> str:
        # Priority: explicit 'name', then 'dir', then positional, then default.
        if (isinstance(segment, dict) and ('name' in segment)):
            return str(segment['name'])
        elif (isinstance(segment, dict) and ('dir' in segment)):
            return str(segment['dir'])
        elif (isinstance(segment, list) and (len(segment) >= 3)):
            return str(segment[2])
        else:
            return str(cls.get_default_name(rom_start))

    def parse_segment_symbol_name_format(segment: Union[(dict, list)]) -> str:
        if (isinstance(segment, dict) and ('symbol_name_format' in segment)):
            return str(segment['symbol_name_format'])
        else:
            return options.opts.symbol_name_format

    def parse_segment_symbol_name_format_no_rom(segment: Union[(dict, list)]) -> str:
        if (isinstance(segment, dict) and ('symbol_name_format_no_rom' in segment)):
            return str(segment['symbol_name_format_no_rom'])
        else:
            return options.opts.symbol_name_format_no_rom

    def parse_segment_file_path(segment: Union[(dict, list)]) -> Optional[Path]:
        if (isinstance(segment, dict) and ('path' in segment)):
            return Path(segment['path'])
        return None

    def parse_segment_bss_contains_common(segment: Union[(dict, list)]) -> bool:
        if (isinstance(segment, dict) and ('bss_contains_common' in segment)):
            return bool(segment['bss_contains_common'])
        else:
            return False

    def parse_linker_section_order(yaml: Union[(dict, list)]) -> Optional[str]:
        if (isinstance(yaml, dict) and ('linker_section_order' in yaml)):
            return str(yaml['linker_section_order'])
        return None

    def parse_linker_section(yaml: Union[(dict, list)]) -> Optional[str]:
        if (isinstance(yaml, dict) and ('linker_section' in yaml)):
            return str(yaml['linker_section'])
        return None

    def parse_ld_fill_value(yaml: Union[(dict, list)], default: Optional[int]) -> Optional[int]:
        if (isinstance(yaml, dict) and ('ld_fill_value' in yaml)):
            return yaml['ld_fill_value']
        return default

    def __init__(self, rom_start: Optional[int], rom_end: Optional[int], type: str, name: str, vram_start: Optional[int], args: list, yaml):
        """Initialize from parsed yaml fields; most attributes take option defaults."""
        self.rom_start = rom_start
        self.rom_end = rom_end
        self.type = type
        self.name = name
        self.vram_start: Optional[int] = vram_start
        self.align: Optional[int] = None
        self.given_subalign: int = options.opts.subalign
        self.exclusive_ram_id: Optional[str] = None
        self.given_dir: Path = Path()
        self.given_find_file_boundaries: Optional[bool] = None
        # Symbols owned by this segment, keyed by vram address.
        self.given_seg_symbols: Dict[(int, List[Symbol])] = {}
        # Interval trees for range lookups of sized symbols.
        self.symbol_ranges_ram: IntervalTree = IntervalTree()
        self.symbol_ranges_rom: IntervalTree = IntervalTree()
        self.given_section_order: List[str] = options.opts.section_order
        self.vram_class: Optional[VramClass] = None
        self.given_follows_vram: Optional[str] = None
        self.given_vram_symbol: Optional[str] = None
        self.given_symbol_name_format: str = options.opts.symbol_name_format
        self.given_symbol_name_format_no_rom: str = options.opts.symbol_name_format_no_rom
        self.parent: Optional[Segment] = None
        self.sibling: Optional[Segment] = None
        self.data_sibling: Optional[Segment] = None
        self.rodata_sibling: Optional[Segment] = None
        self.file_path: Optional[Path] = None
        self.args: List[str] = args
        self.yaml = yaml
        self.extract: bool = True
        self.has_linker_entry: bool = True
        # Segments with no rom start, or '.'-prefixed types, are not extracted.
        if (self.rom_start is None):
            self.extract = False
        elif self.type.startswith('.'):
            self.extract = False
        self.warnings: List[str] = []
        self.did_run = False
        self.bss_contains_common = Segment.parse_segment_bss_contains_common(yaml)
        self.special_vram_segment: bool = False
        self.linker_section_order: Optional[str] = self.parse_linker_section_order(yaml)
        self.linker_section: Optional[str] = self.parse_linker_section(yaml)
        self.ld_fill_value: Optional[int] = self.parse_ld_fill_value(yaml, options.opts.ld_fill_value)
        # Sanity check: rom range must be well-ordered.
        if ((self.rom_start is not None) and (self.rom_end is not None)):
            if (self.rom_start > self.rom_end):
                log.error(f'Error: segments out of order - ({self.name} starts at 0x{self.rom_start:X}, but next segment starts at 0x{self.rom_end:X})')

    def from_yaml(cls: Type['Segment'], yaml: Union[(dict, list)], rom_start: Optional[int], rom_end: Optional[int], vram=None):
        """Alternate constructor from a yaml entry.

        NOTE(review): presumably a stripped @classmethod — confirm.
        """
        type = Segment.parse_segment_type(yaml)
        name = Segment.parse_segment_name(cls, rom_start, yaml)
        vram_class = parse_segment_vram_class(yaml)
        # vram priority: explicit argument > vram class > yaml field.
        if (vram is not None):
            vram_start = vram
        elif vram_class:
            vram_start = vram_class.vram
        else:
            vram_start = parse_segment_vram(yaml)
        # List form carries extra positional args after [start, type, name].
        args: List[str] = ([] if isinstance(yaml, dict) else yaml[3:])
        ret = cls(rom_start=rom_start, rom_end=rom_end, type=type, name=name, vram_start=vram_start, args=args, yaml=yaml)
        ret.given_section_order = parse_segment_section_order(yaml)
        ret.given_subalign = parse_segment_subalign(yaml)
        if isinstance(yaml, dict):
            ret.extract = bool(yaml.get('extract', ret.extract))
            ret.exclusive_ram_id = yaml.get('exclusive_ram_id')
            ret.given_dir = Path(yaml.get('dir', ''))
            ret.has_linker_entry = bool(yaml.get('linker_entry', True))
            ret.given_find_file_boundaries = yaml.get('find_file_boundaries', None)
        ret.given_symbol_name_format = Segment.parse_segment_symbol_name_format(yaml)
        ret.given_symbol_name_format_no_rom = Segment.parse_segment_symbol_name_format_no_rom(yaml)
        ret.file_path = Segment.parse_segment_file_path(yaml)
        ret.bss_contains_common = Segment.parse_segment_bss_contains_common(yaml)
        ret.given_follows_vram = parse_segment_follows_vram(yaml)
        ret.given_vram_symbol = parse_segment_vram_symbol(yaml)
        if vram_class:
            ret.vram_class = vram_class
            # A vram class is mutually exclusive with these per-segment fields.
            if ret.given_follows_vram:
                log.error(f'Error: segment {ret.name} has both a vram class and a follows_vram property')
            if ret.given_vram_symbol:
                log.error(f'Error: segment {ret.name} has both a vram class and a vram_symbol property')
        if (not ret.align):
            ret.align = parse_segment_align(yaml)
        return ret

    # --- classification hooks; subclasses override as appropriate ---

    def is_text() -> bool:
        return False

    def is_data() -> bool:
        return False

    def is_rodata() -> bool:
        return False

    def is_noload() -> bool:
        return False

    def estimate_size(yaml: Union[(Dict, List)]) -> Optional[int]:
        # Subclasses may estimate a byte size from yaml alone.
        return None

    def needs_symbols(self) -> bool:
        return False

    def dir(self) -> Path:
        # Effective output directory; nested under the parent's dir if any.
        # NOTE(review): used as 'self.parent.dir' — presumably @property.
        if self.parent:
            return (self.parent.dir / self.given_dir)
        else:
            return self.given_dir

    def show_file_boundaries(self) -> bool:
        # Per-segment override > parent > global option.
        if (self.given_find_file_boundaries is not None):
            return self.given_find_file_boundaries
        if (not self.parent):
            return options.opts.find_file_boundaries
        return self.parent.show_file_boundaries

    def symbol_name_format(self) -> str:
        return self.given_symbol_name_format

    def symbol_name_format_no_rom(self) -> str:
        return self.given_symbol_name_format_no_rom

    def subalign(self) -> int:
        # Children inherit the parent's subalign.
        if self.parent:
            return self.parent.subalign
        else:
            return self.given_subalign

    def vram_symbol(self) -> Optional[str]:
        if (self.vram_class and self.vram_class.vram_symbol):
            return self.vram_class.vram_symbol
        elif self.given_vram_symbol:
            return self.given_vram_symbol
        else:
            return None

    def get_exclusive_ram_id(self) -> Optional[str]:
        if self.parent:
            return self.parent.get_exclusive_ram_id()
        return self.exclusive_ram_id

    def add_symbol(self, symbol: Symbol):
        # Index by vram; sized symbols also go into the range trees.
        if (symbol.vram_start not in self.given_seg_symbols):
            self.given_seg_symbols[symbol.vram_start] = []
        self.given_seg_symbols[symbol.vram_start].append(symbol)
        if (symbol.size > 4):
            self.symbol_ranges_ram.addi(symbol.vram_start, symbol.vram_end, symbol)
            if (symbol.rom is not None):
                self.symbol_ranges_rom.addi(symbol.rom, symbol.rom_end, symbol)

    def seg_symbols(self) -> Dict[(int, List[Symbol])]:
        # Symbols are owned by the top-most parent.
        if self.parent:
            return self.parent.seg_symbols
        else:
            return self.given_seg_symbols

    def size(self) -> Optional[int]:
        # Rom byte size, when both ends are known.
        if ((self.rom_start is not None) and (self.rom_end is not None)):
            return (self.rom_end - self.rom_start)
        else:
            return None

    def vram_end(self) -> Optional[int]:
        # NOTE(review): reads 'self.size' as a value — presumably @property.
        if ((self.vram_start is not None) and (self.size is not None)):
            return (self.vram_start + self.size)
        else:
            return None

    def section_order(self) -> List[str]:
        return self.given_section_order

    def rodata_follows_data(self) -> bool:
        # True when .rodata comes immediately after .data in section order.
        if (('.rodata' not in self.section_order) or ('.data' not in self.section_order)):
            return False
        return ((self.section_order.index('.rodata') - self.section_order.index('.data')) == 1)

    def get_cname(self) -> str:
        # C-safe identifier, qualified by the parent name if nested.
        name = self.name
        if self.parent:
            name = ((self.parent.name + '_') + name)
        return to_cname(name)

    def contains_vram(self, vram: int) -> bool:
        # Half-open range check: [vram_start, vram_end).
        if ((self.vram_start is not None) and (self.vram_end is not None)):
            return ((vram >= self.vram_start) and (vram < self.vram_end))
        else:
            return False

    def contains_rom(self, rom: int) -> bool:
        # Half-open range check: [rom_start, rom_end).
        if ((self.rom_start is not None) and (self.rom_end is not None)):
            return ((rom >= self.rom_start) and (rom < self.rom_end))
        else:
            return False

    def rom_to_ram(self, rom_addr: int) -> Optional[int]:
        if ((self.vram_start is not None) and (self.rom_start is not None)):
            return ((self.vram_start + rom_addr) - self.rom_start)
        else:
            return None

    def ram_to_rom(self, ram_addr: int) -> Optional[int]:
        # One-past-the-end address is accepted as well.
        if ((not self.contains_vram(ram_addr)) and (ram_addr != self.vram_end)):
            return None
        if ((self.vram_start is not None) and (self.rom_start is not None)):
            return ((self.rom_start + ram_addr) - self.vram_start)
        else:
            return None

    def should_scan(self) -> bool:
        return self.should_split()

    def should_split(self) -> bool:
        return (self.extract and options.opts.is_mode_active(self.type))

    def scan(self, rom_bytes: bytes):
        # Pre-split analysis pass; subclasses override.
        pass

    def split(self, rom_bytes: bytes):
        # Extraction pass; subclasses override.
        pass

    def cache(self):
        # Cache key used to detect unchanged segments between runs.
        return (self.yaml, self.rom_end)

    def get_linker_section(self) -> str:
        return '.data'

    def get_linker_section_order(self) -> str:
        # Explicit override wins over the type default.
        if (self.linker_section_order is not None):
            return self.linker_section_order
        return self.get_linker_section()

    def get_linker_section_linksection(self) -> str:
        if (self.linker_section is not None):
            return self.linker_section
        return self.get_linker_section()

    def get_section_flags(self) -> Optional[str]:
        return None

    def out_path(self) -> Optional[Path]:
        # Output file path, if this segment produces a file.
        return None

    def get_most_parent(self) -> 'Segment':
        # Walk up to the root of the parent chain.
        seg = self
        while seg.parent:
            seg = seg.parent
        return seg

    def get_linker_entries(self) -> 'List[LinkerEntry]':
        from ..segtypes.linker_entry import LinkerEntry
        if (not self.has_linker_entry):
            return []
        path = self.out_path()
        if path:
            return [LinkerEntry(self, [path], path, self.get_linker_section_order(), self.get_linker_section_linksection(), self.is_noload())]
        else:
            return []

    def log(self, msg):
        if options.opts.verbose:
            log.write(f'{self.type} {self.name}: {msg}')

    def warn(self, msg: str):
        self.warnings.append(msg)

    def get_default_name(addr) -> str:
        # Default segment name: hex rom start without prefix.
        return f'{addr:X}'

    def is_name_default(self):
        return (self.name == self.get_default_name(self.rom_start))

    def unique_id(self):
        # Hierarchical id: parent ids joined with type and name.
        if self.parent:
            s = (self.parent.unique_id() + '_')
        else:
            s = ''
        return (((s + self.type) + '_') + self.name)

    def visible_ram(seg1: 'Segment', seg2: 'Segment') -> bool:
        # Segments see each other unless they carry different exclusive ids.
        if (seg1.get_most_parent() == seg2.get_most_parent()):
            return True
        if ((seg1.get_exclusive_ram_id() is None) or (seg2.get_exclusive_ram_id() is None)):
            return True
        return (seg1.get_exclusive_ram_id() != seg2.get_exclusive_ram_id())

    def retrieve_symbol(self, syms: Dict[(int, List[Symbol])], addr: int) -> Optional[Symbol]:
        # First symbol at addr that is visible from this segment.
        if (addr not in syms):
            return None
        items = syms[addr]
        items = [i for i in items if ((i.segment is None) or Segment.visible_ram(self, i.segment))]
        if (len(items) > 1):
            pass
        if (len(items) == 0):
            return None
        return items[0]

    def retrieve_sym_type(self, syms: Dict[(int, List[Symbol])], addr: int, type: str) -> Optional[symbols.Symbol]:
        # Like retrieve_symbol, but additionally filters by symbol type.
        if (addr not in syms):
            return None
        items = syms[addr]
        items = [i for i in items if ((i.segment is None) or (Segment.visible_ram(self, i.segment) and (type == i.type)))]
        if (len(items) == 0):
            return None
        return items[0]

    def get_symbol(self, addr: int, in_segment: bool=False, type: Optional[str]=None, create: bool=False, define: bool=False, reference: bool=False, search_ranges: bool=False, local_only: bool=False) -> Optional[Symbol]:
        """Look up (and optionally create) a symbol at *addr*.

        in_segment restricts the search to this segment's own symbols;
        search_ranges also matches addresses inside sized symbols; create
        makes a new Symbol when none is found; define/reference mark the
        returned symbol accordingly.
        """
        ret: Optional[Symbol] = None
        rom: Optional[int] = None
        most_parent = self.get_most_parent()
        if in_segment:
            rom = most_parent.ram_to_rom(addr)
            ret = most_parent.retrieve_symbol(most_parent.seg_symbols, addr)
            if ((not ret) and search_ranges):
                # Prefer rom-range matches; fall back to ram ranges.
                if (rom is not None):
                    cands: Set[Interval] = most_parent.symbol_ranges_rom[rom]
                    if cands:
                        ret = cands.pop().data
                if (not ret):
                    cands = most_parent.symbol_ranges_ram[addr]
                    if cands:
                        ret = cands.pop().data
        elif (not local_only):
            ret = most_parent.retrieve_symbol(symbols.all_symbols_dict, addr)
            if ((not ret) and search_ranges):
                cands = symbols.all_symbols_ranges[addr]
                if cands:
                    ret = cands.pop().data
        if ((not ret) and create):
            ret = Symbol(addr, rom=rom, type=type)
            symbols.add_symbol(ret)
            if in_segment:
                ret.segment = most_parent
                if (addr not in most_parent.seg_symbols):
                    most_parent.seg_symbols[addr] = []
                most_parent.seg_symbols[addr].append(ret)
        if ret:
            # Backfill metadata on the found/created symbol.
            if define:
                ret.defined = True
            if reference:
                ret.referenced = True
            if (ret.type is None):
                ret.type = type
            if (ret.rom is None):
                ret.rom = rom
            if in_segment:
                if (ret.segment is None):
                    ret.segment = most_parent
        return ret

    def create_symbol(self, addr: int, in_segment: bool, type: Optional[str]=None, define: bool=False, reference: bool=False, search_ranges: bool=False, local_only: bool=False) -> Symbol:
        # get_symbol with create=True never returns None.
        ret = self.get_symbol(addr, in_segment=in_segment, type=type, create=True, define=define, reference=reference, search_ranges=search_ranges, local_only=local_only)
        assert (ret is not None)
        return ret

    def get_func_for_addr(self, addr) -> Optional[Symbol]:
        # Linear scan for the function symbol whose range covers addr.
        for syms in self.seg_symbols.values():
            for sym in syms:
                if ((sym.type == 'func') and sym.contains_vram(addr)):
                    return sym
        return None
class TestSpecialEvent(unittest.TestCase):
    """Constructing FooWithEventMetadata with a bad event trait must fail."""

    def setUp(self):
        # Re-raise handler exceptions so trait errors surface in the test.
        push_exception_handler(reraise_exceptions=True)
        self.addCleanup(pop_exception_handler)

    def test_events(self):
        with self.assertRaises(ValueError) as ctx:
            FooWithEventMetadata()
        self.assertIn("Trait named 'the_trait' not found", str(ctx.exception))
class Migration(migrations.Migration):
    """Align the user model's ``groups`` and ``is_active`` field metadata."""

    dependencies = [
        ('auth', '0012_alter_user_first_name_max_length'),
        ('accounts', '0002_alter_first_name_and_id'),
    ]

    operations = [
        migrations.AlterField(
            model_name='user',
            name='groups',
            field=models.ManyToManyField(
                blank=True,
                help_text='The groups this user belongs to. A user will get all permissions granted to each of their groups.',
                related_name='user_set',
                related_query_name='user',
                to='auth.group',
                verbose_name='groups',
            ),
        ),
        migrations.AlterField(
            model_name='user',
            name='is_active',
            field=models.BooleanField(
                default=True,
                help_text='Designates whether this user should be treated as active. Unselect this instead of deleting accounts.',
                verbose_name='active',
            ),
        ),
    ]
class BasicTransactionTests(unittest.TestCase):
    """Sanity checks for the Cardano Transaction wrapper and txid validation."""

    def test_txid_validator_error(self):
        """Malformed or wrongly-typed txids are rejected."""
        for bad_txid, expected_exc in (
            ('abc', ValueError),
            (None, TypeError),
            ([], TypeError),
        ):
            self.assertRaises(expected_exc, validate_txid, bad_txid)

    def test_simple(self):
        """A bare transaction has zero amounts, no fee, empty in/out lists."""
        txn = Transaction(txid='0be29e98d833d948a3be7f18f9ce8693d7ee407c7d38b6ef2a5a264')
        rendered = '<Cardano tx: 0be29e98d833d948a3be7f18f9ce8693d7ee407c7d38b6ef2a5a264>'
        self.assertEqual('{}'.format(txn), rendered)
        self.assertEqual('{:s}'.format(txn), rendered)
        self.assertEqual(txn.amount_in, 0)
        self.assertEqual(txn.amount_out, 0)
        self.assertIsNone(txn.fee)
        for side in (txn.inputs, txn.outputs):
            self.assertIsInstance(side, list)
            self.assertEqual(len(side), 0)

    def test_args(self):
        """Keyword arguments populate inputs; amounts stay Decimal zero."""
        funding = Input('f9ab4d2dbbd28cb280cab4405fb90fb93bb18aee216fa')
        txn = Transaction(txid='f854eea5b2f35a863d748b294299deecf62ec9629ff08fca87fff45c', fee=Decimal('0.168801'), inputs=[funding])
        self.assertEqual(len(txn.inputs), 1)
        self.assertEqual(len(txn.local_inputs), 0)
        for amount in (txn.amount_in, txn.amount_out):
            self.assertIsInstance(amount, Decimal)
            self.assertEqual(amount, Decimal('0'))

    def test_inherited(self):
        """Class-level txid/fee attributes work on subclasses."""
        class CustomTx(Transaction):
            txid = 'f854eea5b2f35a863d748b294299deecf62ec9629ff08fca87fff45c'
            fee = Decimal('0.168801')
        txn = CustomTx()
        self.assertEqual(txn.txid, 'f854eea5b2f35a863d748b294299deecf62ec9629ff08fca87fff45c')
        self.assertEqual(txn.fee, Decimal('0.168801'))
        for amount in (txn.amount_in, txn.amount_out):
            self.assertIsInstance(amount, Decimal)
            self.assertEqual(amount, Decimal('0'))

    def test_inherited_with_args(self):
        """Constructor arguments override inherited class attributes."""
        class CustomTx(Transaction):
            txid = '0be29e98d833d948a3be7f18f9ce8693d7ee407c7d38b6ef2a5a264'
            fee = Decimal('0.000000')
        txn = CustomTx('f854eea5b2f35a863d748b294299deecf62ec9629ff08fca87fff45c', fee=Decimal('0.168801'))
        self.assertEqual(txn.txid, 'f854eea5b2f35a863d748b294299deecf62ec9629ff08fca87fff45c')
        self.assertEqual(txn.fee, Decimal('0.168801'))
        for amount in (txn.amount_in, txn.amount_out):
            self.assertIsInstance(amount, Decimal)
            self.assertEqual(amount, Decimal('0'))
class OptionPlotoptionsSankeySonificationTracksMappingNoteduration(Options):
    """Accessors for sankey ``sonification.tracks.mapping.noteDuration``.

    NOTE(review): duplicated method names indicate stripped ``@property`` /
    ``@<name>.setter`` decorators (the setter shadows the getter as written).
    """

    def mapFunction(self):
        return self._config_get(None)

    def mapFunction(self, value: Any):
        self._config(value, js_type=False)

    def mapTo(self):
        return self._config_get(None)

    def mapTo(self, text: str):
        self._config(text, js_type=False)

    def max(self):
        return self._config_get(None)

    def max(self, num: float):
        self._config(num, js_type=False)

    def min(self):
        return self._config_get(None)

    def min(self, num: float):
        self._config(num, js_type=False)

    def within(self):
        return self._config_get(None)

    def within(self, value: Any):
        self._config(value, js_type=False)
def crlf_get_uri_method(uri, method, headers, scanid=None):
    """Probe every query parameter of *uri* with CRLF payloads over GET.

    For each parameter, each payload replaces that parameter's original value
    in the raw query string. A response whose headers contain 'CRLF-Test' is
    recorded as a finding and the scan stops.

    :param uri: target URL with a query string
    :param method: original request method (kept for interface compatibility)
    :param headers: request headers to send with each probe
    :param scanid: scan identifier stored with any finding
    """
    # Parse once; the original re-parsed the URI five times per payload.
    parts = urllib.parse.urlparse(uri)
    parsed_query = urllib.parse.parse_qs(parts.query)
    base = parts.scheme + '://' + parts.netloc + parts.path
    # Payload list is loop-invariant; fetch it once instead of per parameter.
    crlf_payloads = fetch_crlf_payload()
    for key, value in parsed_query.items():
        for payload in crlf_payloads:
            # Substitute the parameter's original value with the payload.
            parsed_uri = base + '?' + parts.query.replace(value[0], payload)
            crlf_get_method = req.api_request(parsed_uri, 'GET', headers)
            for name in crlf_get_method.headers:
                if 'CRLF-Test' in name:
                    attack_result = {'id': 13, 'scanid': scanid, 'url': parsed_uri, 'alert': 'CRLF injection', 'impact': 'High', 'req_headers': headers, 'req_body': 'NA', 'res_headers': crlf_get_method.headers, 'res_body': crlf_get_method.text}
                    dbupdate.insert_record(attack_result)
                    print('[+]{0} is vulnerable to CRLF injection'.format(parsed_uri))
                    return
class OptionPlotoptionsVariablepieSonificationDefaultspeechoptionsMappingRate(Options):
    """Accessors for variablepie ``sonification.defaultSpeechOptions.mapping.rate``.

    NOTE(review): duplicated method names indicate stripped ``@property`` /
    ``@<name>.setter`` decorators (the setter shadows the getter as written).
    """

    def mapFunction(self):
        return self._config_get(None)

    def mapFunction(self, value: Any):
        self._config(value, js_type=False)

    def mapTo(self):
        return self._config_get(None)

    def mapTo(self, text: str):
        self._config(text, js_type=False)

    def max(self):
        return self._config_get(None)

    def max(self, num: float):
        self._config(num, js_type=False)

    def min(self):
        return self._config_get(None)

    def min(self, num: float):
        self._config(num, js_type=False)

    def within(self):
        return self._config_get(None)

    def within(self, value: Any):
        self._config(value, js_type=False)
def _validateCredentials(parameters, method):
    """Validate an HTTP Digest ``Authorization`` response (RFC 7616 style).

    :param parameters: parsed digest parameters from the Authorization header
    :param method: HTTP method of the request (part of HA2)
    :return: True when the client's response hash matches, else False
    :raises ValueError: when the parameter set is malformed/non-compliant
    """
    try:
        _logger.debug('DIGEST via {}; details: {!r}'.format(method, parameters))
        nonce = parameters['nonce'].lower()
        cnonce = parameters['cnonce'].lower()
        # HA1 = MD5(username:realm:password). Bug fix: the format string uses
        # named fields, so the values must be passed as keyword arguments —
        # positional args raised KeyError('username') before.
        ha1 = hashlib.md5('{username}:{realm}:{password}'.format(
            username=config.WEB_DIGEST_USERNAME,
            realm=config.SYSTEM_NAME.replace('"', "'"),
            password=config.WEB_DIGEST_PASSWORD,
        ).encode('utf-8')).hexdigest()
        # HA2 = MD5(method:uri).
        ha2 = hashlib.md5('{}:{}'.format(method, parameters['uri']).encode('utf-8')).hexdigest()
        if (parameters.get('qop', '').lower() == 'auth'):
            target = hashlib.md5('{}:{}:{}:{}:{}:{}'.format(ha1, nonce, parameters['nc'].lower(), cnonce, parameters['qop'].lower(), ha2).encode('utf-8')).hexdigest()
        else:
            # Bug fix: hashlib.md5 requires bytes — the legacy (no-qop) branch
            # previously passed a str, raising TypeError at runtime.
            target = hashlib.md5('{}:{}:{}'.format(ha1, nonce, ha2).encode('utf-8')).hexdigest()
        return (target == parameters['response'].lower())
    except Exception as e:
        raise ValueError('Authorization data from client is not spec-compliant: {}'.format(e))
def fetch_evm_tools_tests(test_dir: str, fork_name: str, slow_tests: Tuple[(str, ...)]=None) -> Generator:
    """Yield evm-tools test cases for *fork_name* found under *test_dir*.

    Walks the directory tree for ``.json`` suites; each post-state transition
    for the requested fork becomes one case dict. Cases whose key appears in
    *slow_tests* are wrapped in ``pytest.param`` with the ``slow`` mark.
    """
    slow_keys = slow_tests if slow_tests is not None else tuple()
    for root, _, files in os.walk(test_dir):
        for filename in (name for name in files if name.endswith('.json')):
            test_file_path = os.path.join(root, filename)
            with open(test_file_path) as handle:
                suite = json.load(handle)
            for key, test in suite.items():
                if fork_name not in test['post']:
                    continue
                for idx, _transition in enumerate(test['post'][fork_name]):
                    case = {'test_file': test_file_path, 'test_key': key, 'index': idx}
                    if key in slow_keys:
                        yield pytest.param(case, marks=pytest.mark.slow)
                    else:
                        yield case
def run_aea(ctx: Context, connection_ids: List[PublicId], env_file: str, is_install_deps: bool, password: Optional[str]=None) -> None:
    """Prepare the environment, build the AEA, run it, and always stop it cleanly.

    A keyboard interrupt is reported and swallowed; any other error is
    re-raised as a ClickException so the CLI exits with its message.
    """
    # Read the flag before _prepare_environment runs, matching original order.
    should_skip_check = ctx.config['skip_consistency_check']
    _prepare_environment(ctx, env_file, is_install_deps)
    agent = _build_aea(connection_ids, should_skip_check, password)
    click.echo(AEA_LOGO + 'v' + __version__ + '\n')
    click.echo("Starting AEA '{}' in '{}' mode...".format(agent.name, agent.runtime.loop_mode))
    try:
        agent.start()
    except KeyboardInterrupt:
        click.echo(" AEA '{}' interrupted!".format(agent.name))
    except Exception as err:
        raise click.ClickException(str(err))
    finally:
        click.echo("Stopping AEA '{}' ...".format(agent.name))
        agent.stop()
        click.echo("AEA '{}' stopped.".format(agent.name))
class TestUtilities(unittest.TestCase):
    """Exercises the recursive attribute helpers rgetattr/rsetattr/rhasattr."""

    def test_get_set_has(self):
        class TestObject(object):
            def __init__(self):
                self.object = None
                self.set_to_five = 5

        outer = TestObject()
        outer.object = TestObject()
        outer.object.set_to_five = 10
        # Dotted set must reach the nested attribute.
        rsetattr(outer, 'object.set_to_five', 1)
        self.assertTrue(rhasattr(outer, 'object.set_to_five'))
        self.assertEqual(1, rgetattr(outer, 'object.set_to_five'))
        # The outer attribute keeps its own value.
        self.assertEqual(5, rgetattr(outer, 'set_to_five'))
        # Missing intermediate attributes raise instead of being created.
        with self.assertRaises(AttributeError):
            rsetattr(outer, 'object.does_not_exist.five', 5)
def serialization_settings() -> SerializationSettings:
    """Build SerializationSettings fixture with one default test image."""
    image = Image(name='default', fqn='test', tag='tag')
    return SerializationSettings(
        project='project',
        domain='domain',
        version='version',
        env={'FOO': 'baz'},
        image_config=ImageConfig(default_image=image, images=[image]),
    )
class Alien():
    """A grid-positioned enemy with three hit points.

    The class attribute ``total_aliens_created`` counts every instantiation.
    """

    total_aliens_created = 0

    def __init__(self, x_coordinate, y_coordinate):
        """Spawn an alien at the given position with full health."""
        Alien.total_aliens_created += 1
        self.x_coordinate = x_coordinate
        self.y_coordinate = y_coordinate
        self.health = 3

    def hit(self):
        """Take one point of damage."""
        self.health = self.health - 1

    def is_alive(self):
        """Return True while at least one hit point remains."""
        return self.health > 0

    def teleport(self, new_x_coordinate, new_y_coordinate):
        """Move instantly to a new position."""
        self.x_coordinate, self.y_coordinate = new_x_coordinate, new_y_coordinate

    def collision_detection(self, other):
        """Placeholder for collision logic; intentionally returns None."""
        pass
def test_lobatto_edge1():
    """Integrate a 1st-order polynomial on an edge with Lobatto quadrature of
    orders 1–3; the integral must be identical at every order."""
    print('1st Order Polynomial')
    print('Edge')
    lobattoEdge.setOrder(1)
    int0_f1 = dot(f1(lobattoEdge.points), lobattoEdge.weights)
    print(int0_f1)
    lobattoEdge.setOrder(2)
    int1_f1 = dot(f1(lobattoEdge.points), lobattoEdge.weights)
    print(int1_f1)
    lobattoEdge.setOrder(3)
    int2_f1 = dot(f1(lobattoEdge.points), lobattoEdge.weights)
    # Bug fix: previously re-printed int1_f1 instead of the order-3 result.
    print(int2_f1)
    npt.assert_almost_equal(int0_f1, int1_f1)
    npt.assert_almost_equal(int1_f1, int2_f1)
class Fastq(object):
    """One FASTQ record: header, sequence, secondary header, quality string."""

    def __init__(self, name, sequence, name2, quality):
        """Store the four FASTQ fields verbatim."""
        self.name = name
        self.sequence = sequence
        self.name2 = name2
        self.quality = quality

    def write_to_file(self, handle):
        """Write the record as four newline-terminated lines to *handle*."""
        for field in (self.name, self.sequence, self.name2, self.quality):
            handle.write(field + '\n')
class OptionSeriesBellcurveSonificationDefaultinstrumentoptionsMappingGapbetweennotes(Options):
    """Accessors for bellcurve ``sonification.defaultInstrumentOptions.mapping.gapBetweenNotes``.

    NOTE(review): duplicated method names indicate stripped ``@property`` /
    ``@<name>.setter`` decorators (the setter shadows the getter as written).
    """

    def mapFunction(self):
        return self._config_get(None)

    def mapFunction(self, value: Any):
        self._config(value, js_type=False)

    def mapTo(self):
        return self._config_get(None)

    def mapTo(self, text: str):
        self._config(text, js_type=False)

    def max(self):
        return self._config_get(None)

    def max(self, num: float):
        self._config(num, js_type=False)

    def min(self):
        return self._config_get(None)

    def min(self, num: float):
        self._config(num, js_type=False)

    def within(self):
        return self._config_get(None)

    def within(self, value: Any):
        self._config(value, js_type=False)
@_EXTRACTORS.register_module()
class PyinPitchExtractor(BasePitchExtractor):
    """Pitch (f0) extractor backed by librosa's probabilistic YIN (pYIN).

    BUG FIX: the registry call was a bare statement
    (``_EXTRACTORS.register_module()``) whose returned decorator was
    discarded, so the class was never registered; restored as a decorator.
    """

    def __call__(self, x, sampling_rate=44100, pad_to=None):
        """Estimate the f0 track of a single-channel waveform.

        Args:
            x: 2D tensor with exactly one channel, shape (1, n_samples).
            sampling_rate: sample rate of *x* in Hz.
            pad_to: optional target length forwarded to ``post_process``.

        Returns:
            The result of ``BasePitchExtractor.post_process`` on the f0 track.
        """
        assert x.ndim == 2, f'Expected 2D tensor, got {x.ndim}D tensor.'
        assert x.shape[0] == 1, f'Expected 1 channel, got {x.shape[0]} channels.'
        # pYIN is run at a fixed 22050 Hz here; resample the input if needed.
        if sampling_rate != 22050:
            y = resampy.resample(x[0].cpu().numpy(), sampling_rate, 22050)
        else:
            y = x[0].cpu().numpy()
        pyin_tuple = librosa.pyin(y, frame_length=1024, fmin=self.f0_min, fmax=self.f0_max)
        # librosa.pyin returns (f0, voiced_flag, voiced_prob); only f0 is used.
        frequencies = pyin_tuple[0]
        # Unvoiced frames come back as NaN; zero them so downstream math is safe.
        nan_indices = np.isnan(frequencies)
        if np.any(nan_indices):
            frequencies[nan_indices] = 0
        return self.post_process(x, sampling_rate, frequencies, pad_to)
class QVTKRenderWindowInteractor(QVTKRWIBaseClass):
    """Qt widget hosting a vtkRenderWindow and forwarding Qt input events to a
    vtkGenericRenderWindowInteractor.

    Unknown attribute lookups are delegated to the wrapped interactor (see
    __getattr__), so this widget can stand in for a VTK interactor object.
    """

    # Maps VTK's current-cursor id to the Qt cursor shape to display.
    _CURSOR_MAP = {0: CursorShape.ArrowCursor, 1: CursorShape.ArrowCursor, 2: CursorShape.SizeBDiagCursor, 3: CursorShape.SizeFDiagCursor, 4: CursorShape.SizeBDiagCursor, 5: CursorShape.SizeFDiagCursor, 6: CursorShape.SizeVerCursor, 7: CursorShape.SizeHorCursor, 8: CursorShape.SizeAllCursor, 9: CursorShape.PointingHandCursor, 10: CursorShape.CrossCursor}

    def __init__(self, parent=None, **kw):
        """Create the widget.

        Recognized keyword arguments: 'stereo' (bool), 'rw' (existing
        vtkRenderWindow), 'iren' (existing interactor), and — for the
        QWidget base only — 'wflags' (Qt window flags).
        """
        self._ActiveButton = MouseButton.NoButton
        # Last-seen cursor position / modifiers, used by events that do not
        # carry their own coordinates (enter/leave, key events).
        self.__saveX = 0
        self.__saveY = 0
        self.__saveModifiers = KeyboardModifier.NoModifier
        self.__saveButtons = MouseButton.NoButton
        self.__wheelDelta = 0
        try:
            stereo = bool(kw['stereo'])
        except KeyError:
            stereo = False
        try:
            rw = kw['rw']
        except KeyError:
            rw = None
        # Initialize whichever Qt base class this module was configured with.
        if (QVTKRWIBase == 'QWidget'):
            if ('wflags' in kw):
                wflags = kw['wflags']
            else:
                wflags = WindowType.Widget
            QWidget.__init__(self, parent, (wflags | WindowType.MSWindowsOwnDC))
        elif (QVTKRWIBase == 'QGLWidget'):
            QGLWidget.__init__(self, parent)
        elif (QVTKRWIBase == 'QOpenGLWidget'):
            QOpenGLWidget.__init__(self, parent)
        if rw:
            self._RenderWindow = rw
        else:
            self._RenderWindow = vtk.vtkRenderWindow()
        # Point VTK at this widget's native window handle.
        wid = self._get_win_id()
        self._RenderWindow.SetWindowInfo(wid)
        # On Windows the parent info must be refreshed on resize/reparent.
        self._should_set_parent_info = (sys.platform == 'win32')
        if stereo:
            self._RenderWindow.StereoCapableWindowOn()
            self._RenderWindow.SetStereoTypeToCrystalEyes()
        try:
            self._Iren = kw['iren']
        except KeyError:
            self._Iren = vtk.vtkGenericRenderWindowInteractor()
            self._Iren.SetRenderWindow(self._RenderWindow)
        # Scale Qt logical coordinates to device pixels on hi-DPI screens.
        if hasattr(self, 'devicePixelRatio'):
            self._pixel_ratio = self.devicePixelRatio()
        else:
            self._pixel_ratio = 1.0
        self.setAttribute(WidgetAttribute.WA_OpaquePaintEvent)
        self.setAttribute(WidgetAttribute.WA_PaintOnScreen)
        self.setMouseTracking(True)
        self.setFocusPolicy(FocusPolicy.WheelFocus)
        self.setSizePolicy(QSizePolicy(SizePolicy.Expanding, SizePolicy.Expanding))
        # Qt timer that backs VTK timer events (see CreateTimer/TimerEvent).
        self._Timer = QTimer(self)
        self._Timer.timeout.connect(self.TimerEvent)
        # Qt4 delivers wheel events at a very high rate; coalesce them with a
        # single-shot timer so VTK is not flooded.
        self.wheel_timer = None
        if is_qt4:
            self.wheel_timer = QTimer()
            self.wheel_timer.setSingleShot(True)
            self.wheel_timer.setInterval(25)
            self.wheel_timer.timeout.connect(self._emit_wheel_event)
            self._saved_wheel_event_info = ()
        # Route observer callbacks through messenger (avoids reference cycles
        # between the VTK objects and this widget).
        self._Iren.AddObserver('CreateTimerEvent', messenger.send)
        messenger.connect(self._Iren, 'CreateTimerEvent', self.CreateTimer)
        self._Iren.AddObserver('DestroyTimerEvent', messenger.send)
        messenger.connect(self._Iren, 'DestroyTimerEvent', self.DestroyTimer)
        self._RenderWindow.AddObserver('CursorChangedEvent', messenger.send)
        messenger.connect(self._RenderWindow, 'CursorChangedEvent', self.CursorChangedEvent)
        # Hidden child widget whose destruction triggers VTK cleanup.
        self._hidden = QWidget(self)
        self._hidden.hide()
        self._hidden.destroyed.connect(self.Finalize)

    def __getattr__(self, attr):
        """Delegate unknown attribute lookups to the wrapped VTK interactor."""
        if (attr == '__vtk__'):
            return (lambda t=self._Iren: t)
        elif hasattr(self._Iren, attr):
            return getattr(self._Iren, attr)
        else:
            raise AttributeError(((self.__class__.__name__ + ' has no attribute named ') + attr))

    def _get_win_id(self):
        """Return the native window id as a decimal string for VTK."""
        WId = self.winId()
        # Older bindings wrap the handle (PyCObject/PyCapsule); unwrap via ctypes.
        if (type(WId).__name__ == 'PyCObject'):
            from ctypes import pythonapi, c_void_p, py_object
            pythonapi.PyCObject_AsVoidPtr.restype = c_void_p
            pythonapi.PyCObject_AsVoidPtr.argtypes = [py_object]
            WId = pythonapi.PyCObject_AsVoidPtr(WId)
        elif (type(WId).__name__ == 'PyCapsule'):
            from ctypes import pythonapi, c_void_p, py_object, c_char_p
            pythonapi.PyCapsule_GetName.restype = c_char_p
            pythonapi.PyCapsule_GetName.argtypes = [py_object]
            name = pythonapi.PyCapsule_GetName(WId)
            pythonapi.PyCapsule_GetPointer.restype = c_void_p
            pythonapi.PyCapsule_GetPointer.argtypes = [py_object, c_char_p]
            WId = pythonapi.PyCapsule_GetPointer(WId, name)
        return str(int(WId))

    def Finalize(self):
        """Release the render window's OS-level resources."""
        self._RenderWindow.Finalize()

    def CreateTimer(self, obj, evt):
        """VTK observer: start the 10 ms Qt timer backing VTK timer events."""
        self._Timer.start(10)

    def DestroyTimer(self, obj, evt):
        """VTK observer: stop the Qt timer; returning 1 tells VTK it was destroyed."""
        self._Timer.stop()
        return 1

    def TimerEvent(self):
        """Qt timer tick: forward to the VTK interactor."""
        self._Iren.TimerEvent()

    def CursorChangedEvent(self, obj, evt):
        """Defer applying the cursor change until VTK has finished setting it."""
        QTimer.singleShot(0, self.ShowCursor)

    def HideCursor(self):
        """Hide the mouse cursor over this widget."""
        self.setCursor(CursorShape.BlankCursor)

    def ShowCursor(self):
        """Display the Qt cursor matching VTK's current cursor id."""
        vtk_cursor = self._Iren.GetRenderWindow().GetCurrentCursor()
        qt_cursor = self._CURSOR_MAP.get(vtk_cursor, CursorShape.ArrowCursor)
        self.setCursor(qt_cursor)

    def closeEvent(self, evt):
        self.Finalize()

    def sizeHint(self):
        return QSize(400, 400)

    def paintEngine(self):
        # VTK paints directly to the native window, so no Qt paint engine.
        return None

    def paintEvent(self, ev):
        self._RenderWindow.Render()

    def resizeEvent(self, ev):
        """Keep the VTK render window size (in device pixels) in sync with Qt."""
        if self._should_set_parent_info:
            # Windows: window/parent info can be invalidated by reparenting.
            winid = self._get_win_id()
            self._RenderWindow.SetWindowInfo(winid)
            parent = self.parent()
            if (parent is not None):
                self._RenderWindow.SetParentInfo(winid)
            else:
                self._RenderWindow.SetParentInfo('')
        pxr = self._pixel_ratio
        w = int((self.width() * pxr))
        h = int((self.height() * pxr))
        # Call the base-class SetSize explicitly to bypass any override.
        vtk.vtkRenderWindow.SetSize(self._RenderWindow, w, h)
        self._Iren.SetSize(w, h)
        self._Iren.ConfigureEvent()
        self.update()

    def _GetCtrlShift(self, ev):
        """Return (ctrl, shift) for *ev*, falling back to the last saved
        modifiers when the event type carries none."""
        ctrl = shift = False
        if hasattr(ev, 'modifiers'):
            if (ev.modifiers() & KeyboardModifier.ShiftModifier):
                shift = True
            if (ev.modifiers() & KeyboardModifier.ControlModifier):
                ctrl = True
        else:
            if (self.__saveModifiers & KeyboardModifier.ShiftModifier):
                shift = True
            if (self.__saveModifiers & KeyboardModifier.ControlModifier):
                ctrl = True
        return (ctrl, shift)

    def enterEvent(self, ev):
        """Mouse entered the widget: inform the VTK interactor."""
        (ctrl, shift) = self._GetCtrlShift(ev)
        self._Iren.SetEventInformationFlipY(self.__saveX, self.__saveY, ctrl, shift, chr(0), 0, None)
        self._Iren.EnterEvent()

    def leaveEvent(self, ev):
        """Mouse left the widget: inform the VTK interactor."""
        (ctrl, shift) = self._GetCtrlShift(ev)
        self._Iren.SetEventInformationFlipY(self.__saveX, self.__saveY, ctrl, shift, chr(0), 0, None)
        self._Iren.LeaveEvent()

    def mousePressEvent(self, ev):
        """Translate a Qt mouse press (or double-click) into a VTK button event."""
        (ctrl, shift) = self._GetCtrlShift(ev)
        repeat = 0
        if (ev.type() == EventType.MouseButtonDblClick):
            repeat = 1
        pxr = self._pixel_ratio
        self._Iren.SetEventInformationFlipY(int((ev.x() * pxr)), int((ev.y() * pxr)), ctrl, shift, chr(0), repeat, None)
        self._ActiveButton = ev.button()
        if (self._ActiveButton == MouseButton.LeftButton):
            self._Iren.LeftButtonPressEvent()
        elif (self._ActiveButton == MouseButton.RightButton):
            self._Iren.RightButtonPressEvent()
        # NOTE(review): bare `MiddleButton` (unlike MouseButton.LeftButton
        # above) — presumably a module-level compatibility alias; verify it
        # resolves at import scope.
        elif (self._ActiveButton == MiddleButton):
            self._Iren.MiddleButtonPressEvent()

    def mouseReleaseEvent(self, ev):
        """Translate a Qt mouse release into the matching VTK button event."""
        (ctrl, shift) = self._GetCtrlShift(ev)
        pxr = self._pixel_ratio
        self._Iren.SetEventInformationFlipY(int((ev.x() * pxr)), int((ev.y() * pxr)), ctrl, shift, chr(0), 0, None)
        if (self._ActiveButton == MouseButton.LeftButton):
            self._Iren.LeftButtonReleaseEvent()
        elif (self._ActiveButton == MouseButton.RightButton):
            self._Iren.RightButtonReleaseEvent()
        # NOTE(review): bare `MiddleButton` — see mousePressEvent.
        elif (self._ActiveButton == MiddleButton):
            self._Iren.MiddleButtonReleaseEvent()

    def mouseMoveEvent(self, ev):
        """Track the cursor and forward the move to the VTK interactor."""
        self.__saveModifiers = ev.modifiers()
        self.__saveButtons = ev.buttons()
        pxr = self._pixel_ratio
        self.__saveX = int((ev.x() * pxr))
        self.__saveY = int((ev.y() * pxr))
        (ctrl, shift) = self._GetCtrlShift(ev)
        self._Iren.SetEventInformationFlipY(int((ev.x() * pxr)), int((ev.y() * pxr)), ctrl, shift, chr(0), 0, None)
        self._Iren.MouseMoveEvent()

    def keyPressEvent(self, ev):
        """Translate a Qt key press into VTK KeyPress + Char events."""
        (ctrl, shift) = self._GetCtrlShift(ev)
        key_sym = _qt_key_to_key_sym(ev.key())
        if (ev.key() < 256):
            # NOTE(review): the `<= u'y'` guard mirrors upstream VTK code;
            # its exact intent is not evident from this file.
            if (ev.text() and (ev.text() <= u'y')):
                key = ev.text().encode('latin-1')
            else:
                key = chr(ev.key())
        else:
            key = chr(0)
        if ev.isAutoRepeat():
            key = key[0]
        self._Iren.SetEventInformationFlipY(self.__saveX, self.__saveY, ctrl, shift, key, 0, key_sym)
        self._Iren.KeyPressEvent()
        self._Iren.CharEvent()

    def keyReleaseEvent(self, ev):
        """Translate a Qt key release into a VTK KeyRelease event."""
        (ctrl, shift) = self._GetCtrlShift(ev)
        key_sym = _qt_key_to_key_sym(ev.key())
        if (ev.key() < 256):
            if (ev.text() and (ev.text() <= u'y')):
                key = ev.text().encode('latin-1')
            else:
                key = chr(ev.key())
        else:
            key = chr(0)
        self._Iren.SetEventInformationFlipY(self.__saveX, self.__saveY, ctrl, shift, key, 0, None)
        self._Iren.KeyReleaseEvent()

    def wheelEvent(self, ev):
        """Accumulate wheel movement and emit a VTK wheel event per 60 units."""
        if hasattr(ev, 'delta'):
            # Qt4 path: save the event details for the coalescing timer.
            self.__wheelDelta += ev.delta()
            self._saved_wheel_event_info = (ev.pos(), ev.globalPos(), self.__wheelDelta, ev.buttons(), ev.modifiers(), ev.orientation())
        else:
            self.__wheelDelta += ev.angleDelta().y()
        if (self.__wheelDelta >= 60):
            self._Iren.MouseWheelForwardEvent()
            self.__wheelDelta = 0
        elif (self.__wheelDelta <= (- 60)):
            self._Iren.MouseWheelBackwardEvent()
            self.__wheelDelta = 0
        if (self.wheel_timer and (not self.wheel_timer.isActive())):
            ev.setAccepted(True)
            self.wheel_timer.start()

    def _emit_wheel_event(self):
        """Qt4 coalescing-timer callback: replay the saved wheel event."""
        ev = QWheelEvent(*self._saved_wheel_event_info)
        if (ev.delta() >= 0):
            self._Iren.MouseWheelForwardEvent()
        else:
            self._Iren.MouseWheelBackwardEvent()
        self.wheel_timer.stop()
        self.__wheelDelta = 0

    def GetRenderWindow(self):
        """Return the wrapped vtkRenderWindow."""
        return self._RenderWindow

    def Render(self):
        """Schedule a repaint (which renders via paintEvent)."""
        self.update()
class MsgAndPduDispatcher(object):
    """SNMP message and PDU dispatcher (RFC 3412).

    Sends outgoing PDUs through the engine's message processing subsystem
    and dispatches incoming messages either to a registered application
    (requests/notifications) or to the cached callback of a pending
    request (responses).
    """

    def __init__(self, mibInstrumController=None):
        """Create the dispatcher, building a default MIB instrumentation
        controller when none is supplied."""
        if (mibInstrumController is None):
            self.mibInstrumController = instrum.MibInstrumController(builder.MibBuilder())
        else:
            self.mibInstrumController = mibInstrumController
        self.mibInstrumController.mibBuilder.loadModules('SNMPv2-MIB', 'SNMP-MPD-MIB', 'SNMP-COMMUNITY-MIB', 'SNMP-TARGET-MIB', 'SNMP-USER-BASED-SM-MIB')
        # Pending-request state keyed by sendPduHandle.
        self._cache = cache.Cache()
        # (contextEngineId, pduType) -> processPdu callable.
        self._appsRegistration = {}
        self._sendPduHandle = nextid.Integer()
        # stateReference -> (transportDomain, transportAddress) for in-flight requests.
        self._transportInfo = {}

    def getTransportInfo(self, stateReference):
        """Return (transportDomain, transportAddress) for *stateReference*.

        Raises error.ProtocolError when no request is in flight for it.
        """
        if (stateReference in self._transportInfo):
            return self._transportInfo[stateReference]
        else:
            raise error.ProtocolError(('No data for stateReference %s' % stateReference))

    def registerContextEngineId(self, contextEngineId, pduTypes, processPdu):
        """Register *processPdu* to handle the given PDU types for a context
        engine id; duplicate registration raises error.ProtocolError."""
        for pduType in pduTypes:
            k = (contextEngineId, pduType)
            if (k in self._appsRegistration):
                raise error.ProtocolError(('Duplicate registration %r/%s' % (contextEngineId, pduType)))
            self._appsRegistration[k] = processPdu
        ((debug.logger & debug.FLAG_DSP) and debug.logger(('registerContextEngineId: contextEngineId %r pduTypes %s' % (contextEngineId, pduTypes))))

    def unregisterContextEngineId(self, contextEngineId, pduTypes):
        """Remove registrations; a None contextEngineId means the local engine."""
        if (contextEngineId is None):
            (contextEngineId,) = self.mibInstrumController.mibBuilder.importSymbols('__SNMP-FRAMEWORK-MIB', 'snmpEngineID')
        for pduType in pduTypes:
            k = (contextEngineId, pduType)
            if (k in self._appsRegistration):
                del self._appsRegistration[k]
        ((debug.logger & debug.FLAG_DSP) and debug.logger(('unregisterContextEngineId: contextEngineId %r pduTypes %s' % (contextEngineId, pduTypes))))

    def getRegisteredApp(self, contextEngineId, pduType):
        """Look up the handler for (contextEngineId, pduType), falling back to
        the null context; return None when nothing matches."""
        k = (contextEngineId, pduType)
        if (k in self._appsRegistration):
            return self._appsRegistration[k]
        k = (null, pduType)
        if (k in self._appsRegistration):
            return self._appsRegistration[k]

    def sendPdu(self, snmpEngine, transportDomain, transportAddress, messageProcessingModel, securityModel, securityName, securityLevel, contextEngineId, contextName, pduVersion, PDU, expectResponse, timeout=0, cbFun=None, cbCtx=None):
        """Prepare and send a PDU; when *expectResponse* is set, cache the
        callback for response matching and return the sendPduHandle."""
        k = int(messageProcessingModel)
        if (k in snmpEngine.messageProcessingSubsystems):
            mpHandler = snmpEngine.messageProcessingSubsystems[k]
        else:
            raise error.StatusInformation(errorIndication=errind.unsupportedMsgProcessingModel)
        ((debug.logger & debug.FLAG_DSP) and debug.logger(('sendPdu: securityName %s, PDU\n%s' % (securityName, PDU.prettyPrint()))))
        sendPduHandle = self._sendPduHandle()
        if expectResponse:
            # Timeout is stored in absolute timer ticks so receiveTimerTick
            # can expire stale requests.
            self._cache.add(sendPduHandle, messageProcessingModel=messageProcessingModel, sendPduHandle=sendPduHandle, timeout=(timeout + snmpEngine.transportDispatcher.getTimerTicks()), cbFun=cbFun, cbCtx=cbCtx)
            ((debug.logger & debug.FLAG_DSP) and debug.logger(('sendPdu: current time %d ticks, one tick is %s seconds' % (snmpEngine.transportDispatcher.getTimerTicks(), snmpEngine.transportDispatcher.getTimerResolution()))))
        ((debug.logger & debug.FLAG_DSP) and debug.logger(('sendPdu: new sendPduHandle %s, timeout %s ticks, cbFun %s' % (sendPduHandle, timeout, cbFun))))
        origTransportDomain = transportDomain
        origTransportAddress = transportAddress
        try:
            (transportDomain, transportAddress, outgoingMessage) = mpHandler.prepareOutgoingMessage(snmpEngine, origTransportDomain, origTransportAddress, messageProcessingModel, securityModel, securityName, securityLevel, contextEngineId, contextName, pduVersion, PDU, expectResponse, sendPduHandle)
            ((debug.logger & debug.FLAG_DSP) and debug.logger('sendPdu: MP succeeded'))
        except PySnmpError:
            # Undo the cache entry and any MP-layer state before re-raising.
            if expectResponse:
                self._cache.pop(sendPduHandle)
                self.releaseStateInformation(snmpEngine, sendPduHandle, messageProcessingModel)
            raise
        if (snmpEngine.transportDispatcher is None):
            if expectResponse:
                self._cache.pop(sendPduHandle)
            raise error.PySnmpError('Transport dispatcher not set')
        snmpEngine.observer.storeExecutionContext(snmpEngine, 'rfc3412.sendPdu', dict(transportDomain=transportDomain, transportAddress=transportAddress, outgoingMessage=outgoingMessage, messageProcessingModel=messageProcessingModel, securityModel=securityModel, securityName=securityName, securityLevel=securityLevel, contextEngineId=contextEngineId, contextName=contextName, pdu=PDU))
        try:
            snmpEngine.transportDispatcher.sendMessage(outgoingMessage, transportDomain, transportAddress)
        except PySnmpError:
            if expectResponse:
                self._cache.pop(sendPduHandle)
            raise
        snmpEngine.observer.clearExecutionContext(snmpEngine, 'rfc3412.sendPdu')
        if expectResponse:
            self._cache.update(sendPduHandle, transportDomain=origTransportDomain, transportAddress=origTransportAddress, securityModel=securityModel, securityName=securityName, securityLevel=securityLevel, contextEngineId=contextEngineId, contextName=contextName, pduVersion=pduVersion, PDU=PDU)
        return sendPduHandle

    def returnResponsePdu(self, snmpEngine, messageProcessingModel, securityModel, securityName, securityLevel, contextEngineId, contextName, pduVersion, PDU, maxSizeResponseScopedPDU, stateReference, statusInformation):
        """Prepare and send a response PDU for a previously received request."""
        k = int(messageProcessingModel)
        if (k in snmpEngine.messageProcessingSubsystems):
            mpHandler = snmpEngine.messageProcessingSubsystems[k]
        else:
            raise error.StatusInformation(errorIndication=errind.unsupportedMsgProcessingModel)
        ((debug.logger & debug.FLAG_DSP) and debug.logger(('returnResponsePdu: PDU %s' % (((PDU and PDU.prettyPrint()) or '<empty>'),))))
        try:
            (transportDomain, transportAddress, outgoingMessage) = mpHandler.prepareResponseMessage(snmpEngine, messageProcessingModel, securityModel, securityName, securityLevel, contextEngineId, contextName, pduVersion, PDU, maxSizeResponseScopedPDU, stateReference, statusInformation)
            ((debug.logger & debug.FLAG_DSP) and debug.logger('returnResponsePdu: MP suceeded'))
        except error.StatusInformation:
            raise
        mibBuilder = self.mibInstrumController.mibBuilder
        # Silently drop (and count) responses exceeding the engine's max size.
        (snmpEngineMaxMessageSize,) = mibBuilder.importSymbols('__SNMP-FRAMEWORK-MIB', 'snmpEngineMaxMessageSize')
        if (snmpEngineMaxMessageSize.syntax and (len(outgoingMessage) > snmpEngineMaxMessageSize.syntax)):
            (snmpSilentDrops,) = mibBuilder.importSymbols('__SNMPv2-MIB', 'snmpSilentDrops')
            snmpSilentDrops.syntax += 1
            raise error.StatusInformation(errorIndication=errind.tooBig)
        snmpEngine.observer.storeExecutionContext(snmpEngine, 'rfc3412.returnResponsePdu', dict(transportDomain=transportDomain, transportAddress=transportAddress, outgoingMessage=outgoingMessage, messageProcessingModel=messageProcessingModel, securityModel=securityModel, securityName=securityName, securityLevel=securityLevel, contextEngineId=contextEngineId, contextName=contextName, pdu=PDU))
        snmpEngine.transportDispatcher.sendMessage(outgoingMessage, transportDomain, transportAddress)
        snmpEngine.observer.clearExecutionContext(snmpEngine, 'rfc3412.returnResponsePdu')

    def receiveMessage(self, snmpEngine, transportDomain, transportAddress, wholeMsg):
        """Decode an incoming message and dispatch it to a registered
        application (requests) or the cached response callback (responses).

        Returns any undecoded remainder of *wholeMsg* (currently always null).
        """
        mibBuilder = self.mibInstrumController.mibBuilder
        (snmpInPkts,) = mibBuilder.importSymbols('__SNMPv2-MIB', 'snmpInPkts')
        snmpInPkts.syntax += 1
        restOfWholeMsg = null
        try:
            msgVersion = verdec.decodeMessageVersion(wholeMsg)
        except error.ProtocolError:
            (snmpInASNParseErrs,) = mibBuilder.importSymbols('__SNMPv2-MIB', 'snmpInASNParseErrs')
            snmpInASNParseErrs.syntax += 1
            return null
        ((debug.logger & debug.FLAG_DSP) and debug.logger(('receiveMessage: msgVersion %s, msg decoded' % msgVersion)))
        messageProcessingModel = msgVersion
        try:
            mpHandler = snmpEngine.messageProcessingSubsystems[int(messageProcessingModel)]
        except KeyError:
            (snmpInBadVersions,) = mibBuilder.importSymbols('__SNMPv2-MIB', 'snmpInBadVersions')
            snmpInBadVersions.syntax += 1
            return restOfWholeMsg
        try:
            (messageProcessingModel, securityModel, securityName, securityLevel, contextEngineId, contextName, pduVersion, PDU, pduType, sendPduHandle, maxSizeResponseScopedPDU, statusInformation, stateReference) = mpHandler.prepareDataElements(snmpEngine, transportDomain, transportAddress, wholeMsg)
            ((debug.logger & debug.FLAG_DSP) and debug.logger('receiveMessage: MP succeded'))
        except error.StatusInformation as exc:
            statusInformation = exc
            if ('sendPduHandle' in statusInformation):
                # MP failed on a response to one of our requests; expire it so
                # the caller's callback fires with the error.
                ((debug.logger & debug.FLAG_DSP) and debug.logger(('receiveMessage: MP failed, statusInformation %s, forcing retry' % statusInformation)))
                self.__expireRequest(statusInformation['sendPduHandle'], self._cache.pop(statusInformation['sendPduHandle']), snmpEngine, statusInformation)
            return restOfWholeMsg
        except PyAsn1Error as exc:
            ((debug.logger & debug.FLAG_MP) and debug.logger(('receiveMessage: %s' % exc)))
            (snmpInASNParseErrs,) = mibBuilder.importSymbols('__SNMPv2-MIB', 'snmpInASNParseErrs')
            snmpInASNParseErrs.syntax += 1
            return restOfWholeMsg
        ((debug.logger & debug.FLAG_DSP) and debug.logger(('receiveMessage: PDU %s' % PDU.prettyPrint())))
        if (sendPduHandle is None):
            # No handle -> this is an incoming request/notification.
            ((debug.logger & debug.FLAG_DSP) and debug.logger(('receiveMessage: pduType %s' % pduType)))
            processPdu = self.getRegisteredApp(contextEngineId, pduType)
            if (processPdu is None):
                # BUG FIX: was a bare `importSymbols(...)` (NameError at
                # runtime); resolve through mibBuilder as done elsewhere here.
                (snmpUnknownPDUHandlers,) = mibBuilder.importSymbols('__SNMP-MPD-MIB', 'snmpUnknownPDUHandlers')
                snmpUnknownPDUHandlers.syntax += 1
                statusInformation = {'errorIndication': errind.unknownPDUHandler, 'oid': snmpUnknownPDUHandlers.name, 'val': snmpUnknownPDUHandlers.syntax}
                ((debug.logger & debug.FLAG_DSP) and debug.logger('receiveMessage: unhandled PDU type'))
                try:
                    (destTransportDomain, destTransportAddress, outgoingMessage) = mpHandler.prepareResponseMessage(snmpEngine, messageProcessingModel, securityModel, securityName, securityLevel, contextEngineId, contextName, pduVersion, PDU, maxSizeResponseScopedPDU, stateReference, statusInformation)
                    snmpEngine.transportDispatcher.sendMessage(outgoingMessage, destTransportDomain, destTransportAddress)
                except PySnmpError as exc:
                    ((debug.logger & debug.FLAG_DSP) and debug.logger(('receiveMessage: report failed, statusInformation %s' % exc)))
                else:
                    ((debug.logger & debug.FLAG_DSP) and debug.logger('receiveMessage: reporting succeeded'))
                return restOfWholeMsg
            else:
                snmpEngine.observer.storeExecutionContext(snmpEngine, 'rfc3412.receiveMessage:request', dict(transportDomain=transportDomain, transportAddress=transportAddress, wholeMsg=wholeMsg, messageProcessingModel=messageProcessingModel, securityModel=securityModel, securityName=securityName, securityLevel=securityLevel, contextEngineId=contextEngineId, contextName=contextName, pdu=PDU))
                # Make transport info available to the app for the duration
                # of the processPdu call only.
                if (stateReference is not None):
                    self._transportInfo[stateReference] = (transportDomain, transportAddress)
                processPdu(snmpEngine, messageProcessingModel, securityModel, securityName, securityLevel, contextEngineId, contextName, pduVersion, PDU, maxSizeResponseScopedPDU, stateReference)
                snmpEngine.observer.clearExecutionContext(snmpEngine, 'rfc3412.receiveMessage:request')
                if (stateReference is not None):
                    del self._transportInfo[stateReference]
                ((debug.logger & debug.FLAG_DSP) and debug.logger('receiveMessage: processPdu initiated'))
                return restOfWholeMsg
        else:
            # Handle present -> this is a response to a request we sent.
            cachedParams = self._cache.pop(sendPduHandle)
            if (cachedParams is None):
                (snmpUnknownPDUHandlers,) = mibBuilder.importSymbols('__SNMP-MPD-MIB', 'snmpUnknownPDUHandlers')
                snmpUnknownPDUHandlers.syntax += 1
                return restOfWholeMsg
            ((debug.logger & debug.FLAG_DSP) and debug.logger(('receiveMessage: cache read by sendPduHandle %s' % sendPduHandle)))
            snmpEngine.observer.storeExecutionContext(snmpEngine, 'rfc3412.receiveMessage:response', dict(transportDomain=transportDomain, transportAddress=transportAddress, wholeMsg=wholeMsg, messageProcessingModel=messageProcessingModel, securityModel=securityModel, securityName=securityName, securityLevel=securityLevel, contextEngineId=contextEngineId, contextName=contextName, pdu=PDU))
            processResponsePdu = cachedParams['cbFun']
            processResponsePdu(snmpEngine, messageProcessingModel, securityModel, securityName, securityLevel, contextEngineId, contextName, pduVersion, PDU, statusInformation, cachedParams['sendPduHandle'], cachedParams['cbCtx'])
            snmpEngine.observer.clearExecutionContext(snmpEngine, 'rfc3412.receiveMessage:response')
            ((debug.logger & debug.FLAG_DSP) and debug.logger('receiveMessage: processResponsePdu succeeded'))
            return restOfWholeMsg

    def releaseStateInformation(self, snmpEngine, sendPduHandle, messageProcessingModel):
        """Drop MP-layer and cache state held for *sendPduHandle*."""
        k = int(messageProcessingModel)
        if (k in snmpEngine.messageProcessingSubsystems):
            mpHandler = snmpEngine.messageProcessingSubsystems[k]
            mpHandler.releaseStateInformation(sendPduHandle)
        self._cache.pop(sendPduHandle)

    def __expireRequest(self, cacheKey, cachedParams, snmpEngine, statusInformation=None):
        """Cache expiry callback: fire the request callback with a timeout
        (or the supplied error) once the deadline has passed."""
        timeNow = snmpEngine.transportDispatcher.getTimerTicks()
        timeoutAt = cachedParams['timeout']
        if ((statusInformation is None) and (timeNow < timeoutAt)):
            return
        processResponsePdu = cachedParams['cbFun']
        ((debug.logger & debug.FLAG_DSP) and debug.logger(('__expireRequest: req cachedParams %s' % cachedParams)))
        if (not statusInformation):
            statusInformation = error.StatusInformation(errorIndication=errind.requestTimedOut)
        self.releaseStateInformation(snmpEngine, cachedParams['sendPduHandle'], cachedParams['messageProcessingModel'])
        processResponsePdu(snmpEngine, None, None, None, None, None, None, None, None, statusInformation, cachedParams['sendPduHandle'], cachedParams['cbCtx'])
        return True

    def receiveTimerTick(self, snmpEngine, timeNow):
        """Periodic tick: expire any timed-out pending requests."""
        self._cache.expire(self.__expireRequest, snmpEngine)
def extractWuxiaTranslations(item):
    """Map a WuxiaTranslations feed item to a release message when its title
    names a known series; None for non-chapter/preview posts, False otherwise."""
    vol, chp, frag, postfix = extractVolChapterFragmentPostfix(item['title'])
    if (not (chp or vol)) or ('preview' in item['title'].lower()):
        return None
    known_series = ('A Martial Odyssey', 'Law of the Devil', 'Tensei Shitara Slime Datta Ken', 'The Nine Cauldrons', 'Sovereign of the Three Realms')
    for series in known_series:
        if (series in item['title']) and (chp or vol):
            return buildReleaseMessageWithType(item, series, vol, chp, frag=frag, postfix=postfix)
    return False
def test_qtclipboard_is_compatible_without_pyside6(monkeypatch, mock_import):
    """QtCopyHandler must report incompatible when QtGui (PySide6) is missing."""
    # Simulate an absent PySide6 installation.
    mock_import(parent_module=qtclipboard, import_name='QtGui', throw_exc=ImportError)
    # Clear session hints so compatibility detection cannot shortcut on them.
    for env_var in ('WAYLAND_DISPLAY', 'XDG_SESSION_TYPE'):
        monkeypatch.setenv(env_var, '')
    assert qtclipboard.QtCopyHandler().is_compatible is False
def extractSmallvestWordpressCom(item):
    """Map a smallvest.wordpress.com feed item to a release message via its
    tags; None for non-chapter/preview posts, False when no tag matches."""
    vol, chp, frag, postfix = extractVolChapterFragmentPostfix(item['title'])
    if (not (chp or vol)) or ('preview' in item['title'].lower()):
        return None
    tag_to_series = {
        'BMW': ('Bastard Male Wife', 'translated'),
        'Non-critical Elevator': ('Non-critical Elevator', 'translated'),
        'PRC': ('PRC', 'translated'),
        'Loiterous': ('Loiterous', 'oel'),
    }
    for tagname, (name, tl_type) in tag_to_series.items():
        if tagname in item['tags']:
            return buildReleaseMessageWithType(item, name, vol, chp, frag=frag, postfix=postfix, tl_type=tl_type)
    return False
def parse_experiments(experiments: List[str]) -> Dict[(str, Path)]:
    """Turn a flat [name, path, name, path, ...] list into a name -> Path dict.

    Raises AssertionError when the list does not hold an even number of items.
    """
    assert (len(experiments) % 2) == 0, 'experiments argument is wrong!'
    # Even indices are experiment names; each is followed by its log directory.
    names = experiments[0::2]
    raw_paths = experiments[1::2]
    return {name: Path(raw) for name, raw in zip(names, raw_paths)}
def test_switch_condition_node_as_case_node_child(task):
    """Restructuring an indirect-branch CFG must yield a switch whose case
    nodes can themselves contain condition nodes.

    NOTE(review): the calls ``scanf_call(var_2_1, , 2)`` and
    ``scanf_call(var_2_2, , 4)`` below are missing their second argument
    (presumably a format-string constant lost in extraction) and are not
    valid Python as written — recover the original literal before running.
    """
    # SSA-style variables used throughout the constructed basic blocks.
    var_0 = Variable('var_0', Integer(32, True), None, True, Variable('var_10', Integer(32, True), 0, True, None))
    var_1 = Variable('var_1', Integer(32, True), None, True, Variable('var_14', Integer(32, True), 0, True, None))
    var_2_1 = Variable('var_2', Pointer(Integer(32, True), 32), None, False, Variable('var_28', Pointer(Integer(32, True), 32), 1, False, None))
    var_2_2 = Variable('var_2', Pointer(Integer(32, True), 32), None, False, Variable('var_28_1', Pointer(Integer(32, True), 32), 2, False, None))
    # Build the control-flow graph: block 0 reads two ints, block 2 is an
    # indirect branch (switch) on var_0 - 4, the rest are case/return blocks.
    task.graph.add_nodes_from((vertices := [BasicBlock(0, [Assignment(ListOperation([]), print_call('Enter an even number between 4 and 14: ', 1)), Assignment(var_2_1, UnaryOperation(OperationType.address, [var_0], Pointer(Integer(32, True), 32), None, False)), Assignment(ListOperation([]), scanf_call(var_2_1, , 2)), Assignment(ListOperation([]), print_call('Enter 1 if you want to divide by two: ', 3)), Assignment(var_2_2, UnaryOperation(OperationType.address, [var_1], Pointer(Integer(32, True), 32), None, False)), Assignment(ListOperation([]), scanf_call(var_2_2, , 4)), Branch(Condition(OperationType.greater_us, [BinaryOperation(OperationType.minus, [var_0, Constant(4, Integer(32, True))], Integer(32, True)), Constant(10, Integer(32, True))], CustomType('bool', 1)))]), BasicBlock(2, [IndirectBranch(BinaryOperation(OperationType.minus, [var_0, Constant(4, Integer(32, True))], Integer(32, True)))]), BasicBlock(3, [Assignment(ListOperation([]), print_call('Not in the range', 13))]), BasicBlock(4, [Branch(Condition(OperationType.not_equal, [var_1, Constant(1, Integer(32, True))], CustomType('bool', 1)))]), BasicBlock(5, [Branch(Condition(OperationType.not_equal, [var_1, Constant(1, Integer(32, True))], CustomType('bool', 1)))]), BasicBlock(6, [Assignment(ListOperation([]), putchar_call(52, 9))]), BasicBlock(7, [Assignment(ListOperation([]), putchar_call(53, 10))]), BasicBlock(8, [Assignment(ListOperation([]), putchar_call(54, 11))]), BasicBlock(9, [Assignment(ListOperation([]), putchar_call(55, 12))]), BasicBlock(10, [Return(ListOperation([Constant(0, Integer(32, True))]))]), BasicBlock(11, [Assignment(ListOperation([]), putchar_call(52, 5))]), BasicBlock(12, [Assignment(ListOperation([]), putchar_call(50, 6))]), BasicBlock(14, [Assignment(ListOperation([]), putchar_call(51, 7))])]))
    task.graph.add_edges_from([FalseCase(vertices[0], vertices[1]), TrueCase(vertices[0], vertices[2]), SwitchCase(vertices[1], vertices[2], [Constant(i, Integer(32)) for i in (1, 3, 5, 7, 9)]), SwitchCase(vertices[1], vertices[3], [Constant(0, Integer(32))]), SwitchCase(vertices[1], vertices[4], [Constant(2, Integer(32))]), SwitchCase(vertices[1], vertices[5], [Constant(4, Integer(32))]), SwitchCase(vertices[1], vertices[6], [Constant(6, Integer(32))]), SwitchCase(vertices[1], vertices[7], [Constant(8, Integer(32))]), SwitchCase(vertices[1], vertices[8], [Constant(10, Integer(32))]), UnconditionalEdge(vertices[2], vertices[9]), TrueCase(vertices[3], vertices[10]), FalseCase(vertices[3], vertices[11]), FalseCase(vertices[4], vertices[12]), TrueCase(vertices[4], vertices[9]), UnconditionalEdge(vertices[5], vertices[9]), UnconditionalEdge(vertices[6], vertices[9]), UnconditionalEdge(vertices[7], vertices[9]), UnconditionalEdge(vertices[8], vertices[9]), UnconditionalEdge(vertices[10], vertices[9]), UnconditionalEdge(vertices[11], vertices[9]), UnconditionalEdge(vertices[12], vertices[9])])
    # Run the restructuring under test.
    PatternIndependentRestructuring().run(task)
    # The AST root must be: preamble code, the switch, then the epilogue.
    assert (isinstance((seq_node := task._ast.root), SeqNode) and (len(seq_node.children) == 3))
    assert (isinstance(seq_node.children[0], CodeNode) and (seq_node.children[0].instructions == vertices[0].instructions[:(- 1)]))
    assert isinstance((switch := seq_node.children[1]), SwitchNode)
    assert (isinstance(seq_node.children[2], CodeNode) and (seq_node.children[2].instructions == vertices[9].instructions))
    # Switch expression and the six explicit cases plus default.
    assert ((switch.expression == BinaryOperation(OperationType.minus, [var_0, Constant(4, Integer(32, True))], Integer(32, True))) and (len(switch.children) == 7))
    assert (isinstance((case1 := switch.cases[0]), CaseNode) and (case1.constant == Constant(0, Integer(32))) and (case1.break_case is True))
    assert (isinstance((case2 := switch.cases[1]), CaseNode) and (case2.constant == Constant(2, Integer(32))) and (case2.break_case is True))
    assert (isinstance((case3 := switch.cases[2]), CaseNode) and (case3.constant == Constant(4, Integer(32))) and (case3.break_case is True))
    assert (isinstance((case4 := switch.cases[3]), CaseNode) and (case4.constant == Constant(6, Integer(32))) and (case4.break_case is True))
    assert (isinstance((case5 := switch.cases[4]), CaseNode) and (case5.constant == Constant(8, Integer(32))) and (case5.break_case is True))
    assert (isinstance((case6 := switch.cases[5]), CaseNode) and (case6.constant == Constant(10, Integer(32))) and (case6.break_case is True))
    assert (isinstance((default := switch.default), CaseNode) and (default.constant == 'default') and (default.break_case is False))
    # Case 0 holds a full if/else condition node; branch polarity may be
    # flipped by the restructurer, so both orientations are accepted.
    assert isinstance((cond_node1 := case1.child), ConditionNode)
    assert (isinstance(cond_node1.true_branch.child, CodeNode) and isinstance(cond_node1.false_branch.child, CodeNode))
    if cond_node1.condition.is_symbol:
        assert (task._ast.condition_map[cond_node1.condition] == vertices[3].instructions[0].condition)
        assert (cond_node1.true_branch_child.instructions == vertices[10].instructions)
        assert (cond_node1.false_branch_child.instructions == vertices[11].instructions)
    else:
        assert (task._ast.condition_map[(~ cond_node1.condition)] == vertices[3].instructions[0].condition)
        assert (cond_node1.true_branch_child.instructions == vertices[11].instructions)
        assert (cond_node1.false_branch_child.instructions == vertices[10].instructions)
    # Case 2 holds a one-armed, negated condition node.
    assert isinstance((cond_node2 := case2.child), ConditionNode)
    assert (isinstance(cond_node2.true_branch.child, CodeNode) and (cond_node2.false_branch is None))
    assert (cond_node2.condition.is_negation and (~ cond_node2.condition).is_symbol)
    assert (task._ast.condition_map[(~ cond_node2.condition)] == vertices[4].instructions[0].condition)
    assert (cond_node2.true_branch_child.instructions == vertices[12].instructions)
    # Remaining cases and the default are plain code nodes.
    assert (isinstance(case3.child, CodeNode) and (case3.child.instructions == vertices[5].instructions))
    assert (isinstance(case4.child, CodeNode) and (case4.child.instructions == vertices[6].instructions))
    assert (isinstance(case5.child, CodeNode) and (case5.child.instructions == vertices[7].instructions))
    assert (isinstance(case6.child, CodeNode) and (case6.child.instructions == vertices[8].instructions))
    assert (isinstance(default.child, CodeNode) and (default.child.instructions == vertices[2].instructions))
class MidiSynth():
    """Thin convenience wrapper around a fluidsynth.Synth instance."""

    def __init__(self, soundfont_name=None):
        """Start the synthesizer, load a soundfont (the default one when
        *soundfont_name* is None), and select program 0."""
        self.synthesizer = fluidsynth.Synth()
        self.synthesizer.start()
        self.soundfont_id = self.load_soundfont(soundfont_name or DEFAULT_SOUND_FONT)
        self.select_midi_program(0)

    def load_soundfont(self, name):
        """Load the named soundfont from SOUNDFONTS_DIR; return its id."""
        return self.synthesizer.sfload(os.path.join(SOUNDFONTS_DIR, name))

    def select_midi_program(self, program_id, channel=0, bank_id=0):
        """Point *channel* at a program within the loaded soundfont."""
        self.synthesizer.program_select(channel, self.soundfont_id, bank_id, program_id)

    def note_on(self, note_value, channel=0, velocity=100):
        """Begin sounding a note."""
        self.synthesizer.noteon(channel, note_value, velocity)

    def note_off(self, note_value, channel=0):
        """Stop sounding a note."""
        self.synthesizer.noteoff(channel, note_value)

    def set_sustain(self, value, channel=0):
        """Send MIDI CC 64 (sustain pedal)."""
        self.synthesizer.cc(channel, 64, value)

    def set_volume(self, value, channel=0):
        """Send MIDI CC 7 (channel volume)."""
        self.synthesizer.cc(channel, 7, value)

    def set_chorus(self, value, channel=0):
        """Send MIDI CC 93 (chorus depth)."""
        self.synthesizer.cc(channel, 93, value)

    def set_reverb(self, value, channel=0):
        """Send MIDI CC 91 (reverb depth)."""
        self.synthesizer.cc(channel, 91, value)
class sessions():
    """Message and template strings used by the session-management module."""

    # Feedback templates for setting variables: 'module.var = value' / 'var = value'.
    set_module_s_s_s = '%s.%s = %s'
    set_s_s = '%s = %s'
    # Feedback templates for unsetting variables.
    unset_module_s_s = '%s.%s is now unset'
    unset_s = '%s is now unset'
    # Error messages shown when loading or modifying a session fails.
    error_loading_sessions = 'Session loading error'
    error_session_s_not_modified = "Error setting session variable '%s'"
    # Mako template rendering a 'user@host:path' style connection banner;
    # falls back to the hostname parsed from the URL when no host is set.
    connection_info = "<%!\nfrom urllib.parse import urlparse\n%><%\nif not host:\n urlparsed = urlparse(url)\n if urlparsed and urlparsed.netloc:\n hostname = urlparsed.netloc\n else:\n hostname = 'undefined host'\nelse:\n hostname = host\n%>${'%' % user if user else ''}${hostname}${':%s' % path if path and path != '.' else ''}"
class OptionPlotoptionsColumnrangeSonificationContexttracksMapping(Options):
    """Sonification mapping options for columnrange context tracks.

    NOTE(review): the ``text`` option appears as a getter/setter pair
    sharing one name; upstream such pairs are ``@property`` /
    ``@<name>.setter`` — the decorators look stripped here, so as written
    the second ``def text`` shadows the first. Confirm against the
    generator output before relying on attribute access.
    """

    def frequency(self) -> 'OptionPlotoptionsColumnrangeSonificationContexttracksMappingFrequency':
        """Sub-options controlling the frequency mapping."""
        return self._config_sub_data('frequency', OptionPlotoptionsColumnrangeSonificationContexttracksMappingFrequency)

    def gapBetweenNotes(self) -> 'OptionPlotoptionsColumnrangeSonificationContexttracksMappingGapbetweennotes':
        """Sub-options controlling the gap between notes."""
        return self._config_sub_data('gapBetweenNotes', OptionPlotoptionsColumnrangeSonificationContexttracksMappingGapbetweennotes)

    def highpass(self) -> 'OptionPlotoptionsColumnrangeSonificationContexttracksMappingHighpass':
        """Sub-options for the high-pass filter mapping."""
        return self._config_sub_data('highpass', OptionPlotoptionsColumnrangeSonificationContexttracksMappingHighpass)

    def lowpass(self) -> 'OptionPlotoptionsColumnrangeSonificationContexttracksMappingLowpass':
        """Sub-options for the low-pass filter mapping."""
        return self._config_sub_data('lowpass', OptionPlotoptionsColumnrangeSonificationContexttracksMappingLowpass)

    def noteDuration(self) -> 'OptionPlotoptionsColumnrangeSonificationContexttracksMappingNoteduration':
        """Sub-options controlling note duration."""
        return self._config_sub_data('noteDuration', OptionPlotoptionsColumnrangeSonificationContexttracksMappingNoteduration)

    def pan(self) -> 'OptionPlotoptionsColumnrangeSonificationContexttracksMappingPan':
        """Sub-options controlling stereo panning."""
        return self._config_sub_data('pan', OptionPlotoptionsColumnrangeSonificationContexttracksMappingPan)

    def pitch(self) -> 'OptionPlotoptionsColumnrangeSonificationContexttracksMappingPitch':
        """Sub-options controlling pitch mapping."""
        return self._config_sub_data('pitch', OptionPlotoptionsColumnrangeSonificationContexttracksMappingPitch)

    def playDelay(self) -> 'OptionPlotoptionsColumnrangeSonificationContexttracksMappingPlaydelay':
        """Sub-options controlling play delay."""
        return self._config_sub_data('playDelay', OptionPlotoptionsColumnrangeSonificationContexttracksMappingPlaydelay)

    def rate(self) -> 'OptionPlotoptionsColumnrangeSonificationContexttracksMappingRate':
        """Sub-options controlling playback rate."""
        return self._config_sub_data('rate', OptionPlotoptionsColumnrangeSonificationContexttracksMappingRate)

    def text(self):
        """Getter for the 'text' scalar option (see class NOTE about stripped decorators)."""
        return self._config_get(None)

    def text(self, text: str):
        """Setter for the 'text' scalar option."""
        self._config(text, js_type=False)

    def time(self) -> 'OptionPlotoptionsColumnrangeSonificationContexttracksMappingTime':
        """Sub-options controlling time mapping."""
        return self._config_sub_data('time', OptionPlotoptionsColumnrangeSonificationContexttracksMappingTime)

    def tremolo(self) -> 'OptionPlotoptionsColumnrangeSonificationContexttracksMappingTremolo':
        """Sub-options controlling tremolo."""
        return self._config_sub_data('tremolo', OptionPlotoptionsColumnrangeSonificationContexttracksMappingTremolo)

    def volume(self) -> 'OptionPlotoptionsColumnrangeSonificationContexttracksMappingVolume':
        """Sub-options controlling volume."""
        return self._config_sub_data('volume', OptionPlotoptionsColumnrangeSonificationContexttracksMappingVolume)
class DistilbertStudentModel(nn.Module):
    """DistilBERT student initialized from a BERT teacher's weights.

    Selected teacher encoder layers are copied into the student's
    transformer layers so distillation starts from the teacher's weights.
    """

    def __init__(self, teacher_model_name: str='bert-base-uncased', layers: List[int]=None, extract: bool=True):
        """Build the student, optionally seeding it from the teacher.

        Args:
            teacher_model_name: HF hub name of the BERT teacher.
            layers: teacher layer indices to copy into consecutive student
                layers (defaults to [0, 2, 4, 7, 9, 11]).
            extract: when True, initialize the student from the teacher's
                extracted weights; otherwise use the pretrained checkpoint.
        """
        super().__init__()
        if layers is None:
            layers = [0, 2, 4, 7, 9, 11]
        teacher_config = transformers.AutoConfig.from_pretrained(teacher_model_name, output_hidden_states=True, output_logits=True)
        teacher = transformers.BertForMaskedLM.from_pretrained(teacher_model_name, config=teacher_config)
        distil_sd = None
        if extract:
            distil_sd = self._extract(teacher, layers)
        # Pick the DistilBERT checkpoint matching the teacher's casing /
        # vocabulary; anything else falls back to the multilingual model.
        if teacher_model_name == 'bert-base-uncased':
            student_name = 'distilbert-base-uncased'
        elif teacher_model_name == 'bert-base-cased':
            student_name = 'distilbert-base-cased'
        else:
            student_name = 'distilbert-base-multilingual-cased'
        student_config = transformers.AutoConfig.from_pretrained(student_name, output_hidden_states=True, output_logits=True)
        self.student = transformers.DistilBertForMaskedLM.from_pretrained(student_name, config=student_config, state_dict=distil_sd)

    def forward(self, *model_args, **model_kwargs):
        """Delegate straight to the wrapped DistilBERT student."""
        return self.student(*model_args, **model_kwargs)

    @staticmethod
    def _extract(teacher_model, layers: List[int], prefix_teacher: str='bert', prefix_student: str='distilbert') -> Dict[str, torch.Tensor]:
        """Build a DistilBERT state dict from selected teacher layers.

        Fix: declared ``@staticmethod`` — the method takes no ``self`` yet is
        invoked as ``self._extract(teacher, layers)``; without the decorator
        the instance was bound to ``teacher_model`` and the arguments shifted.
        """
        state_dict = teacher_model.state_dict()
        compressed_sd = {}
        # Embedding tables and their LayerNorm are copied wholesale.
        for w in ['word_embeddings', 'position_embeddings']:
            compressed_sd[f'{prefix_student}.embeddings.{w}.weight'] = state_dict[f'{prefix_teacher}.embeddings.{w}.weight']
        for w in ['weight', 'bias']:
            compressed_sd[f'{prefix_student}.embeddings.LayerNorm.{w}'] = state_dict[f'{prefix_teacher}.embeddings.LayerNorm.{w}']
        # Map each selected teacher encoder layer onto consecutive student layers.
        std_idx = 0
        for teacher_idx in layers:
            for w in ['weight', 'bias']:
                compressed_sd[f'{prefix_student}.transformer.layer.{std_idx}.attention.q_lin.{w}'] = state_dict[f'{prefix_teacher}.encoder.layer.{teacher_idx}.attention.self.query.{w}']
                compressed_sd[f'{prefix_student}.transformer.layer.{std_idx}.attention.k_lin.{w}'] = state_dict[f'{prefix_teacher}.encoder.layer.{teacher_idx}.attention.self.key.{w}']
                compressed_sd[f'{prefix_student}.transformer.layer.{std_idx}.attention.v_lin.{w}'] = state_dict[f'{prefix_teacher}.encoder.layer.{teacher_idx}.attention.self.value.{w}']
                compressed_sd[f'{prefix_student}.transformer.layer.{std_idx}.attention.out_lin.{w}'] = state_dict[f'{prefix_teacher}.encoder.layer.{teacher_idx}.attention.output.dense.{w}']
                compressed_sd[f'{prefix_student}.transformer.layer.{std_idx}.sa_layer_norm.{w}'] = state_dict[f'{prefix_teacher}.encoder.layer.{teacher_idx}.attention.output.LayerNorm.{w}']
                compressed_sd[f'{prefix_student}.transformer.layer.{std_idx}.ffn.lin1.{w}'] = state_dict[f'{prefix_teacher}.encoder.layer.{teacher_idx}.intermediate.dense.{w}']
                compressed_sd[f'{prefix_student}.transformer.layer.{std_idx}.ffn.lin2.{w}'] = state_dict[f'{prefix_teacher}.encoder.layer.{teacher_idx}.output.dense.{w}']
                compressed_sd[f'{prefix_student}.transformer.layer.{std_idx}.output_layer_norm.{w}'] = state_dict[f'{prefix_teacher}.encoder.layer.{teacher_idx}.output.LayerNorm.{w}']
            std_idx += 1
        # Masked-LM head.
        compressed_sd['vocab_projector.weight'] = state_dict['cls.predictions.decoder.weight']
        compressed_sd['vocab_projector.bias'] = state_dict['cls.predictions.bias']
        for w in ['weight', 'bias']:
            compressed_sd[f'vocab_transform.{w}'] = state_dict[f'cls.predictions.transform.dense.{w}']
            compressed_sd[f'vocab_layer_norm.{w}'] = state_dict[f'cls.predictions.transform.LayerNorm.{w}']
        return compressed_sd
class MessageBoard(object):
    """A bordered box that renders lines of text onto a pygame surface."""

    def __init__(self, surface, rect, text, font=('arial', 20), font_color=Color('white'), bgcolor=Color('gray25'), border_width=0, border_color=Color('black')):
        """Store drawing state and build the backing Box widget."""
        self.surface = surface
        self.rect = rect
        self.text = text
        self.bgcolor = bgcolor
        self.font = pygame.font.SysFont(*font)
        self.font_color = font_color
        self.border_width = border_width
        self.box = Box(surface, rect, bgcolor, border_width, border_color)

    def draw(self):
        """Draw the box, then render each text line top-down inside the border.

        Raises LayoutError when a rendered line would overflow the widget rect.
        """
        self.box.draw()
        inner = Rect(
            self.rect.left + self.border_width,
            self.rect.top + self.border_width,
            self.rect.width - self.border_width * 2,
            self.rect.height - self.border_width * 2,
        )
        left = inner.left
        top = inner.top
        for line in self.text:
            rendered = self.font.render(line, True, self.font_color, self.bgcolor)
            overflows_right = rendered.get_width() + left > self.rect.right
            overflows_bottom = rendered.get_height() + top > self.rect.bottom
            if overflows_right or overflows_bottom:
                raise LayoutError('Cannot fit line "%s" in widget' % line)
            self.surface.blit(rendered, (left, top))
            top += rendered.get_height()
class Solution():
    """Insert a value into a sorted circular singly-linked list."""

    def insert(self, head: 'Node', insertVal: int) -> 'Node':
        """Insert *insertVal* so the circular list stays sorted; return the head.

        Handles three cases while walking the ring: the max->min seam,
        an in-order slot between two neighbours, and a completed lap
        (all values equal), in that order of precedence.
        """
        fresh = Node(insertVal, head)
        if not head:
            return fresh
        current = head
        while True:
            at_seam = current.next.val < current.val
            if at_seam and (insertVal <= current.next.val or insertVal >= current.val):
                # Value belongs at the wrap-around point (new min or new max).
                break
            if current.val <= insertVal <= current.next.val:
                # Value fits between two in-order neighbours.
                break
            if current.next == head:
                # Full lap with no slot found: insert anywhere.
                break
            current = current.next
        fresh.next = current.next
        current.next = fresh
        return head
class AD_Structure(Packet):
    """One advertising-data (AD) structure from a BLE advertisement.

    Wire layout: <length: 1 byte> <EIR/AD type: 1 byte> <payload>.
    ``payload`` holds the decoded type header followed by the decoded
    type-specific field.
    """

    def __init__(self):
        self.name = ''
        self.length = 0
        self.payload = []

    def __len__(self):
        return self.length

    def decode(self, data):
        """Consume one AD structure from *data*; return the remaining bytes."""
        length = UIntByte('sublen')
        data = length.decode(data)
        self.length = len(length) + length.val
        self.payload = []
        if length.val == 0:
            # Zero-length structure: no type byte, no payload.
            return data
        ad_type = EIR_Hdr()
        data = ad_type.decode(data)
        # EIR/AD type -> payload-field factory. Lambdas keep construction
        # lazy so only the matching decoder is instantiated.
        factories = {
            1: lambda: BitFieldByte('flags', 0, ['Undef', 'Undef', 'Simul LE - BR/EDR (Host)', 'Simul LE - BR/EDR (Control.)', 'BR/EDR Not Supported', 'LE General Disc.', 'LE Limited Disc.']),
            2: lambda: NBytes_List('Incomplete uuids', 2),
            3: lambda: NBytes_List('Complete uuids', 2),
            4: lambda: NBytes_List('Incomplete uuids', 4),
            5: lambda: NBytes_List('Complete uuids', 4),
            6: lambda: NBytes_List('Incomplete uuids', 16),
            7: lambda: NBytes_List('Complete uuids', 16),
            8: lambda: String('Short Name'),
            9: lambda: String('Complete Name'),
            20: lambda: NBytes_List('Service Solicitation uuid', 2),
            21: lambda: NBytes_List('Service Solicitation uuid', 16),
            22: lambda: Adv_Data('Advertised Data', 2),
            31: lambda: NBytes_List('Service Solicitation uuid', 4),
            32: lambda: Adv_Data('Advertised Data', 4),
            33: lambda: Adv_Data('Advertised Data', 16),
            255: lambda: ManufacturerSpecificData(),
        }
        make = factories.get(ad_type.val)
        if make is None:
            # Unknown type: keep the raw payload bytes.
            val = Itself('Payload for %s' % ad_type.strval)
        else:
            val = make()
        payload_len = length.val - len(ad_type)
        val.decode(data[:payload_len])
        self.payload.append(ad_type)
        self.payload.append(val)
        return data[payload_len:]

    def show(self, depth=0):
        """Pretty-print each decoded field one indent level deeper."""
        for field in self.payload:
            field.show(depth + 1)
def update_values_from_carray(self: GridProperty, carray: cArray, dtype: DTypeLike, delete: bool=False) -> None:
    """Refresh ``self.values`` from a SWIG C array, then mask undefined cells.

    *dtype* selects the conversion path: float64 keeps the property
    continuous, anything else uses the int32 converter and marks the
    property discrete. When *delete* is set the C array is freed afterwards.
    """
    logger.debug('Update numpy from C array values')
    n_total = self.ntotal
    self._isdiscrete = False
    if dtype != np.float64:
        logger.info('Entering conversion to numpy (int32) ...')
        flat = _cxtgeo.swig_carr_to_numpy_i1d(n_total, carray)
        self._isdiscrete = True
    else:
        logger.info('Entering conversion to numpy (float64) ...')
        flat = _cxtgeo.swig_carr_to_numpy_1d(n_total, carray)
    # Fortran-ordered 1D buffer -> C-ordered (ncol, nrow, nlay) array.
    shaped = np.reshape(flat, (self._ncol, self._nrow, self._nlay), order='F')
    self.values = np.asanyarray(shaped, order='C')
    self.mask_undef()
    if delete:
        delete_carray(self, carray)
# NOTE(review): the bare expression below looks like a stripped decorator
# (likely ``@register_deserializable``) — confirm against upstream before use.
_deserializable
class ChromaDbConfig(BaseVectorDbConfig):
    """Configuration for a Chroma vector database connection."""

    def __init__(self, collection_name: Optional[str]=None, dir: Optional[str]=None, host: Optional[str]=None, port: Optional[str]=None, allow_reset=False, chroma_settings: Optional[dict]=None):
        """
        :param collection_name: name of the Chroma collection
        :param dir: local persistence directory
        :param host: remote Chroma server host
        :param port: remote Chroma server port
        :param allow_reset: whether resetting the database is permitted
        :param chroma_settings: extra chromadb client settings as a dict
        """
        # Chroma-specific options live here; shared connection fields are
        # delegated to the base config.
        self.chroma_settings = chroma_settings
        self.allow_reset = allow_reset
        super().__init__(collection_name=collection_name, dir=dir, host=host, port=port)
def ui():
    """Build the AgentOoba Gradio interface.

    Lays out the output pane, goal input, option sliders, tool toggles and
    prompt editors, then wires the submit/cancel events to the agent main
    loop and the prompt import/export helpers. Relies on module-level
    ``AgentOobaVars``, the ``*_DEFAULT`` constants and helpers such as
    ``setup_tools`` and ``mainloop``.
    """
    state = gr.State({})
    with gr.Column(elem_classes='oobaAgentBase'):
        with gr.Accordion(label='Output'):
            output = gr.HTML(label='Output', value='')
        user_input = gr.Textbox(label='Goal for AgentOoba')
        with gr.Row():
            submit_button = gr.Button('Execute', variant='primary')
            cancel_button = gr.Button('Cancel')
        # Agent tuning parameters.
        with gr.Accordion(label='Options', open=False):
            with gr.Column():
                recursion_level_slider = gr.Slider(label='Recursion Depth', minimum=1, maximum=7, step=1, value=RECURSION_DEPTH_DEFAULT, interactive=True)
                distance_cutoff_slider = gr.Slider(label='Task Similarity Cutoff (Higher = less repeat tasks, but might accidentally drop tasks)', minimum=0, maximum=1, step=0.01, value=DISTANCE_CUTOFF_DEFAULT)
                max_tasks_slider = gr.Slider(label='Max tasks in a list', minimum=3, maximum=12, step=1, value=MAX_TASKS_DEFAULT, interactive=True)
                expanded_context_toggle = gr.Checkbox(label='Expanded Context (runs out of memory at high recursion)', value=EXPANDED_CONTEXT_DEFAULT)
        # Per-tool enable/execute toggles plus an editable description.
        with gr.Accordion(label='Tools', open=False):
            setup_tools()
            for tool_name in AgentOobaVars['tools']:
                with gr.Row():
                    cb_active = gr.Checkbox(label=tool_name, value=False, interactive=True)
                    # Lambda defaults bind tool_name/statetype per iteration
                    # (avoids the late-binding-closure pitfall).
                    cb_active.change((lambda x, tn=tool_name, statetype='active': update_tool_state(tn, statetype, x)), [cb_active])
                    cb_execute = gr.Checkbox(label='Execute', value=False, interactive=True)
                    cb_execute.change((lambda x, tn=tool_name, statetype='execute': update_tool_state(tn, statetype, x)), [cb_execute])
                    textbox = gr.Textbox(label='Tool description (as passed to the model)', interactive=True, value=AgentOobaVars['tools'][tool_name]['desc'])
                    textbox.change((lambda x, tn=tool_name: update_tool_description(tn, x)), [textbox])
        # Prompt templates: editable inputs with hidden defaults for reset.
        with gr.Accordion(label='Prompting', open=False):
            with gr.Row():
                human_prefix_input = gr.Textbox(label='Human prefix', value=HUMAN_PREFIX)
                assistant_prefix_input = gr.Textbox(label='Assistant prefix', value=ASSISTANT_PREFIX)
            human_prefix_def = gr.Textbox(visible=False, value=HUMAN_PREFIX)
            assistant_prefix_def = gr.Textbox(visible=False, value=ASSISTANT_PREFIX)
            directive_inputs = []
            directive_defaults = []
            for (directive_name, directive) in AgentOobaVars['directives'].items():
                directive_inputs.append(gr.TextArea(label=directive_name, value=directive))
                directive_defaults.append(gr.Textbox(visible=False, value=directive))
            prompt_inputs = (directive_inputs + [human_prefix_input, assistant_prefix_input])
            prompt_defaults = (directive_defaults + [human_prefix_def, assistant_prefix_def])
            reset_prompts_button = gr.Button('Reset prompts to default')
            with gr.Row():
                export_prompts_button = gr.Button('Export prompts to JSON')
                import_prompts_button = gr.Button('Import prompts from JSON')
            with gr.Row():
                exported_prompts = gr.File(interactive=False)
                imported_prompts = gr.File(interactive=True, type='binary')
        # Submit (button or textbox-enter): snapshot UI state, push the
        # chosen parameters into AgentOobaVars, then run the agent loop.
        submit_event_1 = submit_button.click(gather_interface_values, inputs=gradio(list_interface_input_elements()), outputs=state).then(gather_agentooba_parameters, inputs=([recursion_level_slider, distance_cutoff_slider, max_tasks_slider, expanded_context_toggle, human_prefix_input, assistant_prefix_input] + directive_inputs), outputs=None).then(mainloop, inputs=[user_input, state], outputs=output)
        submit_event_2 = user_input.submit(gather_interface_values, inputs=gradio(list_interface_input_elements()), outputs=state).then(gather_agentooba_parameters, inputs=([recursion_level_slider, distance_cutoff_slider, max_tasks_slider, expanded_context_toggle, human_prefix_input, assistant_prefix_input] + directive_inputs), outputs=None).then(mainloop, inputs=[user_input, state], outputs=output)
        def cancel_agent():
            """Mark the running objective done and clear the output pane."""
            AgentOobaVars['main-objective'].done = True
            output.value = ''
        cancel_event = cancel_button.click(cancel_agent, None, None, cancels=[submit_event_1, submit_event_2])
        reset_event = reset_prompts_button.click((lambda a, b, c, d, e, f, g, h, i, j: [a, b, c, d, e, f, g, h, i, j]), inputs=prompt_defaults, outputs=prompt_inputs)
        def make_prompt_template():
            """Write the current directives/prefixes to a JSON file and return its path."""
            d = AgentOobaVars['directives'].copy()
            d['human-prefix'] = AgentOobaVars['human-prefix']
            d['assistant-prefix'] = AgentOobaVars['assistant-prefix']
            with open('extensions/AgentOoba/prompt_template.json', 'w') as f:
                f.write(json.dumps(d))
                f.flush()
            return 'extensions/AgentOoba/prompt_template.json'
        def import_prompt_template(template):
            """Load prompt values from an uploaded JSON blob; keep current values when empty."""
            if (not template):
                return ([p.value for p in prompt_inputs] + [human_prefix_input.value, assistant_prefix_input.value])
            d = json.loads(template)
            return [d['Primary directive'], d['Assess ability directive'], d['Do objective directive'], d['Split objective directive'], d['Assess tool directive'], d['Use tool directive'], d['Generate thoughts directive'], d['Summarize directive'], d['human-prefix'], d['assistant-prefix']]
        export_event = export_prompts_button.click(make_prompt_template, inputs=None, outputs=[exported_prompts])
        import_event = import_prompts_button.click(import_prompt_template, inputs=imported_prompts, outputs=prompt_inputs)
class bsn_stats_request(experimenter_stats_request):
    """Big Switch Networks experimenter stats request message.

    The class constants identify the message on the wire (OpenFlow v5,
    type 18, experimenter stats with the BSN experimenter id); ``subtype``
    selects the concrete request, with concrete subclasses registered in
    ``subtypes``.
    """
    subtypes = {}
    version = 5
    type = 18
    stats_type = 65535
    experimenter = 6035143

    def __init__(self, xid=None, flags=None, subtype=None):
        """Create a request; ``flags`` and ``subtype`` default to 0."""
        self.xid = xid
        self.flags = flags if flags is not None else 0
        self.subtype = subtype if subtype is not None else 0

    def pack(self):
        """Serialize the message and return it as bytes.

        Bug fix: the 4-byte pad and the final join must be bytes —
        ``struct.pack`` returns bytes on Python 3, so the original
        ``'\\x00' * 4`` / ``''.join(packed)`` raised TypeError.
        """
        packed = []
        packed.append(struct.pack('!B', self.version))
        packed.append(struct.pack('!B', self.type))
        packed.append(struct.pack('!H', 0))  # length placeholder, patched below
        packed.append(struct.pack('!L', self.xid))
        packed.append(struct.pack('!H', self.stats_type))
        packed.append(struct.pack('!H', self.flags))
        packed.append(b'\x00' * 4)  # pad
        packed.append(struct.pack('!L', self.experimenter))
        packed.append(struct.pack('!L', self.subtype))
        length = sum(len(x) for x in packed)
        packed[2] = struct.pack('!H', length)
        return b''.join(packed)

    @staticmethod
    def unpack(reader):
        """Deserialize from *reader*, dispatching to a registered subtype if known.

        Declared ``@staticmethod`` to match how it is invoked
        (``cls.unpack(reader)`` with no instance argument).
        """
        (subtype,) = reader.peek('!L', 20)
        subclass = bsn_stats_request.subtypes.get(subtype)
        if subclass:
            return subclass.unpack(reader)
        obj = bsn_stats_request()
        _version = reader.read('!B')[0]
        assert (_version == 5)
        _type = reader.read('!B')[0]
        assert (_type == 18)
        _length = reader.read('!H')[0]
        orig_reader = reader
        reader = orig_reader.slice(_length, 4)
        obj.xid = reader.read('!L')[0]
        _stats_type = reader.read('!H')[0]
        assert (_stats_type == 65535)
        obj.flags = reader.read('!H')[0]
        reader.skip(4)
        _experimenter = reader.read('!L')[0]
        assert (_experimenter == 6035143)
        obj.subtype = reader.read('!L')[0]
        return obj

    def __eq__(self, other):
        """Equal when the other object has the same type and field values."""
        if type(self) != type(other):
            return False
        return (self.xid == other.xid
                and self.flags == other.flags
                and self.subtype == other.subtype)

    def pretty_print(self, q):
        """Render a human-readable dump of the message onto printer *q*."""
        q.text('bsn_stats_request {')
        with q.group():
            with q.indent(2):
                q.breakable()
                q.text('xid = ')
                if self.xid != None:
                    q.text('%#x' % self.xid)
                else:
                    q.text('None')
                q.text(',')
                q.breakable()
                q.text('flags = ')
                value_name_map = {1: 'OFPSF_REQ_MORE'}
                q.text(util.pretty_flags(self.flags, value_name_map.values()))
            q.breakable()
        q.text('}')
def create_award_level_string(download_types):
    """Join the display names of *download_types* with the literal 'And'.

    For each award level, ``type_name`` from VALUE_MAPPINGS is preferred,
    falling back to ``download_name``; the literal 'And' separator matches
    the download-file naming convention.
    """
    names = []
    for award_level in download_types:
        mapping = VALUE_MAPPINGS[award_level]
        names.append(mapping['type_name'] if 'type_name' in mapping else mapping['download_name'])
    return 'And'.join(names)
class UniformAgent(Agent):
    """Agent that samples actions from a softmax over fresh Gaussian scores.

    Each call draws random per-action scores, softmaxes them into a
    categorical distribution and samples one action per batch element,
    while tracking a per-element timestep in its state.
    """

    def __init__(self, n_actions):
        super().__init__()
        self.n_actions = n_actions

    def __call__(self, state, observation, agent_info=None, history=None):
        """Return (previous_state, action DictTensor, next_state)."""
        batch_size = observation.n_elems()
        if agent_info is None:
            agent_info = DictTensor({'agent_id': torch.tensor([0]).repeat(batch_size)})
        if state is None:
            # First step: start every element at timestep 0.
            prev_state = DictTensor({'timestep': torch.zeros(batch_size).long()})
        else:
            prev_state = state
        logits = torch.randn(batch_size, self.n_actions)
        probs = torch.softmax(logits, dim=1)
        chosen = torch.distributions.Categorical(probs).sample()
        next_state = DictTensor({'timestep': prev_state['timestep'] + 1})
        action_out = DictTensor({'action': chosen, 'action_probabilities': probs, 'agent_id': agent_info['agent_id']})
        return (prev_state, action_out, next_state)
class TestMenuTemplateParse(BaseEvenniaTest):
    """Tests for evmenu's menu-template parser and template-to-menu builder."""

    def setUp(self):
        """Define a three-node menu template and the goto callables it references."""
        super().setUp()
        self.menu_template = '\n ## node start\n\n Neque ea alias perferendis molestiae eligendi. Debitis exercitationem\n exercitationem quas blanditiis quisquam officia ut. Fugit aut fugit enim quia\n non. Earum et excepturi animi ex esse accusantium et. Id adipisci eos enim\n ratione.\n\n ## options\n\n 1: first option -> node1\n 2: second option -> node2\n next: node1\n\n ## node node1\n\n Node 1\n\n ## options\n\n fwd: node2\n call1: callnode1()\n call2: callnode2(foo=bar, bar=22, goo="another test")\n >: start\n\n ## node node2\n\n Text of node 2\n\n ## options\n\n > foo*: node1\n > [0-9]+?: node2\n > back: start\n\n '
        self.goto_callables = {'callnode1': _callnode1, 'callnode2': _callnode2}

    def test_parse_menu_template(self):
        """The template parses into a menutree with exactly the three node keys."""
        menutree = evmenu.parse_menu_template(self.char1, self.menu_template, self.goto_callables)
        self.assertEqual(menutree, {'start': Anything, 'node1': Anything, 'node2': Anything})

    def test_template2menu(self):
        """template2menu builds a menu from the template without raising."""
        evmenu.template2menu(self.char1, self.menu_template, self.goto_callables)

    def test_parse_menu_fail(self):
        """Invalid goto-callable arguments in a template raise RuntimeError."""
        template = '\n ## NODE\n\n Text\n\n ## OPTIONS\n\n next: callnode2(invalid)\n '
        with self.assertRaises(RuntimeError):
            evmenu.parse_menu_template(self.char1, template, self.goto_callables)
class Test(unittest.TestCase):
    """Tokenizer tests for fractlexer (the fractal-formula lexer)."""

    def setUp(self):
        # Reuse the shared module-level lexer, resetting its line counter
        # so per-test line-number assertions start from 1.
        self.lexer = fractlexer.lexer
        self.lexer.lineno = 1

    def tokensFromFile(self, f):
        """Tokenize the contents of file path *f*."""
        data = open(f).read()
        return self.tokensFromString(data)

    def tokensFromString(self, data):
        """Run the lexer over *data* and collect every token into a list."""
        self.lexer.input(data)
        toklist = []
        while True:
            tok = self.lexer.token()
            if (not tok):
                break
            toklist.append(tok)
        return toklist

    def testUGR(self):
        """A UGR gradient block lexes into form-id, section and constant tokens."""
        tokens = self.tokensFromString('cl1rorangemixed {\ngradient:\n title="cl1rorangemixed" smooth=no\n index=0 color=5153516\n index=2 color=5087212\n index=399 color=5349352\n}\n ')
        self.assertEqual(tokens[0].type, 'FORM_ID')
        self.assertEqual(tokens[2].type, 'SECT_PARMS')
        self.assertEqual(tokens[9].type, 'CONST')

    def testEmpty(self):
        """Empty input yields no tokens."""
        self.assertEqual(self.tokensFromString(''), [])

    def testBasics(self):
        """Comments, sections, strings with escaped newlines and line counts all lex correctly."""
        tokens = self.tokensFromString('; Formulas from Andre Vandergoten\n; (2 + #hash foo ^|+| "hello" a coment containing expressions\nAAA-5-grt{\ninit:\n z = #pixel\nloop:\n if == 0\n z = z^/(#pixel)\n endif\nbailout:\n |z|>\ndefault:\n title = "foo;bar\\\n baz"\n}\n')
        self.assertTrue((tokens[0].type == tokens[1].type == 'NEWLINE'), 'first 2 should be newlines')
        str = [tok for tok in tokens if (tok.type == 'STRING')]
        self.assertTrue(((len(str) == 1) and (str[0].value == 'foo;barbaz')), ('string literal parsing problem' and (str[0].lineno == 14)))
        sections = [tok for tok in tokens if (tok.type == 'SECT_STM')]
        self.assertEqual(len(sections), 3, 'wrong number of sections')
        self.assertEqual(sections[0].lineno, 4, 'line counting wrong')
        self.assertEqual(sections[2].lineno, 10, 'line counting wrong')

    def testBadChars(self):
        """Illegal characters surface as 'error' tokens with correct line numbers."""
        tokens = self.tokensFromString("$ hello ~\n ` ' goodbye")
        self.assertTrue(((tokens[0].type == 'error') and (tokens[0].value == '$')))
        self.assertEqual(tokens[4].type, 'error')
        self.assertEqual(tokens[4].value, '`')
        self.assertEqual(tokens[4].lineno, 2)

    def testFormIDs(self):
        """Unusual formula names still lex as form identifiers."""
        tokens = self.tokensFromString('\n=05 { }\n0008 { }\n-fred- { }\n')
        for token in tokens:
            self.assertTrue(((token.type == 'FORM_ID') or (token.type == 'FORM_END') or (token.type == 'NEWLINE')))

    def testIDs(self):
        """Underscore/hash-prefixed names lex as plain identifiers."""
        tokens = self.tokensFromString('_bailout_part _314159 #__ hello f_i7')
        for t in tokens:
            self.assertTrue((t.type == 'ID'))

    def testCommentFormula(self):
        """Formulas whose name starts with ';' are skipped entirely."""
        tokens = self.tokensFromString('\n;Comment {\n\#$%554""}\nmyComment {}\n')
        tokens = [tok for tok in tokens if (tok.type != 'NEWLINE')]
        self.assertTrue(((tokens[0].type == 'FORM_ID') and (tokens[0].value == 'myComment') and (tokens[0].lineno == 5)))

    def testKeywords(self):
        """if/elseif/else lex as keywords; their operands stay IDs."""
        ts = self.tokensFromString('if a elseif b else c')
        self.assertTrue(((ts[0].type == 'IF') and (ts[2].type == 'ELSEIF') and (ts[4].type == 'ELSE') and (ts[1].type == ts[3].type == ts[5].type == 'ID')))

    def testNumbers(self):
        """Reals, exponents and imaginary suffixes lex as NUMBER/COMPLEX."""
        ts = self.tokensFromString('1.0 0.5e+7 1i 1 i')
        self.assertTrue(((ts[0].type == ts[1].type == ts[3].type == 'NUMBER') and (ts[2].type == 'COMPLEX') and (ts[2].value == '1') and (ts[4].type == 'ID')))

    def testEscapedNewline(self):
        """The preprocessor splices escaped newlines so '&\\<nl>&' lexes as one BOOL_AND."""
        pp = preprocessor.T('&\\\n&')
        ts = self.tokensFromString(pp.out())
        self.assertTrue((ts[0].type == 'BOOL_AND'))
class BeaconKeys(NamedTuple):
    """Symmetric key material (AES + HMAC + IV) for a beacon session.

    Fixes: ``from_aes_rand`` and ``from_beacon_metadata`` take ``cls`` and
    call ``cls(...)`` as alternate constructors, so they are declared
    ``@classmethod``; the ``BeaconMetadata`` annotation is quoted so the
    class can be defined before that type is imported.
    """

    # Fixed CBC IV used unless an alternative is supplied.
    DEFAULT_AES_IV = b'abcdefghijklmnop'

    aes_key: Optional[bytes]
    hmac_key: Optional[bytes] = None
    iv: bytes = DEFAULT_AES_IV

    @classmethod
    def from_aes_rand(cls, aes_rand: bytes, iv: bytes=DEFAULT_AES_IV) -> 'BeaconKeys':
        """Derive both keys from the beacon's AES random seed."""
        (aes_key, hmac_key) = derive_aes_hmac_keys(aes_rand)
        return cls(aes_key=aes_key, hmac_key=hmac_key, iv=iv)

    @classmethod
    def from_beacon_metadata(cls, metadata: 'BeaconMetadata', iv: bytes=DEFAULT_AES_IV) -> 'BeaconKeys':
        """Derive keys from parsed beacon metadata (uses its ``aes_rand``)."""
        return cls.from_aes_rand(metadata.aes_rand, iv=iv)
# NOTE(review): the leading ``.parametrize`` line is a stripped decorator —
# upstream this is ``@pytest.mark.parametrize(...)``. As written it is not
# valid standalone syntax; restore the ``@pytest.mark`` prefix when fixing.
.parametrize('test_case', TEST_CASES, ids=['al-non-empty-list', 'al-empty-list', 'al-many-lists', 'al-no-explicit-type', 'df-1', 'df-2-int-values-and-access-list', 'df-no-explicit-type'])
def test_decode_encode(test_case):
    """Round-trip: raw bytes -> TypedTransaction -> dict and back to bytes must match."""
    raw_transaction = test_case['expected_raw_transaction']
    actual = TypedTransaction.from_bytes(HexBytes(raw_transaction))
    # Decoding picks the transaction subclass matching the type byte.
    assert isinstance(actual.transaction, test_case['expected_type'])
    expected = TypedTransaction.from_dict(test_case['transaction'])
    assert (actual.as_dict() == expected.as_dict())
    # Re-encoding must reproduce the original raw bytes exactly.
    encoded = actual.encode()
    assert (HexBytes(encoded) == HexBytes(raw_transaction))
def make_tensor_for_dtype_shape(make_tensor_for_number_dtype_shape, make_tensor_for_string_shape):
    """Return a tensor factory that dispatches on dtype.

    The 'string' dtype routes to the string factory; every other dtype
    goes to the numeric factory along with the requested shape.
    """
    def _make_tensor(shape: TENSOR_SHAPE, dtype: TENSOR_DTYPE_STR) -> TF_TENSOR:
        if dtype != 'string':
            return make_tensor_for_number_dtype_shape(shape, dtype)
        return make_tensor_for_string_shape(shape)
    return _make_tensor
def test_token():
    """Exercise the Token API: as_dict defaults plus attr get/set/join/push/index."""
    token = Token('name', 'tag', 0)
    # A fresh token serializes with every field at its default.
    assert (token.as_dict() == {'type': 'name', 'tag': 'tag', 'nesting': 0, 'attrs': None, 'map': None, 'level': 0, 'children': None, 'content': '', 'markup': '', 'info': '', 'meta': {}, 'block': False, 'hidden': False})
    token.attrSet('a', 'b')
    assert (token.attrGet('a') == 'b')
    # attrJoin appends to an existing attribute with a space separator.
    token.attrJoin('a', 'c')
    assert (token.attrGet('a') == 'b c')
    token.attrPush(('x', 'y'))
    assert (token.attrGet('x') == 'y')
    # attrIndex is deprecated; suppress its warning while asserting order.
    with warnings.catch_warnings():
        warnings.simplefilter('ignore')
        assert (token.attrIndex('a') == 0)
        assert (token.attrIndex('x') == 1)
        assert (token.attrIndex('j') == (- 1))
class LogStatsWriterTensorboard(LogStatsWriter):
    """LogStats writer that forwards statistics to TensorBoard.

    Scalars, histograms (list values) and optionally rendered matplotlib
    figures are written through a SummaryWriter. Tags that stop being
    logged get one NaN point so TensorBoard does not interpolate across
    the gap.
    """

    def __init__(self, log_dir: str, tensorboard_render_figure: bool):
        """
        :param log_dir: directory for the TensorBoard event files
        :param tensorboard_render_figure: render figure-valued stats via each
            event's ``tensorboard_render_figure_dict`` hook
        """
        self.tensorboard_render_figure = tensorboard_render_figure
        self.summary_writer = SummaryWriter(log_dir=log_dir)
        # Tags seen in the previous/current step; used to emit NaN
        # terminators for series that disappeared this step.
        self.previous_step_tags = set()
        self.this_step_tags = set()
        GlobalLogState.hook_on_log_step.append(self.on_log_step_increment)

        class _IgnoreTensorboardCheckNaN(logging.Filter):
            # Our deliberate NaN terminators trigger this tensorboard
            # warning; silently drop it.
            def filter(self, record):
                return (not ((record.funcName == 'check_nan') and (record.msg == 'NaN or Inf found in input tensor.')))
        logging.getLogger().addFilter(_IgnoreTensorboardCheckNaN())

    # NOTE(review): a bare '(LogStatsWriter)' expression preceded this method
    # in the original — it appears to be a stripped @override(LogStatsWriter)
    # decorator; confirm against upstream.
    def write(self, path: str, step: int, stats: LogStats) -> None:
        """Write one step of stats: figures first (when enabled), then scalars/histograms."""
        if self.tensorboard_render_figure:
            for ((event, name, groups), value) in stats.items():
                tag = self._event_to_tag(event, name, groups)
                if path:
                    tag = ((path.replace('/', '_') + '_') + tag)
                render_figure_dict = getattr(event, 'tensorboard_render_figure_dict', dict())
                render_figure_function = render_figure_dict.get(name, None)
                if render_figure_function:
                    # NOTE(review): ``name=event`` mirrors the original code
                    # but looks like it was meant to be ``name=name`` — confirm.
                    fig = render_figure_function(value, event=event, name=event, groups=groups)
                    self.summary_writer.add_figure(tag=tag, figure=fig, global_step=step)
            self.summary_writer.flush()
        for ((event, name, groups), value) in stats.items():
            tag = self._event_to_tag(event, name, groups)
            if path:
                tag = ((path.replace('/', '_') + '_') + tag)
            if getattr(event, 'tensorboard_render_figure_dict', dict()):
                # Figure-valued events were handled above.
                continue
            if isinstance(value, List):
                self.summary_writer.add_histogram(tag, np.array(value), step)
            elif isinstance(value, numbers.Number):
                self.summary_writer.add_scalar(tag, value, step)
            else:
                # Unsupported value type: skip silently.
                pass
            self.this_step_tags.add(tag)
        self.summary_writer.flush()

    def on_log_step_increment(self):
        """Hook: NaN-terminate tags that vanished this step, then rotate the tag sets."""
        for tag in self.previous_step_tags:
            if tag not in self.this_step_tags:
                self.summary_writer.add_scalar(tag, math.nan, GlobalLogState.global_step)
        self.previous_step_tags = self.this_step_tags
        self.this_step_tags = set()
        self.summary_writer.flush()

    # NOTE(review): a bare '(LogStatsWriter)' expression preceded this method
    # in the original as well (stripped @override decorator).
    def close(self) -> None:
        """Close the underlying SummaryWriter."""
        self.summary_writer.close()

    @staticmethod
    def _event_to_tag(event: Callable, name: str, groups: Optional[List[Union[int, str]]]) -> str:
        """Build a 'Qualified/Event/name/group...' TensorBoard tag.

        Fix: declared ``@staticmethod`` — the method takes no ``self`` yet is
        invoked as ``self._event_to_tag(event, name, groups)``; without the
        decorator the instance was bound to ``event``.
        """
        qualified_name = event.__qualname__
        key_name = qualified_name.replace('.', '/')
        if (name is not None) and len(name):
            key_name = key_name + '/' + name
        if groups is not None:
            for group in groups:
                if group is None:
                    continue
                key_name = key_name + '/' + str(group)
        return key_name
class OptionSeriesBarDatalabelsFilter(Options):
    """Data-label filter options for bar series.

    NOTE(review): both options appear as getter/setter pairs sharing one
    name; upstream these are ``@property`` / ``@<name>.setter`` pairs and
    the decorators look stripped here — as written each second def shadows
    the first. Confirm against the generator output.
    """

    def operator(self):
        """Getter for the comparison operator option."""
        return self._config_get(None)

    def operator(self, value: Any):
        """Setter for the comparison operator option."""
        self._config(value, js_type=False)

    def property(self):
        """Getter for the point property to filter on."""
        return self._config_get(None)

    def property(self, text: str):
        """Setter for the point property to filter on."""
        self._config(text, js_type=False)
class VmReplicaSerializer(s.InstanceSerializer):
_model_ = SlaveVm
_default_fields_ = ('repname',)
_update_fields_ = ('reserve_resources', 'sleep_time', 'enabled', 'bwlimit')
hostname = s.CharField(source='master_vm.hostname', read_only=True)
repname = s.RegexField('^[A-Za-z0-9][A-Za-z0-9\\._-]*$', source='name', max_length=24, min_length=1)
node = s.SlugRelatedField(slug_field='hostname', queryset=Node.objects, required=True)
root_zpool = s.CharField(max_length=64, required=False)
disk_zpools = DiskPoolDictField(required=False)
reserve_resources = s.BooleanField(default=True)
sleep_time = s.IntegerField(source='rep_sleep_time', min_value=0, max_value=86400, default=60)
enabled = s.BooleanField(source='rep_enabled', default=True)
bwlimit = s.IntegerField(source='rep_bwlimit', required=False, min_value=0, max_value=)
last_sync = s.DateTimeField(read_only=True, required=False)
reinit_required = s.BooleanField(source='rep_reinit_required', read_only=True, required=False)
node_status = s.DisplayChoiceField(source='vm.node.status', choices=Node.STATUS_DB, read_only=True)
created = s.DateTimeField(source='vm.created', read_only=True, required=False)
def __init__(self, request, slave_vm, *args, **kwargs):
self.img_required = None
self.reserve_resources_changed = False
self._detail_dict = {}
super(VmReplicaSerializer, self).__init__(request, slave_vm, *args, **kwargs)
if (request.method == 'POST'):
vm = slave_vm.vm
dc_settings = request.dc.settings
self.fields['reserve_resources'].default = dc_settings.VMS_VM_REPLICA_RESERVATION_DEFAULT
self.fields['node'].queryset = get_nodes(request, is_compute=True)
self._disks = vm.json_get_disks()
if vm.is_hvm():
self.fields['disk_zpools'].max_items = len(self._disks)
else:
del self.fields['disk_zpools']
else:
self.fields['node'].required = False
self.fields['node'].read_only = True
self.fields['root_zpool'].read_only = True
self.fields['disk_zpools'].read_only = True
def validate_disk_zpools(self, attrs, source):
disk_zpools = attrs.get(source, None)
if disk_zpools:
if (max(disk_zpools.keys()) > len(self._disks)):
raise s.ValidationError(_('Invalid disk_id.'))
return attrs
def validate_node(self, attrs, source):
try:
node = attrs[source]
except KeyError:
return attrs
if (node == self.object.node):
raise s.ValidationError(_('Target node is the same as current node.'))
if (node.status != Node.ONLINE):
raise s.ValidationError(_('Target node is not in online state.'))
try:
validate_nic_tags(self.object.vm, new_node=node)
except s.ValidationError:
raise s.ValidationError(_('Some networks are not available on target node.'))
return attrs
    def _validate_create(self, attrs):
        """Validate a POST request: place the new replica onto the chosen node
        and verify node/storage capacity.

        Returns True on success; on failure stores API errors under
        self._errors['node'] and returns False (no exception is raised here).
        """
        node = attrs['node']
        self._detail_dict['node'] = node.hostname
        slave_vm = self.object
        slave_vm.set_rep_hostname()
        slave_vm.node = node
        slave_vm.reserve_resources = attrs.get('reserve_resources', True)
        slave_vm_define = SlaveVmDefine(slave_vm)
        root_zpool = attrs.get('root_zpool', None)
        try:
            # May adjust the requested zpool to one usable on the node.
            root_zpool = slave_vm_define.save_root_zpool(root_zpool)
        except s.APIValidationError as exc:
            self._errors['node'] = exc.api_errors
            return False
        else:
            if root_zpool:
                self._detail_dict['root_zpool'] = root_zpool
        if slave_vm.vm.is_hvm():
            # Only HVM servers carry a per-disk zpool mapping (see __init__).
            disk_zpools = attrs.get('disk_zpools', {})
            try:
                disk_zpools = slave_vm_define.save_disk_zpools(disk_zpools)
            except s.APIValidationError as exc:
                self._errors['node'] = exc.api_errors
                return False
            else:
                if disk_zpools:
                    self._detail_dict['disk_zpools'] = disk_zpools
        try:
            # CPU/RAM check is skipped when resources are not reserved.
            slave_vm_define.validate_node_resources(ignore_cpu_ram=(not slave_vm.reserve_resources))
        except s.APIValidationError as exc:
            self._errors['node'] = exc.api_errors
            return False
        try:
            slave_vm_define.validate_storage_resources()
        except s.APIValidationError as exc:
            self._errors['node'] = exc.api_errors
            return False
        # Image(s) that must be imported to the node before replication starts.
        self.img_required = slave_vm_define.check_required_images()
        self.slave_vm_define = slave_vm_define
        return True
def _validate_update(self, attrs):
try:
reserve_resource = attrs['reserve_resources']
except KeyError:
pass
else:
self.reserve_resources_changed = (reserve_resource != self.object.reserve_resources)
if (self.reserve_resources_changed and reserve_resource):
slave_vm_define = SlaveVmDefine(self.object)
try:
slave_vm_define.validate_node_resources(ignore_cpu_ram=False, ignore_disk=True)
except s.APIValidationError as exc:
self._errors['node'] = exc.api_errors
return False
return True
    def validate(self, attrs):
        """Object-level validation: refuse any operation while the replica
        needs re-initialization; on create, enforce the per-DC replica limit,
        assign the next rep_id and run full placement validation."""
        if self.object.rep_reinit_required:
            raise s.ValidationError(_('Server replica requires re-initialization.'))
        if (self.request.method == 'POST'):
            # Count existing (named) replicas of this master VM.
            total = SlaveVm.objects.filter(master_vm=self.object.master_vm).exclude(name=u'').count()
            self.object.rep_id = (total + 1)
            limit = self.request.dc.settings.VMS_VM_REPLICA_LIMIT
            if (limit is not None):
                if (int(limit) <= total):
                    raise s.ValidationError(_('Maximum number of server replicas reached.'))
            # NOTE: _validate_create/_validate_update report failures through
            # self._errors rather than by raising, hence return values ignored.
            self._validate_create(attrs)
        else:
            self._validate_update(attrs)
        return attrs
    def save_slave_vm(self):
        """Persist the slave VM definition.

        The master VM's hostname (plus a freshly chosen VNC port) is used
        while regenerating the VM json, then the slave hostname is restored;
        sync_status is temporarily forced to DIS so the save itself is not
        mistaken for a completed sync.
        """
        slave_vm = self.object
        hostname = slave_vm.vm.hostname
        # Temporarily borrow the master hostname for json generation.
        slave_vm.vm.hostname = slave_vm.master_vm.hostname
        slave_vm.vm.choose_vnc_port()
        slave_vm.vm.sync_json()
        slave_vm.vm.hostname = hostname
        sync_status = slave_vm.sync_status
        slave_vm.sync_status = SlaveVm.DIS
        self.slave_vm_define.save()
        slave_vm.sync_status = sync_status
        return self.slave_vm_define.slave_vm
def node_image_import(self):
if self.img_required:
(ns, img) = self.img_required
return NodeImageView.import_for_vm(self.request, ns, img, self.object)
return None
    def detail_dict(self, **kwargs):
        """Return task-detail key/values (node, zpools, replica name) for logging."""
        # Bare attribute access — presumably evaluated for its serialization
        # side effects before building the detail dict; TODO confirm.
        self.data
        dd = super(VmReplicaSerializer, self).detail_dict(**kwargs)
        dd.update(self._detail_dict)
        dd['repname'] = self.object.name
        return dd
def start_south_service_for_filter(config, fledge_url='localhost:8081', service_name='numpy_ingest', plugin_name='numpy_south', enabled='true'):
    """Create (and optionally enable) a south service on a Fledge instance.

    Args:
        config: plugin configuration dict sent as the service ``config``.
        fledge_url: host[:port] of the Fledge REST API.
        service_name: name of the new south service.
        plugin_name: south plugin to load into the service.
        enabled: 'true'/'false' string — whether to start the service at once.

    Raises:
        AssertionError: when the API does not answer 200 (test-helper style).
    """
    # Local import keeps this test helper self-contained.
    import http.client
    data = {'name': service_name, 'type': 'south', 'plugin': plugin_name, 'enabled': enabled, 'config': config}
    # BUG FIX: the connection object was never created (truncated line).
    conn = http.client.HTTPConnection(fledge_url)
    try:
        conn.request('POST', '/fledge/service', json.dumps(data))
        r = conn.getresponse()
        assert (200 == r.status), 'Could not start south service'
        r = r.read().decode()
    finally:
        # Close even when the status assertion fails (original leaked here).
        conn.close()
    retval = json.loads(r)
    print(retval)
def interpol_alpha_cubic(f_0, df_0, f_alpha_0, f_alpha_1, alpha_0, alpha_1):
    """Minimizer of the cubic interpolating phi(0)=f_0, phi'(0)=df_0,
    phi(alpha_0)=f_alpha_0 and phi(alpha_1)=f_alpha_1.

    The cubic is a*t^3 + b*t^2 + df_0*t + f_0; a and b come from the
    closed-form 2x2 solve (Nocedal & Wright, Numerical Optimization, eq. 3.59)
    and the returned step is the positive root of the derivative.
    """
    quot = 1.0 / ((alpha_0 ** 2) * (alpha_1 ** 2) * (alpha_1 - alpha_0))
    A = np.array([[alpha_0 ** 2, -(alpha_1 ** 2)],
                  [-(alpha_0 ** 3), alpha_1 ** 3]])
    B = np.array([f_alpha_1 - f_0 - df_0 * alpha_1,
                  f_alpha_0 - f_0 - df_0 * alpha_0])
    # BUG FIX: the matrix-vector product operator (@) was missing here.
    a, b = (quot * A) @ B
    alpha_cubic = (-b + (b ** 2 - 3 * a * df_0) ** 0.5) / (3 * a)
    return alpha_cubic
def fortios_ips(data, fos):
    """Send the ips/global payload to FortiOS and translate the reply into
    Ansible's (failed, changed, response, diff) tuple."""
    fos.do_member_operation('ips', 'global')
    if data['ips_global']:
        resp = ips_global(data, fos)
    else:
        # fail_json aborts the module run, so resp is never read on this path.
        fos._module.fail_json(msg=('missing task body: %s' % 'ips_global'))
    ok = is_successful_status(resp)
    changed = ok and (resp['revision_changed'] if 'revision_changed' in resp else True)
    return (not ok), changed, resp, {}
def extractSoraratranslationsTumblrCom(item):
    """Parse a release entry from soraratranslations.tumblr.com.

    Returns None for previews or items without volume/chapter info, a release
    message for recognized tags, and False when no tag matches.
    """
    vol, chp, frag, postfix = extractVolChapterFragmentPostfix(item['title'])
    if not (chp or vol) or 'preview' in item['title'].lower():
        return None
    tagmap = [
        ('PRC', 'PRC', 'translated'),
        ('Loiterous', 'Loiterous', 'oel'),
    ]
    for tagname, name, tl_type in tagmap:
        if tagname not in item['tags']:
            continue
        return buildReleaseMessageWithType(item, name, vol, chp, frag=frag, postfix=postfix, tl_type=tl_type)
    return False
def rotate_around_axis(vector, rot_axis, apply_point=(0.0, 0.0, 0.0)):
    """Rotate a 3D vector around an arbitrary axis through the origin and
    translate the result by apply_point.

    Args:
        vector: (x, y, z) components of the vector to rotate.
        rot_axis: (u, v, w, angle) — axis direction plus rotation angle in
            radians. Assumes (u, v, w) is a unit vector — TODO confirm callers.
        apply_point: translation added to the rotated vector. FIX: default
            changed from a mutable list to an equivalent immutable tuple
            (mutable-default anti-pattern; it was never mutated, so behavior
            is unchanged).

    Returns:
        np.ndarray of shape (3,): the rotated, translated vector.
    """
    x, y, z = vector[0], vector[1], vector[2]
    u, v, w = rot_axis[0], rot_axis[1], rot_axis[2]
    # Pairwise products reused across the three output components.
    ux, uy, uz = (u * x), (u * y), (u * z)
    vx, vy, vz = (v * x), (v * y), (v * z)
    wx, wy, wz = (w * x), (w * y), (w * z)
    sa = math.sin(rot_axis[3])
    ca = math.cos(rot_axis[3])
    dot = (ux + vy) + wz  # axis . vector
    p0 = (u * dot) + ((x * ((v * v) + (w * w))) - (u * (vy + wz))) * ca + ((-wy) + vz) * sa + apply_point[0]
    p1 = (v * dot) + ((y * ((u * u) + (w * w))) - (v * (ux + wz))) * ca + (wx - uz) * sa + apply_point[1]
    p2 = (w * dot) + ((z * ((u * u) + (v * v))) - (w * (ux + vy))) * ca + ((-vx) + uy) * sa + apply_point[2]
    return np.array([p0, p1, p2])
class TestRowExporter(TestCase):
    """Tests for RowExporter.get_data against a fully mocked model."""

    def setUp(self):
        # Value type always defers to the editor value; side_effect=count()
        # takes precedence over return_value, so accesses yield 0, 1, 2, ...
        value_type = Mock()
        value_type.has_text = Mock(return_value=False)
        value_type.has_editor_value = Mock(return_value=True)
        value_type.get_editor_value = Mock(return_value=1, side_effect=count())
        model = Mock()
        model.get_column_count = Mock(return_value=3)
        model.get_value = Mock(return_value=0)
        model.get_value_type = Mock(return_value=value_type)
        self.value_type = value_type
        self.model = model

    def test_get_data(self):
        exporter = RowExporter(format=trivial_format)
        data = exporter.get_data(self.model, [((0,), (0,)), ((1,), ())])
        expected = [[0, 1, 2], [3, 4, 5]]
        self.assertEqual(data, expected)

    def test_get_data_deduplicate(self):
        # The same row selected twice must be exported only once.
        exporter = RowExporter(format=trivial_format)
        data = exporter.get_data(self.model, [((0,), (0,)), ((0,), (2,)), ((1,), ())])
        self.assertEqual(data, [[0, 1, 2], [3, 4, 5]])

    def test_get_data_row_headers(self):
        # Row headers add one leading cell per row.
        exporter = RowExporter(format=trivial_format, row_headers=True)
        data = exporter.get_data(self.model, [((0,), (0,)), ((1,), ())])
        self.assertEqual(data, [[0, 1, 2, 3], [4, 5, 6, 7]])

    def test_get_data_column_headers(self):
        # Column headers add one leading row.
        exporter = RowExporter(format=trivial_format, column_headers=True)
        data = exporter.get_data(self.model, [((0,), (0,)), ((1,), ())])
        self.assertEqual(data, [[0, 1, 2], [3, 4, 5], [6, 7, 8]])
class WlCopyHandler(ClipboardHandlerBase):
    """Clipboard handler that copies text on Wayland via the wl-copy binary."""

    # NOTE(review): `_copy` takes no `self`/`cls` parameter — it looks like a
    # @staticmethod whose decorator is missing here. As written, calling it on
    # an instance would pass the instance as `text`; confirm against the base
    # class contract before changing.
    def _copy(text: str) -> None:
        # args list + shell=False: not susceptible to shell injection.
        subprocess.run(args=['wl-copy'], shell=False, input=text, encoding='utf-8', check=True, stdout=Path('/dev/null').open('w'), timeout=5)

    def _is_compatible(self) -> bool:
        # Usable only on Linux sessions with a Wayland display manager.
        if (not self._os_has_wayland_display_manager()):
            logger.debug('%s is not compatible on non-Linux systems and on Linux w/o Wayland', self.name)
            return False
        # wl-copy is shipped by the 'wl-clipboard' distro package.
        if (not (wl_copy_bin := shutil.which('wl-copy'))):
            logger.debug('%s is not compatible: wl-copy was not found', self.name)
            logger.warning("Your Linux runs with Wayland. Please install the system package 'wl-clipboard' to ensure that text can be copied to the clipboard correctly")
            return False
        logger.debug('%s is compatible (%s)', self.name, wl_copy_bin)
        return True
def sharpe_iid_rolling(rtns, window: int, min_periods: int, bench=0, factor=1, log=True):
    """Rolling IID Sharpe ratio of returns over a benchmark.

    Excess returns are taken in log space (converted from percent returns
    when log=False) and the ratio is scaled by sqrt(factor).
    """
    excess = log_excess(rtns, bench) if log else pct_to_log_excess(rtns, bench)
    window_stats = excess.rolling(window=window, min_periods=min_periods)
    return np.sqrt(factor) * window_stats.mean() / window_stats.std(ddof=1)
class Preferences(PrefMixin, UpdateNoIdMixin, QuickbooksTransactionEntity):
    """QBO Preferences object; class_dict maps each JSON sub-object name to
    the wrapper class used when deserializing it."""
    class_dict = {'EmailMessagesPrefs': EmailMessagesPrefs, 'ProductAndServicesPrefs': ProductAndServicesPrefs, 'ReportPrefs': ReportPrefs, 'AccountingInfoPrefs': AccountingInfoPrefs, 'SalesFormsPrefs': SalesFormsPrefs, 'VendorAndPurchasesPrefs': VendorAndPurchasesPrefs, 'TaxPrefs': TaxPrefs, 'OtherPrefs': OtherPrefs, 'TimeTrackingPrefs': TimeTrackingPrefs, 'CurrencyPrefs': CurrencyPrefs}
    qbo_object_name = 'Preferences'

    def __init__(self):
        super().__init__()
        # Every pref section starts out unset; the attribute names are exactly
        # the keys of class_dict, so initialize them in a single pass.
        for attr_name in self.class_dict:
            setattr(self, attr_name, None)

    def __str__(self):
        return 'Preferences {0}'.format(self.Id)
def gahj():
    """Merge apps.json with apps-custom.json, coerce selected fields to
    lists, and write the combined catalog to apps-habu.json."""
    with (DATADIR / 'apps.json').open() as fh:
        base = json.load(fh)
    with (DATADIR / 'apps-custom.json').open() as fh:
        custom = json.load(fh)
    apps = base['apps']
    categories = base['categories']
    apps.update(custom['apps'])
    categories.update(custom['categories'])
    # These fields may be scalars in the source data; consumers expect lists.
    list_fields = ['url', 'html', 'env', 'script', 'implies', 'excludes']
    for entry in apps.values():
        for field in list_fields:
            if field in entry and not isinstance(entry[field], list):
                entry[field] = [entry[field]]
    with (DATADIR / 'apps-habu.json').open('w') as fh:
        fh.write(json.dumps({'apps': apps, 'categories': categories}, indent=4))
class Proxy(object):
    """Proxy for a remote JavaScript object addressed by a foreign object id
    (ffid). All attribute/item operations are forwarded over the executor
    bridge (`_exe`); responses arrive as (methodType, value) pairs that
    `_call` maps back onto new proxies or plain Python values.
    """

    def __init__(self, exe, ffid, prop_ffid=None, prop_name='', es6=False):
        # NOTE: these attribute names must all appear in INTERNAL_VARS,
        # otherwise __setattr__ would forward the assignment to the JS side.
        self.ffid = ffid
        self._exe = exe
        self._ix = 0  # iteration cursor for __iter__/__next__
        self._pffid = (prop_ffid if (prop_ffid != None) else ffid)  # parent object id for property access
        self._pname = prop_name
        self._es6 = es6  # True when proxying an ES6 class (construct via initProp)
        self._resolved = {}
        self._Keys = None  # key list used when iterating non-array objects

    def _call(self, method, methodType, val):
        """Map a bridge (methodType, val) response onto a Python value or proxy."""
        this = self
        debug('MT', method, methodType, val)
        if (methodType == 'fn'):
            # Function property: keep the parent ffid so the call can bind `this`.
            return Proxy(self._exe, val, self.ffid, method)
        if (methodType == 'class'):
            return Proxy(self._exe, val, es6=True)
        if (methodType == 'obj'):
            return Proxy(self._exe, val)
        if (methodType == 'inst'):
            return Proxy(self._exe, val)
        if (methodType == 'void'):
            return None
        if (methodType == 'py'):
            # A Python object round-tripped through JS; fetch the original back.
            return self._exe.get(val)
        else:
            # Primitive value — returned as-is.
            return val

    def __call__(self, *args, timeout=10, forceRefs=False):
        # ES6 classes are constructed (initProp); everything else is invoked.
        (mT, v) = (self._exe.initProp(self._pffid, self._pname, args) if self._es6 else self._exe.callProp(self._pffid, self._pname, args, timeout=timeout, forceRefs=forceRefs))
        if (mT == 'fn'):
            return Proxy(self._exe, v)
        return self._call(self._pname, mT, v)

    def __getattr__(self, attr):
        # `.new` yields a constructor proxy for this object.
        if (attr == 'new'):
            return self._call((self._pname if (self._pffid == self.ffid) else ''), 'class', self._pffid)
        (methodType, val) = self._exe.getProp(self._pffid, attr)
        return self._call(attr, methodType, val)

    def __getitem__(self, attr):
        (methodType, val) = self._exe.getProp(self.ffid, attr)
        return self._call(attr, methodType, val)

    def __iter__(self):
        self._ix = 0
        # Objects without a `length` property iterate over their keys instead
        # of by numeric index.
        if (self.length == None):
            self._Keys = self._exe.keys(self.ffid)
        return self

    def __next__(self):
        if self._Keys:
            # Key-based iteration (plain objects).
            if (self._ix < len(self._Keys)):
                result = self._Keys[self._ix]
                self._ix += 1
                return result
            else:
                raise StopIteration
        elif (self._ix < self.length):
            # Index-based iteration (array-likes).
            result = self[self._ix]
            self._ix += 1
            return result
        else:
            raise StopIteration

    def __setattr__(self, name, value):
        # Internal attributes stay local; anything else is set on the JS object.
        if (name in INTERNAL_VARS):
            object.__setattr__(self, name, value)
        else:
            return self._exe.setProp(self.ffid, name, value)

    def __setitem__(self, name, value):
        return self._exe.setProp(self.ffid, name, value)

    def __contains__(self, key):
        return (True if (self[key] is not None) else False)

    def valueOf(self):
        """Serialize the remote object into a plain Python value."""
        ser = self._exe.ipc('serialize', self.ffid, '')
        return ser['val']

    def blobValueOf(self):
        """Fetch the remote object's binary (blob) representation."""
        blob = self._exe.ipc('blob', self.ffid, '')
        return blob['blob']

    def __str__(self):
        return self._exe.inspect(self.ffid, 'str')

    def __repr__(self):
        return self._exe.inspect(self.ffid, 'repr')

    def __json__(self):
        return {'ffid': self.ffid}

    def __del__(self):
        # Release the remote reference so the JS side can garbage-collect it.
        self._exe.free(self.ffid)
def test_copy_with_replacing():
    """Copied containers can replace providers while non-replaced providers
    are copied from the base container."""
    class _Container(containers.DeclarativeContainer):
        p11 = providers.Object(0)
        p12 = providers.Factory(dict, p11=p11)
    # NOTE(review): the bare `(_Container)` expression below is a no-op; it
    # looks like the argument of a stripped `@containers.copy(_Container)`
    # decorator on the following subclass — confirm against the original.
    (_Container)
    class _Container1(_Container):
        p11 = providers.Object(1)
        p13 = providers.Object(11)
    # NOTE(review): same stripped-decorator suspicion as above.
    (_Container)
    class _Container2(_Container):
        p11 = providers.Object(2)
        p13 = providers.Object(22)
    # Each container must own distinct provider copies...
    assert (_Container.p11 is not _Container1.p11)
    assert (_Container.p12 is not _Container1.p12)
    assert (_Container.p11 is not _Container2.p11)
    assert (_Container.p12 is not _Container2.p12)
    assert (_Container1.p11 is not _Container2.p11)
    assert (_Container1.p12 is not _Container2.p12)
    # ...and every copied factory must be wired to its own replaced p11.
    assert (_Container.p12() == {'p11': 0})
    assert (_Container1.p12() == {'p11': 1})
    assert (_Container2.p12() == {'p11': 2})
    assert (_Container1.p13() == 11)
    assert (_Container2.p13() == 22)
class ApiService():
    """Async CRUD service layer for Api entities, delegating to ApiDao.

    NOTE(review): none of these methods takes `self`/`cls` — they appear to
    be @staticmethod definitions whose decorators are missing here; confirm
    against the original source before calling them on instances.
    """

    async def get(*, pk: int) -> Api:
        # Fetch a single Api by primary key; raises NotFoundError when absent.
        async with async_db_session() as db:
            api = (await ApiDao.get(db, pk))
            if (not api):
                raise errors.NotFoundError(msg='')
            return api

    async def get_select(*, name: str=None, method: str=None, path: str=None) -> Select:
        # Build a filtered select statement (executed later, e.g. by paging code).
        return (await ApiDao.get_list(name=name, method=method, path=path))

    async def get_all() -> Sequence[Api]:
        async with async_db_session() as db:
            apis = (await ApiDao.get_all(db))
            return apis

    async def create(*, obj: CreateApi) -> None:
        # API names must be unique; duplicates are rejected.
        async with async_db_session.begin() as db:
            api = (await ApiDao.get_by_name(db, obj.name))
            if api:
                raise errors.ForbiddenError(msg='')
            (await ApiDao.create(db, obj))

    async def update(*, pk: int, obj: UpdateApi) -> int:
        # Returns the number of rows updated.
        async with async_db_session.begin() as db:
            count = (await ApiDao.update(db, pk, obj))
            return count

    async def delete(*, pk: list[int]) -> int:
        # Bulk delete; returns the number of rows removed.
        async with async_db_session.begin() as db:
            count = (await ApiDao.delete(db, pk))
            return count
def _sample_good_instances(nb_agents: int, good_ids: List[str], base_amount: int, uniform_lower_bound_factor: int, uniform_upper_bound_factor: int) -> Dict[(str, int)]:
a = ((base_amount * nb_agents) + (nb_agents * uniform_lower_bound_factor))
b = ((base_amount * nb_agents) + (nb_agents * uniform_upper_bound_factor))
nb_instances = {good_id: round(np.random.uniform(a, b)) for good_id in good_ids}
return nb_instances |
class TestTachoMotorFullTravelCountValue(ptc.ParameterizedTestCase):
    """Parameterized checks of a tacho motor's full_travel_count attribute."""

    def test_full_travel_count_value(self):
        # The driver-reported value must match the reference motor_info table.
        self.assertEqual(self._param['motor'].full_travel_count, motor_info[self._param['motor'].driver_name]['full_travel_count'])

    def test_full_travel_count_value_is_read_only(self):
        # NOTE(review): this writes `count_per_m`, not `full_travel_count` —
        # likely a copy-paste slip from a sibling test class; confirm intent.
        with self.assertRaises(AttributeError):
            self._param['motor'].count_per_m = 'ThisShouldNotWork'
def println(msg, console_prefix=None, end='\n', flush=False, force=False, logger=None, overline=None, underline=None):
    """Print msg to the console and optionally forward it to a logger.

    Console output is emitted when force=True, or when not QUIET and stdout
    is (or is assumed to be) a TTY / Rally runs inside Docker. overline and
    underline add a symbol line above/below the message; logger (a callable)
    always receives the raw msg when provided.
    """
    printable = force or (not QUIET and (RALLY_RUNNING_IN_DOCKER or ASSUME_TTY or sys.stdout.isatty()))
    if printable:
        if console_prefix:
            full_msg = '%s %s' % (console_prefix, msg)
        else:
            full_msg = msg
        if overline:
            print(format.underline_for(full_msg, underline_symbol=overline), flush=flush)
        print(full_msg, end=end, flush=flush)
        if underline:
            print(format.underline_for(full_msg, underline_symbol=underline), flush=flush)
    if logger:
        logger(msg)
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.