code stringlengths 281 23.7M |
|---|
def demo(use_files=False, dir_name=None, config_filename='', title='Traits UI Demos', css_filename='default.css'):
    """Launch the Traits UI demo browser rooted at *dir_name*.

    When no directory is given, defaults to the directory containing the
    running script.  A relative config filename is resolved against the
    demo directory before the browser is opened.
    """
    if dir_name is None:
        dir_name = dirname(abspath(sys.argv[0]))
    path, name = split(dir_name)
    # A relative configuration file lives next to the demo sources.
    if config_filename and not isabs(config_filename):
        config_filename = join(path, name, config_filename)
    model = DemoPath(
        name=dir_name,
        nice_name=user_name_for(name),
        use_files=use_files,
        config_filename=config_filename,
        css_filename=css_filename,
    )
    Demo(path=path, title=title, model=model).configure_traits()
class EmmetInsertAttribute(sublime_plugin.TextCommand):
    """Insert an HTML attribute snippet at the caret.

    A leading space is added when the character just before the caret is
    not already whitespace, so the attribute does not fuse with the tag
    name or a previous attribute.
    """

    def run(self, edit, attribute=None, **kw):
        if not attribute:
            return
        prefix = ''
        selections = self.view.sel()
        if selections:
            caret = selections[0].begin()
            preceding = self.view.substr(sublime.Region(caret - 1, caret))
            if not preceding.isspace():
                prefix = ' '
        snippet = '%s%s="$1"' % (prefix, attribute)
        self.view.run_command('insert_snippet', {'contents': snippet})
class SwapBrackets(BracketRemove):
    # Swaps a bracket pair: removes the existing pair via the BracketRemove
    # base class, then restores the selection and collapsed bracket state so
    # a follow-up command can insert the replacement pair in place.
    def run(self, edit, name, remove_content=False, remove_indent=False, remove_block=False):
        # Width of the opening bracket; the right edge of the selection must
        # shift left by this much once the opening bracket is removed.
        offset = self.left.toregion().size()
        selection = [sublime.Region(self.left.begin, (self.right.begin - offset))]
        # Collapse both bracket regions to zero-width points at their
        # respective edges before the removal mutates them.
        left = self.left.move(self.left.end, self.left.end)
        right = self.right.move(self.right.begin, self.right.begin)
        super(SwapBrackets, self).run(edit, name)
        # BracketRemove.run clobbers these attributes; restore the values
        # captured above so callers see the post-removal positions.
        self.selection = selection
        self.left = left
        self.right = right
        self.nobracket = False
class Resolver():
    """Routes simulation events to the configured physics strategy objects.

    Each attribute holds the strategy used to resolve one category of
    event; :meth:`resolve` dispatches on the event type and stamps the
    affected balls with the event time.
    """
    ball_ball: BallBallCollisionStrategy
    ball_linear_cushion: BallLCushionCollisionStrategy
    ball_circular_cushion: BallCCushionCollisionStrategy
    ball_pocket: BallPocketStrategy
    stick_ball: StickBallCollisionStrategy
    transition: BallTransitionStrategy

    def resolve(self, shot: System, event: Event) -> None:
        """Mutate *shot* in place according to *event*'s type.

        The event's agent states are snapshotted before and after
        resolution.  Note: NONE events return early and therefore skip the
        final snapshot as well.
        """
        _snapshot_initial(shot, event)
        ids = event.ids
        if (event.event_type == EventType.NONE):
            return
        elif event.event_type.is_transition():
            ball = shot.balls[ids[0]]
            self.transition.resolve(ball, event.event_type, inplace=True)
        elif (event.event_type == EventType.BALL_BALL):
            ball1 = shot.balls[ids[0]]
            ball2 = shot.balls[ids[1]]
            self.ball_ball.resolve(ball1, ball2, inplace=True)
            ball1.state.t = event.time
            ball2.state.t = event.time
        elif (event.event_type == EventType.BALL_LINEAR_CUSHION):
            ball = shot.balls[ids[0]]
            cushion = shot.table.cushion_segments.linear[ids[1]]
            self.ball_linear_cushion.resolve(ball, cushion, inplace=True)
            ball.state.t = event.time
        elif (event.event_type == EventType.BALL_CIRCULAR_CUSHION):
            ball = shot.balls[ids[0]]
            cushion_jaw = shot.table.cushion_segments.circular[ids[1]]
            self.ball_circular_cushion.resolve(ball, cushion_jaw, inplace=True)
            ball.state.t = event.time
        elif (event.event_type == EventType.BALL_POCKET):
            ball = shot.balls[ids[0]]
            pocket = shot.table.pockets[ids[1]]
            self.ball_pocket.resolve(ball, pocket, inplace=True)
            ball.state.t = event.time
        elif (event.event_type == EventType.STICK_BALL):
            cue = shot.cue
            # ids[0] is the cue's id; ids[1] identifies the struck ball.
            ball = shot.balls[ids[1]]
            self.stick_ball.resolve(cue, ball, inplace=True)
            ball.state.t = event.time
        _snapshot_final(shot, event)

    # NOTE(review): ``default`` and ``from_config`` take ``cls`` and return
    # instances — presumably @classmethod decorators were lost in
    # extraction; confirm against the original source.
    def default(cls) -> Resolver:
        """Build a Resolver from the default configuration."""
        return cls.from_config(ResolverConfig.default())

    def from_config(cls, config: ResolverConfig) -> Resolver:
        """Build a Resolver whose strategies are looked up from *config*."""
        ball_ball = get_ball_ball_model(model=config.ball_ball, params=config.ball_ball_params)
        ball_linear_cushion = get_ball_lin_cushion_model(model=config.ball_linear_cushion, params=config.ball_linear_cushion_params)
        ball_circular_cushion = get_ball_circ_cushion_model(model=config.ball_circular_cushion, params=config.ball_circular_cushion_params)
        ball_pocket = get_ball_pocket_model(model=config.ball_pocket, params=config.ball_pocket_params)
        stick_ball = get_stick_ball_model(model=config.stick_ball, params=config.stick_ball_params)
        transition = get_transition_model(model=config.transition, params=config.transition_params)
        return cls(ball_ball, ball_linear_cushion, ball_circular_cushion, ball_pocket, stick_ball, transition)
class Circle(Html.Html):
    """CSS-only animated progress circle.

    Renders a ``<div>`` whose conic-gradient background is driven by the
    ``--pgPercentage`` custom property, animated from ``--start`` to
    ``--value``.
    """
    name = 'Progress Circle'

    def __init__(self, value: float, page: primitives.PageModel, width: tuple, height: tuple, html_code: Optional[str], options: Optional[dict], profile: Optional[Union[(dict, bool)]]):
        # NOTE(review): the "\ " sequences opening the first two rules below
        # look like '@property --pgPercentage' and '@keyframes growProgressBar'
        # with the '@…' text lost in extraction — as written the injected CSS
        # is likely invalid; confirm against the original stylesheet.
        page.properties.css.add_text("\ --pgPercentage {\n syntax: '<number>';\n inherits: false;\n initial-value: 0;\n}\n\ growProgressBar {\n from { --pgPercentage: var(--start); }\n to { --pgPercentage: var(--value); }\n}\n\n.CircleProgressbar {\n animation: growProgressBar 1s 1 forwards ease-in-out;\n counter-set: percentage var(--pgPercentage);\n border-radius: 50%;\n display: grid;\n margin: 2px;\n place-items: center;\n}\n\n.CircleProgressbar::after {\n content: counter(percentage) '%';\n line-height: 90px;\n text-align: center;\n display: block;\n}\n")
        super(Circle, self).__init__(page, [], html_code=html_code, css_attrs={'width': width, 'height': height}, profile=profile, options=options)
        # ARIA metadata so assistive technology announces this as a progress bar.
        self.aria.role = 'progressbar'
        self.style.css.display = 'inline-block'
        self.aria.valuemax = 100
        self.aria.valuemin = 0
        self.attr['class'].add('CircleProgressbar')
        self.style.css.color = self.page.theme.notch()
        # Radial gradient hollows out the center; the conic gradient draws the
        # progress arc up to --pgPercentage percent.
        self.style.css.background = ('radial-gradient(closest-side, %(back)s 80%%, transparent 0 99.9%%, %(back)s 0), conic-gradient(%(color)s calc(var(--pgPercentage) * 1%%), %(grey)s 0)' % {'color': self.page.theme.notch(), 'grey': self.page.theme.greys[1], 'back': self.page.theme.greys[0]})
        # Scale the percentage text with the component width.
        self.style.css.font_size = int((width[0] / 4))
        self.aria.valuenow = value
        self.css({'--value': value})
        self.css({'--start': 0})

    # Client-side builder: animates from the current value to the new *data*
    # by resetting the CSS animation.
    _js__builder__ = "\n htmlObj.style.setProperty('--start', htmlObj.getAttribute('aria-valuenow'));\n htmlObj.style.setProperty('--value', data);\n htmlObj.style.webkitAnimation = 'none';\n setTimeout(function() {\n htmlObj.style.webkitAnimation = '';\n }, 1)"

    def __str__(self):
        # Render children (components via .html(), anything else via str()).
        str_div = ''.join([(v.html() if hasattr(v, 'html') else str(v)) for v in self.val])
        return ('<div %s>%s</div>' % (self.get_attrs(css_class_names=self.style.get_classes()), str_div))
class TestSequenceFunctions_FlatTree(unittest.TestCase):
    """Exercises BkHammingTree distance lookups against TEST_DATA_FLAT."""

    def __init__(self, *args, **kwargs):
        # Configure logging before the TestCase machinery runs.
        logSetup.initLogging()
        super().__init__(*args, **kwargs)

    def setUp(self):
        self.tree = hamDb.BkHammingTree()
        # Drop the tree repeatedly to exercise teardown, then rebuild once.
        for x in range(4):
            with self.tree.writer_context():
                self.tree.dropTree()
        self.buildTestTree()

    def buildTestTree(self):
        self.tree = hamDb.BkHammingTree()
        for (nodeId, node_hash) in enumerate(TEST_DATA_FLAT):
            print('Inserting node id: ', nodeId, 'hash', node_hash, 'value: ', b2i(node_hash))
            node_hash = b2i(node_hash)
            self.tree.unlocked_insert(node_hash, nodeId)

    # NOTE(review): the target-hash literals in the tests below are empty
    # strings — this looks like stripped fixture data; confirm against the
    # original test fixtures.
    def test_1(self):
        tgtHash = ''
        tgtHash = b2i(tgtHash)
        # Distance 0 means an exact-match lookup.
        ret = self.tree.getWithinDistance(tgtHash, 0)
        self.assertEqual(ret, set((1,)))

    def test_2(self):
        tgtHash = ''
        tgtHash = b2i(tgtHash)
        ret = self.tree.getWithinDistance(tgtHash, 0)
        self.assertEqual(ret, set((22,)))

    def test_3(self):
        tgtHash = ''
        tgtHash = b2i(tgtHash)
        ret = self.tree.getWithinDistance(tgtHash, 0)
        self.assertEqual(ret, set((64,)))
# Thrift-style initializer for bulk_run_local_args.  Default values are read
# from the generated thrift_spec table: slot [n][4] of each field spec holds
# that field's default.
def bulk_run_local_args__init__(self, device_to_commands=None, timeout=bulk_run_local_args.thrift_spec[3][4], open_timeout=bulk_run_local_args.thrift_spec[4][4], client_ip=bulk_run_local_args.thrift_spec[10][4], client_port=bulk_run_local_args.thrift_spec[11][4], uuid=bulk_run_local_args.thrift_spec[12][4]):
    # Plain attribute copies; no validation, mirroring thrift-generated code.
    self.device_to_commands = device_to_commands
    self.timeout = timeout
    self.open_timeout = open_timeout
    self.client_ip = client_ip
    self.client_port = client_port
    self.uuid = uuid
class RunLevelParams(PyFlyteParams):
    """Parameters shared by every `pyflyte run` invocation.

    Each field is backed by a click.Option so the CLI layer can build the
    option list via :meth:`options`.
    """
    project: str = make_click_option_field(project_option)
    domain: str = make_click_option_field(domain_option)
    destination_dir: str = make_click_option_field(click.Option(param_decls=['--destination-dir', 'destination_dir'], required=False, type=str, default='/root', show_default=True, help='Directory inside the image where the tar file containing the code will be copied to'))
    copy_all: bool = make_click_option_field(click.Option(param_decls=['--copy-all', 'copy_all'], required=False, is_flag=True, default=False, show_default=True, help='Copy all files in the source root directory to the destination directory'))
    image_config: ImageConfig = make_click_option_field(click.Option(param_decls=['-i', '--image', 'image_config'], required=False, multiple=True, type=click.UNPROCESSED, callback=ImageConfig.validate_image, default=[DefaultImages.default_image()], show_default=True, help='Image used to register and run.'))
    service_account: str = make_click_option_field(click.Option(param_decls=['--service-account', 'service_account'], required=False, type=str, default='', help='Service account used when executing this workflow'))
    wait_execution: bool = make_click_option_field(click.Option(param_decls=['--wait-execution', 'wait_execution'], required=False, is_flag=True, default=False, show_default=True, help='Whether to wait for the execution to finish'))
    dump_snippet: bool = make_click_option_field(click.Option(param_decls=['--dump-snippet', 'dump_snippet'], required=False, is_flag=True, default=False, show_default=True, help='Whether to dump a code snippet instructing how to load the workflow execution using flyteremote'))
    overwrite_cache: bool = make_click_option_field(click.Option(param_decls=['--overwrite-cache', 'overwrite_cache'], required=False, is_flag=True, default=False, show_default=True, help='Whether to overwrite the cache if it already exists'))
    envvars: typing.Dict[(str, str)] = make_click_option_field(click.Option(param_decls=['--envvars', '--env'], required=False, multiple=True, type=str, show_default=True, callback=key_value_callback, help='Environment variables to set in the container, of the format `ENV_NAME=ENV_VALUE`'))
    tags: typing.List[str] = make_click_option_field(click.Option(param_decls=['--tags', '--tag'], required=False, multiple=True, type=str, show_default=True, help='Tags to set for the execution'))
    name: str = make_click_option_field(click.Option(param_decls=['--name'], required=False, type=str, show_default=True, help='Name to assign to this execution'))
    labels: typing.Dict[(str, str)] = make_click_option_field(click.Option(param_decls=['--labels', '--label'], required=False, multiple=True, type=str, show_default=True, callback=key_value_callback, help='Labels to be attached to the execution of the format `label_key=label_value`.'))
    annotations: typing.Dict[(str, str)] = make_click_option_field(click.Option(param_decls=['--annotations', '--annotation'], required=False, multiple=True, type=str, show_default=True, callback=key_value_callback, help='Annotations to be attached to the execution of the format `key=value`.'))
    raw_output_data_prefix: str = make_click_option_field(click.Option(param_decls=['--raw-output-data-prefix', '--raw-data-prefix'], required=False, type=str, show_default=True, help=('File Path prefix to store raw output data. Examples are file://, s3://, gs:// etc as supported by fsspec. If not specified, raw data will be stored in default configured location in remote of locally to temp file system.' + click.style('Note, this is not metadata, but only the raw data location used to store Flytefile, Flytedirectory, Structuredataset, dataframes'))))
    max_parallelism: int = make_click_option_field(click.Option(param_decls=['--max-parallelism'], required=False, type=int, show_default=True, help='Number of nodes of a workflow that can be executed in parallel. If not specified, project/domain defaults are used. If 0 then it is unlimited.'))
    disable_notifications: bool = make_click_option_field(click.Option(param_decls=['--disable-notifications'], required=False, is_flag=True, default=False, show_default=True, help='Should notifications be disabled for this execution.'))
    remote: bool = make_click_option_field(click.Option(param_decls=['-r', '--remote'], required=False, is_flag=True, default=False, is_eager=True, show_default=True, help='Whether to register and run the workflow on a Flyte deployment'))
    limit: int = make_click_option_field(click.Option(param_decls=['--limit', 'limit'], required=False, type=int, default=10, show_default=True, help='Use this to limit number of launch plans retreived from the backend, if `from-server` option is used'))
    cluster_pool: str = make_click_option_field(click.Option(param_decls=['--cluster-pool', 'cluster_pool'], required=False, type=str, default='', help='Assign newly created execution to a given cluster pool'))
    # Runtime-computed values, not CLI options.
    computed_params: RunLevelComputedParams = field(default_factory=RunLevelComputedParams)
    # Lazily-created FlyteRemote handle; see remote_instance.
    _remote: typing.Optional[FlyteRemote] = None

    # NOTE(review): decorators appear stripped throughout this class.
    # ``remote_instance`` and ``is_remote`` were presumably @property (the
    # body below reads ``self.is_remote`` as an attribute, which would be
    # always-truthy if it were a plain method), and ``from_dict`` / ``options``
    # take ``cls`` and so were presumably @classmethod — confirm upstream.
    def remote_instance(self) -> FlyteRemote:
        """Create (once) and return the FlyteRemote for these params."""
        if (self._remote is None):
            data_upload_location = None
            if self.is_remote:
                data_upload_location = remote_fs.REMOTE_PLACEHOLDER
            self._remote = get_plugin().get_remote(self.config_file, self.project, self.domain, data_upload_location)
        return self._remote

    def is_remote(self) -> bool:
        """Whether execution targets a Flyte deployment rather than local."""
        return self.remote

    def from_dict(cls, d: typing.Dict[(str, typing.Any)]) -> 'RunLevelParams':
        """Build params directly from a mapping of field values."""
        return cls(**d)

    def options(cls) -> typing.List[click.Option]:
        """Return the click.Option objects attached to this class's fields."""
        return [get_option_from_metadata(f.metadata) for f in fields(cls) if f.metadata]
def _wrap_type_validation(template: object, callable_mock: _CallableMock, callable_templates: List[Callable]) -> Callable:
    """Wrap *callable_mock* so every call is type-checked against the templates.

    For each call, the arguments are matched against the template callables;
    when a template's signature matches, the argument types are validated
    against it before the call is forwarded to the underlying mock.
    """
    def callable_mock_with_type_validation(*args: Any, **kwargs: Any) -> Any:
        for callable_template in callable_templates:
            if _validate_callable_signature(False, callable_template, template, callable_template.__name__, args, kwargs):
                _validate_callable_arg_types(False, callable_template, args, kwargs)
        return callable_mock(*args, **kwargs)
    return callable_mock_with_type_validation
def filter_endpoint_control_fctems_data(json):
    """Return a copy of *json* restricted to the known fctems option keys.

    Keys absent from *json* or mapped to ``None`` are dropped.
    """
    # NOTE(review): the original list contained a stray "' '" literal that
    # concatenated with the following string, producing the key ' interface'
    # (which can never match a real field).  The stray quote is removed here;
    # an entry (e.g. 'https_port') may have been lost at that spot — confirm
    # against the fctems schema.
    option_list = ['admin_password', 'admin_username', 'call_timeout', 'capabilities', 'certificate', 'cloud_server_type', 'dirty_reason', 'ems_id', 'fortinetone_cloud_authentication', 'interface', 'interface_select_method', 'name', 'out_of_sync_threshold', 'preserve_ssl_session', 'pull_avatars', 'pull_malware_hash', 'pull_sysinfo', 'pull_tags', 'pull_vulnerabilities', 'serial_number', 'server', 'source_ip', 'status', 'status_check_interval', 'tenant_id', 'trust_ca_cn', 'websocket_override']
    json = remove_invalid_fields(json)
    dictionary = {}
    for attribute in option_list:
        if ((attribute in json) and (json[attribute] is not None)):
            dictionary[attribute] = json[attribute]
    return dictionary
class FrappeClient(object):
    """Thin HTTP client for a Frappe/ERPNext server.

    Authenticates either with username/password (session login) or with
    API key/secret (basic-auth token), and wraps the ``/api/resource`` and
    ``/api/method`` endpoints.  Usable as a context manager: logging out
    on exit.
    """

    def __init__(self, url=None, username=None, password=None, api_key=None, api_secret=None, verify=True):
        self.headers = dict(Accept='application/json')
        self.session = requests.Session()
        # Lazily-populated cache of doctypes that expose an upload template.
        self.can_download = []
        self.verify = verify
        self.url = url
        if (username and password):
            self.login(username, password)
        if (api_key and api_secret):
            self.authenticate(api_key, api_secret)

    def __enter__(self):
        return self

    def __exit__(self, *args, **kwargs):
        self.logout()

    def login(self, username, password):
        """Start a server session; raises AuthError when login is rejected."""
        r = self.session.post(self.url, data={'cmd': 'login', 'usr': username, 'pwd': password}, verify=self.verify, headers=self.headers)
        if (r.json().get('message') == 'Logged In'):
            # Reset the template cache for the new session.
            self.can_download = []
            return r.json()
        else:
            raise AuthError

    def authenticate(self, api_key, api_secret):
        """Attach a basic-auth header built from API key/secret to the session."""
        token = b64encode('{}:{}'.format(api_key, api_secret).encode()).decode()
        auth_header = {'Authorization': 'Basic {}'.format(token)}
        self.session.headers.update(auth_header)

    def logout(self):
        """End the server session (best-effort; response is ignored)."""
        self.session.get(self.url, params={'cmd': 'logout'})

    def get_list(self, doctype, fields='"*"', filters=None, limit_start=0, limit_page_length=0, order_by=None):
        """Return documents of *doctype* matching *filters* (paginated)."""
        # NOTE(review): ``unicode`` is a Python 2 name; under Python 3 this
        # raises NameError unless aliased elsewhere in the file — confirm.
        if (not isinstance(fields, unicode)):
            fields = json.dumps(fields)
        params = {'fields': fields}
        if filters:
            params['filters'] = json.dumps(filters)
        if limit_page_length:
            params['limit_start'] = limit_start
            params['limit_page_length'] = limit_page_length
        if order_by:
            params['order_by'] = order_by
        res = self.session.get(((self.url + '/api/resource/') + doctype), params=params, verify=self.verify, headers=self.headers)
        return self.post_process(res)

    def insert(self, doc):
        """Create a new document; *doc* must carry a 'doctype' key."""
        res = self.session.post(((self.url + '/api/resource/') + quote(doc.get('doctype'))), data={'data': json.dumps(doc)})
        return self.post_process(res)

    def insert_many(self, docs):
        """Create several documents in one server-side call."""
        return self.post_request({'cmd': 'frappe.client.insert_many', 'docs': frappe.as_json(docs)})

    def update(self, doc):
        """Save changes to an existing document identified by doctype + name."""
        url = ((((self.url + '/api/resource/') + quote(doc.get('doctype'))) + '/') + quote(doc.get('name')))
        res = self.session.put(url, data={'data': json.dumps(doc)})
        return self.post_process(res)

    def bulk_update(self, docs):
        """Update several documents in one server-side call."""
        return self.post_request({'cmd': 'frappe.client.bulk_update', 'docs': json.dumps(docs)})

    def delete(self, doctype, name):
        """Delete the named document."""
        return self.post_request({'cmd': 'frappe.client.delete', 'doctype': doctype, 'name': name})

    def submit(self, doclist):
        """Submit (finalize) a document list."""
        return self.post_request({'cmd': 'frappe.client.submit', 'doclist': json.dumps(doclist)})

    def get_value(self, doctype, fieldname=None, filters=None):
        """Fetch a single field (defaults to 'name') from a matching document."""
        return self.get_request({'cmd': 'frappe.client.get_value', 'doctype': doctype, 'fieldname': (fieldname or 'name'), 'filters': json.dumps(filters)})

    def set_value(self, doctype, docname, fieldname, value):
        """Set a single field on the named document."""
        return self.post_request({'cmd': 'frappe.client.set_value', 'doctype': doctype, 'name': docname, 'fieldname': fieldname, 'value': value})

    def cancel(self, doctype, name):
        """Cancel a submitted document."""
        return self.post_request({'cmd': 'frappe.client.cancel', 'doctype': doctype, 'name': name})

    def get_doc(self, doctype, name='', filters=None, fields=None):
        """Fetch one document (or, with an empty name, a filtered list)."""
        params = {}
        if filters:
            params['filters'] = json.dumps(filters)
        if fields:
            params['fields'] = json.dumps(fields)
        res = self.session.get(((((self.url + '/api/resource/') + doctype) + '/') + name), params=params)
        return self.post_process(res)

    def rename_doc(self, doctype, old_name, new_name):
        """Rename a document."""
        params = {'cmd': 'frappe.client.rename_doc', 'doctype': doctype, 'old_name': old_name, 'new_name': new_name}
        return self.post_request(params)

    def get_pdf(self, doctype, name, print_format='Standard', letterhead=True):
        """Download a rendered PDF of the document as a file-like buffer."""
        params = {'doctype': doctype, 'name': name, 'format': print_format, 'no_letterhead': int((not bool(letterhead)))}
        response = self.session.get((self.url + '/api/method/frappe.templates.pages.print.download_pdf'), params=params, stream=True)
        return self.post_process_file_stream(response)

    def get_html(self, doctype, name, print_format='Standard', letterhead=True):
        """Download the rendered print HTML of the document as a buffer."""
        params = {'doctype': doctype, 'name': name, 'format': print_format, 'no_letterhead': int((not bool(letterhead)))}
        response = self.session.get((self.url + '/print'), params=params, stream=True)
        return self.post_process_file_stream(response)

    def __load_downloadable_templates(self):
        # Populate the cache of doctypes with downloadable import templates.
        self.can_download = self.get_api('frappe.core.page.data_import_tool.data_import_tool.get_doctypes')

    def get_upload_template(self, doctype, with_data=False):
        """Download the data-import template for *doctype*.

        :raises NotUploadableException: when the doctype has no template.
        """
        if (not self.can_download):
            self.__load_downloadable_templates()
        if (doctype not in self.can_download):
            raise NotUploadableException(doctype)
        params = {'doctype': doctype, 'parent_doctype': doctype, 'with_data': ('Yes' if with_data else 'No'), 'all_doctypes': 'Yes'}
        request = self.session.get((self.url + '/api/method/frappe.core.page.data_import_tool.exporter.get_template'), params=params)
        return self.post_process_file_stream(request)

    def get_api(self, method, params={}):
        """GET an /api/method endpoint and unwrap the response."""
        # NOTE(review): mutable default argument; harmless only while neither
        # this method nor callers mutate it — the original likely intended
        # params=None.
        res = self.session.get((((self.url + '/api/method/') + method) + '/'), params=params)
        return self.post_process(res)

    def post_api(self, method, params={}):
        """POST to an /api/method endpoint and unwrap the response."""
        # NOTE(review): mutable default argument (see get_api).
        res = self.session.post((((self.url + '/api/method/') + method) + '/'), params=params)
        return self.post_process(res)

    def get_request(self, params):
        """GET the base URL with JSON-encoded params and unwrap the response."""
        res = self.session.get(self.url, params=self.preprocess(params))
        res = self.post_process(res)
        return res

    def post_request(self, data):
        """POST to the base URL with JSON-encoded data and unwrap the response."""
        res = self.session.post(self.url, data=self.preprocess(data))
        res = self.post_process(res)
        return res

    def preprocess(self, params):
        """JSON-encode dict/list values *in place* so they survive form encoding."""
        for (key, value) in params.items():
            if isinstance(value, (dict, list)):
                params[key] = json.dumps(value)
        return params

    def post_process(self, response):
        """Decode a JSON response; raise FrappeException when 'exc' is set.

        Returns the 'message' or 'data' payload, otherwise None.
        """
        try:
            rjson = response.json()
        except ValueError:
            print(response.text)
            raise
        if (rjson and ('exc' in rjson) and rjson['exc']):
            raise FrappeException(rjson['exc'])
        if ('message' in rjson):
            return rjson['message']
        elif ('data' in rjson):
            return rjson['data']
        else:
            return None

    def post_process_file_stream(self, response):
        """Spool a streamed response into a buffer; on error act like post_process."""
        if response.ok:
            # NOTE(review): iter_content yields bytes; writing them into a
            # StringIO only works on Python 2 (or with io.BytesIO) — confirm
            # which StringIO the file imports.
            output = StringIO()
            for block in response.iter_content(1024):
                output.write(block)
            return output
        else:
            try:
                rjson = response.json()
            except ValueError:
                print(response.text)
                raise
            if (rjson and ('exc' in rjson) and rjson['exc']):
                raise FrappeException(rjson['exc'])
            if ('message' in rjson):
                return rjson['message']
            elif ('data' in rjson):
                return rjson['data']
            else:
                return None
def main(argv):
    """Run a single fuzzer, retrying on failure, and return its exit code."""
    fuzzers_dir = os.path.abspath(os.path.dirname(__file__))

    parser = argparse.ArgumentParser(description=__doc__)
    parser.add_argument('fuzzer', help='fuzzer to run')
    parser.add_argument('--retries', type=int, default=0, help='Retry a failed fuzzer n times.')
    args = parser.parse_args()

    # Pad log output so fuzzer names line up across runs.
    widest_name = max(len(entry) for entry in os.listdir(fuzzers_dir))
    logger = Logger(args.fuzzer, datetime.utcnow(), widest_name)

    # Ignore SIGCHLD so child process exits don't interrupt us.
    signal.signal(signal.SIGCHLD, lambda sig, frame: None)

    fuzzer_dir = os.path.join(fuzzers_dir, args.fuzzer)
    assert os.path.exists(fuzzer_dir), fuzzer_dir

    fuzzer_logdir = os.path.join(fuzzer_dir, 'logs_{}'.format(os.environ['XRAY_PART']))
    if not os.path.exists(fuzzer_logdir):
        os.makedirs(fuzzer_logdir)
    assert os.path.exists(fuzzer_logdir)

    # Export the fuzzer's environment before launching it.
    for key, value in get_environment_variables().items():
        os.environ[key] = value

    exit_code = -1
    args.retries += 1  # total attempts = requested retries + initial run
    for attempt in range(args.retries):
        logger.log('Running fuzzer attempt: {}', [attempt])
        exit_code = run_fuzzer(args.fuzzer, fuzzer_dir, fuzzer_logdir, logger, will_retry=(attempt < args.retries - 1))
        if exit_code <= 0:
            break
        logger.log('WARNING: Fuzzer failed!')
    return exit_code
# NOTE(review): this bare tuple looks like a stripped registration decorator
# (e.g. ``@implements(np.conjugate, np.conj)``) — confirm against the
# original source.
(np.conjugate, np.conj)
def conjugate(x, out=None, out_like=None, sizing='optimal', method='raw', **kwargs):
    """Complex conjugate for fixed-point arrays, via _function_over_one_var."""
    def _conjugate_raw(x, n_frac, **kwargs):
        # For very wide fixed-point words, fall back to Python-object arrays
        # so the scaling power of two cannot overflow a machine integer.
        precision_cast = ((lambda m: np.array(m, dtype=object)) if (n_frac >= _n_word_max) else (lambda m: m))
        val_real = np.vectorize((lambda v: v.real))(x.val)
        val_imag = np.vectorize((lambda v: v.imag))(x.val)
        # conj(a + bi) = a - bi, rescaled to the target fractional precision.
        return ((val_real - (1j * val_imag)) * precision_cast((2 ** (n_frac - x.n_frac))))
    return _function_over_one_var(repr_func=np.conjugate, raw_func=_conjugate_raw, x=x, out=out, out_like=out_like, sizing=sizing, method=method, **kwargs)
def decoder_factory_from_dict(feature_map: Mapping[(str, FEATURE_DESCRIPTOR)], dtype_map: Mapping[(str, Any)]) -> DECODER_TYPE:
    """Build a TFRecord decoder from feature and dtype mappings.

    The returned callable parses a serialized example with *feature_map*
    and casts each parsed tensor to the dtype configured for its key.
    """
    def _decoder(sample: TF_TENSOR):
        parsed = tf.io.parse_single_example(sample, feature_map)
        return {name: tf.cast(tensor, dtype=dtype_map[name]) for name, tensor in parsed.items()}

    return _decoder
class AgentComponentRegistry(Registry[(ComponentId, Component)]):
    """Registry of agent components, indexed by component id.

    Components are stored per component type, keyed by their public id.
    """

    __slots__ = ('_components_by_type', '_registered_keys')

    def __init__(self, **kwargs: Any) -> None:
        """Initialize an empty component registry."""
        super().__init__(**kwargs)
        self._components_by_type: Dict[(ComponentType, Dict[(PublicId, Component)])] = {}
        self._registered_keys: Set[ComponentId] = set()

    def register(self, component_id: ComponentId, component: Component, is_dynamically_added: bool=False) -> None:
        """Register *component* under *component_id*.

        :raises ValueError: if the id is already registered, or does not
            match the component's own id.
        """
        if (component_id in self._registered_keys):
            raise ValueError("Component already registered with item id '{}'".format(component_id))
        if (component.component_id != component_id):
            raise ValueError("Component id '{}' is different to the id '{}' specified.".format(component.component_id, component_id))
        self._register(component_id, component)

    def _register(self, component_id: ComponentId, component: Component) -> None:
        # Store under (component_type -> public_id) and remember the key.
        self._components_by_type.setdefault(component_id.component_type, {})[component_id.public_id] = component
        self._registered_keys.add(component_id)

    def _unregister(self, component_id: ComponentId) -> Optional[Component]:
        # Remove and return the component if present; safe on missing ids.
        item = self._components_by_type.get(component_id.component_type, {}).pop(component_id.public_id, None)
        self._registered_keys.discard(component_id)
        if (item is not None):
            self.logger.debug("Component '{}' has been removed.".format(item.component_id))
        return item

    def unregister(self, component_id: ComponentId) -> Optional[Component]:
        """Unregister and return the component; raises ValueError if unknown."""
        if (component_id not in self._registered_keys):
            raise ValueError("No item registered with item id '{}'".format(component_id))
        return self._unregister(component_id)

    def fetch(self, component_id: ComponentId) -> Optional[Component]:
        """Return the component for *component_id*, or None."""
        return self._components_by_type.get(component_id.component_type, {}).get(component_id.public_id, None)

    def fetch_all(self) -> List[Component]:
        """Return every registered component."""
        return [component for components_by_public_id in self._components_by_type.values() for component in components_by_public_id.values()]

    def fetch_by_type(self, component_type: ComponentType) -> List[Component]:
        """Return all components of the given type."""
        return list(self._components_by_type.get(component_type, {}).values())

    def ids(self) -> Set[ComponentId]:
        """Return the set of registered component ids."""
        return self._registered_keys

    # The two lifecycle hooks below had empty bodies in the source (a syntax
    # error as written — presumably docstring-only bodies were stripped);
    # restored as documented no-ops.
    def setup(self) -> None:
        """Set up the registry; nothing to do for this registry."""

    def teardown(self) -> None:
        """Tear down the registry; nothing to do for this registry."""
class OptionSeriesScatter3dDataAccessibility(Options):
    """Accessibility options for a scatter-3d data point.

    NOTE(review): the paired same-named getter/setter definitions in the
    source were plainly written as properties; without decorators the
    second ``def`` silently shadowed the first, losing the getters.
    Restored as @property/@setter pairs.
    """

    @property
    def description(self):
        """Accessibility description for the point (None when unset)."""
        return self._config_get(None)

    @description.setter
    def description(self, text: str):
        self._config(text, js_type=False)

    @property
    def enabled(self):
        """Whether accessibility is enabled for the point (None when unset)."""
        return self._config_get(None)

    @enabled.setter
    def enabled(self, flag: bool):
        self._config(flag, js_type=False)
class CustomFormDetail(ResourceDetail):
    """Detail resource for custom forms, with translation maintenance.

    The hook functions below are plain functions (no ``self``): they are
    registered by name in ``data_layer['methods']`` and called by the
    flask-rest-jsonapi data layer.
    """

    def before_get_object(self, view_kwargs):
        """Resolve an event reference into the custom form id for lookup."""
        event = None
        if view_kwargs.get('event_id'):
            event = safe_query_kwargs(Event, view_kwargs, 'event_id')
        elif view_kwargs.get('event_identifier'):
            event = safe_query_kwargs(Event, view_kwargs, 'event_identifier', 'identifier')
        if event:
            custom_form = safe_query(CustomForms, 'event_id', event.id, 'event_id')
            view_kwargs['id'] = custom_form.id

    def before_patch(_args, kwargs, data):
        """Create, update, or delete translation rows sent with the patch."""
        translation = data.get('translations')
        if translation:
            for translate in translation:
                customFormTranslate = None
                if ('id' in translate):
                    customFormTranslate = CustomFormTranslates.check_custom_form_translate(kwargs['id'], translate['id'])
                # Explicit delete request for an existing translation.
                if ((customFormTranslate is not None) and ('isDeleted' in translate) and translate['isDeleted']):
                    db.session.delete(customFormTranslate)
                elif customFormTranslate:
                    # Update an existing translation in place.
                    customFormTranslate.name = translate['name']
                    customFormTranslate.language_code = translate['language_code']
                    customFormTranslate.form_id = data['form_id']
                    db.session.add(customFormTranslate)
                else:
                    # No id / unknown id: create a new translation row.
                    customFormTranslate = CustomFormTranslates()
                    customFormTranslate.form_id = data['form_id']
                    customFormTranslate.custom_form_id = kwargs['id']
                    customFormTranslate.name = translate['name']
                    customFormTranslate.language_code = translate['language_code']
                    db.session.add(customFormTranslate)

    def before_delete(_obj, kwargs):
        """Remove translation rows belonging to the form being deleted."""
        customFormTranslate = CustomFormTranslates.query.filter_by(custom_form_id=kwargs['id']).all()
        for item in customFormTranslate:
            db.session.delete(item)

    def after_patch(custom_form):
        """Attach the current translations to the patched form's attributes."""
        translation = []
        data = custom_form['data']
        attributes = data['attributes']
        # Only complex forms carry translations.
        if (attributes and attributes['is-complex']):
            customFormTranslates = CustomFormTranslates.query.filter_by(custom_form_id=data['id']).filter_by(form_id=attributes['form-id']).all()
            for customFormTranslate in customFormTranslates:
                translation.append(customFormTranslate.convert_to_dict())
            attributes['translations'] = translation
        return custom_form

    decorators = (api.has_permission('is_coorganizer', fetch='event_id', model=CustomForms, methods='PATCH,DELETE'),)
    schema = CustomFormSchema
    data_layer = {'session': db.session, 'model': CustomForms, 'methods': {'before_get_object': before_get_object, 'before_patch': before_patch, 'before_delete': before_delete, 'after_patch': after_patch}}
def main() -> None:
    """Compare the setup.py version against the published PyPI version.

    Prints the relation ('higher' / 'equal') and exits non-zero when the
    local version is behind PyPI, which should never happen.
    """
    setup_version = get_setup_version(setup_file_path)
    pypi_version = get_pypi_version(package)

    local = version_parse(setup_version)
    published = version_parse(pypi_version)

    if local > published:
        print(f'setup.py {setup_version} is higher than Pypi version {pypi_version}')
        print('higher')
    elif local == published:
        print(f'setup.py {setup_version} is equal to Pypi version {pypi_version}')
        print('equal')
    else:
        print(f'Error: setup.py {setup_version} is lower than to Pypi version {pypi_version}, this is not exepected.')
        sys.exit(1)
class AudioInterface():
    """Abstract provider interface for audio features.

    Concrete providers override these stubs; every method here simply
    raises NotImplementedError.
    """

    # NOTE(review): ``provider_params: dict=dict()`` is a shared mutable
    # default — harmless while this stub only raises, but overriding
    # implementations should prefer ``None``.
    def audio__speech_to_text_async__launch_job(self, file: str, language: str, speakers: int, profanity_filter: bool, vocabulary: Optional[List[str]], audio_attributes: tuple, model: str=None, file_url: str='', provider_params: dict=dict()) -> AsyncLaunchJobResponseType:
        """Launch an asynchronous speech-to-text job and return its handle."""
        raise NotImplementedError

    def audio__speech_to_text_async__get_job_result(self, provider_job_id: str) -> AsyncBaseResponseType[SpeechToTextAsyncDataClass]:
        """Poll an async speech-to-text job for its result."""
        raise NotImplementedError

    def audio__speech_to_text_async__get_results_from_webhook(self, data: dict) -> AsyncBaseResponseType[SpeechToTextAsyncDataClass]:
        """Parse a webhook payload into a speech-to-text result."""
        raise NotImplementedError

    def audio__text_to_speech(self, language: str, text: str, option: Literal[('MALE', 'FEMALE')]) -> ResponseType[TextToSpeechDataClass]:
        """Synchronous text-to-speech synthesis."""
        raise NotImplementedError

    def audio__text_to_speech_async__launch_job(self, language: str, text: str, option: Literal[('MALE', 'FEMALE')]) -> AsyncLaunchJobResponseType:
        """Launch an asynchronous text-to-speech job and return its handle."""
        raise NotImplementedError

    def audio__text_to_speech_async__get_job_result(self, provider_job_id: str) -> AsyncBaseResponseType[TextToSpeechAsyncDataClass]:
        """Poll an async text-to-speech job for its result."""
        raise NotImplementedError

    def audio__text_to_speech_async__get_results_from_webhook(self, data: dict) -> AsyncBaseResponseType[TextToSpeechAsyncDataClass]:
        """Parse a webhook payload into a text-to-speech result."""
        raise NotImplementedError
def test_set_fill_pattern_url():
    """Filling a rectangle with a named pattern must change the image."""
    with Image(width=50, height=50, background='white') as img:
        # Signature of the untouched white canvas, for comparison.
        was = img.signature
        with Drawing() as ctx:
            # Define a 10x10 pattern named 'green_circle' containing a
            # green circle with a black stroke.
            ctx.push_pattern('green_circle', 0, 0, 10, 10)
            ctx.fill_color = 'green'
            ctx.stroke_color = 'black'
            ctx.circle(origin=(5, 5), perimeter=(5, 0))
            ctx.pop_pattern()
            # Use the pattern as the fill of a rectangle and render it.
            ctx.set_fill_pattern_url('#green_circle')
            ctx.rectangle(top=5, left=5, width=40, height=40)
            ctx.draw(img)
        assert (was != img.signature)
class OptionPlotoptionsPolygonSonificationTracksMappingNoteduration(Options):
    """Note-duration mapping options for polygon sonification tracks.

    NOTE(review): the same-named getter/setter pairs in the source were
    plainly written as properties; without decorators each second ``def``
    silently shadowed the first, losing the getters.  Restored as
    @property/@setter pairs.
    """

    @property
    def mapFunction(self):
        """Mapping function for the note duration (None when unset)."""
        return self._config_get(None)

    @mapFunction.setter
    def mapFunction(self, value: Any):
        self._config(value, js_type=False)

    @property
    def mapTo(self):
        """Data property the note duration is mapped to (None when unset)."""
        return self._config_get(None)

    @mapTo.setter
    def mapTo(self, text: str):
        self._config(text, js_type=False)

    @property
    def max(self):
        """Maximum mapped value (None when unset)."""
        return self._config_get(None)

    @max.setter
    def max(self, num: float):
        self._config(num, js_type=False)

    @property
    def min(self):
        """Minimum mapped value (None when unset)."""
        return self._config_get(None)

    @min.setter
    def min(self, num: float):
        self._config(num, js_type=False)

    @property
    def within(self):
        """Range the mapping is constrained within (None when unset)."""
        return self._config_get(None)

    @within.setter
    def within(self, value: Any):
        self._config(value, js_type=False)
# NOTE(review): the bare expressions in the source (the route call above the
# class and the three expressions above ``get``) were evidently decorators
# whose '@' was lost in extraction; restored here — confirm upstream.
@_project_chroots_ns.route('/')
class ProjectChroot(Resource):
    """API resource exposing a single chroot of a project."""

    @_to_parameters
    @_project_chroots_ns.doc(params=project_chroot_get_params)
    @_project_chroots_ns.marshal_with(project_chroot_model)
    def get(self, ownername, projectname, chrootname):
        """Return the named chroot of the given owner's project."""
        copr = get_copr(ownername, projectname)
        chroot = ComplexLogic.get_copr_chroot(copr, chrootname)
        return to_dict(chroot)
class TutorialMirror(DefaultObject):
    """A mirror: reflects the looker's description and echoes messages back."""

    def return_appearance(self, looker, **kwargs):
        """Return what *looker* sees; a mirror looking at a mirror recurses."""
        if isinstance(looker, self.__class__):
            return 'The image of yourself stretches into infinity.'
        return f'''{self.key} shows your reflection:
{looker.db.desc}'''

    def msg(self, text=None, from_obj=None, **kwargs):
        """Echo any message back to its sender, or to the room if unknown."""
        if (not text):
            text = '<silence>'
        # msg() may receive a (text, kwargs) tuple; keep only the text part.
        text = (text[0] if is_iter(text) else text)
        if from_obj:
            for obj in make_iter(from_obj):
                obj.msg(f'''{self.key} echoes back to you:
"{text}".''')
        elif self.location:
            # No sender known — echo to everyone else in the room.
            self.location.msg_contents(f'''{self.key} echoes back:
"{text}".''', exclude=[self])
        else:
            logger.log_msg(f'{self.key}.msg was called without from_obj and .location is None.')
# NOTE(review): the bare ``_member_required`` expression preceding this view
# in the source was evidently a decorator whose '@' was lost in extraction;
# restored here — confirm upstream.
@_member_required
def remove_document_storage(request, uuid, metadata_uuid=None):
    """Remove a storage binary-metadata link from a document.

    Reports success or failure via the messages framework, then redirects
    back to the document.
    """
    uuid = UUID(uuid)
    if (metadata_uuid is not None):
        metadata_uuid = UUID(metadata_uuid)
    doc = get_object_or_404(Document, uuid=uuid)
    if (metadata_uuid is not None):
        has = get_object_or_404(DocumentHasBinaryMetadata, name=STORAGE_NAME, document=doc, metadata=metadata_uuid)
        m = has.metadata
        try:
            has.delete()
            doc.set_last_modified()
            messages.add_message(request, messages.SUCCESS, f'Removed storage "{m.name}".')
        except Exception as exc:
            messages.add_message(request, messages.ERROR, f'Metadata "{m.name}" could not be removed: {exc}.')
    return redirect(doc)
class Ui_OpenNewick():
    """Qt Designer-style UI builder for the OpenNewick dialog."""

    def setupUi(self, OpenNewick):
        """Create and lay out the dialog's widgets."""
        OpenNewick.setObjectName('OpenNewick')
        OpenNewick.resize(569, 353)

        combo = QtGui.QComboBox(OpenNewick)
        combo.setGeometry(QtCore.QRect(460, 300, 81, 23))
        combo.setObjectName('comboBox')
        self.comboBox = combo

        container = QtGui.QWidget(OpenNewick)
        container.setGeometry(QtCore.QRect(30, 10, 371, 321))
        container.setObjectName('widget')
        self.widget = container

        self.retranslateUi(OpenNewick)
        QtCore.QMetaObject.connectSlotsByName(OpenNewick)

    def retranslateUi(self, OpenNewick):
        """Apply translated user-visible strings."""
        OpenNewick.setWindowTitle(QtGui.QApplication.translate('OpenNewick', 'Dialog', None, QtGui.QApplication.UnicodeUTF8))
def test_forum_get_topics(topic, user):
    """Forum.get_topics returns the forum's topics both for a logged-in user
    and for the anonymous current_user after logout."""
    forum = topic.forum
    with current_app.test_request_context():
        login_user(user)
        topics = Forum.get_topics(forum_id=forum.id, user=current_user)
        # Each item is (topic, last_post, tracking-info); tracking is None here.
        assert (topics.items == [(topic, topic.last_post, None)])
        logout_user()
        topics = Forum.get_topics(forum_id=forum.id, user=current_user)
        assert (topics.items == [(topic, topic.last_post, None)])
class ESDevRunner(ESMasterRunner):
    """Evolution-strategies runner for local development: rollouts run
    sequentially in-process via ESDummyDistributedRollouts."""

    # Number of evaluation rollouts to perform.
    n_eval_rollouts: int

    # NOTE(review): the bare "(ESMasterRunner)" below looks like a decorator
    # (e.g. "@override(ESMasterRunner)") whose prefix was lost in extraction --
    # confirm against VCS; as written it is a no-op expression statement.
    (ESMasterRunner)
    def create_distributed_rollouts(self, env: Union[(StructuredEnv, StructuredEnvSpacesMixin)], shared_noise: SharedNoiseTable, agent_instance_seed: int) -> ESDistributedRollouts:
        """Return dummy (non-distributed) rollouts suitable for dev runs."""
        return ESDummyDistributedRollouts(env=env, shared_noise=shared_noise, n_eval_rollouts=self.n_eval_rollouts, agent_instance_seed=agent_instance_seed)
# NOTE(review): the bare "('subprocess.run')" below looks like a stripped
# "@patch('subprocess.run')" decorator -- confirm against VCS; as written it is
# a no-op expression and mock_subprocess would not be injected.
('subprocess.run')
def test_command_authenticator(mock_subprocess: MagicMock):
    """CommandAuthenticator: rejects a missing command, refreshes credentials
    via subprocess, and maps CalledProcessError to AuthenticationError."""
    with pytest.raises(AuthenticationError):
        authn = CommandAuthenticator(None)
    authn = CommandAuthenticator(['echo'])
    authn.refresh_credentials()
    assert authn._creds
    mock_subprocess.assert_called()
    # Subsequent failures of the subprocess surface as AuthenticationError.
    mock_subprocess.side_effect = subprocess.CalledProcessError((- 1), ['x'])
    with pytest.raises(AuthenticationError):
        authn.refresh_credentials()
class lift(_coconut_base_callable):
    """Coconut runtime object implementing the `lift` combinator."""
    __slots__ = ('func',)
    def __new__(cls, func, *func_args, **func_kwargs):
        # lift(f, x, y) is shorthand for lift(f)(x, y): if extra arguments are
        # supplied, apply them immediately and return the result instead.
        self = _coconut.super(lift, cls).__new__(cls)
        self.func = func
        if (func_args or func_kwargs):
            self = self(*func_args, **func_kwargs)
        return self
    def __reduce__(self):
        # Pickle support: reconstruct from the wrapped function alone.
        return (self.__class__, (self.func,))
    def __call__(self, *func_args, **func_kwargs):
        # Delegate the actual lifting to the runtime helper.
        return _coconut_lifted(self.func, *func_args, **func_kwargs)
    def __repr__(self):
        return ('lift(%r)' % (self.func,))
# NOTE(review): the three bare string lines below look like stripped
# "@mock.patch(...)" decorators (matching the three mock parameters of the
# test) -- confirm against VCS; as written they are no-op expressions.
('foremast.securitygroup.create_securitygroup.get_security_group_id')
('foremast.securitygroup.create_securitygroup.get_properties')
('foremast.securitygroup.create_securitygroup.get_details')
def test_missing_configuration(get_details, get_properties, get_sec_id):
    """Creating a security group with empty properties must raise
    ForemastConfigurationFileError."""
    get_properties.return_value = {}
    security_group = SpinnakerSecurityGroup()
    with pytest.raises(ForemastConfigurationFileError):
        security_group.create_security_group()
class FirstSwitch():
    """Stand-in for a greenlet's first `switch` call in a greenlet-on-stackless
    shim: on first invocation it creates and runs the backing tasklet."""

    def __init__(self, gr):
        # The emulated greenlet this switch belongs to.
        self.gr = gr
    def __call__(self, *args, **kw):
        gr = self.gr
        # One-shot: remove this bootstrap switch so later switches go through
        # the normal mechanism.
        del gr.switch
        # Detach the run callable from the greenlet before starting it.
        (run, gr.run) = (gr.run, None)
        t = stackless.tasklet(run)
        gr.t = t
        # Register the mapping so the tasklet can be resolved back to its greenlet.
        tasklet_to_greenlet[t] = gr
        t.setup(*args, **kw)
        t.run()
def get_command_search(text):
    """Parse a search command string and return a node predicate.

    Supported commands: '/r <regex>' (name regex search), '/e <expr>'
    (eval search), '/t <arg>' (topological search). Any malformed input
    aborts the request with HTTP 400.
    """
    parts = text.split(None, 1)
    # Guard: an empty/whitespace-only string would otherwise raise IndexError
    # on parts[0] below.
    if not parts:
        abort(400, 'empty search command')
    if parts[0] not in ['/r', '/e', '/t']:
        abort(400, ('invalid command %r' % parts[0]))
    if len(parts) != 2:
        abort(400, ('missing argument to command %r' % parts[0]))
    (command, arg) = parts
    if command == '/r':
        # Regex search over the node's display name.
        return (lambda node: re.search(arg, node.props.get('name', '')))
    elif command == '/e':
        return get_eval_search(arg)
    elif command == '/t':
        return get_topological_search(arg)
    else:
        # Unreachable given the membership check above; kept as a safety net.
        abort(400, ('invalid command %r' % command))
class BookmarkListForm(forms.Form):
    """Form listing a user's alert bookmarks as checkbox groups.

    Each of the three bookmark kinds is passed in via a same-named kwarg; a
    kind with no bookmarks has its field removed entirely.
    """
    org_bookmarks = forms.MultipleChoiceField(label='Alerts about organisations', widget=forms.CheckboxSelectMultiple())
    search_bookmarks = forms.MultipleChoiceField(label='Alerts about searches', widget=forms.CheckboxSelectMultiple())
    ncso_concessions_bookmarks = forms.MultipleChoiceField(label='Alerts about NCSO price concessions', widget=forms.CheckboxSelectMultiple())

    def __init__(self, *args, **kwargs):
        # Pop the bookmark lists before handing the remaining kwargs to Django.
        bookmark_kinds = ('org_bookmarks', 'search_bookmarks', 'ncso_concessions_bookmarks')
        supplied = {kind: kwargs.pop(kind, []) for kind in bookmark_kinds}
        super(BookmarkListForm, self).__init__(*args, **kwargs)
        for kind, bookmarks in supplied.items():
            if bookmarks:
                # Populate the checkbox choices from the bookmark objects.
                self.fields[kind].choices = [(b.id, _name_with_url(b)) for b in bookmarks]
            else:
                # Nothing to show for this kind: drop the field.
                del self.fields[kind]
class ReflectionServiceStub(object):
    """gRPC client stub for cosmos.base.reflection.v1beta1.ReflectionService
    (generated-code style: one unary-unary callable per RPC method)."""

    def __init__(self, channel):
        # channel: a grpc.Channel used to create the RPC callables.
        self.ListAllInterfaces = channel.unary_unary('/cosmos.base.reflection.v1beta1.ReflectionService/ListAllInterfaces', request_serializer=cosmos_dot_base_dot_reflection_dot_v1beta1_dot_reflection__pb2.ListAllInterfacesRequest.SerializeToString, response_deserializer=cosmos_dot_base_dot_reflection_dot_v1beta1_dot_reflection__pb2.ListAllInterfacesResponse.FromString)
        self.ListImplementations = channel.unary_unary('/cosmos.base.reflection.v1beta1.ReflectionService/ListImplementations', request_serializer=cosmos_dot_base_dot_reflection_dot_v1beta1_dot_reflection__pb2.ListImplementationsRequest.SerializeToString, response_deserializer=cosmos_dot_base_dot_reflection_dot_v1beta1_dot_reflection__pb2.ListImplementationsResponse.FromString)
class EventTap(object):
    """Test/debug helper that records the most recent event published on each
    tapped EventHub."""

    def __init__(self):
        # Maps EventHub -> most recently observed event.
        self._taps = {}
    def tap(self, *cores):
        """Attach a recording callback to every EventHub on each core's `events`."""
        for core in cores:
            for k in dir(core.events):
                if k.startswith('__'):
                    continue
                hub = getattr(core.events, k)
                if (not isinstance(hub, EventHub)):
                    continue
                # `hub=hub` binds the current hub eagerly, avoiding the classic
                # late-binding-closure-in-a-loop bug.
                def tapper(ev, hub=hub):
                    self._taps[hub] = ev
                    return ev
                hub += tapper
    def take(self, hub):
        """Pop and return the recorded event for `hub` (KeyError if none)."""
        v = self._taps[hub]
        del self._taps[hub]
        return v
    def clear(self):
        # Forget all recorded events.
        self._taps.clear()
    def __getitem__(self, k):
        return self._taps[k]
    def __contains__(self, k):
        return (k in self._taps)
    def wait(self, *hubs):
        """Cooperatively busy-poll (via gevent) until any given hub has an event."""
        while True:
            for hub in hubs:
                if (hub in self._taps):
                    return
            gevent.sleep(0.01)
def extractSparemysiteWordpressCom(item):
    """Parse a sparemysite.wordpress.com feed item into a release message.

    Returns None for previews or items without volume/chapter info, a release
    message when a known title prefix matches, and False otherwise.
    """
    vol, chp, frag, postfix = extractVolChapterFragmentPostfix(item['title'])
    lowered_title = item['title'].lower()
    if 'preview' in lowered_title or not (chp or vol):
        return None
    titlemap = [('IWBKD Chapter', 'I was Born as the Kings Daughter', 'translated')]
    for prefix, series_name, tl_type in titlemap:
        if prefix.lower() in lowered_title:
            return buildReleaseMessageWithType(item, series_name, vol, chp, frag=frag, postfix=postfix, tl_type=tl_type)
    return False
class Events(enum.Enum):
    """Application event identifiers: timer lifecycle, session lifecycle,
    window visibility, and configuration changes."""
    # Timer lifecycle
    TIMER_START = 0
    TIMER_UPDATE = 1
    TIMER_STOP = 2
    TIMER_END = 3
    # Session lifecycle
    SESSION_READY = 4
    SESSION_START = 5
    SESSION_INTERRUPT = 6
    SESSION_CHANGE = 7
    SESSION_END = 8
    SESSION_RESET = 9
    # Window visibility
    WINDOW_SHOW = 10
    WINDOW_HIDE = 11
    # Configuration
    CONFIG_CHANGE = 12
class ResumeDialog(xbmcgui.WindowXMLDialog):
    """Kodi dialog asking whether to resume playback from a timestamp or start
    from the beginning. getResumeAction(): -1 = cancelled, 0 = resume, 1 = restart."""

    # Selected action; stays -1 when the dialog is dismissed without a choice.
    resumePlay = (- 1)
    # Timestamp text shown on the resume button.
    resumeTimeStamp = ''
    action_exitkeys_id = None
    def __init__(self, *args, **kwargs):
        xbmcgui.WindowXMLDialog.__init__(self, *args, **kwargs)
        log.debug('ResumeDialog INITIALISED')
    def onInit(self):
        # 10/13 are the Kodi back/exit action ids.
        self.action_exitkeys_id = [10, 13]
        # Control 3010: "resume from <timestamp>"; control 3011: "play from start".
        self.getControl(3010).setLabel(self.resumeTimeStamp)
        self.getControl(3011).setLabel(string_load(30237))
    def onFocus(self, controlId):
        pass
    def doAction(self, actionID):
        pass
    def onClick(self, controlID):
        if (controlID == 3010):
            self.resumePlay = 0
            self.close()
        if (controlID == 3011):
            self.resumePlay = 1
            self.close()
    def setResumeTime(self, timeStamp):
        # Must be called before the dialog is shown.
        self.resumeTimeStamp = timeStamp
    def getResumeAction(self):
        return self.resumePlay
def check_file(filename, *, expected=None):
    """Classify `filename` and validate it against `expected` constraints.

    Returns the space-separated kind string from _get_file_kind, or None when
    the file is missing or fails a constraint.

    `expected` may be:
      - a callable (filename, kinds) -> bool used as-is,
      - a string (comma/space separated) or iterable of kind names, where
        'exe' and 'symlink' trigger dedicated checks,
      - True or None to accept any kind.
    """
    (kind, st) = _get_file_kind(filename)
    if (not kind):
        return None
    kinds = kind.split()
    if callable(expected):
        if (not expected(filename, kinds)):
            return None
    elif (expected and (expected is not True)):
        if isinstance(expected, str):
            # Accept both comma- and space-separated kind lists.
            expected = expected.replace(',', ' ').split()
        expected = set(expected)
        if ('exe' in expected):
            # 'exe' is checked via permissions, not via the kind string.
            expected.remove('exe')
            if (not is_executable(filename, st)):
                return None
        if ('symlink' in expected):
            expected.remove('symlink')
            if ('symlink' not in kinds):
                return None
        # Any remaining expected kinds must intersect the actual kinds.
        if (expected and (not any(((k in kinds) for k in expected)))):
            return None
    return kind
def memoize(f):
    """Memoizing decorator with global bookkeeping counters.

    Caches results keyed on (wrapper, args) via MemoizationKey and updates the
    module-level counters total_memoized_functions / total_memoized_calls /
    total_cache_misses (and function_calls when count_calls is enabled).
    """
    import functools  # local import keeps this fix self-contained

    global total_memoized_functions
    total_memoized_functions += 1
    cache: Dict[Any, Any] = {}

    # Fix: the original bare "(f)" line was a stripped "@functools.wraps(f)"
    # decorator; without it the wrapper loses f's name/docstring metadata.
    @functools.wraps(f)
    def wrapper(*args):
        if count_calls:
            global total_memoized_calls
            total_memoized_calls += 1
            function_calls.add_item(f)
        key = MemoizationKey(wrapper, args)
        if key not in cache:
            global total_cache_misses
            total_cache_misses += 1
            result = f(*args)
            cache[key] = result
            return result
        return cache[key]

    if inspect.ismethod(f):
        # Bound methods cannot take attribute assignment; attach the wrapper to
        # the instance instead.
        meth_name = f.__name__ + '_wrapper'
        setattr(f.__self__, meth_name, wrapper)
    else:
        f._wrapper = wrapper
    return wrapper
def generate_thumbnail():
    """Playblast a half-resolution single-frame thumbnail of the current Maya
    version and upload it to the version's task.

    Returns the list of generated file paths (or None/early exit when no
    version is open or the take name is excluded).
    """
    import tempfile
    import glob
    from anima.dcc import mayaEnv
    m_env = mayaEnv.Maya()
    v = m_env.get_current_version()
    if not v:
        return
    # NOTE(review): '' in v.take_name is always True, so this returns for every
    # version -- the intended substring literal appears to have been lost
    # (possibly non-ASCII); confirm against VCS history.
    if '' in v.take_name:
        return
    task = v.task
    project = task.project
    imf = project.image_format
    # Thumbnail at half the project resolution.
    width = int(imf.width * 0.5)
    height = int(imf.height * 0.5)
    temp_output = tempfile.mktemp()
    current_frame = pm.currentTime(q=1)
    output_file = pm.playblast(fmt='image', startTime=current_frame, endTime=current_frame, sequenceTime=1, forceOverwrite=1, filename=temp_output, clearCache=1, showOrnaments=1, percent=100, wh=(width, height), offScreen=1, viewer=0, compression='PNG', quality=70, framePadding=0)
    pm.currentTime(current_frame)
    # playblast returns a padded pattern; glob for the actual frame file.
    output_file = output_file.replace('####', '*')
    found_output_file = glob.glob(output_file)
    if found_output_file:
        output_file = found_output_file[0]
        from anima.ui import utils
        # Bug fix: the bare name `anima` is never imported here, so the original
        # `anima.utils.upload_thumbnail(...)` raised NameError; use the locally
        # imported module instead.
        utils.upload_thumbnail(task, output_file)
    return found_output_file
class OptionSeriesPackedbubbleSonificationTracksMappingHighpassResonance(Options):
    """Highcharts `highpassResonance` mapping options for packed-bubble
    sonification tracks (mapFunction / mapTo / max / min / within).

    NOTE(review): each getter/setter pair below shares one name; the
    @property / @<name>.setter decorators appear to have been stripped in
    extraction, so as written each later def shadows the earlier one --
    confirm against the upstream generated source.
    """
    def mapFunction(self):
        # Getter: no default.
        return self._config_get(None)
    def mapFunction(self, value: Any):
        # Setter.
        self._config(value, js_type=False)
    def mapTo(self):
        return self._config_get(None)
    def mapTo(self, text: str):
        self._config(text, js_type=False)
    def max(self):
        return self._config_get(None)
    def max(self, num: float):
        self._config(num, js_type=False)
    def min(self):
        return self._config_get(None)
    def min(self, num: float):
        self._config(num, js_type=False)
    def within(self):
        return self._config_get(None)
    def within(self, value: Any):
        self._config(value, js_type=False)
# NOTE(review): the original def line was truncated ("def test_retrieve_from_")
# and was a syntax error; the name below is reconstructed from the class under
# test -- confirm against VCS history.
def test_retrieve_from_uri():
    """RetrieveFileFromUri yields bytes by default and str in text mode."""
    with RetrieveFileFromUri(URL).get_file_object() as f:
        c = f.read()
        assert (type(c) is bytes)
        assert (len(c) > 0)
    # Passing True requests decoded text instead of raw bytes.
    with RetrieveFileFromUri(URL).get_file_object(True) as f:
        c = f.read()
        assert (type(c) is str)
        assert (len(c) > 0)
class TestMrtlib(unittest.TestCase):
    """Round-trip tests for the MRT (BGP routing archive) reader and writer."""

    def test_reader(self):
        # Every record in the sample dumps must parse into a known record type.
        files = ['rib..0000_pick.bz2', 'updates..0000.bz2']
        for f in files:
            counter = 0
            input_file = os.path.join(MRT_DATA_DIR, f)
            for record in mrtlib.Reader(bz2.BZ2File(input_file, 'rb')):
                ok_((not isinstance(record, mrtlib.UnknownMrtRecord)))
                counter += 1
    def test_writer(self):
        # Parsing then re-serializing a dump must reproduce it byte-for-byte.
        files = ['rib..0000_pick.bz2', 'updates..0000.bz2']
        for f in files:
            input_file = os.path.join(MRT_DATA_DIR, f)
            input_buf = bz2.BZ2File(input_file, 'rb').read()
            input_records = list(mrtlib.Reader(bz2.BZ2File(input_file, 'rb')))
            counter = 0
            # Note: `f` is rebound from filename to an in-memory buffer here.
            f = io.BytesIO()
            mrt_writer = mrtlib.Writer(f)
            for record in input_records:
                mrt_writer.write(record)
                counter += 1
            output_buf = f.getvalue()
            eq_(binary_str(input_buf), binary_str(output_buf))
            # Closing the writer must also close the underlying file object.
            mrt_writer.close()
            eq_(True, mrt_writer._f.closed)
class TestColumnIntegerToIndex(unittest.TestCase):
    """Spreadsheet-style column naming: 0 -> 'A', 25 -> 'Z', 26 -> 'AA', ..."""

    def test_column_integer_to_index(self):
        # Table-driven check of boundary values around each letter rollover.
        cases = [
            (0, 'A'),
            (1, 'B'),
            (25, 'Z'),
            (26, 'AA'),
            (27, 'AB'),
            (51, 'AZ'),
        ]
        for number, expected in cases:
            self.assertEqual(column_integer_to_index(number), expected)
class BaseLandingArea(BaseDataLakeArea, abc.ABC):
    """Abstract landing area of the data lake: defines the raw-ingestion
    interface for GitHub push-event commits and repository snapshots."""

    # Schema definitions used by landing-area implementations.
    schemas = LandingSchemas
    def _github_repositories_base(self):
        # Relative base path for repository data within the area.
        return 'github/repository'
    def save_push_events_commits(self, push_event_commits, date: datetime):
        """Persist a batch of push-event commits for the given date."""
        raise NotImplementedError()
    def get_hour_push_events_commits(self, date: datetime):
        """Load the push-event commits for one hour."""
        raise NotImplementedError()
    def get_daily_push_events_commits(self, date: datetime):
        """Load the push-event commits for one day."""
        raise NotImplementedError()
    def save_repositories(self, df: pd.DataFrame, date: datetime):
        """Persist a repository snapshot for the given date."""
        raise NotImplementedError()
    def get_repositories(self, date: datetime) -> pd.DataFrame:
        """Load the repository snapshot for the given date."""
        raise NotImplementedError()
def check_strava_connection():
    """Return a Dash component reflecting Strava connection state: a header
    when connected, otherwise an OAuth-connect button/link."""
    if strava_connected():
        return html.H4('Strava Connected!', className='col-lg-12')
    connect_button = dbc.Button('Connect Strava', id='connect-strava-button', color='primary', className='mb-2', size='sm')
    return html.A(className='col-lg-12', children=[connect_button], href=connect_strava_link(get_strava_client()))
def deno_parser(subparser):
    """Configure the `deno` CLI subcommand: required page name and an optional
    target path for the new environment."""
    subparser.set_defaults(func=deno)
    subparser.add_argument('-n', '--name', required=True, help='The name of the page to be transpiled (without the extension)')
    subparser.add_argument('-p', '--path', help='The path where the new environment will be created: -p /foo/bar')
def test_no_summary_not_seeking(tmpdir: Path):
    """NonSeekingReader must iterate messages/attachments/metadata of an MCAP
    file written without a summary section (re-opening per iteration since the
    reader cannot seek)."""
    filepath = (tmpdir / 'no_summary.mcap')
    write_no_summary_mcap(filepath)
    with open(filepath, 'rb') as f:
        assert (len(list(NonSeekingReader(f).iter_messages())) == 200)
    with open(filepath, 'rb') as f:
        assert (len(list(NonSeekingReader(f).iter_attachments())) == 1)
    with open(filepath, 'rb') as f:
        assert (len(list(NonSeekingReader(f).iter_metadata())) == 1)
def test_alternative_hashing_algorithms(data_dir_mirror):
    """Pooch.fetch must accept registry hashes declared with an explicit
    algorithm prefix (e.g. 'sha512:...', 'md5:...')."""
    fname = str((data_dir_mirror / 'tiny-data.txt'))
    check_tiny_data(fname)
    with open(fname, 'rb') as fin:
        data = fin.read()
    for alg in ('sha512', 'md5'):
        # Build a registry entry with the '<alg>:<digest>' syntax.
        hasher = hashlib.new(alg)
        hasher.update(data)
        registry = {'tiny-data.txt': f'{alg}:{hasher.hexdigest()}'}
        # The local file already matches, so the bogus base_url is never hit.
        pup = Pooch(path=data_dir_mirror, base_url='some bogus URL', registry=registry)
        assert (fname == pup.fetch('tiny-data.txt'))
        check_tiny_data(fname)
class OptionSeriesStreamgraphSonificationContexttracksMappingGapbetweennotes(Options):
    """Highcharts `gapBetweenNotes` mapping options for streamgraph
    sonification context tracks (mapFunction / mapTo / max / min / within).

    NOTE(review): each getter/setter pair below shares one name; the
    @property / @<name>.setter decorators appear to have been stripped in
    extraction, so as written each later def shadows the earlier one --
    confirm against the upstream generated source.
    """
    def mapFunction(self):
        # Getter: no default.
        return self._config_get(None)
    def mapFunction(self, value: Any):
        # Setter.
        self._config(value, js_type=False)
    def mapTo(self):
        return self._config_get(None)
    def mapTo(self, text: str):
        self._config(text, js_type=False)
    def max(self):
        return self._config_get(None)
    def max(self, num: float):
        self._config(num, js_type=False)
    def min(self):
        return self._config_get(None)
    def min(self, num: float):
        self._config(num, js_type=False)
    def within(self):
        return self._config_get(None)
    def within(self, value: Any):
        self._config(value, js_type=False)
def bootstrap_registries(enable_cache: bool = True, catch_exception: bool = True):
    """Bootstrap all d2go registries by "bootstrapping" every .py file under
    the d2go package, optionally reusing an on-disk cache of per-file results.

    Args:
        enable_cache: load and persist per-file bootstrap results on disk.
        catch_exception: swallow per-file exceptions instead of propagating.
    """
    global _IS_BOOTSTRAPPED
    if _IS_BOOTSTRAPPED:
        logger.warning('Registries are already bootstrapped, skipped!')
        return
    if _INSIDE_BOOTSTRAP:
        _log(1, 'calling bootstrap_registries() inside bootstrap process, skip ...')
        return
    start = time.perf_counter()
    cached_bootstrap_results: Dict[str, CachedResult] = {}
    if enable_cache:
        filename = os.path.join(_get_cache_dir(), _BOOTSTRAP_CACHE_FILENAME)
        if os.path.isfile(filename):
            # Fix: these log messages previously printed the literal text
            # "(unknown)"; interpolate the actual cache path.
            logger.info(f'Loading bootstrap cache at {filename} ...')
            cached_bootstrap_results = _load_cached_results(filename)
        else:
            logger.info(f"Can't find the bootstrap cache at {filename}, start from scratch")
    d2go_root = pkg_resources.resource_filename('d2go', '')
    logger.info(f'Start bootstrapping for d2go_root: {d2go_root} ...')
    all_files = glob.glob(f'{d2go_root}/**/*.py', recursive=True)
    # Store paths relative to the parent of the package root for stable keys.
    all_files = [os.path.relpath(x, os.path.dirname(d2go_root)) for x in all_files]
    new_bootstrap_results: Dict[str, CachedResult] = {}
    files_per_status = defaultdict(list)
    time_per_file = {}
    for filename in all_files:
        _log(1, f'bootstrap for file: {filename}')
        cached_result = cached_bootstrap_results.get(filename, None)
        with _catchtime() as t:
            result, status = _bootstrap_file(filename, catch_exception, cached_result)
        new_bootstrap_results[filename] = result
        files_per_status[status].append(filename)
        time_per_file[filename] = t.time
    end = time.perf_counter()
    duration = end - start
    status_breakdown = ', '.join([f'{len(files_per_status[status])} {status.name}' for status in BootstrapStatus])
    logger.info(f'Finished bootstrapping for {len(all_files)} files ({status_breakdown}) in {duration:.2f} seconds.')
    exception_files = [filename for (filename, result) in new_bootstrap_results.items() if (result.status == BootstrapStatus.FAILED.name)]
    if len(exception_files) > 0:
        logger.warning('Found exception for the following {} files (either during this bootstrap run or from previous cached result), registration inside those files might not work!\n{}'.format(len(exception_files), '\n'.join(exception_files)))
    # Report the slowest files (ascending, so the worst offenders print last).
    TOP_N = 100
    _log(2, f'Top-{TOP_N} slowest files during bootstrap:')
    all_time = [(os.path.relpath(k, d2go_root), v) for (k, v) in time_per_file.items()]
    for x in sorted(all_time, key=(lambda x: x[1]))[-TOP_N:]:
        _log(2, x)
    if enable_cache:
        filename = os.path.join(_get_cache_dir(), _BOOTSTRAP_CACHE_FILENAME)
        logger.info(f'Writing updated bootstrap results to {filename} ...')
        _dump_cached_results(new_bootstrap_results, filename)
    _IS_BOOTSTRAPPED = True
class PornEmbyCheckin(AnswerBotCheckin):
    """Check-in routine for the Pornemby bot: answers its photo quiz by
    clicking and waiting for the bot's reply.

    NOTE(review): several log/click string literals below appear to have lost
    their non-ASCII (likely Chinese) text in extraction (e.g. f': .'); they are
    kept byte-identical here -- confirm against VCS.
    """
    name = 'Pornemby'
    bot_username = 'PronembyTGBot2_bot'
    # Success replies end in a number (e.g. a point total).
    bot_success_pat = '.*?(\\d+)$'
    async def start(self):
        # The bot requires the account to have a public username.
        if (not self.client.me.username):
            self.log.warning(f': .')
            return None
        return (await super().start())
    async def on_photo(self, message: Message):
        # Small random delay to look less bot-like.
        (await asyncio.sleep(random.uniform(2, 4)))
        async with self.client.catch_reply(self.bot_username) as f:
            try:
                (await message.click(''))
            except TimeoutError:
                # Click acknowledgement timing out is non-fatal.
                pass
            try:
                # Wait up to 10s for the bot's reply captured by catch_reply.
                (await asyncio.wait_for(f, 10))
            except asyncio.TimeoutError:
                self.log.warning(f': , {self.name}.')
                (await self.fail())
def calc_dataset_item(cache: Tuple[(Tuple[(int, ...)], ...)], i: int) -> Tuple[(int, ...)]:
    """Compute dataset item `i` from the cache (Ethash-style dataset
    generation -- presumably per the Ethereum spec; confirm constants).

    Seeds a keccak-512 mix from cache[i % n] XOR i, then folds in
    DATASET_PARENTS pseudo-randomly chosen cache entries via FNV, and returns
    the keccak-512 of the final mix as little-endian uint32 words.
    """
    n = len(cache)
    # Number of 32-bit words per hash.
    r = (HASH_BYTES // WORD_BYTES)
    mix = keccak_512(int_to_le_bytes((le_uint32_sequence_to_uint(cache[(i % n)]) ^ i), HASH_BYTES))
    mix_integers = le_bytes_to_uint32_sequence(mix)
    for j in range(DATASET_PARENTS):
        # FNV over (i ^ j) and one mix word selects the parent cache entry.
        cache_index = fnv((i ^ j), mix_integers[(j % r)])
        mix_integers = fnv_hash(mix_integers, cache[(cache_index % n)])
    mix = le_uint32_sequence_to_bytes(mix_integers)
    return le_bytes_to_uint32_sequence(keccak_512(mix))
class OptionSeriesFunnel3dStatesHoverHalo(Options):
    """Highcharts hover-halo options for funnel3d series (attributes /
    opacity / size).

    NOTE(review): each getter/setter pair below shares one name; the
    @property / @<name>.setter decorators appear to have been stripped in
    extraction, so as written each later def shadows the earlier one --
    confirm against the upstream generated source.
    """
    def attributes(self):
        # Getter: no default.
        return self._config_get(None)
    def attributes(self, value: Any):
        # Setter.
        self._config(value, js_type=False)
    def opacity(self):
        # Default halo opacity is 0.25.
        return self._config_get(0.25)
    def opacity(self, num: float):
        self._config(num, js_type=False)
    def size(self):
        # Default halo size is 10.
        return self._config_get(10)
    def size(self, num: float):
        self._config(num, js_type=False)
class Cubelet():
    """A single cell of a cube, addressed by integer coordinates into its
    owning cube."""

    def __init__(self, cube, coords, coords_names=None):
        # Optional human-readable names for the coordinate axes.
        self._coords_names = coords_names
        self.owner = cube
        # Coordinates must be plain ints (used directly for indexing).
        assert all((isinstance(_, int) for _ in coords)), coords
        self.coords = coords
        # Inherit the flattening behavior from the owning cube.
        self.flatten_values = cube.flatten_values
    def __repr__(self):
        return f'{self.__class__.__name__}({self.coords},index_names={self._coords_names})'
    # NOTE(review): likely intended as a @property (decorator possibly stripped
    # in extraction) -- as written it must be called: confirm against upstream.
    def extended_icoords(self):
        return self.coords
    def to_numpy(self, **kwargs):
        # Delegate to the owner; reshape unless values are flattened.
        return self.owner[self.coords].to_numpy(reshape=(not self.flatten_values), **kwargs)
def build_database(sql_files):
    """Create an in-memory SQLite database from a list of SQL script files.

    Files whose path contains 'migratedTables' are skipped. Each file is split
    on blank-line boundaries and executed chunk by chunk; operational errors in
    a chunk are reported and execution continues (best effort).

    Returns the open sqlite3 connection.
    """
    print('\n* Building the database...')
    connection = sqlite3.connect(':memory:')
    cursor = connection.cursor()
    for script_path in sql_files:
        if 'migratedTables' in script_path:
            print(' * Skipping %s...' % script_path)
            continue
        print(' * Running %s...' % script_path)
        with open(script_path, 'r') as script_file:
            contents = script_file.read()
        for chunk in contents.split('\n\n'):
            try:
                cursor.executescript(chunk)
            except sqlite3.OperationalError as e:
                # Best effort: report and keep going with the next chunk.
                print(' -> Error: %s' % str(e))
    print(' -> Done.')
    return connection
class OptionSeriesVariwideTooltipDatetimelabelformats(Options):
    """Highcharts datetime label formats for variwide series tooltips, one
    strftime-style format per time resolution.

    NOTE(review): each getter/setter pair below shares one name; the
    @property / @<name>.setter decorators appear to have been stripped in
    extraction, so as written each later def shadows the earlier one --
    confirm against the upstream generated source.
    """
    def day(self):
        return self._config_get('%A, %e %b %Y')
    def day(self, text: str):
        self._config(text, js_type=False)
    def hour(self):
        return self._config_get('%A, %e %b, %H:%M')
    def hour(self, text: str):
        self._config(text, js_type=False)
    def millisecond(self):
        return self._config_get('%A, %e %b, %H:%M:%S.%L')
    def millisecond(self, text: str):
        self._config(text, js_type=False)
    def minute(self):
        return self._config_get('%A, %e %b, %H:%M')
    def minute(self, text: str):
        self._config(text, js_type=False)
    def month(self):
        return self._config_get('%B %Y')
    def month(self, text: str):
        self._config(text, js_type=False)
    def second(self):
        return self._config_get('%A, %e %b, %H:%M:%S')
    def second(self, text: str):
        self._config(text, js_type=False)
    def week(self):
        return self._config_get('Week from %A, %e %b %Y')
    def week(self, text: str):
        self._config(text, js_type=False)
    def year(self):
        return self._config_get('%Y')
    def year(self, text: str):
        self._config(text, js_type=False)
def extractExecutionergirlWordpressCom(item):
    """Parse an executionergirl.wordpress.com feed item into a release message.

    Returns None for previews or items without volume/chapter info, a release
    message when a known tag matches, and False otherwise.
    """
    vol, chp, frag, postfix = extractVolChapterFragmentPostfix(item['title'])
    if 'preview' in item['title'].lower() or not (chp or vol):
        return None
    tagmap = [('PRC', 'PRC', 'translated'), ('Loiterous', 'Loiterous', 'oel')]
    for tagname, series_name, tl_type in tagmap:
        if tagname in item['tags']:
            return buildReleaseMessageWithType(item, series_name, vol, chp, frag=frag, postfix=postfix, tl_type=tl_type)
    return False
class LocalFileDAGLoader(DAGLoader):
    """Load DAG definitions from a list of local files and/or directories,
    silently skipping paths that do not exist."""

    def __init__(self, dag_dirs: List[str]) -> None:
        # Paths may be individual files or directories of DAG files.
        self._dag_dirs = dag_dirs

    def load_dags(self) -> List[DAG]:
        """Collect DAGs from every existing configured path."""
        collected: List[DAG] = []
        for path in self._dag_dirs:
            if not os.path.exists(path):
                continue
            loader = _process_directory if os.path.isdir(path) else _process_file
            collected.extend(loader(path))
        return collected
class AgentDialogue(TacDialogue):
    """TAC dialogue specialized for an agent: thin wrapper delegating all
    initialization to TacDialogue."""

    def __init__(self, dialogue_label: DialogueLabel, self_address: Address, role: BaseDialogue.Role, message_class: Type[TacMessage]) -> None:
        """Initialize the dialogue with its label, own address, role, and message class."""
        TacDialogue.__init__(self, dialogue_label=dialogue_label, self_address=self_address, role=role, message_class=message_class)
def prompt_credentials():
    """Interactively prompt for Kiwoom OpenAPI+ credentials, using the stored
    configuration as defaults, and return them as a config object."""
    credentials = config.get('koapy.backend.kiwoom_open_api_plus.credentials')
    default_user_id = credentials['user_id']
    default_user_password = credentials['user_password']
    default_server = ('simulation' if credentials['is_simulation'] else 'real')
    default_cert_password = credentials['cert_password']
    user_id = click.prompt('User ID', default=default_user_id)
    user_password = click.prompt('User Password', hide_input=True, default=default_user_password, show_default=False)
    is_simulation = (click.prompt('Server Type', type=click.Choice(['real', 'simulation'], case_sensitive=False), default=default_server) == 'simulation')
    if is_simulation:
        # The simulation server needs no certificate password.
        cert_password = default_cert_password
    else:
        cert_password = click.prompt('Cert Password', hide_input=True, default=default_cert_password, show_default=False)
    account_passwords = {}
    if is_simulation:
        # Simulation accounts all share the fixed password '0000'.
        account_passwords[''] = '0000'
    else:
        account_count = click.prompt('Account Count', type=int, default=1)
        for _ in range(account_count):
            account_number = click.prompt('Account Number', default='')
            account_password = click.prompt('Account Password', hide_input=True, show_default=False)
            account_passwords[account_number] = account_password
    credentials = {'user_id': user_id, 'user_password': user_password, 'cert_password': cert_password, 'is_simulation': is_simulation, 'account_passwords': account_passwords}
    credentials = config_from_dict(credentials)
    return credentials
class MapboxVectorTileSerializer(Serializer):
    """Pass-through serializer for raw MapBox vector tile payloads: tiles are
    opaque bytes, so (de)serialization is the identity on bytes."""

    mimetype: ClassVar[str] = 'application/vnd.mapbox-vector-tile'

    def loads(self, data: bytes) -> bytes:
        # Nothing to decode: a tile is already raw bytes.
        return data

    def dumps(self, data: bytes) -> bytes:
        # Guard clause: only a bytes payload can be emitted as a tile.
        if not isinstance(data, bytes):
            raise SerializationError(f'Cannot serialize {data!r} into a MapBox vector tile')
        return data
def run():
    """Emit a Verilog top module instantiating RAMB18E1 pairs with fuzz-driven
    pin assignments, and record the per-INT-tile fuzz parameters."""
    print('\nmodule top();\n ')
    # Maps INT tile name -> tuple of fuzz bits driven through that tile.
    params = {}
    sites = list(gen_brams())
    # Five fuzz bits per site group (one per interconnect tile of interest).
    fuzz_iter = iter(util.gen_fuzz_states((len(sites) * 5)))
    for (tile_name, bram_sites, int_tiles) in sites:
        (b0_diadi3, b1_wraddr10, b1_wraddr9, b1_wraddr13, b1_diadi7) = itertools.islice(fuzz_iter, 5)
        params[int_tiles[0]] = (b0_diadi3,)
        params[int_tiles[1]] = (b1_wraddr10,)
        params[int_tiles[2]] = (b1_wraddr9,)
        params[int_tiles[3]] = (b1_wraddr13,)
        params[int_tiles[4]] = (b1_diadi7,)
        # Verbatim Verilog template: two LOC'd RAMB18E1 instances with the
        # fuzz bits wired onto specific DI/ADDR/WE pins.
        print('\n wire [15:0] {bram_site0}_diadi;\n wire [15:0] {bram_site0}_dibdi;\n wire [13:0] {bram_site0}_wraddr;\n\n wire [15:0] {bram_site1}_diadi;\n wire [15:0] {bram_site1}_dibdi;\n wire [7:0] {bram_site1}_webwe;\n wire [13:0] {bram_site1}_rdaddr;\n wire [13:0] {bram_site1}_wraddr;\n\n // INT 0\n assign {bram_site0}_diadi[2] = 0;\n assign {bram_site0}_diadi[3] = {b0_diadi3};\n\n // INT 1\n assign {bram_site1}_wraddr[7] = 0;\n assign {bram_site1}_wraddr[10] = {b1_wraddr10};\n\n // INT 2\n assign {bram_site1}_rdaddr[9] = 0;\n assign {bram_site1}_wraddr[9] = {b1_wraddr9};\n\n // INT 3\n assign {bram_site1}_wraddr[4] = 0;\n assign {bram_site1}_wraddr[13] = {b1_wraddr13};\n\n // INT 4\n assign {bram_site1}_dibdi[15] = 0;\n assign {bram_site1}_diadi[7] = {b1_diadi7};\n\n (* KEEP, DONT_TOUCH, LOC = "{bram_site0}" *)\n RAMB18E1 #(\n ) bram_{bram_site0} (\n .CLKARDCLK(),\n .CLKBWRCLK(),\n .ENARDEN(),\n .ENBWREN(),\n .REGCEAREGCE(),\n .REGCEB(),\n .RSTRAMARSTRAM(),\n .RSTRAMB(),\n .RSTREGARSTREG(),\n .RSTREGB(),\n .ADDRARDADDR(),\n .ADDRBWRADDR({bram_site0}_wraddr),\n .DIADI({bram_site0}_diadi),\n .DIBDI({bram_site0}_dibdi),\n .DIPADIP(),\n .DIPBDIP(),\n .WEA(),\n .WEBWE(),\n .DOADO(),\n .DOBDO(),\n .DOPADOP(),\n .DOPBDOP());\n\n (* KEEP, DONT_TOUCH, LOC = "{bram_site1}" *)\n RAMB18E1 #(\n ) bram_{bram_site1} (\n .CLKARDCLK(),\n .CLKBWRCLK(),\n .ENARDEN(),\n .ENBWREN(),\n .REGCEAREGCE(),\n .REGCEB(),\n .RSTRAMARSTRAM(),\n .RSTRAMB(),\n .RSTREGARSTREG(),\n .RSTREGB(),\n .ADDRARDADDR({bram_site1}_rdaddr),\n .ADDRBWRADDR({bram_site1}_wraddr),\n .DIADI({bram_site1}_diadi),\n .DIBDI({bram_site1}_dibdi),\n .DIPADIP(),\n .DIPBDIP(),\n .WEA(),\n .WEBWE({bram_site1}_webwe),\n .DOADO(),\n .DOBDO(),\n .DOPADOP(),\n .DOPBDOP());\n'.format(bram_site0=bram_sites[0], bram_site1=bram_sites[1], b0_diadi3=b0_diadi3, b1_wraddr10=b1_wraddr10, b1_wraddr9=b1_wraddr9, b1_wraddr13=b1_wraddr13, b1_diadi7=b1_diadi7))
    print('endmodule')
    write_params(params)
class OptionSeriesFunnel3dSonificationDefaultinstrumentoptionsMappingTremoloDepth(Options):
    """Highcharts `tremolo.depth` mapping options for funnel3d sonification
    default instrument options (mapFunction / mapTo / max / min / within).

    NOTE(review): each getter/setter pair below shares one name; the
    @property / @<name>.setter decorators appear to have been stripped in
    extraction, so as written each later def shadows the earlier one --
    confirm against the upstream generated source.
    """
    def mapFunction(self):
        # Getter: no default.
        return self._config_get(None)
    def mapFunction(self, value: Any):
        # Setter.
        self._config(value, js_type=False)
    def mapTo(self):
        return self._config_get(None)
    def mapTo(self, text: str):
        self._config(text, js_type=False)
    def max(self):
        return self._config_get(None)
    def max(self, num: float):
        self._config(num, js_type=False)
    def min(self):
        return self._config_get(None)
    def min(self, num: float):
        self._config(num, js_type=False)
    def within(self):
        return self._config_get(None)
    def within(self, value: Any):
        self._config(value, js_type=False)
# NOTE(review): the bare `_member_required` line was a decorator whose '@' was
# lost in extraction; restored here.
@_member_required
def import_documents(request):
    """Handle bulk document upload.

    On a valid POST: build a thumbnail data-URL per uploaded file (best
    effort), stash thumbnails in the session and the file list in the cache,
    then redirect to step 2. Otherwise re-render the import form.
    """
    if request.method == 'POST':
        form = ImportDocumentsForm(request.POST, request.FILES)
        files = request.FILES.getlist('files')
        if form.is_valid():
            data_urls = []
            for f in files:
                data_url = None
                try:
                    if f.name.endswith('.pdf'):
                        data_url = generate_pdf_thumbnail(f.name, blob=f.read())
                    elif f.name.endswith('.epub'):
                        data_url = generate_epub_thumbnail(f.name, blob=f.read())
                    elif f.content_type.startswith('image'):
                        data_url = generate_image_thumbnail(f.name, blob=f.read())
                except Exception as exc:
                    # Thumbnail failure is non-fatal; warn and continue with None.
                    messages.add_message(request, messages.WARNING, f'Could not create thumbnail for {f.name}: {exc}')
                data_urls.append(data_url)
            request.session['import_thumbnails'] = json.dumps(data_urls)
            # Keep the uploaded files available to step 2 for 15 minutes.
            cache.set('import_files', files, timeout=(15 * 60))
            return HttpResponseRedirect(reverse('import_documents_2'))
        else:
            messages.add_message(request, messages.ERROR, 'Form data is invalid.')
    else:
        form = ImportDocumentsForm()
    context = {'title': 'import files', 'form': form, 'add_document_form': AddDocument()}
    template = loader.get_template('import.html')
    return HttpResponse(template.render(context, request))
class CriteoDataModule(pl.LightningDataModule):
def __init__(self, num_days: int=1, num_days_test: int=0, num_embeddings: Optional[int]=100000, num_embeddings_per_feature: Optional[List[int]]=None, batch_size: int=32, train_percent: float=0.8, num_workers: int=0, read_chunk_size: int=100000, dataset_name: str='criteo_1t', dataset_path: str=None, undersampling_rate: Optional[float]=None, pin_memory: bool=False, seed: Optional[int]=None, worker_init_fn: Optional[Callable[([int], None)]]=None) -> None:
super().__init__()
self._dataset_name: str = dataset_name
self._dataset_path: str = dataset_path
if (dataset_name == 'criteo_1t'):
if (not (1 <= num_days <= 24)):
raise ValueError(f'Dataset has only 24 days of data. User asked for {num_days} days')
if (not (0 <= num_days_test <= 24)):
raise ValueError(f'Dataset has only 24 days of data. User asked for {num_days_test} days')
if (not ((num_days + num_days_test) <= 24)):
raise ValueError(f'Dataset has only 24 days of data. User asked for {num_days} train days and {num_days_test} test days')
elif (dataset_name != 'criteo_kaggle'):
raise ValueError((f'Unknown dataset {self._dataset_name}. ' + 'Please choose {criteo_1t, criteo_kaggle} for dataset_name'))
if (not (0.0 <= train_percent <= 1.0)):
raise ValueError(f'train_percent {train_percent} must be between 0 and 1')
if (((num_embeddings is None) and (num_embeddings_per_feature is None)) or ((num_embeddings is not None) and (num_embeddings_per_feature is not None))):
raise ValueError('One - and only one - of num_embeddings or num_embeddings_per_feature must be set.')
if ((num_embeddings_per_feature is not None) and (len(num_embeddings_per_feature) != len(DEFAULT_CAT_NAMES))):
raise ValueError(f'Length of num_embeddings_per_feature ({len(num_embeddings_per_feature)}) does not match the number of sparse features ({{DEFAULT_CAT_NAMES}}).')
self.batch_size = batch_size
self._num_workers = num_workers
self._read_chunk_size = read_chunk_size
self._num_days = num_days
self._num_days_test = num_days_test
self.num_embeddings = num_embeddings
self.num_embeddings_per_feature = num_embeddings_per_feature
self._train_percent = train_percent
self._undersampling_rate = undersampling_rate
self._pin_memory = pin_memory
self._seed = seed
self._worker_init_fn = worker_init_fn
self._train_datapipe: Optional[IterDataPipe] = None
self._val_datapipe: Optional[IterDataPipe] = None
self._test_datapipe: Optional[IterDataPipe] = None
self.keys: List[str] = DEFAULT_CAT_NAMES
def _create_datapipe_1t(self, day_range: Iterable[int]) -> IterDataPipe:
paths = [f'{self._dataset_path}/day_{day}.tsv' for day in day_range]
datapipe = criteo_terabyte(paths, read_chunk_size=self._read_chunk_size)
undersampling_rate = self._undersampling_rate
if (undersampling_rate is not None):
datapipe = ProportionUnderSampler(datapipe, self._get_label, {0: undersampling_rate, 1: 1.0}, seed=self._seed)
return datapipe
def _create_datapipe_kaggle(self, partition: str) -> IterDataPipe:
path = f'{self._dataset_path}/{partition}.txt'
return criteo_kaggle(path, read_chunk_size=self._read_chunk_size)
def _get_label(row: Any) -> Any:
return row['label']
def _batch_collate_transform(self, datapipe: IterDataPipe) -> IterDataPipe:
_transform_partial = partial(_transform, num_embeddings=self.num_embeddings, num_embeddings_per_feature=self.num_embeddings_per_feature)
return datapipe.batch(self.batch_size).collate().map(_transform_partial)
def setup(self, stage: Optional[str]=None) -> None:
if (self._worker_init_fn is not None):
self._worker_init_fn(0)
if ((stage == 'fit') or (stage is None)):
if (self._dataset_name == 'criteo_1t'):
datapipe = self._create_datapipe_1t(range(self._num_days))
elif (self._dataset_name == 'criteo_kaggle'):
datapipe = self._create_datapipe_kaggle('train')
else:
raise ValueError((f'Unknown dataset {self._dataset_name}. ' + 'Please choose {criteo_1t, criteo_kaggle} for dataset_name'))
(train_datapipe, val_datapipe) = rand_split_train_val(datapipe, self._train_percent)
self._train_datapipe = self._batch_collate_transform(train_datapipe)
self._val_datapipe = self._batch_collate_transform(val_datapipe)
if ((stage == 'test') or (stage is None)):
if (self._dataset_name == 'criteo_1t'):
datapipe = self._create_datapipe_1t(range(self._num_days, (self._num_days + self._num_days_test)))
elif (self._dataset_name == 'criteo_kaggle'):
datapipe = self._create_datapipe_kaggle('test')
else:
raise ValueError((f'Unknown dataset {self._dataset_name}. ' + 'Please choose {criteo_1t, criteo_kaggle} for dataset_name'))
self._test_datapipe = self._batch_collate_transform(datapipe)
def _create_dataloader(self, datapipe: IterDataPipe) -> DataLoader:
    """Wrap a datapipe in a DataLoader.

    Batching already happened upstream in the datapipe, hence
    ``batch_size=None`` and ``batch_sampler=None``.
    """
    return DataLoader(
        datapipe,
        num_workers=self._num_workers,
        pin_memory=self._pin_memory,
        batch_size=None,
        batch_sampler=None,
        worker_init_fn=self._worker_init_fn,
    )
def train_dataloader(self) -> DataLoader:
    """Return the training DataLoader (requires ``setup('fit')`` to have run)."""
    pipe = self._train_datapipe
    assert isinstance(pipe, IterDataPipe)
    return self._create_dataloader(pipe)
def val_dataloader(self) -> DataLoader:
    """Return the validation DataLoader (requires ``setup('fit')`` to have run)."""
    pipe = self._val_datapipe
    assert isinstance(pipe, IterDataPipe)
    return self._create_dataloader(pipe)
def test_dataloader(self) -> DataLoader:
    """Return the test DataLoader.

    For criteo_kaggle the validation datapipe is served, since no labeled
    test split is prepared in :meth:`setup`.
    """
    if self._dataset_name == 'criteo_1t':
        pipe = self._test_datapipe
    elif self._dataset_name == 'criteo_kaggle':
        pipe = self._val_datapipe
    else:
        raise ValueError((f'Unknown dataset {self._dataset_name}. ' + 'Please choose {criteo_1t, criteo_kaggle} for dataset_name'))
    assert isinstance(pipe, IterDataPipe)
    return self._create_dataloader(pipe)
class DividerNotchesEdge(edges.BaseEdge):
    """Edge drawing one rounded notch per section for easier divider access."""

    description = 'Edge with multiple notches for easier access to dividers'

    def __init__(self, boxes, sx) -> None:
        super().__init__(boxes, None)
        self.sx = sx  # section widths; one notched segment is drawn per section

    def __call__(self, _, **kw):
        # Sections are separated by a wall, so insert one wall-thickness of
        # plain edge between consecutive notched segments.
        for index, section_width in enumerate(self.sx):
            if index:
                self.edge(self.thickness)
            self.edge_with_notch(section_width)

    def edge_with_notch(self, width):
        """Draw one segment of *width* with a centered rounded notch.

        Falls back to a plain straight edge when the width is too small to
        fit both notch radii.
        """
        upper_third = ((width - (2 * self.Notch_upper_radius) - (2 * self.Notch_lower_radius)) / 3)
        if upper_third > 0:
            descent = ((self.Notch_depth - self.Notch_upper_radius) - self.Notch_lower_radius)
            self.polyline(
                upper_third,
                (90, self.Notch_upper_radius),
                descent,
                ((- 90), self.Notch_lower_radius),
                upper_third,
                ((- 90), self.Notch_lower_radius),
                descent,
                (90, self.Notch_upper_radius),
                upper_third,
            )
        else:
            self.edge(width)
(scope='function')  # NOTE(review): presumably a stripped "@pytest.fixture(scope='function')" decorator — confirm upstream
def tooltip_manager(request, bar_position, manager_nospawn):
    """Fixture: start qtile with a single TooltipWidget hosted in a bar.

    Per-test widget overrides arrive via ``request.param``; they are merged
    over a 0.5 s tooltip delay default.
    """
    widget = TooltipWidget('Testint', **{**{'tooltip_delay': 0.5}, **getattr(request, 'param', dict())})

    class TooltipConfig(libqtile.confreader.Config):
        # Minimal qtile config placing the widget in a bar at `bar_position`.
        auto_fullscreen = True
        keys = []
        mouse = []
        groups = [libqtile.config.Group('a')]
        layouts = [libqtile.layout.Max()]
        floating_layout = libqtile.resources.default_config.floating_layout
        screens = [libqtile.config.Screen(**{bar_position: libqtile.bar.Bar([widget], BAR_SIZE)})]

    manager_nospawn.start(TooltipConfig)
    (yield manager_nospawn)
def compute_metrics(pred: Tensor, gt: Tensor, mask: Optional[Tensor]=None, extra=False) -> Dict[(str, float)]:
    """Compute image-quality metrics between *pred* and *gt*.

    PSNR is always computed (mask-aware). SSIM and LPIPS are added only
    when no mask is given and *extra* is requested; LPIPS additionally
    requires the global ``lpips_net`` to be loaded.
    """
    results = {'psnr': compute_psnr(pred, gt, mask)}
    if (mask is None) and extra:
        results['ssim'] = compute_ssim(pred, gt)
        if lpips_net is not None:
            results['lpips'] = compute_lpips(pred, gt)
    return results
class Sentiment(GeneratedFeature):
    """Generated feature: VADER compound sentiment score for a text column."""

    column_name: str

    def __init__(self, column_name: str, display_name: Optional[str]=None):
        self.column_name = column_name
        self.display_name = display_name
        super().__init__()

    def generate_feature(self, data: pd.DataFrame, data_definition: DataDefinition) -> pd.DataFrame:
        import nltk
        # The VADER lexicon is required by SentimentIntensityAnalyzer;
        # a quiet download is a no-op when it is already present.
        nltk.download('vader_lexicon', quiet=True)
        analyzer = SentimentIntensityAnalyzer()

        def score(text, analyzer=analyzer):
            # Missing values (None / NaN) map to a neutral score of 0.
            if (text is None) or (isinstance(text, float) and np.isnan(text)):
                return 0
            return analyzer.polarity_scores(text)['compound']

        return pd.DataFrame({self.column_name: data[self.column_name].apply(score)})

    def feature_name(self) -> ColumnName:
        return additional_feature(self, self.column_name, (self.display_name or f'Sentiment for {self.column_name}'))
class Event(TraitType):
    """A trait that fires notifications but never stores a value."""

    default_value_type = DefaultValue.constant

    def __init__(self, trait=None, **metadata):
        # Events are transient and typed 'event' so the notification
        # machinery treats them specially.
        metadata['type'] = 'event'
        metadata['transient'] = True
        super().__init__(**metadata)
        self.trait = trait_from(trait) if trait is not None else None
        if self.trait is not None:
            validate = self.trait.get_validate()
            if validate is not None:
                # Reuse the inner trait's fast validator for assignments.
                self.fast_validate = validate

    def full_info(self, object, name, value):
        """Describe the values this event accepts, delegating to the inner trait."""
        inner = self.trait
        if inner is None:
            return 'any value'
        return inner.full_info(object, name, value)
class TypeDoc():
    """Renderable documentation for a type definition.

    NOTE(review): the bare class-level annotations suggest a stripped
    ``@dataclass`` decorator — confirm against the original source.
    """

    type: object  # documented type object (exposes .typename and .elems)
    doc: object   # nested doc renderer (exposes print_text / print_rst)

    def print_text(self, indent=0):
        """Render as rich console markup, indented by *indent* spaces."""
        color_kw = color_theme['keyword']
        color_class = color_theme['name_class']
        params = ', '.join(str(p) for p in self.type.elems)
        if params:
            # Escape the opening bracket so rich does not treat it as markup.
            params = f'\[{params}]'
        indent_str = (' ' * indent)
        rendered = f'''{indent_str}[{color_kw}]type[/{color_kw}] [{color_class}]{self.type.typename}[/{color_class}]{params}
'''
        return (rendered + self.doc.print_text((indent + 4)))

    def print_rst(self):
        """Render as a reST ``.. class::`` directive followed by the nested doc."""
        type_name = str(self.type)
        rendered = f'''.. class:: {type_name}
'''
        return (rendered + self.doc.print_rst())
def test_loop_to_sequence_rule_1():
    """Loop-to-sequence rule: an endless loop whose every path ends in Break
    should be refined into a plain sequence (with the break-only branch folded
    into a negated condition)."""
    # Input AST: while(True) { c += 5; if (a) { break; } else { c += 10; break; } }
    ast = AbstractSyntaxForest(condition_handler=condition_handler1(LogicCondition.generate_new_context()))
    root = ast.factory.create_endless_loop_node()
    body = ast.factory.create_seq_node()
    children = [ast.factory.create_code_node(stmts=[assignment_c_plus_5.copy()]), ast.factory.create_condition_node(condition=logic_cond('a', ast.factory.logic_context))]
    true_branch = ast.factory.create_true_node()
    true_branch_child = ast.factory.create_code_node(stmts=[Break()])
    false_branch = ast.factory.create_false_node()
    false_branch_child = ast.factory.create_code_node(stmts=[assignment_c_plus_10.copy(), Break()])
    ast._add_nodes_from([root, body, children[0], children[1], true_branch, true_branch_child, false_branch, false_branch_child])
    ast._add_edges_from([(root, body), (body, children[0]), (body, children[1]), (children[1], true_branch), (true_branch, true_branch_child), (children[1], false_branch), (false_branch, false_branch_child)])
    # The loop body code node executes before either break branch.
    ast._code_node_reachability_graph.add_reachability_from(((children[0], false_branch_child), (children[0], true_branch_child)))
    body.sort_children()
    new_root = LoopStructurer.refine_loop(ast, root)
    # Expected AST: { c += 5; if (!a) { c += 10; } } — both breaks removed,
    # the false branch promoted under the negated condition.
    transformed_ast = AbstractSyntaxForest(condition_handler=condition_handler1(LogicCondition.generate_new_context()))
    root = transformed_ast.factory.create_seq_node()
    children = [transformed_ast.factory.create_code_node(stmts=[assignment_c_plus_5.copy()]), transformed_ast.factory.create_condition_node(condition=(~ logic_cond('a', transformed_ast.factory.logic_context)))]
    true_branch = transformed_ast.factory.create_true_node()
    true_branch_child = transformed_ast.factory.create_code_node(stmts=[assignment_c_plus_10.copy()])
    transformed_ast._add_nodes_from([root, children[0], children[1], true_branch, true_branch_child])
    transformed_ast._add_edges_from([(root, children[0]), (root, children[1]), (children[1], true_branch), (true_branch, true_branch_child)])
    transformed_ast._code_node_reachability_graph.add_reachability(children[0], true_branch_child)
    root.sort_children()
    assert (new_root == root)
    assert ASTComparator.compare(ast, transformed_ast)
class Slice():
    """A contiguous window of lines within a file, iterable in bulks.

    On :meth:`open` the first ``offset`` lines are skipped; iteration then
    serves at most ``number_of_lines`` lines, in chunks of up to
    ``bulk_size``.
    """

    def __init__(self, source_class, offset, number_of_lines):
        self.source_class = source_class
        self.source = None
        self.offset = offset
        self.number_of_lines = number_of_lines
        self.current_line = 0
        self.bulk_size = None
        self.logger = logging.getLogger(__name__)

    def open(self, file_name, mode, bulk_size):
        """Open the underlying source and skip to this slice's offset."""
        self.bulk_size = bulk_size
        self.source = self.source_class(file_name, mode).open()
        self.logger.info('Will read [%d] lines from [%s] starting from line [%d] with bulk size [%d].', self.number_of_lines, file_name, self.offset, self.bulk_size)
        skip_start = time.perf_counter()
        io.skip_lines(file_name, self.source, self.offset)
        skip_end = time.perf_counter()
        self.logger.debug('Skipping [%d] lines took [%f] s.', self.offset, (skip_end - skip_start))
        return self

    def close(self):
        self.source.close()
        self.source = None

    def __iter__(self):
        return self

    def __next__(self):
        # Stop once this slice's quota of lines has been served.
        remaining = self.number_of_lines - self.current_line
        if remaining <= 0:
            raise StopIteration()
        # NOTE(review): whether readlines' argument means "lines" or a byte
        # hint depends on source_class — confirm upstream.
        lines = self.source.readlines(min(self.bulk_size, remaining))
        self.current_line += len(lines)
        if not lines:
            # Underlying source exhausted before the quota was reached.
            raise StopIteration()
        return lines

    def __str__(self):
        return ('%s[%d;%d]' % (self.source, self.offset, (self.offset + self.number_of_lines)))
def pytest_collection_modifyitems(items, config):
    """Split the collected tests into flaky and non-flaky sets and keep one.

    A test is "flaky" when it uses any unlocked-account fixture, except for
    offchain-lookup tests. With ``--flaky`` only the flaky set is kept;
    otherwise only the non-flaky set. The deselected set is reported via
    the ``pytest_deselected`` hook.
    """
    unlocked_fixtures = ('async_unlocked_account', 'async_unlocked_account_dual_type', 'unlocked_account', 'unlocked_account_dual_type')

    def _is_flaky(test_item):
        uses_unlocked = any((fixture in test_item.fixturenames) for fixture in unlocked_fixtures)
        return uses_unlocked and ('offchain_lookup' not in test_item.name)

    flaky_tests = [item for item in items if _is_flaky(item)]
    non_flaky_tests = [item for item in items if not _is_flaky(item)]
    if config.option.flaky:
        items[:] = flaky_tests
        config.hook.pytest_deselected(items=non_flaky_tests)
    else:
        items[:] = non_flaky_tests
        config.hook.pytest_deselected(items=flaky_tests)
class OptionPlotoptionsTreemapLevelsColorvariation(Options):
    """Highcharts ``plotOptions.treemap.levels.colorVariation`` options.

    NOTE(review): each same-named getter/setter pair below is presumably a
    ``@property`` / ``@<name>.setter`` pair whose decorators were stripped;
    as written the later def shadows the earlier one. Confirm against the
    generated source.
    """

    def key(self):
        # Getter: no configured default.
        return self._config_get(None)

    def key(self, value: Any):
        # Setter.
        self._config(value, js_type=False)

    def to(self):
        # Getter: no configured default.
        return self._config_get(None)

    def to(self, num: float):
        # Setter.
        self._config(num, js_type=False)
_os(*metadata.platforms)  # NOTE(review): presumably a stripped OS-restriction decorator — confirm upstream
def main():
    """Emulate env-variable hijacking via a fake launchctl masquerade binary."""
    masquerade_path = '/tmp/launchctl'
    common.create_macos_masquerade(masquerade_path)
    common.log('Launching fake launchctl command to mimic env variable hijacking')
    # Briefly run the fake binary, then remove the artifact.
    common.execute([masquerade_path, 'setenv'], timeout=10, kill=True)
    common.remove_file(masquerade_path)
class OptionSeriesAreasplinerangeTooltip(Options):
    """Highcharts ``series.areasplinerange.tooltip`` configuration options.

    NOTE(review): each same-named getter/setter pair below is presumably a
    ``@property`` / ``@<name>.setter`` pair whose decorators were stripped;
    as written the later def shadows the earlier one. Confirm against the
    generated source. Getters return the Highcharts default when unset.
    """

    def clusterFormat(self):
        return self._config_get('Clustered points: {point.clusterPointsAmount}')

    def clusterFormat(self, text: str):
        self._config(text, js_type=False)

    def dateTimeLabelFormats(self) -> 'OptionSeriesAreasplinerangeTooltipDatetimelabelformats':
        # Sub-configuration object for per-unit datetime label formats.
        return self._config_sub_data('dateTimeLabelFormats', OptionSeriesAreasplinerangeTooltipDatetimelabelformats)

    def distance(self):
        return self._config_get(16)

    def distance(self, num: float):
        self._config(num, js_type=False)

    def followPointer(self):
        return self._config_get(False)

    def followPointer(self, flag: bool):
        self._config(flag, js_type=False)

    def followTouchMove(self):
        return self._config_get(True)

    def followTouchMove(self, flag: bool):
        self._config(flag, js_type=False)

    def footerFormat(self):
        return self._config_get('')

    def footerFormat(self, text: str):
        self._config(text, js_type=False)

    def format(self):
        return self._config_get('undefined')

    def format(self, text: str):
        self._config(text, js_type=False)

    def headerFormat(self):
        return self._config_get(None)

    def headerFormat(self, text: str):
        self._config(text, js_type=False)

    def nullFormat(self):
        return self._config_get(None)

    def nullFormat(self, text: str):
        self._config(text, js_type=False)

    def nullFormatter(self):
        return self._config_get(None)

    def nullFormatter(self, value: Any):
        self._config(value, js_type=False)

    def pointFormat(self):
        return self._config_get('<span style="color:{series.color}"></span> {series.name}: <b>{point.low}</b> - <b>{point.high}</b><br/>')

    def pointFormat(self, text: str):
        self._config(text, js_type=False)

    def pointFormatter(self):
        return self._config_get(None)

    def pointFormatter(self, value: Any):
        self._config(value, js_type=False)

    def valueDecimals(self):
        return self._config_get(None)

    def valueDecimals(self, num: float):
        self._config(num, js_type=False)

    def valuePrefix(self):
        return self._config_get(None)

    def valuePrefix(self, text: str):
        self._config(text, js_type=False)

    def valueSuffix(self):
        return self._config_get(None)

    def valueSuffix(self, text: str):
        self._config(text, js_type=False)

    def xDateFormat(self):
        return self._config_get(None)

    def xDateFormat(self, text: str):
        self._config(text, js_type=False)
def gen_sites():
    """Yield ``(tile_name, {site_type: site_name})`` for tiles with IOB18 sites."""
    db = Database(util.get_db_root(), util.get_part())
    grid = db.grid()
    for tile_name in sorted(grid.tiles()):
        gridinfo = grid.gridinfo_at_loc(grid.loc_of_tilename(tile_name))
        # Keep only the single-ended / master IOB18 site flavors.
        iob_sites = {
            site_type: site_name
            for (site_name, site_type) in gridinfo.sites.items()
            if site_type in ('IOB18S', 'IOB18M')
        }
        if iob_sites:
            yield (tile_name, iob_sites)
class RequestBuildOperator(MapOperator[(RequestInput, ModelRequest)], ABC):
    """Operator that normalizes heterogeneous inputs into a ModelRequest.

    Accepts a bare prompt string, a dict, a dataclass, a pydantic model,
    or an already-built ModelRequest, backfilling the default model name
    configured at construction time.
    """

    def __init__(self, model: Optional[str]=None, **kwargs):
        # Default model name used when the incoming request does not carry one.
        self._model = model
        super().__init__(**kwargs)

    async def map(self, input_value: RequestInput) -> ModelRequest:
        """Convert *input_value* into a ModelRequest.

        Raises:
            ValueError: if no messages are present, no model name can be
                resolved, or a provided 'context' is not a dict.
        """
        req_dict = {}
        if isinstance(input_value, str):
            # Bare prompt: wrap it as a single human message.
            req_dict = {'messages': [ModelMessage.build_human_message(input_value)]}
        elif isinstance(input_value, dict):
            req_dict = input_value
        elif dataclasses.is_dataclass(input_value):
            req_dict = dataclasses.asdict(input_value)
        elif isinstance(input_value, BaseModel):
            req_dict = input_value.dict()
        elif isinstance(input_value, ModelRequest):
            # Already a request: only backfill the model name and return.
            if (not input_value.model):
                input_value.model = self._model
            return input_value
        if ('messages' not in req_dict):
            raise ValueError('messages is not set')
        messages = req_dict['messages']
        if isinstance(messages, str):
            # Allow the {'messages': '<prompt>'} shorthand.
            req_dict['messages'] = [ModelMessage.build_human_message(messages)]
        if ('model' not in req_dict):
            req_dict['model'] = self._model
        if (not req_dict['model']):
            raise ValueError('model is not set')
        # 'stream' is not a ModelRequest field: fold it into the context.
        stream = False
        has_stream = False
        if ('stream' in req_dict):
            has_stream = True
            stream = req_dict['stream']
            del req_dict['stream']
        if ('context' not in req_dict):
            req_dict['context'] = ModelRequestContext(stream=stream)
        else:
            context_dict = req_dict['context']
            if (not isinstance(context_dict, dict)):
                raise ValueError('context is not a dict')
            if has_stream:
                # An explicit top-level 'stream' overrides the context's value.
                context_dict['stream'] = stream
            req_dict['context'] = ModelRequestContext(**context_dict)
        return ModelRequest(**req_dict)
.django_db  # NOTE(review): presumably a stripped '@pytest.mark.django_db' marker — confirm upstream
def test_spending_over_time_subawards_success(client):
    """The spending_over_time endpoint accepts a subawards=True quarterly query."""
    resp = client.post('/api/v2/search/spending_over_time', content_type='application/json', data=json.dumps({'group': 'quarter', 'filters': non_legacy_filters(), 'subawards': True}))
    assert (resp.status_code == status.HTTP_200_OK)
.integration  # NOTE(review): presumably a stripped '@pytest.mark.integration' marker — confirm upstream
class TestThermometerSkillFetchaiLedger(AEATestCaseManyFlaky):
    """End-to-end test of the thermometer seller/client skills over the Fetch.ai ledger."""

    .flaky(reruns=MAX_FLAKY_RERUNS_INTEGRATION)  # NOTE(review): presumably a stripped '@pytest.mark.flaky' marker
    def test_thermometer(self):
        """Build both AEAs, run them, and assert on their logged negotiation progress."""
        thermometer_aea_name = 'my_thermometer'
        thermometer_client_aea_name = 'my_thermometer_client'
        self.create_agents(thermometer_aea_name, thermometer_client_aea_name)
        # Route ledger and search protocols through their dedicated connections.
        default_routing = {'fetchai/ledger_api:1.1.7': 'fetchai/ledger:0.21.5', 'fetchai/oef_search:1.1.7': 'fetchai/soef:0.27.6'}
        # Random location shared by both agents so the search finds a match.
        location = {'latitude': round(uniform((- 90), 90), 2), 'longitude': round(uniform((- 180), 180), 2)}
        # --- Seller AEA setup -------------------------------------------------
        self.set_agent_context(thermometer_aea_name)
        self.add_item('connection', 'fetchai/p2p_libp2p:0.27.5')
        self.add_item('connection', 'fetchai/soef:0.27.6')
        self.set_config('agent.dependencies', '{ "aea-ledger-fetchai": {"version": "<2.0.0,>=1.0.0"} }', type_='dict')
        self.set_config('agent.default_connection', 'fetchai/p2p_libp2p:0.27.5')
        self.add_item('connection', 'fetchai/ledger:0.21.5')
        self.add_item('skill', 'fetchai/thermometer:0.27.6')
        setting_path = 'agent.default_routing'
        self.nested_set_config(setting_path, default_routing)
        self.run_install()
        # The hand-built project must match the published packaged agent.
        diff = self.difference_to_fetched_agent('fetchai/thermometer_aea:0.30.5', thermometer_aea_name)
        assert (diff == []), 'Difference between created and fetched project for files={}'.format(diff)
        self.generate_private_key(FetchAICrypto.identifier)
        self.generate_private_key(FetchAICrypto.identifier, FETCHAI_PRIVATE_KEY_FILE_CONNECTION)
        self.add_private_key(FetchAICrypto.identifier, FETCHAI_PRIVATE_KEY_FILE)
        self.add_private_key(FetchAICrypto.identifier, FETCHAI_PRIVATE_KEY_FILE_CONNECTION, connection=True)
        # Seller acts as the libp2p genesis node with a known (non-funded) key.
        self.replace_private_key_in_file(NON_FUNDED_FETCHAI_PRIVATE_KEY_1, FETCHAI_PRIVATE_KEY_FILE_CONNECTION)
        setting_path = 'vendor.fetchai.connections.p2p_libp2p.config.ledger_id'
        self.set_config(setting_path, FetchAICrypto.identifier)
        setting_path = 'vendor.fetchai.skills.thermometer.models.strategy.args.location'
        self.nested_set_config(setting_path, location)
        # --- Client AEA setup -------------------------------------------------
        self.set_agent_context(thermometer_client_aea_name)
        self.add_item('connection', 'fetchai/p2p_libp2p:0.27.5')
        self.add_item('connection', 'fetchai/soef:0.27.6')
        self.set_config('agent.dependencies', '{ "aea-ledger-fetchai": {"version": "<2.0.0,>=1.0.0"} }', type_='dict')
        self.set_config('agent.default_connection', 'fetchai/p2p_libp2p:0.27.5')
        self.add_item('connection', 'fetchai/ledger:0.21.5')
        self.add_item('skill', 'fetchai/thermometer_client:0.26.6')
        setting_path = 'agent.default_routing'
        self.nested_set_config(setting_path, default_routing)
        self.run_install()
        diff = self.difference_to_fetched_agent('fetchai/thermometer_client:0.32.5', thermometer_client_aea_name)
        assert (diff == []), 'Difference between created and fetched project for files={}'.format(diff)
        self.generate_private_key(FetchAICrypto.identifier)
        self.generate_private_key(FetchAICrypto.identifier, FETCHAI_PRIVATE_KEY_FILE_CONNECTION)
        self.add_private_key(FetchAICrypto.identifier, FETCHAI_PRIVATE_KEY_FILE)
        self.add_private_key(FetchAICrypto.identifier, FETCHAI_PRIVATE_KEY_FILE_CONNECTION, connection=True)
        # The buyer needs funds to pay for the data.
        self.generate_wealth(FetchAICrypto.identifier)
        setting_path = 'vendor.fetchai.connections.p2p_libp2p.config'
        self.nested_set_config(setting_path, NON_GENESIS_CONFIG)
        setting_path = 'vendor.fetchai.skills.thermometer_client.models.strategy.args.location'
        self.nested_set_config(setting_path, location)
        # --- Run the seller and wait for its p2p node to come up --------------
        self.set_agent_context(thermometer_aea_name)
        self.run_cli_command('build', cwd=self._get_cwd())
        self.run_cli_command('issue-certificates', cwd=self._get_cwd())
        thermometer_aea_process = self.run_agent()
        check_strings = ('Starting libp2p node...', 'Connecting to libp2p node...', 'Successfully connected to libp2p node!', LIBP2P_SUCCESS_MESSAGE)
        missing_strings = self.missing_from_output(thermometer_aea_process, check_strings, timeout=30, is_terminating=False)
        assert (missing_strings == []), "Strings {} didn't appear in thermometer_aea output.".format(missing_strings)
        # --- Run the client and wait for its p2p node -------------------------
        self.set_agent_context(thermometer_client_aea_name)
        self.run_cli_command('build', cwd=self._get_cwd())
        self.run_cli_command('issue-certificates', cwd=self._get_cwd())
        thermometer_client_aea_process = self.run_agent()
        check_strings = ('Starting libp2p node...', 'Connecting to libp2p node...', 'Successfully connected to libp2p node!', LIBP2P_SUCCESS_MESSAGE)
        missing_strings = self.missing_from_output(thermometer_client_aea_process, check_strings, timeout=30, is_terminating=False)
        assert (missing_strings == []), "Strings {} didn't appear in thermometer_client_aea output.".format(missing_strings)
        # --- Assert the full FIPA negotiation + payment happened --------------
        check_strings = ('registering agent on SOEF.', "registering agent's service on the SOEF.", "registering agent's personality genus on the SOEF.", "registering agent's personality classification on the SOEF.", 'received CFP from sender=', 'sending a PROPOSE with proposal=', 'received ACCEPT from sender=', 'sending MATCH_ACCEPT_W_INFORM to sender=', 'received INFORM from sender=', 'checking whether transaction=', 'transaction confirmed, sending data=')
        missing_strings = self.missing_from_output(thermometer_aea_process, check_strings, timeout=240, is_terminating=False)
        assert (missing_strings == []), "Strings {} didn't appear in thermometer_aea output.".format(missing_strings)
        check_strings = ('found agents=', 'sending CFP to agent=', 'received proposal=', 'accepting the proposal from sender=', 'received MATCH_ACCEPT_W_INFORM from sender=', 'requesting transfer transaction from ledger api for message=', 'received raw transaction=', 'proposing the transaction to the decision maker. Waiting for confirmation ...', 'transaction signing was successful.', 'sending transaction to ledger.', 'transaction was successfully submitted. Transaction digest=', 'informing counterparty=', 'received INFORM from sender=', 'received the following data=')
        missing_strings = self.missing_from_output(thermometer_client_aea_process, check_strings, is_terminating=False)
        assert (missing_strings == []), "Strings {} didn't appear in thermometer_client_aea output.".format(missing_strings)
        self.terminate_agents(thermometer_aea_process, thermometer_client_aea_process)
        assert self.is_successfully_terminated(), "Agents weren't successfully terminated."
        wait_for_localhost_ports_to_close([9000, 9001])
(private_key_bytes=private_key_st)  # NOTE(review): presumably a stripped hypothesis '@given(...)' decorator — confirm
(max_examples=MAX_EXAMPLES)  # NOTE(review): presumably a stripped hypothesis '@settings(...)' decorator — confirm
def test_public_key_generation_is_equal(private_key_bytes, native_key_api, coincurve_key_api):
    """Property test: both backends derive the same public key from a private key."""
    native_public_key = native_key_api.PrivateKey(private_key_bytes).public_key
    coincurve_public_key = coincurve_key_api.PrivateKey(private_key_bytes).public_key
    assert (native_public_key == coincurve_public_key)
def nextCmd(snmpDispatcher, authData, transportTarget, *varBinds, **options):
    """Synchronous generator wrapping SNMP GETNEXT walks.

    Yields ``(errorIndication, errorStatus, errorIndex, varBindRow)`` per
    retrieved row; a caller may send() replacement var-binds to redirect the
    walk. Options consumed here: ``lexicographicMode`` (walk past the initial
    OID subtree), ``maxRows`` and ``maxCalls`` (hard stop counters).
    """
    def cbFun(*args, **kwargs):
        # Capture the async callback's results for the surrounding sync loop.
        response[:] = (args + (kwargs.get('nextVarBinds', ()),))
    options['cbFun'] = cbFun
    lexicographicMode = options.pop('lexicographicMode', True)
    maxRows = options.pop('maxRows', 0)
    maxCalls = options.pop('maxCalls', 0)
    initialVarBinds = VB_PROCESSOR.makeVarBinds(snmpDispatcher.cache, varBinds)
    totalRows = totalCalls = 0
    (errorIndication, errorStatus, errorIndex, varBindTable) = (None, 0, 0, ())
    response = []
    while True:
        if (not varBinds):
            # Nothing left to query: report the last status and finish.
            (yield (errorIndication, errorStatus, errorIndex, ((varBindTable and varBindTable[0]) or [])))
            return
        # Issue one GETNEXT round-trip (values nulled, only OIDs matter).
        cmdgen.nextCmd(snmpDispatcher, authData, transportTarget, *[(x[0], Null('')) for x in varBinds], **options)
        snmpDispatcher.transportDispatcher.runDispatcher()
        (errorIndication, errorStatus, errorIndex, varBindTable, varBinds) = response
        if errorIndication:
            (yield (errorIndication, errorStatus, errorIndex, ((varBindTable and varBindTable[0]) or [])))
            return
        elif errorStatus:
            if (errorStatus == 2):
                # noSuchName (SNMPv1 end-of-MIB) is not a real error: clear it.
                errorStatus = errorStatus.clone(0)
                errorIndex = errorIndex.clone(0)
            (yield (errorIndication, errorStatus, errorIndex, ((varBindTable and varBindTable[0]) or [])))
            return
        else:
            varBindRow = (varBindTable and varBindTable[(- 1)])
            if (not lexicographicMode):
                # Stop once the walk has left the initial OID subtree.
                for (idx, varBind) in enumerate(varBindRow):
                    (name, val) = varBind
                    if (not isinstance(val, Null)):
                        if initialVarBinds[idx][0].isPrefixOf(name):
                            break
                else:
                    # NOTE(review): for-else reconstructed per upstream pysnmp
                    # layout — no in-subtree value found, so terminate.
                    return
            for varBindRow in varBindTable:
                # Caller may send() replacement var-binds to redirect the walk.
                nextVarBinds = (yield (errorIndication, errorStatus, errorIndex, varBindRow))
                if nextVarBinds:
                    initialVarBinds = varBinds = VB_PROCESSOR.makeVarBinds(snmpDispatcher.cache, nextVarBinds)
                totalRows += 1
                totalCalls += 1
                if (maxRows and (totalRows >= maxRows)):
                    return
                if (maxCalls and (totalCalls >= maxCalls)):
                    return
def closest(color: Color, colors: Sequence[ColorInput], method: (str | None)=None, **kwargs: Any) -> Color:
    """Return the member of *colors* nearest to *color* under delta-E *method*.

    Raises ValueError for an unknown method or when no comparable color
    exists (empty input, or all distances non-finite).
    """
    if method is None:
        method = color.DELTA_E
    algorithm = color.DE_MAP.get(method)
    if not algorithm:
        raise ValueError("'{}' is not currently a supported distancing algorithm.".format(method))
    best_distance = math.inf
    best_color = None
    for candidate in colors:
        other = color._handle_color_input(candidate)
        distance = algorithm.distance(color, other, **kwargs)
        # Strict '<' keeps the first of any tied candidates.
        if distance < best_distance:
            best_distance = distance
            best_color = other
    if best_color is None:
        raise ValueError('No colors to compare')
    return best_color
class JsHtmlTabulator(JsHtml.JsHtml):
    """JavaScript accessor helpers for a Tabulator table component.

    NOTE(review): several same-named no-arg methods below look like stripped
    ``@property`` definitions — confirm against the original source.
    """

    def val(self):
        # Full component state: data plus a client-side timestamp/offset.
        return JsObjects.JsObjects.get(('{%s: {value: %s, timestamp: Date.now(), offset: new Date().getTimezoneOffset()}}' % (self.htmlCode, self.content.toStr())))

    def content(self):
        # The table's row data via Tabulator's getData().
        return JsHtml.ContentFormatters(self.page, ('%s.getData()' % self.component.js_code))

    def headers(self):
        # Column definitions via Tabulator's getColumnDefinitions().
        return JsObjects.JsObjects.get(('%s.getColumnDefinitions()' % self.component.js_code))

    def empty(self):
        return self.component.js.clearData()

    def createWidget(self, html_code: str, container: str=None, options: etypes.JS_DATA_TYPES=None):
        """Return JS that creates a new DOM node and a fresh Tabulator instance on it.

        :param html_code: id for the new DOM node / JS variable.
        :param container: optional id of the parent element (defaults to body).
        :param options: component options to serialize into the Tabulator ctor.
        """
        self.component.options.managed = False
        self.component.js_code = html_code
        # NOTE(review): 'lib' is computed (bb/c3 naming) but unused by the JS
        # template below — possibly copied from a chart widget; confirm.
        lib = ('bb' if (self.component.name == 'Billboard') else 'c3')
        js_code = JsUtils.jsConvertData(self.component.js_code, None).toStr()
        if js_code.startswith('window'):
            # Strip the 'window[' prefix and trailing ']' to get the bare name.
            js_code = js_code[7:(- 1)]
        return JsUtils.jsWrap(('\n(function(containerId, tag, htmlCode, jsCode, ctx, attrs){\n const newDiv = document.createElement(tag);\n Object.keys(attrs).forEach(function(key) {newDiv.setAttribute(key, attrs[key]);}); newDiv.id = htmlCode;\n if(!containerId){document.body.appendChild(newDiv)} else {document.getElementById(containerId).appendChild(newDiv)};\n window[jsCode] = new Tabulator("#"+ htmlCode, ctx); return newDiv;\n})(%(container)s, "%(tag)s", %(html_code)s, %(js_code)s, %(ctx)s, %(attrs)s)' % {'js_code': js_code, 'attrs': self.component.get_attrs(css_class_names=self.component.style.get_classes(), to_str=False), 'html_code': JsUtils.jsConvertData((html_code or self.component.html_code), None), 'tag': self.component.tag, 'ctx': self.component.options.config_js(options).toStr(), 'lib': lib, 'container': JsUtils.jsConvertData(container, None)}))
def get_templates_dir():
    """Locate the Ambassador templates directory.

    Tries the installed package's resources first, then a path relative to
    this module (useful when running from a source checkout).

    :raises FileNotFoundError: if no candidate directory exists.
    """
    res_dir = None
    try:
        res_dir = resource_filename(Requirement.parse('ambassador'), 'templates')
    except Exception:
        # Narrowed from a bare 'except:' (which also swallowed SystemExit /
        # KeyboardInterrupt). Package metadata may legitimately be missing.
        pass
    maybe_dirs = [res_dir, os.path.join(os.path.dirname(__file__), '..', 'templates')]
    for candidate in maybe_dirs:
        if candidate and os.path.isdir(candidate):
            return candidate
    raise FileNotFoundError('could not find ambassador templates directory (tried {})'.format(maybe_dirs))
class Test(unittest.TestCase):
    """GUI smoke tests for the fourway.T widget."""

    def testCreate(self):
        # Construction alone should yield a truthy widget.
        f = fourway.T('hello', 'fourway')
        self.assertTrue(f)

    def testAddToWindow(self):
        # The widget can be embedded in a window and presented without error.
        w = Gtk.Window()
        f = fourway.T('wibble', 'fourway')
        w.set_child(f)
        w.present()

    def testMouseButton1(self):
        # Press / drag / release updates the widget's tracked pointer x;
        # release must not reset it.
        f = fourway.T('button1', 'fourway')
        gesture = FakeGesture()
        f.onButtonPress(gesture, 1, 1)
        self.assertEqual(f.last_x, 1)
        f.onMotionNotify(gesture, 2, 1)
        self.assertEqual(f.last_x, 2)
        f.onButtonRelease(gesture, 1, 1)
        self.assertEqual(f.last_x, 2)
def test_regression_performance_metrics() -> None:
    """Smoke test: RegressionPerformanceMetrics renders with and without reference data.

    Uses a prediction column containing a NaN to exercise missing-value
    handling.
    """
    # np.NAN was removed in NumPy 2.0; np.nan is the canonical spelling.
    test_dataset = pd.DataFrame({'category_feature': ['1', '2', '3'], 'numerical_feature': [3, 2, 1], 'target': [1, 2, 3], 'prediction': [1, np.nan, 1]})
    data_mapping = ColumnMapping()
    report = Report(metrics=[RegressionPerformanceMetrics()])
    # Current data only (no reference).
    report.run(current_data=test_dataset, reference_data=None, column_mapping=data_mapping)
    assert (report.metrics is not None)
    assert (report.show() is not None)
    assert report.json()
    # Current data compared against itself as the reference.
    report.run(current_data=test_dataset, reference_data=test_dataset, column_mapping=data_mapping)
    assert (report.metrics is not None)
    assert (report.show() is not None)
    assert report.json()
class TransactionTruncator(object):
    """Maintenance task that empties and vacuums the 'transaction' table."""

    def __init__(self):
        # Separate loggers for task progress vs. query-level detail.
        self.log = logging.getLogger('Main.DbVersioning.TransactionTruncator')
        self.qlog = logging.getLogger('Main.DbVersioning.TransactionTruncator.Query')

    def truncate_transaction_table(self):
        """TRUNCATE the transaction table, then VACUUM it to reclaim space."""
        with db.session_context() as sess:
            self.qlog.info('Deleting items in transaction table')
            sess.execute('TRUNCATE transaction;')
            sess.execute('COMMIT;')
            self.qlog.info('Vacuuming table')
            # VACUUM cannot run inside a transaction, hence the explicit COMMITs.
            sess.execute('VACUUM VERBOSE transaction;')
            sess.execute('COMMIT;')
            self.qlog.info('Table truncated!')

    def go(self):
        """Entry point used by the job runner."""
        self.truncate_transaction_table()
def test_path_dir_created():
    """The llm cache directory is created on pipe construction if missing."""
    with spacy.util.make_tempdir() as tmpdir:
        cache_dir = tmpdir / 'new_dir'
        config = copy.deepcopy(_DEFAULT_CFG)
        assert not cache_dir.exists()
        config['cache']['path'] = str(cache_dir)
        spacy.blank('en').add_pipe('llm', config=config)
        assert cache_dir.exists()
class Migration(migrations.Migration):
    """Auto-generated initial migration: creates the Topic model."""
    initial = True
    # First migration of this app: no dependencies.
    dependencies = []
    operations = [migrations.CreateModel(name='Topic', fields=[('id', models.BigAutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')), ('text', models.CharField(max_length=200)), ('date_added', models.DateTimeField(auto_now_add=True))])]
class OptionSeriesColumnrangeStatesInactive(Options):
    """Highcharts ``series.columnrange.states.inactive`` options.

    NOTE(review): same-named getter/setter pairs below are presumably
    stripped ``@property`` / ``@<name>.setter`` pairs; as written the later
    def shadows the earlier one. Confirm against the generated source.
    """

    def animation(self) -> 'OptionSeriesColumnrangeStatesInactiveAnimation':
        # Sub-configuration for the inactive-state animation.
        return self._config_sub_data('animation', OptionSeriesColumnrangeStatesInactiveAnimation)

    def enabled(self):
        return self._config_get(True)

    def enabled(self, flag: bool):
        self._config(flag, js_type=False)

    def opacity(self):
        return self._config_get(0.2)

    def opacity(self, num: float):
        self._config(num, js_type=False)
def get_bins(bin_size, chrom_size, region=None):
    """Partition chromosomes into fixed-size genomic bins.

    :param bin_size: bin width in base pairs.
    :param chrom_size: iterable of ``(chrom, size)`` pairs.
    :param region: optional region string; when given, chrom_size and the
        start coordinate are narrowed via ``getUserRegion``.
    :return: list of ``(chrom, start, end)`` tuples; the final bin of each
        chromosome is clipped to the chromosome size.
    """
    start = 0
    if region:
        (chrom_size, start, _, _) = getUserRegion(chrom_size, region)
    return [
        (chrom, interval, min(size, (interval + bin_size)))
        for (chrom, size) in chrom_size
        for interval in range(start, size, bin_size)
    ]
def project_settings_with_dotenv(testproject, env_file):
    """Fixture: write a brownie config wired to *env_file* and yield it reloaded.

    The config uses ``${VAR}`` placeholders so dotenv expansion can be
    exercised by the tests.
    """
    settings = copy.deepcopy(BASE_PROJECT_CONFIG)
    settings['dotenv'] = str(env_file)
    settings['networks']['development']['default_balance'] = '${DEFAULT_BALANCE}'
    settings.setdefault('console', {})['show_colors'] = '${SHOW_COLORS}'
    config_path = testproject._path.joinpath('brownie-config.yaml')
    with config_path.open('w') as fp:
        yaml.dump(settings, fp)
    # Round-trip through yaml so the yielded dict matches what brownie reads.
    with config_path.open('r') as fp:
        loaded = yaml.safe_load(fp)
    (yield loaded)
class TestVTKDataSource(TestContour):
    """Contour-pipeline test variant that feeds data through a VTKDataSource."""

    def make_data(self):
        """Load heart.vtk via a tvtk reader and add it as a VTKDataSource."""
        script = self.script
        from mayavi.sources.vtk_data_source import VTKDataSource
        from tvtk.api import tvtk
        s = self.new_scene()
        r = tvtk.StructuredPointsReader()
        r.file_name = get_example_data('heart.vtk')
        r.update()
        d = VTKDataSource(data=r.output)
        script.add_source(d)

    def test(self):
        self.main()

    def do(self):
        """Build the visualization, then verify it survives save/load and copy."""
        self.make_data()
        from mayavi.modules.outline import Outline
        from mayavi.modules.iso_surface import IsoSurface
        from mayavi.modules.contour_grid_plane import ContourGridPlane
        from mayavi.modules.scalar_cut_plane import ScalarCutPlane
        script = self.script
        s = script.engine.current_scene
        # Assemble the module pipeline: outline, two contour grid planes,
        # an iso-surface, and a scalar cut plane.
        o = Outline()
        script.add_module(o)
        cgp = ContourGridPlane()
        script.add_module(cgp)
        cgp.grid_plane.position = 15
        cgp = ContourGridPlane()
        cgp.contour.filled_contours = True
        cgp.grid_plane.axis = 'y'
        cgp.grid_plane.position = 15
        script.add_module(cgp)
        iso = IsoSurface(compute_normals=True)
        script.add_module(iso)
        iso.contour.contours = [200.0]
        cp = ScalarCutPlane()
        script.add_module(cp)
        ip = cp.implicit_plane
        ip.normal = (0, 0, 1)
        ip.origin = (0, 0, 5)
        ip.widget.enabled = False
        s.scene.isometric_view()
        self.check()
        # Round-trip the whole visualization through an in-memory mv2 file.
        f = BytesIO()
        f.name = abspath('test.mv2')
        script.save_visualization(f)
        f.seek(0)
        engine = script.engine
        engine.close_scene(s)
        script.load_visualization(f)
        s = engine.current_scene
        self.check()
        # Detach and re-attach the source; the pipeline must still render.
        source = s.children.pop()
        s.children.append(source)
        cp = source.children[0].children[(- 1)]
        cp.implicit_plane.widget.enabled = False
        self.check()
        # A deep copy of the source must also reproduce the visualization.
        source1 = copy.deepcopy(source)
        s.children[0] = source1
        cp = source1.children[0].children[(- 1)]
        cp.implicit_plane.widget.enabled = False
        self.check()
class ChainOfStates():
    """Base class for chain-of-states methods (e.g. NEB/string) over a list of images."""
    logger = logging.getLogger('cos')
    # Coordinate systems the images are allowed to use.
    valid_coord_types = ('cart', 'cartesian', 'dlc')
def __init__(self, images, fix_first=True, fix_last=True, align_fixed=True, climb=False, climb_rms=0.005, climb_lanczos=False, climb_lanczos_rms=0.005, climb_fixed=True, energy_min_mix=False, scheduler=None, progress=False):
    """Set up the chain from a list of images.

    :param images: at least two image objects sharing atoms and coord_type.
    :param fix_first/fix_last: keep the terminal images frozen.
    :param align_fixed: whether fixed images may still be aligned.
    :param climb*: climbing-image / Lanczos-tangent activation thresholds.
    :param scheduler: optional dask-style scheduler for parallel calculations.
    """
    assert (len(images) >= 2), 'Need at least 2 images!'
    self.images = list(images)
    self.fix_first = fix_first
    self.fix_last = fix_last
    self.align_fixed = align_fixed
    self.climb = climb
    self.climb_rms = climb_rms
    self.climb_lanczos = climb_lanczos
    self.climb_fixed = climb_fixed
    self.energy_min_mix = energy_min_mix
    # Lanczos tangents must never kick in before climbing does.
    self.climb_lanczos_rms = min(self.climb_rms, climb_lanczos_rms)
    self.scheduler = scheduler
    self.progress = progress
    # Cached chain-level quantities, invalidated via clear().
    self._coords = None
    self._forces = None
    self._energy = None
    self.counter = 0
    self.coords_length = self.images[0].coords.size
    self.cart_coords_length = self.images[0].cart_coords.size
    self.zero_vec = np.zeros(self.coords_length)
    # Optimization history.
    self.coords_list = list()
    self.forces_list = list()
    self.all_energies = list()
    self.all_true_forces = list()
    self.lanczos_tangents = dict()
    self.prev_lanczos_hash = None
    # climb_rms == -1 is the sentinel for "climb from the first cycle".
    self.started_climbing = (self.climb_rms == (- 1))
    if self.started_climbing:
        self.log('Will start climbing immediately.')
    self.started_climbing_lanczos = False
    self.fixed_climb_indices = None
    self.org_forces_indices = list()
    # All images must share atoms and coordinate system with the first one.
    img0 = self.images[0]
    self.image_atoms = copy(img0.atoms)
    self.coord_type = img0.coord_type
    assert (self.coord_type in self.valid_coord_types), f'Invalid coord_type! Supported types are: {self.valid_coord_types}'
    assert all([(img.coord_type == self.coord_type) for img in self.images]), 'coord_type of images differ!'
    try:
        self.typed_prims = img0.internal.typed_prims
    except AttributeError:
        # Cartesian images have no internal-coordinate definition.
        self.typed_prims = None
def calculator(self):
try:
calc = self.images[0].calculator
except IndexError:
calc = None
return calc
def log(self, message):
    """Emit a debug message prefixed with the (1-based) current cycle number."""
    self.logger.debug(f'Counter {(self.counter + 1):03d}, {message}')
def get_fixed_indices(self):
fixed = list()
if self.fix_first:
fixed.append(0)
if self.fix_last:
fixed.append((len(self.images) - 1))
return fixed
def moving_indices(self):
fixed = self.get_fixed_indices()
return [i for i in range(len(self.images)) if (i not in fixed)]
def last_index(self):
    """Index of the final image in the chain."""
    return len(self.images) - 1
def moving_images(self):
    """The image objects that are free to move."""
    images = self.images
    return [images[ind] for ind in self.moving_indices]
def max_image_num(self):
    """Total number of images in the chain."""
    return len(self.images)
def image_inds(self):
    """All image indices, ``0 .. max_image_num - 1``, as a list."""
    count = self.max_image_num
    return list(range(count))
def zero_fixed_vector(self, vector):
    """Overwrite the entries of fixed images in *vector* with the zero vector (in place)."""
    for ind in self.get_fixed_indices():
        vector[ind] = self.zero_vec
    return vector
def clear(self):
    """Reset cached energy, forces, Hessian and (if possible) tangents."""
    self._energy = None
    self._forces = None
    self._hessian = None
    try:
        # NOTE(review): a plain assignment normally cannot raise
        # AttributeError; this presumably guards subclasses where
        # '_tangents' is not writable — confirm before simplifying.
        self._tangents = None
    except AttributeError:
        self.log('There are no tangents to reset.')
def atoms(self):
    """Atoms of one image, repeated once per image in the chain."""
    return len(self.images) * self.images[0].atoms
def set_vector(self, name, vector, clear=False):
    """Distribute the concatenated *vector* onto the moving images as attribute *name*.

    Optionally clears cached quantities afterwards.
    """
    per_image = vector.reshape(-1, self.coords_length)
    assert len(per_image) == len(self.images)
    for ind in self.moving_indices:
        setattr(self.images[ind], name, per_image[ind])
    if clear:
        self.clear()
def coords(self):
    """Concatenated coordinates of all images (also cached in ``self._coords``)."""
    self._coords = np.concatenate([image.coords for image in self.images])
    return self._coords
def coords(self, coords):
    """Set coordinates on all moving images and invalidate cached results."""
    self.set_vector(name='coords', vector=coords, clear=True)
def cart_coords(self):
    """Concatenated Cartesian coordinates of all images."""
    per_image = [image.cart_coords for image in self.images]
    return np.concatenate(per_image)
def coords3d(self):
    """Coordinates as an (N, 3) array; only valid for Cartesian images."""
    assert self.images[0].coord_type == 'cart'
    return self.coords.reshape(-1, 3)
def image_coords(self):
    """2D array with one row of coordinates per image."""
    per_image = [img.coords for img in self.images]
    return np.array(per_image)
def set_coords_at(self, i, coords):
    """Update coordinates of image *i*.

    Fixed images are only modified when ``align_fixed`` is set, and then
    directly via the private attribute to bypass the coords setter.
    """
    image = self.images[i]
    assert image.coord_type in ('cart', 'cartesian'), "ChainOfStates.set_coords_at() has to be reworked to support internal coordiantes. Try to set 'align: False' in the 'opt' section of the .yaml input file."
    if i in self.moving_indices:
        image.coords = coords
    elif self.align_fixed:
        image._coords = coords
def energy(self):
    """Array of per-image energies (cached in ``self._energy``)."""
    energies = [image.energy for image in self.images]
    self._energy = np.array(energies)
    return self._energy
def energy(self, energies):
    """Assign energies to the moving images and cache the full sequence."""
    assert len(energies) == len(self.images)
    for ind in self.moving_indices:
        self.images[ind].energy = energies[ind]
    self._energy = energies
def par_image_calc(self, image):
    """Run energy & force calculation on *image* and return it (used as a dask task)."""
    image.calc_energy_and_forces()
    return image
def set_images(self, indices, images):
    """Place the given *images* at the corresponding *indices* of the chain."""
    for pos, replacement in zip(indices, images):
        self.images[pos] = replacement
def concurrent_force_calcs(self, images_to_calculate, image_indices):
    """Run force calculations for the given images in parallel via dask.

    The calculators' 'pal' (parallelism/core count) attribute is rescaled so
    that one batch of dask workers saturates the original core budget, and is
    restored on all images afterwards.
    """
    client = self.get_dask_client()
    self.log(client)
    orig_pal = images_to_calculate[0].calculator.pal
    n_workers = len(client.scheduler_info()['workers'])
    n_images = len(images_to_calculate)
    # Cores per image when a full batch of workers runs concurrently.
    new_pal = max(1, (orig_pal // n_workers))
    n_batches = (n_images // n_workers)
    for i in range(0, n_batches):
        for j in range((i * n_workers), ((i + 1) * n_workers)):
            images_to_calculate[j].calculator.pal = new_pal
    # The trailing partial batch has fewer images, so each may use more cores.
    n_last_batch = (n_images % n_workers)
    if (n_last_batch > 0):
        new_pal = max(1, (orig_pal // n_last_batch))
        for i in range((n_batches * n_workers), n_images):
            images_to_calculate[i].calculator.pal = new_pal
    image_futures = client.map(self.par_image_calc, images_to_calculate)
    self.set_images(image_indices, client.gather(image_futures))
    # Restore the original parallelism on every image of the chain.
    for i in range(0, n_images):
        self.images[i].calculator.pal = orig_pal
def calculate_forces(self):
    """Calculate energies and forces for all images that require them.

    Moving images are always recalculated; fixed end images only once
    (when their energy is still unset). Returns a dict with the per-image
    'energies' and 'forces', which are also appended to the running
    histories in ``self.all_energies`` / ``self.all_true_forces``.
    """
    images_to_calculate = self.moving_images
    image_indices = self.moving_indices
    # Fixed ends are calculated exactly once; their energy is still needed.
    if (self.fix_first and (self.images[0]._energy is None)):
        images_to_calculate = ([self.images[0]] + images_to_calculate)
        image_indices = ([0] + list(image_indices))
    if (self.fix_last and (self.images[(- 1)]._energy is None)):
        images_to_calculate = (images_to_calculate + [self.images[(- 1)]])
        image_indices = (list(image_indices) + [(- 1)])
    assert (len(images_to_calculate) <= len(self.images))
    if self.scheduler:
        # Parallel path via dask.
        self.concurrent_force_calcs(images_to_calculate, image_indices)
    else:
        for image in images_to_calculate:
            image.calc_energy_and_forces()
            if self.progress:
                print('.', end='')
                sys.stdout.flush()
    if self.progress:
        print('\r', end='')
    self.set_zero_forces_for_fixed_images()
    self.counter += 1
    if self.energy_min_mix:
        # Energies of all electronic states at every image; requires
        # multi-state calculators exposing 'all_energies'.
        all_energies = np.array([image.all_energies for image in self.images])
        energy_diffs = np.diff(all_energies, axis=1).flatten()
        # Index of the lowest state at every image.
        calc_inds = all_energies.argmin(axis=1)
        mix_at = []
        for (i, calc_ind) in enumerate(calc_inds[:(- 1)]):
            next_ind = calc_inds[(i + 1)]
            # Detect a switch of the minimum-energy state between images
            # i and i+1 that was not already treated earlier.
            if ((calc_ind != next_ind) and (i not in self.org_forces_indices) and ((i + 1) not in self.org_forces_indices)):
                # Mix at whichever of the two images has the smaller gap.
                min_diff_offset = energy_diffs[[i, (i + 1)]].argmin()
                mix_at.append((i + min_diff_offset))
        for ind in mix_at:
            self.images[ind].calculator.mix = True
            print(f'Switch after calc_ind={calc_ind} at index {ind}. Recalculating.')
            self.images[ind].calc_energy_and_forces()
            self.org_forces_indices.append(ind)
            calc_ind = calc_inds[ind]
    energies = [image.energy for image in self.images]
    forces = np.array([image.forces for image in self.images])
    self.all_energies.append(energies)
    self.all_true_forces.append(forces)
    return {'energies': energies, 'forces': forces}
def forces(self):
    """Concatenated forces of all images.

    Zeroes the forces on fixed end images first and counts the access
    as one cycle.
    """
    self.set_zero_forces_for_fixed_images()
    self._forces = np.concatenate([image.forces for image in self.images])
    self.counter += 1
    return self._forces
def forces(self, forces):
    """Distribute the given concatenated forces onto the moving images."""
    self.set_vector(name='forces', vector=forces)
def perpendicular_forces(self):
    """Flattened perpendicular force components over all images."""
    perp = [self.get_perpendicular_forces(ind) for ind in range(len(self.images))]
    return np.array(perp).flatten()
def get_perpendicular_forces(self, i):
    """Force component of image *i* perpendicular to the path tangent.

    Fixed images always yield the zero vector.
    """
    if i not in self.moving_indices:
        return self.zero_vec
    forces = self.images[i].forces
    tangent = self.get_tangent(i)
    # Subtract the projection onto the (normalized) tangent.
    return forces - forces.dot(tangent) * tangent
def gradient(self):
    """Negative of the concatenated forces."""
    return -self.forces
def gradient(self, gradient):
    """Set the forces from a gradient (forces = -gradient)."""
    self.forces = -gradient
def masses_rep(self):
    """Flattened mass representation over all images."""
    reps = [image.masses_rep for image in self.images]
    return np.array(reps).flatten()
def results(self):
    """Per-image result dicts, each augmented with coords and cart_coords.

    Note: the images' result dicts are updated in place.
    """
    collected = []
    for image in self.images:
        res = image.results
        res['coords'] = image.coords
        res['cart_coords'] = image.cart_coords
        collected.append(res)
    return collected
def set_zero_forces_for_fixed_images(self):
    """Explicitly zero the Cartesian forces on fixed end images."""
    zeros = np.zeros_like(self.images[0].cart_coords)
    if self.fix_first:
        self.images[0].cart_forces = zeros
        self.log('Zeroed forces on fixed first image.')
    if self.fix_last:
        self.images[-1].cart_forces = zeros
        self.log('Zeroed forces on fixed last image.')
def get_tangent(self, i, kind='upwinding', lanczos_guess=None, disable_lanczos=False):
    """Return the normalized tangent vector at image *i*.

    kind selects the tangent formula: 'simple' (central difference),
    'bisect' (average of normalized one-sided tangents), 'upwinding'
    (energy-weighted, Henkelman-style) or 'lanczos' (lowest Hessian
    eigenvector). End images always use the one-sided tangent.
    lanczos_guess seeds the Lanczos iteration; disable_lanczos prevents
    the automatic switch to the Lanczos tangent at the HEI.
    """
    # Once Lanczos climbing is active, the HEI tangent comes from the
    # Lanczos algorithm instead of a finite-difference formula.
    if ((not disable_lanczos) and self.started_climbing_lanczos and (i == self.get_hei_index())):
        kind = 'lanczos'
    tangent_kinds = ('upwinding', 'simple', 'bisect', 'lanczos')
    # Bugfix: message was missing the f-prefix, so the valid kinds were
    # never interpolated into the assertion error.
    assert (kind in tangent_kinds), f'Invalid kind! Valid kinds are: {tangent_kinds}'
    prev_index = max((i - 1), 0)
    next_index = min((i + 1), (len(self.images) - 1))
    prev_image = self.images[prev_index]
    ith_image = self.images[i]
    next_image = self.images[next_index]
    # Image subtraction yields coordinate differences.
    tangent_plus = (next_image - ith_image)
    tangent_minus = (ith_image - prev_image)
    # End images only have one neighbour; return the normalized
    # one-sided tangent right away.
    if (i == 0):
        return (tangent_plus / np.linalg.norm(tangent_plus))
    elif (i == (len(self.images) - 1)):
        return (tangent_minus / np.linalg.norm(tangent_minus))
    if (kind == 'simple'):
        tangent = (next_image - prev_image)
    elif (kind == 'bisect'):
        first_term = (tangent_minus / np.linalg.norm(tangent_minus))
        sec_term = (tangent_plus / np.linalg.norm(tangent_plus))
        tangent = (first_term + sec_term)
    elif (kind == 'upwinding'):
        prev_energy = prev_image.energy
        ith_energy = ith_image.energy
        next_energy = next_image.energy
        next_energy_diff = abs((next_energy - ith_energy))
        prev_energy_diff = abs((prev_energy - ith_energy))
        delta_energy_max = max(next_energy_diff, prev_energy_diff)
        delta_energy_min = min(next_energy_diff, prev_energy_diff)
        # On a monotonic stretch use the uphill one-sided tangent; at
        # extrema blend both, weighted by the energy differences.
        if (next_energy > ith_energy > prev_energy):
            tangent = tangent_plus
        elif (next_energy < ith_energy < prev_energy):
            tangent = tangent_minus
        elif (next_energy >= prev_energy):
            tangent = ((tangent_plus * delta_energy_max) + (tangent_minus * delta_energy_min))
        else:
            tangent = ((tangent_plus * delta_energy_min) + (tangent_minus * delta_energy_max))
    elif (kind == 'lanczos'):
        # Cache Lanczos tangents per geometry; reuse the previous tangent
        # as the initial guess when no explicit guess is given.
        cur_hash = hash_arr(ith_image.coords, precision=4)
        try:
            tangent = self.lanczos_tangents[cur_hash]
            self.log(f'Returning previously calculated Lanczos tangent with hash={cur_hash}')
        except KeyError:
            guess = lanczos_guess
            if ((guess is None) and (self.prev_lanczos_hash is not None)):
                guess = self.lanczos_tangents[self.prev_lanczos_hash]
                self.log(f'Using tangent with hash={self.prev_lanczos_hash} as initial guess for Lanczos algorithm.')
            (w_min, tangent) = geom_lanczos(ith_image, guess=guess, logger=self.logger)
            self.lanczos_tangents[cur_hash] = tangent
            self.prev_lanczos_hash = cur_hash
    tangent /= np.linalg.norm(tangent)
    return tangent
def get_tangents(self):
    """Tangent vectors of all images stacked into one array."""
    tangents = [self.get_tangent(ind) for ind in range(len(self.images))]
    return np.array(tangents)
def as_xyz(self, comments=None):
    """Concatenated XYZ representation of all images.

    *comments* is accepted for API compatibility but currently ignored.
    """
    blocks = [image.as_xyz() for image in self.images]
    return '\n'.join(blocks)
def get_dask_client(self):
    """Connect to the dask scheduler given by ``self.scheduler``."""
    return Client(self.scheduler)
def get_hei_index(self, energies=None):
    """Index of the highest-energy image (HEI).

    Uses the images' current energies unless *energies* is given.
    """
    if energies is None:
        energies = [img.energy for img in self.images]
    return np.argmax(energies)
def prepare_opt_cycle(self, last_coords, last_energies, last_forces):
    """Archive the previous cycle and decide whether climbing starts.

    Returns True exactly when climbing was activated in this call, which
    signals the optimizer that its history should be reset.
    NOTE(review): 'last_energies' is currently unused — confirm whether
    callers rely on the signature.
    """
    self.coords_list.append(last_coords)
    self.forces_list.append(last_forces)
    already_climbing = self.started_climbing
    if (self.climb and (not already_climbing)):
        self.started_climbing = self.check_for_climbing_start(self.climb_rms)
        if self.started_climbing:
            msg = 'Will use climbing image(s) in next cycle.'
            self.log(msg)
            print(msg)
    # Freeze the climbing indices once climbing is underway, if requested.
    if (already_climbing and self.climb_fixed and (self.fixed_climb_indices is None)):
        self.fixed_climb_indices = self.get_climbing_indices()
    already_climbing_lanczos = self.started_climbing_lanczos
    # Lanczos tangents only become available after climbing has started.
    if (self.climb_lanczos and self.started_climbing and (not already_climbing_lanczos)):
        self.started_climbing_lanczos = self.check_for_climbing_start(self.climb_lanczos_rms)
        if self.started_climbing_lanczos:
            msg = 'Will use Lanczos algorithm for HEI tangent in next cycle.'
            self.log(msg)
            print(msg)
    return ((not already_climbing) and self.started_climbing)
def rms(self, arr):
    """Root-mean-square of *arr*."""
    values = np.asarray(arr)
    return np.sqrt(np.mean(values ** 2))
def check_for_climbing_start(self, ref_rms):
    """True when the latest RMS(force) dropped to *ref_rms* or below.

    Growing-string-type subclasses additionally require the chain to be
    fully grown; plain chains lack the attribute and count as grown.
    """
    rms_forces = self.rms(self.forces_list[-1])
    fully_grown = getattr(self, 'fully_grown', True)
    return (rms_forces <= ref_rms) and fully_grown
def get_climbing_indices(self):
    """Return the indices of the image(s) that climb in the next cycle.

    Empty tuple when climbing is disabled/not yet started or the HEI sits
    at a chain end; one index for 'one'-image climbing or a HEI adjacent
    to a fixed end; otherwise the two images flanking the HEI.
    """
    hei_index = self.get_hei_index()
    move_inds = self.moving_indices
    if (not (self.climb and self.started_climbing)):
        climb_indices = tuple()
    elif (self.fixed_climb_indices is not None):
        # Indices were frozen earlier (climb_fixed); reuse them.
        climb_indices = self.fixed_climb_indices
        _ = ('index' if (len(climb_indices) == 1) else 'indices')
        self.log(f'Returning fixed climbing {_}.')
    elif ((self.climb == 'one') or ((hei_index == 1) or (hei_index == move_inds[(- 1)]))):
        climb_indices = (hei_index,)
    elif (hei_index in move_inds[1:(- 1)]):
        # C2-NEB style: climb with the two images next to the HEI.
        climb_indices = ((hei_index - 1), (hei_index + 1))
    else:
        climb_indices = tuple()
        self.log("Want to climb but can't. HEI is first or last image!")
    return climb_indices
def get_climbing_forces(self, ind):
    """Climbing-image forces and energy for image *ind*.

    The force component along the tangent is inverted, driving the image
    uphill along the path while it relaxes perpendicular to it.
    """
    image = self.images[ind]
    forces = image.forces
    tangent = self.get_tangent(ind)
    inverted = forces - 2 * forces.dot(tangent) * tangent
    return inverted, image.energy
def set_climbing_forces(self, forces):
    """Replace forces at the climbing indices by climbing forces.

    A no-op (returns *forces* unchanged) before climbing has started.
    """
    if not self.started_climbing:
        return forces
    for i in self.get_climbing_indices():
        climb_forces, climb_en = self.get_climbing_forces(i)
        forces[i] = climb_forces
        norm = np.linalg.norm(climb_forces)
        self.log(f'Climbing with image {i}, E = {climb_en:.6f} au, norm(forces)={norm:.6f}')
    return forces
def get_splined_hei(self):
    """Estimate the highest-energy image (HEI) from cubic splines.

    Energies and Cartesian coordinates are splined along the normalized
    path parameter; returns (hei_coords, hei_energy, hei_tangent,
    hei_frac_index), where hei_frac_index is the fractional image index.
    """
    self.log('Splining HEI')
    # Align the images, as we want to spline the Cartesian coordinates.
    cart_coords = align_coords([image.cart_coords for image in self.images])
    # Normalized cumulative path progress in [0, 1] per image.
    coord_diffs = get_coords_diffs(cart_coords)
    self.log(f' Coordinate differences: {coord_diffs}')
    energies = np.array(self.energy)
    energies_spline = interp1d(coord_diffs, energies, kind='cubic')
    x_fine = np.linspace(0, 1, 500)
    energies_fine = energies_spline(x_fine)
    # Determine the present maximum of the splined energies.
    hei_ind = energies_fine.argmax()
    hei_x = x_fine[hei_ind]
    self.log(f'Found splined HEI at x={hei_x:.4f}')
    hei_frac_index = (hei_x * (len(self.images) - 1))
    hei_energy = energies_fine[hei_ind]
    reshaped = cart_coords.reshape((- 1), self.cart_coords_length)
    transp_coords = reshaped.transpose()
    # splprep only handles a limited number of dimensions at once,
    # so the coordinate rows are splined in chunks of 9.
    (tcks, us) = zip(*[splprep(transp_coords[i:(i + 9)], s=0, k=3, u=coord_diffs) for i in range(0, len(transp_coords), 9)])
    hei_coords = np.vstack([splev([hei_x], tck) for tck in tcks])
    hei_coords = hei_coords.flatten()
    # First derivative of the coordinate splines gives the HEI tangent.
    hei_tangent = np.vstack([splev([hei_x], tck, der=1) for tck in tcks]).T
    hei_tangent = hei_tangent.flatten()
    hei_tangent /= np.linalg.norm(hei_tangent)
    return (hei_coords, hei_energy, hei_tangent, hei_frac_index)
def get_image_calc_counter_sum(self):
    """Total number of calculations performed by all image calculators."""
    return sum(image.calculator.calc_counter for image in self.images)
def describe(self):
    """One-line, human-readable summary of the chain."""
    first = self.images[0]
    return f'ChainOfStates, {len(self.images)} images, ({first.sum_formula}, {len(first.atoms)} atoms) per image'
def __str__(self):
    """Name of the (possibly subclassed) chain class."""
    return type(self).__name__
class FedAvgWithLRSyncAggregator(SyncAggregatorWithOptimizer):
    """Synchronous FedAvg aggregator whose update is applied through the
    optimizer of the base class (allowing a server learning rate)."""

    def __init__(self, *, global_model: IFLModel, channel: Optional[IdentityChannel]=None, **kwargs) -> None:
        # Populate self.cfg with defaults for this component before
        # delegating the actual setup to the base aggregator.
        init_self_cfg(self, component_class=__class__, config_class=FedAvgWithLRSyncAggregatorConfig, **kwargs)
        super().__init__(global_model=global_model, channel=channel, **kwargs)

    # NOTE(review): first parameter is 'cls' — this looks like a stripped
    # @classmethod decorator; confirm against the original framework code.
    def _set_defaults_in_cfg(cls, cfg):
        # No extra config defaults beyond the base class.
        pass
class OptionSeriesDependencywheelSonificationTracksMappingFrequency(Options):
    """Generated Highcharts option wrapper for the sonification track
    'frequency' mapping of dependency-wheel series.

    NOTE(review): every name below is defined twice (getter, then setter);
    the original @property / @name.setter decorators were presumably
    stripped, so as written the later definition shadows the earlier one —
    confirm against the code generator that produced this file.
    """

    def mapFunction(self):
        # Getter: returns the configured value (default None).
        return self._config_get(None)

    def mapFunction(self, value: Any):
        # Setter: stores *value* verbatim in the configuration.
        self._config(value, js_type=False)

    def mapTo(self):
        # Getter: returns the configured value (default None).
        return self._config_get(None)

    def mapTo(self, text: str):
        # Setter: stores *text* in the configuration.
        self._config(text, js_type=False)

    def max(self):
        # Getter: returns the configured value (default None).
        return self._config_get(None)

    def max(self, num: float):
        # Setter: stores *num* in the configuration.
        self._config(num, js_type=False)

    def min(self):
        # Getter: returns the configured value (default None).
        return self._config_get(None)

    def min(self, num: float):
        # Setter: stores *num* in the configuration.
        self._config(num, js_type=False)

    def within(self):
        # Getter: returns the configured value (default None).
        return self._config_get(None)

    def within(self, value: Any):
        # Setter: stores *value* verbatim in the configuration.
        self._config(value, js_type=False)
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.