code
stringlengths 75
104k
| docstring
stringlengths 1
46.9k
|
|---|---|
def sasets(self) -> 'SASets':
    """
    Create and return a SASets object for running various analytics.

    See the sasets.py module.

    :return: a SASets instance bound to this session
    """
    # Lazily load the supporting SAS macros, once per session.
    if not self._loaded_macros:
        self._loadmacros()
        self._loaded_macros = True
    return SASets(self)
|
This methods creates a SASets object which you can use to run various analytics.
See the sasets.py module.
:return: sasets object
|
def configure_config(graph):
    """
    Configure the config discovery endpoint.

    Exposes the current service configuration through a read-only
    (retrieve) route.

    :param graph: the object graph used to wire the convention
    :returns: the config discovery resource
    """
    # NOTE(review): original docstring said "health endpoint", but the code
    # wires a ConfigDiscoveryConvention over Config — docstring corrected.
    ns = Namespace(
        subject=Config,
    )
    convention = ConfigDiscoveryConvention(
        graph,
    )
    convention.configure(ns, retrieve=tuple())
    return convention.config_discovery
|
Configure the health endpoint.
:returns: the current service configuration
|
def check(text):
    """Suggest the preferred compositional forms (Strunk & White)."""
    error_code = "strunk_white.composition"
    template = "Try '{}' instead of '{}'."
    # Each entry pairs a preferred form with the patterns it should replace.
    replacements = [
        # Put statements in positive form
        ["dishonest", ["not honest"]],
        ["trifling", ["not important"]],
        ["forgot", ["did not remember"]],
        ["ignored", ["did not pay (any )?attention to"]],
        ["distrusted", ["did not have much confidence in"]],
        # Omit needless words
        ["whether", ["the question as to whether"]],
        ["no doubt", ["there is no doubt but that"]],
        ["used for fuel", ["used for fuel purposes"]],
        ["he", ["he is a man who"]],
        ["hastily", ["in a hasty manner"]],
        ["this subject", ["this is a subject that"]],
        ["Her story is strange.", ["Her story is a strange one."]],
        ["because", ["the reason why is that"]],
        ["because / since", ["owing to the fact that"]],
        ["although / though", ["in spite of the fact that"]],
        ["remind you / notify you",
         ["call your attention to the fact that"]],
        ["I did not know that / I was unaware that",
         ["I was unaware of the fact that"]],
        ["his failure", ["the fact that he had not succeeded"]],
        ["my arrival", ["the fact that i had arrived"]],
    ]
    return preferred_forms_check(text, replacements, error_code, template)
|
Suggest the preferred forms.
|
def get(self, fields=None):
    """taobao.shopcats.list.get -- fetch the shop categories shown on the storefront.

    This API returns the buyer-facing browse/navigation categories, which
    differ from the seller back-office item-management categories.

    :param fields: optional list of fields to request; when empty or None,
        all fields of ShopCat are requested. (The mutable default ``[]``
        was replaced by ``None`` to avoid shared-default-argument bugs;
        callers passing no argument see identical behavior.)
    :return: the populated ``self.shop_cats``
    """
    request = TOPRequest('taobao.shopcats.list.get')
    if not fields:
        fields = ShopCat().fields
    request['fields'] = fields
    self.create(self.execute(request))
    return self.shop_cats
|
taobao.shopcats.list.get 获取前台展示的店铺类目
此API获取淘宝面向买家的浏览导航类目 跟后台卖家商品管理的类目有差异
|
def releases(self):
    """The releases for this app."""
    resource_path = ('apps', self.name, 'releases')
    return self._h._get_resources(
        resource=resource_path, obj=Release, app=self)
|
The releases for this app.
|
def colors_no_palette(colors=None, **kwds):
    """Return a Palette but don't take into account Pallete Names."""
    # Strings are parsed; anything else is normalized into color triplets.
    if isinstance(colors, str):
        parsed = _split_colors(colors)
    else:
        parsed = to_triplets(colors or ())
    color_iter = (color(c) for c in parsed or ())
    return palette.Palette(color_iter, **kwds)
|
Return a Palette but don't take into account Pallete Names.
|
def remove(self, ref, cb=None):
    """Remove a bundle from the remote.

    Dispatches to the API-backed or filesystem-backed implementation
    depending on the remote type.

    :param ref: reference of the bundle to remove
    :param cb: optional callback passed through to the implementation
    """
    # NOTE(review): original docstring said "Check in a bundle to the
    # remote", which contradicts the name and the _remove_* calls below.
    if self.is_api:
        return self._remove_api(ref, cb)
    else:
        return self._remove_fs(ref, cb)
|
Check in a bundle to the remote
|
def threshold(self, value):
    """Set the threshold used to determine if content qualifies as spam.

    The scale runs from 1 to 10, with 10 being most strict (most likely
    to be considered spam).

    :param value: threshold value, either a SpamThreshold or an int
    :type value: int or SpamThreshold
    """
    if isinstance(value, SpamThreshold):
        self._threshold = value
        return
    self._threshold = SpamThreshold(value)
|
Threshold used to determine if your content qualifies as spam.
On a scale from 1 to 10, with 10 being most strict, or most likely to
be considered as spam.
:param value: Threshold used to determine if your content qualifies as
spam.
On a scale from 1 to 10, with 10 being most strict, or
most likely to be considered as spam.
:type value: int
|
def eval_adiabatic_limit(YABFGN, Ytilde, P0):
    """Compute the limiting SLH model for the adiabatic approximation

    Args:
        YABFGN: The tuple (Y, A, B, F, G, N)
            as returned by prepare_adiabatic_limit.
        Ytilde: The pseudo-inverse of Y, satisfying Y * Ytilde = P0.
        P0: The projector onto the null-space of Y.

    Returns:
        SLH: Limiting SLH model
    """
    Y, A, B, F, G, N = YABFGN
    # Effective generator projected onto the null space of Y.
    Klim = (P0 * (B - A * Ytilde * A) * P0).expand().simplify_scalar()
    # Limiting Hamiltonian: anti-Hermitian part of Klim divided by 2i.
    Hlim = ((Klim - Klim.dag())/2/I).expand().simplify_scalar()
    # Limiting coupling operators, projected onto the null space.
    Ldlim = (P0 * (G - A * Ytilde * F) * P0).expand().simplify_scalar()
    # Correction to the scattering matrix from the eliminated dynamics.
    dN = identity_matrix(N.shape[0]) + F.H * Ytilde * F
    Nlim = (P0 * N * dN * P0).expand().simplify_scalar()
    # NOTE(review): all three components are daggered on return —
    # presumably a convention conversion; confirm against the SLH class docs.
    return SLH(Nlim.dag(), Ldlim.dag(), Hlim.dag())
|
Compute the limiting SLH model for the adiabatic approximation
Args:
YABFGN: The tuple (Y, A, B, F, G, N)
as returned by prepare_adiabatic_limit.
Ytilde: The pseudo-inverse of Y, satisfying Y * Ytilde = P0.
P0: The projector onto the null-space of Y.
Returns:
SLH: Limiting SLH model
|
def _setup_advanced_theme(self, theme_name, output_dir, advanced_name):
    """
    Setup all the files required to enable an advanced theme.

    Copies all the files over and creates the required directories
    if they do not exist.

    :param theme_name: theme to copy the files over from
    :param output_dir: output directory to place the files in
    :param advanced_name: name given to the newly created advanced theme
    """
    # FIX: the original used bare string literals ("""Directories""") as
    # section markers; those are evaluated statements, not comments.
    # Directories
    output_theme_dir = os.path.join(output_dir, advanced_name)
    output_images_dir = os.path.join(output_theme_dir, advanced_name)
    input_theme_dir = os.path.join(
        utils.get_themes_directory(theme_name, self.png_support), theme_name)
    input_images_dir = os.path.join(input_theme_dir, theme_name)
    advanced_pkg_dir = os.path.join(utils.get_file_directory(), "advanced")
    # Directory creation
    for directory in (output_dir, output_theme_dir):
        utils.create_directory(directory)
    # Theme TCL file: rename the theme and switch image format to PNG
    file_name = theme_name + ".tcl"
    theme_input = os.path.join(input_theme_dir, file_name)
    theme_output = os.path.join(output_theme_dir, "{}.tcl".format(advanced_name))
    with open(theme_input, "r") as fi, open(theme_output, "w") as fo:
        for line in fi:
            # Setup new theme
            line = line.replace(theme_name, advanced_name)
            # Setup new image format: replace "gif89" before "gif" so the
            # longer token is not left half-rewritten as "png89".
            line = line.replace("gif89", "png")
            line = line.replace("gif", "png")
            fo.write(line)
    # pkgIndex.tcl file
    theme_pkg_input = os.path.join(advanced_pkg_dir, "pkgIndex.tcl")
    theme_pkg_output = os.path.join(output_theme_dir, "pkgIndex.tcl")
    with open(theme_pkg_input, "r") as fi, open(theme_pkg_output, "w") as fo:
        for line in fi:
            fo.write(line.replace("advanced", advanced_name))
    # pkgIndex_package.tcl -> pkgIndex.tcl
    theme_pkg_input = os.path.join(advanced_pkg_dir, "pkgIndex_package.tcl")
    theme_pkg_output = os.path.join(output_dir, "pkgIndex.tcl")
    with open(theme_pkg_input, "r") as fi, open(theme_pkg_output, "w") as fo:
        for line in fi:
            fo.write(line.replace("advanced", advanced_name))
    # Images: replace any stale output images with a fresh copy
    if os.path.exists(output_images_dir):
        rmtree(output_images_dir)
    copytree(input_images_dir, output_images_dir)
|
Setup all the files required to enable an advanced theme.
Copies all the files over and creates the required directories
if they do not exist.
:param theme_name: theme to copy the files over from
:param output_dir: output directory to place the files in
|
def read(self, size=sys.maxsize):
    """Read at most size bytes from the file (less if the read hits EOF
    before obtaining size bytes).

    :param size: maximum number of bytes to read
    :return: the bytes read, or an empty bytes object at EOF (the
        original implicitly returned None at EOF, which breaks the
        file-object contract).
    """
    blob_size = int(self.blob_properties.get('content-length'))
    if self._pointer >= blob_size:
        return b''  # EOF
    chunk = self._download_chunk_with_retries(
        chunk_offset=self._pointer, chunk_size=size)
    # FIX: advance by what was actually returned, not the requested size,
    # so the pointer never overshoots the end of the blob.
    self._pointer += len(chunk)
    return chunk
|
Read at most size bytes from the file (less if the read hits EOF
before obtaining size bytes).
|
def field_from_django_field(cls, field_name, django_field, readonly):
    """
    Build and return a Resource Field instance for the given Django
    model field.
    """
    widget_cls = cls.widget_from_django_field(django_field)
    widget_kwargs = cls.widget_kwargs_for_field(field_name)
    return cls.DEFAULT_RESOURCE_FIELD(
        attribute=field_name,
        column_name=field_name,
        widget=widget_cls(**widget_kwargs),
        readonly=readonly,
        default=django_field.default,
    )
|
Returns a Resource Field instance for the given Django model field.
|
def close_session(self):
    """Close the current session if it is still open."""
    if self._session.closed:
        return
    # Only close the connector when this session owns it.
    if self._session._connector_owner:
        self._session._connector.close()
    self._session._connector = None
|
Close current session.
|
def patch(self, id_or_uri, operation, path, value, timeout=-1, custom_headers=None):
    """
    Uses HTTP PATCH to update a resource.

    Only one operation can be performed in each PATCH call.

    Args:
        id_or_uri: Can be either the resource ID or the resource URI.
        operation: Patch operation
        path: Path
        value: Value
        timeout: Timeout in seconds. Wait for task completion by default.
            The timeout does not abort the operation in OneView; it just
            stops waiting for its completion.
        custom_headers: Extra headers to send with the request.

    Returns:
        Updated resource.
    """
    body = [{'op': operation, 'path': path, 'value': value}]
    return self.patch_request(
        id_or_uri=id_or_uri,
        body=body,
        timeout=timeout,
        custom_headers=custom_headers,
    )
|
Uses the PATCH to update a resource.
Only one operation can be performed in each PATCH call.
Args:
id_or_uri: Can be either the resource ID or the resource URI.
operation: Patch operation
path: Path
value: Value
timeout: Timeout in seconds. Wait for task completion by default. The timeout does not abort the operation
in OneView; it just stops waiting for its completion.
Returns:
Updated resource.
|
def _update_partition_srvc_node_ip(self, tenant_name, srvc_ip,
vrf_prof=None, part_name=None):
"""Function to update srvc_node address of partition. """
self.dcnm_obj.update_project(tenant_name, part_name,
service_node_ip=srvc_ip,
vrf_prof=vrf_prof,
desc="Service Partition")
|
Function to update srvc_node address of partition.
|
def is_not_inf(self):
    """Assert that ``val`` is a real number and is not Inf (infinity)."""
    self._validate_number()
    self._validate_real()
    if math.isinf(self.val):
        self._err('Expected not <Inf>, but was.')
    # Returning self keeps the fluent assertion chain going.
    return self
|
Asserts that val is real number and not Inf (infinity).
|
def _run_cromwell(args):
    """Run CWL with Cromwell.

    Builds the Cromwell command line (JVM memory sized from the inputs,
    generated config, metadata and options files), runs it inside a
    dedicated work directory, then reads the metadata output: on failure
    dumps debug information and exits non-zero, otherwise moves the
    workflow outputs into the final directory.
    """
    main_file, json_file, project_name = _get_main_and_json(args.directory)
    work_dir = utils.safe_makedir(os.path.join(os.getcwd(), "cromwell_work"))
    final_dir = utils.safe_makedir(os.path.join(work_dir, "final"))
    if args.no_container:
        _remove_bcbiovm_path()
    log_file = os.path.join(work_dir, "%s-cromwell.log" % project_name)
    metadata_file = os.path.join(work_dir, "%s-metadata.json" % project_name)
    option_file = os.path.join(work_dir, "%s-options.json" % project_name)
    # Direct final outputs and request a larger boot disk for cloud runs.
    cromwell_opts = {"final_workflow_outputs_dir": final_dir,
                     "default_runtime_attributes": {"bootDiskSizeGb": 20}}
    with open(option_file, "w") as out_handle:
        json.dump(cromwell_opts, out_handle)
    cmd = ["cromwell", "-Xms1g", "-Xmx%s" % _estimate_runner_memory(json_file),
           "run", "--type", "CWL",
           "-Dconfig.file=%s" % hpc.create_cromwell_config(args, work_dir, json_file)]
    cmd += hpc.args_to_cromwell_cl(args)
    cmd += ["--metadata-output", metadata_file, "--options", option_file,
            "--inputs", json_file, main_file]
    with utils.chdir(work_dir):
        _run_tool(cmd, not args.no_container, work_dir, log_file)
        if metadata_file and utils.file_exists(metadata_file):
            with open(metadata_file) as in_handle:
                metadata = json.load(in_handle)
            if metadata["status"] == "Failed":
                _cromwell_debug(metadata)
                sys.exit(1)
            else:
                _cromwell_move_outputs(metadata, final_dir)
|
Run CWL with Cromwell.
|
def indent(text, n=4):
    """Indent each line of text by n spaces."""
    prefix = ' ' * n
    lines = text.split('\n')
    return '\n'.join(prefix + line for line in lines)
|
Indent each line of text by n spaces
|
def add(self, event, subscriber, append=True):
    """
    Add a subscriber for an event.

    :param event: The name of an event.
    :param subscriber: The subscriber to be added (and called when the
                       event is published).
    :param append: Whether to append or prepend the subscriber to an
                   existing subscriber list for the event.
    """
    subscribers = self._subscribers
    try:
        queue = subscribers[event]
    except KeyError:
        # First subscriber for this event: start a fresh deque.
        subscribers[event] = deque([subscriber])
    else:
        (queue.append if append else queue.appendleft)(subscriber)
|
Add a subscriber for an event.
:param event: The name of an event.
:param subscriber: The subscriber to be added (and called when the
event is published).
:param append: Whether to append or prepend the subscriber to an
existing subscriber list for the event.
|
def make_doc(self):
    """
    Generate the doc for the current context in the form
    {'key': 'label'}
    """
    res = {}
    for column in self.columns:
        if isinstance(column['__col__'], ColumnProperty):
            key = column['name']
            label = column['__col__'].columns[0].info.get(
                'colanderalchemy', {}
            ).get('title')
            if label is None:
                continue
            res[key] = label
        elif isinstance(column['__col__'], RelationshipProperty):
            # 1- if the relationship is direct (an AppOption), override the
            #    field with the value (to avoid extra nesting depth)
            # 2- if the related object is more complex, build its own path
            # 3- if the relationship is uselist, build a list of related
            #    elements under an "l" key, each element keyed by its index
            key = column['name']
            label = column['__col__'].info.get(
                'colanderalchemy', {}
            ).get('title')
            if label is None:
                continue
            if column['__col__'].uselist:
                subres = column['__prop__'].make_doc()
                for subkey, value in subres.items():
                    new_key = u"%s.first.%s" % (key, subkey)
                    res[new_key] = u"%s - %s (premier élément)" % (
                        label, value
                    )
                    new_key = u"%s.last.%s" % (key, subkey)
                    res[new_key] = u"%s - %s (dernier élément)" % (
                        label, value
                    )
            else:
                subres = column['__prop__'].make_doc()
                for subkey, value in subres.items():
                    new_key = u"%s.%s" % (key, subkey)
                    res[new_key] = u"%s - %s" % (label, value)
    print("------------------ Rendering the docs -------------------")
    # BUG FIX: dict.keys() returns a view in Python 3 and has no .sort();
    # iterate over a sorted copy of the keys instead.
    for key in sorted(res.keys()):
        value = res[key]
        print(u"{0} : py3o.{1}".format(value, key))
    return res
|
Generate the doc for the current context in the form
{'key': 'label'}
|
def add_text(self, text, cursor=None, justification=None):
    """Input text, short or long. Writes in order, within the defined
    page boundaries. Sequential add_text commands will print without
    additional whitespace.

    :param text: text to write; runs of whitespace are collapsed
    :param cursor: starting cursor; defaults to the page cursor
    :param justification: defaults to the object's justification
    """
    if cursor is None:
        cursor = self.page.cursor
    # FIX: raw string for the regex -- "\s" in a plain string is an
    # invalid escape sequence (DeprecationWarning on Python 3.6+).
    text = re.sub(r"\s\s+", " ", text)
    if justification is None:
        justification = self.justification
    if '\n' in text:
        # FIX: separate loop variable; the original shadowed `text`.
        for segment in text.split('\n'):
            PDFText(self.session, self.page, segment, self.font,
                    self.text_color, cursor, justification, self.double_spacing)
            self.add_newline()
    else:
        PDFText(self.session, self.page, text, self.font, self.text_color,
                cursor, justification, self.double_spacing)
|
Input text, short or long. Writes in order, within the defined page boundaries. Sequential add_text commands will print without
additional whitespace.
|
def _get_config():
'''
Get user docker configuration
Return: dict
'''
cfg = os.path.expanduser('~/.dockercfg')
try:
fic = open(cfg)
try:
config = json.loads(fic.read())
finally:
fic.close()
except Exception:
config = {'rootPath': '/dev/null'}
if not 'Configs' in config:
config['Configs'] = {}
return config
|
Get user docker configuration
Return: dict
|
def main_generate(table_names, stream):
    """Print out valid prom python code for tables that already exist
    in a database.

    This is really handy when you want to bootstrap an existing database
    to work with prom and don't want to manually create Orm objects for
    the tables you want to use -- let `generate` do it for you.
    """
    with stream.open() as fp:
        fp.write_line("from datetime import datetime, date")
        fp.write_line("from decimal import Decimal")
        fp.write_line("from prom import Orm, Field")
        fp.write_newlines()
        for table_name, inter, fields in get_table_info(*table_names):
            class_name = table_name.title().replace("_", "")
            fp.write_line("class {}(Orm):".format(class_name))
            fp.write_line("    table_name = '{}'".format(table_name))
            conn_name = inter.connection_config.name
            if conn_name:
                fp.write_line("    connection_name = '{}'".format(conn_name))
            fp.write_newlines()
            # The magic fields get special placement/handling.
            magic_field_names = {"_id", "_created", "_updated"}
            if "_id" in fields:
                fp.write_line(get_field_def("_id", fields.pop("_id")))
                magic_field_names.discard("_id")
            for field_name, field_d in fields.items():
                fp.write_line(get_field_def(field_name, field_d))
            # Explicitly null out any magic fields the table lacks.
            for magic_field_name in magic_field_names:
                if magic_field_name not in fields:
                    fp.write_line("    {} = None".format(magic_field_name))
            fp.write_newlines(2)
|
This will print out valid prom python code for given tables that already exist
in a database.
This is really handy when you want to bootstrap an existing database to work
with prom and don't want to manually create Orm objects for the tables you want
to use, let `generate` do it for you
|
def getpath(self, section, option):
    """Return the option value with user (~) and environment variables expanded."""
    raw = self.get(section, option)
    return os.path.expanduser(os.path.expandvars(raw))
|
Return option as an expanded path.
|
def _prepare_output(partitions, verbose):
"""Returns dict with 'raw' and 'message' keys filled."""
out = {}
partitions_count = len(partitions)
out['raw'] = {
'offline_count': partitions_count,
}
if partitions_count == 0:
out['message'] = 'No offline partitions.'
else:
out['message'] = "{count} offline partitions.".format(count=partitions_count)
if verbose:
lines = (
'{}:{}'.format(topic, partition)
for (topic, partition) in partitions
)
out['verbose'] = "Partitions:\n" + "\n".join(lines)
else:
cmdline = sys.argv[:]
cmdline.insert(1, '-v')
out['message'] += '\nTo see all offline partitions run: ' + ' '.join(cmdline)
if verbose:
out['raw']['partitions'] = [
{'topic': topic, 'partition': partition}
for (topic, partition) in partitions
]
return out
|
Returns dict with 'raw' and 'message' keys filled.
|
def remove_description(self, id, **kwargs):  # noqa: E501
    """Remove description from a specific source  # noqa: E501

    This method makes a synchronous HTTP request by default. To make an
    asynchronous HTTP request, please pass async_req=True
    >>> thread = api.remove_description(id, async_req=True)
    >>> result = thread.get()

    :param async_req bool
    :param str id: (required)
    :return: ResponseContainer
             If the method is called asynchronously,
             returns the request thread.
    """
    kwargs['_return_http_data_only'] = True
    # Both the sync and async paths delegate to the same call; when
    # async_req is set it returns the request thread instead of the data.
    return self.remove_description_with_http_info(id, **kwargs)  # noqa: E501
|
Remove description from a specific source # noqa: E501
# noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.remove_description(id, async_req=True)
>>> result = thread.get()
:param async_req bool
:param str id: (required)
:return: ResponseContainer
If the method is called asynchronously,
returns the request thread.
|
def up(self, migration_id=None, fake=False):
    """Execute pending migrations (up to migration_id) and record them."""
    if not self.check_directory():
        return
    for migration in self.get_migrations_to_up(migration_id):
        logger.info('Executing migration: %s' % migration.filename)
        # Module is loaded even when faking, matching prior behavior.
        migration_module = self.load_migration_file(migration.filename)
        if not fake:
            if hasattr(migration_module, 'up'):
                migration_module.up(self.db)
            else:
                logger.error('No up method on migration %s' % migration.filename)
        # Record the migration as applied, stamped with the current UTC time.
        record = migration.as_dict()
        record['date'] = datetime.utcnow()
        self.collection.insert(record)
|
Executes migrations.
|
def increase_fcp_usage(self, fcp, assigner_id=None):
    """Increase the usage count of the given fcp.

    Returns True if it's a new fcp, otherwise returns False.
    """
    # TODO: check assigner_id to make sure on the correct fcp record
    connections = self.db.get_connections_from_assigner(assigner_id)
    if connections == 0:
        # First connection for this assigner: create the assignment.
        self.db.assign(fcp, assigner_id)
        return True
    self.db.increase_usage(fcp)
    return False
|
Incrase fcp usage of given fcp
Returns True if it's a new fcp, otherwise return False
|
def fingers_needed(fingering):
    """Return the number of fingers needed to play the given fingering.

    The lowest fretted position is assigned to the index finger, which can
    barre several strings -- but an open string played in between breaks the
    barre for all strings below it, so strings at the minimum position past
    an open string need their own finger.
    """
    # FIX: default=0 guards against an all-open (or empty) fingering, which
    # previously raised ValueError from min() on an empty sequence.
    minimum = min((finger for finger in fingering if finger), default=0)
    split = False        # an open string was played, breaking any barre
    index_used = False   # the index finger is already counted
    count = 0
    for finger in reversed(fingering):
        if finger == 0:
            # Open string: subsequent (lower) strings cannot share the barre.
            split = True
        elif finger == minimum and not split:
            # Barre candidate: count the index finger only once.
            if not index_used:
                count += 1
                index_used = True
        else:
            count += 1
    return count
|
Return the number of fingers needed to play the given fingering.
|
def load_additional_data(self, valid_data, many, original_data):
    """Include unknown fields after load.

    Unknown fields are added with no processing at all.

    Args:
        valid_data (dict or list): validated data returned by ``load()``.
        many (bool): if True, data and original_data are a list.
        original_data (dict or list): data passed to ``load()`` in the
            first place.

    Returns:
        dict: the same ``valid_data`` extended with the unknown attributes.

    Inspired by https://github.com/marshmallow-code/marshmallow/pull/595.
    """
    def _merge_unknown(target, source):
        # Copy over any key present in the raw input but not validated.
        for extra_key in set(source) - set(target):
            target[extra_key] = source[extra_key]

    if many:
        for idx, item in enumerate(valid_data):
            _merge_unknown(item, original_data[idx])
    else:
        _merge_unknown(valid_data, original_data)
    return valid_data
|
Include unknown fields after load.
Unknown fields are added with no processing at all.
Args:
valid_data (dict or list): validated data returned by ``load()``.
many (bool): if True, data and original_data are a list.
original_data (dict or list): data passed to ``load()`` in the
first place.
Returns:
dict: the same ``valid_data`` extended with the unknown attributes.
Inspired by https://github.com/marshmallow-code/marshmallow/pull/595.
|
def _salt_send_domain_event(opaque, conn, domain, event, event_data):
    '''
    Helper function sending a salt event for a libvirt domain.

    :param opaque: the opaque data that is passed to the callback.
                   This is a dict with 'prefix', 'object' and 'event' keys.
    :param conn: libvirt connection
    :param domain: name of the domain related to the event
    :param event: name of the event
    :param event_data: additional event data dict to send
    '''
    payload = {
        'domain': {
            'name': domain.name(),
            'id': domain.ID(),
            'uuid': domain.UUIDString(),
        },
        'event': event,
    }
    # Merge in any event-specific data before dispatching.
    payload.update(event_data)
    _salt_send_event(opaque, conn, payload)
|
Helper function send a salt event for a libvirt domain.
:param opaque: the opaque data that is passed to the callback.
This is a dict with 'prefix', 'object' and 'event' keys.
:param conn: libvirt connection
:param domain: name of the domain related to the event
:param event: name of the event
:param event_data: additional event data dict to send
|
def wait(self):
    '''Block until the done event is set (effectively no timeout) and
    return the (status, exception) pair.'''
    self._done_event.wait(MAXINT)
    return self._status, self._exception
|
wait for the done event to be set - no timeout
|
def controlled(self, control_qubit):
    """
    Add the CONTROLLED modifier to the gate with the given control qubit.
    """
    qubit = unpack_qubit(control_qubit)
    # Modifier and its qubit are both prepended, keeping them paired.
    self.modifiers.insert(0, "CONTROLLED")
    self.qubits.insert(0, qubit)
    return self
|
Add the CONTROLLED modifier to the gate with the given control qubit.
|
def OnOpen(self, event):
    """File open event handler.

    Prompts to save unsaved changes, asks the user for a file, loads it
    into the grid, updates the window title, and resets the undo stack.
    """
    # If changes have taken place save of old grid
    if undo.stack().haschanged():
        save_choice = self.interfaces.get_save_request_from_user()
        if save_choice is None:
            # Cancelled close operation
            return
        elif save_choice:
            # User wants to save content
            post_command_event(self.main_window, self.main_window.SaveMsg)
    # Get filepath from user
    f2w = get_filetypes2wildcards(
        ["pys", "pysu", "xls", "xlsx", "ods", "all"])
    filetypes = f2w.keys()
    wildcards = f2w.values()
    # NOTE(review): .keys()/.values() are list-indexable and support
    # .index() only on Python 2; this handler assumes Python 2 wx.
    wildcard = "|".join(wildcards)
    message = _("Choose file to open.")
    style = wx.OPEN
    default_filetype = config["default_open_filetype"]
    try:
        default_filterindex = filetypes.index(default_filetype)
    except ValueError:
        # Be graceful if the user has entered an unkown filetype
        default_filterindex = 0
    get_fp_fidx = self.interfaces.get_filepath_findex_from_user
    filepath, filterindex = get_fp_fidx(wildcard, message, style,
                                        filterindex=default_filterindex)
    if filepath is None:
        return
    filetype = filetypes[filterindex]
    # Change the main window filepath state
    self.main_window.filepath = filepath
    # Load file into grid
    post_command_event(self.main_window,
                       self.main_window.GridActionOpenMsg,
                       attr={"filepath": filepath, "filetype": filetype})
    # Set Window title to new filepath
    title_text = filepath.split("/")[-1] + " - pyspread"
    post_command_event(self.main_window,
                       self.main_window.TitleMsg, text=title_text)
    self.main_window.grid.ForceRefresh()
    if is_gtk():
        try:
            wx.Yield()
        except:
            pass
    # Update savepoint and clear the undo stack
    undo.stack().clear()
    undo.stack().savepoint()
    # Update content changed state
    try:
        post_command_event(self.main_window, self.ContentChangedMsg)
    except TypeError:
        # The main window does not exist any more
        pass
|
File open event handler
|
def parse(self):
    """Parse the instrument data and register raw results.

    The input .xlsx is first converted to a '|'-delimited CSV; each row
    is then turned into a raw-result dict keyed by SampleID or
    SerialNumber.
    """
    # convert the xlsx file to csv first
    delimiter = "|"
    csv_file = self.xlsx_to_csv(self.getInputFile(), delimiter=delimiter)
    reader = csv.DictReader(csv_file, delimiter=delimiter)
    for n, row in enumerate(reader):
        resid = row.get("SampleID", None)
        serial = row.get("SerialNumber", None)
        # Convert empty values as "Invalid"
        value = row.get("Value", None) or "Invalid"
        # no resid and no serial
        if not any([resid, serial]):
            self.err("Result identification not found.", numline=n)
            continue
        rawdict = row
        # BUG FIX: str.rstrip(" cps/ml") strips any trailing characters
        # from that *set* (' ', 'c', 'p', 's', '/', 'm', 'l'), not the
        # literal suffix -- e.g. it would mangle values ending in those
        # letters. Strip the exact unit suffix instead.
        suffix = " cps/ml"
        if value.endswith(suffix):
            value = value[:-len(suffix)]
        rawdict["Value"] = value
        rawdict['DefaultResult'] = 'Value'
        # HEALTH-567 correction factor for calculation
        # XXX HEALTH-567 Is this just for nmrl?
        if 'Plasma' in rawdict.get('Matrix', 'Other'):
            rawdict['CF'] = 1  # report value as-is
        else:
            rawdict['CF'] = 1.82  # report value * 1.82
        key = resid or serial
        testname = row.get("Product", "EasyQDirector")
        self._addRawResult(key, {testname: rawdict}, False)
|
parse the data
|
def fromvars(cls, dataset, batch_size, train=None, **kwargs):
    """Create a Batch directly from a number of Variables.

    Extra keyword arguments are attached to the batch as attributes.
    (``train`` is accepted for interface compatibility but unused.)
    """
    batch = cls()
    batch.batch_size = batch_size
    batch.dataset = dataset
    batch.fields = dataset.fields.keys()
    for name, value in kwargs.items():
        setattr(batch, name, value)
    return batch
|
Create a Batch directly from a number of Variables.
|
def flash_spi_attach(self, hspi_arg):
    """Send the SPI attach command to enable the SPI flash pins.

    ESP8266 ROM does this when you send flash_begin; ESP32 ROM has it
    as a dedicated SPI command.
    """
    arg = struct.pack('<I', hspi_arg)
    if not self.IS_STUB:
        # ESP32 ROM loader takes an additional 'is legacy' argument (plus
        # 3 reserved bytes), which is not supported in the stub loader or
        # esptool.py as it's not usually needed. Send is_legacy = 0.
        arg += struct.pack('BBBB', 0, 0, 0, 0)
    self.check_command("configure SPI flash pins", ESP32ROM.ESP_SPI_ATTACH, arg)
|
Send SPI attach command to enable the SPI flash pins
ESP8266 ROM does this when you send flash_begin, ESP32 ROM
has it as a SPI command.
|
def key_value_contents(use_dict=None, as_class=dict, key_values=()):
    """Return the contents of an object as a dict, skipping None values."""
    if _debug: key_value_contents._debug("key_value_contents use_dict=%r as_class=%r key_values=%r", use_dict, as_class, key_values)
    # Make (or extend) the dictionary of content.
    if use_dict is None:
        use_dict = as_class()
    for key, value in key_values:
        if value is None:
            continue
        # Expand nested objects that know how to render themselves.
        if hasattr(value, 'dict_contents'):
            value = value.dict_contents(as_class=as_class)
        use_dict.__setitem__(key, value)
    return use_dict
|
Return the contents of an object as a dict.
|
def set_device_id(self, dev, id):
    """Set device ID to new value.

    :param str dev: Serial device address/path
    :param id: Device ID to set (must fit in an unsigned byte)
    :raises ValueError: if id is outside 0..255
    """
    if not (0 <= id <= 255):
        raise ValueError("ID must be an unsigned byte!")
    com, code, ok = io.send_packet(
        CMDTYPE.SETID, 1, dev, self.baudrate, 5, id)
    if not ok:
        raise_error(code)
|
Set device ID to new value.
:param str dev: Serial device address/path
:param id: Device ID to set
|
def has(self, block, name):
    """
    Return whether or not the field named `name` has a non-default value.
    """
    # A KeyError from either the key build or the store means "no value".
    try:
        return self._kvs.has(self._key(block, name))
    except KeyError:
        return False
|
Return whether or not the field named `name` has a non-default value
|
def __decode_dictionary(self, message_type, dictionary):
    """Merge dictionary in to a new message instance.

    Args:
        message_type: Message class to instantiate and populate
            (the original docstring called this "message").
        dictionary: Dictionary to extract information from. Dictionary
            is as parsed from JSON. Nested objects will also be dictionaries.

    Returns:
        A populated message_type instance; unknown keys are preserved
        as unrecognized fields where a variant can be determined.
    """
    message = message_type()
    for key, value in six.iteritems(dictionary):
        if value is None:
            # Explicit JSON null: reset the field to its default.
            try:
                message.reset(key)
            except AttributeError:
                pass  # This is an unrecognized field, skip it.
            continue
        try:
            field = message.field_by_name(key)
        except KeyError:
            # Save unknown values.
            variant = self.__find_variant(value)
            if variant:
                message.set_unrecognized_field(key, value, variant)
            continue
        if field.repeated:
            # This should be unnecessary? Or in fact become an error.
            if not isinstance(value, list):
                value = [value]
            valid_value = [self.decode_field(field, item)
                           for item in value]
            setattr(message, field.name, valid_value)
            continue
        # This is just for consistency with the old behavior.
        if value == []:
            continue
        try:
            setattr(message, field.name, self.decode_field(field, value))
        except messages.DecodeError:
            # Save unknown enum values.
            if not isinstance(field, messages.EnumField):
                raise
            variant = self.__find_variant(value)
            if variant:
                message.set_unrecognized_field(key, value, variant)
    return message
|
Merge dictionary in to message.
Args:
message: Message to merge dictionary in to.
dictionary: Dictionary to extract information from. Dictionary
is as parsed from JSON. Nested objects will also be dictionaries.
|
def reset(self):
    """
    Kill the old session and create a new one with no proxies or headers.
    """
    # Tear down the existing connection first.
    self.quit()
    # Drop any proxy configuration back to the defaults.
    self.driver_args['service_args'] = self.default_service_args
    # Reset headers/capabilities to the PhantomJS defaults.
    self.dcap = dict(webdriver.DesiredCapabilities.PHANTOMJS)
    # Spin up a fresh web driver.
    self._create_session()
|
Kills old session and creates a new one with no proxies or headers
|
def make_2d(array, verbose=True):
    """
    Tiny tool to expand 1D arrays into 2D ones.

    Parameters
    ----------
    array : array-like
    verbose : bool, default: True
        whether to print warnings

    Returns
    -------
    np.array with ndim = 2
    """
    arr = np.asarray(array)
    if arr.ndim >= 2:
        return arr
    if verbose:
        warnings.warn('Expected 2D input data array, but found {}D. '
                      'Expanding to 2D.'.format(arr.ndim))
    # Add a trailing axis so each element becomes its own row.
    return np.atleast_1d(arr)[:, None]
|
tiny tool to expand 1D arrays the way i want
Parameters
----------
array : array-like
verbose : bool, default: True
whether to print warnings
Returns
-------
np.array of with ndim = 2
|
def propmerge(into, data_from):
    """Merge JSON schema requirements into a dictionary.

    Combines the per-property sub-schemas of ``data_from`` into a deep
    copy of ``into``, intersecting enums, tightening min/max bounds and
    validating conflicting types/multipleOf constraints (as needed for
    combining ``allOf`` clauses).

    :param into: base property dict (not mutated)
    :param data_from: property dict whose constraints are merged in
    :raises TypeError: on conflicting 'type' values
    :raises AttributeError: on incompatible 'multipleOf' values
    :return: the merged property dict
    """
    # MODERNIZATION: six.iteritems replaced with .items(), which works on
    # both Python 2 and 3 and drops the six dependency for this function.
    newprops = copy.deepcopy(into)
    for prop, propval in data_from.items():
        if prop not in newprops:
            newprops[prop] = propval
            continue
        new_sp = newprops[prop]
        for subprop, spval in propval.items():
            if subprop not in new_sp:
                new_sp[subprop] = spval
            elif subprop == 'enum':
                # Only values allowed by both schemas survive.
                new_sp[subprop] = set(spval) & set(new_sp[subprop])
            elif subprop == 'type':
                if spval != new_sp[subprop]:
                    raise TypeError("Type cannot conflict in allOf'")
            elif subprop in ('minLength', 'minimum'):
                # Keep the tighter (larger) lower bound.
                new_sp[subprop] = (new_sp[subprop] if
                                   new_sp[subprop] > spval else spval)
            elif subprop in ('maxLength', 'maximum'):
                # Keep the tighter (smaller) upper bound.
                new_sp[subprop] = (new_sp[subprop] if
                                   new_sp[subprop] < spval else spval)
            elif subprop == 'multipleOf':
                if new_sp[subprop] % spval == 0:
                    new_sp[subprop] = spval
                else:
                    raise AttributeError(
                        "Cannot set conflicting multipleOf values")
            else:
                new_sp[subprop] = spval
        newprops[prop] = new_sp
    return newprops
|
Merge JSON schema requirements into a dictionary
|
def _read_from_cwlinput(in_file, work_dir, runtime, parallel, input_order, output_cwl_keys):
    """Read data records from a JSON dump of inputs. Avoids command line flattening of records.

    :param in_file: path to the JSON file holding the CWL inputs
    :param work_dir: working directory passed through to input finalization
    :param runtime: runtime information passed through to input finalization
    :param parallel: parallelization mode handed to ``_merge_cwlinputs``
    :param input_order: ordered mapping of input key -> kind ("record" or variable)
    :param output_cwl_keys: CWL keys expected in the output
    :return: tuple of (finalized data items, input files discovered)
    """
    with open(in_file) as in_handle:
        inputs = json.load(in_handle)
    items_by_key = {}
    input_files = []
    passed_keys = set([])
    # "sentinel*"/"ignore*" keys are bookkeeping entries, never data items.
    for key, input_val in ((k, v) for (k, v) in inputs.items() if not k.startswith(("sentinel", "ignore"))):
        if key.endswith("_toolinput"):
            key = key.replace("_toolinput", "")
        if input_order[key] == "record":
            # Records keep their nested structure; track which keys they provide.
            cur_keys, items = _read_cwl_record(input_val)
            passed_keys |= cur_keys
            items_by_key[key] = items
        else:
            # Plain variables are keyed by their double-underscore-split path.
            items_by_key[tuple(key.split("__"))] = _cwlvar_to_wdl(input_val)
            input_files = _find_input_files(input_val, input_files)
    prepped = _merge_cwlinputs(items_by_key, input_order, parallel)
    out = []
    for data in prepped:
        # Grouped inputs arrive as lists/tuples; finalize each member.
        if isinstance(data, (list, tuple)):
            out.append([_finalize_cwl_in(utils.to_single_data(x), work_dir, list(passed_keys),
                                         output_cwl_keys, runtime) for x in data])
        else:
            out.append(_finalize_cwl_in(data, work_dir, list(passed_keys), output_cwl_keys, runtime))
    return out, input_files
|
Read data records from a JSON dump of inputs. Avoids command line flattening of records.
|
def searchForMessageIDs(self, query, offset=0, limit=5, thread_id=None):
"""
Find and get message IDs by query
:param query: Text to search for
:param offset: Number of messages to skip
:param limit: Max. number of messages to retrieve
:param thread_id: User/Group ID to search in. See :ref:`intro_threads`
:type offset: int
:type limit: int
:return: Found Message IDs
:rtype: generator
:raises: FBchatException if request failed
"""
thread_id, thread_type = self._getThread(thread_id, None)
data = {
"query": query,
"snippetOffset": offset,
"snippetLimit": limit,
"identifier": "thread_fbid",
"thread_fbid": thread_id,
}
j = self._post(
self.req_url.SEARCH_MESSAGES, data, fix_request=True, as_json=True
)
result = j["payload"]["search_snippets"][query]
snippets = result[thread_id]["snippets"] if result.get(thread_id) else []
for snippet in snippets:
yield snippet["message_id"]
|
Find and get message IDs by query
:param query: Text to search for
:param offset: Number of messages to skip
:param limit: Max. number of messages to retrieve
:param thread_id: User/Group ID to search in. See :ref:`intro_threads`
:type offset: int
:type limit: int
:return: Found Message IDs
:rtype: generator
:raises: FBchatException if request failed
|
def reset_selective(self, regex=None):
    """Clear selective variables from internal namespaces based on a
    specified regular expression.

    Parameters
    ----------
    regex : string or compiled pattern, optional
        A regular expression pattern that will be used in searching
        variable names in the users namespaces. When omitted (``None``)
        nothing is cleared.
    """
    # Original fell through to the deletion loop with `m` unbound when no
    # regex was given, raising NameError; treat None as an explicit no-op.
    if regex is None:
        return
    try:
        m = re.compile(regex)
    except TypeError:
        raise TypeError('regex must be a string or compiled pattern')
    # Search for keys in each namespace that match the given regex
    # If a match is found, delete the key/value pair.
    for ns in self.all_ns_refs:
        # Snapshot matching names first: deleting from a dict while
        # iterating it raises RuntimeError on Python 3.
        for var in [name for name in ns if m.search(name)]:
            del ns[var]
|
Clear selective variables from internal namespaces based on a
specified regular expression.
Parameters
----------
regex : string or compiled pattern, optional
A regular expression pattern that will be used in searching
variable names in the users namespaces.
|
def blacklist_token():
    """
    Blacklists an existing JWT by registering its jti claim in the blacklist.

    .. example::
       $ curl http://localhost:5000/blacklist_token -X POST \
         -d '{"token":"<your_token>"}'
    """
    body = flask.request.get_json(force=True)
    token = body['token']
    claims = guard.extract_jwt_token(token)
    blacklist.add(claims['jti'])
    return flask.jsonify(message='token blacklisted ({})'.format(token))
|
Blacklists an existing JWT by registering its jti claim in the blacklist.
.. example::
$ curl http://localhost:5000/blacklist_token -X POST \
-d '{"token":"<your_token>"}'
|
def predict(self, a, b):
    """ Compute the test statistic

    The statistic is the mutual information estimated in both directions
    (a -> b and b -> a), averaged.

    Args:
        a (array-like): Variable 1
        b (array-like): Variable 2

    Returns:
        float: test statistic
    """
    col_a = np.array(a).reshape((-1, 1))
    col_b = np.array(b).reshape((-1, 1))
    forward = mutual_info_regression(col_a, col_b.reshape((-1,)))
    backward = mutual_info_regression(col_b, col_a.reshape((-1,)))
    return (forward + backward) / 2
|
Compute the test statistic
Args:
a (array-like): Variable 1
b (array-like): Variable 2
Returns:
float: test statistic
|
def send(self, stream, msg_or_type, content=None, parent=None, ident=None,
         buffers=None, subheader=None, track=False, header=None):
    """Build and send a message via stream or socket.

    The message format used by this function internally is as follows:

    [ident1,ident2,...,DELIM,HMAC,p_header,p_parent,p_content,
     buffer1,buffer2,...]

    The serialize/unserialize methods convert the nested message dict into this
    format.

    Parameters
    ----------

    stream : zmq.Socket or ZMQStream
        The socket-like object used to send the data.
    msg_or_type : str or Message/dict
        Normally, msg_or_type will be a msg_type unless a message is being
        sent more than once. If a header is supplied, this can be set to
        None and the msg_type will be pulled from the header.

    content : dict or None
        The content of the message (ignored if msg_or_type is a message).
    header : dict or None
        The header dict for the message (ignores if msg_to_type is a message).
    parent : Message or dict or None
        The parent or parent header describing the parent of this message
        (ignored if msg_or_type is a message).
    ident : bytes or list of bytes
        The zmq.IDENTITY routing path.
    subheader : dict or None
        Extra header keys for this message's header (ignored if msg_or_type
        is a message).
    buffers : list or None
        The already-serialized buffers to be appended to the message.
    track : bool
        Whether to track.  Only for use with Sockets, because ZMQStream
        objects cannot track messages.

    Returns
    -------
    msg : dict
        The constructed message.
    (msg,tracker) : (dict, MessageTracker)
        if track=True, then a 2-tuple will be returned,
        the first element being the constructed
        message, and the second being the MessageTracker

    """
    if not isinstance(stream, (zmq.Socket, ZMQStream)):
        raise TypeError("stream must be Socket or ZMQStream, not %r"%type(stream))
    elif track and isinstance(stream, ZMQStream):
        raise TypeError("ZMQStream cannot track messages")
    if isinstance(msg_or_type, (Message, dict)):
        # We got a Message or message dict, not a msg_type so don't
        # build a new Message.
        msg = msg_or_type
    else:
        msg = self.msg(msg_or_type, content=content, parent=parent,
                       subheader=subheader, header=header)
    buffers = [] if buffers is None else buffers
    to_send = self.serialize(msg, ident)
    flag = 0
    if buffers:
        # More frames follow, so keep the multipart send open (SNDMORE) and
        # defer tracking to the final buffer send below.
        flag = zmq.SNDMORE
        _track = False
    else:
        _track=track
    if track:
        tracker = stream.send_multipart(to_send, flag, copy=False, track=_track)
    else:
        tracker = stream.send_multipart(to_send, flag, copy=False)
    # All buffers except the last are sent with SNDMORE still set.
    for b in buffers[:-1]:
        stream.send(b, flag, copy=False)
    if buffers:
        # The last buffer closes the multipart message; its tracker (when
        # requested) supersedes the one from send_multipart above.
        if track:
            tracker = stream.send(buffers[-1], copy=False, track=track)
        else:
            tracker = stream.send(buffers[-1], copy=False)
    # omsg = Message(msg)
    if self.debug:
        pprint.pprint(msg)
        pprint.pprint(to_send)
        pprint.pprint(buffers)
    # The tracker travels inside the returned dict rather than as a tuple.
    msg['tracker'] = tracker
    return msg
|
Build and send a message via stream or socket.
The message format used by this function internally is as follows:
[ident1,ident2,...,DELIM,HMAC,p_header,p_parent,p_content,
buffer1,buffer2,...]
The serialize/unserialize methods convert the nested message dict into this
format.
Parameters
----------
stream : zmq.Socket or ZMQStream
The socket-like object used to send the data.
msg_or_type : str or Message/dict
Normally, msg_or_type will be a msg_type unless a message is being
sent more than once. If a header is supplied, this can be set to
None and the msg_type will be pulled from the header.
content : dict or None
The content of the message (ignored if msg_or_type is a message).
header : dict or None
The header dict for the message (ignores if msg_to_type is a message).
parent : Message or dict or None
The parent or parent header describing the parent of this message
(ignored if msg_or_type is a message).
ident : bytes or list of bytes
The zmq.IDENTITY routing path.
subheader : dict or None
Extra header keys for this message's header (ignored if msg_or_type
is a message).
buffers : list or None
The already-serialized buffers to be appended to the message.
track : bool
Whether to track. Only for use with Sockets, because ZMQStream
objects cannot track messages.
Returns
-------
msg : dict
The constructed message.
(msg,tracker) : (dict, MessageTracker)
if track=True, then a 2-tuple will be returned,
the first element being the constructed
message, and the second being the MessageTracker
|
def solve_potts_approx(y, w, gamma=None, min_size=1, **kw):
    """
    Fit penalized stepwise constant function (Potts model) to data
    approximatively, in linear time.

    Do this by running the exact solver using a small maximum interval
    size, and then combining consecutive intervals together if it
    decreases the cost function.
    """
    n = len(y)
    if n == 0:
        return [], [], []
    # Reuse a caller-provided distance helper when available; building it is
    # the expensive step, so it is cached back into kw for solve_potts.
    mu_dist = kw.get('mu_dist')
    if mu_dist is None:
        mu_dist = get_mu_dist(y, w)
        kw['mu_dist'] = mu_dist
    if gamma is None:
        # Default penalty heuristic scaled by the whole-interval distance.
        mu, dist = mu_dist.mu, mu_dist.dist
        gamma = 3 * dist(0,n-1) * math.log(n) / n
    # Cap the interval size so the exact solver stays ~linear; merge_pieces
    # below repairs splits introduced by this artificial cap.
    if min_size < 10:
        max_size = 20
    else:
        max_size = min_size + 50
    right, values, dists = solve_potts(y, w, gamma, min_size=min_size, max_size=max_size, **kw)
    return merge_pieces(gamma, right, values, dists, mu_dist, max_size=max_size)
|
Fit penalized stepwise constant function (Potts model) to data
approximatively, in linear time.
Do this by running the exact solver using a small maximum interval
size, and then combining consecutive intervals together if it
decreases the cost function.
|
def inside_polygon(x, y, coordinates):
    """
    Implementing the ray casting point in polygon test algorithm
    cf. https://en.wikipedia.org/wiki/Point_in_polygon#Ray_casting_algorithm
    :param x: x coordinate of the point to test
    :param y: y coordinate of the point to test
    :param coordinates: a polygon represented by a list containing two lists (x and y coordinates):
        [ [x1,x2,x3...], [y1,y2,y3...]]
        those lists are actually numpy arrays which are being read directly from a binary file
    :return: true if the point (x,y) lies within the polygon

    Some overflow considerations for the critical part of comparing the line segment slopes:

        (y2 - y) * (x2 - x1) <= delta_y_max * delta_x_max
        (y2 - y1) * (x2 - x) <= delta_y_max * delta_x_max
        delta_y_max * delta_x_max = 180 * 360 < 65 x10^3

    Instead of calculating with float I decided using just ints (by multiplying with 10^7). That gives us:

        delta_y_max * delta_x_max = 180x10^7 * 360x10^7
        delta_y_max * delta_x_max <= 65x10^17

    So these numbers need up to log_2(65 x10^17) ~ 63 bits to be represented! Even though values this big should never
    occur in practice (timezone polygons do not span the whole lng lat coordinate space),
    32bit accuracy hence is not safe to use here!
    Python 2.2 automatically uses the appropriate int data type preventing overflow
    (cf. https://www.python.org/dev/peps/pep-0237/),
    but here the data types are numpy internal static data types. The data is stored as int32
    -> use int64 when comparing slopes!
    """
    contained = False
    # the edge from the last to the first point is checked first
    i = -1
    y1 = coordinates[1][-1]
    y_gt_y1 = y > y1
    for y2 in coordinates[1]:
        y_gt_y2 = y > y2
        # A crossing can only occur on edges whose endpoints straddle the
        # horizontal ray through y (one endpoint above, one below).
        if y_gt_y1:
            if not y_gt_y2:
                x1 = coordinates[0][i]
                x2 = coordinates[0][i + 1]
                # only crossings "right" of the point should be counted
                x1GEx = x <= x1
                x2GEx = x <= x2
                # compare the slope of the line [p1-p2] and [p-p2]
                # depending on the position of p2 this determines whether the polygon edge is right or left of the point
                # to avoid expensive division the divisors (of the slope dy/dx) are brought to the other side
                # ( dy/dx > a == dy > a * dx )
                # int64 accuracy needed here!
                if (x1GEx and x2GEx) or ((x1GEx or x2GEx)
                                         and (int64(y2) - int64(y)) * (int64(x2) - int64(x1)) <= (
                        int64(y2) - int64(y1)) * (int64(x2) - int64(x))):
                    contained = not contained
        else:
            # Symmetric case for edges crossing upward; note the reversed
            # (>=) slope comparison.
            if y_gt_y2:
                x1 = coordinates[0][i]
                x2 = coordinates[0][i + 1]
                # only crossings "right" of the point should be counted
                x1GEx = x <= x1
                x2GEx = x <= x2
                if (x1GEx and x2GEx) or ((x1GEx or x2GEx)
                                         and (int64(y2) - int64(y)) * (int64(x2) - int64(x1)) >= (
                        int64(y2) - int64(y1)) * (int64(x2) - int64(x))):
                    contained = not contained
        # Advance to the next edge: i trails the loop variable by one vertex.
        y1 = y2
        y_gt_y1 = y_gt_y2
        i += 1
    return contained
|
Implementing the ray casting point in polygon test algorithm
cf. https://en.wikipedia.org/wiki/Point_in_polygon#Ray_casting_algorithm
:param x:
:param y:
:param coordinates: a polygon represented by a list containing two lists (x and y coordinates):
[ [x1,x2,x3...], [y1,y2,y3...]]
    those lists are actually numpy arrays which are being read directly from a binary file
:return: true if the point (x,y) lies within the polygon
Some overflow considerations for the critical part of comparing the line segment slopes:
(y2 - y) * (x2 - x1) <= delta_y_max * delta_x_max
(y2 - y1) * (x2 - x) <= delta_y_max * delta_x_max
delta_y_max * delta_x_max = 180 * 360 < 65 x10^3
Instead of calculating with float I decided using just ints (by multiplying with 10^7). That gives us:
delta_y_max * delta_x_max = 180x10^7 * 360x10^7
delta_y_max * delta_x_max <= 65x10^17
So these numbers need up to log_2(65 x10^17) ~ 63 bits to be represented! Even though values this big should never
occur in practice (timezone polygons do not span the whole lng lat coordinate space),
32bit accuracy hence is not safe to use here!
Python 2.2 automatically uses the appropriate int data type preventing overflow
(cf. https://www.python.org/dev/peps/pep-0237/),
but here the data types are numpy internal static data types. The data is stored as int32
-> use int64 when comparing slopes!
|
def set_custom_serializer(self, _type, serializer):
    """
    Assign a serializer for the type.

    :param _type: (Type), the target type of the serializer
    :param serializer: (Serializer), Custom Serializer constructor function
    """
    # Validate before registering; type is checked first, then the
    # serializer is checked against the StreamSerializer contract.
    # (validate_* are assumed to raise on invalid input -- defined elsewhere.)
    validate_type(_type)
    validate_serializer(serializer, StreamSerializer)
    self._custom_serializers[_type] = serializer
|
Assign a serializer for the type.
:param _type: (Type), the target type of the serializer
:param serializer: (Serializer), Custom Serializer constructor function
|
def get_time(self) -> float:
    """
    Get the current time in seconds

    While paused, the reported time stays frozen at the pause point;
    otherwise it is wall-clock time relative to the start, minus the
    accumulated offset.

    Returns:
        The current time in seconds
    """
    if self.pause_time is None:
        return time.time() - self.start_time - self.offset
    return self.pause_time - self.offset - self.start_time
|
Get the current time in seconds
Returns:
The current time in seconds
|
def memory_zones(self):
    """Gets all memory zones supported by the current target.

    Some targets support multiple memory zones.  This function provides the
    ability to get a list of all the memory zones to facilitate using the
    memory zone routing functions.

    Args:
      self (JLink): the ``JLink`` instance

    Returns:
      A list of all the memory zones as ``JLinkMemoryZone`` structures.

    Raises:
      JLinkException: on hardware errors.
    """
    zone_count = self.num_memory_zones()
    if zone_count == 0:
        return []
    zones = (structs.JLinkMemoryZone * zone_count)()
    status = self._dll.JLINK_GetMemZones(zones, zone_count)
    if status < 0:
        raise errors.JLinkException(status)
    return list(zones)
|
Gets all memory zones supported by the current target.
Some targets support multiple memory zones. This function provides the
    ability to get a list of all the memory zones to facilitate using the
memory zone routing functions.
Args:
self (JLink): the ``JLink`` instance
Returns:
A list of all the memory zones as ``JLinkMemoryZone`` structures.
Raises:
JLinkException: on hardware errors.
|
def get_messages(self):
    """
    Retrieves the error or status messages associated with the specified profile.

    Returns:
        dict: Server Profile Health.
    """
    messages_uri = '{}/messages'.format(self.data["uri"])
    return self._helper.do_get(messages_uri)
|
Retrieves the error or status messages associated with the specified profile.
Returns:
dict: Server Profile Health.
|
def set_children(self, value, defined):
    """Set the children of the object.

    Records both the children and whether they were explicitly defined,
    returning self so calls can be chained.
    """
    self.children, self.children_defined = value, defined
    return self
|
Set the children of the object.
|
def get_bibliography(lsst_bib_names=None, bibtex=None):
    """Make a pybtex BibliographyData instance from standard lsst-texmf
    bibliography files and user-supplied bibtex content.

    Parameters
    ----------
    lsst_bib_names : sequence of `str`, optional
        Names of lsst-texmf BibTeX files to include. For example:

        .. code-block:: python

           ['lsst', 'lsst-dm', 'refs', 'books', 'refs_ads']

        Default is `None`, which includes all lsst-texmf bibtex files.
    bibtex : `str`
        BibTeX source content not included in lsst-texmf. This can be content
        from a ``local.bib`` file.

    Returns
    -------
    bibliography : `pybtex.database.BibliographyData`
        A pybtex bibliography database that includes all given sources:
        lsst-texmf bibliographies and ``bibtex``.
    """
    raw_bibtex = get_lsst_bibtex(bibtex_filenames=lsst_bib_names)
    # Parse every lsst-texmf source into a BibliographyData instance.
    databases = [pybtex.database.parse_string(content, 'bibtex')
                 for content in raw_bibtex.values()]
    # User-supplied bibtex (e.g. local.bib) is parsed last.
    if bibtex is not None:
        databases.append(pybtex.database.parse_string(bibtex, 'bibtex'))
    # Fold every later database into the first one, entry by entry.
    merged = databases[0]
    for other in databases[1:]:
        for key, entry in other.entries.items():
            merged.add_entry(key, entry)
    return merged
|
Make a pybtex BibliographyData instance from standard lsst-texmf
bibliography files and user-supplied bibtex content.
Parameters
----------
lsst_bib_names : sequence of `str`, optional
Names of lsst-texmf BibTeX files to include. For example:
.. code-block:: python
['lsst', 'lsst-dm', 'refs', 'books', 'refs_ads']
Default is `None`, which includes all lsst-texmf bibtex files.
bibtex : `str`
BibTeX source content not included in lsst-texmf. This can be content
from a import ``local.bib`` file.
Returns
-------
bibliography : `pybtex.database.BibliographyData`
A pybtex bibliography database that includes all given sources:
lsst-texmf bibliographies and ``bibtex``.
|
def splay(vec):
    """ Determine two lengths to split stride the input vector by

    N2 is the power of two closest to sqrt(len(vec)) from below;
    N1 covers the remaining factor.  Both are returned as integers.
    """
    N2 = 2 ** int(numpy.log2(len(vec)) / 2)
    # Integer division: under Python 3 the original "/" produced a float
    # length, which breaks consumers expecting an integral stride count.
    N1 = len(vec) // N2
    return N1, N2
|
Determine two lengths to split stride the input vector by
|
def extract_notification_payload(process_output):
    """
    Processes the raw output from Gatttool stripping the first line and the
    'Notification handle = 0x000e value: ' from each line

    @param: process_output - the raw output from a listen command of GattTool
    which may look like this:

        Characteristic value was written successfully
        Notification handle = 0x000e value: 54 3d 32 37 2e 33 20 48 3d 32 37 2e 30 00
        Notification handle = 0x000e value: 54 3d 32 37 2e 32 20 48 3d 32 37 2e 32 00

    The first line is dropped; every remaining line that splits into exactly
    two pieces around ': ' contributes its value portion.

    @returns a list containing only the value strings.
    """
    return [pieces[1]
            for pieces in (line.split(": ")
                           for line in process_output.splitlines()[1:])
            if len(pieces) == 2]
|
Processes the raw output from Gatttool stripping the first line and the
'Notification handle = 0x000e value: ' from each line
    @param: process_output - the raw output from a listen command of GattTool
which may look like this:
Characteristic value was written successfully
Notification handle = 0x000e value: 54 3d 32 37 2e 33 20 48 3d 32 37 2e 30 00
Notification handle = 0x000e value: 54 3d 32 37 2e 32 20 48 3d 32 37 2e 32 00
Notification handle = 0x000e value: 54 3d 32 37 2e 33 20 48 3d 32 37 2e 31 00
Notification handle = 0x000e value: 54 3d 32 37 2e 32 20 48 3d 32 37 2e 33 00
Notification handle = 0x000e value: 54 3d 32 37 2e 33 20 48 3d 32 37 2e 31 00
Notification handle = 0x000e value: 54 3d 32 37 2e 31 20 48 3d 32 37 2e 34 00
    This method strips the first line and strips the 'Notification handle = 0x000e value: ' from each line
@returns a processed string only containing the values.
|
def fill_subparser(subparser):
    """Sets up a subparser to download the binarized MNIST dataset files.

    The binarized MNIST dataset files
    (`binarized_mnist_{train,valid,test}.amat`) are downloaded from
    Hugo Larochelle's website [HUGO].

    .. [HUGO] http://www.cs.toronto.edu/~larocheh/public/datasets/
       binarized_mnist/binarized_mnist_{train,valid,test}.amat

    Parameters
    ----------
    subparser : :class:`argparse.ArgumentParser`
        Subparser handling the `binarized_mnist` command.
    """
    split_names = ['train', 'valid', 'test']
    url_template = ('http://www.cs.toronto.edu/~larocheh/public/datasets/'
                    'binarized_mnist/binarized_mnist_{}.amat')
    urls = [url_template.format(split) for split in split_names]
    filenames = ['binarized_mnist_{}.amat'.format(split) for split in split_names]
    subparser.set_defaults(urls=urls, filenames=filenames)
    return default_downloader
|
Sets up a subparser to download the binarized MNIST dataset files.
The binarized MNIST dataset files
(`binarized_mnist_{train,valid,test}.amat`) are downloaded from
Hugo Larochelle's website [HUGO].
.. [HUGO] http://www.cs.toronto.edu/~larocheh/public/datasets/
binarized_mnist/binarized_mnist_{train,valid,test}.amat
Parameters
----------
subparser : :class:`argparse.ArgumentParser`
Subparser handling the `binarized_mnist` command.
|
def _CheckPacketSize(cursor):
    """Checks that MySQL packet size is big enough for expected query size."""
    cur_packet_size = int(_ReadVariable("max_allowed_packet", cursor))
    # Guard clause: a sufficiently large packet size needs no action.
    if cur_packet_size >= MAX_PACKET_SIZE:
        return
    raise Error(
        "MySQL max_allowed_packet of {0} is required, got {1}. "
        "Please set max_allowed_packet={0} in your MySQL config.".format(
            MAX_PACKET_SIZE, cur_packet_size))
|
Checks that MySQL packet size is big enough for expected query size.
|
def get_media_list_by_selector(
    self, media_selector, media_attribute="src"
):
    """Return a list of media.

    Each node matched by ``media_selector`` contributes one media instance,
    built from its ``media_attribute`` URL resolved against this page's
    scheme and host.
    """
    page_url = urlparse.urlparse(self.uri)
    base = "%s://%s" % (page_url.scheme, page_url.netloc)
    media_list = []
    for node in self.parsedpage.get_nodes_by_selector(media_selector):
        # Normalize the attribute URL (defaulting the scheme to http) and
        # resolve it relative to the page's origin.
        relative = urlparse.urlparse(
            node.attrib[media_attribute], scheme="http"
        ).geturl()
        media_list.append(mediafile.get_instance(urlparse.urljoin(base, relative)))
    return media_list
|
Return a list of media.
|
def _expected_condition_find_first_element(self, elements):
    """Try to find sequentially the elements of the list and return the first element found

    :param elements: list of PageElements or element locators as a tuple (locator_type, locator_value) to be found
                     sequentially
    :returns: first element found or None
    :rtype: toolium.pageelements.PageElement or tuple
    """
    from toolium.pageelements.page_element import PageElement
    for candidate in elements:
        try:
            if isinstance(candidate, PageElement):
                # Force a fresh lookup instead of reusing a stale web element.
                candidate._web_element = None
                candidate._find_web_element()
            else:
                self.driver_wrapper.driver.find_element(*candidate)
        except (NoSuchElementException, TypeError):
            continue
        return candidate
    return None
|
Try to find sequentially the elements of the list and return the first element found
:param elements: list of PageElements or element locators as a tuple (locator_type, locator_value) to be found
sequentially
:returns: first element found or None
:rtype: toolium.pageelements.PageElement or tuple
|
def decrypt(private, ciphertext, output):
    """Decrypt ciphertext with private key.

    Requires PRIVATE key file and the CIPHERTEXT encrypted with
    the corresponding public key.

    :param private: file-like object containing the JWK private key JSON
    :param ciphertext: encrypted-number source (passed to ``load_encrypted_number``)
    :param output: file-like object the decrypted value is printed to
    """
    privatekeydata = json.load(private)
    assert 'pub' in privatekeydata
    pub = load_public_key(privatekeydata['pub'])
    log("Loading private key")
    # A decryption-capable key must list the 'decrypt' operation, carry both
    # primes p and q, and be of key type 'DAJ'.
    private_key_error = "Invalid private key"
    assert 'key_ops' in privatekeydata, private_key_error
    assert "decrypt" in privatekeydata['key_ops'], private_key_error
    assert 'p' in privatekeydata, private_key_error
    assert 'q' in privatekeydata, private_key_error
    assert privatekeydata['kty'] == 'DAJ', private_key_error
    # The primes are stored base64-encoded; rebuild the Paillier key from them.
    _p = phe.util.base64_to_int(privatekeydata['p'])
    _q = phe.util.base64_to_int(privatekeydata['q'])
    private_key = phe.PaillierPrivateKey(pub, _p, _q)
    log("Decrypting ciphertext")
    enc = load_encrypted_number(ciphertext, pub)
    out = private_key.decrypt(enc)
    print(out, file=output)
|
Decrypt ciphertext with private key.
Requires PRIVATE key file and the CIPHERTEXT encrypted with
the corresponding public key.
|
async def fetch_message(self, id):
    """|coro|

    Retrieves a single :class:`.Message` from the destination.

    This can only be used by bot accounts.

    Parameters
    ------------
    id: :class:`int`
        The message ID to look for.

    Raises
    --------
    :exc:`.NotFound`
        The specified message was not found.
    :exc:`.Forbidden`
        You do not have the permissions required to get a message.
    :exc:`.HTTPException`
        Retrieving the message failed.

    Returns
    --------
    :class:`.Message`
        The message asked for.
    """
    destination = await self._get_channel()
    raw = await self._state.http.get_message(destination.id, id)
    return self._state.create_message(channel=destination, data=raw)
|
|coro|
Retrieves a single :class:`.Message` from the destination.
This can only be used by bot accounts.
Parameters
------------
id: :class:`int`
The message ID to look for.
Raises
--------
:exc:`.NotFound`
The specified message was not found.
:exc:`.Forbidden`
You do not have the permissions required to get a message.
:exc:`.HTTPException`
Retrieving the message failed.
Returns
--------
:class:`.Message`
The message asked for.
|
def _is_gitted(self):
    """Returns true if the current repodir has been initialized in git *and*
    had a remote origin added *and* has a 'testing' branch.
    """
    from os import waitpid
    from subprocess import Popen, PIPE
    # List the configured remotes of the repository.
    premote = Popen("cd {}; git remote -v".format(self.repodir),
                   shell=True, executable="/bin/bash", stdout=PIPE, stderr=PIPE)
    waitpid(premote.pid, 0)
    remote = premote.stdout.readlines()
    remerr = premote.stderr.readlines()
    # List the local branches.
    pbranch = Popen("cd {}; git branch".format(self.repodir),
                    shell=True, executable="/bin/bash", stdout=PIPE, stderr=PIPE)
    waitpid(pbranch.pid, 0)
    branch = pbranch.stdout.readlines()
    braerr = pbranch.stderr.readlines()
    # NOTE(review): this branch requires *both* stdout and stderr output from
    # `git remote -v`, and the substring tests assume text (str) lines --
    # under Python 3 these pipes yield bytes, so `".git" in remote[0]` would
    # raise TypeError; confirm this path only runs under Python 2 or with
    # text-mode pipes.
    if len(remote) > 0 and len(remerr) > 0 and len(branch) > 0:
        return ((".git" in remote[0] and "fatal" not in remerr[0])
                and any(["testing" in b for b in branch]))
    elif self.testmode and len(remote) == 0 and len(branch) == 0 and len(remerr) == 0:
        # In test mode a completely silent git is accepted as "gitted".
        return True
    else:
        return False
|
Returns true if the current repodir has been initialized in git *and*
had a remote origin added *and* has a 'testing' branch.
|
def select(self, selections):
    '''Make a selection in this
    representation. BallAndStickRenderer support selections of
    atoms and bonds.

    To select the first atom and the first bond you can use the
    following code::

        from chemlab.mviewer.state import Selection
        representation.select({'atoms': Selection([0], system.n_atoms),
                               'bonds': Selection([0], system.n_bonds)})

    Returns the current Selection
    '''
    # Dispatch table: selection kind -> change-notification callback
    # ('box' has no callback to fire).
    notifiers = {'atoms': self.on_atom_selection_changed,
                 'bonds': self.on_bond_selection_changed,
                 'box': None}
    for kind in ('atoms', 'bonds', 'box'):
        if kind in selections:
            self.selection_state[kind] = selections[kind]
            if notifiers[kind] is not None:
                notifiers[kind]()
    return self.selection_state
|
Make a selection in this
representation. BallAndStickRenderer support selections of
atoms and bonds.
To select the first atom and the first bond you can use the
following code::
from chemlab.mviewer.state import Selection
representation.select({'atoms': Selection([0], system.n_atoms),
'bonds': Selection([0], system.n_bonds)})
Returns the current Selection
|
def _deserialize_class(cls, input_cls_name, trusted, strict):
"""Returns the HasProperties class to use for deserialization"""
if not input_cls_name or input_cls_name == cls.__name__:
return cls
if trusted and input_cls_name in cls._REGISTRY:
return cls._REGISTRY[input_cls_name]
if strict:
raise ValueError(
'Class name {} from deserialization input dictionary does '
'not match input class {}'.format(input_cls_name, cls.__name__)
)
return cls
|
Returns the HasProperties class to use for deserialization
|
def get_version(dunder_file):
    """Returns a version string for the current package, derived
    either from git or from a .version file.

    This function is expected to run in two contexts. In a development
    context, where .git/ exists, the version is pulled from git tags.
    Using the BuildPyCommand and SDistCommand classes for cmdclass in
    setup.py will write a .version file into any dist.

    In an installed context, the .version file written at dist build
    time is the source of version information.
    """
    path = abspath(expanduser(dirname(dunder_file)))
    try:
        # Prefer the baked-in .version file; fall back to git tags.
        version = _get_version_from_version_file(path)
        if not version:
            version = _get_version_from_git_tag(path)
        return version
    except CalledProcessError as e:
        log.warn(repr(e))
    except Exception as e:
        log.exception(e)
    return None
|
Returns a version string for the current package, derived
either from git or from a .version file.
This function is expected to run in two contexts. In a development
context, where .git/ exists, the version is pulled from git tags.
Using the BuildPyCommand and SDistCommand classes for cmdclass in
setup.py will write a .version file into any dist.
In an installed context, the .version file written at dist build
time is the source of version information.
|
def int_to_varbyte(self, value):
    """Convert an integer into a variable length byte string.

    How it works: the bytes are stored in big-endian (significant bit
    first), the highest bit of the byte (mask 0x80) is set when there
    are more bytes following. The remaining 7 bits (mask 0x7F) are used
    to store the value.
    """
    # Number of 7-bit groups needed.  bit_length() replaces the previous
    # floating-point log() computation, which could mis-count the length
    # near exact powers of 0x80 due to rounding.
    length = max(1, (value.bit_length() + 6) // 7)
    # Split into 7-bit groups, most significant group first.
    encoded = [(value >> (7 * i)) & 0x7F for i in reversed(range(length))]
    # Set the continuation bit on every byte but the last.
    encoded = [group | 0x80 for group in encoded[:-1]] + encoded[-1:]
    return pack('%sB' % len(encoded), *encoded)
|
Convert an integer into a variable length byte.
How it works: the bytes are stored in big-endian (significant bit
first), the highest bit of the byte (mask 0x80) is set when there
are more bytes following. The remaining 7 bits (mask 0x7F) are used
to store the value.
|
def load_edited_source(self, source, good_cb=None, bad_cb=None, filename=None):
    """
    Load changed code into the execution environment.

    Until the code is executed correctly, it will be
    in the 'tenuous' state.

    :param source: new source text to stage for execution
    :param good_cb: callback stored for use on successful execution
    :param bad_cb: callback invoked (via call_bad_cb) with a traceback
        string when the source fails to compile
    :param filename: optional new filename to associate with the source
    """
    with LiveExecution.lock:
        self.good_cb = good_cb
        self.bad_cb = bad_cb
        try:
            # Compile-check only; the trailing newlines avoid syntax errors
            # for sources that end mid-block.
            compile(source + '\n\n', filename or self.filename, "exec")
            self.edited_source = source
        except Exception as e:
            # On a compile failure, clear any staged source and report the
            # traceback through the bad callback (if one was given).
            if bad_cb:
                self.edited_source = None
                tb = traceback.format_exc()
                self.call_bad_cb(tb)
            return
        # Only adopt the new filename once the source compiled cleanly.
        if filename is not None:
            self.filename = filename
|
Load changed code into the execution environment.
Until the code is executed correctly, it will be
in the 'tenuous' state.
|
def ctype_class(self):
    """Return (and memoize) the ctypes Structure class for this struct type.

    Returns:
        A ctypes.Structure subclass with one field per entry in
        ``self.field_types``, cached per unique set of field types.
    """
    def struct_factory(field_types):
        """Build a ctypes Structure subclass for the given field types.

        Args:
            field_types: sequence of Weld types providing ``ctype_class``

        Returns:
            The generated Structure subclass.
        """
        class Struct(Structure):
            """Anonymous ctypes struct mirroring the Weld field types."""
            _fields_ = [(str(i), t.ctype_class)
                        for i, t in enumerate(field_types)]
        return Struct

    key = frozenset(self.field_types)
    # Fix: the cache key was previously *checked* against WeldVec._singletons
    # but *stored* in WeldStruct._singletons, so the memoization never hit
    # and a fresh Struct class was generated on every access.
    if key not in WeldStruct._singletons:
        WeldStruct._singletons[key] = struct_factory(self.field_types)
    return WeldStruct._singletons[key]
|
Summary
Returns:
TYPE: Description
|
def get_raw(config, backend_section, arthur):
    """Execute the raw phase for a given backend section, optionally using Arthur

    :param config: a Mordred config object
    :param backend_section: the backend section where the raw phase is executed
    :param arthur: if true, it enables Arthur to collect the raw data
    """
    # Pick the collection task implementation based on the Arthur flag.
    task_cls = TaskRawDataArthurCollection if arthur else TaskRawDataCollection
    task = task_cls(config, backend_section=backend_section)
    TaskProjects(config).execute()
    try:
        task.execute()
        logging.info("Loading raw data finished!")
    except Exception as e:
        logging.error(str(e))
        sys.exit(-1)
|
Execute the raw phase for a given backend section, optionally using Arthur
:param config: a Mordred config object
:param backend_section: the backend section where the raw phase is executed
:param arthur: if true, it enables Arthur to collect the raw data
|
def scale_in(self, blocks=None, block_ids=None):
    """Scale in the number of active blocks by the specified amount.

    The scale-in method here is very rude: it does not give the workers
    the opportunity to finish current tasks or clean up. This is tracked
    in issue #530.

    Parameters
    ----------
    blocks : int, optional
        Number of blocks to terminate and scale in by. Ignored when
        ``block_ids`` is supplied.
    block_ids : list, optional
        Specific block ids to terminate; takes precedence over ``blocks``.

    Returns
    -------
    The provider's cancel response, or None when no provider is configured.
    """
    # BUG FIX: block_ids previously defaulted to a shared mutable list ([]).
    if block_ids:
        block_ids_to_kill = block_ids
    else:
        block_ids_to_kill = list(self.blocks.keys())[:blocks]
    # Hold each block so no new work is scheduled onto it.
    for block_id in block_ids_to_kill:
        self._hold_block(block_id)
    # Now kill via provider.
    to_kill = [self.blocks.pop(bid) for bid in block_ids_to_kill]
    if self.provider:
        return self.provider.cancel(to_kill)
    # BUG FIX: previously `return r` raised NameError when self.provider
    # was falsy; fall through to an explicit None instead.
    return None
|
Scale in the number of active blocks by specified amount.
The scale in method here is very rude. It doesn't give the workers
the opportunity to finish current tasks or cleanup. This is tracked
in issue #530
Parameters
----------
blocks : int
Number of blocks to terminate and scale_in by
block_ids : list
List of specific block ids to terminate. Optional
Raises:
NotImplementedError
|
def get_threads(session, query):
    """
    Get one or more threads
    """
    # GET /api/messages/0.1/threads
    response = make_get_request(session, 'threads', params_data=query)
    payload = response.json()
    # Anything other than a 200 is surfaced as a domain exception.
    if response.status_code != 200:
        raise ThreadsNotFoundException(
            message=payload['message'],
            error_code=payload['error_code'],
            request_id=payload['request_id']
        )
    return payload['result']
|
Get one or more threads
|
def new_fills_report(self,
                     start_date,
                     end_date,
                     account_id=None,
                     product_id='BTC-USD',
                     format=None,
                     email=None):
    """`<https://docs.exchange.coinbase.com/#create-a-new-report>`_

    Request generation of a 'fills' report via the shared report helper.
    """
    # Delegate to the generic report builder with report type 'fills'.
    return self._new_report(start_date, 'fills', end_date, account_id,
                            product_id, format, email)
|
`<https://docs.exchange.coinbase.com/#create-a-new-report>`_
|
def check_array(array, accept_sparse=None, dtype="numeric", order=None,
                copy=False, force_all_finite=True, ensure_2d=True,
                allow_nd=False, ensure_min_samples=1, ensure_min_features=1):
    """Input validation on an array, list, sparse matrix or similar.
    By default, the input is converted to an at least 2D numpy array.
    If the dtype of the array is object, attempt converting to float,
    raising on failure.
    Parameters
    ----------
    array : object
        Input object to check / convert.
    accept_sparse : string, list of string or None (default=None)
        String[s] representing allowed sparse matrix formats, such as 'csc',
        'csr', etc. None means that sparse matrix input will raise an error.
        If the input is sparse but not in the allowed format, it will be
        converted to the first listed format.
    dtype : string, type or None (default="numeric")
        Data type of result. If None, the dtype of the input is preserved.
        If "numeric", dtype is preserved unless array.dtype is object.
    order : 'F', 'C' or None (default=None)
        Whether an array will be forced to be fortran or c-style.
    copy : boolean (default=False)
        Whether a forced copy will be triggered. If copy=False, a copy might
        be triggered by a conversion.
    force_all_finite : boolean (default=True)
        Whether to raise an error on np.inf and np.nan in X.
    ensure_2d : boolean (default=True)
        Whether to make X at least 2d.
    allow_nd : boolean (default=False)
        Whether to allow X.ndim > 2.
    ensure_min_samples : int (default=1)
        Make sure that the array has a minimum number of samples in its first
        axis (rows for a 2D array). Setting to 0 disables this check.
    ensure_min_features : int (default=1)
        Make sure that the 2D array has some minimum number of features
        (columns). The default value of 1 rejects empty datasets.
        This check is only enforced when the input data has effectively 2
        dimensions or is originally 1D and ``ensure_2d`` is True. Setting to 0
        disables this check.
    Returns
    -------
    X_converted : object
        The converted and validated X.
    """
    # Normalize a single allowed-format string into a one-element list.
    if isinstance(accept_sparse, str):
        accept_sparse = [accept_sparse]

    # store whether originally we wanted numeric dtype
    dtype_numeric = dtype == "numeric"
    if sp.issparse(array):
        # Sparse path: format/dtype/finite checks are handled by the helper.
        if dtype_numeric:
            dtype = None
        array = _ensure_sparse_format(array, accept_sparse, dtype, order,
                                      copy, force_all_finite)
    else:
        if ensure_2d:
            array = np.atleast_2d(array)
        if dtype_numeric:
            if hasattr(array, "dtype") and getattr(array.dtype, "kind", None) == "O":
                # if input is object, convert to float.
                dtype = np.float64
            else:
                # Preserve the input dtype for non-object numeric input.
                dtype = None
        array = np.array(array, dtype=dtype, order=order, copy=copy)
        # make sure we actually converted to numeric:
        if dtype_numeric and array.dtype.kind == "O":
            array = array.astype(np.float64)
        if not allow_nd and array.ndim >= 3:
            raise ValueError("Found array with dim %d. Expected <= 2" %
                             array.ndim)
        if force_all_finite:
            _assert_all_finite(array)

    # Human-readable shape used in the error messages below.
    shape_repr = _shape_repr(array.shape)
    if ensure_min_samples > 0:
        n_samples = _num_samples(array)
        if n_samples < ensure_min_samples:
            raise ValueError("Found array with %d sample(s) (shape=%s) while a"
                             " minimum of %d is required."
                             % (n_samples, shape_repr, ensure_min_samples))
    if ensure_min_features > 0 and array.ndim == 2:
        n_features = array.shape[1]
        if n_features < ensure_min_features:
            raise ValueError("Found array with %d feature(s) (shape=%s) while"
                             " a minimum of %d is required."
                             % (n_features, shape_repr, ensure_min_features))
    return array
|
Input validation on an array, list, sparse matrix or similar.
By default, the input is converted to an at least 2D numpy array.
If the dtype of the array is object, attempt converting to float,
raising on failure.
Parameters
----------
array : object
Input object to check / convert.
accept_sparse : string, list of string or None (default=None)
String[s] representing allowed sparse matrix formats, such as 'csc',
'csr', etc. None means that sparse matrix input will raise an error.
If the input is sparse but not in the allowed format, it will be
converted to the first listed format.
dtype : string, type or None (default="numeric")
Data type of result. If None, the dtype of the input is preserved.
If "numeric", dtype is preserved unless array.dtype is object.
order : 'F', 'C' or None (default=None)
Whether an array will be forced to be fortran or c-style.
copy : boolean (default=False)
Whether a forced copy will be triggered. If copy=False, a copy might
be triggered by a conversion.
force_all_finite : boolean (default=True)
Whether to raise an error on np.inf and np.nan in X.
ensure_2d : boolean (default=True)
Whether to make X at least 2d.
allow_nd : boolean (default=False)
Whether to allow X.ndim > 2.
ensure_min_samples : int (default=1)
Make sure that the array has a minimum number of samples in its first
axis (rows for a 2D array). Setting to 0 disables this check.
ensure_min_features : int (default=1)
Make sure that the 2D array has some minimum number of features
(columns). The default value of 1 rejects empty datasets.
This check is only enforced when the input data has effectively 2
dimensions or is originally 1D and ``ensure_2d`` is True. Setting to 0
disables this check.
Returns
-------
X_converted : object
The converted and validated X.
|
def stdev(requestContext, seriesList, points, windowTolerance=0.1):
    """
    Takes one metric or a wildcard seriesList followed by an integer N.
    Draw the Standard Deviation of all metrics passed for the past N
    datapoints. If the ratio of null points in the window is greater than
    windowTolerance, skip the calculation. The default for windowTolerance is
    0.1 (up to 10% of points in the window can be missing). Note that if this
    is set to 0.0, it will cause large gaps in the output anywhere a single
    point is missing.
    Example::
        &target=stdev(server*.instance*.threads.busy,30)
        &target=stdev(server*.instance*.cpu.system,30,0.0)
    """
    # For this we take the standard deviation in terms of the moving average
    # and the moving average of series squares.
    for seriesIndex, series in enumerate(seriesList):
        stdevSeries = TimeSeries("stdev(%s,%d)" % (series.name, int(points)),
                                 series.start, series.end, series.step, [])
        stdevSeries.pathExpression = "stdev(%s,%d)" % (series.name,
                                                       int(points))

        # Running window state: number of non-None points currently in the
        # window, the windowed sum, and the windowed sum of squares.
        validPoints = 0
        currentSum = 0
        currentSumOfSquares = 0
        for index, newValue in enumerate(series):
            # Mark whether we've reached our window size - dont drop points
            # out otherwise
            if index < points:
                bootstrapping = True
                droppedValue = None
            else:
                bootstrapping = False
                droppedValue = series[index - points]

            # Track non-None points in window
            if not bootstrapping and droppedValue is not None:
                validPoints -= 1
            if newValue is not None:
                validPoints += 1

            # Remove the value that just dropped out of the window
            if not bootstrapping and droppedValue is not None:
                currentSum -= droppedValue
                currentSumOfSquares -= droppedValue**2

            # Add in the value that just popped in the window
            if newValue is not None:
                currentSum += newValue
                currentSumOfSquares += newValue**2

            if (
                validPoints > 0 and
                float(validPoints) / points >= windowTolerance
            ):
                try:
                    # Population std-dev from the running sums. Floating
                    # point rounding can make the radicand slightly
                    # negative, in which case sqrt raises ValueError and
                    # None is emitted for this point.
                    deviation = math.sqrt(validPoints * currentSumOfSquares -
                                          currentSum**2) / validPoints
                except ValueError:
                    deviation = None
                stdevSeries.append(deviation)
            else:
                # Too many nulls in the window: skip the calculation.
                stdevSeries.append(None)

        seriesList[seriesIndex] = stdevSeries
    return seriesList
|
Takes one metric or a wildcard seriesList followed by an integer N.
Draw the Standard Deviation of all metrics passed for the past N
datapoints. If the ratio of null points in the window is greater than
windowTolerance, skip the calculation. The default for windowTolerance is
0.1 (up to 10% of points in the window can be missing). Note that if this
is set to 0.0, it will cause large gaps in the output anywhere a single
point is missing.
Example::
&target=stdev(server*.instance*.threads.busy,30)
&target=stdev(server*.instance*.cpu.system,30,0.0)
|
def compileSass(sassPath):
    '''
    Compile a sass file (and dependencies) into a single css file.

    The output .css file is written next to the input with the same
    basename. Requires the ``sass`` executable on PATH. The exit status
    of the compiler is not checked (best-effort, as before).
    '''
    cssPath = os.path.splitext(sassPath)[0] + ".css"
    print("Compiling Sass")
    # subprocess.run waits for completion; it replaces the previous
    # Popen()+wait() pair and the dead commented-out subprocess.call.
    subprocess.run(["sass", sassPath, cssPath])
|
Compile a sass file (and dependencies) into a single css file.
|
def get_overlapping_ranges(self, collection_link, partition_key_ranges):
    '''
    Given a partition key range and a collection,
    returns the list of overlapping partition key ranges
    :param str collection_link:
        The name of the collection.
    :param list partition_key_range:
        List of partition key range.
    :return:
        List of overlapping partition key ranges.
    :rtype: list
    '''
    client = self._documentClient
    collection_id = base.GetResourceIdOrFullNameFromLink(collection_link)

    routing_map = self._collection_routing_map_by_item.get(collection_id)
    if routing_map is None:
        # Cache miss: read the collection's partition key ranges.
        pk_ranges = list(client._ReadPartitionKeyRanges(collection_link))
        # for large collections, a split may complete between the read partition key ranges query page responses,
        # causing the partitionKeyRanges to have both the children ranges and their parents. Therefore, we need
        # to discard the parent ranges to have a valid routing map.
        pk_ranges = _PartitionKeyRangeCache._discard_parent_ranges(pk_ranges)
        routing_map = _CollectionRoutingMap.CompleteRoutingMap(
            [(r, True) for r in pk_ranges], collection_id)
        self._collection_routing_map_by_item[collection_id] = routing_map

    return routing_map.get_overlapping_ranges(partition_key_ranges)
|
Given a partition key range and a collection,
returns the list of overlapping partition key ranges
:param str collection_link:
The name of the collection.
:param list partition_key_range:
List of partition key range.
:return:
List of overlapping partition key ranges.
:rtype: list
|
def _friendlyAuthError(fn):
    ''' Decorator to print a friendly you-are-not-authorised message. Use
        **outside** the _handleAuth decorator to only print the message after
        the user has been given a chance to login. '''
    @functools.wraps(fn)
    def wrapped(*args, **kwargs):
        try:
            return fn(*args, **kwargs)
        except requests.exceptions.HTTPError as e:
            status = e.response.status_code
            body = e.response.text
            if status == requests.codes.unauthorized: #pylint: disable=no-member
                logger.error('insufficient permission')
            elif status == requests.codes.bad and 'jwt has expired' in body.lower(): #pylint: disable=no-member
                logger.error('server returned status %s: %s', status, body)
                logger.error('Check that your system clock is set accurately!')
            else:
                logger.error('server returned status %s: %s', status, body)
            # Always propagate the original HTTP error after logging.
            raise
    return wrapped
|
Decorator to print a friendly you-are-not-authorised message. Use
**outside** the _handleAuth decorator to only print the message after
the user has been given a chance to login.
|
def extract_kwargs(names:Collection[str], kwargs:KWArgs):
    "Pop every key of `names` present in `kwargs`; return (extracted, remaining kwargs)."
    extracted = {name: kwargs.pop(name) for name in names if name in kwargs}
    return extracted, kwargs
|
Extract the keys in `names` from the `kwargs`.
|
def unregister_counter_nonzero(network):
    """
    Remove the nonzero-counter hooks previously installed on a network.

    :param network: The network previously registered via `register_nonzero_counter`
    """
    if not hasattr(network, "__counter_nonzero_handles__"):
        raise ValueError("register_counter_nonzero was not called for this network")

    # Detach every hook handle, then drop the handle list itself.
    for handle in network.__counter_nonzero_handles__:
        handle.remove()
    delattr(network, "__counter_nonzero_handles__")

    # Clear the per-module counters the hooks left behind.
    for module in network.modules():
        if hasattr(module, "__counter_nonzero__"):
            delattr(module, "__counter_nonzero__")
|
Unregister nonzero counter hooks
:param network: The network previously registered via `register_nonzero_counter`
|
def cli(ctx, feature_id, organism="", sequence=""):
    """[CURRENTLY BROKEN] Get the sequence of a feature

    Output:
    A standard apollo feature dictionary ({"features": [{...}]})
    """
    annotations = ctx.gi.annotations
    return annotations.get_feature_sequence(
        feature_id, organism=organism, sequence=sequence)
|
[CURRENTLY BROKEN] Get the sequence of a feature
Output:
A standard apollo feature dictionary ({"features": [{...}]})
|
def ParseOptions(self, options):
    """Parses the options.
    Args:
      options (argparse.Namespace): command line arguments.
    Raises:
      BadConfigOption: if the options are invalid.
    """
    # The extraction options are dependent on the data location.
    helpers_manager.ArgumentHelperManager.ParseOptions(
        options, self, names=['data_location'])
    self._ReadParserPresetsFromFile()

    # Check the list options first otherwise required options will raise.
    argument_helper_names = ['hashers', 'parsers', 'profiling']
    helpers_manager.ArgumentHelperManager.ParseOptions(
        options, self, names=argument_helper_names)
    self._ParseTimezoneOption(options)

    # 'list' is a sentinel value: the user asked to list the available
    # hashers/parsers/profilers instead of running an extraction.
    self.list_hashers = self._hasher_names_string == 'list'
    self.list_parsers_and_plugins = self._parser_filter_expression == 'list'
    self.list_profilers = self._profilers == 'list'
    self.show_info = getattr(options, 'show_info', False)
    self.show_troubleshooting = getattr(options, 'show_troubleshooting', False)
    if getattr(options, 'use_markdown', False):
        self._views_format_type = views.ViewsFactory.FORMAT_TYPE_MARKDOWN
    self.dependencies_check = getattr(options, 'dependencies_check', True)

    # Informational modes short-circuit: no further option parsing needed.
    if (self.list_hashers or self.list_parsers_and_plugins or
        self.list_profilers or self.list_timezones or self.show_info or
        self.show_troubleshooting):
        return

    self._ParseInformationalOptions(options)
    argument_helper_names = [
        'artifact_definitions', 'artifact_filters', 'extraction',
        'filter_file', 'status_view', 'storage_file', 'storage_format',
        'text_prepend', 'yara_rules']
    helpers_manager.ArgumentHelperManager.ParseOptions(
        options, self, names=argument_helper_names)
    self._ParseLogFileOptions(options)
    self._ParseStorageMediaOptions(options)
    self._ParsePerformanceOptions(options)
    self._ParseProcessingOptions(options)
    if not self._storage_file_path:
        raise errors.BadConfigOption('Missing storage file option.')
    serializer_format = getattr(
        options, 'serializer_format', definitions.SERIALIZER_FORMAT_JSON)
    if serializer_format not in definitions.SERIALIZER_FORMATS:
        raise errors.BadConfigOption(
            'Unsupported storage serializer format: {0:s}.'.format(
                serializer_format))
    self._storage_serializer_format = serializer_format
    # TODO: where is this defined?
    self._operating_system = getattr(options, 'os', None)
    if self._operating_system:
        # NOTE(review): mount path is read from the 'filename' option when an
        # operating system override is given — confirm against callers.
        self._mount_path = getattr(options, 'filename', None)
    helpers_manager.ArgumentHelperManager.ParseOptions(
        options, self, names=['status_view'])
    self._enable_sigsegv_handler = getattr(options, 'sigsegv_handler', False)
    self._EnforceProcessMemoryLimit(self._process_memory_limit)
|
Parses the options.
Args:
options (argparse.Namespace): command line arguments.
Raises:
BadConfigOption: if the options are invalid.
|
def get_documents_in_database(self, with_id=True):
    """Gets all documents in database

    :param with_id: True iff each document should also come with its id
    :return: List of documents in collection in database
    """
    docs = []
    # Concatenate the documents of every collection, in collection order.
    for name in self.get_collection_names():
        docs.extend(self.get_documents_in_collection(name, with_id=with_id))
    return docs
|
Gets all documents in database
:param with_id: True iff each document should also come with its id
:return: List of documents in collection in database
|
def default_if_empty(self, default):
    '''If the source sequence is empty return a single element sequence
    containing the supplied default value, otherwise return the source
    sequence unchanged.
    Note: This method uses deferred execution.
    Args:
        default: The element to be returned if the source sequence is empty.
    Returns:
        The source sequence, or if the source sequence is empty an sequence
        containing a single element with the supplied default value.
    Raises:
        ValueError: If the Queryable has been closed.
    '''
    if self.closed():
        raise ValueError("Attempt to call default_if_empty() on a "
                         "closed Queryable.")
    # Deferred: the generator is only consumed when the result is iterated.
    generator = self._generate_default_if_empty_result(default)
    return self._create(generator)
|
If the source sequence is empty return a single element sequence
containing the supplied default value, otherwise return the source
sequence unchanged.
Note: This method uses deferred execution.
Args:
default: The element to be returned if the source sequence is empty.
Returns:
The source sequence, or if the source sequence is empty an sequence
containing a single element with the supplied default value.
Raises:
ValueError: If the Queryable has been closed.
|
def from_poppy_creature(cls, poppy, motors, passiv, tip,
                        reversed_motors=None):
    """ Creates a kinematic chain from the motors of a Poppy Creature.
    :param poppy: PoppyCreature used
    :param list motors: list of all motors that compose the kinematic chain
    :param list passiv: list of motors which are passive in the chain (they will not move)
    :param list tip: [x, y, z] translation of the tip of the chain (in meters)
    :param list reversed_motors: list of motors that should be manually reversed (due to a problem in the URDF?)
    """
    # BUG FIX: avoid a shared mutable default argument ([]).
    if reversed_motors is None:
        reversed_motors = []
    chain_elements = get_chain_from_joints(poppy.urdf_file,
                                           [m.name for m in motors])
    # Active-links mask: base link inactive, passive motors inactive,
    # the appended tip link active.
    activ = [False] + [m not in passiv for m in motors] + [True]
    chain = cls.from_urdf_file(poppy.urdf_file,
                               base_elements=chain_elements,
                               last_link_vector=tip,
                               active_links_mask=activ)
    chain.motors = [getattr(poppy, l.name) for l in chain.links[1:-1]]
    for m, l in zip(chain.motors, chain.links[1:-1]):
        # Force an access to angle limit to retrieve real values
        # This is quite an ugly fix and should be handled better
        m.angle_limit
        bounds = m.__dict__['lower_limit'], m.__dict__['upper_limit']
        l.bounds = tuple(map(rad2deg, bounds))
    # +1/-1 per motor: used to flip joint angles for mis-oriented URDF joints.
    chain._reversed = array([(-1 if m in reversed_motors else 1)
                             for m in motors])
    return chain
|
Creates a kinematic chain from the motors of a Poppy Creature.
:param poppy: PoppyCreature used
:param list motors: list of all motors that compose the kinematic chain
:param list passiv: list of motors which are passive in the chain (they will not move)
:param list tip: [x, y, z] translation of the tip of the chain (in meters)
:param list reversed_motors: list of motors that should be manually reversed (due to a problem in the URDF?)
|
def get_field_from_args_or_session(config, args, field_name):
    """
    Resolve ``field_name`` from different sources, in priority order:
    - command line argument (--<field_name>)
    - current session configuration (default_<field_name>)

    :raises Exception: when neither source provides a value.
    """
    value = getattr(args, field_name, None)
    # Values such as 0 (e.g. a wallet index) are legitimate, so compare
    # identity against None instead of truthiness or ``!= None``.
    if value is not None:
        return value
    value = config.get_session_field("default_%s" % field_name,
                                     exception_if_not_found=False)
    if value:
        return value
    raise Exception("Fail to get default_%s from config, should specify %s via --%s parameter"
                    % (field_name, field_name, field_name.replace("_", "-")))
|
We try to get field_name from different sources:
The order of priority is the following:
- command line argument (--<field_name>)
- current session configuration (default_<field_name>)
|
def validate(self, expectations_config=None, evaluation_parameters=None, catch_exceptions=True, result_format=None, only_return_failures=False):
    """Generates a JSON-formatted report describing the outcome of all expectations.
    Use the default expectations_config=None to validate the expectations config associated with the DataAsset.
    Args:
        expectations_config (json or None): \
            If None, uses the expectations config generated with the DataAsset during the current session. \
            If a JSON file, validates those expectations.
        evaluation_parameters (dict or None): \
            If None, uses the evaluation_parameters from the expectations_config provided or as part of the data_asset.
            If a dict, uses the evaluation parameters in the dictionary.
        catch_exceptions (boolean): \
            If True, exceptions raised by tests will not end validation and will be described in the returned report.
        result_format (string or None): \
            If None, uses the default value ('BASIC' or as specified). \
            If string, the returned expectation output follows the specified format ('BOOLEAN_ONLY','BASIC', etc.).
        only_return_failures (boolean): \
            If True, expectation results are only returned when ``success = False`` \
    Returns:
        A JSON-formatted dictionary containing a list of the validation results. \
        An example of the returned format::
        {
          "results": [
            {
              "unexpected_list": [unexpected_value_1, unexpected_value_2],
              "expectation_type": "expect_*",
              "kwargs": {
                "column": "Column_Name",
                "output_format": "SUMMARY"
              },
              "success": true,
              "raised_exception": false,
              "exception_traceback": null
            },
            {
              ... (Second expectation results)
            },
            ... (More expectations results)
          ],
          "success": true,
          "statistics": {
            "evaluated_expectations": n,
            "successful_expectations": m,
            "unsuccessful_expectations": n - m,
            "success_percent": m / n
          }
        }
    Notes:
        Warns if the configuration object was built with a different version of great expectations than the current environment, \
        or if no version was found in the configuration file.
    Raises:
        AttributeError - if 'catch_exceptions'=None and an expectation throws an AttributeError
    """
    results = []

    if expectations_config is None:
        expectations_config = self.get_expectations_config(
            discard_failed_expectations=False,
            discard_result_format_kwargs=False,
            discard_include_configs_kwargs=False,
            discard_catch_exceptions_kwargs=False,
        )
    elif isinstance(expectations_config, string_types):
        # BUG FIX: use a context manager so the config file handle is
        # closed deterministically instead of leaking until GC.
        with open(expectations_config, 'r') as config_file:
            expectations_config = json.load(config_file)

    if evaluation_parameters is None:
        # Use evaluation parameters from the (maybe provided) config
        if "evaluation_parameters" in expectations_config:
            evaluation_parameters = expectations_config["evaluation_parameters"]

    # Warn if our version is different from the version in the configuration
    try:
        if expectations_config['meta']['great_expectations.__version__'] != __version__:
            warnings.warn(
                "WARNING: This configuration object was built using version %s of great_expectations, but is currently being validated by version %s." % (expectations_config['meta']['great_expectations.__version__'], __version__))
    except KeyError:
        warnings.warn(
            "WARNING: No great_expectations version found in configuration object.")

    for expectation in expectations_config['expectations']:
        try:
            # Each expectation_type names an expectation method on self.
            expectation_method = getattr(
                self, expectation['expectation_type'])

            if result_format is not None:
                expectation['kwargs'].update({'result_format': result_format})

            # Counting the number of unexpected values can be expensive when there is a large
            # number of np.nan values.
            # This only happens on expect_column_values_to_not_be_null expectations.
            # Since there is no reason to look for most common unexpected values in this case,
            # we will instruct the result formatting method to skip this step.
            if expectation['expectation_type'] in ['expect_column_values_to_not_be_null',
                                                   'expect_column_values_to_be_null']:
                expectation['kwargs']['result_format'] = parse_result_format(expectation['kwargs']['result_format'])
                expectation['kwargs']['result_format']['partial_unexpected_count'] = 0

            # A missing parameter should raise a KeyError
            evaluation_args = self._build_evaluation_parameters(
                expectation['kwargs'], evaluation_parameters)

            result = expectation_method(
                catch_exceptions=catch_exceptions,
                **evaluation_args
            )
        except Exception as err:
            if catch_exceptions:
                raised_exception = True
                exception_traceback = traceback.format_exc()

                result = {
                    "success": False,
                    "exception_info": {
                        "raised_exception": raised_exception,
                        "exception_traceback": exception_traceback,
                        "exception_message": str(err)
                    }
                }
            else:
                # Re-raise with the original traceback intact.
                raise

        # if include_config:
        result["expectation_config"] = copy.deepcopy(expectation)

        # Add an empty exception_info object if no exception was caught
        if catch_exceptions and ('exception_info' not in result):
            result["exception_info"] = {
                "raised_exception": False,
                "exception_traceback": None,
                "exception_message": None
            }

        results.append(result)

    statistics = _calc_validation_statistics(results)

    if only_return_failures:
        results = [exp for exp in results if not exp["success"]]

    result = {
        "results": results,
        "success": statistics.success,
        "statistics": {
            "evaluated_expectations": statistics.evaluated_expectations,
            "successful_expectations": statistics.successful_expectations,
            "unsuccessful_expectations": statistics.unsuccessful_expectations,
            "success_percent": statistics.success_percent,
        }
    }

    if evaluation_parameters is not None:
        result.update({"evaluation_parameters": evaluation_parameters})

    return result
|
Generates a JSON-formatted report describing the outcome of all expectations.
Use the default expectations_config=None to validate the expectations config associated with the DataAsset.
Args:
expectations_config (json or None): \
If None, uses the expectations config generated with the DataAsset during the current session. \
If a JSON file, validates those expectations.
evaluation_parameters (dict or None): \
If None, uses the evaluation_parameters from the expectations_config provided or as part of the data_asset.
If a dict, uses the evaluation parameters in the dictionary.
catch_exceptions (boolean): \
If True, exceptions raised by tests will not end validation and will be described in the returned report.
result_format (string or None): \
If None, uses the default value ('BASIC' or as specified). \
If string, the returned expectation output follows the specified format ('BOOLEAN_ONLY','BASIC', etc.).
include_config (boolean): \
If True, the returned results include the config information associated with each expectation, if \
it exists.
only_return_failures (boolean): \
If True, expectation results are only returned when ``success = False`` \
Returns:
A JSON-formatted dictionary containing a list of the validation results. \
An example of the returned format::
{
"results": [
{
"unexpected_list": [unexpected_value_1, unexpected_value_2],
"expectation_type": "expect_*",
"kwargs": {
"column": "Column_Name",
"output_format": "SUMMARY"
},
"success": true,
"raised_exception: false.
"exception_traceback": null
},
{
... (Second expectation results)
},
... (More expectations results)
],
"success": true,
"statistics": {
"evaluated_expectations": n,
"successful_expectations": m,
"unsuccessful_expectations": n - m,
"success_percent": m / n
}
}
Notes:
If the configuration object was built with a different version of great expectations than the current environment. \
If no version was found in the configuration file.
Raises:
AttributeError - if 'catch_exceptions'=None and an expectation throws an AttributeError
|
def parse_dash(string, width):
    """Parse a Tk-style dash pattern string into a list of segment lengths."""
    # DashConvert from {tk-sources}/generic/tkCanvUtil.c
    w = max(1, int(width + 0.5))
    # Each recognised character maps to an (on, off) pair of multipliers.
    segments = {"_": (8, 4), "-": (6, 4), ",": (4, 4), ".": (2, 4)}
    result = []
    for ch in string:
        if ch == " ":
            # A space lengthens the preceding gap (ignored at the start).
            if result:
                result[-1] += w + 1
        elif ch in segments:
            on, off = segments[ch]
            result.append(on * w)
            result.append(off * w)
    return result
|
parse dash pattern specified with string
|
def put(self, item):
    ''' store item in sqlite database
    '''
    # Single item goes through the one-row path; a list/tuple is batched.
    if isinstance(item, self._item_class):
        self._put_one(item)
        return
    if isinstance(item, (list, tuple)):
        self._put_many(item)
        return
    raise RuntimeError('Unknown item(s) type, %s' % type(item))
|
store item in sqlite database
|
def get_run_as_identifiers_stack(self):
    """
    :returns: an IdentifierCollection, or None when no session attribute
              can be read (e.g. there is no current session)
    """
    current_session = self.get_session(False)
    try:
        key = self.run_as_identifiers_session_key
        return current_session.get_internal_attribute(key)
    except AttributeError:
        # No session (or no such attribute accessor) available.
        return None
|
:returns: an IdentifierCollection
|
def colored(text, color=None, on_color=None, attrs=None):
    """Colorize text.
    Available text colors:
        red, green, yellow, blue, magenta, cyan, white.
    Available text highlights:
        on_red, on_green, on_yellow, on_blue, on_magenta, on_cyan, on_white.
    Available attributes:
        bold, dark, underline, blink, reverse, concealed.
    Example:
        colored('Hello, World!', 'red', 'on_grey', ['blue', 'blink'])
        colored('Hello, World!', 'green')
    """
    # Skip entirely when coloring is disabled globally or via environment.
    if __ISON and os.getenv('ANSI_COLORS_DISABLED') is None:
        template = '\033[%dm%s'
        if color is not None:
            text = template % (COLORS[color], text)
        if on_color is not None:
            text = template % (HIGHLIGHTS[on_color], text)
        for attr in (attrs or []):
            text = template % (ATTRIBUTES[attr], text)
        # Terminate with a reset so following output is unaffected.
        text += RESET
    return text
|
Colorize text.
Available text colors:
red, green, yellow, blue, magenta, cyan, white.
Available text highlights:
on_red, on_green, on_yellow, on_blue, on_magenta, on_cyan, on_white.
Available attributes:
bold, dark, underline, blink, reverse, concealed.
Example:
colored('Hello, World!', 'red', 'on_grey', ['blue', 'blink'])
colored('Hello, World!', 'green')
|
def DEBUG_ON_RESPONSE(self, statusCode, responseHeader, data):
    '''
    Update current frame with response
    Current frame index will be attached to responseHeader
    '''
    if self.DEBUG_FLAG:  # pragma no branch (Flag always set in tests)
        frame = self._frameBuffer[self._frameCount]
        # Slots 1..3 of the frame hold status, header and payload.
        frame[1:4] = [statusCode, responseHeader, data]
        responseHeader[self.DEBUG_HEADER_KEY] = self._frameCount
|
Update current frame with response
Current frame index will be attached to responseHeader
|
def __to_plain_containers(self,
                          container: Union[CommentedSeq, CommentedMap]
                          ) -> Union[OrderedDict, list]:
    """Converts any sequence or mapping to list or OrderedDict
    Stops at anything that isn't a sequence or a mapping.
    One day, we'll extract the comments and formatting and store \
    them out-of-band.
    Args:
        mapping: The mapping of constructed subobjects to edit
    """
    if isinstance(container, CommentedMap):
        plain = OrderedDict()  # type: Union[OrderedDict, list]
        for key, value in container.items():
            # Recurse only into nested ruamel containers.
            if isinstance(value, (CommentedMap, CommentedSeq)):
                plain[key] = self.__to_plain_containers(value)
            else:
                plain[key] = value
    elif isinstance(container, CommentedSeq):
        plain = list()
        for value in container:
            if isinstance(value, (CommentedMap, CommentedSeq)):
                plain.append(self.__to_plain_containers(value))
            else:
                plain.append(value)
    return plain
|
Converts any sequence or mapping to list or OrderedDict
Stops at anything that isn't a sequence or a mapping.
One day, we'll extract the comments and formatting and store \
them out-of-band.
Args:
mapping: The mapping of constructed subobjects to edit
|
def p_expr_div_expr(p):
    """ expr : expr BAND expr
             | expr BOR expr
             | expr BXOR expr
             | expr PLUS expr
             | expr MINUS expr
             | expr MUL expr
             | expr DIV expr
             | expr MOD expr
             | expr POW expr
             | expr LSHIFT expr
             | expr RSHIFT expr
             | pexpr BAND expr
             | pexpr BOR expr
             | pexpr BXOR expr
             | pexpr PLUS expr
             | pexpr MINUS expr
             | pexpr MUL expr
             | pexpr DIV expr
             | pexpr MOD expr
             | pexpr POW expr
             | pexpr LSHIFT expr
             | pexpr RSHIFT expr
             | expr BAND pexpr
             | expr BOR pexpr
             | expr BXOR pexpr
             | expr PLUS pexpr
             | expr MINUS pexpr
             | expr MUL pexpr
             | expr DIV pexpr
             | expr MOD pexpr
             | expr POW pexpr
             | expr LSHIFT pexpr
             | expr RSHIFT pexpr
             | pexpr BAND pexpr
             | pexpr BOR pexpr
             | pexpr BXOR pexpr
             | pexpr PLUS pexpr
             | pexpr MINUS pexpr
             | pexpr MUL pexpr
             | pexpr DIV pexpr
             | pexpr MOD pexpr
             | pexpr POW pexpr
             | pexpr LSHIFT pexpr
             | pexpr RSHIFT pexpr
    """
    # NOTE: the docstring above is *functional* -- PLY (yacc) parses it to
    # build the grammar, so its text must not be reworded.  This single rule
    # covers every binary arithmetic/bitwise operator, with each operand
    # being either a plain `expr` or a parenthesized `pexpr`.
    # The operator token (p[2]) is wrapped in a Container that records its
    # line number, and makenode combines it with the left (p[1]) and right
    # (p[3]) operand subtrees into one AST node.
    p[0] = Expr.makenode(Container(p[2], p.lineno(2)), p[1], p[3])
|
expr : expr BAND expr
| expr BOR expr
| expr BXOR expr
| expr PLUS expr
| expr MINUS expr
| expr MUL expr
| expr DIV expr
| expr MOD expr
| expr POW expr
| expr LSHIFT expr
| expr RSHIFT expr
| pexpr BAND expr
| pexpr BOR expr
| pexpr BXOR expr
| pexpr PLUS expr
| pexpr MINUS expr
| pexpr MUL expr
| pexpr DIV expr
| pexpr MOD expr
| pexpr POW expr
| pexpr LSHIFT expr
| pexpr RSHIFT expr
| expr BAND pexpr
| expr BOR pexpr
| expr BXOR pexpr
| expr PLUS pexpr
| expr MINUS pexpr
| expr MUL pexpr
| expr DIV pexpr
| expr MOD pexpr
| expr POW pexpr
| expr LSHIFT pexpr
| expr RSHIFT pexpr
| pexpr BAND pexpr
| pexpr BOR pexpr
| pexpr BXOR pexpr
| pexpr PLUS pexpr
| pexpr MINUS pexpr
| pexpr MUL pexpr
| pexpr DIV pexpr
| pexpr MOD pexpr
| pexpr POW pexpr
| pexpr LSHIFT pexpr
| pexpr RSHIFT pexpr
|
def channel_in_frame(channel, framefile):
    """Determine whether a channel is stored in this framefile

    **Requires:** |LDAStools.frameCPP|_

    Parameters
    ----------
    channel : `str`
        name of channel to find

    framefile : `str`
        path of GWF file to test

    Returns
    -------
    inframe : `bool`
        whether this channel is included in the table of contents for
        the given framefile
    """
    target = str(channel)
    # Stop at the first matching entry in the file's table of contents.
    return any(name == target for name in iter_channel_names(framefile))
|
Determine whether a channel is stored in this framefile
**Requires:** |LDAStools.frameCPP|_
Parameters
----------
channel : `str`
name of channel to find
framefile : `str`
path of GWF file to test
Returns
-------
inframe : `bool`
whether this channel is included in the table of contents for
the given framefile
|
def run_cell_magic(self, magic_name, line, cell):
    """Execute the given cell magic.

    Parameters
    ----------
    magic_name : str
        Name of the desired magic function, without '%' prefix.
    line : str
        The rest of the first input line as a single string.
    cell : str
        The body of the cell as a (possibly multiline) string.

    Returns
    -------
    The value returned by the magic function, or None when no cell
    magic of that name exists (an error is reported instead).
    """
    fn = self.find_cell_magic(magic_name)
    if fn is None:
        lm = self.find_line_magic(magic_name)
        etpl = "Cell magic function `%%%%%s` not found%s."
        extra = '' if lm is None else (' (But line magic `%%%s` exists, '
                                       'did you mean that instead?)' % magic_name )
        error(etpl % (magic_name, extra))
    else:
        # Note: this is the distance in the stack to the user's frame.
        # This will need to be updated if the internal calling logic gets
        # refactored, or else we'll be expanding the wrong variables.
        stack_depth = 2
        magic_arg_s = self.var_expand(line, stack_depth)
        with self.builtin_trap:
            # BUG FIX: the original computed magic_arg_s and then discarded
            # it, calling fn with the raw `line`, so variable expansion never
            # took effect.  Pass the expanded string instead.
            result = fn(magic_arg_s, cell)
        return result
|
Execute the given cell magic.
Parameters
----------
magic_name : str
Name of the desired magic function, without '%' prefix.
line : str
The rest of the first input line as a single string.
cell : str
The body of the cell as a (possibly multiline) string.
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.