def remove_files(filename):
# type: (AnyStr) -> None
"""
Delete all files with same root as fileName,
i.e. regardless of suffix, such as ESRI shapefile
"""
pattern = os.path.splitext(filename)[0] + '.*'
for f in glob.iglob(pattern):
os.remove(f)
|
Delete all files with the same root as filename,
i.e. regardless of suffix, such as an ESRI shapefile.
|
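A minimal usage sketch (assuming os and glob are imported; the path and sidecar files are hypothetical):

# Given hypothetical sidecar files data/parcels.shp, data/parcels.shx, data/parcels.dbf,
# this removes every file matching data/parcels.*
remove_files('data/parcels.shp')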
def named_objs(objlist, namesdict=None):
"""
Given a list of objects, returns a dictionary mapping from
string name for the object to the object itself. Accepts
an optional name,obj dictionary, which will override any other
name if that item is present in the dictionary.
"""
objs = OrderedDict()
if namesdict is not None:
objtoname = {hashable(v): k for k, v in namesdict.items()}
for obj in objlist:
if namesdict is not None and hashable(obj) in objtoname:
k = objtoname[hashable(obj)]
elif hasattr(obj, "name"):
k = obj.name
elif hasattr(obj, '__name__'):
k = obj.__name__
else:
k = as_unicode(obj)
objs[k] = obj
return objs
|
Given a list of objects, returns a dictionary mapping from the
string name of each object to the object itself. Accepts an
optional {name: obj} dictionary, which will override any other
name if that item is present in the dictionary.
|
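A short usage sketch, assuming named_objs and its helpers (hashable, as_unicode, OrderedDict) are in scope; the override name 'smallest' is hypothetical:

objs = named_objs([min, max], namesdict={'smallest': min})
# OrderedDict([('smallest', <built-in function min>),
#              ('max', <built-in function max>)])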
def connectPeer(self, peer):
"""Establish a SIGMA connection to the given peer.
@param peer: a Q2QAddress of a peer which has a file that I want
@return: a Deferred which fires with a SigmaProtocol.
"""
return self.conns.connectCached(endpoint.Q2QEndpoint(self.svc,
self.addr,
peer,
PROTOCOL_NAME),
self.clientFactory)
|
Establish a SIGMA connection to the given peer.
@param peer: a Q2QAddress of a peer which has a file that I want
@return: a Deferred which fires with a SigmaProtocol.
|
def do_patch(endpoint, body, access_token):
'''Do an HTTP PATCH request and return JSON.
Args:
endpoint (str): Azure Resource Manager management endpoint.
body (str): JSON body of information to patch.
access_token (str): A valid Azure authentication token.
Returns:
HTTP response. JSON body.
'''
headers = {"content-type": "application/json", "Authorization": 'Bearer ' + access_token}
headers['User-Agent'] = get_user_agent()
return requests.patch(endpoint, data=body, headers=headers)
|
Do an HTTP PATCH request and return JSON.
Args:
endpoint (str): Azure Resource Manager management endpoint.
body (str): JSON body of information to patch.
access_token (str): A valid Azure authentication token.
Returns:
HTTP response. JSON body.
|
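A usage sketch with a hypothetical ARM endpoint and a previously acquired access_token; note that body must already be a JSON string:

import json

# Hypothetical endpoint; do_patch expects the body pre-serialized
endpoint = 'https://management.azure.com/subscriptions/SUB_ID/tagNames/env?api-version=2019-10-01'
body = json.dumps({'tags': {'env': 'dev'}})
response = do_patch(endpoint, body, access_token)
print(response.status_code, response.json())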
def add_translation(sender):
"""
Adds the translation actions to a class.
"""
# 1. Execute _save_translations when saving an object
signals.post_save.connect(_save_translations, sender=sender)
# 2. Adds get_fieldtranslations to the class. Remember that this method gets all the translations.
sender.add_to_class("get_fieldtranslations", _get_fieldtranslations)
# 3. Adds load_translations. Remember that this method includes all the translations as dynamic attributes.
sender.add_to_class("load_translations", _load_translations)
# 4. Adds _set_dict_translations. This method allows setting all the translated fields from a dict.
# Very useful when dealing with ModelForms.
sender.add_to_class("set_translation_fields", _set_dict_translations)
# 5. This method returns one translated attribute in Django.
# Avoid using _ and use get_trans_attr, because Django's maketranslations parser is fooled into believing
# that everything inside _ methods is translatable.
sender.add_to_class("_", _get_translated_field)
sender.add_to_class("get_trans_attr", _get_translated_field)
sender.add_to_class("_t", _get_translated_field)
|
Adds the translation actions to a class.
|
def generate_random_person(self, n):
# type: (int) -> Iterable[Tuple[str, str, str, str]]
"""
Generator that yields details on a person with plausible name, sex and age.
:yields: Generated data for one person
tuple - (id: str, name: str('First Last'), birthdate: str('YYYY/MM/DD'), sex: str('M' | 'F') )
"""
assert self.all_male_first_names is not None
assert self.all_female_first_names is not None
assert self.all_last_names is not None
for i in range(n):
sex = 'M' if random.random() > 0.5 else 'F'
dob = random_date(self.earliest_birthday, self.latest_birthday).strftime("%Y/%m/%d")
first_name = random.choice(self.all_male_first_names) if sex == 'M' else random.choice(
self.all_female_first_names)
last_name = random.choice(self.all_last_names)
yield (
str(i),
first_name + ' ' + last_name,
dob,
sex
)
|
Generator that yields details on a person with plausible name, sex and age.
:yields: Generated data for one person
tuple - (id: str, name: str('First Last'), birthdate: str('YYYY/MM/DD'), sex: str('M' | 'F') )
|
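A usage sketch, assuming gen is an instance of the containing class with its name lists loaded:

for person_id, name, dob, sex in gen.generate_random_person(3):
    print(person_id, name, dob, sex)
# e.g. "0 Jane Doe 1984/03/17 F" -- values vary with the random state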
def execute_after_scenario_steps(self, context):
"""
Execute actions after each scenario.
:param context: It’s a clever place where you and behave can store information to share around, automatically managed by behave.
"""
if not self.feature_error and not self.scenario_error:
self.__execute_steps_by_action(context, ACTIONS_AFTER_SCENARIO)
# Behave dynamic environment: Fail all steps if dyn_env has got any error and reset it
if self.reset_error_status():
context.scenario.reset()
context.dyn_env.fail_first_step_precondition_exception(context.scenario)
|
Execute actions after each scenario.
:param context: It’s a clever place where you and behave can store information to share around, automatically managed by behave.
|
def get_context_override(self, request):
"""
Override the request object with an emulated user.
"""
context_override = super(EmulateUserModelMixin, self).get_context_override(request)
try:
if request.user.is_staff:
user = self.UserModel.objects.get(pk=request.session['emulate_user_id'])
context_override.update(user=user)
except (self.UserModel.DoesNotExist, KeyError):
pass
return context_override
|
Override the request object with an emulated user.
|
def search_users(self, username_keyword, limit=10):
"""
Searches for users whose username matches ``username_keyword``, and returns
a list of matched users.
:param str username_keyword: keyword to search with
:param int limit: maximum number of returned users
:return: a list of matched users
:rtype: List[GogsUser]
:raises NetworkFailure: if there is an error communicating with the server
:raises ApiFailure: if the request cannot be serviced
"""
params = {"q": username_keyword, "limit": limit}
response = self.get("/users/search", params=params)
return [GogsUser.from_json(user_json) for user_json in response.json()["data"]]
|
Searches for users whose username matches ``username_keyword``, and returns
a list of matched users.
:param str username_keyword: keyword to search with
:param int limit: maximum number of returned users
:return: a list of matched users
:rtype: List[GogsUser]
:raises NetworkFailure: if there is an error communicating with the server
:raises ApiFailure: if the request cannot be serviced
|
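A usage sketch, assuming api is a client instance exposing this method:

matches = api.search_users('tom', limit=5)
for user in matches:
    print(user.username)  # assumes GogsUser exposes a username attribute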
def get(self, word, default=nil):
"""
Retrieves the output value associated with the word.
If the word is not present, returns the default value;
if no default is given, raises KeyError.
"""
node = self.__get_node(word)
output = nil
if node:
output = node.output
if output is nil:
if default is nil:
raise KeyError("no key '%s'" % word)
else:
return default
else:
return output
|
Retrieves the output value associated with the word.
If the word is not present, returns the default value;
if no default is given, raises KeyError.
|
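A usage sketch against a hypothetical trie instance (the add method name is an assumption):

trie.add('cat', 'feline')      # hypothetical insertion API
trie.get('cat')                # -> 'feline'
trie.get('dog', default=None)  # -> None (explicit default)
trie.get('dog')                # raises KeyError("no key 'dog'")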
def list(self, limit=None, marker=None, name=None, visibility=None,
member_status=None, owner=None, tag=None, status=None,
size_min=None, size_max=None, sort_key=None, sort_dir=None,
return_raw=False):
"""
Returns a list of resource objects. Pagination is supported through the
optional 'marker' and 'limit' parameters. Filtering the returned value
is possible by specifying values for any of the other parameters.
"""
uri = "/%s" % self.uri_base
qs = utils.dict_to_qs(dict(limit=limit, marker=marker, name=name,
visibility=visibility, member_status=member_status,
owner=owner, tag=tag, status=status, size_min=size_min,
size_max=size_max, sort_key=sort_key, sort_dir=sort_dir))
if qs:
uri = "%s?%s" % (uri, qs)
return self._list(uri, return_raw=return_raw)
|
Returns a list of resource objects. Pagination is supported through the
optional 'marker' and 'limit' parameters. Filtering the returned value
is possible by specifying values for any of the other parameters.
|
def _promote_and_split(s):
"""
E:F:.O:M:.t.- => E:.-F:.O:M:.-t.-‘
E:F:.M:M:.l.- => E:.-F:.M:M:.-l.-‘
"""
subst, attr, mode = s
subst0, subst1, _mode = subst
assert isinstance(_mode, NullScript)
return m(m(m(subst0)), m(m(subst1), attr), m(mode))
|
E:F:.O:M:.t.- => E:.-F:.O:M:.-t.-‘
E:F:.M:M:.l.- => E:.-F:.M:M:.-l.-‘
|
def _run_with_kvm(self, qemu_path, options):
"""
Check whether QEMU can be run with KVM.
:param qemu_path: Path to qemu
:param options: String of qemu user options
:returns: True if KVM should be enabled
"""
if sys.platform.startswith("linux") and self.manager.config.get_section_config("Qemu").getboolean("enable_kvm", True) \
and "-no-kvm" not in options:
# Turn OFF kvm for non x86 architectures
if os.path.basename(qemu_path) not in ["qemu-system-x86_64", "qemu-system-i386", "qemu-kvm"]:
return False
if not os.path.exists("/dev/kvm"):
if self.manager.config.get_section_config("Qemu").getboolean("require_kvm", True):
raise QemuError("KVM acceleration cannot be used (/dev/kvm doesn't exist). You can turn off KVM support in the gns3_server.conf by adding enable_kvm = false to the [Qemu] section.")
else:
return False
return True
return False
|
Check whether QEMU can be run with KVM.
:param qemu_path: Path to qemu
:param options: String of qemu user options
:returns: True if KVM should be enabled
|
def w3_tx(self):
"""
:return: Web3 contract tx prepared for `call`, `transact` or `buildTransaction`
"""
safe_contract = get_safe_contract(self.w3, address=self.safe_address)
return safe_contract.functions.execTransaction(
self.to,
self.value,
self.data,
self.operation,
self.safe_tx_gas,
self.data_gas,
self.gas_price,
self.gas_token,
self.refund_receiver,
self.signatures)
|
:return: Web3 contract tx prepared for `call`, `transact` or `buildTransaction`
|
def is_descendant_of_bank(self, id_, bank_id):
"""Tests if an ``Id`` is a descendant of a bank.
arg: id (osid.id.Id): an ``Id``
arg: bank_id (osid.id.Id): the ``Id`` of a bank
return: (boolean) - ``true`` if the ``id`` is a descendant of
the ``bank_id,`` ``false`` otherwise
raise: NotFound - ``bank_id`` not found
raise: NullArgument - ``bank_id`` or ``id`` is ``null``
raise: OperationFailed - unable to complete request
raise: PermissionDenied - authorization failure
*compliance: mandatory -- This method must be implemented.*
*implementation notes*: If ``id`` is not found return ``false``.
"""
# Implemented from template for
# osid.resource.BinHierarchySession.is_descendant_of_bin
if self._catalog_session is not None:
return self._catalog_session.is_descendant_of_catalog(id_=id_, catalog_id=bank_id)
return self._hierarchy_session.is_descendant(id_=id_, descendant_id=bank_id)
|
Tests if an ``Id`` is a descendant of a bank.
arg: id (osid.id.Id): an ``Id``
arg: bank_id (osid.id.Id): the ``Id`` of a bank
return: (boolean) - ``true`` if the ``id`` is a descendant of
the ``bank_id,`` ``false`` otherwise
raise: NotFound - ``bank_id`` not found
raise: NullArgument - ``bank_id`` or ``id`` is ``null``
raise: OperationFailed - unable to complete request
raise: PermissionDenied - authorization failure
*compliance: mandatory -- This method must be implemented.*
*implementation notes*: If ``id`` is not found return ``false``.
|
def _fetch_data(self):
"""Converts inputspec to files"""
if (self.inputs.surface_target == "fsnative" or
self.inputs.volume_target != "MNI152NLin2009cAsym"):
# subject space is not supported yet
raise NotImplementedError
annotation_files = sorted(glob(os.path.join(self.inputs.subjects_dir,
self.inputs.surface_target,
'label',
'*h.aparc.annot')))
if not annotation_files:
raise IOError("Freesurfer annotations for %s not found in %s" % (
self.inputs.surface_target, self.inputs.subjects_dir))
label_file = str(get_template(
'MNI152NLin2009cAsym', resolution=2, desc='DKT31', suffix='dseg'))
return annotation_files, label_file
|
Converts inputspec to files
|
def search_individuals(self, dataset_id, name=None):
"""
Returns an iterator over the Individuals fulfilling the specified
conditions.
:param str dataset_id: The dataset to search within.
:param str name: Only Individuals matching the specified name will
be returned.
:return: An iterator over the :class:`ga4gh.protocol.Individual`
objects defined by the query parameters.
"""
request = protocol.SearchIndividualsRequest()
request.dataset_id = dataset_id
request.name = pb.string(name)
request.page_size = pb.int(self._page_size)
return self._run_search_request(
request, "individuals", protocol.SearchIndividualsResponse)
|
Returns an iterator over the Individuals fulfilling the specified
conditions.
:param str dataset_id: The dataset to search within.
:param str name: Only Individuals matching the specified name will
be returned.
:return: An iterator over the :class:`ga4gh.protocol.Individual`
objects defined by the query parameters.
|
def _normalize_purge_unknown(mapping, schema):
""" {'type': 'boolean'} """
for field in tuple(mapping):
if field not in schema:
del mapping[field]
return mapping
|
{'type': 'boolean'}
|
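A worked example; the function purges unknown keys in place and also returns the mapping:

mapping = {'name': 'Ada', 'stray': 42}
schema = {'name': {'type': 'string'}}
_normalize_purge_unknown(mapping, schema)
# mapping is now {'name': 'Ada'}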
def crates(self, from_page=1):
"""Get crates in alphabetical order"""
path = urijoin(CRATES_API_URL, CATEGORY_CRATES)
raw_crates = self.__fetch_items(path, from_page)
return raw_crates
|
Get crates in alphabetical order
|
def _configure_detail_level(cls, detail_level):
"""
Validate the `detail_level` parameter and return it.
This accepts a string or integer for `detail_level`.
"""
# process detail_level
if isinstance(detail_level, six.string_types):
if detail_level not in LOG_DETAIL_LEVELS:
raise ValueError(
_format("Invalid log detail level string: {0!A}; must be "
"one of: {1!A}", detail_level, LOG_DETAIL_LEVELS))
elif isinstance(detail_level, int):
if detail_level < 0:
raise ValueError(
_format("Invalid log detail level integer: {0}; must be a "
"positive integer.", detail_level))
elif detail_level is None:
detail_level = DEFAULT_LOG_DETAIL_LEVEL
else:
raise ValueError(
_format("Invalid log detail level: {0!A}; must be one of: "
"{1!A}, or a positive integer",
detail_level, LOG_DETAIL_LEVELS))
return detail_level
|
Validate the `detail_level` parameter and return it.
This accepts a string or integer for `detail_level`.
|
def build_news(ctx, draft=False, yes=False):
""" Build towncrier newsfragments.
"""
report.info(ctx, "docs.build-news", "building changelog from news fragments")
build_command = f"towncrier --version {ctx.metadata['version']}"
if draft:
report.warn(
ctx,
"docs.build-news",
"building changelog as draft (results are written to stdout)",
)
build_command += " --draft"
elif yes:
report.warn(
ctx, "docs.build-news", "removing news files without user confirmation (-y)"
)
build_command += " --yes"
ctx.run(build_command, hide=None)
|
Build towncrier newsfragments.
|
def process_results(self):
"""
Function that is called when a stage is completed and
needs to be analyzed before further computation.
The code here implements the original SH algorithm by
advancing the k best (lowest-loss) configurations at the current
budget. k is defined by the num_configs list (see __init__)
and the current stage value.
For more advanced methods, like resampling after each stage,
overload this function only.
"""
self.stage += 1
# collect all config_ids that need to be compared
config_ids = list(filter(lambda cid: self.data[cid].status == 'REVIEW', self.data.keys()))
if (self.stage >= len(self.num_configs)):
self.finish_up()
return
budgets = [self.data[cid].budget for cid in config_ids]
if len(set(budgets)) > 1:
raise RuntimeError('Not all configurations have the same budget!')
budget = self.budgets[self.stage-1]
losses = np.array([self.data[cid].results[budget]['loss'] for cid in config_ids])
advance = self._advance_to_next_stage(config_ids, losses)
for i, a in enumerate(advance):
if a:
self.logger.debug('ITERATION: Advancing config %s to next budget %f'%(config_ids[i], self.budgets[self.stage]))
for i, cid in enumerate(config_ids):
if advance[i]:
self.data[cid].status = 'QUEUED'
self.data[cid].budget = self.budgets[self.stage]
self.actual_num_configs[self.stage] += 1
else:
self.data[cid].status = 'TERMINATED'
|
Function that is called when a stage is completed and
needs to be analyzed before further computation.
The code here implements the original SH algorithm by
advancing the k best (lowest-loss) configurations at the current
budget. k is defined by the num_configs list (see __init__)
and the current stage value.
For more advanced methods, like resampling after each stage,
overload this function only.
|
def mtf_image_transformer_tiny_spatial1d():
"""Small single parameters."""
hparams = mtf_image_transformer_tiny()
hparams.num_decoder_layers = 6
hparams.filter_size = 128
hparams.block_height = 8
hparams.block_width = 8
hparams.attention_type = "local1d_spatial"
hparams.mesh_shape = ""
hparams.layout = ""
return hparams
|
Small single parameters.
|
def get_volumes(container_map, config, default_volume_paths, include_named):
"""
Generates volume paths for the ``volumes`` argument during container creation.
:param container_map: Container map.
:type container_map: dockermap.map.config.main.ContainerMap
:param config: Container configuration.
:type config: dockermap.map.config.container.ContainerConfiguration
:param default_volume_paths: Dictionary with volume aliases and their default paths.
:type default_volume_paths: dict[unicode | str, unicode | str]
:param include_named: Whether to include attached and their re-used volumes. This should be done if Docker supports
named volumes; otherwise volumes are inherited from other containers via ``volumes_from``.
:type include_named: bool
:return: List of shared volume mount points.
:rtype: list[unicode | str]
"""
def _bind_volume_path(vol):
if isinstance(vol, HostVolume):
return resolve_value(vol.path)
v_path = resolve_value(default_volume_paths.get(vol.name))
if v_path:
return v_path
raise KeyError("No host-volume information found for alias {0}.".format(vol))
def _attached_volume_path(vol):
if isinstance(vol, UsedVolume):
return resolve_value(vol.path)
v_path = resolve_value(default_volume_paths.get(vol.name))
if v_path:
return v_path
raise KeyError("No volume information found for alias {0}.".format(vol))
def _used_volume_path(vol):
if isinstance(vol, UsedVolume):
return resolve_value(vol.path)
if container_map.use_attached_parent_name:
return resolve_value(default_volume_paths.get(vol.name.partition('.')[2]))
return resolve_value(default_volume_paths.get(vol.name))
volumes = list(map(resolve_value, config.shares))
volumes.extend(map(_bind_volume_path, config.binds))
if include_named:
volumes.extend(map(_attached_volume_path, config.attaches))
volumes.extend(filter(None, map(_used_volume_path, config.uses)))
return volumes
|
Generates volume paths for the ``volumes`` argument during container creation.
:param container_map: Container map.
:type container_map: dockermap.map.config.main.ContainerMap
:param config: Container configuration.
:type config: dockermap.map.config.container.ContainerConfiguration
:param default_volume_paths: Dictionary with volume aliases and their default paths.
:type default_volume_paths: dict[unicode | str, unicode | str]
:param include_named: Whether to include attached and their re-used volumes. This should be done if Docker supports
named volumes; otherwise volumes are inherited from other containers via ``volumes_from``.
:type include_named: bool
:return: List of shared volume mount points.
:rtype: list[unicode | str]
|
def run(self, context=None, options=None):
"""
Run axe against the current page.
:param context: which page part(s) to analyze and/or what to exclude.
:param options: dictionary of aXe options.
"""
template = (
"var callback = arguments[arguments.length - 1];"
+ "axe.run(%s).then(results => callback(results))"
)
args = ""
# If context parameter is passed, add to args
if context is not None:
args += "%r" % context
# Add comma delimiter only if both parameters are passed
if context is not None and options is not None:
args += ","
# If options parameter is passed, add to args
if options is not None:
args += "%s" % options
command = template % args
response = self.selenium.execute_async_script(command)
return response
|
Run axe against the current page.
:param context: which page part(s) to analyze and/or what to exclude.
:param options: dictionary of aXe options.
|
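A usage sketch, assuming axe is an instance of the containing wrapper bound to a Selenium driver with axe-core already injected; the context selector and options string are hypothetical (options must be a JavaScript object literal, since it is interpolated verbatim):

results = axe.run(context='#main', options='{runOnly: ["wcag2a"]}')
print(len(results['violations']))  # axe-core results include a 'violations' list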
def extract_scopes(self, request):
"""
Extract scopes from a request object.
"""
payload = self.extract_payload(request)
if not payload:
return None
scopes_attribute = self.config.scopes_name()
return payload.get(scopes_attribute, None)
|
Extract scopes from a request object.
|
def cast_to_list(position):
"""Cast the positional argument at given position into a list if not already a list."""
@wrapt.decorator
def wrapper(function, instance, args, kwargs):
if not isinstance(args[position], list):
args = list(args)
args[position] = [args[position]]
args = tuple(args)
return function(*args, **kwargs)
return wrapper
|
Cast the positional argument at given position into a list if not already a list.
|
def find_link(self, target_node):
"""
Find the link that points to ``target_node`` if it exists.
If no link in ``self`` points to ``target_node``, return None
Args:
target_node (Node): The node to look for in ``self.link_list``
Returns:
Link: An existing link pointing to ``target_node`` if found
None: If no such link exists
Example:
>>> node_1 = Node('One')
>>> node_2 = Node('Two')
>>> node_1.add_link(node_2, 1)
>>> link_1 = node_1.link_list[0]
>>> found_link = node_1.find_link(node_2)
>>> found_link == link_1
True
"""
try:
return next(l for l in self.link_list if l.target == target_node)
except StopIteration:
return None
|
Find the link that points to ``target_node`` if it exists.
If no link in ``self`` points to ``target_node``, return None
Args:
target_node (Node): The node to look for in ``self.link_list``
Returns:
Link: An existing link pointing to ``target_node`` if found
None: If no such link exists
Example:
>>> node_1 = Node('One')
>>> node_2 = Node('Two')
>>> node_1.add_link(node_2, 1)
>>> link_1 = node_1.link_list[0]
>>> found_link = node_1.find_link(node_2)
>>> found_link == link_1
True
|
def get_job_definition(self, identifier):
"""
Get job definition by name or ARN.
:param identifier: Name or ARN
:type identifier: str
:return: Job definition or None
:rtype: JobDefinition or None
"""
env = self.get_job_definition_by_arn(identifier)
if env is None:
env = self.get_job_definition_by_name(identifier)
return env
|
Get job definition by name or ARN.
:param identifier: Name or ARN
:type identifier: str
:return: Job definition or None
:rtype: JobDefinition or None
|
def get_all_instance_profiles(path_prefix='/', region=None, key=None,
keyid=None, profile=None):
'''
Get and return all IAM instance profiles, starting at the optional path.
.. versionadded:: 2016.11.0
CLI Example:
salt-call boto_iam.get_all_instance_profiles
'''
conn = _get_conn(region=region, key=key, keyid=keyid, profile=profile)
marker = False
profiles = []
while marker is not None:
marker = marker if marker else None
p = conn.list_instance_profiles(path_prefix=path_prefix,
marker=marker)
res = p.list_instance_profiles_response.list_instance_profiles_result
profiles += res.instance_profiles
marker = getattr(res, 'marker', None)
return profiles
|
Get and return all IAM instance profiles, starting at the optional path.
.. versionadded:: 2016.11.0
CLI Example:
salt-call boto_iam.get_all_instance_profiles
|
def connection(self) -> Iterator[amqp.Connection]:
"""Returns a new connection as a context manager."""
TCP_USER_TIMEOUT = 18 # constant is available on Python 3.6+.
socket_settings = {TCP_USER_TIMEOUT: self.config.TCP_USER_TIMEOUT}
if sys.platform.startswith('darwin'):
del socket_settings[TCP_USER_TIMEOUT]
conn = amqp.Connection(
host="%s:%s" % (self.config.RABBIT_HOST, self.config.RABBIT_PORT),
userid=self.config.RABBIT_USER,
password=self.config.RABBIT_PASSWORD,
virtual_host=self.config.RABBIT_VIRTUAL_HOST,
connect_timeout=self.config.RABBIT_CONNECT_TIMEOUT,
read_timeout=self.config.RABBIT_READ_TIMEOUT,
write_timeout=self.config.RABBIT_WRITE_TIMEOUT,
socket_settings=socket_settings,
heartbeat=self.config.RABBIT_HEARTBEAT,
)
conn.connect()
logger.info('Connected to RabbitMQ')
with _safe_close(conn):
yield conn
|
Returns a new connection as a context manager.
|
def get_connection(connection='', engine_name=None, connection_type='long', **args):
"""
Create a NamedEngine or return an existing engine instance.
If '://' is included in the connection parameter, a new engine object
is created; otherwise an existing engine instance is returned.
"""
engine_name = engine_name or __default_engine__
if '://' in connection:
d = {
'connection_string':connection,
'connection_args':args,
'connection_type':connection_type,
}
return engine_manager.add(engine_name, d).engine
else:
connection = connection or __default_engine__
if connection in engine_manager:
return engine_manager[connection].engine
else:
raise Error("Can't find engine %s" % connection)
|
Create a NamedEngine or return an existing engine instance.
If '://' is included in the connection parameter, a new engine object
is created; otherwise an existing engine instance is returned.
|
def categorical_partition_data(data):
"""Convenience method for creating weights from categorical data.
Args:
data (list-like): The data from which to construct the estimate.
Returns:
A new partition object::
{
"partition": (list) The categorical values present in the data
"weights": (list) The weights of the values in the partition.
}
"""
# Make dropna explicit (even though it defaults to true)
series = pd.Series(data)
value_counts = series.value_counts(dropna=True)
# Compute weights using denominator only of nonnull values
null_indexes = series.isnull()
nonnull_count = (null_indexes == False).sum()
weights = value_counts.values / nonnull_count
return {
"values": value_counts.index.tolist(),
"weights": weights
}
|
Convenience method for creating weights from categorical data.
Args:
data (list-like): The data from which to construct the estimate.
Returns:
A new partition object::
{
"partition": (list) The categorical values present in the data
"weights": (list) The weights of the values in the partition.
}
|
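A worked example (requires pandas); with three non-null values the weight denominator is 3:

partition = categorical_partition_data(['a', 'a', 'b'])
# partition['values']  -> ['a', 'b']
# partition['weights'] -> array([0.66666667, 0.33333333])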
def find_one_and_replace(self, filter, replacement,
projection=None, sort=None, upsert=False,
return_document=ReturnDocument.BEFORE, **kwargs):
"""Finds a single document and replaces it, returning either the
original or the replaced document.
The :meth:`find_one_and_replace` method differs from
:meth:`find_one_and_update` by replacing the document matched by
*filter*, rather than modifying the existing document.
>>> for doc in db.test.find({}):
... print(doc)
...
{u'x': 1, u'_id': 0}
{u'x': 1, u'_id': 1}
{u'x': 1, u'_id': 2}
>>> db.test.find_one_and_replace({'x': 1}, {'y': 1})
{u'x': 1, u'_id': 0}
>>> for doc in db.test.find({}):
... print(doc)
...
{u'y': 1, u'_id': 0}
{u'x': 1, u'_id': 1}
{u'x': 1, u'_id': 2}
:Parameters:
- `filter`: A query that matches the document to replace.
- `replacement`: The replacement document.
- `projection` (optional): A list of field names that should be
returned in the result document or a mapping specifying the fields
to include or exclude. If `projection` is a list "_id" will
always be returned. Use a mapping to exclude fields from
the result (e.g. projection={'_id': False}).
- `sort` (optional): a list of (key, direction) pairs
specifying the sort order for the query. If multiple documents
match the query, they are sorted and the first is replaced.
- `upsert` (optional): When ``True``, inserts a new document if no
document matches the query. Defaults to ``False``.
- `return_document`: If
:attr:`ReturnDocument.BEFORE` (the default),
returns the original document before it was replaced, or ``None``
if no document matches. If
:attr:`ReturnDocument.AFTER`, returns the replaced
or inserted document.
- `**kwargs` (optional): additional command arguments can be passed
as keyword arguments (for example maxTimeMS can be used with
recent server versions).
.. versionchanged:: 3.4
Added the `collation` option.
.. versionchanged:: 3.2
Respects write concern.
.. warning:: Starting in PyMongo 3.2, this command uses the
:class:`~pymongo.write_concern.WriteConcern` of this
:class:`~pymongo.collection.Collection` when connected to MongoDB >=
3.2. Note that using an elevated write concern with this command may
be slower compared to using the default write concern.
.. versionadded:: 3.0
"""
common.validate_ok_for_replace(replacement)
kwargs['update'] = replacement
return self.__find_and_modify(filter, projection,
sort, upsert, return_document, **kwargs)
|
Finds a single document and replaces it, returning either the
original or the replaced document.
The :meth:`find_one_and_replace` method differs from
:meth:`find_one_and_update` by replacing the document matched by
*filter*, rather than modifying the existing document.
>>> for doc in db.test.find({}):
... print(doc)
...
{u'x': 1, u'_id': 0}
{u'x': 1, u'_id': 1}
{u'x': 1, u'_id': 2}
>>> db.test.find_one_and_replace({'x': 1}, {'y': 1})
{u'x': 1, u'_id': 0}
>>> for doc in db.test.find({}):
... print(doc)
...
{u'y': 1, u'_id': 0}
{u'x': 1, u'_id': 1}
{u'x': 1, u'_id': 2}
:Parameters:
- `filter`: A query that matches the document to replace.
- `replacement`: The replacement document.
- `projection` (optional): A list of field names that should be
returned in the result document or a mapping specifying the fields
to include or exclude. If `projection` is a list "_id" will
always be returned. Use a mapping to exclude fields from
the result (e.g. projection={'_id': False}).
- `sort` (optional): a list of (key, direction) pairs
specifying the sort order for the query. If multiple documents
match the query, they are sorted and the first is replaced.
- `upsert` (optional): When ``True``, inserts a new document if no
document matches the query. Defaults to ``False``.
- `return_document`: If
:attr:`ReturnDocument.BEFORE` (the default),
returns the original document before it was replaced, or ``None``
if no document matches. If
:attr:`ReturnDocument.AFTER`, returns the replaced
or inserted document.
- `**kwargs` (optional): additional command arguments can be passed
as keyword arguments (for example maxTimeMS can be used with
recent server versions).
.. versionchanged:: 3.4
Added the `collation` option.
.. versionchanged:: 3.2
Respects write concern.
.. warning:: Starting in PyMongo 3.2, this command uses the
:class:`~pymongo.write_concern.WriteConcern` of this
:class:`~pymongo.collection.Collection` when connected to MongoDB >=
3.2. Note that using an elevated write concern with this command may
be slower compared to using the default write concern.
.. versionadded:: 3.0
|
def smart_scrubf(df,col_name,error_rate = 0):
""" Scrubs from the front of an 'object' column in a DataFrame
until the scrub would semantically alter the contents of the column. If only a
subset of the elements in the column are scrubbed, then a boolean array indicating which
elements have been scrubbed is appended to the dataframe. Returns the string that was scrubbed.
df - DataFrame
DataFrame to scrub
col_name - string
Name of column to scrub
error_rate - number, default 0
The maximum amount of values this function can ignore while scrubbing, expressed as a
fraction of the total amount of rows in the dataframe.
"""
scrubbed = ""
while True:
valcounts = df[col_name].str[:len(scrubbed)+1].value_counts()
if not len(valcounts):
break
if not valcounts[0] >= (1-error_rate) * _utils.rows(df):
break
scrubbed=valcounts.index[0]
if scrubbed == '':
return None
which = df[col_name].str.startswith(scrubbed)
_basics.col_scrubf(df,col_name,which,len(scrubbed),True)
if not which.all():
new_col_name = _basics.colname_gen(df,"{}_sf-{}".format(col_name,scrubbed))
df[new_col_name] = which
return scrubbed
|
Scrubs from the front of an 'object' column in a DataFrame
until the scrub would semantically alter the contents of the column. If only a
subset of the elements in the column are scrubbed, then a boolean array indicating which
elements have been scrubbed is appended to the dataframe. Returns the string that was scrubbed.
df - DataFrame
DataFrame to scrub
col_name - string
Name of column to scrub
error_rate - number, default 0
The maximum amount of values this function can ignore while scrubbing, expressed as a
fraction of the total amount of rows in the dataframe.
|
def log_url (self, url_data):
"""
Store url check info into the database.
"""
self.writeln(u"insert into %(table)s(urlname,"
"parentname,baseref,valid,result,warning,info,url,line,col,"
"name,checktime,dltime,size,cached,level,modified) values ("
"%(base_url)s,"
"%(url_parent)s,"
"%(base_ref)s,"
"%(valid)d,"
"%(result)s,"
"%(warning)s,"
"%(info)s,"
"%(url)s,"
"%(line)d,"
"%(column)d,"
"%(name)s,"
"%(checktime)d,"
"%(dltime)d,"
"%(size)d,"
"%(cached)d,"
"%(level)d,"
"%(modified)s"
")%(separator)s" %
{'table': self.dbname,
'base_url': sqlify(url_data.base_url),
'url_parent': sqlify((url_data.parent_url)),
'base_ref': sqlify((url_data.base_ref)),
'valid': intify(url_data.valid),
'result': sqlify(url_data.result),
'warning': sqlify(os.linesep.join(x[1] for x in url_data.warnings)),
'info': sqlify(os.linesep.join(url_data.info)),
'url': sqlify(urlutil.url_quote(url_data.url)),
'line': url_data.line,
'column': url_data.column,
'name': sqlify(url_data.name),
'checktime': url_data.checktime,
'dltime': url_data.dltime,
'size': url_data.size,
'cached': 0,
'separator': self.separator,
"level": url_data.level,
"modified": sqlify(self.format_modified(url_data.modified)),
})
self.flush()
|
Store url check info into the database.
|
def graftm_package_is_protein(graftm_package):
'''Return True if this package is an amino acid alignment package, otherwise
False, i.e. it is a nucleotide package. In general it is best to use
'is_protein_package' instead.
'''
found = None
with open(graftm_package.alignment_hmm_path()) as f:
r = f.read().split("\n")
for line in r:
if line=='ALPH DNA':
found = False
break
elif line=='ALPH amino':
found = True
break
if found is None:
raise Exception("Unable to determine whether the HMM was amino acid or dna")
return found
|
Return True if this package is an amino acid alignment package, otherwise
False, i.e. it is a nucleotide package. In general it is best to use
'is_protein_package' instead.
|
def parse(self, line):
"""Parse a line, return a Message.
Parameters
----------
line : str
The line to parse (should not contain the terminating newline
or carriage return).
Returns
-------
msg : Message object
The resulting Message.
"""
# find command type and check validity
if not line:
raise KatcpSyntaxError("Empty message received.")
type_char = line[0]
if type_char not in self.TYPE_SYMBOL_LOOKUP:
raise KatcpSyntaxError("Bad type character %r." % (type_char,))
mtype = self.TYPE_SYMBOL_LOOKUP[type_char]
# find command and arguments name
# (removing possible empty argument resulting from whitespace at end
# of command)
parts = self.WHITESPACE_RE.split(line)
if not parts[-1]:
del parts[-1]
name = parts[0][1:]
arguments = [self._parse_arg(x) for x in parts[1:]]
# split out message id
match = self.NAME_RE.match(name)
if match:
name = match.group('name')
mid = match.group('id')
else:
raise KatcpSyntaxError("Bad message name (and possibly id) %r." %
(name,))
return Message(mtype, name, arguments, mid)
|
Parse a line, return a Message.
Parameters
----------
line : str
The line to parse (should not contain the terminating newline
or carriage return).
Returns
-------
msg : Message object
The resulting Message.
|
def _create_record(self, rtype, name, content):
"""Create record. If it already exists, do nothing."""
if not self._list_records(rtype, name, content):
self._update_records([{}], {
'type': rtype,
'hostname': self._relative_name(name),
'destination': content,
'priority': self._get_lexicon_option('priority'),
})
LOGGER.debug('create_record: %s', True)
return True
|
Create record. If it already exists, do nothing.
|
def get_group_details(self, group):
""" Get the group details. """
result = {}
try:
lgroup = self._get_group(group.name)
lgroup = preload(lgroup, database=self._database)
except ObjectDoesNotExist:
return result
for i, j in lgroup.items():
if j is not None:
result[i] = j
return result
|
Get the group details.
|
def lock_time(logfile):
'''work out gps lock times for a log file'''
print("Processing log %s" % filename)
mlog = mavutil.mavlink_connection(filename)
locked = False
start_time = 0.0
total_time = 0.0
t = None
m = mlog.recv_match(type=['GPS_RAW_INT','GPS_RAW'], condition=args.condition)
if m is None:
return 0
unlock_time = time.mktime(time.localtime(m._timestamp))
while True:
m = mlog.recv_match(type=['GPS_RAW_INT','GPS_RAW'], condition=args.condition)
if m is None:
if locked:
total_time += time.mktime(t) - start_time
if total_time > 0:
print("Lock time : %u:%02u" % (int(total_time)/60, int(total_time)%60))
return total_time
t = time.localtime(m._timestamp)
if m.fix_type >= 2 and not locked:
print("Locked at %s after %u seconds" % (time.asctime(t),
time.mktime(t) - unlock_time))
locked = True
start_time = time.mktime(t)
elif m.fix_type == 1 and locked:
print("Lost GPS lock at %s" % time.asctime(t))
locked = False
total_time += time.mktime(t) - start_time
unlock_time = time.mktime(t)
elif m.fix_type == 0 and locked:
print("Lost protocol lock at %s" % time.asctime(t))
locked = False
total_time += time.mktime(t) - start_time
unlock_time = time.mktime(t)
return total_time
|
work out gps lock times for a log file
|
def get_service_types(self):
"""
Get all service types supported by this cluster.
@return: A list of service types (strings)
"""
resp = self._get_resource_root().get(self._path() + '/serviceTypes')
return resp[ApiList.LIST_KEY]
|
Get all service types supported by this cluster.
@return: A list of service types (strings)
|
def cmd(send, msg, args):
"""Slap somebody.
Syntax: {command} <nick> [for <reason>]
"""
implements = ['the golden gate bridge', 'a large trout', 'a clue-by-four', 'a fresh haddock', 'moon', 'an Itanium', 'fwilson', 'a wombat']
methods = ['around a bit', 'upside the head']
if not msg:
channel = args['target'] if args['target'] != 'private' else args['config']['core']['channel']
with args['handler'].data_lock:
users = list(args['handler'].channels[channel].users())
slap = 'slaps %s %s with %s'
send(slap % (choice(users), choice(methods), choice(implements)), 'action')
else:
reason = ''
method = choice(methods)
implement = ''
msg = msg.split()
slapee = msg[0]
# Basic and stupid NLP!
i = 1
args = False
while i < len(msg):
if msg[i] == 'for':
args = True
if reason:
send("Invalid Syntax: You can only have one for clause!")
return
i += 1
while i < len(msg):
if msg[i] == 'with':
break
reason += " "
reason += msg[i]
i += 1
reason = reason.strip()
elif msg[i] == 'with':
args = True
if implement:
send("Invalid Synatx: You can only have one with clause!")
return
i += 1
while i < len(msg):
if msg[i] == 'for':
break
implement += msg[i]
implement += ' '
i += 1
implement = implement.strip()
elif not args:
slapee += ' ' + msg[i]
i += 1
if not implement:
implement = choice(implements)
if reason:
slap = 'slaps %s %s with %s for %s' % (slapee, method, implement, reason)
else:
slap = 'slaps %s %s with %s' % (slapee, method, implement)
send(slap, 'action')
|
Slap somebody.
Syntax: {command} <nick> [for <reason>]
|
def split_bin_edges(edges, npts=2):
"""Subdivide an array of bins by splitting each bin into ``npts``
subintervals.
Parameters
----------
edges : `~numpy.ndarray`
Bin edge array.
npts : int
Number of intervals into which each bin will be subdivided.
Returns
-------
edges : `~numpy.ndarray`
Subdivided bin edge array.
"""
if npts < 2:
return edges
x = (edges[:-1, None] +
(edges[1:, None] - edges[:-1, None]) *
np.linspace(0.0, 1.0, npts + 1)[None, :])
return np.unique(np.ravel(x))
|
Subdivide an array of bins by splitting each bin into ``npts``
subintervals.
Parameters
----------
edges : `~numpy.ndarray`
Bin edge array.
npts : int
Number of intervals into which each bin will be subdivided.
Returns
-------
edges : `~numpy.ndarray`
Subdivided bin edge array.
|
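A worked example (requires numpy): each bin [0, 1] and [1, 2] is split into two subintervals:

import numpy as np

edges = np.array([0.0, 1.0, 2.0])
split_bin_edges(edges, npts=2)
# -> array([0. , 0.5, 1. , 1.5, 2. ])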
def getkeys(table):
"""
customize by commenting out unwanted keys
"""
keys = []
if table == "ER_expedition":
pass
if table == "ER_citations":
keys.append("er_citation_name")
keys.append("long_authors")
keys.append("year")
keys.append("title")
keys.append("citation_type")
keys.append("doi")
keys.append("journal")
keys.append("volume")
keys.append("pages")
keys.append("book_title")
keys.append("book_editors")
keys.append("publisher")
keys.append("city")
if table == "ER_locations":
keys.append("er_location_name")
keys.append("er_scientist_mail_names")
# keys.append("er_location_alternatives" )
keys.append("location_type")
keys.append("location_begin_lat")
keys.append("location_begin_lon")
# keys.append("location_begin_elevation" )
keys.append("location_end_lat")
keys.append("location_end_lon")
# keys.append("location_end_elevation" )
keys.append("continent_ocean")
keys.append("country")
keys.append("region")
keys.append("plate_block")
keys.append("terrane")
keys.append("tectonic_setting")
# keys.append("er_citation_names")
if table == "ER_Formations":
keys.append("er_formation_name")
keys.append("formation_class")
keys.append("formation_lithology")
keys.append("formation_paleo_environment")
keys.append("formation_thickness")
keys.append("formation_description")
if table == "ER_sections":
keys.append("er_section_name")
keys.append("er_section_alternatives")
keys.append("er_expedition_name")
keys.append("er_location_name")
keys.append("er_formation_name")
keys.append("er_member_name")
keys.append("section_definition")
keys.append("section_class")
keys.append("section_lithology")
keys.append("section_type")
keys.append("section_n")
keys.append("section_begin_lat")
keys.append("section_begin_lon")
keys.append("section_begin_elevation")
keys.append("section_begin_height")
keys.append("section_begin_drill_depth")
keys.append("section_begin_composite_depth")
keys.append("section_end_lat")
keys.append("section_end_lon")
keys.append("section_end_elevation")
keys.append("section_end_height")
keys.append("section_end_drill_depth")
keys.append("section_end_composite_depth")
keys.append("section_azimuth")
keys.append("section_dip")
keys.append("section_description")
keys.append("er_scientist_mail_names")
keys.append("er_citation_names")
if table == "ER_sites":
keys.append("er_location_name")
keys.append("er_site_name")
# keys.append("er_site_alternatives")
# keys.append("er_formation_name")
# keys.append("er_member_name")
# keys.append("er_section_name")
keys.append("er_scientist_mail_names")
keys.append("site_class")
# keys.append("site_type")
# keys.append("site_lithology")
# keys.append("site_height")
# keys.append("site_drill_depth")
# keys.append("site_composite_depth")
# keys.append("site_lithology")
# keys.append("site_description")
keys.append("site_lat")
keys.append("site_lon")
# keys.append("site_location_precision")
# keys.append("site_elevation")
if table == "ER_samples":
keys.append("er_location_name")
keys.append("er_site_name")
# keys.append("er_sample_alternatives")
keys.append("sample_azimuth")
keys.append("sample_dip")
keys.append("sample_bed_dip")
keys.append("sample_bed_dip_direction")
# keys.append("sample_cooling_rate")
# keys.append("sample_type")
# keys.append("sample_lat")
# keys.append("sample_lon")
keys.append("magic_method_codes")
if table == "ER_ages":
# keys.append("er_location_name")
# keys.append("er_site_name")
# keys.append("er_section_name")
# keys.append("er_formation_name")
# keys.append("er_member_name")
# keys.append("er_site_name")
# keys.append("er_sample_name")
# keys.append("er_specimen_name")
# keys.append("er_fossil_name")
# keys.append("er_mineral_name")
# keys.append("tiepoint_name")
keys.append("age")
keys.append("age_sigma")
keys.append("age_unit")
keys.append("age_range_low")
keys.append("age_range_hi")
keys.append("timescale_eon")
keys.append("timescale_era")
keys.append("timescale_period")
keys.append("timescale_epoch")
keys.append("timescale_stage")
keys.append("biostrat_zone")
keys.append("conodont_zone")
keys.append("magnetic_reversal_chron")
keys.append("astronomical_stage")
# keys.append("age_description")
# keys.append("magic_method_codes")
# keys.append("er_timescale_citation_names")
# keys.append("er_citation_names")
if table == "MAGIC_measurements":
keys.append("er_location_name")
keys.append("er_site_name")
keys.append("er_sample_name")
keys.append("er_specimen_name")
keys.append("measurement_positions")
keys.append("treatment_temp")
keys.append("treatment_ac_field")
keys.append("treatment_dc_field")
keys.append("treatment_dc_field_phi")
keys.append("treatment_dc_field_theta")
keys.append("magic_experiment_name")
keys.append("magic_instrument_codes")
keys.append("measurement_temp")
keys.append("magic_method_codes")
keys.append("measurement_inc")
keys.append("measurement_dec")
keys.append("measurement_magn_moment")
keys.append("measurement_csd")
return keys
|
customize by commenting out unwanted keys
|
def save(self, obj):
""" Subclass the save method, to hash ndarray subclass, rather
than pickling them. Off course, this is a total abuse of
the Pickler class.
"""
if isinstance(obj, self.np.ndarray) and not obj.dtype.hasobject:
# Compute a hash of the object
# The update function of the hash requires a c_contiguous buffer.
if obj.shape == ():
# 0d arrays need to be flattened because viewing them as bytes
# raises a ValueError exception.
obj_c_contiguous = obj.flatten()
elif obj.flags.c_contiguous:
obj_c_contiguous = obj
elif obj.flags.f_contiguous:
obj_c_contiguous = obj.T
else:
# Cater for non-single-segment arrays: this creates a
# copy, and thus alleviates this issue.
# XXX: There might be a more efficient way of doing this
obj_c_contiguous = obj.flatten()
# memoryview is not supported for some dtypes, e.g. datetime64, see
# https://github.com/numpy/numpy/issues/4983. The
# workaround is to view the array as bytes before
# taking the memoryview.
self._hash.update(
self._getbuffer(obj_c_contiguous.view(self.np.uint8)))
# We store the class, to be able to distinguish between
# Objects with the same binary content, but different
# classes.
if self.coerce_mmap and isinstance(obj, self.np.memmap):
# We don't make the difference between memmap and
# normal ndarrays, to be able to reload previously
# computed results with memmap.
klass = self.np.ndarray
else:
klass = obj.__class__
# We also return the dtype and the shape, to distinguish
# different views on the same data with different dtypes.
# The object will be pickled by the pickler hashed at the end.
obj = (klass, ('HASHED', obj.dtype, obj.shape, obj.strides))
elif isinstance(obj, self.np.dtype):
# Atomic dtype objects are interned by their default constructor:
# np.dtype('f8') is np.dtype('f8')
# This interning is not maintained by a
# pickle.loads + pickle.dumps cycle, because __reduce__
# uses copy=True in the dtype constructor. This
# non-deterministic behavior causes the internal memoizer
# of the hasher to generate different hash values
# depending on the history of the dtype object.
# To prevent the hash from being sensitive to this, we use
# .descr which is a full (and never interned) description of
# the array dtype according to the numpy doc.
klass = obj.__class__
obj = (klass, ('HASHED', obj.descr))
Hasher.save(self, obj)
|
Subclass the save method to hash ndarray subclasses rather
than pickling them. Of course, this is a total abuse of
the Pickler class.
|
def sync_repo(self, repo_name=None, envs=[], query='/repositories/'):
"""
Sync repository in specified environments
"""
juicer.utils.Log.log_debug(
"Sync Repo %s In: %s" % (repo_name, ",".join(envs)))
data = {
'override_config': {
'verify_checksum': 'true',
'verify_size': 'true'
},
}
for env in envs:
url = "%s%s-%s/actions/sync/" % (query, repo_name, env)
juicer.utils.Log.log_info("%s:", env)
_r = self.connectors[env].post(url, data)
if _r.status_code == Constants.PULP_POST_ACCEPTED:
juicer.utils.Log.log_info("`%s` sync scheduled" % repo_name)
else:
_r.raise_for_status()
return True
|
Sync repository in specified environments
|
def save_plot(self, filename, img_format="eps", ylim=None, units="thz"):
"""
Save matplotlib plot to a file.
Args:
filename: Filename to write to.
img_format: Image format to use. Defaults to EPS.
ylim: Specifies the y-axis limits.
units: units for the frequencies. Accepted values thz, ev, mev, ha, cm-1, cm^-1.
"""
plt = self.get_plot(ylim=ylim, units=units)
plt.savefig(filename, format=img_format)
plt.close()
|
Save matplotlib plot to a file.
Args:
filename: Filename to write to.
img_format: Image format to use. Defaults to EPS.
ylim: Specifies the y-axis limits.
units: units for the frequencies. Accepted values thz, ev, mev, ha, cm-1, cm^-1.
|
def set_tlsext_use_srtp(self, profiles):
"""
Enable support for negotiating SRTP keying material.
:param bytes profiles: A colon delimited list of protection profile
names, like ``b'SRTP_AES128_CM_SHA1_80:SRTP_AES128_CM_SHA1_32'``.
:return: None
"""
if not isinstance(profiles, bytes):
raise TypeError("profiles must be a byte string.")
_openssl_assert(
_lib.SSL_CTX_set_tlsext_use_srtp(self._context, profiles) == 0
)
|
Enable support for negotiating SRTP keying material.
:param bytes profiles: A colon delimited list of protection profile
names, like ``b'SRTP_AES128_CM_SHA1_80:SRTP_AES128_CM_SHA1_32'``.
:return: None
|
def _index_action(self, payload):
"""Bulk index action.
:param payload: Decoded message body.
:returns: Dictionary defining an Elasticsearch bulk 'index' action.
"""
record = Record.get_record(payload['id'])
index, doc_type = self.record_to_index(record)
return {
'_op_type': 'index',
'_index': index,
'_type': doc_type,
'_id': str(record.id),
'_version': record.revision_id,
'_version_type': self._version_type,
'_source': self._prepare_record(record, index, doc_type),
}
|
Bulk index action.
:param payload: Decoded message body.
:returns: Dictionary defining an Elasticsearch bulk 'index' action.
|
def findFile(input):
"""Search a directory for full filename with optional path."""
# If no input name is provided, default to returning 'no'(FALSE)
if not input:
return no
# We use 'osfn' here to insure that any IRAF variables are
# expanded out before splitting out the path...
_fdir, _fname = os.path.split(osfn(input))
if _fdir == '':
_fdir = os.curdir
try:
flist = os.listdir(_fdir)
except OSError:
# handle when the requested file is on a disconnected network store
return no
_root, _extn = parseFilename(_fname)
found = no
for name in flist:
if name == _root:
# Check to see if given extension, if any, exists
if _extn is None:
found = yes
continue
else:
_split = _extn.split(',')
_extnum = None
_extver = None
if _split[0].isdigit():
_extname = None
_extnum = int(_split[0])
else:
_extname = _split[0]
if len(_split) > 1:
_extver = int(_split[1])
else:
_extver = 1
f = openImage(_root)
f.close()
if _extnum is not None:
if _extnum < len(f):
found = yes
del f
continue
else:
del f
else:
_fext = findExtname(f, _extname, extver=_extver)
if _fext is not None:
found = yes
del f
continue
return found
|
Search a directory for full filename with optional path.
|
def breakdown_tt2000(tt2000, to_np=None): # @NoSelf
"""
Breaks down the epoch(s) into UTC components.
For CDF_EPOCH:
they are 7 date/time components: year, month, day,
hour, minute, second, and millisecond
For CDF_EPOCH16:
they are 10 date/time components: year, month, day,
hour, minute, second, millisecond, microsecond,
nanosecond, and picosecond.
For TT2000:
they are 9 date/time components: year, month, day,
hour, minute, second, millisecond, microsecond,
nanosecond.
Set to_np to True if the result should be returned as a numpy array.
"""
if (isinstance(tt2000, int) or isinstance(tt2000, np.int64)):
new_tt2000 = [tt2000]
elif (isinstance(tt2000, list) or isinstance(tt2000, tuple) or
isinstance(tt2000, np.ndarray)):
new_tt2000 = tt2000
else:
print('Bad input data')
return None
count = len(new_tt2000)
toutcs = []
for x in range(0, count):
nanoSecSinceJ2000 = new_tt2000[x]
toPlus = 0.0
t3 = nanoSecSinceJ2000
datx = CDFepoch._LeapSecondsfromJ2000(nanoSecSinceJ2000)
if (nanoSecSinceJ2000 > 0):
secSinceJ2000 = int(nanoSecSinceJ2000/CDFepoch.SECinNanoSecsD)
nansec = int(nanoSecSinceJ2000 - secSinceJ2000 *
CDFepoch.SECinNanoSecs)
secSinceJ2000 = secSinceJ2000 - 32
secSinceJ2000 = secSinceJ2000 + 43200
nansec = nansec - 184000000
else:
nanoSecSinceJ2000 = nanoSecSinceJ2000 + CDFepoch.T12hinNanoSecs
nanoSecSinceJ2000 = nanoSecSinceJ2000 - CDFepoch.dTinNanoSecs
secSinceJ2000 = int(nanoSecSinceJ2000/CDFepoch.SECinNanoSecsD)
nansec = int(nanoSecSinceJ2000 - secSinceJ2000 *
CDFepoch.SECinNanoSecs)
if (nansec < 0):
nansec = CDFepoch.SECinNanoSecs + nansec
secSinceJ2000 = secSinceJ2000 - 1
t2 = secSinceJ2000 * CDFepoch.SECinNanoSecs + nansec
if (datx[0] > 0.0):
# post-1972...
secSinceJ2000 = secSinceJ2000 - int(datx[0])
epoch = CDFepoch.J2000Since0AD12hSec + secSinceJ2000
if (datx[1] == 0.0):
date1 = CDFepoch._EPOCHbreakdownTT2000(epoch)
else:
epoch = epoch - 1
date1 = CDFepoch._EPOCHbreakdownTT2000(epoch)
date1[5] = date1[5] + 1
ye1 = date1[0]
mo1 = date1[1]
da1 = date1[2]
ho1 = date1[3]
mi1 = date1[4]
se1 = date1[5]
else:
# pre-1972...
epoch = secSinceJ2000 + CDFepoch.J2000Since0AD12hSec
xdate1 = CDFepoch._EPOCHbreakdownTT2000(epoch)
xdate1.append(0)
xdate1.append(0)
xdate1.append(nansec)
tmpNanosecs = CDFepoch.compute_tt2000(xdate1)
if (tmpNanosecs != t3):
dat0 = CDFepoch._LeapSecondsfromYMD(xdate1[0],
xdate1[1], xdate1[2])
tmpx = t2 - int(dat0 * CDFepoch.SECinNanoSecs)
tmpy = int(float(tmpx/CDFepoch.SECinNanoSecsD))
nansec = int(tmpx - tmpy * CDFepoch.SECinNanoSecs)
if (nansec < 0):
nansec = CDFepoch.SECinNanoSecs + nansec
tmpy = tmpy - 1
epoch = tmpy + CDFepoch.J2000Since0AD12hSec
xdate1 = CDFepoch._EPOCHbreakdownTT2000(epoch)
xdate1.append(0)
xdate1.append(0)
xdate1.append(nansec)
tmpNanosecs = CDFepoch.compute_tt2000(xdate1)
if (tmpNanosecs != t3):
dat0 = CDFepoch._LeapSecondsfromYMD(xdate1[0],
xdate1[1], xdate1[2])
tmpx = t2 - int(dat0 * CDFepoch.SECinNanoSecs)
tmpy = int((1.0*tmpx)/CDFepoch.SECinNanoSecsD)
nansec = int(tmpx - tmpy * CDFepoch.SECinNanoSecs)
if (nansec < 0):
nansec = CDFepoch.SECinNanoSecs + nansec
tmpy = tmpy - 1
epoch = tmpy + CDFepoch.J2000Since0AD12hSec
xdate1 = CDFepoch._EPOCHbreakdownTT2000(epoch)
xdate1.append(0)
xdate1.append(0)
xdate1.append(nansec)
tmpNanosecs = CDFepoch.compute_tt2000(xdate1)
if (tmpNanosecs != t3):
dat0 = CDFepoch._LeapSecondsfromYMD(xdate1[0],
xdate1[1],
xdate1[2])
tmpx = t2 - int(dat0 * CDFepoch.SECinNanoSecs)
tmpy = int((1.0*tmpx)/CDFepoch.SECinNanoSecsD)
nansec = int(tmpx - tmpy * CDFepoch.SECinNanoSecs)
if (nansec < 0):
nansec = CDFepoch.SECinNanoSecs + nansec
tmpy = tmpy - 1
epoch = tmpy + CDFepoch.J2000Since0AD12hSec
# One more determination
xdate1 = CDFepoch._EPOCHbreakdownTT2000(epoch)
ye1 = int(xdate1[0])
mo1 = int(xdate1[1])
da1 = int(xdate1[2])
ho1 = int(xdate1[3])
mi1 = int(xdate1[4])
se1 = int(xdate1[5])
ml1 = int(nansec / 1000000)
tmp1 = nansec - 1000000 * ml1
if (ml1 > 1000):
ml1 = ml1 - 1000
se1 = se1 + 1
ma1 = int(tmp1 / 1000)
na1 = int(tmp1 - 1000 * ma1)
datetime = []
datetime.append(ye1)
datetime.append(mo1)
datetime.append(da1)
datetime.append(ho1)
datetime.append(mi1)
datetime.append(se1)
datetime.append(ml1)
datetime.append(ma1)
datetime.append(na1)
if (count == 1):
if (to_np == None):
return datetime
else:
return np.array(datetime)
else:
toutcs.append(datetime)
if (to_np == None):
return toutcs
else:
return np.array(toutcs)
|
Breaks down the epoch(s) into UTC components.
For CDF_EPOCH:
they are 7 date/time components: year, month, day,
hour, minute, second, and millisecond
For CDF_EPOCH16:
they are 10 date/time components: year, month, day,
hour, minute, second, millisecond, microsecond,
nanosecond, and picosecond.
For TT2000:
they are 9 date/time components: year, month, day,
hour, minute, second, millisecond, microsecond,
nanosecond.
Set to_np to True if the result should be returned as a numpy array.
|
def diy(expression_data,
regressor_type,
regressor_kwargs,
gene_names=None,
tf_names='all',
client_or_address='local',
early_stop_window_length=EARLY_STOP_WINDOW_LENGTH,
limit=None,
seed=None,
verbose=False):
"""
:param expression_data: one of:
* a pandas DataFrame (rows=observations, columns=genes)
* a dense 2D numpy.ndarray
* a sparse scipy.sparse.csc_matrix
:param regressor_type: string. One of: 'RF', 'GBM', 'ET'. Case insensitive.
:param regressor_kwargs: a dictionary of key-value pairs that configures the regressor.
:param gene_names: optional list of gene names (strings). Required when a (dense or sparse) matrix is passed as
'expression_data' instead of a DataFrame.
:param tf_names: optional list of transcription factors. If None or 'all', the list of gene_names will be used.
:param early_stop_window_length: early stopping window length.
:param client_or_address: one of:
* None or 'local': a new Client(LocalCluster()) will be used to perform the computation.
* string address: a new Client(address) will be used to perform the computation.
* a Client instance: the specified Client instance will be used to perform the computation.
:param limit: optional number (int) of top regulatory links to return. Default None.
:param seed: optional random seed for the regressors. Default None, in which case a random seed is used.
:param verbose: print info.
:return: a pandas DataFrame['TF', 'target', 'importance'] representing the inferred gene regulatory links.
"""
if verbose:
print('preparing dask client')
client, shutdown_callback = _prepare_client(client_or_address)
try:
if verbose:
print('parsing input')
expression_matrix, gene_names, tf_names = _prepare_input(expression_data, gene_names, tf_names)
if verbose:
print('creating dask graph')
graph = create_graph(expression_matrix,
gene_names,
tf_names,
client=client,
regressor_type=regressor_type,
regressor_kwargs=regressor_kwargs,
early_stop_window_length=early_stop_window_length,
limit=limit,
seed=seed)
if verbose:
print('{} partitions'.format(graph.npartitions))
print('computing dask graph')
return client \
.compute(graph, sync=True) \
.sort_values(by='importance', ascending=False)
finally:
shutdown_callback(verbose)
if verbose:
print('finished')
|
:param expression_data: one of:
* a pandas DataFrame (rows=observations, columns=genes)
* a dense 2D numpy.ndarray
* a sparse scipy.sparse.csc_matrix
:param regressor_type: string. One of: 'RF', 'GBM', 'ET'. Case insensitive.
:param regressor_kwargs: a dictionary of key-value pairs that configures the regressor.
:param gene_names: optional list of gene names (strings). Required when a (dense or sparse) matrix is passed as
'expression_data' instead of a DataFrame.
:param tf_names: optional list of transcription factors. If None or 'all', the list of gene_names will be used.
:param early_stop_window_length: early stopping window length.
:param client_or_address: one of:
* None or 'local': a new Client(LocalCluster()) will be used to perform the computation.
* string address: a new Client(address) will be used to perform the computation.
* a Client instance: the specified Client instance will be used to perform the computation.
:param limit: optional number (int) of top regulatory links to return. Default None.
:param seed: optional random seed for the regressors. Default None, in which case a random seed is used.
:param verbose: print info.
:return: a pandas DataFrame['TF', 'target', 'importance'] representing the inferred gene regulatory links.
|
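A minimal invocation sketch; the input file name, TF names, and regressor kwargs are illustrative, not library defaults:
import pandas as pd

ex_matrix = pd.read_csv('expression.tsv', sep='\t')  # hypothetical input file
network = diy(expression_data=ex_matrix,
              regressor_type='RF',
              regressor_kwargs={'n_estimators': 100, 'max_features': 'sqrt'},
              tf_names=['TF1', 'TF2'])  # hypothetical TF names
print(network.head())  # DataFrame['TF', 'target', 'importance']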
def set_coords(self, value):
"""Set all the images contained in the animation to the specified value."""
self.__coords = value
for image in self.images:
image.coords = value
|
Set the coordinates of all the images contained in the animation to the specified value.
|
def _parse_qual(self, data):
"""Parse qual attribute of the old HEPData format
example qual:
*qual: RE : P P --> Z0 Z0 X
:param data: data to be parsed
:type data: str
"""
qual_list = []
headers = data.split(':')
name = headers[0].strip()
name = re.split(' IN ', name, flags=re.I) # ignore case
units = None
if len(name) > 1:
units = name[1].strip()
name = name[0].strip()
if len(headers) < 2:
raise BadFormat("*qual line must contain a name and values: %s" % data)
for header in headers[1:]:
xheader = {'name': name}
if units:
xheader['units'] = units
xheader['value'] = header.strip()
qual_list.append(xheader)
# extract energy if SQRT(S) is one of the qualifiers
if name.startswith('SQRT(S)') and units and units.lower() == 'gev':
energies = re.split(' TO ', xheader['value'], flags=re.I)
for energy in energies:
try:
energy = float(energy)
self.set_of_energies.add(energy)
except ValueError:
pass
self.current_table.qualifiers.append(qual_list)
|
Parse qual attribute of the old HEPData format
example qual:
*qual: RE : P P --> Z0 Z0 X
:param data: data to be parsed
:type data: str
|
def get_rank_based_enrichment(
self,
ranked_genes: List[str],
pval_thresh: float = 0.05,
X_frac: float = 0.25,
X_min: int = 5,
L: int = None,
adjust_pval_thresh: bool = True,
escore_pval_thresh: float = None,
exact_pval: str = 'always',
gene_set_ids: List[str] = None,
table: np.ndarray = None) -> RankBasedGSEResult:
"""Test for gene set enrichment at the top of a ranked list of genes.
This function uses the XL-mHG test to identify enriched gene sets.
This function also calculates XL-mHG E-scores for the enriched gene
sets, using ``escore_pval_thresh`` as the p-value threshold "psi".
Parameters
----------
ranked_genes : list of str
The ranked list of gene IDs.
pval_thresh : float, optional
The p-value threshold used to determine significance.
See also ``adjust_pval_thresh``. [0.05]
X_frac : float, optional
The min. fraction of genes from a gene set required for enrichment. [0.25]
X_min : int, optional
The min. no. of genes from a gene set required for enrichment. [5]
L : int, optional
The lowest cutoff to test for enrichment. If ``None``,
int(0.25*(no. of genes)) will be used. [None]
adjust_pval_thresh : bool, optional
Whether to adjust the p-value threshold for multiple testing,
using the Bonferroni method. [True]
escore_pval_thresh : float or None, optional
The "psi" p-value threshold used in calculating E-scores. If
``None``, will be set to p-value threshold. [None]
exact_pval : str
Choices are: "always", "if_significant", "if_necessary". Parameter
will be passed to `xlmhg.get_xlmhg_test_result`. ["always"]
gene_set_ids : list of str or None, optional
A list of gene set IDs to specify which gene sets should be tested for enrichment. If ``None``, all gene sets will be tested. [None]
table : 2-dim numpy.ndarray of type numpy.longdouble or None, optional
The dynamic programming table used by the algorithm for calculating XL-mHG p-values. Passing this avoids memory re-allocation when calling this function repetitively. [None]
Returns
-------
list of `RankBasedGSEResult`
A list of all significantly enriched gene sets.
"""
# make sure X_frac is a float (e.g., if specified as 0)
X_frac = float(X_frac)
if table is not None:
if not np.issubdtype(table.dtype, np.longdouble):
raise TypeError('The provided array for storing the dynamic '
'programming table must be of type '
'"longdouble"!')
if L is None:
L = int(len(ranked_genes)/4.0)
gene_set_coll = self._gene_set_coll
gene_memberships = self._gene_memberships
# postpone this
if escore_pval_thresh is None:
# if no separate E-score p-value threshold is specified, use the
# p-value threshold (this results in conservative E-scores)
logger.warning('No E-score p-value threshold supplied. '
'The E-score p-value threshold will be set to the '
'global significance threshold. This will result '
'in conservative E-scores.')
# test only some terms?
if gene_set_ids is not None:
gs_indices = np.int64([self._gene_set_coll.index(id_)
for id_ in gene_set_ids])
gene_sets = [gene_set_coll[id_] for id_ in gene_set_ids]
gene_set_coll = GeneSetCollection(gene_sets)
gene_memberships = gene_memberships[:, gs_indices] # not a view!
# reorder rows in annotation matrix to match the given gene ranking
# also exclude genes not in the ranking
unknown = 0
L_adj = L
sel = []
filtered_genes = []
logger.debug('Looking up indices for %d genes...' % len(ranked_genes))
for i, g in enumerate(ranked_genes):
try:
idx = self._gene_indices[g]
except KeyError:
unknown += 1
# adjust L if the gene was above the original L cutoff
if i < L:
L_adj -= 1
else:
sel.append(idx)
filtered_genes.append(g)
sel = np.int64(sel)
logger.debug('Adjusted L: %d', L_adj)
# the following also copies the data (not a view)
gene_memberships = gene_memberships[sel, :]
N, m = gene_memberships.shape
if unknown > 0:
# Some genes in the ranked list were unknown (i.e., not present in
# the specified genome).
logger.warning('%d / %d unknown genes (%.1f %%), will be ignored.',
unknown, len(ranked_genes),
100 * (unknown / float(len(ranked_genes))))
# Determine the number of gene set genes above the L'th cutoff,
# for all gene sets. This quantity is useful, because we don't need
# to perform any more work for gene sets that have less than X genes
# above the cutoff.
k_above_L = np.sum(gene_memberships[:L_adj, :], axis=0, dtype=np.int64)
# Determine the number of genes below the L'th cutoff, for all gene
# sets.
k_below_L = np.sum(gene_memberships[L_adj:, :], axis=0, dtype=np.int64)
# Determine the total number K of genes in each gene set that are
# present in the ranked list (this is equal to k_above_L + k_below_L)
K_vec = k_above_L + k_below_L
# Determine the largest K across all gene sets.
K_max = np.amax(K_vec)
# Determine X for all gene sets.
X = np.amax(
np.c_[np.tile(X_min, m), np.int64(np.ceil(X_frac * K_vec))],
axis=1)
# Determine the number of tests (we do not conduct a test if the
# total number of gene set genes in the ranked list is below X).
num_tests = np.sum(K_vec-X >= 0)
logger.info('Conducting %d tests.', num_tests)
# determine Bonferroni-corrected p-value, if desired
final_pval_thresh = pval_thresh
if adjust_pval_thresh and num_tests > 0:
final_pval_thresh /= float(num_tests)
logger.info('Using Bonferroni-corrected p-value threshold: %.1e',
final_pval_thresh)
if escore_pval_thresh is None:
escore_pval_thresh = final_pval_thresh
elif escore_pval_thresh < final_pval_thresh:
logger.warning('The E-score p-value threshold is smaller than '
'the p-value threshold. Setting E-score p-value '
'threshold to the p-value threshold.')
escore_pval_thresh = final_pval_thresh
# Prepare the matrix that holds the dynamic programming table for
# the calculation of the XL-mHG p-value.
if table is None:
table = np.empty((K_max+1, N+1), dtype=np.longdouble)
else:
if table.shape[0] < K_max+1 or table.shape[1] < N+1:
raise ValueError(
'The supplied array is too small (%d x %d) to hold the '
'entire dynamic programming table. The required size is '
'%d x %d (rows x columns).'
% (table.shape[0], table.shape[1], K_max+1, N+1))
# find enriched GO terms
# logger.info('Testing %d gene sets for enrichment...', m)
logger.debug('(N=%d, X_frac=%.2f, X_min=%d, L=%d; K_max=%d)',
len(ranked_genes), X_frac, X_min, L, K_max)
enriched = []
num_tests = 0 # number of tests conducted
for j in range(m):
# determine gene set-specific value for X
X = max(X_min, int(ceil(X_frac * float(K_vec[j]))))
# Determine significance of gene set enrichment using the XL-mHG
# test (only if there are at least X gene set genes in the list).
if K_vec[j] >= X:
num_tests += 1
# We only need to perform the XL-mHG test if there are enough
# gene set genes above the L'th cutoff (otherwise, pval = 1.0).
if k_above_L[j] >= X:
# perform test
# Determine the ranks of the gene set genes in the
# ranked list.
indices = np.uint16(np.nonzero(gene_memberships[:, j])[0])
res = xlmhg.get_xlmhg_test_result(
N, indices, X, L, pval_thresh=final_pval_thresh,
escore_pval_thresh=escore_pval_thresh,
exact_pval=exact_pval, table=table)
# check if gene set is significantly enriched
if res.pval <= final_pval_thresh:
# generate RankedGSEResult
ind_genes = [ranked_genes[i] for i in indices]
gse_result = RankBasedGSEResult(
gene_set_coll[j], N, indices, ind_genes,
X, L, res.stat, res.cutoff, res.pval,
escore_pval_thresh=escore_pval_thresh
)
enriched.append(gse_result)
# report results
q = len(enriched)
ignored = m - num_tests
if ignored > 0:
logger.debug('%d / %d gene sets (%.1f%%) had less than X genes '
'annotated with them and were ignored.',
ignored, m, 100 * (ignored / float(m)))
logger.info('%d / %d gene sets were found to be significantly '
'enriched (p-value <= %.1e).', q, m, final_pval_thresh)
return enriched
|
Test for gene set enrichment at the top of a ranked list of genes.
This function uses the XL-mHG test to identify enriched gene sets.
This function also calculates XL-mHG E-scores for the enriched gene
sets, using ``escore_pval_thresh`` as the p-value threshold "psi".
Parameters
----------
ranked_genes : list of str
The ranked list of gene IDs.
pval_thresh : float, optional
The p-value threshold used to determine significance.
See also ``adjust_pval_thresh``. [0.05]
X_frac : float, optional
The min. fraction of genes from a gene set required for enrichment. [0.25]
X_min : int, optional
The min. no. of genes from a gene set required for enrichment. [5]
L : int, optional
The lowest cutoff to test for enrichment. If ``None``,
int(0.25*(no. of genes)) will be used. [None]
adjust_pval_thresh : bool, optional
Whether to adjust the p-value threshold for multiple testing,
using the Bonferroni method. [True]
escore_pval_thresh : float or None, optional
The "psi" p-value threshold used in calculating E-scores. If
``None``, will be set to p-value threshold. [None]
exact_pval : str
Choices are: "always", "if_significant", "if_necessary". Parameter
will be passed to `xlmhg.get_xlmhg_test_result`. ["always"]
gene_set_ids : list of str or None, optional
A list of gene set IDs to specify which gene sets should be tested for enrichment. If ``None``, all gene sets will be tested. [None]
table : 2-dim numpy.ndarray of type numpy.longdouble or None, optional
The dynamic programming table used by the algorithm for calculating XL-mHG p-values. Passing this avoids memory re-allocation when calling this function repetitively. [None]
Returns
-------
list of `RankBasedGSEResult`
A list of all significantly enriched gene sets.
|
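A minimal usage sketch; `analysis` is assumed to be an instance of the enclosing enrichment-analysis class with gene sets already loaded, and the gene IDs are placeholders:
ranked = ['GENE1', 'GENE2', 'GENE3']  # hypothetical ranking, best first
results = analysis.get_rank_based_enrichment(ranked, pval_thresh=0.05,
                                             X_frac=0.25, X_min=5)
for r in results:  # each r is a RankBasedGSEResult
    print(r)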
async def create_rev_reg(self, rr_id: str, rr_size: int = None) -> None:
"""
Create revocation registry artifacts and new tails file (with association to
corresponding revocation registry identifier via symbolic link name)
for input revocation registry identifier. Symbolic link presence signals completion.
If revocation registry builder operates in a process external to its Issuer's,
target directory is hopper directory.
Raise WalletState for closed wallet.
:param rr_id: revocation registry identifier
:param rr_size: revocation registry size (defaults to 64)
"""
LOGGER.debug('RevRegBuilder.create_rev_reg >>> rr_id: %s, rr_size: %s', rr_id, rr_size)
if not self.wallet.handle:
LOGGER.debug('RevRegBuilder.create_rev_reg <!< Wallet %s is closed', self.name)
raise WalletState('Wallet {} is closed'.format(self.name))
if not ok_rev_reg_id(rr_id):
LOGGER.debug('RevRegBuilder.create_rev_reg <!< Bad rev reg id %s', rr_id)
raise BadIdentifier('Bad rev reg id {}'.format(rr_id))
rr_size = rr_size or 64
(cd_id, tag) = rev_reg_id2cred_def_id_tag(rr_id)
dir_tails = self.dir_tails_top(rr_id)
dir_target = self.dir_tails_target(rr_id)
if self.external:
try:
makedirs(dir_target, exist_ok=False)
except FileExistsError:
LOGGER.warning(
'RevRegBuilder.create_rev_reg found dir %s, but task not in progress: rebuilding rev reg %s',
dir_target,
rr_id)
rmtree(dir_target)
makedirs(dir_target, exist_ok=False)
LOGGER.info('Creating revocation registry (capacity %s) for rev reg id %s', rr_size, rr_id)
tails_writer_handle = await blob_storage.open_writer(
'default',
json.dumps({
'base_dir': dir_target,
'uri_pattern': ''
}))
(created_rr_id, rr_def_json, rr_ent_json) = await anoncreds.issuer_create_and_store_revoc_reg(
self.wallet.handle,
self.did,
'CL_ACCUM',
tag,
cd_id,
json.dumps({
'max_cred_num': rr_size,
'issuance_type': 'ISSUANCE_BY_DEFAULT'
}),
tails_writer_handle)
tails_hash = basename(Tails.unlinked(dir_target).pop())
with open(join(dir_target, 'rr_def.json'), 'w') as rr_def_fh:
print(rr_def_json, file=rr_def_fh)
with open(join(dir_target, 'rr_ent.json'), 'w') as rr_ent_fh:
print(rr_ent_json, file=rr_ent_fh)
Tails.associate(dir_tails, created_rr_id, tails_hash) # associate last: symlink signals completion
LOGGER.debug('RevRegBuilder.create_rev_reg <<<')
|
Create revocation registry artifacts and new tails file (with association to
corresponding revocation registry identifier via symbolic link name)
for input revocation registry identifier. Symbolic link presence signals completion.
If revocation registry builder operates in a process external to its Issuer's,
target directory is hopper directory.
Raise WalletState for closed wallet.
:param rr_id: revocation registry identifier
:param rr_size: revocation registry size (defaults to 64)
|
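A usage sketch; rrb is assumed to be an already-opened RevRegBuilder with an open wallet, and rr_id a valid revocation registry identifier:
# inside an async context
await rrb.create_rev_reg(rr_id, rr_size=128)  # rr_size defaults to 64 if omitted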
def search(self, search, **kwargs):
"""
Search for Repository Configurations based on internal or external url, ignoring the protocol and \".git\" suffix. The matching is done using LIKE.
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please define a `callback` function
to be invoked when receiving the response.
>>> def callback_function(response):
>>> pprint(response)
>>>
>>> thread = api.search(search, callback=callback_function)
:param callback function: The callback function
for asynchronous request. (optional)
:param str search: Url part to search for (required)
:param int page_index: Page Index
:param int page_size: Pagination size
:param str sort: Sorting RSQL
:return: RepositoryConfigurationPage
If the method is called asynchronously,
returns the request thread.
"""
kwargs['_return_http_data_only'] = True
if kwargs.get('callback'):
return self.search_with_http_info(search, **kwargs)
else:
(data) = self.search_with_http_info(search, **kwargs)
return data
|
Search for Repository Configurations based on internal or external url, ignoring the protocol and \".git\" suffix. The matching is done using LIKE.
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please define a `callback` function
to be invoked when receiving the response.
>>> def callback_function(response):
>>> pprint(response)
>>>
>>> thread = api.search(search, callback=callback_function)
:param callback function: The callback function
for asynchronous request. (optional)
:param str search: Url part to search for (required)
:param int page_index: Page Index
:param int page_size: Pagination size
:param str sort: Sorting RSQL
:return: RepositoryConfigurationPage
If the method is called asynchronously,
returns the request thread.
|
def convert_str_to_datetime(df, *, column: str, format: str):
"""
Convert string column into datetime column
---
### Parameters
*mandatory :*
- `column` (*str*): name of the column to format
- `format` (*str*): current format of the values (see [available formats](
https://docs.python.org/3/library/datetime.html#strftime-and-strptime-behavior))
"""
df[column] = pd.to_datetime(df[column], format=format)
return df
|
Convert string column into datetime column
---
### Parameters
*mandatory :*
- `column` (*str*): name of the column to format
- `format` (*str*): current format of the values (see [available formats](
https://docs.python.org/3/library/datetime.html#strftime-and-strptime-behavior))
|
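A small worked example (column name and format are illustrative):
import pandas as pd

df = pd.DataFrame({'when': ['2019-01-31', '2019-02-28']})
df = convert_str_to_datetime(df, column='when', format='%Y-%m-%d')
print(df['when'].dtype)  # datetime64[ns]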
def _RegisterProcess(self, process):
"""Registers a process with the engine.
Args:
process (MultiProcessBaseProcess): process.
Raises:
KeyError: if the process is already registered with the engine.
ValueError: if the process is missing.
"""
if process is None:
raise ValueError('Missing process.')
if process.pid in self._processes_per_pid:
raise KeyError(
'Already managing process: {0!s} (PID: {1:d})'.format(
process.name, process.pid))
self._processes_per_pid[process.pid] = process
|
Registers a process with the engine.
Args:
process (MultiProcessBaseProcess): process.
Raises:
KeyError: if the process is already registered with the engine.
ValueError: if the process is missing.
|
def from_plane(cls, plane):
"""
:param plane: cadquery plane instance to base coordinate system on
:type plane: :class:`cadquery.Plane`
:return: duplicate of the given plane, in this class
:rtype: :class:`CoordSystem`
usage example:
.. doctest::
>>> import cadquery
>>> from cqparts.utils.geometry import CoordSystem
>>> obj = cadquery.Workplane('XY').circle(1).extrude(5)
>>> plane = obj.faces(">Z").workplane().plane
>>> isinstance(plane, cadquery.Plane)
True
>>> coord_sys = CoordSystem.from_plane(plane)
>>> isinstance(coord_sys, CoordSystem)
True
>>> coord_sys.origin.z
5.0
"""
return cls(
origin=plane.origin.toTuple(),
xDir=plane.xDir.toTuple(),
normal=plane.zDir.toTuple(),
)
|
:param plane: cadquery plane instance to base coordinate system on
:type plane: :class:`cadquery.Plane`
:return: duplicate of the given plane, in this class
:rtype: :class:`CoordSystem`
usage example:
.. doctest::
>>> import cadquery
>>> from cqparts.utils.geometry import CoordSystem
>>> obj = cadquery.Workplane('XY').circle(1).extrude(5)
>>> plane = obj.faces(">Z").workplane().plane
>>> isinstance(plane, cadquery.Plane)
True
>>> coord_sys = CoordSystem.from_plane(plane)
>>> isinstance(coord_sys, CoordSystem)
True
>>> coord_sys.origin.z
5.0
|
def check_rollout(edits_service, package_name, days):
"""Check if package_name has a release on staged rollout for too long"""
edit = edits_service.insert(body={}, packageName=package_name).execute()
response = edits_service.tracks().get(editId=edit['id'], track='production', packageName=package_name).execute()
releases = response['releases']
for release in releases:
if release['status'] == 'inProgress':
url = 'https://archive.mozilla.org/pub/mobile/releases/{}/SHA512SUMS'.format(release['name'])
resp = requests.head(url)
if resp.status_code != 200:
if resp.status_code != 404: # 404 is expected for release candidates
logger.warning("Could not check %s: %s", url, resp.status_code)
continue
age = time.time() - calendar.timegm(eu.parsedate(resp.headers['Last-Modified']))
if age >= days * DAY:
yield release, age
|
Check if package_name has a release on staged rollout for too long
|
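check_rollout is a generator, so callers iterate over it; a sketch assuming edits_service is an authorized androidpublisher edits() resource and DAY is the seconds-per-day constant used above:
for release, age in check_rollout(edits_service, 'org.mozilla.firefox', days=7):
    print('%s has been in staged rollout for %.1f days' % (release['name'], age / DAY))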
def _require_host_parameter(args, to):
"""
Make sure that the user specified the --host argument.
"""
if not args.host:
sys.stderr.write("--host is required parameter to --%s\n" % to)
sys.exit(1)
|
Make sure that the user specified the --host argument.
|
def addup_fluxes(self):
"""Add up the sum of the fluxes calculated so far.
>>> from hydpy.models.test_v1 import *
>>> parameterstep()
>>> fluxes.fastaccess._q_sum = 1.0
>>> fluxes.q(2.0)
>>> model.addup_fluxes()
>>> fluxes.fastaccess._q_sum
3.0
"""
fluxes = self.sequences.fluxes
for flux in fluxes.numerics:
sum_ = getattr(fluxes.fastaccess, '_%s_sum' % flux.name)
sum_ += flux
if flux.NDIM == 0:
setattr(fluxes.fastaccess, '_%s_sum' % flux.name, sum_)
|
Add up the sum of the fluxes calculated so far.
>>> from hydpy.models.test_v1 import *
>>> parameterstep()
>>> fluxes.fastaccess._q_sum = 1.0
>>> fluxes.q(2.0)
>>> model.addup_fluxes()
>>> fluxes.fastaccess._q_sum
3.0
|
def gen_nf_quick_check(output, ascii_props=False, append=False, prefix=""):
"""Generate quick check properties."""
categories = []
nf = {}
all_chars = ALL_ASCII if ascii_props else ALL_CHARS
file_name = os.path.join(HOME, 'unicodedata', UNIVERSION, 'DerivedNormalizationProps.txt')
with codecs.open(file_name, 'r', 'utf-8') as uf:
for line in uf:
if not line.startswith('#'):
data = line.split('#')[0].split(';')
if len(data) < 2:
continue
if not data[1].strip().lower().endswith('_qc'):
continue
span = create_span([int(i, 16) for i in data[0].strip().split('..')], is_bytes=ascii_props)
if span is None:
continue
name = format_name(data[1][:-3] + 'quickcheck')
subvalue = format_name(data[2])
if name not in nf:
nf[name] = {}
categories.append(name)
if subvalue not in nf[name]:
nf[name][subvalue] = []
nf[name][subvalue].extend(span)
for k1, v1 in nf.items():
temp = set()
for k2 in list(v1.keys()):
temp |= set(v1[k2])
v1['y'] = list(all_chars - temp)
for k1, v1 in nf.items():
for name in list(v1.keys()):
s = set(nf[k1][name])
nf[k1][name] = sorted(s)
# Convert characters values to ranges
char2range(nf, is_bytes=ascii_props)
with codecs.open(output, 'a' if append else 'w', 'utf-8') as f:
if not append:
f.write(HEADER)
for key, value in sorted(nf.items()):
# Write out the Unicode properties
f.write('%s_%s = {\n' % (prefix, key.replace('quickcheck', '_quick_check')))
count = len(value) - 1
i = 0
for k1, v1 in sorted(value.items()):
f.write(' "%s": "%s"' % (k1, v1))
if i == count:
f.write('\n}\n')
else:
f.write(',\n')
i += 1
return categories
|
Generate quick check properties.
|
def _get_changes(cls, diff_dict):
'''
Returns a list of string messages with the differences in a diff dict.
Each inner difference is indented two spaces deeper.
'''
changes_strings = []
for p in sorted(diff_dict.keys()):
if sorted(diff_dict[p].keys()) == ['new', 'old']:
# Some string formatting
old_value = diff_dict[p]['old']
if diff_dict[p]['old'] == cls.NONE_VALUE:
old_value = 'nothing'
elif isinstance(diff_dict[p]['old'], six.string_types):
old_value = '\'{0}\''.format(diff_dict[p]['old'])
elif isinstance(diff_dict[p]['old'], list):
old_value = '\'{0}\''.format(
', '.join(diff_dict[p]['old']))
new_value = diff_dict[p]['new']
if diff_dict[p]['new'] == cls.NONE_VALUE:
new_value = 'nothing'
elif isinstance(diff_dict[p]['new'], six.string_types):
new_value = '\'{0}\''.format(diff_dict[p]['new'])
elif isinstance(diff_dict[p]['new'], list):
new_value = '\'{0}\''.format(', '.join(diff_dict[p]['new']))
changes_strings.append('{0} from {1} to {2}'.format(
p, old_value, new_value))
else:
sub_changes = cls._get_changes(diff_dict[p])
if sub_changes:
changes_strings.append('{0}:'.format(p))
changes_strings.extend([' {0}'.format(c)
for c in sub_changes])
return changes_strings
|
Returns a list of string messages with the differences in a diff dict.
Each inner difference is indented two spaces deeper.
|
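A worked example of the expected input/output shape (values are illustrative):
diff = {
    'enabled': {'old': False, 'new': True},
    'limits': {'cpu': {'old': '1', 'new': '2'}},
}
# cls._get_changes(diff) would return:
# ['enabled from False to True',
#  'limits:',
#  "  cpu from '1' to '2'"]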
def _check_load_parameters(self, **kwargs):
"""Override method for one in resource.py to check partition
The partition cannot be included as a parameter to load a guest.
Raise an exception if a consumer gives the partition parameter.
:raises: DisallowedReadParameter
"""
if 'partition' in kwargs:
msg = "'partition' is not allowed as a load parameter. Vcmp " \
"guests are accessed by name."
raise DisallowedReadParameter(msg)
super(Guest, self)._check_load_parameters(**kwargs)
|
Override method for one in resource.py to check partition
The partition cannot be included as a parameter to load a guest.
Raise an exception if a consumer gives the partition parameter.
:raises: DisallowedReadParameter
|
def key_rule(self, regex, verifier):
"""Add a rule with a pattern that should apply to all keys.
Any key not explicitly listed in an add_required or add_optional rule
must match ONE OF the rules given in a call to key_rule().
So these rules are all OR'ed together.
In this case you should pass a raw string specifying a regex that is
used to determine if the rule is used to check a given key.
Args:
regex (str): The regular expression used to match the rule or None
if this should apply to all
verifier (Verifier): The verification rule
"""
if regex is not None:
regex = re.compile(regex)
self._additional_key_rules.append((regex, verifier))
|
Add a rule with a pattern that should apply to all keys.
Any key not explicitly listed in an add_required or add_optional rule
must match ONE OF the rules given in a call to key_rule().
So these rules are all OR'ed together.
In this case you should pass a raw string specifying a regex that is
used to determine if the rule is used to check a given key.
Args:
regex (str): The regular expression used to match the rule or None
if this should apply to all
verifier (Verifier): The verification rule
|
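A usage sketch; schema is assumed to be an instance of the enclosing verifier class and string_verifier a hypothetical Verifier instance:
# keys starting with 'x_' that have no explicit rule must pass string_verifier
schema.key_rule(r'^x_', string_verifier)
# a catch-all rule for any remaining key
schema.key_rule(None, string_verifier)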
def get_word_before_cursor(self, WORD=False):
"""
Give the word before the cursor.
If we have whitespace before the cursor this returns an empty string.
"""
if self.text_before_cursor[-1:].isspace():
return ''
else:
return self.text_before_cursor[self.find_start_of_previous_word(WORD=WORD):]
|
Give the word before the cursor.
If we have whitespace before the cursor this returns an empty string.
|
def _container_start_handler_factory(ion_type, before_yield=lambda c, ctx: None):
"""Generates handlers for tokens that begin with container start characters.
Args:
ion_type (IonType): The type of this container.
before_yield (Optional[callable]): Called at initialization. Accepts the first character's ordinal and the
current context; performs any necessary initialization actions.
"""
assert ion_type.is_container
@coroutine
def container_start_handler(c, ctx):
before_yield(c, ctx)
yield
yield ctx.event_transition(IonEvent, IonEventType.CONTAINER_START, ion_type, value=None)
return container_start_handler
|
Generates handlers for tokens that begin with container start characters.
Args:
ion_type (IonType): The type of this container.
before_yield (Optional[callable]): Called at initialization. Accepts the first character's ordinal and the
current context; performs any necessary initialization actions.
|
def genfirstvalues(cursor: Cursor, arraysize: int = 1000) \
-> Generator[Any, None, None]:
"""
Generate the first value in each row.
Args:
cursor: the cursor
arraysize: split fetches into chunks of this many records
Yields:
the first value of each row
"""
return (row[0] for row in genrows(cursor, arraysize))
|
Generate the first value in each row.
Args:
cursor: the cursor
arraysize: split fetches into chunks of this many records
Yields:
the first value of each row
|
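A usage sketch with sqlite3 (table and query are illustrative); note genfirstvalues relies on the genrows helper defined elsewhere in the same module:
import sqlite3

conn = sqlite3.connect(':memory:')
conn.execute('CREATE TABLE t (id INTEGER, label TEXT)')
conn.executemany('INSERT INTO t VALUES (?, ?)', [(1, 'a'), (2, 'b')])
cur = conn.execute('SELECT id, label FROM t')
print(list(genfirstvalues(cur)))  # [1, 2]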
def create_values(self, value_names):
"""Read original values from the settings or the defaults.
Parameters
----------
value_names : list of str
list of value names to read
Returns
-------
dict
dictionary with the value names as keys
"""
output = {}
for value_name in value_names:
output[value_name] = read_settings(self.widget, value_name)
return output
|
Read original values from the settings or the defaults.
Parameters
----------
value_names : list of str
list of value names to read
Returns
-------
dict
dictionary with the value names as keys
|
def update_clock(self, dt):
"""This method is called by the ClockApp whenever the timer fires
to update the clock. `dt` is a timezone-aware datetime object.
"""
dt = dt.astimezone(self.tzinfo)
fmt = "%H:%M"
if self.show_seconds:
fmt = "%H:%M:%S"
self.time_txt.text = dt.strftime(fmt)
suppl_text = "{0} {1}".format(dt.strftime("%Y-%m-%d"), self.timezone)
self.suppl_txt.text = suppl_text
self.viewer.redraw(whence=3)
|
This method is called by the ClockApp whenever the timer fires
to update the clock. `dt` is a timezone-aware datetime object.
|
def _elements_to_dict(data, position, obj_end, opts, subdocument=None):
"""Decode a BSON document."""
if type(opts.document_class) == tuple:
result = opts.document_class[0](**opts.document_class[1]) if not subdocument else dict()
else:
result = opts.document_class() if not subdocument else dict()
end = obj_end - 1
while position < end:
(key, value, position) = _element_to_dict(data, position, obj_end, opts)
result[key] = value
return result
|
Decode a BSON document.
|
def shutdown(self, hub=True, targets='all', block=False):
"""Shutdown the executor, including all workers and controllers.
The interface documentation for IPP is `here <http://ipyparallel.readthedocs.io/en/latest/api/ipyparallel.html#ipyparallel.Client.shutdown>`_
Kwargs:
- hub (Bool): Whether the hub should be shutdown, Default:True,
- targets (list of ints| 'all'): List of engine id's to kill, Default:'all'
- block (Bool): To block for confirmations or not
Raises:
NotImplementedError
"""
if self.controller:
logger.debug("IPP:Shutdown sequence: Attempting controller kill")
self.controller.close()
# We do not actually do executor.shutdown because
# this blocks even when requested to not block, killing the
# controller is more effective although impolite.
# x = self.executor.shutdown(targets=targets,
# hub=hub,
# block=block)
logger.debug("Done with executor shutdown")
return True
|
Shutdown the executor, including all workers and controllers.
The interface documentation for IPP is `here <http://ipyparallel.readthedocs.io/en/latest/api/ipyparallel.html#ipyparallel.Client.shutdown>`_
Kwargs:
- hub (Bool): Whether the hub should be shutdown, Default:True,
- targets (list of ints| 'all'): List of engine id's to kill, Default:'all'
- block (Bool): To block for confirmations or not
Raises:
NotImplementedError
|
def connection(self):
"""
Get a connection to the database or raise an exception
"""
connection = self._get_connection()
if connection:
return connection
else:
message = "GTF database needs to be created"
if self.install_string:
message += ", run: %s" % self.install_string
raise ValueError(message)
|
Get a connection to the database or raise an exception
|
def commit(*args):
"""Commit changes to the fragments repository, limited to FILENAME(s) if specified."""
parser = argparse.ArgumentParser(prog="%s %s" % (__package__, commit.__name__), description=commit.__doc__)
parser.add_argument('FILENAME', help="file(s) to commit", nargs="*", default=['.'])
args = parser.parse_args(args)
config = FragmentsConfig()
for s, curr_path in _iterate_over_files(args.FILENAME, config, statuses='MAD'):
key = os.path.relpath(curr_path, config.root)
if key not in config['files']:
yield "Could not commit '%s' because it is not being followed" % os.path.relpath(curr_path)
continue
if s in 'MA':
repo_path = os.path.join(config.directory, config['files'][key])
with _smart_open(repo_path, 'w') as repo_file:
with _smart_open(curr_path, 'r') as curr_file:
repo_file.write(curr_file.read())
os.utime(repo_path, os.stat(curr_path)[7:9])
yield "'%s' committed" % os.path.relpath(curr_path)
elif s == 'D':
yield "Could not commit '%s' because it has been removed, instead revert or forget it" % os.path.relpath(curr_path)
elif s == ' ':
yield "Could not commit '%s' because it has not been changed" % os.path.relpath(curr_path)
|
Commit changes to the fragments repository, limited to FILENAME(s) if specified.
|
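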
def receive(self, path, diffTo, diffFrom):
""" Receive a btrfs diff. """
diff = self.toObj.diff(diffTo, diffFrom)
self._open(self.butterStore.receive(diff, [path, ]))
|
Receive a btrfs diff.
|
def handle(self, *args, **options):
"""Handle the management command."""
if mon is None:
sys.stderr.write(MISSING)
else:
mon.run(**options)
|
Handle the management command.
|
def home(request):
"""Try to connect to database, and list available examples."""
try:
DBSession.query(User).first()
except DBAPIError:
return Response(
conn_err_msg,
content_type="text/plain",
status_int=500,
)
return {"project": "pyramid_tut"}
|
Try to connect to database, and list available examples.
|
def _parse_quniform(self, param_value):
'''Parse a quniform parameter specification and return a list of sampled values.'''
if param_value[2] < 2:
raise RuntimeError("The number of values sampled (q) should be at least 2")
low, high, count = param_value[0], param_value[1], param_value[2]
interval = (high - low) / (count - 1)
return [float(low + interval * i) for i in range(count)]
|
Parse a quniform parameter specification and return a list of sampled values.
|
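A worked example of the arithmetic: for param_value = [0, 10, 5] the interval is (10 - 0) / (5 - 1) = 2.5, so the sampled values are:
low, high, count = 0, 10, 5
interval = (high - low) / (count - 1)                     # 2.5
print([float(low + interval * i) for i in range(count)])  # [0.0, 2.5, 5.0, 7.5, 10.0]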
def ellipse(n=1000, adaptive=False):
"""
Get a parameterized set of vectors defining
ellipse for a major and minor axis length.
Resulting vector bundle has major axes
along axes given.
"""
u = N.linspace(0,2*N.pi,n)
# Get a bundle of vectors defining
# a full rotation around the unit circle
return N.array([N.cos(u),N.sin(u)]).T
|
Get a parameterized set of vectors defining
ellipse for a major and minor axis length.
Resulting vector bundle has major axes
along axes given.
|
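A small usage check; note the adaptive flag is currently unused and the output always traces the unit circle:
import numpy as N

pts = ellipse(5)
print(pts.shape)  # (5, 2)
print(pts[0])     # [1. 0.], the point at angle 0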
def _store_post(self, stored_entry, entry=None):
""" This method formats entry returned by _get_data() and puts to DB
create textDesc, title, and MIME """
# stored_entry.content_type = utils.get_source_setting(ds.type, 'type')
if stored_entry.published is None:
stored_entry.published = self._get_dummy_datetime()
if stored_entry.updated is None:
stored_entry.updated = self._get_dummy_datetime()
# print 'Post: %s' % stored_entry.title
# print 'Attributes: %s' % stored_entry.attributes
stored_entry.save()
self.posts.add(stored_entry)
return None
|
This method formats the entry returned by _get_data() and stores it in
the DB, creating textDesc, title, and MIME
|
def _empty_notification(self):
""" empty and return list of message notification
"""
sess = cherrypy.session
username = sess.get(SESSION_KEY, None)
if username in self.notifications:
ret = self.notifications[username]
else:
ret = []
self.notifications[username] = []
return ret
|
Empty and return the list of message notifications.
|
def _load_neighbors_from_external_source(self) -> None:
"""
Loads the neighbors of the node from the external data source (the Spotify
Web API) through the client of the graph that has this node.
"""
graph: SpotifyArtistGraph = self._graph
items: List[NameExternalIDPair] = graph.client.similar_artists(self.external_id)
limit: int = graph.neighbor_count if graph.neighbor_count > 0 else self._NEIGHBORS_TO_LOAD
if len(items) > limit:
del items[limit:]
for item in items:
neighbor: SpotifyArtistNode = graph.nodes.get_node_by_name(item.name,
can_validate_and_load=True,
external_id=item.external_id)
# Strangely we need this guard because the Spotify API's search method doesn't
# recognise certain artist names.
# Actually it could also be a bug in SpotifyClient.search_artists_by_name(),
# the artist name sent as a request parameter may not be encoded 100% correctly...
# Anyway, this is a working hotfix.
if neighbor is not None:
graph.add_edge(self, neighbor)
|
Loads the neighbors of the node from the external data source (the Spotify
Web API) through the client of the graph that has this node.
|
def cut(self, start=0, end=-1, index=False):
"""
The method cuts the time series to reduce its length.
:param start: int or float, optional, New start point
:param end: int or float, optional, New end point
:param index: bool, optional, if False then start and end are considered values in time.
"""
s_index, e_index = time_indices(self.npts, self.dt, start, end, index)
self._values = np.array(self.values[s_index:e_index])
|
The method cuts the time series to reduce its length.
:param start: int or float, optional, New start point
:param end: int or float, optional, New end point
:param index: bool, optional, if False then start and end are considered values in time.
|
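A usage sketch; ts is assumed to be an instance of the enclosing time-series class:
ts.cut(start=2.0, end=8.0)              # trim by values in time
ts.cut(start=100, end=500, index=True)  # or trim by sample indices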
def update(self, muted=values.unset, hold=values.unset, hold_url=values.unset,
hold_method=values.unset, announce_url=values.unset,
announce_method=values.unset, wait_url=values.unset,
wait_method=values.unset, beep_on_exit=values.unset,
end_conference_on_exit=values.unset, coaching=values.unset,
call_sid_to_coach=values.unset):
"""
Update the ParticipantInstance
:param bool muted: Whether the participant should be muted
:param bool hold: Whether the participant should be on hold
:param unicode hold_url: The URL we call using the `hold_method` for music that plays when the participant is on hold
:param unicode hold_method: The HTTP method we should use to call hold_url
:param unicode announce_url: The URL we call using the `announce_method` for an announcement to the participant
:param unicode announce_method: The HTTP method we should use to call announce_url
:param unicode wait_url: URL that hosts pre-conference hold music
:param unicode wait_method: The HTTP method we should use to call `wait_url`
:param bool beep_on_exit: Whether to play a notification beep to the conference when the participant exits
:param bool end_conference_on_exit: Whether to end the conference when the participant leaves
:param bool coaching: Indicates if the participant changed to coach
:param unicode call_sid_to_coach: The SID of the participant who is being `coached`
:returns: Updated ParticipantInstance
:rtype: twilio.rest.api.v2010.account.conference.participant.ParticipantInstance
"""
data = values.of({
'Muted': muted,
'Hold': hold,
'HoldUrl': hold_url,
'HoldMethod': hold_method,
'AnnounceUrl': announce_url,
'AnnounceMethod': announce_method,
'WaitUrl': wait_url,
'WaitMethod': wait_method,
'BeepOnExit': beep_on_exit,
'EndConferenceOnExit': end_conference_on_exit,
'Coaching': coaching,
'CallSidToCoach': call_sid_to_coach,
})
payload = self._version.update(
'POST',
self._uri,
data=data,
)
return ParticipantInstance(
self._version,
payload,
account_sid=self._solution['account_sid'],
conference_sid=self._solution['conference_sid'],
call_sid=self._solution['call_sid'],
)
|
Update the ParticipantInstance
:param bool muted: Whether the participant should be muted
:param bool hold: Whether the participant should be on hold
:param unicode hold_url: The URL we call using the `hold_method` for music that plays when the participant is on hold
:param unicode hold_method: The HTTP method we should use to call hold_url
:param unicode announce_url: The URL we call using the `announce_method` for an announcement to the participant
:param unicode announce_method: The HTTP method we should use to call announce_url
:param unicode wait_url: URL that hosts pre-conference hold music
:param unicode wait_method: The HTTP method we should use to call `wait_url`
:param bool beep_on_exit: Whether to play a notification beep to the conference when the participant exits
:param bool end_conference_on_exit: Whether to end the conference when the participant leaves
:param bool coaching: Indicates if the participant changed to coach
:param unicode call_sid_to_coach: The SID of the participant who is being `coached`
:returns: Updated ParticipantInstance
:rtype: twilio.rest.api.v2010.account.conference.participant.ParticipantInstance
|
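A usage sketch with the Twilio Python client (all SIDs and credentials are placeholders):
from twilio.rest import Client

client = Client('ACXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX', 'your_auth_token')
participant = client.conferences('CFXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX') \
                    .participants('CAXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX') \
                    .update(muted=True, hold=False)
print(participant.muted)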
def _entropy(self):
"""Shannon entropy in nats."""
if any(self._dist_fn_args):
raise ValueError(
'Can only compute entropy when all distributions are independent.')
return sum(joint_distribution_lib.maybe_check_wont_broadcast(
(d().entropy() for d in self._dist_fn_wrapped),
self.validate_args))
|
Shannon entropy in nats.
|
def most_seen_creators_by_works(work_kind=None, role_name=None, num=10):
"""
Returns a QuerySet of the Creators that are associated with the most Works.
"""
return Creator.objects.by_works(kind=work_kind, role_name=role_name)[:num]
|
Returns a QuerySet of the Creators that are associated with the most Works.
|
def maps_get_default_rules_output_rules_timebase(self, **kwargs):
"""Auto Generated Code
"""
config = ET.Element("config")
maps_get_default_rules = ET.Element("maps_get_default_rules")
config = maps_get_default_rules
output = ET.SubElement(maps_get_default_rules, "output")
rules = ET.SubElement(output, "rules")
timebase = ET.SubElement(rules, "timebase")
timebase.text = kwargs.pop('timebase')
callback = kwargs.pop('callback', self._callback)
return callback(config)
|
Auto Generated Code
|
def get_turbine_data_from_oedb(turbine_type, fetch_curve, overwrite=False):
r"""
Fetches data for one wind turbine type from the OpenEnergy Database (oedb).
If turbine data exists in local repository it is loaded from this file. The
file is created when turbine data was loaded from oedb in
:py:func:`~.load_turbine_data_from_oedb`. Use this function with
`overwrite=True` to overwrite your file with newly fetched data.
Use :py:func:`~.check_local_turbine_data` to check
weather your local file is up to date.
Parameters
----------
turbine_type : string
Specifies the turbine type data is fetched for.
Use :py:func:`~.get_turbine_types` to see a table of all wind turbines
for which power (coefficient) curve data is provided.
fetch_curve : string
Parameter to specify whether a power or power coefficient curve
should be retrieved from the provided turbine data. Valid options are
'power_curve' and 'power_coefficient_curve'.
overwrite : boolean
If True, the local file is overwritten by newly fetched data from oedb; if
False, turbine data is fetched from the previously saved file.
Returns
-------
Tuple (pandas.DataFrame, float)
Power curve or power coefficient curve (pandas.DataFrame) and nominal
power (float) of one wind turbine type. Power (coefficient) curve
DataFrame contains power coefficient curve values (dimensionless) or
power curve values in W with the corresponding wind speeds in m/s.
"""
# hdf5 filename
filename = os.path.join(os.path.dirname(__file__), 'data',
'turbine_data_oedb.h5')
if os.path.isfile(filename) and not overwrite:
logging.debug("Turbine data is fetched from {}".format(filename))
with pd.HDFStore(filename) as hdf_store:
turbine_data = hdf_store.get('turbine_data')
else:
turbine_data = load_turbine_data_from_oedb()
turbine_data.set_index('turbine_type', inplace=True)
# Set `curve` depending on `fetch_curve` to match names in oedb
curve = ('cp_curve' if fetch_curve == 'power_coefficient_curve'
else fetch_curve)
# Select curve and nominal power of turbine type
try:
df = turbine_data.loc[turbine_type]
except KeyError:
raise KeyError("Turbine type '{}' not in database. ".format(
turbine_type) + "Use 'get_turbine_types()' to see a table of " +
"possible wind turbine types.")
if df[curve] is not None:
df = pd.DataFrame(df[curve])
else:
sys.exit("{} of {} not available in ".format(curve, turbine_type) +
"oedb. Use 'get_turbine_types()' to see for which turbine " +
"types power coefficient curves are available.")
nominal_power = turbine_data.loc[turbine_type][
'installed_capacity_kw'] * 1000
df.columns = ['wind_speed', 'value']
if fetch_curve == 'power_curve':
# power in W
df['value'] = df['value'] * 1000
return df, nominal_power
|
r"""
Fetches data for one wind turbine type from the OpenEnergy Database (oedb).
If turbine data exists in local repository it is loaded from this file. The
file is created when turbine data was loaded from oedb in
:py:func:`~.load_turbine_data_from_oedb`. Use this function with
`overwrite=True` to overwrite your file with newly fetched data.
Use :py:func:`~.check_local_turbine_data` to check
whether your local file is up to date.
Parameters
----------
turbine_type : string
Specifies the turbine type data is fetched for.
Use :py:func:`~.get_turbine_types` to see a table of all wind turbines
for which power (coefficient) curve data is provided.
fetch_curve : string
Parameter to specify whether a power or power coefficient curve
should be retrieved from the provided turbine data. Valid options are
'power_curve' and 'power_coefficient_curve'.
overwrite : boolean
If True, the local file is overwritten by newly fetched data from oedb; if
False, turbine data is fetched from the previously saved file.
Returns
-------
Tuple (pandas.DataFrame, float)
Power curve or power coefficient curve (pandas.DataFrame) and nominal
power (float) of one wind turbine type. Power (coefficient) curve
DataFrame contains power coefficient curve values (dimensionless) or
power curve values in W with the corresponding wind speeds in m/s.
|
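A usage sketch; the turbine type string is illustrative (use get_turbine_types() to list valid names):
curve, nominal_power = get_turbine_data_from_oedb(
    turbine_type='E-126/4200', fetch_curve='power_curve')
print(nominal_power)  # nominal power in W
print(curve.head())   # columns: 'wind_speed' (m/s) and 'value' (W)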
def is_writable(path):
"""Check if path has write access"""
try:
testfile = tempfile.TemporaryFile(dir=path)
testfile.close()
except OSError as e:
if e.errno == errno.EACCES: # 13
return False
return True
|
Check if path has write access
|
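A trivial usage example:
import tempfile

target = tempfile.gettempdir()
print(is_writable(target))  # True if a temp file can be created there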
def add(A, b, offset=0):
"""
Add b to the view of A in place (!).
Returns modified A.
Broadcasting is allowed, thus b can be scalar.
if offset is not zero, make sure b is of right shape!
:param ndarray A: 2 dimensional array
:param ndarray-like b: either one dimensional or scalar
:param int offset: same as in view.
:rtype: view of A, which is adjusted inplace
"""
return _diag_ufunc(A, b, offset, np.add)
|
Add b to the view of A in place (!).
Returns modified A.
Broadcasting is allowed, thus b can be scalar.
if offset is not zero, make sure b is of right shape!
:param ndarray A: 2 dimensional array
:param ndarray-like b: either one dimensional or scalar
:param int offset: same as in view.
:rtype: view of A, which is adjusted inplace
|
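A usage sketch, assuming the in-place diagonal-add semantics the docstring describes:
import numpy as np

A = np.zeros((3, 3))
add(A, 1.0)        # adds 1.0 along the main diagonal of A, in place
print(np.diag(A))  # [1. 1. 1.]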
def cmd(command):
"""Send IPMI 'command' via ipmitool"""
env()
ipmi = cij.env_to_dict(PREFIX, EXPORTED + REQUIRED)
command = "ipmitool -U %s -P %s -H %s -p %s %s" % (
ipmi["USER"], ipmi["PASS"], ipmi["HOST"], ipmi["PORT"], command)
cij.info("ipmi.command: %s" % command)
return cij.util.execute(command, shell=True, echo=True)
|
Send IPMI 'command' via ipmitool
|
def _apply_data_mask(self, data):
"""
Apply pre-defined masks to the data.
"""
data = self._format_data(data)
masked_data, pixels_affected = [], 0
data_mask = self._configuration.get("masks", {}).get("data", [])
for spectrum in data:
masked_spectrum = spectrum.copy()
for start, end in data_mask:
idx = np.clip(
masked_spectrum.disp.searchsorted([start, end]) + [0, 1],
0, masked_spectrum.disp.size)
masked_spectrum.flux[idx[0]:idx[1]] = np.nan
pixels_affected += np.clip(np.ptp(idx) - 1, 0, None)
masked_data.append(masked_spectrum)
logger.debug("{0} observed pixels were masked according to the data "
"mask: {1}".format(pixels_affected, data_mask))
return (masked_data, pixels_affected)
|
Apply pre-defined masks to the data.
|
def token_distance(t1, t2, initial_match_penalization):
"""Calculates the edit distance between two tokens."""
if isinstance(t1, NameInitial) or isinstance(t2, NameInitial):
if t1.token == t2.token:
return 0
if t1 == t2:
return initial_match_penalization
return 1.0
return _normalized_edit_dist(t1.token, t2.token)
|
Calculates the edit distance between two tokens.
|
def _parse(self, stream):
"""Parse a JSON BUILD file.
Args:
stream: file-like object containing the JSON build data
"""
builddata = json.load(stream)
log.debug('This is a JSON build file.')
if 'targets' not in builddata:
log.warn('Warning: No targets defined here.')
return
for tdata in builddata['targets']:
# TODO: validate name
target = address.new(target=tdata.pop('name'),
repo=self.target.repo,
path=self.target.path)
# Duplicate target definition? Uh oh.
if target in self.node and 'target_obj' in self.node[target]:
raise error.ButcherError(
'Target is defined more than once: %s', target)
rule_obj = targets.new(name=target,
ruletype=tdata.pop('type'),
**tdata)
log.debug('New target: %s', target)
self.add_node(target, {'target_obj': rule_obj})
# dep could be ":blabla" or "//foo:blabla" or "//foo/bar:blabla"
for dep in rule_obj.composed_deps() or []:
d_target = address.new(dep)
if not d_target.repo: # ":blabla"
d_target.repo = self.target.repo
if d_target.repo == self.target.repo and not d_target.path:
d_target.path = self.target.path
if d_target not in self.nodes():
self.add_node(d_target)
log.debug('New dep: %s -> %s', target, d_target)
self.add_edge(target, d_target)
|
Parse a JSON BUILD file.
Args:
stream: file-like object containing the JSON build data
|
def _parse_tile_url(tile_url):
""" Extracts tile name, data and AWS index from tile URL
:param tile_url: Location of tile at AWS
:type: tile_url: str
:return: Tuple in a form (tile_name, date, aws_index)
:rtype: (str, str, int)
"""
props = tile_url.rsplit('/', 7)
return ''.join(props[1:4]), '-'.join(props[4:7]), int(props[7])
|
Extracts tile name, date and AWS index from tile URL
:param tile_url: Location of tile at AWS
:type tile_url: str
:return: Tuple in a form (tile_name, date, aws_index)
:rtype: (str, str, int)
|
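A worked example; the URL mirrors the AWS Sentinel-2 tile layout and is illustrative:
url = 'http://sentinel-s2-l1c.s3.amazonaws.com/tiles/10/T/DM/2016/1/1/0'
print(_parse_tile_url(url))  # ('10TDM', '2016-1-1', 0)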
def apply(
self, value, locale, currency=None, currency_digits=True,
decimal_quantization=True):
"""Renders into a string a number following the defined pattern.
Forced decimal quantization is active by default so we'll produce a
number string that is strictly following CLDR pattern definitions.
"""
if not isinstance(value, decimal.Decimal):
value = decimal.Decimal(str(value))
value = value.scaleb(self.scale)
# Separate the absolute value from its sign.
is_negative = int(value.is_signed())
value = abs(value).normalize()
# Prepare scientific notation metadata.
if self.exp_prec:
value, exp, exp_sign = self.scientific_notation_elements(
value, locale)
# Adjust the precision of the fractional part and force it to the
# currency's if necessary.
frac_prec = self.frac_prec
if currency and currency_digits:
frac_prec = (babel.numbers.get_currency_precision(currency), ) * 2
# Bump decimal precision to the natural precision of the number if it
# exceeds the one we're about to use. This adaptive precision is only
# triggered if the decimal quantization is disabled or if a scientific
# notation pattern has a missing mandatory fractional part (as in the
# default '#E0' pattern). This special case has been extensively
# discussed at
# https://github.com/python-babel/babel/pull/494#issuecomment-307649969
if not decimal_quantization or (self.exp_prec and frac_prec == (0, 0)):
frac_prec = (frac_prec[0], max([frac_prec[1],
get_decimal_precision(value)]))
# Render scientific notation.
if self.exp_prec:
number = ''.join([
self._quantize_value(value, locale, frac_prec),
babel.numbers.get_exponential_symbol(locale),
exp_sign,
self._format_int(
str(exp), self.exp_prec[0], self.exp_prec[1], locale)])
# Is it a significant digits pattern?
elif '@' in self.pattern:
text = self._format_significant(value,
self.int_prec[0],
self.int_prec[1])
a, sep, b = text.partition(".")
number = self._format_int(a, 0, 1000, locale)
if sep:
number += babel.numbers.get_decimal_symbol(locale) + b
# A normal number pattern.
else:
number = self._quantize_value(value, locale, frac_prec)
retval = ''.join([
self.prefix[is_negative],
number,
self.suffix[is_negative]])
if u'¤' in retval:
retval = retval.replace(u'¤¤¤',
babel.numbers.get_currency_name(
currency, value, locale))
retval = retval.replace(u'¤¤', currency.upper())
retval = retval.replace(u'¤', babel.numbers.get_currency_symbol(
currency, locale))
return retval
|
Renders into a string a number following the defined pattern.
Forced decimal quantization is active by default so we'll produce a
number string that is strictly following CLDR pattern definitions.
|
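A usage sketch via babel's pattern parser, which returns an object exposing this apply method; the pattern and locale are illustrative:
import babel.numbers

pattern = babel.numbers.parse_pattern('#,##0.###')
print(pattern.apply(1234.5, 'en_US'))  # 1,234.5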
def name(self):
"""str: name of the file entry, which does not include the full path."""
if self._name is None:
location = getattr(self.path_spec, 'location', None)
if location is not None:
self._name = self._file_system.BasenamePath(location)
else:
volume_index = apfs_helper.APFSContainerPathSpecGetVolumeIndex(
self.path_spec)
if volume_index is not None:
self._name = 'apfs{0:d}'.format(volume_index + 1)
else:
self._name = ''
return self._name
|
str: name of the file entry, which does not include the full path.
|