def find_copy_constructor(type_):
"""
Returns reference to copy constructor.
Args:
type_ (declarations.class_t): the class to be searched.
Returns:
declarations.constructor_t: the copy constructor
"""
copy_ = type_.constructors(
lambda x: is_copy_constructor(x),
recursive=False,
allow_empty=True)
if copy_:
return copy_[0]
return None
def get_stock_basicinfo(self, market, stock_type=SecurityType.STOCK, code_list=None):
"""
        Get basic information for securities of a given type in a given market.
        :param market: market type, futuquant.common.constant.Market
        :param stock_type: security type, futuquant.common.constant.SecurityType
        :param code_list: if not None, an iterable of stock codes; only information for those codes is returned
        :return: (ret_code, content)
                When ret_code equals RET_OK, content is a pandas.DataFrame with the columns below; otherwise it is an error message string.
                =================   =======   ==================================================
                Field               Type      Description
                =================   =======   ==================================================
                code                str       stock code
                name                str       stock name
                lot_size            int       number of shares per lot
                stock_type          str       security type, see SecurityType
                stock_child_type    str       warrant subtype, see WrtType
                stock_owner         str       code of the underlying stock
                option_type         str       option type, Qot_Common.OptionType
                strike_time         str       strike date
                strike_price        float     strike price
                suspension          bool      whether trading is suspended (True means suspended)
                listing_date        str       listing date
                stock_id            int       stock id
                delisting           bool      whether the security is delisted
                =================   =======   ==================================================
:example:
.. code-block:: python
from futuquant import *
quote_ctx = OpenQuoteContext(host='127.0.0.1', port=11111)
print(quote_ctx.get_stock_basicinfo(Market.HK, SecurityType.WARRANT))
print(quote_ctx.get_stock_basicinfo(Market.US, SecurityType.DRVT, 'US.AAPL190621C140000'))
quote_ctx.close()
"""
param_table = {'market': market, 'stock_type': stock_type}
for x in param_table:
param = param_table[x]
            if param is None or not is_str(param):
error_str = ERROR_STR_PREFIX + "the type of %s param is wrong" % x
return RET_ERROR, error_str
if code_list is not None:
if is_str(code_list):
code_list = code_list.split(',')
elif isinstance(code_list, list):
pass
else:
return RET_ERROR, "code list must be like ['HK.00001', 'HK.00700'] or 'HK.00001,HK.00700'"
query_processor = self._get_sync_query_processor(
StockBasicInfoQuery.pack_req, StockBasicInfoQuery.unpack_rsp)
kargs = {
"market": market,
'stock_type': stock_type,
'code_list': code_list,
'conn_id': self.get_sync_conn_id()
}
ret_code, msg, basic_info_list = query_processor(**kargs)
if ret_code != RET_OK:
return ret_code, msg
col_list = [
'code', 'name', 'lot_size', 'stock_type', 'stock_child_type', 'stock_owner',
'option_type', 'strike_time', 'strike_price', 'suspension',
'listing_date', 'stock_id', 'delisting'
]
basic_info_table = pd.DataFrame(basic_info_list, columns=col_list)
return RET_OK, basic_info_table
def get_item_abspath(self, identifier):
"""Return absolute path at which item content can be accessed.
:param identifier: item identifier
:returns: absolute path from which the item content can be accessed
"""
admin_metadata = self.get_admin_metadata()
uuid = admin_metadata["uuid"]
# Create directory for the specific dataset.
dataset_cache_abspath = os.path.join(self._irods_cache_abspath, uuid)
mkdir_parents(dataset_cache_abspath)
# Get the file extension from the relpath from the handle metadata.
irods_item_path = os.path.join(self._data_abspath, identifier)
relpath = self._get_metadata_with_cache(irods_item_path, "handle")
_, ext = os.path.splitext(relpath)
local_item_abspath = os.path.join(
dataset_cache_abspath,
identifier + ext)
if not os.path.isfile(local_item_abspath):
tmp_local_item_abspath = local_item_abspath + ".tmp"
_get_file_forcefully(irods_item_path, tmp_local_item_abspath)
os.rename(tmp_local_item_abspath, local_item_abspath)
return local_item_abspath
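# A minimal standalone sketch of the write-to-temp-then-rename pattern used above:
# readers never observe a partially fetched file, because os.rename() is atomic on
# POSIX when source and destination are on the same filesystem. `fetch` is a
# hypothetical callable standing in for _get_file_forcefully.
import os

def atomic_fetch(fetch, dst_abspath):
    tmp_abspath = dst_abspath + ".tmp"
    fetch(tmp_abspath)                    # may take a while; partial data lands in .tmp
    os.rename(tmp_abspath, dst_abspath)   # atomic swap into the final cache location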
def remove_relation(post_id, tag_id):
'''
    Delete the post-to-tag relation record.
'''
entry = TabPost2Tag.delete().where(
(TabPost2Tag.post_id == post_id) &
(TabPost2Tag.tag_id == tag_id)
)
entry.execute()
MCategory.update_count(tag_id)
def _perform_validation(self, path, value, results):
"""
Validates a given value against the schema and configured validation rules.
:param path: a dot notation path to the value.
:param value: a value to be validated.
:param results: a list with validation results to add new results.
"""
        name = path if path is not None else "value"
        value = ObjectReader.get_value(value)
        super(ArraySchema, self)._perform_validation(path, value, results)
        if value is None:
            return
        if isinstance(value, (list, set, tuple)):
            for index, element in enumerate(value):
                element_path = str(index) if not path else path + "." + str(index)
                self._perform_type_validation(element_path, self.value_type, element, results)
else:
results.append(
ValidationResult(
path,
ValidationResultType.Error,
"VALUE_ISNOT_ARRAY",
name + " type must be List or Array",
"List",
type(value)
)
)
def correlation(s, o):
"""
correlation coefficient
input:
s: simulated
o: observed
output:
correlation: correlation coefficient
"""
# s,o = filter_nan(s,o)
if s.size == 0:
corr = np.NaN
else:
corr = np.corrcoef(o, s)[0, 1]
return corr
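# Quick sanity check of correlation() with toy data (assumes numpy is imported
# as np, as in the surrounding module):
import numpy as np
s = np.array([1.0, 2.0, 3.0, 4.0])
o = np.array([1.1, 1.9, 3.2, 3.8])
print(correlation(s, o))              # close to 1.0 for nearly linear data
print(correlation(np.array([]), o))   # nan: empty input short-circuits corrcoef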
def on_delete(resc, req, resp, rid): # pylint: disable=unused-argument
""" Delete the single item
    Upon a successful deletion an empty-bodied 204
is returned.
"""
signals.pre_req.send(resc.model)
signals.pre_req_delete.send(resc.model)
model = find(resc.model, rid)
goldman.sess.store.delete(model)
resp.status = falcon.HTTP_204
signals.post_req.send(resc.model)
signals.post_req_delete.send(resc.model)
def store_many_vectors(self, hash_name, bucket_keys, vs, data):
"""
Store a batch of vectors.
Stores vector and JSON-serializable data in bucket with specified key.
"""
if data is None:
data = itertools.repeat(data)
for v, k, d in zip(vs, bucket_keys, data):
self.store_vector(hash_name, k, v, d)
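# Why itertools.repeat(None) works above: zip() stops at the shortest iterable,
# so an endless stream of None pairs each vector/key with empty data without
# materializing a list. A standalone illustration:
import itertools
vs = [[0.1, 0.2], [0.3, 0.4]]
keys = ["bucket_a", "bucket_b"]
for v, k, d in zip(vs, keys, itertools.repeat(None)):
    print(k, v, d)   # d is None for every pair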
def extract_paths(self, paths, ignore_nopath):
"""
Extract the given paths from the domain
Attempt to extract all files defined in ``paths`` with the method
defined in :func:`~lago.plugins.vm.VMProviderPlugin.extract_paths`,
if it fails, and `guestfs` is available it will try extracting the
files with guestfs.
Args:
paths(list of tuples): files to extract in
`[(src1, dst1), (src2, dst2)...]` format.
            ignore_nopath(boolean): if True, non-existent paths will be ignored.
Returns:
None
Raises:
            :exc:`~lago.plugins.vm.ExtractPathNoPathError`: if a non-existent
                path was found on the VM, and `ignore_nopath` is False.
:exc:`~lago.plugins.vm.ExtractPathError`: on all other failures.
"""
try:
super().extract_paths(
paths=paths,
ignore_nopath=ignore_nopath,
)
except ExtractPathError as err:
LOGGER.debug(
'%s: failed extracting files: %s', self.vm.name(), err.message
)
if self._has_guestfs:
self.extract_paths_dead(paths, ignore_nopath)
else:
raise
def get_default_query_from_module(module):
""" Given a %%sql module return the default (last) query for the module.
Args:
module: the %%sql module.
Returns:
The default query associated with this module.
"""
if isinstance(module, types.ModuleType):
return module.__dict__.get(_SQL_MODULE_LAST, None)
return None
def has_equal_ast(state, incorrect_msg=None, code=None, exact=True, append=None):
"""Test whether abstract syntax trees match between the student and solution code.
``has_equal_ast()`` can be used in two ways:
* As a robust version of ``has_code()``. By setting ``code``, you can look for the AST representation of ``code`` in the student's submission.
But be aware that ``a`` and ``a = 1`` won't match, as reading and assigning are not the same in an AST.
Use ``ast.dump(ast.parse(code))`` to see an AST representation of ``code``.
    * As an expression-based check in more advanced SCT chains, e.g. to compare the equality of the expressions used to set function arguments.
Args:
incorrect_msg: message displayed when ASTs mismatch. When you specify ``code`` yourself, you have to specify this.
code: optional code to use instead of the solution AST.
exact: whether the representations must match exactly. If false, the solution AST
        only needs to be contained within the student AST (similar to using ``test_student_typed``).
Defaults to ``True``, unless the ``code`` argument has been specified.
:Example:
Student and Solution Code::
dict(a = 'value').keys()
SCT::
# all pass
Ex().has_equal_ast()
Ex().has_equal_ast(code = "dict(a = 'value').keys()")
Ex().has_equal_ast(code = "dict(a = 'value')", exact = False)
Student and Solution Code::
import numpy as np
arr = np.array([1, 2, 3, 4, 5])
np.mean(arr)
SCT::
        # Check underlying value of argument a of np.mean:
        Ex().check_function('numpy.mean').check_args('a').has_equal_value()
# Only check AST equality of expression used to specify argument a:
Ex().check_function('numpy.mean').check_args('a').has_equal_ast()
"""
if utils.v2_only():
state.assert_is_not(["object_assignments"], "has_equal_ast", ["check_object"])
state.assert_is_not(["function_calls"], "has_equal_ast", ["check_function"])
if code and incorrect_msg is None:
raise InstructorError(
"If you manually specify the code to match inside has_equal_ast(), "
"you have to explicitly set the `incorrect_msg` argument."
)
if (
append is None
): # if not specified, set to False if incorrect_msg was manually specified
append = incorrect_msg is None
if incorrect_msg is None:
incorrect_msg = "Expected `{{sol_str}}`, but got `{{stu_str}}`."
def parse_tree(tree):
# get contents of module.body if only 1 element
crnt = (
tree.body[0]
if isinstance(tree, ast.Module) and len(tree.body) == 1
else tree
)
# remove Expr if it exists
return ast.dump(crnt.value if isinstance(crnt, ast.Expr) else crnt)
stu_rep = parse_tree(state.student_ast)
sol_rep = parse_tree(state.solution_ast if not code else ast.parse(code))
fmt_kwargs = {
"sol_str": state.solution_code if not code else code,
"stu_str": state.student_code,
}
_msg = state.build_message(incorrect_msg, fmt_kwargs, append=append)
if exact and not code:
state.do_test(EqualTest(stu_rep, sol_rep, Feedback(_msg, state)))
elif not sol_rep in stu_rep:
state.report(Feedback(_msg, state))
return state
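# Illustration of what parse_tree() above compares, using only the standard
# library: a single-expression module is unwrapped to the bare expression node
# before ast.dump(), so whitespace and formatting never affect the match.
import ast
a = ast.dump(ast.parse("dict(a = 'value').keys()").body[0].value)
b = ast.dump(ast.parse("dict(a='value').keys()").body[0].value)
print(a == b)   # True: spacing differences disappear in the AST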
def runGetOutput(cmd, raiseOnFailure=False, encoding=sys.getdefaultencoding()):
'''
runGetOutput - Simply runs a command and returns the output as a string. Use #runGetResults if you need something more complex.
@param cmd <str/list> - String of command and arguments, or list of command and arguments
            If cmd is a string, the command will be executed as if run exactly as written in a shell. This mode supports shell-isms like '&&' and '|'
If cmd is a list, the first element will be the executable, and further elements are arguments that will be passed to that executable.
@param raiseOnFailure <True/False> - Default False, if True a non-zero return from the command (failure) will raise a SimpleCommandFailure, which contains all gathered output and return code. @see #SimpleCommandFailure
@param encoding <None/str> - Default sys.getdefaultencoding(), the program's output will automatically be decoded using the provided codec (e.x. "utf-8" or "ascii").
If None or False-ish, data will not be decoded (i.e. in python3 will be "bytes" type)
            If unsure, leave this as its default value, or provide "utf-8"
@return <str> - String of data output by the executed program. This combines stdout and stderr into one string. If you need them separate, use #runGetResults
@raises SimpleCommandFailure -
* If command cannot be executed (like program not found, insufficient permissions, etc)
* If #raiseOnFailure is set to True, and the program returns non-zero
'''
results = Simple.runGetResults(cmd, stdout=True, stderr=subprocess.STDOUT, encoding=encoding)
if raiseOnFailure is True and results['returnCode'] != 0:
try:
if issubclass(cmd.__class__, (list, tuple)):
cmdStr = ' '.join(cmd)
else:
cmdStr = cmd
except:
cmdStr = repr(cmd)
failMsg = "Command '%s' failed with returnCode=%d" %(cmdStr, results['returnCode'])
raise SimpleCommandFailure(failMsg, results['returnCode'], results.get('stdout', None), results.get('stderr', None))
return results['stdout']
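# Usage sketch for runGetOutput: the string form goes through a shell and
# supports shell syntax, while the list form execs the program directly with a
# fixed argv (no shell interpretation).
combined = runGetOutput("printf hi && printf ' there'")   # 'hi there'
listed = runGetOutput(["printf", "hi"])                    # 'hi'
strict = runGetOutput("false", raiseOnFailure=True)        # raises SimpleCommandFailure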
def python_type(self):
"""Return the python type for the row, possibly getting it from a valuetype reference """
from ambry.valuetype import resolve_value_type
if self.valuetype and resolve_value_type(self.valuetype):
return resolve_value_type(self.valuetype)._pythontype
elif self.datatype:
try:
return self.types[self.datatype][1]
except KeyError:
return resolve_value_type(self.datatype)._pythontype
else:
from ambry.exc import ConfigurationError
            raise ConfigurationError("Can't get python_type: neither datatype nor valuetype is defined")
def run(self, schedule_type, lookup_id, **kwargs):
"""
Loads Schedule linked to provided lookup
"""
log = self.get_logger(**kwargs)
log.info("Queuing <%s> <%s>" % (schedule_type, lookup_id))
task_run = QueueTaskRun()
task_run.task_id = self.request.id or uuid4()
task_run.started_at = now()
tr_qs = QueueTaskRun.objects
# Load the schedule active items
schedules = Schedule.objects.filter(enabled=True)
if schedule_type == "crontab":
schedules = schedules.filter(celery_cron_definition=lookup_id)
tr_qs = tr_qs.filter(celery_cron_definition=lookup_id)
scheduler_type = CrontabSchedule
task_run.celery_cron_definition_id = lookup_id
elif schedule_type == "interval":
schedules = schedules.filter(celery_interval_definition=lookup_id)
tr_qs = tr_qs.filter(celery_interval_definition=lookup_id)
scheduler_type = IntervalSchedule
task_run.celery_interval_definition_id = lookup_id
# Confirm that this task should run now based on last run time.
try:
last_task_run = tr_qs.latest("started_at")
except QueueTaskRun.DoesNotExist:
# No previous run so it is safe to continue.
pass
else:
            # This basically replicates what celery beat is meant to do, but
# we can't trust celery beat and django-celery to always accurately
# update their own last run time.
sched = scheduler_type.objects.get(id=lookup_id)
due, due_next = sched.schedule.is_due(last_task_run.started_at)
if not due and due_next >= settings.DEFAULT_CLOCK_SKEW_SECONDS:
return (
"Aborted Queuing <%s> <%s> due to last task run (%s) "
"at %s"
% (
schedule_type,
lookup_id,
last_task_run.id,
last_task_run.started_at,
)
)
task_run.save()
# create tasks for each active schedule
queued = 0
schedules = schedules.values("id", "auth_token", "endpoint", "payload")
for schedule in schedules.iterator():
schedule["schedule_id"] = str(schedule.pop("id"))
DeliverTask.apply_async(kwargs=schedule)
queued += 1
task_run.completed_at = now()
task_run.save()
return "Queued <%s> Tasks" % (queued,)
def _make_meta(self, tracker_url, root_name, private, progress):
""" Create torrent dict.
"""
# Calculate piece size
if self._fifo:
# TODO we need to add a (command line) param, probably for total data size
# for now, always 1MB
piece_size_exp = 20
else:
total_size = self._calc_size()
if total_size:
piece_size_exp = int(math.log(total_size) / math.log(2)) - 9
else:
piece_size_exp = 0
piece_size_exp = min(max(15, piece_size_exp), 24)
piece_size = 2 ** piece_size_exp
# Build info hash
info, totalhashed = self._make_info(piece_size, progress, self.walk() if self._fifo else sorted(self.walk()))
# Enforce unique hash per tracker
info["x_cross_seed"] = hashlib.md5(tracker_url).hexdigest()
# Set private flag
if private:
info["private"] = 1
# Freely chosen root name (default is basename of the data path)
if root_name:
info["name"] = root_name
# Torrent metadata
meta = {
"info": info,
"announce": tracker_url.strip(),
}
#XXX meta["encoding"] = "UTF-8"
# Return validated meta dict
return check_meta(meta), totalhashed
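# The piece-size heuristic above in isolation: log2(total_size) - 9 targets
# roughly 512 pieces, clamped to piece sizes between 2**15 (32 KiB) and
# 2**24 (16 MiB). For ~1.2 GB of payload:
import math
total_size = 1_200_000_000
exp = int(math.log(total_size) / math.log(2)) - 9   # 30 - 9 = 21
exp = min(max(15, exp), 24)
print(2 ** exp)   # 2097152 bytes (2 MiB) -> about 572 pieces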
def start_runs(
logdir,
steps,
run_name,
thresholds,
mask_every_other_prediction=False):
"""Generate a PR curve with precision and recall evenly weighted.
Arguments:
logdir: The directory into which to store all the runs' data.
steps: The number of steps to run for.
run_name: The name of the run.
thresholds: The number of thresholds to use for PR curves.
mask_every_other_prediction: Whether to mask every other prediction by
alternating weights between 0 and 1.
"""
tf.compat.v1.reset_default_graph()
tf.compat.v1.set_random_seed(42)
# Create a normal distribution layer used to generate true color labels.
distribution = tf.compat.v1.distributions.Normal(loc=0., scale=142.)
    # Sample the distribution to generate colors. Let's generate different numbers
# of each color. The first dimension is the count of examples.
# The calls to sample() are given fixed random seed values that are "magic"
# in that they correspond to the default seeds for those ops when the PR
# curve test (which depends on this code) was written. We've pinned these
# instead of continuing to use the defaults since the defaults are based on
# node IDs from the sequence of nodes added to the graph, which can silently
# change when this code or any TF op implementations it uses are modified.
# TODO(nickfelt): redo the PR curve test to avoid reliance on random seeds.
# Generate reds.
number_of_reds = 100
true_reds = tf.clip_by_value(
tf.concat([
255 - tf.abs(distribution.sample([number_of_reds, 1], seed=11)),
tf.abs(distribution.sample([number_of_reds, 2], seed=34))
], axis=1),
0, 255)
# Generate greens.
number_of_greens = 200
true_greens = tf.clip_by_value(
tf.concat([
tf.abs(distribution.sample([number_of_greens, 1], seed=61)),
255 - tf.abs(distribution.sample([number_of_greens, 1], seed=82)),
tf.abs(distribution.sample([number_of_greens, 1], seed=105))
], axis=1),
0, 255)
# Generate blues.
number_of_blues = 150
true_blues = tf.clip_by_value(
tf.concat([
tf.abs(distribution.sample([number_of_blues, 2], seed=132)),
255 - tf.abs(distribution.sample([number_of_blues, 1], seed=153))
], axis=1),
0, 255)
# Assign each color a vector of 3 booleans based on its true label.
labels = tf.concat([
tf.tile(tf.constant([[True, False, False]]), (number_of_reds, 1)),
tf.tile(tf.constant([[False, True, False]]), (number_of_greens, 1)),
tf.tile(tf.constant([[False, False, True]]), (number_of_blues, 1)),
], axis=0)
# We introduce 3 normal distributions. They are used to predict whether a
# color falls under a certain class (based on distances from corners of the
# color triangle). The distributions vary per color. We have the distributions
# narrow over time.
initial_standard_deviations = [v + FLAGS.steps for v in (158, 200, 242)]
iteration = tf.compat.v1.placeholder(tf.int32, shape=[])
red_predictor = tf.compat.v1.distributions.Normal(
loc=0.,
scale=tf.cast(
initial_standard_deviations[0] - iteration,
dtype=tf.float32))
green_predictor = tf.compat.v1.distributions.Normal(
loc=0.,
scale=tf.cast(
initial_standard_deviations[1] - iteration,
dtype=tf.float32))
blue_predictor = tf.compat.v1.distributions.Normal(
loc=0.,
scale=tf.cast(
initial_standard_deviations[2] - iteration,
dtype=tf.float32))
# Make predictions (assign 3 probabilities to each color based on each color's
# distance to each of the 3 corners). We seek double the area in the right
# tail of the normal distribution.
examples = tf.concat([true_reds, true_greens, true_blues], axis=0)
probabilities_colors_are_red = (1 - red_predictor.cdf(
tf.norm(tensor=examples - tf.constant([255., 0, 0]), axis=1))) * 2
probabilities_colors_are_green = (1 - green_predictor.cdf(
tf.norm(tensor=examples - tf.constant([0, 255., 0]), axis=1))) * 2
probabilities_colors_are_blue = (1 - blue_predictor.cdf(
tf.norm(tensor=examples - tf.constant([0, 0, 255.]), axis=1))) * 2
predictions = (
probabilities_colors_are_red,
probabilities_colors_are_green,
probabilities_colors_are_blue
)
# This is the crucial piece. We write data required for generating PR curves.
# We create 1 summary per class because we create 1 PR curve per class.
for i, color in enumerate(('red', 'green', 'blue')):
description = ('The probabilities used to create this PR curve are '
'generated from a normal distribution. Its standard '
'deviation is initially %0.0f and decreases over time.' %
initial_standard_deviations[i])
weights = None
if mask_every_other_prediction:
# Assign a weight of 0 to every even-indexed prediction. Odd-indexed
# predictions are assigned a default weight of 1.
consecutive_indices = tf.reshape(
tf.range(tf.size(input=predictions[i])), tf.shape(input=predictions[i]))
weights = tf.cast(consecutive_indices % 2, dtype=tf.float32)
summary.op(
name=color,
labels=labels[:, i],
predictions=predictions[i],
num_thresholds=thresholds,
weights=weights,
display_name='classifying %s' % color,
description=description)
merged_summary_op = tf.compat.v1.summary.merge_all()
events_directory = os.path.join(logdir, run_name)
sess = tf.compat.v1.Session()
writer = tf.compat.v1.summary.FileWriter(events_directory, sess.graph)
for step in xrange(steps):
feed_dict = {
iteration: step,
}
merged_summary = sess.run(merged_summary_op, feed_dict=feed_dict)
writer.add_summary(merged_summary, step)
writer.close()
def assign_edge_colors_and_widths(self):
"""
    Resolve the conflict between the 'edge_colors'/'edge_widths' args and the
    redundant 'stroke'/'stroke-width' entries in edge_style. The default is the
    edge_style entry unless the user entered edge_colors or edge_widths. To
    enter multiple colors or widths, edge_colors/edge_widths must be used, not
    edge_style. Either way, we build lists of colors and widths that are
    written to the edge markers as CSS attributes.
"""
    # edge_colors overrides stroke. Tricky to catch because it can be many types.
# SET edge_widths and POP edge_style.stroke-width
if self.style.edge_widths is None:
if not self.style.edge_style["stroke-width"]:
self.style.edge_style.pop("stroke-width")
self.style.edge_style.pop("stroke")
self.edge_widths = [None] * self.nedges
else:
if isinstance(self.style.edge_style["stroke-width"], (list, tuple)):
raise ToytreeError(
"Use edge_widths not edge_style for multiple edge widths")
# check the color
width = self.style.edge_style["stroke-width"]
self.style.edge_style.pop("stroke-width")
self.edge_widths = [width] * self.nedges
else:
self.style.edge_style.pop("stroke-width")
if isinstance(self.style.edge_widths, (str, int)):
self.edge_widths = [int(self.style.edge_widths)] * self.nedges
elif isinstance(self.style.edge_widths, (list, tuple)):
if len(self.style.edge_widths) != self.nedges:
raise ToytreeError("edge_widths arg is the wrong length")
for cidx in range(self.nedges):
self.edge_widths[cidx] = self.style.edge_widths[cidx]
# SET edge_colors and POP edge_style.stroke
if self.style.edge_colors is None:
if self.style.edge_style["stroke"] is None:
self.style.edge_style.pop("stroke")
self.edge_colors = [None] * self.nedges
else:
if isinstance(self.style.edge_style["stroke"], (list, tuple)):
raise ToytreeError(
"Use edge_colors not edge_style for multiple edge colors")
# check the color
color = self.style.edge_style["stroke"]
if isinstance(color, (np.ndarray, np.void, list, tuple)):
color = toyplot.color.to_css(color)
self.style.edge_style.pop("stroke")
self.edge_colors = [color] * self.nedges
# otherwise parse node_color
else:
self.style.edge_style.pop("stroke")
if isinstance(self.style.edge_colors, (str, int)):
# check the color
color = self.style.edge_colors
if isinstance(color, (np.ndarray, np.void, list, tuple)):
color = toyplot.color.to_css(color)
self.edge_colors = [color] * self.nedges
elif isinstance(self.style.edge_colors, (list, tuple)):
if len(self.style.edge_colors) != self.nedges:
raise ToytreeError("edge_colors arg is the wrong length")
for cidx in range(self.nedges):
self.edge_colors[cidx] = self.style.edge_colors[cidx]
# do not allow empty edge_colors or widths
self.edge_colors = [i if i else "#262626" for i in self.edge_colors]
self.edge_widths = [i if i else 2 for i in self.edge_widths]
def visit_tryexcept(self, node):
"""return an astroid.TryExcept node as string"""
trys = ["try:\n%s" % self._stmt_list(node.body)]
for handler in node.handlers:
trys.append(handler.accept(self))
if node.orelse:
trys.append("else:\n%s" % self._stmt_list(node.orelse))
return "\n".join(trys)
def peek_step(self, val: ArrayValue,
sn: "DataNode") -> Tuple[ObjectValue, "DataNode"]:
"""Return the entry addressed by the receiver + its schema node.
Args:
val: Current value (array).
sn: Current schema node.
"""
keys = self.parse_keys(sn)
for en in val:
flag = True
try:
for k in keys:
if en[k] != keys[k]:
flag = False
break
except KeyError:
continue
if flag:
return (en, sn)
return (None, sn)
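# The matching logic above, reduced to a standalone sketch: an entry matches
# when every configured key equals the entry's value; entries lacking a key are
# skipped rather than treated as mismatches. `keys` here is a stand-in for the
# result of self.parse_keys(sn).
entries = [{"name": "eth0", "mtu": 1500}, {"name": "eth1", "mtu": 9000}]
keys = {"name": "eth1"}
match = None
for en in entries:
    try:
        if all(en[k] == v for k, v in keys.items()):
            match = en
            break
    except KeyError:
        continue
print(match)   # {'name': 'eth1', 'mtu': 9000}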
def clientConnected(self, proto):
"""
Called when a client connects to the bus. This method assigns the
new connection a unique bus name.
"""
proto.uniqueName = ':1.%d' % (self.next_id,)
self.next_id += 1
self.clients[proto.uniqueName] = proto
def set_thresholds(self, touch, release):
"""Set the touch and release threshold for all inputs to the provided
values. Both touch and release should be a value between 0 to 255
(inclusive).
"""
        assert 0 <= touch <= 255, 'touch must be between 0-255 (inclusive)'
        assert 0 <= release <= 255, 'release must be between 0-255 (inclusive)'
# Set the touch and release register value for all the inputs.
for i in range(12):
self._i2c_retry(self._device.write8, MPR121_TOUCHTH_0 + 2*i, touch)
self._i2c_retry(self._device.write8, MPR121_RELEASETH_0 + 2*i, release)
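# Register layout this loop relies on (an assumption based on the MPR121
# register map, where E0TTH is 0x41 and E0RTH is 0x42; verify against the
# datasheet): touch and release thresholds interleave per channel, so channel
# i's pair sits at MPR121_TOUCHTH_0 + 2*i and MPR121_RELEASETH_0 + 2*i.
MPR121_TOUCHTH_0, MPR121_RELEASETH_0 = 0x41, 0x42   # assumed constants
for i in range(12):
    print(hex(MPR121_TOUCHTH_0 + 2*i), hex(MPR121_RELEASETH_0 + 2*i))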
def page_uri_handler(context, content, pargs, kwargs):
"""
Shortcode for getting the link to internal pages using the flask `url_for`
method.
Activate with 'shortcodes' template filter. Within the content use the
chill page_uri shortcode: "[chill page_uri idofapage]". The argument is the
'uri' for a page that chill uses.
Does not verify the link to see if it's valid.
"""
uri = pargs[0]
return url_for('.page_uri', uri=uri)
def _grab_history(self):
"""Calculate the needed history/changelog changes
Every history heading looks like '1.0 b4 (1972-12-25)'. Extract them,
check if the first one matches the version and whether it has a the
current date.
"""
default_location = None
config = self.setup_cfg.config
if config and config.has_option('zest.releaser', 'history_file'):
default_location = config.get('zest.releaser', 'history_file')
history_file = self.vcs.history_file(location=default_location)
if not history_file:
            logger.warning("No history file found")
self.data['history_lines'] = None
self.data['history_file'] = None
return
logger.debug("Checking %s", history_file)
        with open(history_file) as history_fh:
            history_lines = history_fh.read().split('\n')
        # ^^^ TODO: .readlines()?
headings = utils.extract_headings_from_history(history_lines)
if not len(headings):
logger.error("No detectable version heading in the history "
"file %s", history_file)
sys.exit()
good_heading = self.data['history_header'] % self.data
# ^^^ history_header is a string with %(abc)s replacements.
line = headings[0]['line']
previous = history_lines[line]
history_lines[line] = good_heading
logger.debug("Set heading from %r to %r.", previous, good_heading)
history_lines[line + 1] = utils.fix_rst_heading(
heading=good_heading,
below=history_lines[line + 1])
logger.debug("Set line below heading to %r",
history_lines[line + 1])
self.data['history_lines'] = history_lines
self.data['history_file'] = history_file
def health(self, session=None):
"""
        An endpoint for checking the health status of the Airflow instance,
        including the metadatabase and scheduler.
"""
BJ = jobs.BaseJob
payload = {}
scheduler_health_check_threshold = timedelta(seconds=conf.getint('scheduler',
'scheduler_health_check_threshold'
))
latest_scheduler_heartbeat = None
payload['metadatabase'] = {'status': 'healthy'}
try:
latest_scheduler_heartbeat = session.query(func.max(BJ.latest_heartbeat)).\
filter(BJ.state == 'running', BJ.job_type == 'SchedulerJob').\
scalar()
except Exception:
payload['metadatabase']['status'] = 'unhealthy'
if not latest_scheduler_heartbeat:
scheduler_status = 'unhealthy'
else:
if timezone.utcnow() - latest_scheduler_heartbeat <= scheduler_health_check_threshold:
scheduler_status = 'healthy'
else:
scheduler_status = 'unhealthy'
payload['scheduler'] = {'status': scheduler_status,
'latest_scheduler_heartbeat': str(latest_scheduler_heartbeat)}
return wwwutils.json_response(payload)
def apply_projection(projection, value):
"""Apply projection."""
    # Strings are Sequences too; excluding them avoids per-character recursion.
    if isinstance(value, Sequence) and not isinstance(value, (str, bytes)):
        # Apply projection to each item in the list.
return [
apply_projection(projection, item)
for item in value
]
elif not isinstance(value, Mapping):
# Non-dictionary values are simply ignored.
return value
# Extract projection for current level.
try:
current_projection = [p[0] for p in projection]
except IndexError:
return value
# Apply projection.
for name in list(value.keys()):
if name not in current_projection:
value.pop(name)
elif isinstance(value[name], dict):
# Apply projection recursively.
value[name] = apply_projection(
[p[1:] for p in projection if p[0] == name],
value[name]
)
return value
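# Worked example of apply_projection(): each projection entry is a path of
# nested keys, and anything outside the projected paths is dropped in place.
doc = {"a": 1, "b": {"c": 2, "d": 3}, "e": 4}
print(apply_projection([("a",), ("b", "c")], doc))
# -> {'a': 1, 'b': {'c': 2}}   ('e' and the nested 'd' are removed)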
def find_neighbor_pores(self, pores, mode='union', flatten=True,
include_input=False):
r"""
Returns a list of pores that are direct neighbors to the given pore(s)
Parameters
----------
pores : array_like
Indices of the pores whose neighbors are sought
flatten : boolean
If ``True`` (default) the returned result is a compressed array of
all neighbors. If ``False``, a list of lists with each sub-list
containing the neighbors for each input site. Note that an
*unflattened* list might be slow to generate since it is a Python
``list`` rather than a Numpy ``array``.
include_input : bool
If ``False`` (default) then the input pores are not included in
the returned list(s). Note that since pores are not neighbors of
themselves, the neighbors of pore N will not include N, even if
this flag is ``True``.
mode : string
Specifies logic to filter the resulting list. Options are:
**'or'** : (default) All neighbors of the input pores. This is
also known as the 'union' in set theory or 'any' in boolean logic.
Both keywords are accepted and treated as 'or'.
**'xor'** : Only neighbors of one and only one input pore. This
is useful for finding the pores that are not shared by any of the
input pores. This is known as 'exclusive_or' in set theory, and
is an accepted input.
**'xnor'** : Neighbors that are shared by two or more input pores.
This is equivalent to finding all neighbors with 'or', minus those
found with 'xor', and is useful for finding neighbors that the
inputs have in common.
**'and'** : Only neighbors shared by all input pores. This is also
            known as 'intersection' in set theory and (sometimes) as 'all' in
boolean logic. Both keywords are accepted and treated as 'and'.
Returns
-------
If ``flatten`` is ``True``, returns a 1D array of pore indices filtered
according to the specified mode. If ``flatten`` is ``False``, returns
a list of lists, where each list contains the neighbors of the
corresponding input pores.
Notes
-----
The ``logic`` options are applied to neighboring pores only, thus it
is not possible to obtain pores that are part of the global set but
not neighbors. This is because (a) the list of global pores might be
very large, and (b) it is not possible to return a list of neighbors
for each input pores if global pores are considered.
Examples
--------
>>> import openpnm as op
>>> pn = op.network.Cubic(shape=[5, 5, 5])
>>> Ps = pn.find_neighbor_pores(pores=[0, 2])
>>> print(Ps)
[ 1 3 5 7 25 27]
>>> Ps = pn.find_neighbor_pores(pores=[0, 1])
>>> print(Ps)
[ 2 5 6 25 26]
>>> Ps = pn.find_neighbor_pores(pores=[0, 1], mode='union',
... include_input=True)
>>> print(Ps)
[ 0 1 2 5 6 25 26]
>>> Ps = pn.find_neighbor_pores(pores=[0, 2], flatten=False)
>>> print(Ps)
[array([ 1, 5, 25]), array([ 1, 3, 7, 27])]
>>> Ps = pn.find_neighbor_pores(pores=[0, 2], mode='xnor')
>>> print(Ps)
[1]
>>> Ps = pn.find_neighbor_pores(pores=[0, 2], mode='xor')
>>> print(Ps)
[ 3 5 7 25 27]
"""
pores = self._parse_indices(pores)
if sp.size(pores) == 0:
return sp.array([], ndmin=1, dtype=int)
if 'lil' not in self._am.keys():
self.get_adjacency_matrix(fmt='lil')
neighbors = topotools.find_neighbor_sites(sites=pores, logic=mode,
am=self._am['lil'],
flatten=flatten,
include_input=include_input)
return neighbors
def get_overridden_calculated_entry(self):
"""Gets the calculated entry this entry overrides.
return: (osid.grading.GradeEntry) - the calculated entry
raise: IllegalState - ``overrides_calculated_entry()`` is
``false``
raise: OperationFailed - unable to complete request
*compliance: mandatory -- This method must be implemented.*
"""
# Implemented from template for osid.resource.Resource.get_avatar_template
if not bool(self._my_map['overriddenCalculatedEntryId']):
raise errors.IllegalState('this GradeEntry has no overridden_calculated_entry')
mgr = self._get_provider_manager('GRADING')
if not mgr.supports_grade_entry_lookup():
raise errors.OperationFailed('Grading does not support GradeEntry lookup')
lookup_session = mgr.get_grade_entry_lookup_session(proxy=getattr(self, "_proxy", None))
lookup_session.use_federated_gradebook_view()
osid_object = lookup_session.get_grade_entry(self.get_overridden_calculated_entry_id())
return osid_object
def is_applicable(self, date_string, strip_timezone=False, settings=None):
"""
Check if the locale is applicable to translate date string.
:param date_string:
A string representing date and/or time in a recognizably valid format.
:type date_string: str|unicode
:param strip_timezone:
If True, timezone is stripped from date string.
:type strip_timezone: bool
:return: boolean value representing if the locale is applicable for the date string or not.
"""
if strip_timezone:
date_string, _ = pop_tz_offset_from_string(date_string, as_offset=False)
date_string = self._translate_numerals(date_string)
if settings.NORMALIZE:
date_string = normalize_unicode(date_string)
date_string = self._simplify(date_string, settings=settings)
dictionary = self._get_dictionary(settings)
date_tokens = dictionary.split(date_string)
return dictionary.are_tokens_valid(date_tokens)
def clipValue(self, value, minValue, maxValue):
'''
Makes sure that value is within a specific range.
        If not, the lower or upper bound is returned
'''
return min(max(value, minValue), maxValue)
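# clipValue() in action: min(max(value, lo), hi) pins value into [lo, hi].
# A standalone check of the same expression:
for v in (12, -3, 7):
    print(min(max(v, 0), 10))   # -> 10, 0, 7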
def fit_classifier(self, name, analytes, method, samples=None,
subset=None, filt=True, sort_by=0, **kwargs):
"""
Create a clustering classifier based on all samples, or a subset.
Parameters
----------
name : str
The name of the classifier.
analytes : str or iterable
Which analytes the clustering algorithm should consider.
method : str
Which clustering algorithm to use. Can be:
'meanshift'
The `sklearn.cluster.MeanShift` algorithm.
Automatically determines number of clusters
in data based on the `bandwidth` of expected
variation.
'kmeans'
The `sklearn.cluster.KMeans` algorithm. Determines
the characteristics of a known number of clusters
within the data. Must provide `n_clusters` to specify
the expected number of clusters.
samples : iterable
list of samples to consider. Overrides 'subset'.
subset : str
The subset of samples used to fit the classifier. Ignored if
'samples' is specified.
sort_by : int
Which analyte the resulting clusters should be sorted
by - defaults to 0, which is the first analyte.
**kwargs :
method-specific keyword parameters - see below.
Meanshift Parameters
bandwidth : str or float
            The bandwidth (float) or bandwidth method ('scott' or 'silverman')
used to estimate the data bandwidth.
bin_seeding : bool
Modifies the behaviour of the meanshift algorithm. Refer to
sklearn.cluster.meanshift documentation.
K - Means Parameters
n_clusters : int
The number of clusters expected in the data.
Returns
-------
name : str
"""
# isolate data
if samples is not None:
subset = self.make_subset(samples)
self.get_focus(subset=subset, filt=filt)
        # create classifier
c = classifier(analytes,
sort_by)
# fit classifier
c.fit(data=self.focus,
method=method,
**kwargs)
self.classifiers[name] = c
return name
def nx_contracted_nodes(G, u, v, self_loops=True, inplace=False):
"""
copy of networkx function with inplace modification
TODO: commit to networkx
"""
import itertools as it
if G.is_directed():
in_edges = ((w, u, d) for w, x, d in G.in_edges(v, data=True)
if self_loops or w != u)
out_edges = ((u, w, d) for x, w, d in G.out_edges(v, data=True)
if self_loops or w != u)
new_edges = it.chain(in_edges, out_edges)
else:
new_edges = ((u, w, d) for x, w, d in G.edges(v, data=True)
if self_loops or w != u)
if inplace:
H = G
new_edges = list(new_edges)
else:
H = G.copy()
node_dict = nx_node_dict(H)
v_data = node_dict[v]
H.remove_node(v)
H.add_edges_from(new_edges)
if 'contraction' in node_dict[u]:
node_dict[u]['contraction'][v] = v_data
else:
node_dict[u]['contraction'] = {v: v_data}
return H
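# Usage sketch (assumes networkx is available and the nx_node_dict helper from
# this module): contracting node 2 into node 1 of a path graph reroutes 2's
# remaining edges onto 1.
import networkx as nx
G = nx.path_graph(4)                        # edges: 0-1, 1-2, 2-3
H = nx_contracted_nodes(G, 1, 2, self_loops=False)
print(sorted(H.edges()))                    # [(0, 1), (1, 3)]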
def aoi(self, **kwargs):
""" Subsets the Image by the given bounds
Args:
bbox (list): optional. A bounding box array [minx, miny, maxx, maxy]
wkt (str): optional. A WKT geometry string
geojson (str): optional. A GeoJSON geometry dictionary
Returns:
image: an image instance of the same type
"""
g = self._parse_geoms(**kwargs)
if g is None:
return self
else:
return self[g]
def poly(self, return_coeffs=False):
"""returns the quadratic as a Polynomial object."""
p = self.bpoints()
coeffs = (p[0] - 2*p[1] + p[2], 2*(p[1] - p[0]), p[0])
if return_coeffs:
return coeffs
else:
return np.poly1d(coeffs)
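# Check of the coefficient identity used above: the quadratic Bezier
# B(t) = (1-t)^2*p0 + 2t(1-t)*p1 + t^2*p2 expands to
# (p0 - 2*p1 + p2)*t^2 + 2*(p1 - p0)*t + p0, which is what poly() builds.
import numpy as np
p0, p1, p2 = 1.0, 3.0, 2.0
q = np.poly1d((p0 - 2*p1 + p2, 2*(p1 - p0), p0))
t = 0.37
bezier = (1 - t)**2 * p0 + 2*t*(1 - t)*p1 + t**2 * p2
print(np.isclose(q(t), bezier))   # True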
def mosaic_inline(self, imagelist, bg_ref=None, trim_px=None,
merge=False, allow_expand=True, expand_pad_deg=0.01,
max_expand_pct=None,
update_minmax=True, suppress_callback=False):
"""Drops new images into the current image (if there is room),
        relocating them according to the WCS between the two images.
"""
# Get our own (mosaic) rotation and scale
header = self.get_header()
((xrot_ref, yrot_ref),
(cdelt1_ref, cdelt2_ref)) = wcs.get_xy_rotation_and_scale(header)
scale_x, scale_y = math.fabs(cdelt1_ref), math.fabs(cdelt2_ref)
# drop each image in the right place in the new data array
mydata = self._get_data()
count = 1
res = []
for image in imagelist:
name = image.get('name', 'image%d' % (count))
count += 1
data_np = image._get_data()
if 0 in data_np.shape:
self.logger.info("Skipping image with zero length axis")
continue
# Calculate sky position at the center of the piece
ctr_x, ctr_y = trcalc.get_center(data_np)
ra, dec = image.pixtoradec(ctr_x, ctr_y)
# User specified a trim? If so, trim edge pixels from each
# side of the array
ht, wd = data_np.shape[:2]
if trim_px:
xlo, xhi = trim_px, wd - trim_px
ylo, yhi = trim_px, ht - trim_px
data_np = data_np[ylo:yhi, xlo:xhi, ...]
ht, wd = data_np.shape[:2]
# If caller asked us to match background of pieces then
# get the median of this piece
if bg_ref is not None:
bg = iqcalc.get_median(data_np)
bg_inc = bg_ref - bg
data_np = data_np + bg_inc
# Determine max/min to update our values
if update_minmax:
maxval = np.nanmax(data_np)
minval = np.nanmin(data_np)
self.maxval = max(self.maxval, maxval)
self.minval = min(self.minval, minval)
# Get rotation and scale of piece
header = image.get_header()
((xrot, yrot),
(cdelt1, cdelt2)) = wcs.get_xy_rotation_and_scale(header)
self.logger.debug("image(%s) xrot=%f yrot=%f cdelt1=%f "
"cdelt2=%f" % (name, xrot, yrot, cdelt1, cdelt2))
# scale if necessary
# TODO: combine with rotation?
if (not np.isclose(math.fabs(cdelt1), scale_x) or
not np.isclose(math.fabs(cdelt2), scale_y)):
nscale_x = math.fabs(cdelt1) / scale_x
nscale_y = math.fabs(cdelt2) / scale_y
self.logger.debug("scaling piece by x(%f), y(%f)" % (
nscale_x, nscale_y))
data_np, (ascale_x, ascale_y) = trcalc.get_scaled_cutout_basic(
data_np, 0, 0, wd - 1, ht - 1, nscale_x, nscale_y,
logger=self.logger)
# Rotate piece into our orientation, according to wcs
rot_dx, rot_dy = xrot - xrot_ref, yrot - yrot_ref
flip_x = False
flip_y = False
            # Optimization for 180-degree rotations
if (np.isclose(math.fabs(rot_dx), 180.0) or
np.isclose(math.fabs(rot_dy), 180.0)):
rotdata = trcalc.transform(data_np,
flip_x=True, flip_y=True)
rot_dx = 0.0
rot_dy = 0.0
else:
rotdata = data_np
# Finish with any necessary rotation of piece
if not np.isclose(rot_dy, 0.0):
rot_deg = rot_dy
self.logger.debug("rotating %s by %f deg" % (name, rot_deg))
rotdata = trcalc.rotate(rotdata, rot_deg,
#rotctr_x=ctr_x, rotctr_y=ctr_y
logger=self.logger)
# Flip X due to negative CDELT1
if np.sign(cdelt1) != np.sign(cdelt1_ref):
flip_x = True
# Flip Y due to negative CDELT2
if np.sign(cdelt2) != np.sign(cdelt2_ref):
flip_y = True
if flip_x or flip_y:
rotdata = trcalc.transform(rotdata,
flip_x=flip_x, flip_y=flip_y)
# Get size and data of new image
ht, wd = rotdata.shape[:2]
ctr_x, ctr_y = trcalc.get_center(rotdata)
# Find location of image piece (center) in our array
x0, y0 = self.radectopix(ra, dec)
# Merge piece as closely as possible into our array
# Unfortunately we lose a little precision rounding to the
# nearest pixel--can't be helped with this approach
x0, y0 = int(np.round(x0)), int(np.round(y0))
self.logger.debug("Fitting image '%s' into mosaic at %d,%d" % (
name, x0, y0))
# This is for useful debugging info only
my_ctr_x, my_ctr_y = trcalc.get_center(mydata)
off_x, off_y = x0 - my_ctr_x, y0 - my_ctr_y
self.logger.debug("centering offsets: %d,%d" % (off_x, off_y))
# Sanity check piece placement
xlo, xhi = x0 - ctr_x, x0 + wd - ctr_x
ylo, yhi = y0 - ctr_y, y0 + ht - ctr_y
assert (xhi - xlo == wd), \
Exception("Width differential %d != %d" % (xhi - xlo, wd))
assert (yhi - ylo == ht), \
Exception("Height differential %d != %d" % (yhi - ylo, ht))
mywd, myht = self.get_size()
if xlo < 0 or xhi > mywd or ylo < 0 or yhi > myht:
if not allow_expand:
raise Exception("New piece doesn't fit on image and "
"allow_expand=False")
# <-- Resize our data array to allow the new image
# determine amount to pad expansion by
expand_x = max(int(expand_pad_deg / scale_x), 0)
expand_y = max(int(expand_pad_deg / scale_y), 0)
nx1_off, nx2_off = 0, 0
if xlo < 0:
nx1_off = abs(xlo) + expand_x
if xhi > mywd:
nx2_off = (xhi - mywd) + expand_x
xlo, xhi = xlo + nx1_off, xhi + nx1_off
ny1_off, ny2_off = 0, 0
if ylo < 0:
ny1_off = abs(ylo) + expand_y
if yhi > myht:
ny2_off = (yhi - myht) + expand_y
ylo, yhi = ylo + ny1_off, yhi + ny1_off
new_wd = mywd + nx1_off + nx2_off
new_ht = myht + ny1_off + ny2_off
# sanity check on new mosaic size
old_area = mywd * myht
new_area = new_wd * new_ht
expand_pct = new_area / old_area
if ((max_expand_pct is not None) and
(expand_pct > max_expand_pct)):
raise Exception("New area exceeds current one by %.2f %%;"
"increase max_expand_pct (%.2f) to allow" %
(expand_pct * 100, max_expand_pct))
# go for it!
new_data = np.zeros((new_ht, new_wd))
# place current data into new data
new_data[ny1_off:ny1_off + myht, nx1_off:nx1_off + mywd] = \
mydata
self._data = new_data
mydata = new_data
if (nx1_off > 0) or (ny1_off > 0):
# Adjust our WCS for relocation of the reference pixel
crpix1, crpix2 = self.get_keywords_list('CRPIX1', 'CRPIX2')
kwds = dict(CRPIX1=crpix1 + nx1_off,
CRPIX2=crpix2 + ny1_off)
self.update_keywords(kwds)
# fit image piece into our array
try:
if merge:
mydata[ylo:yhi, xlo:xhi, ...] += rotdata[0:ht, 0:wd, ...]
else:
idx = (mydata[ylo:yhi, xlo:xhi, ...] == 0.0)
mydata[ylo:yhi, xlo:xhi, ...][idx] = \
rotdata[0:ht, 0:wd, ...][idx]
except Exception as e:
self.logger.error("Error fitting tile: %s" % (str(e)))
raise
res.append((xlo, ylo, xhi, yhi))
# TODO: recalculate min and max values
# Can't use usual techniques because it adds too much time to the
        # mosaicking
#self._set_minmax()
# Notify watchers that our data has changed
if not suppress_callback:
self.make_callback('modified')
return res
def exceptions(self):
"""
Returns a list of ParamDoc objects (with empty names) of the
exception tags for the function.
>>> comments = parse_comments_for_file('examples/module_closure.js')
>>> fn1 = FunctionDoc(comments[1])
>>> fn1.exceptions[0].doc
'Another exception'
>>> fn1.exceptions[1].doc
'A fake exception'
>>> fn1.exceptions[1].type
'String'
"""
def make_param(text):
if '{' in text and '}' in text:
# Make sure param name is blank:
word_split = list(split_delimited('{}', ' ', text))
if word_split[1] != '':
text = ' '.join([word_split[0], ''] + word_split[1:])
else:
# Handle old JSDoc format
word_split = text.split()
text = '{%s} %s' % (word_split[0], ' '.join(word_split[1:]))
return ParamDoc(text)
return [make_param(text) for text in
self.get_as_list('throws') + self.get_as_list('exception')]
|
Returns a list of ParamDoc objects (with empty names) of the
exception tags for the function.
>>> comments = parse_comments_for_file('examples/module_closure.js')
>>> fn1 = FunctionDoc(comments[1])
>>> fn1.exceptions[0].doc
'Another exception'
>>> fn1.exceptions[1].doc
'A fake exception'
>>> fn1.exceptions[1].type
'String'
|
def copy_unit_properties(self, sorting, unit_ids=None):
'''Copy unit properties from another sorting extractor to the current
sorting extractor.
Parameters
----------
sorting: SortingExtractor
The sorting extractor from which the properties will be copied
unit_ids: (array_like, int)
The list (or single value) of unit_ids for which the properties will be copied.
'''
if unit_ids is None:
unit_ids = sorting.get_unit_ids()
if isinstance(unit_ids, int):
curr_property_names = sorting.get_unit_property_names(unit_id=unit_ids)
for curr_property_name in curr_property_names:
value = sorting.get_unit_property(unit_id=unit_ids, property_name=curr_property_name)
self.set_unit_property(unit_id=unit_ids, property_name=curr_property_name, value=value)
else:
for unit_id in unit_ids:
curr_property_names = sorting.get_unit_property_names(unit_id=unit_id)
for curr_property_name in curr_property_names:
value = sorting.get_unit_property(unit_id=unit_id, property_name=curr_property_name)
self.set_unit_property(unit_id=unit_id, property_name=curr_property_name, value=value)
|
Copy unit properties from another sorting extractor to the current
sorting extractor.
Parameters
----------
sorting: SortingExtractor
The sorting extractor from which the properties will be copied
unit_ids: (array_like, int)
The list (or single value) of unit_ids for which the properties will be copied.
|
def iter_links(operations, page):
"""
Generate links for an iterable of operations on a starting page.
"""
for operation, ns, rule, func in operations:
yield Link.for_(
operation=operation,
ns=ns,
type=ns.subject_name,
qs=page.to_items(),
)
|
Generate links for an iterable of operations on a starting page.
|
def resume_transfer_operation(self, operation_name):
"""
Resumes a transfer operation in Google Storage Transfer Service.
:param operation_name: (Required) Name of the transfer operation.
:type operation_name: str
:rtype: None
"""
self.get_conn().transferOperations().resume(name=operation_name).execute(num_retries=self.num_retries)
|
Resumes a transfer operation in Google Storage Transfer Service.
:param operation_name: (Required) Name of the transfer operation.
:type operation_name: str
:rtype: None
|
def pid(self):
"""The pid of the process associated to the scheduler."""
try:
return self._pid
except AttributeError:
self._pid = os.getpid()
return self._pid
|
The pid of the process associated with the scheduler.
|
def get_unique_families(hkls):
"""
Returns unique families of Miller indices. Families must be permutations
of each other.
Args:
hkls ([h, k, l]): List of Miller indices.
Returns:
{hkl: multiplicity}: A dict with unique hkl and multiplicity.
"""
# TODO: Definitely can be sped up.
def is_perm(hkl1, hkl2):
h1 = np.abs(hkl1)
h2 = np.abs(hkl2)
return all([i == j for i, j in zip(sorted(h1), sorted(h2))])
unique = collections.defaultdict(list)
for hkl1 in hkls:
found = False
for hkl2 in unique.keys():
if is_perm(hkl1, hkl2):
found = True
unique[hkl2].append(hkl1)
break
if not found:
unique[hkl1].append(hkl1)
pretty_unique = {}
for k, v in unique.items():
pretty_unique[sorted(v)[-1]] = len(v)
return pretty_unique
|
Returns unique families of Miller indices. Families must be permutations
of each other.
Args:
hkls ([h, k, l]): List of Miller indices.
Returns:
{hkl: multiplicity}: A dict with unique hkl and multiplicity.
|
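A self-contained sketch of the grouping idea above, on plain tuples instead of numpy arrays; unlike the original, it keeps the first-seen member as the family representative rather than the sorted-largest one:
def same_family(hkl1, hkl2):
    # permutations of the same absolute indices belong to one family
    return sorted(abs(i) for i in hkl1) == sorted(abs(i) for i in hkl2)

families = {}
for hkl in [(1, 0, 0), (0, 1, 0), (0, 0, -1), (1, 1, 0)]:
    for rep in families:
        if same_family(rep, hkl):
            families[rep] += 1
            break
    else:
        families[hkl] = 1
print(families)  # {(1, 0, 0): 3, (1, 1, 0): 1}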
def parse_config_h(fp, vars=None):
"""Parse a config.h-style file.
A dictionary containing name/value pairs is returned. If an
optional dictionary is passed in as the second argument, it is
used instead of a new dictionary.
"""
if vars is None:
vars = {}
define_rx = re.compile("#define ([A-Z][A-Za-z0-9_]+) (.*)\n")
undef_rx = re.compile("/[*] #undef ([A-Z][A-Za-z0-9_]+) [*]/\n")
while True:
line = fp.readline()
if not line:
break
m = define_rx.match(line)
if m:
n, v = m.group(1, 2)
try:
v = int(v)
except ValueError:
pass
vars[n] = v
else:
m = undef_rx.match(line)
if m:
vars[m.group(1)] = 0
return vars
|
Parse a config.h-style file.
A dictionary containing name/value pairs is returned. If an
optional dictionary is passed in as the second argument, it is
used instead of a new dictionary.
|
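A hypothetical usage sketch (assumes parse_config_h is in scope); any file-like object works, e.g. io.StringIO:
import io

sample = ('#define HAVE_UNISTD_H 1\n'
          '/* #undef HAVE_FOO */\n'
          '#define PLATFORM "linux"\n')
cfg = parse_config_h(io.StringIO(sample))
print(cfg)  # {'HAVE_UNISTD_H': 1, 'HAVE_FOO': 0, 'PLATFORM': '"linux"'}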
def replace_word_tokens(string, language):
"""
Given a string and an ISO 639-2 language code,
return the string with the words replaced with
an operational equivalent.
"""
words = mathwords.word_groups_for_language(language)
# Replace operator words with numeric operators
operators = words['binary_operators'].copy()
if 'unary_operators' in words:
operators.update(words['unary_operators'])
for operator in list(operators.keys()):
if operator in string:
string = string.replace(operator, operators[operator])
# Replace number words with numeric values
numbers = words['numbers']
for number in list(numbers.keys()):
if number in string:
string = string.replace(number, str(numbers[number]))
# Replace scaling multipliers with numeric values
scales = words['scales']
end_index_characters = mathwords.BINARY_OPERATORS
end_index_characters.add('(')
word_matches = find_word_groups(string, list(scales.keys()))
for match in word_matches:
string = string.replace(match, '(' + match + ')')
for scale in list(scales.keys()):
for _ in range(0, string.count(scale)):
start_index = string.find(scale) - 1
end_index = len(string)
while is_int(string[start_index - 1]) and start_index > 0:
start_index -= 1
end_index = string.find(' ', start_index) + 1
end_index = string.find(' ', end_index) + 1
add = ' + '
if string[end_index] in end_index_characters:
add = ''
string = string[:start_index] + '(' + string[start_index:]
string = string.replace(
scale, '* ' + str(scales[scale]) + ')' + add,
1
)
string = string.replace(') (', ') + (')
return string
|
Given a string and an ISO 639-2 language code,
return the string with the words replaced with
an operational equivalent.
|
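A self-contained sketch of the substitution idea, with a tiny hypothetical word table standing in for the language-keyed mathwords tables (the real function also handles scales like 'thousand'):
operators = {'plus': '+', 'minus': '-', 'times': '*'}
numbers = {'seven': '7', 'two': '2'}

def words_to_expr(text):
    # replace operator and number words with their symbols
    for word, sym in {**operators, **numbers}.items():
        text = text.replace(word, sym)
    return text

print(words_to_expr('seven plus two'))  # 7 + 2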
def console(self, ttynum=-1, stdinfd=0, stdoutfd=1, stderrfd=2, escape=1):
"""
Attach to console of running container.
"""
if not self.running:
return False
return _lxc.Container.console(self, ttynum, stdinfd, stdoutfd,
stderrfd, escape)
|
Attach to console of running container.
|
def send(self,text):
"""Send a string to the PiLite, can be simple text or a $$$ command"""
#print text
self.s.write(text)
time.sleep(0.001*len(text))
|
Send a string to the PiLite, can be simple text or a $$$ command
|
def add_json(self, json_obj, **kwargs):
"""Adds a json-serializable Python dict as a json file to IPFS.
.. code-block:: python
>>> c.add_json({'one': 1, 'two': 2, 'three': 3})
'QmVz9g7m5u3oHiNKHj2CJX1dbG1gtismRS3g9NaPBBLbob'
Parameters
----------
json_obj : dict
A json-serializable Python dictionary
Returns
-------
str : Hash of the added IPFS object
"""
return self.add_bytes(encoding.Json().encode(json_obj), **kwargs)
|
Adds a json-serializable Python dict as a json file to IPFS.
.. code-block:: python
>>> c.add_json({'one': 1, 'two': 2, 'three': 3})
'QmVz9g7m5u3oHiNKHj2CJX1dbG1gtismRS3g9NaPBBLbob'
Parameters
----------
json_obj : dict
A json-serializable Python dictionary
Returns
-------
str : Hash of the added IPFS object
|
def load(obj, settings_module, identifier="py", silent=False, key=None):
"""Tries to import a python module"""
mod, loaded_from = get_module(obj, settings_module, silent)
if mod and loaded_from:
obj.logger.debug("py_loader: {}".format(mod))
else:
obj.logger.debug(
"py_loader: %s (Ignoring, Not Found)", settings_module
)
return
for setting in dir(mod):
if setting.isupper():
if key is None or key == setting:
setting_value = getattr(mod, setting)
obj.logger.debug(
"py_loader: loading %s: %s (%s)",
setting,
"*****" if "secret" in settings_module else setting_value,
identifier,
)
obj.set(setting, setting_value, loader_identifier=identifier)
obj._loaded_files.append(mod.__file__)
|
Tries to import a python module
|
def swap(self, fn, *args, **kwargs):
'''
Given a mutator `fn`, calls `fn` with the atom's current state, `args`,
and `kwargs`. The return value of this invocation becomes the new value
of the atom. Returns the new value.
:param fn: A function which will be passed the current state. Should
return a new state. This absolutely *MUST NOT* mutate the
reference to the current state! If it does, this function may loop
indefinitely.
:param \*args: Arguments to be passed to `fn`.
:param \*\*kwargs: Keyword arguments to be passed to `fn`.
'''
while True:
oldval = self.deref()
newval = fn(oldval, *args, **kwargs)
if self._state.compare_and_set(oldval, newval):
self.notify_watches(oldval, newval)
return newval
|
Given a mutator `fn`, calls `fn` with the atom's current state, `args`,
and `kwargs`. The return value of this invocation becomes the new value
of the atom. Returns the new value.
:param fn: A function which will be passed the current state. Should
return a new state. This absolutely *MUST NOT* mutate the
reference to the current state! If it does, this function may loop
indefinitely.
:param \*args: Arguments to be passed to `fn`.
:param \*\*kwargs: Keyword arguments to be passed to `fn`.
|
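A minimal lock-backed sketch of the same compare-and-set retry loop (not the library's Atom; identity comparison of states is assumed sufficient):
import threading

class SketchAtom:
    def __init__(self, value):
        self._value = value
        self._lock = threading.Lock()

    def deref(self):
        return self._value

    def compare_and_set(self, oldval, newval):
        # atomically install newval only if the state is still oldval
        with self._lock:
            if self._value is oldval:
                self._value = newval
                return True
            return False

    def swap(self, fn, *args, **kwargs):
        while True:  # retry until no concurrent writer got in between
            oldval = self.deref()
            newval = fn(oldval, *args, **kwargs)
            if self.compare_and_set(oldval, newval):
                return newval

a = SketchAtom(0)
print(a.swap(lambda v: v + 1))  # 1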
def set(self, obj, id, payload, action='', async=False):
""" Function set
Set an object by id
@param obj: object name ('hosts', 'puppetclasses'...)
@param id: the id of the object (name or id)
@param action: specific action of an object ('power'...)
@param payload: the dict of the payload
@param async: should this request be async, if true use
return.result() to get the response
@return RETURN: the server response
"""
self.url = '{}{}/{}'.format(self.base_url, obj, id)
self.method = 'PUT'
if action:
self.url += '/{}'.format(action)
self.payload = json.dumps(payload)
if async:
session = FuturesSession()
return session.put(url=self.url, auth=self.auth,
headers=self.headers, data=self.payload,
cert=self.ca_cert)
else:
self.resp = requests.put(url=self.url, auth=self.auth,
headers=self.headers, data=self.payload,
cert=self.ca_cert)
if self.__process_resp__(obj):
return self.res
return False
|
Function set
Set an object by id
@param obj: object name ('hosts', 'puppetclasses'...)
@param id: the id of the object (name or id)
@param action: specific action of an object ('power'...)
@param payload: the dict of the payload
@param async: should this request be async, if true use
return.result() to get the response
@return RETURN: the server response
|
def profile_cancel(self, query_id, timeout=10):
"""
Cancel the query that has the given queryid.
:param query_id: The UUID of the query in standard UUID format that Drill assigns to each query.
:param timeout: int
:return: pydrill.client.Result
"""
result = Result(*self.perform_request(**{
'method': 'GET',
'url': '/profiles/cancel/{0}'.format(query_id),
'params': {
'request_timeout': timeout
}
}))
return result
|
Cancel the query that has the given queryid.
:param query_id: The UUID of the query in standard UUID format that Drill assigns to each query.
:param timeout: int
:return: pydrill.client.Result
|
def OnExpandAll(self):
""" expand all nodes """
root = self.tree.GetRootItem()
fn = self.tree.Expand
self.traverse(root, fn)
self.tree.Expand(root)
|
expand all nodes
|
def onStart(self, event):
"""
Display the environment of a started container
"""
c = event.container
print '+' * 5, 'started:', c
kv = lambda s: s.split('=', 1)
env = {k: v for (k, v) in (kv(s) for s in c.attrs['Config']['Env'])}
print env
|
Display the environment of a started container
|
def fit(self, counts_df, val_set=None):
"""
Fit Hierarchical Poisson Model to sparse count data
Fits a hierarchical Poisson model to count data using mean-field approximation with either
full-batch coordinate-ascent or mini-batch stochastic coordinate-ascent.
Note
----
DataFrames and arrays passed to '.fit' might be modified in place - if this is a problem you'll
need to pass a copy of them, e.g. 'counts_df=counts_df.copy()'.
Note
----
Forcibly terminating the procedure should still keep the last calculated shape and rate
parameter values, but is not recommended. If you need to make predictions on a forcibly
terminated object, set the attribute 'is_fitted' to 'True'.
Note
----
Fitting in mini-batches is more prone to numerical instability and compared to full-batch
variational inference, it is more likely that all your parameters will turn to NaNs (which
means the optimization procedure failed).
Parameters
----------
counts_df : pandas data frame (nobs, 3) or coo_matrix
Input data with one row per non-zero observation, consisting of triplets ('UserId', 'ItemId', 'Count').
Must contain columns 'UserId', 'ItemId', and 'Count'.
Combinations of users and items not present are implicitly assumed to be zero by the model.
Can also pass a sparse coo_matrix, in which case 'reindex' will be forced to 'False'.
val_set : pandas data frame (nobs, 3)
Validation set on which to monitor log-likelihood. Same format as counts_df.
Returns
-------
self : obj
Copy of this object
"""
## a basic check
if self.stop_crit == 'val-llk':
if val_set is None:
raise ValueError("If 'stop_crit' is set to 'val-llk', must provide a validation set.")
## running each sub-process
if self.verbose:
self._print_st_msg()
self._process_data(counts_df)
if self.verbose:
self._print_data_info()
if (val_set is not None) and (self.stop_crit!='diff-norm') and (self.stop_crit!='train-llk'):
self._process_valset(val_set)
else:
self.val_set = None
self._cast_before_fit()
self._fit()
## after terminating optimization
if self.keep_data:
if self.users_per_batch == 0:
self._store_metadata()
else:
self._st_ix_user = self._st_ix_user[:-1]
if self.produce_dicts and self.reindex:
self.user_dict_ = {self.user_mapping_[i]:i for i in range(self.user_mapping_.shape[0])}
self.item_dict_ = {self.item_mapping_[i]:i for i in range(self.item_mapping_.shape[0])}
self.is_fitted = True
del self.input_df
del self.val_set
return self
|
Fit Hierarchical Poisson Model to sparse count data
Fits a hierarchical Poisson model to count data using mean-field approximation with either
full-batch coordinate-ascent or mini-batch stochastic coordinate-ascent.
Note
----
DataFrames and arrays passed to '.fit' might be modified in place - if this is a problem you'll
need to pass a copy of them, e.g. 'counts_df=counts_df.copy()'.
Note
----
Forcibly terminating the procedure should still keep the last calculated shape and rate
parameter values, but is not recommended. If you need to make predictions on a forcibly
terminated object, set the attribute 'is_fitted' to 'True'.
Note
----
Fitting in mini-batches is more prone to numerical instability and compared to full-batch
variational inference, it is more likely that all your parameters will turn to NaNs (which
means the optimization procedure failed).
Parameters
----------
counts_df : pandas data frame (nobs, 3) or coo_matrix
Input data with one row per non-zero observation, consisting of triplets ('UserId', 'ItemId', 'Count').
Must contain columns 'UserId', 'ItemId', and 'Count'.
Combinations of users and items not present are implicitly assumed to be zero by the model.
Can also pass a sparse coo_matrix, in which case 'reindex' will be forced to 'False'.
val_set : pandas data frame (nobs, 3)
Validation set on which to monitor log-likelihood. Same format as counts_df.
Returns
-------
self : obj
Copy of this object
|
def use_app(backend_name=None, call_reuse=True):
""" Get/create the default Application object
It is safe to call this function multiple times, as long as
backend_name is None or matches the already selected backend.
Parameters
----------
backend_name : str | None
The name of the backend application to use. If not specified, Vispy
tries to select a backend automatically. See ``vispy.use()`` for
details.
call_reuse : bool
Whether to call the backend's `reuse()` function (True by default).
Not implemented by default, but some backends need it. For example,
the notebook backends need to inject some JavaScript in a notebook as
soon as `use_app()` is called.
"""
global default_app
# If we already have a default_app, raise error or return
if default_app is not None:
names = default_app.backend_name.lower().replace('(', ' ').strip(') ')
names = [name for name in names.split(' ') if name]
if backend_name and backend_name.lower() not in names:
raise RuntimeError('Can only select a backend once, already using '
'%s.' % names)
else:
if call_reuse:
default_app.reuse()
return default_app # Current backend matches backend_name
# Create default app
default_app = Application(backend_name)
return default_app
|
Get/create the default Application object
It is safe to call this function multiple times, as long as
backend_name is None or matches the already selected backend.
Parameters
----------
backend_name : str | None
The name of the backend application to use. If not specified, Vispy
tries to select a backend automatically. See ``vispy.use()`` for
details.
call_reuse : bool
Whether to call the backend's `reuse()` function (True by default).
Not implemented by default, but some backends need it. For example,
the notebook backends need to inject some JavaScript in a notebook as
soon as `use_app()` is called.
|
def optimise_xy(xy, *args):
"""Return negative pore diameter for x and y coordinates optimisation."""
z, elements, coordinates = args
window_com = np.array([xy[0], xy[1], z])
return -pore_diameter(elements, coordinates, com=window_com)[0]
|
Return negative pore diameter for x and y coordinates optimisation.
|
def read(self, vals):
"""Read values.
Args:
vals (list): list of strings representing values
"""
i = 0
if len(vals[i]) == 0:
self.leapyear_observed = None
else:
self.leapyear_observed = vals[i]
i += 1
if len(vals[i]) == 0:
self.daylight_saving_start_day = None
else:
self.daylight_saving_start_day = vals[i]
i += 1
if len(vals[i]) == 0:
self.daylight_saving_end_day = None
else:
self.daylight_saving_end_day = vals[i]
i += 1
count = int(vals[i])
i += 1
for _ in range(count):
obj = Holiday()
obj.read(vals[i:i + obj.field_count])
self.add_holiday(obj)
i += obj.field_count
|
Read values.
Args:
vals (list): list of strings representing values
|
def signin(request, auth_form=AuthenticationForm,
template_name='userena/signin_form.html',
redirect_field_name=REDIRECT_FIELD_NAME,
redirect_signin_function=signin_redirect, extra_context=None):
"""
Signin using email or username with password.
Signs a user in by combining email/username with password. If the
combination is correct and the user :func:`is_active` the
:func:`redirect_signin_function` is called with the arguments
``REDIRECT_FIELD_NAME`` and an instance of the :class:`User` who is
trying to log in. The returned value of the function will be the URL that
is redirected to.
A user can also select to be remembered for ``USERENA_REMEMBER_ME_DAYS``.
:param auth_form:
Form to use for signing the user in. Defaults to the
:class:`AuthenticationForm` supplied by userena.
:param template_name:
String defining the name of the template to use. Defaults to
``userena/signin_form.html``.
:param redirect_field_name:
Form field name which contains the value for a redirect to the
succeeding page. Defaults to ``next`` and is set in
``REDIRECT_FIELD_NAME`` setting.
:param redirect_signin_function:
Function which handles the redirect. This function gets the value of
``REDIRECT_FIELD_NAME`` and the :class:`User` who has logged in. It
must return a string which specifies the URI to redirect to.
:param extra_context:
A dictionary containing extra variables that should be passed to the
rendered template. The ``form`` key is always the ``auth_form``.
**Context**
``form``
Form used for authentication supplied by ``auth_form``.
"""
form = auth_form()
if request.method == 'POST':
form = auth_form(request.POST, request.FILES)
if form.is_valid():
identification, password, remember_me = (form.cleaned_data['identification'],
form.cleaned_data['password'],
form.cleaned_data['remember_me'])
user = authenticate(identification=identification,
password=password)
if user.is_active:
login(request, user)
if remember_me:
request.session.set_expiry(userena_settings.USERENA_REMEMBER_ME_DAYS[1] * 86400)
else: request.session.set_expiry(0)
if userena_settings.USERENA_USE_MESSAGES:
messages.success(request, _('You have been signed in.'),
fail_silently=True)
#send a signal that a user has signed in
userena_signals.account_signin.send(sender=None, user=user)
# Whereto now?
redirect_to = redirect_signin_function(
request.GET.get(redirect_field_name,
request.POST.get(redirect_field_name)), user)
return HttpResponseRedirect(redirect_to)
else:
return redirect(reverse('userena_disabled',
kwargs={'username': user.username}))
if not extra_context: extra_context = dict()
extra_context.update({
'form': form,
'next': request.GET.get(redirect_field_name,
request.POST.get(redirect_field_name)),
})
return ExtraContextTemplateView.as_view(template_name=template_name,
extra_context=extra_context)(request)
|
Signin using email or username with password.
Signs a user in by combining email/username with password. If the
combination is correct and the user :func:`is_active` the
:func:`redirect_signin_function` is called with the arguments
``REDIRECT_FIELD_NAME`` and an instance of the :class:`User` who is
trying to log in. The returned value of the function will be the URL that
is redirected to.
A user can also select to be remembered for ``USERENA_REMEMBER_ME_DAYS``.
:param auth_form:
Form to use for signing the user in. Defaults to the
:class:`AuthenticationForm` supplied by userena.
:param template_name:
String defining the name of the template to use. Defaults to
``userena/signin_form.html``.
:param redirect_field_name:
Form field name which contains the value for a redirect to the
succeeding page. Defaults to ``next`` and is set in
``REDIRECT_FIELD_NAME`` setting.
:param redirect_signin_function:
Function which handles the redirect. This function gets the value of
``REDIRECT_FIELD_NAME`` and the :class:`User` who has logged in. It
must return a string which specifies the URI to redirect to.
:param extra_context:
A dictionary containing extra variables that should be passed to the
rendered template. The ``form`` key is always the ``auth_form``.
**Context**
``form``
Form used for authentication supplied by ``auth_form``.
|
def refitPrefixes(self):
"""
Refit namespace qualification by replacing prefixes
with explicit namespaces. Also purges prefix mapping table.
@return: self
@rtype: L{Element}
"""
for c in self.children:
c.refitPrefixes()
if self.prefix is not None:
ns = self.resolvePrefix(self.prefix)
if ns[1] is not None:
self.expns = ns[1]
self.prefix = None
self.nsprefixes = {}
return self
|
Refit namespace qualification by replacing prefixes
with explicit namespaces. Also purges prefix mapping table.
@return: self
@rtype: L{Element}
|
def switch(request, url):
"""
Set/clear boolean field value for model object
"""
app_label, model_name, object_id, field = url.split('/')
try:
# django >= 1.7
from django.apps import apps
model = apps.get_model(app_label, model_name)
except ImportError:
# django < 1.7
from django.db.models import get_model
model = get_model(app_label, model_name)
object = get_object_or_404(model, pk=object_id)
perm_str = '%s.change_%s' % (app_label, model.__name__)
# check only model
if not request.user.has_perm(perm_str.lower()):
raise PermissionDenied
setattr(object, field, getattr(object, field) == 0)
object.save()
if request.is_ajax():
return JsonResponse({'object_id': object.pk, 'field': field, 'value': getattr(object, field)})
else:
msg = _(u'flag %(field)s was changed for %(object)s') % {'field': field, 'object': object}
messages.success(request, msg)
return HttpResponseRedirect(request.META.get('HTTP_REFERER', '/'))
|
Set/clear boolean field value for model object
|
def _resolve(self, name):
"""
Resolve the given store
:param name: The store to resolve
:type name: str
:rtype: Repository
"""
config = self._get_config(name)
if not config:
raise RuntimeError('Cache store [%s] is not defined.' % name)
if config['driver'] in self._custom_creators:
repository = self._call_custom_creator(config)
else:
repository = getattr(self, '_create_%s_driver' % config['driver'])(config)
if 'serializer' in config:
serializer = self._resolve_serializer(config['serializer'])
else:
serializer = self._serializer
repository.get_store().set_serializer(serializer)
return repository
|
Resolve the given store
:param name: The store to resolve
:type name: str
:rtype: Repository
|
def add(self, defn):
"""Adds the given Packet Definition to this Telemetry Dictionary."""
if defn.name not in self:
self[defn.name] = defn
else:
msg = "Duplicate packet name '%s'" % defn.name
log.error(msg)
raise util.YAMLError(msg)
|
Adds the given Packet Definition to this Telemetry Dictionary.
|
def downloadArchiveAction(self, request, queryset):
'''
Download selected submissions as archive, for targeted correction.
'''
output = io.BytesIO()
z = zipfile.ZipFile(output, 'w')
for sub in queryset:
sub.add_to_zipfile(z)
z.close()
# go back to start in ZIP file so that Django can deliver it
output.seek(0)
response = HttpResponse(
output, content_type="application/x-zip-compressed")
response['Content-Disposition'] = 'attachment; filename=submissions.zip'
return response
|
Download selected submissions as archive, for targeted correction.
|
def _dump_to_file(self, file):
"""dump to the file"""
xmltodict.unparse(self.object(), file, pretty=True)
|
dump to the file
|
def evaluate_stacked_ensemble(path, ensemble_id):
"""Evaluates the ensemble and updates the database when finished/
Args:
path (str): Path to Xcessiv notebook
ensemble_id (str): Ensemble ID
"""
with functions.DBContextManager(path) as session:
stacked_ensemble = session.query(models.StackedEnsemble).filter_by(
id=ensemble_id).first()
if not stacked_ensemble:
raise exceptions.UserError('Stacked ensemble {} '
'does not exist'.format(ensemble_id))
stacked_ensemble.job_id = get_current_job().id
stacked_ensemble.job_status = 'started'
session.add(stacked_ensemble)
session.commit()
try:
meta_features_list = []
for base_learner in stacked_ensemble.base_learners:
mf = np.load(base_learner.meta_features_path(path))
if len(mf.shape) == 1:
mf = mf.reshape(-1, 1)
meta_features_list.append(mf)
secondary_features = np.concatenate(meta_features_list, axis=1)
# Get data
extraction = session.query(models.Extraction).first()
return_splits_iterable = functions.import_object_from_string_code(
extraction.meta_feature_generation['source'],
'return_splits_iterable'
)
X, y = extraction.return_train_dataset()
# We need to retrieve original order of meta-features
indices_list = [test_index for train_index, test_index in return_splits_iterable(X, y)]
indices = np.concatenate(indices_list)
X, y = X[indices], y[indices]
est = stacked_ensemble.return_secondary_learner()
return_splits_iterable_stacked_ensemble = functions.import_object_from_string_code(
extraction.stacked_ensemble_cv['source'],
'return_splits_iterable'
)
preds = []
trues_list = []
for train_index, test_index in return_splits_iterable_stacked_ensemble(secondary_features, y):
X_train, X_test = secondary_features[train_index], secondary_features[test_index]
y_train, y_test = y[train_index], y[test_index]
est = est.fit(X_train, y_train)
preds.append(
getattr(est, stacked_ensemble.base_learner_origin.
meta_feature_generator)(X_test)
)
trues_list.append(y_test)
preds = np.concatenate(preds, axis=0)
y_true = np.concatenate(trues_list)
for key in stacked_ensemble.base_learner_origin.metric_generators:
metric_generator = functions.import_object_from_string_code(
stacked_ensemble.base_learner_origin.metric_generators[key],
'metric_generator'
)
stacked_ensemble.individual_score[key] = metric_generator(y_true, preds)
stacked_ensemble.job_status = 'finished'
session.add(stacked_ensemble)
session.commit()
except:
session.rollback()
stacked_ensemble.job_status = 'errored'
stacked_ensemble.description['error_type'] = repr(sys.exc_info()[0])
stacked_ensemble.description['error_value'] = repr(sys.exc_info()[1])
stacked_ensemble.description['error_traceback'] = \
traceback.format_exception(*sys.exc_info())
session.add(stacked_ensemble)
session.commit()
raise
|
Evaluates the ensemble and updates the database when finished.
Args:
path (str): Path to Xcessiv notebook
ensemble_id (str): Ensemble ID
|
def list_objects(self, bucket_name=None, **kwargs):
"""
This method is primarily for illustration and just calls the
boto3 client implementation of list_objects but is a common task
for first time Predix BlobStore users.
"""
if not bucket_name: bucket_name = self.bucket_name
return self.client.list_objects(Bucket=bucket_name, **kwargs)
|
This method is primarily for illustration and just calls the
boto3 client implementation of list_objects but is a common task
for first time Predix BlobStore users.
|
def spreadsheet(service, id):
"""Fetch and return spreadsheet meta data with Google sheets API."""
request = service.spreadsheets().get(spreadsheetId=id)
try:
response = request.execute()
except apiclient.errors.HttpError as e:
if e.resp.status == 404:
raise KeyError(id)
else: # pragma: no cover
raise
return response
|
Fetch and return spreadsheet meta data with Google sheets API.
|
def status_bar(python_input):
"""
Create the `Layout` for the status bar.
"""
TB = 'class:status-toolbar'
@if_mousedown
def toggle_paste_mode(mouse_event):
python_input.paste_mode = not python_input.paste_mode
@if_mousedown
def enter_history(mouse_event):
python_input.enter_history()
def get_text_fragments():
python_buffer = python_input.default_buffer
result = []
append = result.append
append((TB, ' '))
result.extend(get_inputmode_fragments(python_input))
append((TB, ' '))
# Position in history.
append((TB, '%i/%i ' % (python_buffer.working_index + 1,
len(python_buffer._working_lines))))
# Shortcuts.
app = get_app()
if not python_input.vi_mode and app.current_buffer == python_input.search_buffer:
append((TB, '[Ctrl-G] Cancel search [Enter] Go to this position.'))
elif bool(app.current_buffer.selection_state) and not python_input.vi_mode:
# Emacs cut/copy keys.
append((TB, '[Ctrl-W] Cut [Meta-W] Copy [Ctrl-Y] Paste [Ctrl-G] Cancel'))
else:
result.extend([
(TB + ' class:key', '[F3]', enter_history),
(TB, ' History ', enter_history),
(TB + ' class:key', '[F6]', toggle_paste_mode),
(TB, ' ', toggle_paste_mode),
])
if python_input.paste_mode:
append((TB + ' class:paste-mode-on', 'Paste mode (on)', toggle_paste_mode))
else:
append((TB, 'Paste mode', toggle_paste_mode))
return result
return ConditionalContainer(
content=Window(content=FormattedTextControl(get_text_fragments), style=TB),
filter=~is_done & renderer_height_is_known &
Condition(lambda: python_input.show_status_bar and
not python_input.show_exit_confirmation))
|
Create the `Layout` for the status bar.
|
def handle_inittarget(
state_change: ActionInitTarget,
channel_state: NettingChannelState,
pseudo_random_generator: random.Random,
block_number: BlockNumber,
) -> TransitionResult[TargetTransferState]:
""" Handles an ActionInitTarget state change. """
transfer = state_change.transfer
route = state_change.route
assert channel_state.identifier == transfer.balance_proof.channel_identifier
is_valid, channel_events, errormsg = channel.handle_receive_lockedtransfer(
channel_state,
transfer,
)
if is_valid:
# A valid balance proof does not mean the payment itself is still valid.
# e.g. the lock may be near expiration or have expired. This is fine. The
# message with an unusable lock must be handled to properly synchronize the
# local view of the partner's channel state, allowing the next balance
# proofs to be handled. This however, must only be done once, which is
# enforced by the nonce increasing sequentially, which is verified by
# the handler handle_receive_lockedtransfer.
target_state = TargetTransferState(route, transfer)
safe_to_wait, _ = is_safe_to_wait(
transfer.lock.expiration,
channel_state.reveal_timeout,
block_number,
)
# If there is not enough time to safely unlock the lock on-chain
# silently let the transfer expire. The target task must be created to
# handle the ReceiveLockExpired state change, which will clear the
# expired lock.
if safe_to_wait:
message_identifier = message_identifier_from_prng(pseudo_random_generator)
recipient = transfer.initiator
secret_request = SendSecretRequest(
recipient=Address(recipient),
channel_identifier=CHANNEL_IDENTIFIER_GLOBAL_QUEUE,
message_identifier=message_identifier,
payment_identifier=transfer.payment_identifier,
amount=transfer.lock.amount,
expiration=transfer.lock.expiration,
secrethash=transfer.lock.secrethash,
)
channel_events.append(secret_request)
iteration = TransitionResult(target_state, channel_events)
else:
# If the balance proof is not valid, do *not* create a task. Otherwise it's
# possible for an attacker to send multiple invalid transfers, and increase
# the memory usage of this Node.
unlock_failed = EventUnlockClaimFailed(
identifier=transfer.payment_identifier,
secrethash=transfer.lock.secrethash,
reason=errormsg,
)
channel_events.append(unlock_failed)
iteration = TransitionResult(None, channel_events)
return iteration
|
Handles an ActionInitTarget state change.
|
def create_secret(self, value, contributor, metadata=None, expires=None):
"""Create a new secret, returning its handle.
:param value: Secret value to store
:param contributor: User owning the secret
:param metadata: Optional metadata dictionary (must be JSON serializable)
:param expires: Optional date/time of expiry (defaults to None, which means that
the secret never expires)
:return: Secret handle
"""
if metadata is None:
metadata = {}
secret = self.create(
value=value,
contributor=contributor,
metadata=metadata,
expires=expires,
)
return str(secret.handle)
|
Create a new secret, returning its handle.
:param value: Secret value to store
:param contributor: User owning the secret
:param metadata: Optional metadata dictionary (must be JSON serializable)
:param expires: Optional date/time of expiry (defaults to None, which means that
the secret never expires)
:return: Secret handle
|
def inception_v3(pretrained=False, ctx=cpu(),
root=os.path.join(base.data_dir(), 'models'), **kwargs):
r"""Inception v3 model from
`"Rethinking the Inception Architecture for Computer Vision"
<http://arxiv.org/abs/1512.00567>`_ paper.
Parameters
----------
pretrained : bool, default False
Whether to load the pretrained weights for model.
ctx : Context, default CPU
The context in which to load the pretrained weights.
root : str, default $MXNET_HOME/models
Location for keeping the model parameters.
"""
net = Inception3(**kwargs)
if pretrained:
from ..model_store import get_model_file
net.load_parameters(get_model_file('inceptionv3', root=root), ctx=ctx)
return net
|
r"""Inception v3 model from
`"Rethinking the Inception Architecture for Computer Vision"
<http://arxiv.org/abs/1512.00567>`_ paper.
Parameters
----------
pretrained : bool, default False
Whether to load the pretrained weights for model.
ctx : Context, default CPU
The context in which to load the pretrained weights.
root : str, default $MXNET_HOME/models
Location for keeping the model parameters.
|
def call(method, *args, **kwargs):
'''
Calls a specific method from the network driver instance.
Please check the readthedocs_ page for the updated list of getters.
.. _readthedocs: http://napalm.readthedocs.org/en/latest/support/index.html#getters-support-matrix
:param method: specifies the name of the method to be called
:param params: contains the mapping between the name and the values of the parameters needed to call the method
:return: A dictionary with the following keys:
- result (True/False): if the operation succeeded
- out (object): returns the object as-is from the call
- comment (string): provides more details in case the call failed
- traceback (string): complete traceback in case of exception. Please
submit an issue including this traceback on the `correct driver repo`_
and make sure to read the FAQ_
.. _`correct driver repo`: https://github.com/napalm-automation/napalm/issues/new
.. _FAQ: https://github.com/napalm-automation/napalm#faq
Example:
.. code-block:: python
__proxy__['napalm.call']('cli',
**{
'commands': [
'show version',
'show chassis fan'
]
})
'''
kwargs_copy = {}
kwargs_copy.update(kwargs)
for karg, warg in six.iteritems(kwargs_copy):
# will remove None values
# thus the NAPALM methods will be called with their defaults
if warg is None:
kwargs.pop(karg)
return salt.utils.napalm.call(NETWORK_DEVICE, method, *args, **kwargs)
|
Calls a specific method from the network driver instance.
Please check the readthedocs_ page for the updated list of getters.
.. _readthedocs: http://napalm.readthedocs.org/en/latest/support/index.html#getters-support-matrix
:param method: specifies the name of the method to be called
:param params: contains the mapping between the name and the values of the parameters needed to call the method
:return: A dictionary with the following keys:
- result (True/False): if the operation succeeded
- out (object): returns the object as-is from the call
- comment (string): provides more details in case the call failed
- traceback (string): complete traceback in case of exception. Please
submit an issue including this traceback on the `correct driver repo`_
and make sure to read the FAQ_
.. _`correct driver repo`: https://github.com/napalm-automation/napalm/issues/new
.. _FAQ: https://github.com/napalm-automation/napalm#faq
Example:
.. code-block:: python
__proxy__['napalm.call']('cli',
**{
'commands': [
'show version',
'show chassis fan'
]
})
|
def _parse_one_event(self):
"""Parse the stream buffer and return either a single event or None"""
# WVA includes \r\n between messages which the parser doesn't like, so we
# throw away any data before an opening brace
try:
open_brace_idx = self._buf.index('{')
except ValueError:
self._buf = six.u('') # no brace found
else:
if open_brace_idx > 0:
self._buf = self._buf[open_brace_idx:]
try:
event, idx = self._decoder.raw_decode(self._buf)
self._buf = self._buf[idx:]
return event
except ValueError:
return None
|
Parse the stream buffer and return either a single event or None
|
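The core trick is json.JSONDecoder.raw_decode, which returns the first complete object plus the index where it ended; a standalone sketch:
import json

buf = '\r\n{"a": 1}{"b": 2}'
buf = buf[buf.index('{'):]         # discard noise before the first brace
event, idx = json.JSONDecoder().raw_decode(buf)
buf = buf[idx:]                    # keep the unconsumed tail for next time
print(event, '| remaining:', buf)  # {'a': 1} | remaining: {"b": 2}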
def extract_lookups(value):
"""Recursively extracts any stack lookups within the data structure.
Args:
value (one of str, list, dict): a structure that contains lookups to
output values
Returns:
list: list of lookups if any
"""
lookups = set()
if isinstance(value, basestring):
lookups = lookups.union(extract_lookups_from_string(value))
elif isinstance(value, list):
for v in value:
lookups = lookups.union(extract_lookups(v))
elif isinstance(value, dict):
for v in value.values():
lookups = lookups.union(extract_lookups(v))
return lookups
|
Recursively extracts any stack lookups within the data structure.
Args:
value (one of str, list, dict): a structure that contains lookups to
output values
Returns:
list: list of lookups if any
|
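A self-contained sketch of the same recursive walk, with a hypothetical ${name} regex standing in for extract_lookups_from_string:
import re

def find_markers(value):
    if isinstance(value, str):
        return set(re.findall(r'\$\{(\w+)\}', value))  # hypothetical syntax
    if isinstance(value, list):
        return set().union(*(find_markers(v) for v in value))
    if isinstance(value, dict):
        return set().union(*(find_markers(v) for v in value.values()))
    return set()  # non-container leaves contribute nothing

print(sorted(find_markers({'url': '${host}/api', 'ports': ['${port}', 80]})))
# ['host', 'port']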
def get_differing_atom_residue_ids(self, pdb_name, pdb_list):
'''Returns a list of residues in pdb_name which differ from the pdbs corresponding to the names in pdb_list.'''
assert(pdb_name in self.pdb_names)
assert(set(pdb_list).intersection(set(self.pdb_names)) == set(pdb_list)) # the names in pdb_list must be in pdb_names
differing_atom_residue_ids = set()
for other_pdb in pdb_list:
differing_atom_residue_ids = differing_atom_residue_ids.union(set(self.differing_atom_residue_ids[(pdb_name, other_pdb)]))
return sorted(differing_atom_residue_ids)
|
Returns a list of residues in pdb_name which differ from the pdbs corresponding to the names in pdb_list.
|
def _assertField(self, name):
"""Raise AttributeError when PacketHistory has no field with the given
name.
"""
if name not in self._names:
msg = 'PacketHistory "%s" has no field "%s"'
values = self._defn.name, name
raise AttributeError(msg % values)
|
Raise AttributeError when PacketHistory has no field with the given
name.
|
def get_or_guess_labels(model, x, **kwargs):
"""
Get the label to use in generating an adversarial example for x.
The kwargs are fed directly from the kwargs of the attack.
If 'y' is in kwargs, then assume it's an untargeted attack and
use that as the label.
If 'y_target' is in kwargs and is not none, then assume it's a
targeted attack and use that as the label.
Otherwise, use the model's prediction as the label and perform an
untargeted attack.
:param model: PyTorch model. Do not add a softmax gate to the output.
:param x: Tensor, shape (N, d_1, ...).
:param y: (optional) Tensor, shape (N).
:param y_target: (optional) Tensor, shape (N).
"""
if 'y' in kwargs and 'y_target' in kwargs:
raise ValueError("Can not set both 'y' and 'y_target'.")
if 'y' in kwargs:
labels = kwargs['y']
elif 'y_target' in kwargs and kwargs['y_target'] is not None:
labels = kwargs['y_target']
else:
_, labels = torch.max(model(x), 1)
return labels
|
Get the label to use in generating an adversarial example for x.
The kwargs are fed directly from the kwargs of the attack.
If 'y' is in kwargs, then assume it's an untargeted attack and
use that as the label.
If 'y_target' is in kwargs and is not none, then assume it's a
targeted attack and use that as the label.
Otherwise, use the model's prediction as the label and perform an
untargeted attack.
:param model: PyTorch model. Do not add a softmax gate to the output.
:param x: Tensor, shape (N, d_1, ...).
:param y: (optional) Tensor, shape (N).
:param y_target: (optional) Tensor, shape (N).
|
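A hypothetical usage sketch (assumes get_or_guess_labels is in scope and torch is installed); a bare Linear layer stands in for a trained classifier:
import torch

model = torch.nn.Linear(4, 3)  # stand-in classifier producing logits
x = torch.randn(2, 4)
labels = get_or_guess_labels(model, x)  # untargeted: argmax of model(x)
targeted = get_or_guess_labels(model, x, y_target=torch.tensor([0, 2]))
print(labels.shape, targeted)  # torch.Size([2]) tensor([0, 2])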
def get_sn(unit):
"""获取文本行的句子数量
Keyword arguments:
unit -- 文本行
Return:
sn -- 句数
"""
sn = 0
match_re = re.findall(str(sentence_delimiters), unit)
if match_re:
string = ''.join(match_re)
sn = len(string)
return int(sn)
|
Count the number of sentences in a line of text
Keyword arguments:
unit -- line of text
Return:
sn -- sentence count
|
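A self-contained sketch that counts delimiter matches directly, with an explicit character class standing in for the module-level sentence_delimiters:
import re

def count_sentences(line, delimiters=r'[。!?!?]'):  # hypothetical delimiter set
    return len(re.findall(delimiters, line))

print(count_sentences('今天天气很好。我们去公园吧!好不好?'))  # 3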
def run(cmd_str, cwd='.', verbose=False):
""" an OS agnostic function to execute a command line
Parameters
----------
cmd_str : str
the str to execute with os.system()
cwd : str
the directory to execute the command in
verbose : bool
flag to echo to stdout complete cmd str
Note
----
uses platform to detect OS and adds .exe suffix or ./ prefix as appropriate
for Windows, if os.system returns non-zero, raises exception
Example
-------
``>>>import pyemu``
``>>>pyemu.helpers.run("pestpp pest.pst")``
"""
bwd = os.getcwd()
os.chdir(cwd)
try:
exe_name = cmd_str.split()[0]
if "window" in platform.platform().lower():
if not exe_name.lower().endswith("exe"):
raw = cmd_str.split()
raw[0] = exe_name + ".exe"
cmd_str = ' '.join(raw)
else:
if exe_name.lower().endswith('exe'):
raw = cmd_str.split()
exe_name = exe_name.replace('.exe','')
raw[0] = exe_name
cmd_str = '{0} {1} '.format(*raw)
if os.path.exists(exe_name) and not exe_name.startswith('./'):
cmd_str = "./" + cmd_str
except Exception as e:
os.chdir(bwd)
raise Exception("run() error preprocessing command line :{0}".format(str(e)))
if verbose:
print("run():{0}".format(cmd_str))
try:
ret_val = os.system(cmd_str)
except Exception as e:
os.chdir(bwd)
raise Exception("run() raised :{0}".format(str(e)))
os.chdir(bwd)
if "window" in platform.platform().lower():
if ret_val != 0:
raise Exception("run() returned non-zero")
|
an OS agnostic function to execute a command line
Parameters
----------
cmd_str : str
the str to execute with os.system()
cwd : str
the directory to execute the command in
verbose : bool
flag to echo to stdout complete cmd str
Note
----
uses platform to detect OS and adds .exe suffix or ./ prefix as appropriate
for Windows, if os.system returns non-zero, raises exception
Example
-------
``>>>import pyemu``
``>>>pyemu.helpers.run("pestpp pest.pst")``
|
def parent(self):
""" Return the parent device. """
if self._has_parent is None:
_parent = self._ctx.backend.get_parent(self._ctx.dev)
self._has_parent = _parent is not None
if self._has_parent:
self._parent = Device(_parent, self._ctx.backend)
else:
self._parent = None
return self._parent
|
Return the parent device.
|
def find_optimal_allocation(self, tokens):
"""
Finds the longest, non-overlapping word-ranges in tokens for phrases stored in the TokenTrie
:param tokens: tokens to allocate to phrases
:type tokens: list of str
:return: Optimal allocation of tokens to phrases
:rtype: list of TokenTrie.Token
"""
token_ranges = self.find_tracked_words(tokens)
token_ranges.sort()
for offset in range(1, len(token_ranges)):
to_be_removed = []
for candidate in token_ranges[offset:]:
for i in range(offset):
if token_ranges[i].overlaps_with(candidate):
to_be_removed.append(candidate)
break
token_ranges = [token for token in token_ranges if token not in to_be_removed]
token_ranges.sort(key=lambda token: token.get_start_index())
return token_ranges
|
Finds the longest, non-overlapping word-ranges in tokens for phrases stored in the TokenTrie
:param tokens: tokens to allocate to phrases
:type tokens: list of str
:return: Optimal allocation of tokens to phrases
:rtype: list of TokenTrie.Token
|
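The selection logic is essentially greedy interval scheduling: prefer longer ranges, then drop anything overlapping an accepted one. A standalone sketch on plain (start, end) pairs, assuming the Token ordering prefers longer ranges:
def select_non_overlapping(ranges):
    # end-exclusive (start, end) pairs, considered longest first
    chosen = []
    for cand in sorted(ranges, key=lambda r: r[1] - r[0], reverse=True):
        if all(cand[1] <= c[0] or cand[0] >= c[1] for c in chosen):
            chosen.append(cand)
    return sorted(chosen)

print(select_non_overlapping([(0, 2), (1, 4), (4, 5)]))  # [(1, 4), (4, 5)]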
def attended_by(self, email):
""" Check if user attended the event """
for attendee in self["attendees"] or []:
if (attendee["email"] == email
and attendee["responseStatus"] == "accepted"):
return True
return False
|
Check if user attended the event
|
def read_string(self, registeraddress, numberOfRegisters=16, functioncode=3):
"""Read a string from the slave.
Each 16-bit register in the slave is interpreted as two characters (1 byte = 8 bits).
For example 16 consecutive registers can hold 32 characters (32 bytes).
Args:
* registeraddress (int): The slave register start address (use decimal numbers, not hex).
* numberOfRegisters (int): The number of registers allocated for the string.
* functioncode (int): Modbus function code. Can be 3 or 4.
Returns:
The string (str).
Raises:
ValueError, TypeError, IOError
"""
_checkFunctioncode(functioncode, [3, 4])
_checkInt(numberOfRegisters, minvalue=1, description='number of registers for read string')
return self._genericCommand(functioncode, registeraddress, \
numberOfRegisters=numberOfRegisters, payloadformat='string')
|
Read a string from the slave.
Each 16-bit register in the slave is interpreted as two characters (1 byte = 8 bits).
For example 16 consecutive registers can hold 32 characters (32 bytes).
Args:
* registeraddress (int): The slave register start address (use decimal numbers, not hex).
* numberOfRegisters (int): The number of registers allocated for the string.
* functioncode (int): Modbus function code. Can be 3 or 4.
Returns:
The string (str).
Raises:
ValueError, TypeError, IOError
|
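A worked sketch of the register/character arithmetic the docstring describes, assuming the usual Modbus big-endian packing (high byte first) within each 16-bit register:
def registers_to_text(registers):
    chars = []
    for reg in registers:
        chars.append(chr(reg >> 8))    # high byte
        chars.append(chr(reg & 0xFF))  # low byte
    return ''.join(chars)

print(registers_to_text([0x4142, 0x4344]))  # ABCD -- 2 registers, 4 chars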
def get_dependencies(self):
"""Return dependencies, which should trigger updates of this model."""
# pylint: disable=no-member
return super().get_dependencies() + [
Data.collection_set,
Data.entity_set,
Data.parents,
]
|
Return dependencies, which should trigger updates of this model.
|
def validate_auth_mechanism(option, value):
"""Validate the authMechanism URI option.
"""
# CRAM-MD5 is for server testing only. Undocumented,
# unsupported, may be removed at any time. You have
# been warned.
if value not in MECHANISMS and value != 'CRAM-MD5':
raise ValueError("%s must be in %s" % (option, tuple(MECHANISMS)))
return value
|
Validate the authMechanism URI option.
|
def print_genl_msg(_, ofd, hdr, ops, payloadlen):
"""https://github.com/thom311/libnl/blob/libnl3_2_25/lib/msg.c#L831.
Positional arguments:
_ -- unused.
ofd -- function to call with arguments similar to `logging.debug`.
hdr -- Netlink message header (nlmsghdr class instance).
ops -- cache operations (nl_cache_ops class instance).
payloadlen -- length of payload in message (ctypes.c_int instance).
Returns:
data (bytearray_ptr).
"""
data = nlmsg_data(hdr)
if payloadlen.value < GENL_HDRLEN:
return data
print_genl_hdr(ofd, data)
payloadlen.value -= GENL_HDRLEN
data = bytearray_ptr(data, GENL_HDRLEN)
if ops:
hdrsize = ops.co_hdrsize - GENL_HDRLEN
if hdrsize > 0:
if payloadlen.value < hdrsize:
return data
ofd(' [HEADER] %d octets', hdrsize)
dump_hex(ofd, data, hdrsize, 0)
payloadlen.value -= hdrsize
data = bytearray_ptr(data, hdrsize)
return data
|
https://github.com/thom311/libnl/blob/libnl3_2_25/lib/msg.c#L831.
Positional arguments:
_ -- unused.
ofd -- function to call with arguments similar to `logging.debug`.
hdr -- Netlink message header (nlmsghdr class instance).
ops -- cache operations (nl_cache_ops class instance).
payloadlen -- length of payload in message (ctypes.c_int instance).
Returns:
data (bytearray_ptr).
|
def put(self, key):
"""Put and return the only unique identifier possible, its url
"""
self._consul_request('PUT', self._key_url(key['name']), json=key)
return key['name']
|
Put and return the only unique identifier possible, its url
|
def _el_orb_tuple(string):
"""Parse the element and orbital argument strings.
The presence of an element without any orbitals means that we want to plot
all of its orbitals.
Args:
string (`str`): The selected elements and orbitals in the form:
`"Sn.s.p,O"`.
Returns:
A list of tuples specifying which elements/orbitals to plot. The output
for the above example would be:
`[('Sn', ('s', 'p')), 'O']`
"""
el_orbs = []
for split in string.split(','):
splits = split.split('.')
el = splits[0]
if len(splits) == 1:
el_orbs.append(el)
else:
el_orbs.append((el, tuple(splits[1:])))
return el_orbs
|
Parse the element and orbital argument strings.
The presence of an element without any orbitals means that we want to plot
all of its orbitals.
Args:
string (`str`): The selected elements and orbitals in the form:
`"Sn.s.p,O"`.
Returns:
A list of tuples specifying which elements/orbitals to plot. The output
for the above example would be:
`[('Sn', ('s', 'p')), 'O']`
|
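Usage sketch (assumes _el_orb_tuple is in scope), reproducing the docstring's example:
print(_el_orb_tuple('Sn.s.p,O'))  # [('Sn', ('s', 'p')), 'O']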
def to_dict(self):
'''Save this target component into a dictionary.'''
d = {'componentId': self.component_id,
'instanceName': self.instance_name}
props = []
for name in self.properties:
p = {'name': name}
if self.properties[name]:
p['value'] = str(self.properties[name])
props.append(p)
if props:
d[RTS_EXT_NS_YAML + 'properties'] = props
return d
|
Save this target component into a dictionary.
|
def stencils(self):
"""List of stencils."""
if not self._stencils:
self._stencils = self.manifest['stencils']
return self._stencils
|
List of stencils.
|
def location(self, value):
"""(Deprecated) Set `Bucket.location`
This can only be set at bucket **creation** time.
See https://cloud.google.com/storage/docs/json_api/v1/buckets and
https://cloud.google.com/storage/docs/bucket-locations
.. warning::
Assignment to 'Bucket.location' is deprecated, as it is only
valid before the bucket is created. Instead, pass the location
to `Bucket.create`.
"""
warnings.warn(_LOCATION_SETTER_MESSAGE, DeprecationWarning, stacklevel=2)
self._location = value
|
(Deprecated) Set `Bucket.location`
This can only be set at bucket **creation** time.
See https://cloud.google.com/storage/docs/json_api/v1/buckets and
https://cloud.google.com/storage/docs/bucket-locations
.. warning::
Assignment to 'Bucket.location' is deprecated, as it is only
valid before the bucket is created. Instead, pass the location
to `Bucket.create`.
|
def _set_relative_pythonpath(self, value):
"""Set PYTHONPATH list relative paths"""
self.pythonpath = [osp.abspath(osp.join(self.root_path, path))
for path in value]
|
Set PYTHONPATH from a list of paths relative to the root path
|
def sysinfo2float(version_info=sys.version_info):
"""Convert a sys.versions_info-compatible list into a 'canonic'
floating-point number which that can then be used to look up a
magic number. Note that this can only be used for released version
of C Python, not interim development versions, since we can't
represent that as a floating-point number.
For handling Pypy, pyston, jython, etc. and interim versions of
C Python, use sysinfo2magic.
"""
vers_str = '.'.join([str(v) for v in version_info[0:3]])
if version_info[3] != 'final':
vers_str += '.' + ''.join([str(i) for i in version_info[3:]])
if IS_PYPY:
vers_str += 'pypy'
else:
try:
import platform
platform = platform.python_implementation()
if platform in ('Jython', 'Pyston'):
vers_str += platform
pass
except ImportError:
# Python may be too old, e.g. < 2.6 or implementation may
# just not have platform
pass
except AttributeError:
pass
return py_str2float(vers_str)
|
Convert a sys.version_info-compatible list into a 'canonic'
floating-point number which can then be used to look up a
magic number. Note that this can only be used for released versions
of C Python, not interim development versions, since we can't
represent that as a floating-point number.
For handling Pypy, pyston, jython, etc. and interim versions of
C Python, use sysinfo2magic.
|
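A hedged illustration of the version-string construction above (the final float lookup happens in py_str2float, which is not shown):
vi = (3, 7, 4, 'final', 0)                    # sys.version_info-style tuple
vers_str = '.'.join(str(v) for v in vi[0:3])  # '3.7.4'
if vi[3] != 'final':
    vers_str += '.' + ''.join(str(i) for i in vi[3:])  # pre-release suffix
print(vers_str)  # 3.7.4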
def route_handler(context, content, pargs, kwargs):
"""
Route shortcode works a lot like rendering a page based on the url or
route. This allows inserting rendered HTML within another page.
Activate it with the 'shortcodes' template filter. Within the content use
the chill route shortcode: "[chill route /path/to/something/]" where the
'[chill' and ']' are the shortcode starting and ending tags. And 'route' is
this route handler that takes one argument which is the url.
"""
(node, rule_kw) = node_from_uri(pargs[0])
if node is None:
return u"<!-- 404 '{0}' -->".format(pargs[0])
rule_kw.update( node )
values = rule_kw
values.update( request.form.to_dict(flat=True) )
values.update( request.args.to_dict(flat=True) )
values['method'] = request.method
noderequest = values.copy()
noderequest.pop('node_id')
noderequest.pop('name')
noderequest.pop('value')
rendered = render_node(node['id'], noderequest=noderequest, **values)
if rendered:
if not isinstance(rendered, (str, unicode, int, float)):
# return a json string
return encoder.encode(rendered)
return rendered
# Nothing to show, so nothing found
return "<!-- 404 '{0}' -->".format(pargs[0])
|
Route shortcode works a lot like rendering a page based on the url or
route. This allows inserting rendered HTML within another page.
Activate it with the 'shortcodes' template filter. Within the content use
the chill route shortcode: "[chill route /path/to/something/]" where the
'[chill' and ']' are the shortcode starting and ending tags. And 'route' is
this route handler that takes one argument which is the url.
|
def get(key, default=-1):
"""Backport support for original codes."""
if isinstance(key, int):
return ECDSA_LOW_Curve(key)
if key not in ECDSA_LOW_Curve._member_map_:
extend_enum(ECDSA_LOW_Curve, key, default)
return ECDSA_LOW_Curve[key]
|
Backport support for original codes.
|
def get_certificate_json(self, certificate_uid):
"""
Returns certificate as json. Propagates KeyError if key isn't found
:param certificate_uid:
:return:
"""
if certificate_uid.startswith(URN_UUID_PREFIX):
uid = certificate_uid[len(URN_UUID_PREFIX):]
elif certificate_uid.startswith('http'):
last_slash = certificate_uid.rindex('/')
uid = certificate_uid[last_slash + 1:]
else:
uid = certificate_uid
logging.debug('Retrieving certificate for uid=%s', uid)
certificate_bytes = self._get_certificate_raw(uid)
logging.debug('Found certificate for uid=%s', uid)
certificate_json = helpers.certificate_bytes_to_json(certificate_bytes)
return certificate_json
|
Returns certificate as json. Propagates KeyError if key isn't found
:param certificate_uid:
:return:
|
def process_am1(self, am1):
"""
Due to the solving process involving multiple optimization
levels to be treated individually, new soft clauses for
the detected intrinsic AtMost1 constraints should be
remembered. The method is a slightly modified version of
the base method :func:`RC2.process_am1` taking care of
this.
"""
# computing am1's weight
self.minw = min(map(lambda l: self.wght[l], am1))
# pretending am1 to be a core, and the bound is its size - 1
self.core_sels, b = am1, len(am1) - 1
# incrementing the cost
self.cost += b * self.minw
# assumptions to remove
self.garbage = set()
# splitting and relaxing if needed
self.process_sels()
# new selector
self.topv += 1
selv = self.topv
self.oracle.add_clause([-l for l in self.rels] + [-selv])
# integrating the new selector
self.sels.append(selv)
self.wght[selv] = self.minw
self.smap[selv] = len(self.wght) - 1
# do not forget this newly added selector!
self.bckp_set.add(selv)
# removing unnecessary assumptions
self.filter_assumps()
|
Due to the solving process involving multiple optimization
levels to be treated individually, new soft clauses for
the detected intrinsic AtMost1 constraints should be
remembered. The method is a slightly modified version of
the base method :func:`RC2.process_am1` taking care of
this.
|
def raw_file(client, src, dest, opt):
"""Write the contents of a vault path/key to a file. Is
smart enough to detect and handle binary files that are
base64 encoded."""
path, key = path_pieces(src)
resp = client.read(path)
if not resp:
client.revoke_self_token()
raise aomi.exceptions.VaultData("Unable to retrieve %s" % path)
else:
if 'data' in resp and key in resp['data']:
secret = resp['data'][key]
if is_base64(secret):
LOG.debug('decoding base64 entry')
secret = portable_b64decode(secret)
if is_aws(resp['data']) and 'sts' not in path:
renew_secret(client, resp, opt)
write_raw_file(secret, dest)
else:
client.revoke_self_token()
e_msg = "Key %s not found in %s" % (key, path)
raise aomi.exceptions.VaultData(e_msg)
|
Write the contents of a vault path/key to a file. Attempts to
detect and handle binary files that are base64 encoded.
|
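A self-contained sketch of the base64 round-trip that raw_file relies on; is_base64 and portable_b64decode are aomi helpers, so standard-library calls stand in for them here:

import base64

stored = base64.b64encode(b'\x00\x01binary-secret').decode('ascii')
# raw_file would detect this value as base64 and decode it before writing:
assert base64.b64decode(stored) == b'\x00\x01binary-secret'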
def _report_external_dependencies(self, sect, _, _dummy):
"""return a verbatim layout for displaying dependencies"""
dep_info = _make_tree_defs(self._external_dependencies_info().items())
if not dep_info:
raise EmptyReportError()
tree_str = _repr_tree_defs(dep_info)
sect.append(VerbatimText(tree_str))
|
return a verbatim layout for displaying dependencies
|
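Illustrative only: _make_tree_defs and _repr_tree_defs are pylint internals whose exact output differs, but the effect is to turn dependency info into an indented tree, roughly like this stand-in:

deps = {'requests': ['mypkg.api', 'mypkg.cli']}  # hypothetical dependency info
for mod, users in sorted(deps.items()):
    print(mod)
    for user in sorted(users):
        print('  \\-', user)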
def get_namespaces(namespace="", apiserver_url=None):
'''
.. versionadded:: 2016.3.0

Get one or all kubernetes namespaces.

If the namespace parameter is omitted, all namespaces are returned, as in
the following kubectl example:

.. code-block:: bash

    kubectl get namespaces -o json

If a namespace is given, the output is similar to the one from kubectl:

.. code-block:: bash

    kubectl get namespaces namespace_name -o json

CLI Example:

.. code-block:: bash

    salt '*' k8s.get_namespaces
    salt '*' k8s.get_namespaces namespace_name http://kube-master.cluster.local
'''
# Try to get kubernetes master
apiserver_url = _guess_apiserver(apiserver_url)
if apiserver_url is None:
return False
# Get data
ret = _get_namespaces(apiserver_url, namespace)
return ret
|
.. versionadded:: 2016.3.0

Get one or all kubernetes namespaces.

If the namespace parameter is omitted, all namespaces are returned, as in
the following kubectl example:

.. code-block:: bash

    kubectl get namespaces -o json

If a namespace is given, the output is similar to the one from kubectl:

.. code-block:: bash

    kubectl get namespaces namespace_name -o json

CLI Example:

.. code-block:: bash

    salt '*' k8s.get_namespaces
    salt '*' k8s.get_namespaces namespace_name http://kube-master.cluster.local
|
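A hedged usage sketch of the fail-soft contract above (this would run inside a salt minion where the module is loaded; the message is illustrative):

namespaces = get_namespaces()  # all namespaces, or False if no apiserver found
if namespaces is False:
    print('could not determine the kubernetes apiserver URL')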
def shutdown(at_time=None):
'''
Shutdown a running system.

at_time
    The wait time in minutes before the system will be shut down.

CLI Example:

.. code-block:: bash

    salt '*' system.shutdown 5
'''
cmd = ['shutdown', '-h', ('{0}'.format(at_time) if at_time else 'now')]
ret = __salt__['cmd.run'](cmd, python_shell=False)
return ret
|
Shutdown a running system.

at_time
    The wait time in minutes before the system will be shut down.

CLI Example:

.. code-block:: bash

    salt '*' system.shutdown 5
|
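A self-contained check of the argv list the function builds for the two cases:

at_time = 5
cmd = ['shutdown', '-h', ('{0}'.format(at_time) if at_time else 'now')]
assert cmd == ['shutdown', '-h', '5']
at_time = None
cmd = ['shutdown', '-h', ('{0}'.format(at_time) if at_time else 'now')]
assert cmd == ['shutdown', '-h', 'now']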
def hydrate_input_uploads(input_, input_schema, hydrate_values=True):
"""Hydrate input basic:upload types with upload location.
Find basic:upload fields in input.
Add the upload location for relative paths.
"""
from resolwe.flow.managers import manager
files = []
for field_schema, fields in iterate_fields(input_, input_schema):
name = field_schema['name']
value = fields[name]
if 'type' in field_schema:
if field_schema['type'] == 'basic:file:':
files.append(value)
elif field_schema['type'] == 'list:basic:file:':
files.extend(value)
urlregex = re.compile(r'^(https?|ftp)://[-A-Za-z0-9\+&@#/%?=~_|!:,.;]*[-A-Za-z0-9\+&@#/%=~_|]')
for value in files:
if 'file_temp' in value:
if isinstance(value['file_temp'], str):
# If file_temp is not a url, hydrate the path.
if not urlregex.search(value['file_temp']):
value['file_temp'] = manager.get_executor().resolve_upload_path(value['file_temp'])
else:
# Something very strange happened.
value['file_temp'] = 'Invalid value for file_temp in DB'
|
Hydrate input basic:upload types with the upload location.
Find basic:upload fields in the input.
Add the upload location for relative paths.
|
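A self-contained check of the url test used above: file_temp values that already look like URLs are left alone, while plain relative paths get resolved to the upload location:

import re

urlregex = re.compile(r'^(https?|ftp)://[-A-Za-z0-9\+&@#/%?=~_|!:,.;]*[-A-Za-z0-9\+&@#/%=~_|]')
assert urlregex.search('https://example.org/data/reads.fastq')
assert not urlregex.search('tmp/upload/reads.fastq')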