code
stringlengths 75
104k
| docstring
stringlengths 1
46.9k
|
|---|---|
def insert(self, resource, value):
    """insert(resource, value)

    Insert a resource entry into the database. RESOURCE is a
    string and VALUE can be any Python value.

    Invalid resources (those whose last component is empty after
    splitting) are silently ignored.
    """
    # Split the resource into alternating components and bindings.
    parts = resource_parts_re.split(resource)
    # If the last part is empty, this is an invalid resource
    # which we simply ignore.
    if parts[-1] == '':
        return
    self.lock.acquire()
    try:
        db = self.db
        # Walk/extend the tree: odd indices are bindings, even are components.
        for i in range(1, len(parts), 2):
            # Create a new mapping/value group if this component is new.
            if parts[i - 1] not in db:
                db[parts[i - 1]] = ({}, {})
            # Use second mapping if a loose binding, first otherwise.
            if '*' in parts[i]:
                db = db[parts[i - 1]][1]
            else:
                db = db[parts[i - 1]][0]
        # Insert value into the derived db, preserving existing sub-mappings.
        if parts[-1] in db:
            db[parts[-1]] = db[parts[-1]][:2] + (value, )
        else:
            db[parts[-1]] = ({}, {}, value)
    finally:
        # FIX: always release the lock; the original leaked it when any
        # exception occurred between acquire() and release().
        self.lock.release()
|
insert(resource, value)
Insert a resource entry into the database. RESOURCE is a
string and VALUE can be any Python value.
|
def isPointVisible(self, x, y):
    """Return True when the point (x, y) lies on some monitor."""
    class POINT(ctypes.Structure):
        # Win32 POINT structure: two signed 32-bit coordinates.
        _fields_ = [("x", ctypes.c_long), ("y", ctypes.c_long)]

    point = POINT()
    point.x, point.y = x, y
    # MONITOR_DEFAULTTONULL makes MonitorFromPoint return NULL (0)
    # when the point is not covered by any monitor.
    MONITOR_DEFAULTTONULL = 0
    monitor = self._user32.MonitorFromPoint(point, MONITOR_DEFAULTTONULL)
    return monitor != 0
|
Checks if a point is visible on any monitor.
|
def setInverted(self, state):
    """
    Sets whether or not to invert the check state for collapsing.

    :param state | <bool>
    """
    # Capture the collapse state before flipping the polarity flag,
    # then re-apply it so the widget refreshes under the new setting.
    was_collapsed = self.isCollapsed()
    self._inverted = state
    if self.isCollapsible():
        self.setCollapsed(was_collapsed)
|
Sets whether or not to invert the check state for collapsing.
:param state | <bool>
|
def to_profile_info(self, serialize_credentials=False):
    """Unlike to_project_config, this dict is not a mirror of any existing
    on-disk data structure. It's used when creating a new profile from an
    existing one.

    :param serialize_credentials bool: If True, serialize the credentials.
        Otherwise, the Credentials object will be copied.
    :returns dict: The serialized profile.
    """
    credentials = self.credentials.incorporate()
    if serialize_credentials:
        credentials = credentials.serialize()
    return {
        'profile_name': self.profile_name,
        'target_name': self.target_name,
        'config': self.config.to_dict(),
        'threads': self.threads,
        'credentials': credentials,
    }
|
Unlike to_project_config, this dict is not a mirror of any existing
on-disk data structure. It's used when creating a new profile from an
existing one.
:param serialize_credentials bool: If True, serialize the credentials.
Otherwise, the Credentials object will be copied.
:returns dict: The serialized profile.
|
def Diag(a):
    """
    Diag op.

    Embeds `a` into a zero array of shape ``a.shape + a.shape`` so that
    ``out[idx + idx] == a[idx]`` — a generalized diagonal. Returns a
    1-tuple containing the result.
    """
    out = np.zeros(a.shape + a.shape, dtype=a.dtype)
    for idx, val in np.ndenumerate(a):
        out[idx + idx] = val
    return (out,)
|
Diag op.
|
def failure_raiser(*validation_func,  # type: ValidationFuncs
                   **kwargs
                   ):
    # type: (...) -> Callable
    """
    Transforms the provided validation function(s) into a failure raiser: a
    callable raising a subclass of `Failure` whenever validation fails (the
    wrapped function either does not return `True` or raises an exception).
    This is applied automatically whenever you provide a tuple
    `(<function>, <msg>_or_<Failure_type>)` to any of the methods in this page
    or to one of the `valid8` decorators.

    :param validation_func: the base validation function or list of base validation functions to use. A callable, a
        tuple(callable, help_msg_str), a tuple(callable, failure_type), or a list of several such elements. Nested
        lists are supported and indicate an implicit `and_` (such as the main list). Tuples indicate an implicit
        `_failure_raiser`. [mini_lambda](https://smarie.github.io/python-mini-lambda/) expressions can be used instead
        of callables, they will be transformed to functions automatically.
    :param failure_type: a subclass of `WrappingFailure` that should be raised in case of failure
    :param help_msg: a string help message for the raised `WrappingFailure`. Optional (default = WrappingFailure with
        no help message).
    :param kw_context_args: all remaining keyword arguments; used as context for the raised failure
    :return: a callable that raises a `Failure` subclass when validation fails
    """
    failure_type, help_msg = pop_kwargs(
        kwargs, [('failure_type', None), ('help_msg', None)], allow_others=True
    )
    # Everything left in kwargs becomes the failure's context.
    kw_context_args = kwargs
    main_func = _process_validation_function_s(list(validation_func))
    return _failure_raiser(main_func, failure_type=failure_type,
                           help_msg=help_msg, **kw_context_args)
|
This function is automatically used if you provide a tuple `(<function>, <msg>_or_<Failure_type>)`, to any of the
methods in this page or to one of the `valid8` decorators. It transforms the provided `<function>` into a failure
raiser, raising a subclass of `Failure` in case of failure (either not returning `True` or raising an exception)
:param validation_func: the base validation function or list of base validation functions to use. A callable, a
tuple(callable, help_msg_str), a tuple(callable, failure_type), or a list of several such elements. Nested lists
are supported and indicate an implicit `and_` (such as the main list). Tuples indicate an implicit
`_failure_raiser`. [mini_lambda](https://smarie.github.io/python-mini-lambda/) expressions can be used instead
of callables, they will be transformed to functions automatically.
:param failure_type: a subclass of `WrappingFailure` that should be raised in case of failure
:param help_msg: a string help message for the raised `WrappingFailure`. Optional (default = WrappingFailure with
no help message).
:param kw_context_args
:return:
|
def _convert_to_tensor(value, dtype=None, dtype_hint=None, name=None):
    """Converts the given `value` to a (structure of) `Tensor`.

    This function converts Python objects of various types to a (structure of)
    `Tensor` objects. It accepts `Tensor` objects, numpy arrays, Python lists,
    and Python scalars.

    Args:
      value: An object whose structure matches that of `dtype` and/or
        `dtype_hint` and for which each leaf has a registered `Tensor`
        conversion function.
      dtype: Optional (structure of) element type for the returned tensor. If
        missing, the type is inferred from the type of `value`.
      dtype_hint: Optional (structure of) element type for the returned tensor,
        used when dtype is None. In some cases, a caller may not have a dtype
        in mind when converting to a tensor, so dtype_hint can be used as a
        soft preference. If the conversion to `dtype_hint` is not possible,
        this argument has no effect.
      name: Optional name to use if a new `Tensor` is created.

    Returns:
      tensor: A (structure of) `Tensor` based on `value`.

    Raises:
      TypeError: If no conversion function is registered for `value` to `dtype`.
      RuntimeError: If a registered conversion function returns an invalid value.
      ValueError: If the `value` is a tensor not of given `dtype` in graph mode.
    """
    if (tf.nest.is_nested(dtype) or
            tf.nest.is_nested(dtype_hint)):
        if dtype is None:
            fn = lambda v, pd: tf.convert_to_tensor(v, dtype_hint=pd, name=name)
            return tf.nest.map_structure(fn, value, dtype_hint)
        elif dtype_hint is None:
            fn = lambda v, d: tf.convert_to_tensor(v, dtype=d, name=name)
            # BUG FIX: map over `dtype` (the non-None structure). The original
            # passed `dtype_hint` here, which is None in this branch, so
            # map_structure received no dtype structure at all.
            return tf.nest.map_structure(fn, value, dtype)
        else:
            fn = lambda v, d, pd: tf.convert_to_tensor(  # pylint: disable=g-long-lambda
                v, dtype=d, dtype_hint=pd, name=name)
            return tf.nest.map_structure(fn, value, dtype, dtype_hint)
    return tf.convert_to_tensor(
        value=value, dtype=dtype, dtype_hint=dtype_hint, name=name)
|
Converts the given `value` to a (structure of) `Tensor`.
This function converts Python objects of various types to a (structure of)
`Tensor` objects. It accepts `Tensor` objects, numpy arrays, Python lists, and
Python scalars. For example:
Args:
value: An object whose structure matches that of `dtype ` and/or
`dtype_hint` and for which each leaf has a registered `Tensor` conversion
function.
dtype: Optional (structure of) element type for the returned tensor. If
missing, the type is inferred from the type of `value`.
dtype_hint: Optional (structure of) element type for the returned tensor,
used when dtype is None. In some cases, a caller may not have a dtype in
mind when converting to a tensor, so dtype_hint can be used as a soft
preference. If the conversion to `dtype_hint` is not possible, this
argument has no effect.
name: Optional name to use if a new `Tensor` is created.
Returns:
tensor: A (structure of) `Tensor` based on `value`.
Raises:
TypeError: If no conversion function is registered for `value` to `dtype`.
RuntimeError: If a registered conversion function returns an invalid value.
ValueError: If the `value` is a tensor not of given `dtype` in graph mode.
|
def save_to_file(self, path, filename, **params):
    """
    Saves binary content to a file with name filename. filename should
    include the appropriate file extension, such as .xlsx or .txt, e.g.,
    filename = 'sample.xlsx'.

    Useful for downloading .xlsx files.
    """
    url = ensure_trailing_slash(self.url + path.lstrip('/'))
    response = self._request('get', url, params=params)
    # Write the raw response body; 'wb' because the payload is binary.
    with open(filename, 'wb') as fh:
        fh.write(response.content)
|
Saves binary content to a file with name filename. filename should
include the appropriate file extension, such as .xlsx or .txt, e.g.,
filename = 'sample.xlsx'.
Useful for downloading .xlsx files.
|
def time(host=None, port=None, db=None, password=None):
    '''
    Return the current server UNIX time in seconds

    CLI Example:

    .. code-block:: bash

        salt '*' redis.time
    '''
    # redis TIME returns a (seconds, microseconds) pair; keep only seconds.
    seconds, _microseconds = _connect(host, port, db, password).time()
    return seconds
|
Return the current server UNIX time in seconds
CLI Example:
.. code-block:: bash
salt '*' redis.time
|
def K(self, parm):
    """ Returns the Gram Matrix

    Parameters
    ----------
    parm : np.ndarray
        Parameters for the Gram Matrix

    Returns
    ----------
    - Gram Matrix (np.ndarray)
    """
    # A tiny diagonal jitter is added for numerical stability.
    n = self.X.shape[0]
    jitter = np.identity(n) * (10 ** -10)
    return ARD_K_matrix(self.X, parm) + jitter
|
Returns the Gram Matrix
Parameters
----------
parm : np.ndarray
Parameters for the Gram Matrix
Returns
----------
- Gram Matrix (np.ndarray)
|
def _astorestr(ins):
    ''' Stores a string value into a memory address.
    It copies content of 2nd operand (string), into 1st, reallocating
    dynamic memory for the 1st str. This instruction DOES ALLOW
    immediate strings for the 2nd parameter, starting with '#'.
    '''
    # _addr() emits the asm that leaves the destination address prepared.
    output = _addr(ins.quad[1])
    op = ins.quad[2]
    # Decode the operand's addressing-mode prefixes, stripping each marker:
    #   '*' -> indirect, '#' -> immediate, '$' -> NOT temporal (named/heap string)
    indirect = op[0] == '*'
    if indirect:
        op = op[1:]
    immediate = op[0] == '#'
    if immediate:
        op = op[1:]
    temporal = op[0] != '$'
    if not temporal:
        op = op[1:]
    if is_int(op):
        # Numeric operand: truncate to a 16-bit pointer value.
        op = str(int(op) & 0xFFFF)
        if indirect:
            if immediate: # *#<addr> = ld hl, (number)
                output.append('ld de, (%s)' % op)
            else:
                output.append('ld de, (%s)' % op)
                output.append('call __LOAD_DE_DE')  # de = (de)
                REQUIRES.add('lddede.asm')
        else:
            # Integer does not make sense here (unless it's a ptr)
            raise InvalidICError(str(ins))
            # NOTE(review): unreachable after the raise above — looks like
            # dead code; confirm intended placement against upstream source.
            output.append('ld de, (%s)' % op)
    elif op[0] == '_': # an identifier
        temporal = False # Global var is not a temporary string
        if indirect:
            if immediate: # *#_id = _id
                output.append('ld de, (%s)' % op)
            else: # *_id
                output.append('ld de, (%s)' % op)
                output.append('call __LOAD_DE_DE')
                REQUIRES.add('lddede.asm')
        else:
            if immediate:
                output.append('ld de, %s' % op)
            else:
                output.append('ld de, (%s)' % op)
    else: # tn
        # A temporary: its address was pushed on the stack earlier.
        output.append('pop de')
        if indirect:
            output.append('call __LOAD_DE_DE')
            REQUIRES.add('lddede.asm')
    # Select the runtime store routine by whether the source is temporary.
    # Presumably __STORE_STR2 may free/reuse a source already in dynamic
    # memory while __STORE_STR leaves it intact — confirm in storestr*.asm.
    if not temporal:
        output.append('call __STORE_STR')
        REQUIRES.add('storestr.asm')
    else: # A value already on dynamic memory
        output.append('call __STORE_STR2')
        REQUIRES.add('storestr2.asm')
    return output
|
Stores a string value into a memory address.
It copies content of 2nd operand (string), into 1st, reallocating
dynamic memory for the 1st str. This instruction DOES ALLOW
immediate strings for the 2nd parameter, starting with '#'.
|
def apply(self, event = None):
    """Before self.onOk closes the window, it calls this function to sync the config changes from the GUI back to self.config.

    :param event: unused event argument (present so this can be bound as a GUI handler)
    :returns: True when every visible option converted, validated and was
        written back to self.config; False as soon as one option fails
        (after displaying the error), which tells self.onOk not to close.
    """
    for section in self.config.sections():
        # Run through the sections to check all the option values:
        for option, o in self.config.config[section].items():
            # Check the actual values against the validators and complain if necessary:
            if not o['include']:
                continue # This value is hidden, so there's no control for it.
            control = self._controls[section][option] # Get the actual control for GetValue
            try:
                # Coerce the widget's string value back to the option's original type.
                value = type(o['value'])(control.GetValue()) # Try and convert the value
            except ValueError as msg:
                self.displayError(section, option, str(msg)) # Woops, something went wrong
                return False # Tells self.onOk not to close the window
            problem = None # Set up the problem variable.
            try:
                problem = o['validate'](value) # See if it passes the test
            except Exception as e:
                problem = str(e) # The lambda raised an exception.
            if problem:
                self.displayError(section, option, problem) # It didn't
                return False # Tells self.onOk not to close the window
            self.config.set(section, option, value) # All clear
    return True
|
Before self.onOk closes the window, it calls this function to sync the config changes from the GUI back to self.config.
|
def send_status_response(environ, start_response, e, add_headers=None, is_head=False):
    """Start a WSGI response for a DAVError or plain status code.

    Returns the WSGI body iterable (a single-element list of bytes).
    """
    status = get_http_status_string(e)
    headers = list(add_headers) if add_headers else []
    if e in (HTTP_NOT_MODIFIED, HTTP_NO_CONTENT):
        # See paste.lint: these codes don't carry content.
        start_response(
            status, [("Content-Length", "0"), ("Date", get_rfc1123_time())] + headers
        )
        return [b""]
    if e in (HTTP_OK, HTTP_CREATED):
        # Promote bare success codes to DAVError so a page can be rendered.
        e = DAVError(e)
    assert isinstance(e, DAVError)
    content_type, body = e.get_response_page()
    if is_head:
        # HEAD responses carry headers only.
        body = compat.b_empty
    assert compat.is_bytes(body), body  # If not, Content-Length is wrong!
    response_headers = [
        ("Content-Type", content_type),
        ("Date", get_rfc1123_time()),
        ("Content-Length", str(len(body))),
    ] + headers
    start_response(status, response_headers)
    return [body]
|
Start a WSGI response for a DAVError or status code.
|
def spec_formatter(cls, spec):
    """Formats the elements of an argument set appropriately."""
    # Preserve the mapping's concrete type while stringifying every value.
    stringified = ((key, str(val)) for key, val in spec.items())
    return type(spec)(stringified)
|
Formats the elements of an argument set appropriately
|
def loadGmesh(filename, c="gold", alpha=1, wire=False, bc=None):
    """Reads a `gmesh` file format. Return an ``Actor(vtkActor)`` object.

    :param filename: path to the gmsh ASCII file.
    :param c: color passed to the Actor.
    :param alpha: opacity passed to the Actor.
    :param wire: wireframe flag passed to the Actor.
    :param bc: back color passed to the Actor.
    :returns: an Actor, or None when the file does not exist.
    """
    if not os.path.exists(filename):
        colors.printc("~noentry Error in loadGmesh: Cannot find", filename, c=1)
        return None
    # FIX: use a context manager so the file handle is closed even if
    # reading raises (the original leaked it on exception).
    with open(filename, "r") as f:
        lines = f.readlines()
    # Locate the $Nodes section; the line after the marker holds the count.
    nnodes = 0
    index_nodes = 0
    for i, line in enumerate(lines):
        if "$Nodes" in line:
            index_nodes = i + 1
            nnodes = int(lines[index_nodes])
            break
    node_coords = []
    for i in range(index_nodes + 1, index_nodes + 1 + nnodes):
        cn = lines[i].split()
        # Field 0 is the node id; fields 1..3 are x, y, z.
        node_coords.append([float(cn[1]), float(cn[2]), float(cn[3])])
    # Locate the $Elements section the same way.
    nelements = 0
    index_elements = 0
    for i, line in enumerate(lines):
        if "$Elements" in line:
            index_elements = i + 1
            nelements = int(lines[index_elements])
            break
    elements = []
    for i in range(index_elements + 1, index_elements + 1 + nelements):
        ele = lines[i].split()
        # The last three fields are the triangle's node indices.
        elements.append([int(ele[-3]), int(ele[-2]), int(ele[-1])])
    # gmsh node indices are 1-based, hence indexOffset=1.
    poly = buildPolyData(node_coords, elements, indexOffset=1)
    return Actor(poly, c, alpha, wire, bc)
|
Reads a `gmesh` file format. Return an ``Actor(vtkActor)`` object.
|
def _maybe_assert_valid_concentration(self, concentration, validate_args):
    """Checks the validity of the concentration parameter.

    When `validate_args` is False this is a no-op passthrough; otherwise the
    returned tensor carries assertion dependencies checking that the
    concentration is positive, at least rank 1, and has event_size >= 2.
    """
    if not validate_args:
        # Validation disabled: return the input unchanged, no graph ops added.
        return concentration
    return distribution_util.with_dependencies([
        assert_util.assert_positive(
            concentration, message="Concentration parameter must be positive."),
        assert_util.assert_rank_at_least(
            concentration,
            1,
            message="Concentration parameter must have >=1 dimensions."),
        # The last dimension is the event dimension; it must hold >= 2 entries.
        assert_util.assert_less(
            1,
            tf.shape(input=concentration)[-1],
            message="Concentration parameter must have event_size >= 2."),
    ], concentration)
|
Checks the validity of the concentration parameter.
|
def _create_relational_field(self, attr, options):
    """Creates the form element for working with entity relationships."""
    # The related entity's class drives the field; optional attributes
    # are allowed to be left empty.
    options.update(
        entity_class=attr.py_type,
        allow_empty=not attr.is_required,
    )
    return EntityField, options
|
Creates the form element for working with entity relationships.
|
def format(self, indent_level, indent_size=4):
    """Format this verifier.

    Returns:
        string: A formatted string
    """
    text = self.format_name('Literal', indent_size)
    if self.long_desc is not None:
        # Append the literal's value on its own wrapped line.
        value_line = self.wrap_lines('value: %s\n' % str(self._literal), 1, indent_size)
        text = text + '\n' + value_line
    return self.wrap_lines(text, indent_level, indent_size)
|
Format this verifier
Returns:
string: A formatted string
|
def after_request(self, fn):
    """
    Register a function to be run after each request.

    Your function must take one parameter, an instance of
    :attr:`response_class` and return a new response object or the
    same (see :meth:`process_response`).

    As of Flask 0.7 this function might not be executed at the end of the
    request in case an unhandled exception occurred.
    """
    # Defer the actual registration until the app is available.
    def register(app):
        return app.after_request(fn)
    self._defer(register)
    return fn
|
Register a function to be run after each request.
Your function must take one parameter, an instance of
:attr:`response_class` and return a new response object or the
same (see :meth:`process_response`).
As of Flask 0.7 this function might not be executed at the end of the
request in case an unhandled exception occurred.
|
def state_create(history_id_key, table_name, collision_checker, always_set=[]):
    """
    Decorator for the check() method on state-creating operations.
    Makes sure that:
    * there is a __preorder__ field set, which contains the state-creating operation's associated preorder
    * there is a __table__ field set, which contains the table into which to insert this state into
    * there is a __history_id_key__ field set, which identifies the table's primary key name
    * there are no unexpired, duplicate instances of this state with this history id.
    (i.e. if we're preordering a name that had previously expired, we need to preserve its history)

    NOTE(review): `always_set` has a mutable default ([]); it is only stored
    on the nameop, but confirm callers never mutate it.
    """
    def wrap( check ):
        # Wraps a concrete check() with the invariant enforcement below.
        def wrapped_check( state_engine, nameop, block_id, checked_ops ):
            # Run the wrapped check first; only enforce invariants on success.
            rc = check( state_engine, nameop, block_id, checked_ops )
            # pretty sure this isn't necessary any longer, but leave this is an assert just in case
            assert op_get_opcode_name(nameop['op']) in OPCODE_CREATION_OPS, 'BUG: opcode became {}'.format(nameop['op'])
            # succeeded?
            if rc:
                # ensure that there's now a __preorder__
                try:
                    assert '__preorder__' in nameop.keys(), "Missing __preorder__"
                except Exception, e:
                    # Missing preorder is unrecoverable: log and kill the process.
                    log.exception(e)
                    log.error("FATAL: missing fields")
                    os.abort()
                # propagate __table__ and __history_id_key__
                nameop['__table__'] = table_name
                nameop['__history_id_key__'] = history_id_key
                nameop['__state_create__'] = True
                nameop['__always_set__'] = always_set
                # sanity check---we need to have the appropriate metadata for this operation
                invariant_tags = state_create_invariant_tags()
                for tag in invariant_tags:
                    assert tag in nameop, "BUG: missing invariant tag '%s'" % tag
                # sanity check---all required consensus fields must be present
                for required_field in CONSENSUS_FIELDS_REQUIRED:
                    assert required_field in nameop, 'BUG: missing required consensus field {}'.format(required_field)
                # verify no duplicates
                rc = state_check_collisions( state_engine, nameop, history_id_key, block_id, checked_ops, collision_checker )
                if rc:
                    # this is a duplicate!
                    log.debug("COLLISION on %s '%s'" % (history_id_key, nameop[history_id_key]))
                    rc = False
                else:
                    # no collision
                    rc = True
            return rc
        return wrapped_check
    return wrap
|
Decorator for the check() method on state-creating operations.
Makes sure that:
* there is a __preorder__ field set, which contains the state-creating operation's associated preorder
* there is a __table__ field set, which contains the table into which to insert this state into
* there is a __history_id_key__ field set, which identifies the table's primary key name
* there are no unexpired, duplicate instances of this state with this history id.
(i.e. if we're preordering a name that had previously expired, we need to preserve its history)
|
def parse_match_settings(match_settings, config: ConfigObject):
    """
    Parses the match settings out of `config`, modifying the match settings
    object in place.

    :param match_settings: object receiving the parsed values
    :param config: source configuration
    :return: None
    """
    header = MATCH_CONFIGURATION_HEADER
    match_settings.game_mode = config.get(header, GAME_MODE)
    match_settings.game_map = config.get(header, GAME_MAP)
    match_settings.skip_replays = config.getboolean(header, SKIP_REPLAYS)
    match_settings.instant_start = config.getboolean(header, INSTANT_START)
    # Mutator values live under their own section; delegate.
    parse_mutator_settings(match_settings.mutators, config)
|
Parses the matching settings modifying the match settings object.
:param match_settings:
:param config:
:return:
|
def com_google_fonts_check_os2_metrics_match_hhea(ttFont):
    """Checking OS/2 Metrics match hhea Metrics.

    OS/2 and hhea vertical metric values should match. This will produce
    the same linespacing on Mac, GNU+Linux and Windows.
    Mac OS X uses the hhea values.
    Windows uses OS/2 or Win, depending on the OS or fsSelection bit value.
    """
    # FIX: check ascender and descender independently. The original used
    # `elif`, so a font with BOTH values wrong only had the ascender
    # mismatch reported; now each problem is reported on its own.
    passed = True
    if ttFont["OS/2"].sTypoAscender != ttFont["hhea"].ascent:
        passed = False
        yield FAIL, Message("ascender",
                            "OS/2 sTypoAscender and hhea ascent must be equal.")
    if ttFont["OS/2"].sTypoDescender != ttFont["hhea"].descent:
        passed = False
        yield FAIL, Message("descender",
                            "OS/2 sTypoDescender and hhea descent must be equal.")
    if passed:
        yield PASS, ("OS/2.sTypoAscender/Descender values"
                     " match hhea.ascent/descent.")
|
Checking OS/2 Metrics match hhea Metrics.
OS/2 and hhea vertical metric values should match. This will produce
the same linespacing on Mac, GNU+Linux and Windows.
Mac OS X uses the hhea values.
Windows uses OS/2 or Win, depending on the OS or fsSelection bit value.
|
def missing_whitespace_around_operator(logical_line, tokens):
    r"""Surround operators with a single space on either side.

    - Always surround these binary operators with a single space on
      either side: assignment (=), augmented assignment (+=, -= etc.),
      comparisons (==, <, >, !=, <=, >=, in, not in, is, is not),
      Booleans (and, or, not).

    - If operators with different priorities are used, consider adding
      whitespace around the operators with the lowest priorities.

    Okay: i = i + 1
    Okay: submitted += 1
    Okay: x = x * 2 - 1
    Okay: hypot2 = x * x + y * y
    Okay: c = (a + b) * (a - b)
    Okay: foo(bar, key='word', *args, **kwargs)
    Okay: alpha[:-i]

    E225: i=i+1
    E225: submitted +=1
    E225: x = x /2 - 1
    E225: z = x **y
    E226: c = (a+b) * (a-b)
    E226: hypot2 = x*x + y*y
    E227: c = a|b
    E228: msg = fmt%(errno, errmsg)
    """
    # State machine over `need_space`:
    #   False          -> no pending operator
    #   True           -> a space is REQUIRED after the previous operator
    #   None           -> transient: space is optional (converted to a tuple below)
    #   (pos, had_sp)  -> optional-space operator pending; `pos` is where to
    #                     report, `had_sp` records whether a space preceded it.
    parens = 0
    need_space = False
    prev_type = tokenize.OP
    prev_text = prev_end = None
    for token_type, text, start, end, line in tokens:
        if token_type in SKIP_COMMENTS:
            continue
        # Track call/lambda nesting so '=' inside parens can be allowed.
        if text in ('(', 'lambda'):
            parens += 1
        elif text == ')':
            parens -= 1
        if need_space:
            if start != prev_end:
                # Found a (probably) needed space
                if need_space is not True and not need_space[1]:
                    # Trailing space present but opening space was missing.
                    yield (need_space[0],
                           "E225 missing whitespace around operator")
                need_space = False
            elif text == '>' and prev_text in ('<', '-'):
                # Tolerate the "<>" operator, even if running Python 3
                # Deal with Python 3's annotated return value "->"
                pass
            else:
                if need_space is True or need_space[1]:
                    # A needed trailing space was not found
                    yield prev_end, "E225 missing whitespace around operator"
                elif prev_text != '**':
                    # Optional-space operator with no space on either side:
                    # pick the error code by operator category.
                    code, optype = 'E226', 'arithmetic'
                    if prev_text == '%':
                        code, optype = 'E228', 'modulo'
                    elif prev_text not in ARITHMETIC_OP:
                        code, optype = 'E227', 'bitwise or shift'
                    yield (need_space[0], "%s missing whitespace "
                           "around %s operator" % (code, optype))
                need_space = False
        elif token_type == tokenize.OP and prev_end is not None:
            if text == '=' and parens:
                # Allow keyword args or defaults: foo(bar=None).
                pass
            elif text in WS_NEEDED_OPERATORS:
                need_space = True
            elif text in UNARY_OPERATORS:
                # Check if the operator is being used as a binary operator
                # Allow unary operators: -123, -x, +1.
                # Allow argument unpacking: foo(*args, **kwargs).
                if (prev_text in '}])' if prev_type == tokenize.OP
                        else prev_text not in KEYWORDS):
                    need_space = None
            elif text in WS_OPTIONAL_OPERATORS:
                need_space = None
            if need_space is None:
                # Surrounding space is optional, but ensure that
                # trailing space matches opening space
                need_space = (prev_end, start != prev_end)
            elif need_space and start == prev_end:
                # A needed opening space was not found
                yield prev_end, "E225 missing whitespace around operator"
                need_space = False
        prev_type = token_type
        prev_text = text
        prev_end = end
|
r"""Surround operators with a single space on either side.
- Always surround these binary operators with a single space on
either side: assignment (=), augmented assignment (+=, -= etc.),
comparisons (==, <, >, !=, <=, >=, in, not in, is, is not),
Booleans (and, or, not).
- If operators with different priorities are used, consider adding
whitespace around the operators with the lowest priorities.
Okay: i = i + 1
Okay: submitted += 1
Okay: x = x * 2 - 1
Okay: hypot2 = x * x + y * y
Okay: c = (a + b) * (a - b)
Okay: foo(bar, key='word', *args, **kwargs)
Okay: alpha[:-i]
E225: i=i+1
E225: submitted +=1
E225: x = x /2 - 1
E225: z = x **y
E226: c = (a+b) * (a-b)
E226: hypot2 = x*x + y*y
E227: c = a|b
E228: msg = fmt%(errno, errmsg)
|
def clear(self):
    """ Clear everything

    Resets all stored query components to their empty defaults and
    returns self so calls can be chained.

    :rtype: Query
    """
    self._filters, self._selects = [], set()
    self._order_by = OrderedDict()
    self._negation = False
    self._attribute = self._chain = self._search = None
    return self
|
Clear everything
:rtype: Query
|
def predictions_variance(df, filepath=None):
    """
    Plots the mean variance prediction for each readout

    Parameters
    ----------
    df: `pandas.DataFrame`_
        DataFrame with columns starting with `VAR:`
    filepath: str
        Absolute path to a folder where to write the plots

    Returns
    -------
    plot
        Generated plot

    .. _pandas.DataFrame: http://pandas.pydata.org/pandas-docs/stable/dsintro.html#dataframe
    """
    var_cols = df.filter(regex="^VAR:")
    by_readout = var_cols.mean(axis=0).reset_index(level=0)
    by_readout.columns = ['Readout', 'Prediction variance (mean)']
    # Strip the 'VAR:' prefix from readout names for display.
    by_readout['Readout'] = by_readout.Readout.map(lambda name: name[4:])
    grid = sns.factorplot(x='Readout', y='Prediction variance (mean)',
                          data=by_readout, kind='bar', aspect=2)
    for label in grid.ax.get_xticklabels():
        label.set_rotation(90)
    if filepath:
        grid.savefig(os.path.join(filepath, 'predictions-variance.pdf'))
    return grid
|
Plots the mean variance prediction for each readout
Parameters
----------
df: `pandas.DataFrame`_
DataFrame with columns starting with `VAR:`
filepath: str
Absolute path to a folder where to write the plots
Returns
-------
plot
Generated plot
.. _pandas.DataFrame: http://pandas.pydata.org/pandas-docs/stable/dsintro.html#dataframe
|
def _code_line(self, line):
    """Add a code line to the current (innermost) container.

    Runs of spaces are emitted as dedicated space elements (S(c=n)) so
    they survive in the output document; the text in between is wrapped
    in Span elements.
    """
    assert self._containers
    container = self._containers[-1]
    # Handle extra spaces.
    text = line
    while text:
        if text.startswith(' '):
            # Leading run of spaces -> one S element carrying the count.
            r = re.match(r'(^ +)', text)
            n = len(r.group(1))
            container.addElement(S(c=n))
            text = text[n:]
        elif ' ' in text:
            assert not text.startswith(' ')
            # Emit the text up to the next space; the space itself is
            # handled by the S branch on the next iteration.
            i = text.index(' ')
            container.addElement(Span(text=text[:i]))
            text = text[i:]
        else:
            # No spaces left: the remainder is a single Span.
            container.addElement(Span(text=text))
            text = ''
|
Add a code line.
|
def resolve_aliases(self, chunks):
    '''
    Preserve backward compatibility by rewriting the 'state' key in the low
    chunks if it is using a legacy type.
    '''
    # Mutate each chunk dict in place; unaliased states are left untouched.
    for chunk in chunks:
        alias = self.aliases.get(chunk['state'])
        if alias is not None:
            chunk['state'] = alias
|
Preserve backward compatibility by rewriting the 'state' key in the low
chunks if it is using a legacy type.
|
def _unordered_iter(self):
    """iterator for results *as they arrive*, on FCFS basis, ignoring submission order."""
    try:
        # Fast path: everything already finished (zero-timeout get succeeds).
        rlist = self.get(0)
    except error.TimeoutError:
        # Slow path: poll the client until every message id has completed,
        # yielding each job's results as soon as it finishes.
        pending = set(self.msg_ids)
        while pending:
            try:
                self._client.wait(pending, 1e-3)
            except error.TimeoutError:
                # ignore timeout error, because that only means
                # *some* jobs are outstanding
                pass
            # update ready set with those no longer outstanding:
            ready = pending.difference(self._client.outstanding)
            # update pending to exclude those that are finished
            pending = pending.difference(ready)
            while ready:
                msg_id = ready.pop()
                ar = AsyncResult(self._client, msg_id, self._fname)
                rlist = ar.get()
                try:
                    for r in rlist:
                        yield r
                except TypeError:
                    # flattened, not a list
                    # this could get broken by flattened data that returns iterables
                    # but most calls to map do not expose the `flatten` argument
                    yield rlist
    else:
        # already done
        for r in rlist:
            yield r
|
iterator for results *as they arrive*, on FCFS basis, ignoring submission order.
|
def is_kanji(data):
    """\
    Returns if the `data` can be encoded in "kanji" mode.

    :param bytes data: The data to check.
    :rtype: bool
    """
    length = len(data)
    # Kanji mode packs each character into a byte pair, so empty or
    # odd-length input can never qualify.
    if not length or length % 2:
        return False
    if _PY2:
        data = (ord(c) for c in data)
    it = iter(data)
    for high in it:
        # Consume the pair: high byte from the loop, low byte via next().
        code = (high << 8) | next(it)
        # Shift-JIS ranges accepted by QR kanji mode.
        if not (0x8140 <= code <= 0x9ffc or 0xe040 <= code <= 0xebbf):
            return False
    return True
|
\
Returns if the `data` can be encoded in "kanji" mode.
:param bytes data: The data to check.
:rtype: bool
|
def supported_alleles(self):
    """
    Alleles for which predictions can be made.

    Returns
    -------
    list of string
    """
    # Computed lazily on first access and memoized in self._cache.
    if 'supported_alleles' not in self._cache:
        alleles = set(self.allele_to_allele_specific_models)
        if self.allele_to_fixed_length_sequence:
            alleles.update(self.allele_to_fixed_length_sequence)
        self._cache["supported_alleles"] = sorted(alleles)
    return self._cache["supported_alleles"]
|
Alleles for which predictions can be made.
Returns
-------
list of string
|
def map_prop_value_as_index(prp, lst):
    """
    Indexes the items of `lst` by the value of property `prp`.

    :param prp: property name to read from each item
    :param lst: iterable of items
    :return: mapping from each item's `prp` value to the item itself
    """
    pairs = [(prop(prp, item), item) for item in lst]
    return from_pairs(pairs)
|
Returns the given prop of each item in the list
:param prp:
:param lst:
:return:
|
def extend_right_to(self, window, max_size):
    """Adjust the size to make our window end where the right window begins, but don't
    get larger than max_size"""
    # Grow by the gap between our end and the neighbour's start, capped.
    gap = window.ofs - self.ofs_end()
    self.size = min(self.size + gap, max_size)
|
Adjust the size to make our window end where the right window begins, but don't
get larger than max_size
|
def get_facts(self):
    """Return a set of facts from the devices.

    Collects uptime, vendor, OS version, serial number, model, hostname,
    fqdn and the interface list by scraping 'show version', 'show hosts'
    and 'show ip interface brief' output.
    """
    # default values.
    vendor = u'Cisco'
    uptime = -1
    serial_number, fqdn, os_version, hostname, domain_name = ('Unknown',) * 5
    # obtain output from device
    show_ver = self._send_command('show version')
    show_hosts = self._send_command('show hosts')
    show_ip_int_br = self._send_command('show ip interface brief')
    # uptime/serial_number/IOS version
    for line in show_ver.splitlines():
        if ' uptime is ' in line:
            # Line format: "<hostname> uptime is <duration>"
            hostname, uptime_str = line.split(' uptime is ')
            uptime = self.parse_uptime(uptime_str)
            hostname = hostname.strip()
        if 'Processor board ID' in line:
            _, serial_number = line.split("Processor board ID ")
            serial_number = serial_number.strip()
        if re.search(r"Cisco IOS Software", line):
            try:
                _, os_version = line.split("Cisco IOS Software, ")
            except ValueError:
                # Handle 'Cisco IOS Software [Denali],'
                _, os_version = re.split(r"Cisco IOS Software \[.*?\], ", line)
            os_version = os_version.strip()
        elif re.search(r"IOS (tm).+Software", line):
            # Older "IOS (tm)" banner format.
            _, os_version = line.split("IOS (tm) ")
            os_version = os_version.strip()
    # Determine domain_name and fqdn
    for line in show_hosts.splitlines():
        if 'Default domain' in line:
            _, domain_name = line.split("Default domain is ")
            domain_name = domain_name.strip()
            break
    if domain_name != 'Unknown' and hostname != 'Unknown':
        fqdn = u'{}.{}'.format(hostname, domain_name)
    # model filter
    try:
        # e.g. "Cisco 3945 ... bytes of" -> model "3945"
        match_model = re.search(r"Cisco (.+?) .+bytes of", show_ver, flags=re.IGNORECASE)
        model = match_model.group(1)
    except AttributeError:
        # No match: re.search returned None.
        model = u'Unknown'
    # interface_list filter
    interface_list = []
    show_ip_int_br = show_ip_int_br.strip()
    for line in show_ip_int_br.splitlines():
        if 'Interface ' in line:
            # Skip the column-header line.
            continue
        interface = line.split()[0]
        interface_list.append(interface)
    return {
        'uptime': uptime,
        'vendor': vendor,
        'os_version': py23_compat.text_type(os_version),
        'serial_number': py23_compat.text_type(serial_number),
        'model': py23_compat.text_type(model),
        'hostname': py23_compat.text_type(hostname),
        'fqdn': fqdn,
        'interface_list': interface_list
    }
|
Return a set of facts from the devices.
|
def solvedbi_sm(ah, rho, b, c=None, axis=4):
    r"""
    Solve a diagonal block linear system with a scaled identity term
    via the Sherman-Morrison equation.

    Independently solves, along `axis`, systems of the form
    (see :cite:`wohlberg-2016-efficient`)

    .. math::
      (\rho I + \mathbf{a} \mathbf{a}^H ) \; \mathbf{x} = \mathbf{b} \;\;.

    Inner products and matrix products are taken along the specified axis
    of the multi-dimensional arrays; solutions are independent over the
    other axes.

    Parameters
    ----------
    ah : array_like
      Linear system component :math:`\mathbf{a}^H`
    rho : float
      Linear system parameter :math:`\rho`
    b : array_like
      Linear system component :math:`\mathbf{b}`
    c : array_like, optional (default None)
      Cached component :math:`\mathbf{c}` as computed by
      :func:`solvedbi_sm_c`; recomputed here when None
    axis : int, optional (default 4)
      Axis along which to solve the linear system

    Returns
    -------
    x : ndarray
      Linear system solution :math:`\mathbf{x}`
    """
    a = np.conj(ah)
    if c is None:
        c = solvedbi_sm_c(ah, a, rho, axis)
    # common subexpression for both evaluation strategies
    cb = inner(c, b, axis=axis)
    if have_numexpr:
        return ne.evaluate('(b - (a * cb)) / rho')
    return (b - a * cb) / rho
|
r"""
Solve a diagonal block linear system with a scaled identity term
using the Sherman-Morrison equation.
The solution is obtained by independently solving a set of linear
systems of the form (see :cite:`wohlberg-2016-efficient`)
.. math::
(\rho I + \mathbf{a} \mathbf{a}^H ) \; \mathbf{x} = \mathbf{b} \;\;.
In this equation inner products and matrix products are taken along
the specified axis of the corresponding multi-dimensional arrays; the
solutions are independent over the other axes.
Parameters
----------
ah : array_like
Linear system component :math:`\mathbf{a}^H`
rho : float
Linear system parameter :math:`\rho`
b : array_like
Linear system component :math:`\mathbf{b}`
c : array_like, optional (default None)
Solution component :math:`\mathbf{c}` that may be pre-computed using
:func:`solvedbi_sm_c` and cached for re-use.
axis : int, optional (default 4)
Axis along which to solve the linear system
Returns
-------
x : ndarray
Linear system solution :math:`\mathbf{x}`
|
def gridsearch(self, X, y, weights=None, return_scores=False,
               keep_best=True, objective='auto', progress=True,
               **param_grids):
    """
    Performs a grid search over a space of parameters for a given
    objective

    Warnings
    --------
    ``gridsearch`` is lazy and will not remove useless combinations
    from the search space, eg.

    >>> n_splines=np.arange(5,10), fit_splines=[True, False]

    will result in 10 loops, of which 5 are equivalent because
    ``fit_splines = False``

    Also, it is not recommended to search over a grid that alternates
    between known scales and unknown scales, as the scores of the
    candidate models will not be comparable.

    Parameters
    ----------
    X : array-like
      input data of shape (n_samples, m_features)
    y : array-like
      label data of shape (n_samples,)
    weights : array-like shape (n_samples,), optional
      sample weights
    return_scores : boolean, optional
      whether to return the hyperparameters and score for each element
      in the grid
    keep_best : boolean, optional
      whether to keep the best GAM as self.
    objective : {'auto', 'AIC', 'AICc', 'GCV', 'UBRE'}, optional
      Metric to optimize.
      If `auto`, then grid search will optimize `GCV` for models with unknown
      scale and `UBRE` for models with known scale.
    progress : bool, optional
      whether to display a progress bar
    **kwargs
      pairs of parameters and iterables of floats, or
      parameters and iterables of iterables of floats.

      If no parameter are specified, ``lam=np.logspace(-3, 3, 11)`` is used.
      This results in a 11 points, placed diagonally across lam space.

      If grid is iterable of iterables of floats,
      the outer iterable must have length ``m_features``.
      the cartesian product of the subgrids in the grid will be tested.

      If grid is a 2d numpy array,
      each row of the array will be tested.

      The method will make a grid of all the combinations of the parameters
      and fit a GAM to each combination.

    Returns
    -------
    if ``return_scores=True``:
        model_scores: dict containing each fitted model as keys and corresponding
        objective scores as values
    else:
        self: ie possibly the newly fitted model

    Examples
    --------
    For a model with 4 terms, and where we expect 4 lam values,
    our search space for lam must have 4 dimensions.

    We can search the space in 3 ways:

    1. via cartesian product by specifying the grid as a list.
       our grid search will consider ``11 ** 4`` points:

    >>> lam = np.logspace(-3, 3, 11)
    >>> lams = [lam] * 4
    >>> gam.gridsearch(X, y, lam=lams)

    2. directly by specifying the grid as a np.ndarray.
       This is useful for when the dimensionality of the search space
       is very large, and we would prefer to execute a randomized search:

    >>> lams = np.exp(np.random.random(50, 4) * 6 - 3)
    >>> gam.gridsearch(X, y, lam=lams)

    3. copying grids for parameters with multiple dimensions.
       if we specify a 1D np.ndarray for lam, we are implicitly testing the
       space where all points have the same value

    >>> gam.gridsearch(lam=np.logspace(-3, 3, 11))

    is equivalent to:

    >>> lam = np.logspace(-3, 3, 11)
    >>> lams = np.array([lam] * 4)
    >>> gam.gridsearch(X, y, lam=lams)
    """
    # check if model fitted; validate settings only on the first fit
    if not self._is_fitted:
        self._validate_params()
        self._validate_data_dep_params(X)

    y = check_y(y, self.link, self.distribution, verbose=self.verbose)
    X = check_X(X, verbose=self.verbose)
    check_X_y(X, y)

    if weights is not None:
        weights = np.array(weights).astype('f').ravel()
        weights = check_array(weights, name='sample weights',
                              ndim=1, verbose=self.verbose)
        check_lengths(y, weights)
    else:
        # default to uniform sample weights
        weights = np.ones_like(y).astype('float64')

    # validate objective
    if objective not in ['auto', 'GCV', 'UBRE', 'AIC', 'AICc']:
        raise ValueError("objective mut be in "\
                         "['auto', 'GCV', 'UBRE', 'AIC', 'AICc'], '\
                         'but found objective = {}".format(objective))

    # check objective: resolve 'auto' against the distribution's scale
    if self.distribution._known_scale:
        if objective == 'GCV':
            raise ValueError('GCV should be used for models with'\
                             'unknown scale')
        if objective == 'auto':
            objective = 'UBRE'
    else:
        if objective == 'UBRE':
            raise ValueError('UBRE should be used for models with '\
                             'known scale')
        if objective == 'auto':
            objective = 'GCV'

    # if no params, then set up default gridsearch
    if not bool(param_grids):
        param_grids['lam'] = np.logspace(-3, 3, 11)

    # validate params
    admissible_params = list(self.get_params()) + self._plural
    params = []
    grids = []
    for param, grid in list(param_grids.items()):

        # check param exists
        if param not in (admissible_params):
            raise ValueError('unknown parameter: {}'.format(param))

        # check grid is iterable at all
        if not (isiterable(grid) and (len(grid) > 1)): \
            raise ValueError('{} grid must either be iterable of '
                             'iterables, or an iterable of lengnth > 1, '\
                             'but found {}'.format(param, grid))

        # prepare grid
        if any(isiterable(g) for g in grid):
            # get required parameter shape
            target_len = len(flatten(getattr(self, param)))

            # check if cartesian product needed
            cartesian = (not isinstance(grid, np.ndarray) or grid.ndim != 2)

            # build grid
            grid = [np.atleast_1d(g) for g in grid]

            # check shape
            msg = '{} grid should have {} columns, '\
                  'but found grid with {} columns'.format(param, target_len, len(grid))
            if cartesian:
                if len(grid) != target_len:
                    raise ValueError(msg)
                grid = combine(*grid)

            if not all([len(subgrid) == target_len for subgrid in grid]):
                raise ValueError(msg)

        # save param name and grid
        params.append(param)
        grids.append(grid)

    # build a list of dicts of candidate model params
    param_grid_list = []
    for candidate in combine(*grids):
        param_grid_list.append(dict(zip(params,candidate)))

    # set up data collection
    best_model = None  # keep the best model
    best_score = np.inf
    scores = []
    models = []

    # check if our model has been fitted already and store it
    if self._is_fitted:
        models.append(self)
        scores.append(self.statistics_[objective])

        # our model is currently the best
        best_model = models[-1]
        best_score = scores[-1]

    # make progressbar optional
    if progress:
        pbar = ProgressBar()
    else:
        pbar = lambda x: x

    # loop through candidate model params
    for param_grid in pbar(param_grid_list):
        try:
            # try fitting
            # define new model
            gam = deepcopy(self)
            gam.set_params(self.get_params())
            gam.set_params(**param_grid)

            # warm start with parameters from previous build
            if models:
                coef = models[-1].coef_
                gam.set_params(coef_=coef, force=True, verbose=False)
            gam.fit(X, y, weights)

        except ValueError as error:
            # a candidate that fails to fit is reported (if verbose) and skipped
            msg = str(error) + '\non model with params:\n' + str(param_grid)
            msg += '\nskipping...\n'
            if self.verbose:
                warnings.warn(msg)
            continue

        # record results
        models.append(gam)
        scores.append(gam.statistics_[objective])

        # track best
        if scores[-1] < best_score:
            best_model = models[-1]
            best_score = scores[-1]

    # problems
    if len(models) == 0:
        msg = 'No models were fitted.'
        if self.verbose:
            warnings.warn(msg)
        return self

    # copy over the best
    if keep_best:
        self.set_params(deep=True,
                        force=True,
                        **best_model.get_params(deep=True))

    if return_scores:
        return OrderedDict(zip(models, scores))
    else:
        return self
|
Performs a grid search over a space of parameters for a given
objective
Warnings
--------
``gridsearch`` is lazy and will not remove useless combinations
from the search space, eg.
>>> n_splines=np.arange(5,10), fit_splines=[True, False]
will result in 10 loops, of which 5 are equivalent because
``fit_splines = False``
Also, it is not recommended to search over a grid that alternates
between known scales and unknown scales, as the scores of the
candidate models will not be comparable.
Parameters
----------
X : array-like
input data of shape (n_samples, m_features)
y : array-like
label data of shape (n_samples,)
weights : array-like shape (n_samples,), optional
sample weights
return_scores : boolean, optional
whether to return the hyperparameters and score for each element
in the grid
keep_best : boolean, optional
whether to keep the best GAM as self.
objective : {'auto', 'AIC', 'AICc', 'GCV', 'UBRE'}, optional
Metric to optimize.
If `auto`, then grid search will optimize `GCV` for models with unknown
scale and `UBRE` for models with known scale.
progress : bool, optional
whether to display a progress bar
**kwargs
pairs of parameters and iterables of floats, or
parameters and iterables of iterables of floats.
If no parameter are specified, ``lam=np.logspace(-3, 3, 11)`` is used.
This results in a 11 points, placed diagonally across lam space.
If grid is iterable of iterables of floats,
the outer iterable must have length ``m_features``.
the cartesian product of the subgrids in the grid will be tested.
If grid is a 2d numpy array,
each row of the array will be tested.
The method will make a grid of all the combinations of the parameters
and fit a GAM to each combination.
Returns
-------
if ``return_scores=True``:
model_scores: dict containing each fitted model as keys and corresponding
objective scores as values
else:
self: ie possibly the newly fitted model
Examples
--------
For a model with 4 terms, and where we expect 4 lam values,
our search space for lam must have 4 dimensions.
We can search the space in 3 ways:
1. via cartesian product by specifying the grid as a list.
our grid search will consider ``11 ** 4`` points:
>>> lam = np.logspace(-3, 3, 11)
>>> lams = [lam] * 4
>>> gam.gridsearch(X, y, lam=lams)
2. directly by specifying the grid as a np.ndarray.
This is useful for when the dimensionality of the search space
is very large, and we would prefer to execute a randomized search:
>>> lams = np.exp(np.random.random(50, 4) * 6 - 3)
>>> gam.gridsearch(X, y, lam=lams)
3. copying grids for parameters with multiple dimensions.
if we specify a 1D np.ndarray for lam, we are implicitly testing the
space where all points have the same value
>>> gam.gridsearch(lam=np.logspace(-3, 3, 11))
is equivalent to:
>>> lam = np.logspace(-3, 3, 11)
>>> lams = np.array([lam] * 4)
>>> gam.gridsearch(X, y, lam=lams)
|
def copy_path_flat(self):
    """Return a flattened copy of the current path.

    Like :meth:`copy_path`, except that any curves in the path are
    approximated with piecewise-linear segments (accurate to within the
    current tolerance value, see :meth:`set_tolerance`).  The result is
    therefore guaranteed to contain no
    :obj:`CURVE_TO <PATH_CURVE_TO>` elements; those are replaced by
    series of :obj:`LINE_TO <PATH_LINE_TO>` elements.

    :returns:
        A list of ``(path_operation, coordinates)`` tuples.
        See :meth:`copy_path` for the data structure.
    """
    flat_path = cairo.cairo_copy_path_flat(self._pointer)
    operations = [op for op in _iter_path(flat_path)]
    # free the C-level path before returning the Python copy
    cairo.cairo_path_destroy(flat_path)
    return operations
|
Return a flattened copy of the current path
This method is like :meth:`copy_path`
except that any curves in the path will be approximated
with piecewise-linear approximations,
(accurate to within the current tolerance value,
see :meth:`set_tolerance`).
That is,
the result is guaranteed to not have any elements
of type :obj:`CURVE_TO <PATH_CURVE_TO>`
which will instead be replaced by
a series of :obj:`LINE_TO <PATH_LINE_TO>` elements.
:returns:
A list of ``(path_operation, coordinates)`` tuples.
See :meth:`copy_path` for the data structure.
|
def default_ubuntu_tr(mod):
    """
    Default translation function for Ubuntu based systems.

    Maps a Python module name to its (default, python2, python3)
    Debian/Ubuntu package names.
    """
    name = mod.lower()
    py2pkg = 'python-{}'.format(name)
    py3pkg = 'python3-{}'.format(name)
    return (py2pkg, py2pkg, py3pkg)
|
Default translation function for Ubuntu based systems
|
def run(
    draco_query: List[str],
    constants: Dict[str, str] = None,
    files: List[str] = None,
    relax_hard=False,
    silence_warnings=False,
    debug=False,
    clear_cache=False,
) -> Optional[Result]:
    """ Run clingo to compute a completion of a partial spec or violations.

    :param draco_query: ASP program lines passed to clingo.
    :param constants: constant definitions forwarded to run_clingo.
    :param files: additional ASP files forwarded to run_clingo.
    :param clear_cache: drop the module-level file cache first (useful in
        notebooks during development).
    :return: a Result for 'OPTIMUM FOUND' / 'SATISFIABLE', or None when
        unsatisfiable or the result kind is unsupported.
    """
    # Clear file cache. useful during development in notebooks.
    if clear_cache and file_cache:
        logger.warning("Cleared file cache")
        file_cache.clear()

    # NOTE(review): run_clingo appears to return (stderr, stdout) in this
    # order -- confirm against its definition.
    stderr, stdout = run_clingo(
        draco_query, constants, files, relax_hard, silence_warnings, debug
    )

    try:
        json_result = json.loads(stdout)
    except json.JSONDecodeError:
        # dump both streams to help diagnose malformed clingo output
        logger.error("stdout: %s", stdout)
        logger.error("stderr: %s", stderr)
        raise

    if stderr:
        logger.error(stderr)

    result = json_result["Result"]

    if result == "UNSATISFIABLE":
        logger.info("Constraints are unsatisfiable.")
        return None
    elif result == "OPTIMUM FOUND":
        # get the last witness, which is the best result
        answers = json_result["Call"][0]["Witnesses"][-1]

        logger.debug(answers["Value"])

        return Result(
            clyngor.Answers(answers["Value"]).sorted,
            cost=json_result["Models"]["Costs"][0],
        )
    elif result == "SATISFIABLE":
        answers = json_result["Call"][0]["Witnesses"][-1]

        # without optimization there must be exactly one model
        assert (
            json_result["Models"]["Number"] == 1
        ), "Should not have more than one model if we don't optimize"

        logger.debug(answers["Value"])

        return Result(clyngor.Answers(answers["Value"]).sorted)
    else:
        logger.error("Unsupported result: %s", result)
        return None
|
Run clingo to compute a completion of a partial spec or violations.
|
def read(self, page):
    """Send a READ command to retrieve data from the tag.

    The *page* argument specifies the offset in multiples of 4
    bytes (i.e. page number 1 will return bytes 4 to 19). The data
    returned is a byte array of length 16 or None if the block is
    outside the readable memory range.

    Command execution errors raise :exc:`Type2TagCommandError`.
    """
    log.debug("read pages {0} to {1}".format(page, page+3))
    # READ command code is 0x30; the page number wraps at 256
    data = self.transceive("\x30"+chr(page % 256), timeout=0.005)
    if len(data) == 1 and data[0] & 0xFA == 0x00:
        # a single-byte response with (data[0] & 0xFA) == 0 is treated as
        # a NACK from the tag
        log.debug("received nak response")
        # re-sense the tag so subsequent commands can succeed
        self.target.sel_req = self.target.sdd_res[:]
        self._target = self.clf.sense(self.target)
        raise Type2TagCommandError(
            INVALID_PAGE_ERROR if self.target else nfc.tag.RECEIVE_ERROR)
    if len(data) != 16:
        log.debug("invalid response " + hexlify(data))
        raise Type2TagCommandError(INVALID_RESPONSE_ERROR)
    return data
|
Send a READ command to retrieve data from the tag.
The *page* argument specifies the offset in multiples of 4
bytes (i.e. page number 1 will return bytes 4 to 19). The data
returned is a byte array of length 16 or None if the block is
outside the readable memory range.
Command execution errors raise :exc:`Type2TagCommandError`.
|
def grid_linspace(bounds, count):
    """
    Return a grid spaced inside a bounding box with edges spaced using np.linspace.

    Parameters
    ---------
    bounds: (2, dimension) list of [[min x, min y, etc], [max x, max y, etc]]
    count: int, or (dimension,) int, number of samples per side

    Returns
    -------
    grid: (n, dimension) float, points in the specified bounds
    """
    bounds = np.asanyarray(bounds, dtype=np.float64)
    if len(bounds) != 2:
        raise ValueError('bounds must be (2, dimension)!')
    # BUG FIX: `np.int` was a deprecated alias removed in NumPy 1.24;
    # use an explicit integer dtype instead.
    count = np.asanyarray(count, dtype=np.int64)
    if count.shape == ():
        # scalar count: use the same number of samples on every axis
        count = np.tile(count, bounds.shape[1])

    # one linspace per axis, then the cartesian grid of all of them
    grid_elements = [np.linspace(*b, num=c) for b, c in zip(bounds.T, count)]
    grid = np.vstack(np.meshgrid(*grid_elements)
                     ).reshape(bounds.shape[1], -1).T
    return grid
|
Return a grid spaced inside a bounding box with edges spaced using np.linspace.
Parameters
---------
bounds: (2,dimension) list of [[min x, min y, etc], [max x, max y, etc]]
count: int, or (dimension,) int, number of samples per side
Returns
-------
grid: (n, dimension) float, points in the specified bounds
|
def get_scaled_cutout_basic(self, x1, y1, x2, y2, scale_x, scale_y,
                            method='basic'):
    """Extract the region bounded by corners (x1, y1) and (x2, y2) and
    scale it by the factors (scale_x, scale_y).

    `method` names the interpolation to use; the default "basic" is
    nearest neighbor.
    """
    width = int(round(scale_x * (x2 - x1 + 1)))
    height = int(round(scale_y * (y2 - y1 + 1)))
    # TODO: `method` is deliberately NOT forwarded to get_scaled_cutout_wdht;
    # passing it broke the current Glue plugin -- forward it once that
    # plugin has been updated.
    return self.get_scaled_cutout_wdht(x1, y1, x2, y2, width, height)
|
Extract a region of the image defined by corners (x1, y1) and
(x2, y2) and scale it by scale factors (scale_x, scale_y).
`method` describes the method of interpolation used, where the
default "basic" is nearest neighbor.
|
def polygon(self):
    """Return this envelope as an OGR polygon Geometry."""
    ring = ogr.Geometry(ogr.wkbLinearRing)
    # close the ring by repeating the lower-left corner
    corners = (self.ll, self.lr, self.ur, self.ul, self.ll)
    for corner in corners:
        ring.AddPoint_2D(*corner)
    poly = ogr.Geometry(ogr.wkbPolygon)
    poly.AddGeometryDirectly(ring)
    return poly
|
Returns an OGR Geometry for this envelope.
|
def start(self):
    '''
    Listen to messages and publish them.

    Sets up the IPC publisher and the configured listener, starts a
    watchdog thread that terminates this process when its parent dies,
    then loops receiving log messages and publishing them
    (msgpack-encoded) until shutdown.
    '''
    # counter metrics for messages
    c_logs_ingested = Counter(
        'napalm_logs_listener_logs_ingested',
        'Count of ingested log messages',
        ['listener_type', 'address', 'port'],
    )
    c_messages_published = Counter(
        'napalm_logs_listener_messages_published',
        'Count of published messages',
        ['listener_type', 'address', 'port'],
    )
    self._setup_ipc()
    log.debug('Using the %s listener', self._listener_type)
    self._setup_listener()
    self.listener.start()
    # Start suicide polling thread
    thread = threading.Thread(target=self._suicide_when_without_parent, args=(os.getppid(),))
    thread.start()
    signal.signal(signal.SIGTERM, self._exit_gracefully)
    self.__up = True
    while self.__up:
        try:
            log_message, log_source = self.listener.receive()
        except ListenerException as lerr:
            if self.__up is False:
                # shutdown was already requested; the listener error is expected
                log.info('Exiting on process shutdown')
                return
            else:
                log.error(lerr, exc_info=True)
                raise NapalmLogsExit(lerr)
        log.debug('Received %s from %s. Queueing to the server.', log_message, log_source)
        if not log_message:
            log.info('Empty message received from %s. Not queueing to the server.', log_source)
            continue
        c_logs_ingested.labels(listener_type=self._listener_type, address=self.address, port=self.port).inc()
        self.pub.send(umsgpack.packb((log_message, log_source)))
        c_messages_published.labels(listener_type=self._listener_type, address=self.address, port=self.port).inc()
|
Listen to messages and publish them.
|
def read_file_1st_col_only(fname):
    """
    Read a CSV file (ref_classes.csv) and return the list of names
    found in the first column, skipping the header row.
    """
    with open(fname, 'r') as fh:
        fh.readline()  # discard the header row
        names = [line.split(',')[0] for line in fh]
    return names
|
read a CSV file (ref_classes.csv) and return the
list of names
|
def complete(request, provider):
    """
    After first step of net authentication, we must validate the response.
    If everything is ok, we must do the following:
    1. If user is already authenticated:
        a. Try to login him again (strange variation but we must take it to account).
        b. Create new netID record in database.
        c. Merge authenticated account with newly created netID record.
        d. Redirect user to 'next' url stored in session.
    2. If user is anonymous:
        a. Try to log him by identity and redirect to 'next' url.
        b. Create new netID record in database.
        c. Try to automatically fill all extra fields with information returned from
           the server. If successful, login the user and redirect to 'next' url.
        d. Redirect user to extra page where he can fill all extra fields by hand.
    """
    # merge data from POST and GET methods
    data = request.GET.copy()
    data.update(request.POST)
    # In case of skipping begin step.
    if 'next_url' not in request.session:
        request.session['next_url'] = request.GET.get("next") or settings.LOGIN_REDIRECT_URL
    backend = get_backend(provider)
    # validate the provider response; it may itself return a redirect
    response = backend.validate(request, data)
    if isinstance(response, HttpResponseRedirect):
        return response
    if request.user.is_authenticated():
        success = backend.login_user(request)
        # attach the new identity to the already-authenticated account
        backend.merge_accounts(request)
    else:
        success = backend.login_user(request)
    if not success and not settings.REGISTRATION_ALLOWED:
        messages.warning(request, lang.REGISTRATION_DISABLED)
        return redirect(settings.REGISTRATION_DISABLED_REDIRECT)
    if success:
        return redirect(request.session.pop('next_url', settings.LOGIN_REDIRECT_URL))
    # login failed but registration is allowed: let the backend collect
    # any extra fields from the user
    return backend.complete(request, response)
|
After first step of net authentication, we must validate the response.
If everything is ok, we must do the following:
1. If user is already authenticated:
a. Try to login him again (strange variation but we must take it to account).
b. Create new netID record in database.
c. Merge authenticated account with newly created netID record.
d. Redirect user to 'next' url stored in session.
2. If user is anonymous:
a. Try to log him by identity and redirect to 'next' url.
b. Create new netID record in database.
c. Try to automatically fill all extra fields with information returned from the
server. If successful, login the user and redirect to 'next' url.
d. Redirect user to extra page where he can fill all extra fields by hand.
|
def sort_by_name(names):
    """Sort by last name, uniquely."""
    def _last_name_first(full_name):
        # single-token names sort on the whole name; otherwise move the
        # last token to the front; comparison is case-insensitive
        tokens = full_name.split(' ')
        if len(tokens) == 1:
            return full_name.upper()
        rearranged = '{} {}'.format(tokens[-1], ' '.join(tokens[:-1]))
        return rearranged.upper()
    return sorted(set(names), key=_last_name_first)
|
Sort by last name, uniquely.
|
def pairinplace(args):
    """
    %prog pairinplace bulk.fastq

    Pair up the records in bulk.fastq by comparing the names for adjacent
    records. If they match, print to bulk.pairs.fastq, else print to
    bulk.frags.fastq.
    """
    from jcvi.utils.iter import pairwise

    p = OptionParser(pairinplace.__doc__)
    p.set_rclip()
    p.set_tag()
    p.add_option("--base",
                 help="Base name for the output files [default: %default]")
    opts, args = p.parse_args(args)

    if len(args) != 1:
        sys.exit(not p.print_help())

    fastqfile, = args
    base = opts.base or op.basename(fastqfile).split(".")[0]

    frags = base + ".frags.fastq"
    pairs = base + ".pairs.fastq"
    # keep outputs gzipped when the input is gzipped
    if fastqfile.endswith(".gz"):
        frags += ".gz"
        pairs += ".gz"

    fragsfw = must_open(frags, "w")
    pairsfw = must_open(pairs, "w")

    N = opts.rclip  # number of trailing characters to strip when comparing names
    tag = opts.tag
    strip_name = (lambda x: x[:-N]) if N else None

    fh_iter = iter_fastq(fastqfile, key=strip_name)
    skipflag = False  # controls the iterator skip
    for a, b in pairwise(fh_iter):
        if b is None:  # hit the eof
            break

        if skipflag:
            # `a` was already written as the second mate of the previous pair
            skipflag = False
            continue

        if a.name == b.name:
            if tag:
                # mark the mates as /1 and /2
                a.name += "/1"
                b.name += "/2"
            print(a, file=pairsfw)
            print(b, file=pairsfw)
            skipflag = True
        else:
            print(a, file=fragsfw)

    # don't forget the last one, when b is None
    if not skipflag:
        print(a, file=fragsfw)

    logging.debug("Reads paired into `%s` and `%s`" % (pairs, frags))
    return pairs
|
%prog pairinplace bulk.fastq
Pair up the records in bulk.fastq by comparing the names for adjacent
records. If they match, print to bulk.pairs.fastq, else print to
bulk.frags.fastq.
|
def process_strings(self, string, docstrings=False):
    """Process escapes in a (possibly prefixed) string literal.

    Returns a ``(text, is_bytes)`` tuple where ``text`` is the dedented
    content with escape sequences decoded according to the literal's
    prefix (bytes / raw / format), or ``('', False)`` when the string
    type is not selected for processing and ``docstrings`` is False.
    """
    m = RE_STRING_TYPE.match(string)
    stype = self.get_string_type(m.group(1) if m.group(1) else '')

    if not self.match_string(stype) and not docstrings:
        return '', False

    is_bytes = 'b' in stype
    is_raw = 'r' in stype
    is_format = 'f' in stype
    content = m.group(3)

    # pick the decoder that matches the prefix combination
    if is_raw and (not is_format or not self.decode_escapes):
        string = self.norm_nl(content)
    elif is_raw and is_format:
        # raw f-string: only format-related escapes are decoded
        string = self.norm_nl(FE_RFESC.sub(self.replace_unicode, content))
    elif is_bytes:
        string = self.norm_nl(RE_BESC.sub(self.replace_bytes, content))
    elif is_format:
        string = self.norm_nl(RE_FESC.sub(self.replace_unicode, content))
    else:
        string = self.norm_nl(RE_ESC.sub(self.replace_unicode, content))
    # for bytes, replace non-printable spans with newlines before dedenting
    return textwrap.dedent(RE_NON_PRINTABLE.sub('\n', string) if is_bytes else string), is_bytes
|
Process escapes.
|
def user_twitter_list_bag_of_words(twitter_list_corpus,
                                   sent_tokenize, _treebank_word_tokenize,
                                   tagger, lemmatizer, lemmatize, stopset,
                                   first_cap_re, all_cap_re, digits_punctuation_whitespace_re,
                                   pos_set):
    """
    Extract a bag-of-words for a corpus of Twitter lists pertaining to a Twitter user.

    Inputs: - twitter_list_corpus: A python list of Twitter lists in json format.
            - remaining arguments: tokenization/lemmatization helpers passed
              straight through to clean_list_of_twitter_list.
    Output: - bag_of_words: A bag-of-words in python dictionary format.
            - lemma_to_keywordbag_total: Aggregated python dictionary that maps
              stems/lemmas to original topic keywords.
    """
    # Clean each Twitter list; this may yield empty keyword sets.
    keyword_sets, lemma_maps = clean_list_of_twitter_list(
        twitter_list_corpus,
        sent_tokenize, _treebank_word_tokenize,
        tagger, lemmatizer, lemmatize, stopset,
        first_cap_re, all_cap_re, digits_punctuation_whitespace_re,
        pos_set)

    # Merge all keyword sets into a single bag-of-words.
    bag_of_words = reduce_list_of_bags_of_words(keyword_sets)

    # Aggregate every lemma -> keyword multiplicity across the corpus.
    lemma_to_keywordbag_total = defaultdict(lambda: defaultdict(int))
    for lemma_map in lemma_maps:
        for lemma, keyword_bag in lemma_map.items():
            for keyword, multiplicity in keyword_bag.items():
                lemma_to_keywordbag_total[lemma][keyword] += multiplicity

    return bag_of_words, lemma_to_keywordbag_total
|
Extract a bag-of-words for a corpus of Twitter lists pertaining to a Twitter user.
Inputs: - twitter_list_corpus: A python list of Twitter lists in json format.
- lemmatizing: A string containing one of the following: "porter", "snowball" or "wordnet".
Output: - bag_of_words: A bag-of-words in python dictionary format.
- lemma_to_keywordbag_total: Aggregated python dictionary that maps stems/lemmas to original topic keywords.
|
def get_user(self, username):
    """ Get the user details from MAM.

    Returns the single matching record, or None when the user does not
    exist.  Raises RuntimeError on duplicate or mismatching results.
    """
    records = self._read_output(["glsuser", "-u", username, "--raw"])
    if not records:
        return None
    if len(records) > 1:
        message = "Command returned multiple results for '%s'." % username
        logger.error(message)
        raise RuntimeError(message)
    record = records[0]
    reported_name = record["Name"]
    # sanity check: MAM must echo back the username we asked for
    if reported_name.lower() != username.lower():
        message = ("We expected username '%s' but got username '%s'."
                   % (username, reported_name))
        logger.error(message)
        raise RuntimeError(message)
    return record
|
Get the user details from MAM.
|
def _collapse_preconditions(base_preconditions: List[List[Contract]], bases_have_func: bool,
                            preconditions: List[List[Contract]], func: Callable[..., Any]) -> List[List[Contract]]:
    """
    Collapse function preconditions with the preconditions collected from the base classes.

    :param base_preconditions: preconditions collected from the base classes (grouped by base class)
    :param bases_have_func: True if one of the base classes has the function
    :param preconditions: preconditions of the function (before the collapse)
    :param func: function whose preconditions we are collapsing
    :return: collapsed sequence of precondition groups
    """
    # Adding preconditions where a base defines the function with none at
    # all would strengthen (not weaken) the OR'ed preconditions -- forbidden.
    weakens_empty_base = bases_have_func and preconditions and not base_preconditions
    if weakens_empty_base:
        raise TypeError(("The function {} can not weaken the preconditions because the bases specify "
                         "no preconditions at all. Hence this function must accept all possible input since "
                         "the preconditions are OR'ed and no precondition implies a dummy precondition which is always "
                         "fulfilled.").format(func.__qualname__))

    return base_preconditions + preconditions
|
Collapse function preconditions with the preconditions collected from the base classes.
:param base_preconditions: preconditions collected from the base classes (grouped by base class)
:param bases_have_func: True if one of the base classes has the function
:param preconditions: preconditions of the function (before the collapse)
:param func: function whose preconditions we are collapsing
:return: collapsed sequence of precondition groups
|
def get_signin_url(self, service='ec2'):
    """
    Get the URL where IAM users can use their login profile to sign in
    to this account's console.

    :type service: string
    :param service: Default service to go to in the console.
    """
    alias = self.get_account_alias()
    if alias:
        return "https://%s.signin.aws.amazon.com/console/%s" % (alias, service)
    raise Exception('No alias associated with this account.  Please use iam.create_account_alias() first.')
|
Get the URL where IAM users can use their login profile to sign in
to this account's console.
:type service: string
:param service: Default service to go to in the console.
|
def _load_lib():
    """Load LightGBM library.

    Returns the loaded ctypes library, or None when no library file is
    found on the search path.
    """
    lib_paths = find_lib_path()
    if not lib_paths:
        return None
    library = ctypes.cdll.LoadLibrary(lib_paths[0])
    # make error strings come back as C strings instead of ints
    library.LGBM_GetLastError.restype = ctypes.c_char_p
    return library
|
Load LightGBM library.
|
def cast_from_bunq_response(cls, bunq_response):
    """Build an instance of `cls` from a generic response.

    :type bunq_response: BunqResponse
    """
    return cls(bunq_response.value,
               bunq_response.headers,
               bunq_response.pagination)
|
:type bunq_response: BunqResponse
|
def buildWorkbenchWithLauncher():
    """Build a workbench carrying a launcher with the default tools.

    The launcher is displayed on the workbench before returning.
    """
    workbench = ui.Workbench()
    default_tools = [exercises.SearchTool()]
    launcher = ui.Launcher(workbench, default_tools)
    workbench.display(launcher)
    return workbench, launcher
|
Builds a workbench.
The workbench has a launcher with all of the default tools. The
launcher will be displayed on the workbench.
|
def not_next(e):
    """
    Create a PEG function for negative lookahead.

    The returned matcher succeeds (consuming nothing) exactly when `e`
    fails at the current position, and fails when `e` succeeds.
    """
    def match_not_next(s, grm=None, pos=0):
        try:
            e(s, grm, pos)
        except PegreError:
            # inner expression failed -> the lookahead succeeds
            return PegreResult(s, Ignore, (pos, pos))
        raise PegreError('Negative lookahead failed', pos)
    return match_not_next
|
Create a PEG function for negative lookahead.
|
def get_config_value(name, fallback=None):
    """Gets a config by name.

    In the case where the config name is not found, will use fallback value."""
    config = CLIConfig(SF_CLI_CONFIG_DIR, SF_CLI_ENV_VAR_PREFIX)
    return config.get('servicefabric', name, fallback)
|
Gets a config by name.
In the case where the config name is not found, will use fallback value.
|
def get_indexed_node(manager, prop, value, node_type='Node', lookup_func='CONTAINS', legacy=True):
    """Yield nodes whose indexed property matches ``value`` (case-insensitive).

    :param manager: Neo4jDBSessionManager
    :param prop: Indexed property
    :param value: Indexed value
    :param node_type: Label used for index
    :param lookup_func: STARTS WITH | CONTAINS | ENDS WITH
    :param legacy: When True, yield the node's properties dict instead
        of the node object itself.
    :type manager: Neo4jDBSessionManager
    :type prop: str
    :type value: str
    :type node_type: str
    :type lookup_func: str
    :type legacy: bool
    :return: Dict or Node object
    :rtype: dict|Node
    """
    query = """
        MATCH (n:{label})
        WHERE LOWER(n.{prop}) {lookup_func} LOWER({{value}})
        RETURN n
        """.format(label=node_type, prop=prop, lookup_func=lookup_func)
    with manager.session as s:
        for record in s.run(query, {'value': value}):
            node = record['n']
            yield node.properties if legacy else node
|
:param manager: Neo4jDBSessionManager
:param prop: Indexed property
:param value: Indexed value
:param node_type: Label used for index
:param lookup_func: STARTS WITH | CONTAINS | ENDS WITH
:param legacy: Backwards compatibility
:type manager: Neo4jDBSessionManager
:type prop: str
:type value: str
:type node_type: str
:type lookup_func: str
:type legacy: bool
:return: Dict or Node object
:rtype: dict|Node
|
def format_request_email_title(increq, **ctx):
    """Format the email message title for inclusion request notification.

    :param increq: Inclusion request object for which the request is made.
    :type increq: `invenio_communities.models.InclusionRequest`
    :param ctx: Optional extra context parameters passed to formatter.
    :type ctx: dict.
    :returns: Email message title.
    :rtype: str
    """
    # NOTE: the original line ended with a stray comma, which wrapped the
    # template name in a 1-tuple and passed that tuple to the formatter.
    template = current_app.config["COMMUNITIES_REQUEST_EMAIL_TITLE_TEMPLATE"]
    return format_request_email_templ(increq, template, **ctx)
|
Format the email message title for inclusion request notification.
:param increq: Inclusion request object for which the request is made.
:type increq: `invenio_communities.models.InclusionRequest`
:param ctx: Optional extra context parameters passed to formatter.
:type ctx: dict.
:returns: Email message title.
:rtype: str
|
def _init_grps(code2nt):
"""Return list of groups in same order as in code2nt"""
seen = set()
seen_add = seen.add
groups = [nt.group for nt in code2nt.values()]
return [g for g in groups if not (g in seen or seen_add(g))]
|
Return list of groups in same order as in code2nt
|
def revoke_auth(preserve_minion_cache=False):
    '''
    The minion sends a request to the master to revoke its own key.
    Note that the minion session will be revoked and the minion may
    not be able to return the result of this command back to the master.
    If the 'preserve_minion_cache' flag is set to True, the master
    cache for this minion will not be removed.

    CLI Example:

    .. code-block:: bash

        salt '*' saltutil.revoke_auth
    '''
    if 'master_uri_list' in __opts__:
        masters = list(__opts__['master_uri_list'])
    else:
        masters = [__opts__['master_uri']]
    success = True
    for master in masters:
        channel = salt.transport.client.ReqChannel.factory(__opts__, master_uri=master)
        load = {'cmd': 'revoke_auth',
                'id': __opts__['id'],
                'tok': channel.auth.gen_token(b'salt'),
                'preserve_minion_cache': preserve_minion_cache}
        try:
            channel.send(load)
        except SaltReqTimeoutError:
            # Master did not answer in time; report overall failure but
            # keep trying the remaining masters.
            success = False
        finally:
            channel.close()
    return success
|
The minion sends a request to the master to revoke its own key.
Note that the minion session will be revoked and the minion may
not be able to return the result of this command back to the master.
If the 'preserve_minion_cache' flag is set to True, the master
cache for this minion will not be removed.
CLI Example:
.. code-block:: bash
salt '*' saltutil.revoke_auth
|
def setup_menu(self):
    """Build and return the widget's context menu."""
    actions = [create_action(self, _('Copy'),
                             shortcut=keybinding('Copy'),
                             icon=ima.icon('editcopy'),
                             triggered=self.copy,
                             context=Qt.WidgetShortcut)]
    conversions = ((_("To bool"), bool), (_("To complex"), complex),
                   (_("To int"), int), (_("To float"), float),
                   (_("To str"), to_text_string))
    for label, converter in conversions:
        # Bind ``converter`` as a default argument so each action keeps
        # its own conversion function (avoids the late-binding closure trap).
        handler = lambda converter=converter: self.change_type(converter)
        actions.append(create_action(self, label,
                                     triggered=handler,
                                     context=Qt.WidgetShortcut))
    menu = QMenu(self)
    add_actions(menu, actions)
    return menu
|
Setup context menu.
|
def boll(self, n, dev, array=False):
    """Bollinger Bands: return the (upper, lower) band pair.

    The bands sit ``dev`` standard deviations above and below the
    ``n``-period simple moving average.
    """
    middle = self.sma(n, array)
    band_width = self.std(n, array) * dev
    return middle + band_width, middle - band_width
|
布林通道
|
def date_to_epiweek(date=None) -> Epiweek:
    """
    Convert a python date to an Epiweek.

    :param date: date to convert; defaults to today's date, evaluated at
        call time.  (The original default ``datetime.date.today()`` was
        evaluated once at import time, so long-running processes would
        silently keep using a stale "today".)
    """
    if date is None:
        date = datetime.date.today()
    year = date.year
    # Epi-year boundaries for last year, this year and next year.
    start_dates = [_start_date_of_year(y) for y in (year - 1, year, year + 1)]
    start_date = start_dates[1]
    if start_dates[1] > date:
        start_date = start_dates[0]
    elif date >= start_dates[2]:
        start_date = start_dates[2]
    return Epiweek(
        year=(start_date + datetime.timedelta(days=7)).year,
        week=((date - start_date).days // 7) + 1,
        # isoweekday is 1..7 Mon..Sun; map to 1..7 Sun..Sat.
        day=(date.isoweekday() % 7) + 1
    )
|
Convert python date to Epiweek
|
def eth_sendTransaction(self, from_, to=None, gas=None,
                        gas_price=None, value=None, data=None,
                        nonce=None):
    """https://github.com/ethereum/wiki/wiki/JSON-RPC#eth_sendtransaction

    Send a transaction, returning its hash.

    :param from_: From account address
    :type from_: str
    :param to: To account address (optional)
    :type to: str
    :param gas: Gas amount for current transaction (optional)
    :type gas: int
    :param gas_price: Gas price for current transaction (optional)
    :type gas_price: int
    :param value: Amount of ether to send (optional)
    :type value: int
    :param data: Additional data for transaction (optional)
    :type data: hex
    :param nonce: Unique nonce for transaction (optional)
    :type nonce: int
    :return: txhash
    :rtype: str
    """
    transaction = {'from': from_}
    # Optional fields and how each is encoded for the JSON-RPC payload.
    optional = (
        ('to', to, lambda v: v),
        ('gas', gas, hex),
        ('gasPrice', gas_price, hex),
        # The node expects the amount in wei, hex encoded.
        ('value', value, lambda v: hex(ether_to_wei(v))),
        ('data', data, lambda v: v),
        ('nonce', nonce, hex),
    )
    for key, raw, encode in optional:
        if raw is not None:
            transaction[key] = encode(raw)
    return (yield from self.rpc_call('eth_sendTransaction', [transaction]))
|
https://github.com/ethereum/wiki/wiki/JSON-RPC#eth_sendtransaction
:param from_: From account address
:type from_: str
:param to: To account address (optional)
:type to: str
:param gas: Gas amount for current transaction (optional)
:type gas: int
:param gas_price: Gas price for current transaction (optional)
:type gas_price: int
:param value: Amount of ether to send (optional)
:type value: int
:param data: Additional data for transaction (optional)
:type data: hex
:param nonce: Unique nonce for transaction (optional)
:type nonce: int
:return: txhash
:rtype: str
|
def get_workflows() -> dict:
    """Get dict of ALL known workflow definitions.

    Builds a mapping of workflow id -> list of known versions from the
    ``workflow_definitions:*`` keys in the DB.

    Returns
        list[dict]
    """
    known = dict()
    for key in DB.get_keys("workflow_definitions:*"):
        # Key layout: "workflow_definitions:<id>:<version>".
        parts = key.split(':')
        known.setdefault(parts[1], []).append(parts[2])
    return known
|
Get dict of ALL known workflow definitions.
Returns
list[dict]
|
def input_validation(group_idx, a, size=None, order='C', axis=None,
                     ravel_group_idx=True, check_bounds=True):
    """ Do some fairly extensive checking of group_idx and a, trying to
    give the user as much help as possible with what is wrong. Also,
    convert ndim-indexing to 1d indexing.

    Parameters
    ----------
    group_idx : array-like of int
        Group labels; may be multidimensional for ndim output.
    a : scalar or array-like
        Values to be grouped.
    size : int, sequence of int, or None
        Expected output size; inferred from ``group_idx`` when None.
    order : {'C', 'F'}
        Memory order used when raveling multidimensional indices.
    axis : int or None
        When given, the 1d ``group_idx`` is applied along this axis of
        ``a`` and broadcast over the remaining axes.
    ravel_group_idx : bool
        Whether a multidimensional ``group_idx`` is flattened with
        ``ravel_multi_index``.
    check_bounds : bool
        Whether to validate that indices lie within ``[0, size)``.

    Returns
    -------
    tuple
        ``(group_idx, a, flat_size, ndim_idx, size)`` with flat (1d)
        indexing.
    """
    if not isinstance(a, (int, float, complex)):
        a = np.asanyarray(a)
    group_idx = np.asanyarray(group_idx)
    if not np.issubdtype(group_idx.dtype, np.integer):
        raise TypeError("group_idx must be of integer type")
    # This check works for multidimensional indexing as well
    if check_bounds and np.any(group_idx < 0):
        raise ValueError("negative indices not supported")
    ndim_idx = np.ndim(group_idx)
    ndim_a = np.ndim(a)
    # Deal with the axis arg: if present, then turn 1d indexing into
    # multi-dimensional indexing along the specified axis.
    if axis is None:
        if ndim_a > 1:
            raise ValueError("a must be scalar or 1 dimensional, use .ravel to"
                             " flatten. Alternatively specify axis.")
    elif axis >= ndim_a or axis < -ndim_a:
        raise ValueError("axis arg too large for np.ndim(a)")
    else:
        axis = axis if axis >= 0 else ndim_a + axis  # negative indexing
        if ndim_idx > 1:
            # TODO: we could support a sequence of axis values for multiple
            # dimensions of group_idx.
            raise NotImplementedError("only 1d indexing currently"
                                      "supported with axis arg.")
        elif a.shape[axis] != len(group_idx):
            raise ValueError("a.shape[axis] doesn't match length of group_idx.")
        elif size is not None and not np.isscalar(size):
            raise NotImplementedError("when using axis arg, size must be"
                                      "None or scalar.")
        else:
            # Create the broadcast-ready multidimensional indexing.
            # Note the user could do this themselves, so this is
            # very much just a convenience.
            size_in = np.max(group_idx) + 1 if size is None else size
            group_idx_in = group_idx
            group_idx = []
            size = []
            for ii, s in enumerate(a.shape):
                # Along ``axis`` use the supplied labels; every other
                # axis keeps its own plain 0..s-1 index, reshaped so the
                # pieces broadcast against each other.
                ii_idx = group_idx_in if ii == axis else np.arange(s)
                ii_shape = [1] * ndim_a
                ii_shape[ii] = s
                group_idx.append(ii_idx.reshape(ii_shape))
                size.append(size_in if ii == axis else s)
            # Use the indexing, and return. It's a bit simpler than
            # using trying to keep all the logic below happy
            group_idx = np.ravel_multi_index(group_idx, size, order=order,
                                             mode='raise')
            flat_size = np.prod(size)
            ndim_idx = ndim_a
            return group_idx.ravel(), a.ravel(), flat_size, ndim_idx, size
    if ndim_idx == 1:
        if size is None:
            size = np.max(group_idx) + 1
        else:
            if not np.isscalar(size):
                raise ValueError("output size must be scalar or None")
            if check_bounds and np.any(group_idx > size - 1):
                raise ValueError("one or more indices are too large for "
                                 "size %d" % size)
        flat_size = size
    else:
        # Multidimensional group_idx: one row per output dimension.
        if size is None:
            size = np.max(group_idx, axis=1) + 1
        elif np.isscalar(size):
            raise ValueError("output size must be of length %d"
                             % len(group_idx))
        elif len(size) != len(group_idx):
            raise ValueError("%d sizes given, but %d output dimensions "
                             "specified in index" % (len(size),
                                                     len(group_idx)))
        if ravel_group_idx:
            group_idx = np.ravel_multi_index(group_idx, size, order=order,
                                             mode='raise')
        flat_size = np.prod(size)
    if not (np.ndim(a) == 0 or len(a) == group_idx.size):
        raise ValueError("group_idx and a must be of the same length, or a"
                         " can be scalar")
    return group_idx, a, flat_size, ndim_idx, size
|
Do some fairly extensive checking of group_idx and a, trying to
give the user as much help as possible with what is wrong. Also,
convert ndim-indexing to 1d indexing.
|
def get_sea_names():
    '''
    Returns a list of NODC sea names

    source of list: http://www.nodc.noaa.gov/General/NODC-Archive/seanames.xml
    '''
    global _SEA_NAMES
    if _SEA_NAMES is None:
        # Parse the bundled XML once and cache the result module-wide.
        xml_text = get_data("cc_plugin_ncei", "data/seanames.xml")
        root = etree.fromstring(xml_text,
                                etree.XMLParser(remove_blank_text=True))
        names = {}
        for entry in root.findall('seaname'):
            code_el = entry.find('seacode')
            names[entry.find('seaname').text] = (
                code_el.text if code_el is not None else 'N/A')
        _SEA_NAMES = names
    return _SEA_NAMES
|
Returns a list of NODC sea names
source of list: http://www.nodc.noaa.gov/General/NODC-Archive/seanames.xml
|
def import_app(files, category, overwrite, id, name):
    """ Upload application from file.

    By default, file name will be used as application name, with "-vXX.YYY"
    suffix stripped.  Application is looked up by one of these classifiers,
    in order of priority: app-id, app-name, filename.
    If app-id is provided, looks up existing application and updates its
    manifest.  If app-id is NOT specified, looks up by name, or creates new
    application.
    """
    platform = _get_platform()
    org = platform.get_organization(QUBELL["organization"])
    if category:
        category = org.categories[category]
    regex = re.compile(r"^(.*?)(-v(\d+)|)\.[^.]+$")
    if (id or name) and len(files) > 1:
        raise Exception("--id and --name are supported only for single-file mode")
    for filename in files:
        click.echo("Importing " + filename, nl=False)
        # Compute the name per file.  NOTE: the original assigned the
        # derived name back to ``name``, so every file after the first
        # silently reused the first file's name.
        app_name = name
        if not app_name:
            match = regex.match(basename(filename))
            if not match:
                click.echo(_color("RED", "FAIL") + " unknown filename format")
                break
            app_name = match.group(1)
        click.echo(" => ", nl=False)
        app = None
        try:
            app = org.get_application(id=id, name=app_name)
            if app and not overwrite:
                click.echo("%s %s already exists %s" % (
                    app.id, _color("BLUE", app and app.name or app_name),
                    _color("RED", "FAIL")))
                break
        except NotFoundError:
            # Missing by name is fine (we will create it); missing by
            # explicit id is a hard failure.
            if id:
                click.echo("%s %s not found %s" % (
                    id or "", _color("BLUE", app and app.name or app_name),
                    _color("RED", "FAIL")))
                break
        click.echo(_color("BLUE", app and app.name or app_name) + " ", nl=False)
        try:
            # ``open`` instead of the Python-2-only ``file`` builtin.
            with open(filename, "r") as f:
                if app:
                    app.update(name=app.name,
                               category=category and category.id or app.category,
                               manifest=Manifest(content=f.read()))
                else:
                    app = org.application(id=id, name=app_name,
                                          manifest=Manifest(content=f.read()))
                    if category:
                        app.update(category=category.id)
            click.echo(app.id + _color("GREEN", " OK"))
        except IOError as e:
            # ``e.message`` does not exist on Python 3; str(e) is portable.
            click.echo(_color("RED", " FAIL") + " " + str(e))
            break
|
Upload application from file.
By default, file name will be used as application name, with "-vXX.YYY" suffix stripped.
Application is looked up by one of these classifiers, in order of priority:
app-id, app-name, filename.
If app-id is provided, looks up existing application and updates its manifest.
If app-id is NOT specified, looks up by name, or creates new application.
|
def print_params(self, allpars=False, loglevel=logging.INFO):
    """Print information about the model parameters (values,
    errors, bounds, scale).

    Parameters
    ----------
    allpars : bool
        When False (default) only sources that have at least one free
        parameter are printed; when True every source is printed.
    loglevel : int
        Logging level at which the formatted table is emitted.
    """
    pars = self.get_params()
    o = '\n'
    o += '%4s %-20s%10s%10s%10s%10s%10s%5s\n' % (
        'idx', 'parname', 'value', 'error',
        'min', 'max', 'scale', 'free')
    o += '-' * 80 + '\n'
    # Group the flat parameter list by owning source, keeping order.
    src_pars = collections.OrderedDict()
    for p in pars:
        src_pars.setdefault(p['src_name'], [])
        src_pars[p['src_name']] += [p]
    # Names of sources that have at least one free parameter
    # (may contain duplicates; only used for membership tests).
    free_sources = []
    for k, v in src_pars.items():
        for p in v:
            if not p['free']:
                continue
            free_sources += [k]
    for k, v in src_pars.items():
        if not allpars and k not in free_sources:
            continue
        o += '%s\n' % k
        for p in v:
            o += '%4i %-20.19s' % (p['idx'], p['par_name'])
            o += '%10.3g%10.3g' % (p['value'], p['error'])
            o += '%10.3g%10.3g%10.3g' % (p['min'], p['max'],
                                         p['scale'])
            # Free parameters are flagged with an asterisk.
            if p['free']:
                o += ' *'
            else:
                o += '  '
            o += '\n'
    self.logger.log(loglevel, o)
|
Print information about the model parameters (values,
errors, bounds, scale).
|
def _content_blocks(self, r):
"""Number of content blocks in block row `r`."""
return (self._block_rows - self._left_zero_blocks(r)
- self._right_zero_blocks(r))
|
Number of content blocks in block row `r`.
|
def reserve_udp_port(self, port, project):
    """
    Reserve a specific UDP port number

    :param port: UDP port number
    :param project: Project instance
    """
    low, high = self._udp_port_range
    if port in self._used_udp_ports:
        raise HTTPConflict(text="UDP port {} already in use on host {}".format(port, self._console_host))
    if not low <= port <= high:
        raise HTTPConflict(text="UDP port {} is outside the range {}-{}".format(port, low, high))
    self._used_udp_ports.add(port)
    # The project tracks the port too so it can be freed on project close.
    project.record_udp_port(port)
    log.debug("UDP port {} has been reserved".format(port))
|
Reserve a specific UDP port number
:param port: UDP port number
:param project: Project instance
|
def delete(self, num_iid, properties, session, item_price=None, item_num=None, lang=None):
    '''taobao.item.sku.delete - delete a SKU.

    Deletes one SKU's data; the SKU to delete is matched by its
    ``properties`` string.

    :param num_iid: numeric item id owning the SKU
    :param properties: property string used to locate the SKU
    :param session: authorized TOP session
    :param item_price: optional item price to record
    :param item_num: optional item quantity to record
    :param lang: optional language code
    :returns: self, populated from the API response
    '''
    request = TOPRequest('taobao.item.sku.delete')
    request['num_iid'] = num_iid
    request['properties'] = properties
    # ``is not None`` (rather than ``!= None``) so falsy-but-valid
    # values such as 0 are still forwarded.
    if item_num is not None:
        request['item_num'] = item_num
    if item_price is not None:
        request['item_price'] = item_price
    if lang is not None:
        request['lang'] = lang
    self.create(self.execute(request, session)['sku'])
    return self
|
taobao.item.sku.delete 删除SKU
删除一个sku的数据 需要删除的sku通过属性properties进行匹配查找
|
def get_description(self):
    """Get the description of a GObject."""
    gobject = ffi.cast('VipsObject *', self.pointer)
    description = vips_lib.vips_object_get_description(gobject)
    return _to_string(description)
|
Get the description of a GObject.
|
def fetch_one(self, *args, **kwargs):
    """
    Return the single document matching the structure of the object.

    `fetch_one()` takes the same arguments as the
    pymongo.collection.find method.  Raises MultipleResultsFound when
    more than one document matches, and returns None when nothing
    matches.  The query is launched against the db and collection of
    the object.
    """
    cursor = self.fetch(*args, **kwargs)
    matches = cursor.count()
    if matches > 1:
        raise MultipleResultsFound("%s results found" % matches)
    if matches == 1:
        # return self(cursor.next(), fetched_fields=kwargs.get("projection"))
        return next(cursor)
|
return one document which match the structure of the object
`fetch_one()` takes the same arguments than the the pymongo.collection.find method.
If multiple documents are found, raise a MultipleResultsFound exception.
If no document is found, return None
The query is launch against the db and collection of the object.
|
def with_local_env_strategy(molecule, strategy, reorder=True,
                            extend_structure=True):
    """
    Constructor for MoleculeGraph, using a strategy
    from :Class: `pymatgen.analysis.local_env`.

    :param molecule: Molecule object
    :param strategy: an instance of a
        :Class: `pymatgen.analysis.local_env.NearNeighbors` object
    :param reorder: bool, representing if graph nodes need to be reordered
        following the application of the local_env strategy
    :param extend_structure: If True (default), then a large artificial box
        will be placed around the Molecule, because some strategies assume
        periodic boundary conditions.
    :return: mg, a MoleculeGraph
    """
    mg = MoleculeGraph.with_empty_graph(molecule, name="bonds",
                                        edge_weight_name="weight",
                                        edge_weight_units="")

    # NearNeighbor classes only (generally) work with structures
    # molecules have to be boxed first
    coords = molecule.cart_coords

    if extend_structure:
        # 100 units of padding per axis keeps periodic images far from
        # the real atoms, so spurious cross-boundary bonds are unlikely.
        a = max(coords[:, 0]) - min(coords[:, 0]) + 100
        b = max(coords[:, 1]) - min(coords[:, 1]) + 100
        c = max(coords[:, 2]) - min(coords[:, 2]) + 100

        molecule = molecule.get_boxed_structure(a, b, c, no_cross=True)

    for n in range(len(molecule)):
        neighbors = strategy.get_nn_info(molecule, n)
        for neighbor in neighbors:

            # all bonds in molecules should not cross
            # (artificial) periodic boundaries
            if not np.array_equal(neighbor['image'], [0, 0, 0]):
                continue

            # local_env will always try to add two edges
            # for any one bond, one from site u to site v
            # and another form site v to site u: this is
            # harmless, so warn_duplicates=False
            mg.add_edge(from_index=n,
                        to_index=neighbor['site_index'],
                        weight=neighbor['weight'],
                        warn_duplicates=False)

    if reorder:
        # Reverse order of nodes to match with molecule
        # (two-step relabel: i -> n-i, then shift by -1, i.e. i -> n-1-i).
        n = len(mg.molecule)
        mapping = {i: (n-i) for i in range(n)}
        mapping = {i: (j-1) for i, j in mapping.items()}
        mg.graph = nx.relabel_nodes(mg.graph, mapping)

    # Drop the duplicate parallel edges (multigraph key != 0) that
    # local_env added when it visited each bond from both endpoints.
    duplicates = []
    for edge in mg.graph.edges:
        if edge[2] != 0:
            duplicates.append(edge)

    for duplicate in duplicates:
        mg.graph.remove_edge(duplicate[0], duplicate[1], key=duplicate[2])

    mg.set_node_attributes()
    return mg
|
Constructor for MoleculeGraph, using a strategy
from :Class: `pymatgen.analysis.local_env`.
:param molecule: Molecule object
:param strategy: an instance of a
:Class: `pymatgen.analysis.local_env.NearNeighbors` object
:param reorder: bool, representing if graph nodes need to be reordered
following the application of the local_env strategy
:param extend_structure: If True (default), then a large artificial box
will be placed around the Molecule, because some strategies assume
periodic boundary conditions.
:return: mg, a MoleculeGraph
|
def removeBinder(self, name):
    """Remove a binder from a table.

    :param name: tag name of the binder element to remove
    :returns: True when a binder was found and removed, else False
    """
    root = self.etree
    t_bindings = root.find('bindings')
    t_binder = t_bindings.find(name)
    # Compare against None explicitly: an ElementTree Element with no
    # children is falsy, so the original ``if t_binder:`` silently
    # skipped removal of childless binder elements.
    if t_binder is not None:
        t_bindings.remove(t_binder)
        return True
    return False
|
Remove a binder from a table
|
def clubConsumables(self, fast=False):
    """Return all consumables from club."""
    rc = self.__request__('GET', 'club/consumables/development')
    # Replay the page-view pin events the web client would emit.
    for page in ('Hub - Club', 'Club - Consumables',
                 'Club - Consumables - List View'):
        self.pin.send([self.pin.event('page_view', page)], fast=fast)
    return [itemParse(i) for i in rc.get('itemData', ())]
|
Return all consumables from club.
|
def send_rpc(self, address, rpc_id, call_payload, timeout=3.0):
    """Send an rpc to our connected device.

    The device must already be connected and the rpc interface open. This
    method will synchronously send an RPC and wait for the response. Any
    RPC errors will be raised as exceptions and if there were no errors, the
    RPC's response payload will be returned as a binary bytearray.

    See :meth:`AbstractDeviceAdapter.send_rpc` for documentation of the possible
    exceptions that can be raised here.

    Args:
        address (int): The tile address containing the RPC
        rpc_id (int): The ID of the RPC that we wish to call.
        call_payload (bytes): The payload containing encoded arguments for the
            RPC.
        timeout (float): The maximum number of seconds to wait for the RPC to
            finish. Defaults to 3s.

    Returns:
        bytearray: The RPC's response payload.
    """
    if not self.connected:
        raise HardwareError("Cannot send an RPC if we are not in a connected state")
    # Normalize an explicit ``timeout=None`` back to the 3s default.
    if timeout is None:
        timeout = 3.0
    status = -1
    payload = b''
    recording = None
    # A previously interrupted connection may be recoverable; try to
    # re-establish it before sending.
    if self.connection_interrupted:
        self._try_reconnect()
    if self._record is not None:
        # Capture the outgoing request for later replay/diagnostics.
        recording = _RecordedRPC(self.connection_string, address, rpc_id, call_payload)
        recording.start()
    try:
        payload = self._loop.run_coroutine(self.adapter.send_rpc(0, address, rpc_id, call_payload, timeout))
        status, payload = pack_rpc_response(payload, None)
    except VALID_RPC_EXCEPTIONS as exc:
        # Known RPC-level failures are encoded into the status/payload
        # pair rather than propagated directly from here.
        status, payload = pack_rpc_response(payload, exc)
    if self._record is not None:
        recording.finish(status, payload)
        self._recording.append(recording)
    if self.connection_interrupted:
        self._try_reconnect()
    # Decode the packed response; re-raises if status encodes an error.
    return unpack_rpc_response(status, payload, rpc_id, address)
|
Send an rpc to our connected device.
The device must already be connected and the rpc interface open. This
method will synchronously send an RPC and wait for the response. Any
RPC errors will be raised as exceptions and if there were no errors, the
RPC's response payload will be returned as a binary bytearray.
See :meth:`AbstractDeviceAdapter.send_rpc` for documentation of the possible
exceptions that can be raised here.
Args:
address (int): The tile address containing the RPC
rpc_id (int): The ID of the RPC that we wish to call.
call_payload (bytes): The payload containing encoded arguments for the
RPC.
timeout (float): The maximum number of seconds to wait for the RPC to
finish. Defaults to 3s.
Returns:
bytearray: The RPC's response payload.
|
def create(cls, community, record, user=None, expires_at=None,
           notify=True):
    """Create a record inclusion request to a community.

    :param community: Community object.
    :param record: Record API object.
    :param user: User creating the request (optional).
    :param expires_at: Time after which the request expires and shouldn't
        be resolved anymore.
    :param notify: Forwarded with the creation signal.
    :raises InclusionRequestExpiryTimeError: when ``expires_at`` is in
        the past.
    :raises InclusionRequestObsoleteError: when the record is already in
        the community.
    :raises InclusionRequestExistsError: when an equivalent request
        already exists.
    """
    if expires_at and expires_at < datetime.utcnow():
        raise InclusionRequestExpiryTimeError(
            community=community, record=record)
    if community.has_record(record):
        raise InclusionRequestObsoleteError(
            community=community, record=record)
    try:
        # Create inclusion request inside a savepoint so a constraint
        # violation does not poison the outer transaction.
        with db.session.begin_nested():
            request = cls(
                id_community=community.id,
                id_record=record.id,
                user=user,
                expires_at=expires_at,
            )
            db.session.add(request)
    except (IntegrityError, FlushError):
        # Unique-constraint violation: the request already exists.
        raise InclusionRequestExistsError(
            community=community, record=record)
    # Notify listeners that the request was created.
    inclusion_request_created.send(
        current_app._get_current_object(),
        request=request,
        notify=notify,
    )
    return request
|
Create a record inclusion request to a community.
:param community: Community object.
:param record: Record API object.
:param expires_at: Time after which the request expires and shouldn't
be resolved anymore.
|
def _parse_extra(self, fp):
""" Parse and store the config comments and create maps for dot notion lookup """
comment = ''
section = ''
fp.seek(0)
for line in fp:
line = line.rstrip()
if not line:
if comment:
comment += '\n'
continue
if line.startswith('#'): # Comment
comment += line + '\n'
continue
if line.startswith('['): # Section
section = line.strip('[]')
self._add_dot_key(section)
if comment:
self._comments[section] = comment.rstrip()
elif CONFIG_KEY_RE.match(line): # Config
key = line.split('=', 1)[0].strip()
self._add_dot_key(section, key)
if comment:
self._comments[(section, key)] = comment.rstrip()
comment = ''
if comment:
self._comments[self.LAST_COMMENT_KEY] = comment
|
Parse and store the config comments and create maps for dot notion lookup
|
def plotfft(s, fmax, doplot=False):
    """ This functions computes the fft of a signal, returning the frequency
    and their magnitude values.

    Parameters
    ----------
    s: array-like
        the input signal.
    fmax: int
        the sampling frequency.
    doplot: boolean
        a variable to indicate whether the plot is done or not.

    Returns
    -------
    f: array-like
        the frequency values (xx axis)
    fs: array-like
        the amplitude of the frequency values (yy axis)
    """
    half = len(s) // 2
    fs = abs(np.fft.fft(s))
    # ``num`` must be an int: the original passed ``len(s) / 2`` (a float
    # on Python 3), which modern numpy rejects.  Also use np.linspace so
    # the function does not rely on a bare ``linspace`` name.
    f = np.linspace(0, fmax / 2, half)
    if doplot:
        #pl.plot(f[1:half], fs[1:half])
        pass
    # Drop the DC bin and return copies of the positive-frequency halves.
    return (f[1:half].copy(), fs[1:half].copy())
|
This functions computes the fft of a signal, returning the frequency
and their magnitude values.
Parameters
----------
s: array-like
the input signal.
fmax: int
the sampling frequency.
doplot: boolean
a variable to indicate whether the plot is done or not.
Returns
-------
f: array-like
the frequency values (xx axis)
fs: array-like
the amplitude of the frequency values (yy axis)
|
def _publish(self, msg):
    """Publish a message in the queue, retrying on failure.

    :param msg: Object which represents the message to be sent in
        the queue. Note that this object should be serializable in the
        configured format (by default JSON).
    """
    connection = self._connection.clone()
    # ``ensure`` wraps publish with the retry/errback policy.
    safe_publish = connection.ensure(self.producer, self.producer.publish,
                                     errback=self.__error_callback,
                                     max_retries=MQ_PRODUCER_MAX_RETRIES)
    safe_publish(json.dumps(msg), exchange=self._exchange,
                 routing_key=self._routing_key, declare=[self._queue])
    logging.debug('Publisher: message sent: %s', msg)
|
Publish, handling retries, a message in the queue.
:param msg: Object which represents the message to be sent in
the queue. Note that this object should be serializable in the
configured format (by default JSON).
|
def cli_form(self, *args):
    """Display a schemata's form definition."""
    target = args[0]
    if target == '*':
        # Wildcard: dump every known schema's form.
        for schema in schemastore:
            self.log(schema, ':', schemastore[schema]['form'], pretty=True)
    else:
        self.log(schemastore[target]['form'], pretty=True)
|
Display a schemata's form definition
|
def run(self):
    """
    Thread entry point.

    Runs the parent class' ``run`` and, unless ``no_ack`` is set,
    acknowledges the message once the task finishes (even on error).
    :return:
    """
    try:
        super().run()
    except Exception:
        print(trace_info())
    finally:
        if not self.no_ack:
            # Ack is required after the task completes when no_ack is False.
            self.ch.basic_ack(delivery_tag=self.method.delivery_tag)
|
线程启动
:return:
|
def release(self, resource):
    """release(resource)

    Returns a resource to the pool. Most of the time you will want
    to use :meth:`transaction`, but if you use :meth:`acquire`,
    you must release the acquired resource back to the pool when
    finished. Failure to do so could result in deadlock.

    :param resource: Resource
    """
    with self.releaser:
        # Mark it free, then wake every thread blocked in acquire().
        resource.claimed = False
        self.releaser.notify_all()
|
release(resource)
Returns a resource to the pool. Most of the time you will want
to use :meth:`transaction`, but if you use :meth:`acquire`,
you must release the acquired resource back to the pool when
finished. Failure to do so could result in deadlock.
:param resource: Resource
|
def extent_string_to_array(extent_text):
    """Convert an extent string to an array.

    .. versionadded: 2.2.0

    :param extent_text: String representing an extent e.g.
        109.829170982, -8.13333290561, 111.005344795, -7.49226294379
    :type extent_text: str

    :returns: A list of floats, or None
    :rtype: list, None
    """
    parts = extent_text.replace(' ', '').split(',')
    if len(parts) != 4:
        LOGGER.error(
            'Extent need exactly 4 value but got %s instead' % len(parts))
        return None
    # parse the value to float type
    try:
        return [float(part) for part in parts]
    except ValueError as e:
        LOGGER.error(str(e))
        return None
|
Convert an extent string to an array.
.. versionadded: 2.2.0
:param extent_text: String representing an extent e.g.
109.829170982, -8.13333290561, 111.005344795, -7.49226294379
:type extent_text: str
:returns: A list of floats, or None
:rtype: list, None
|
def columns(self):
    """Return names of all the addressable columns (including foreign
    keys) referenced in the user-supplied model."""
    return ([col['name'] for col in self.column_definitions]
            + [col['name'] for col in self.foreign_key_definitions])
|
Return names of all the addressable columns (including foreign keys) referenced in user supplied model
|
def set_back_led_output(self, value):
    """Set the back LED brightness; ``value`` can be between 0x00 and 0xFF."""
    command = request.SetBackLEDOutput(self.seq, value)
    return self.write(command)
|
value can be between 0x00 and 0xFF
|
def setting_ctx(num_gpus):
    """
    Description : choose the mxnet compute contexts.

    Returns one GPU context per requested GPU, or a single CPU
    context when ``num_gpus`` is zero or negative.
    """
    if num_gpus > 0:
        return [mx.gpu(i) for i in range(num_gpus)]
    return [mx.cpu()]
|
Description : set gpu module
|
def _convert_from_thrift_endpoint(self, thrift_endpoint):
    """Accepts a thrift decoded endpoint and converts it to an Endpoint.

    :param thrift_endpoint: thrift encoded endpoint
    :type thrift_endpoint: thrift endpoint
    :returns: decoded endpoint
    :rtype: Encoding
    """
    # Thrift stores the port as a signed 16-bit value; reinterpret the
    # bits as unsigned.
    port = struct.unpack('H', struct.pack('h', thrift_endpoint.port))[0]
    ipv4 = None
    if thrift_endpoint.ipv4 != 0:
        ipv4 = socket.inet_ntop(
            socket.AF_INET,
            struct.pack('!i', thrift_endpoint.ipv4),
        )
    ipv6 = (socket.inet_ntop(socket.AF_INET6, thrift_endpoint.ipv6)
            if thrift_endpoint.ipv6 else None)
    return Endpoint(
        service_name=thrift_endpoint.service_name,
        ipv4=ipv4,
        ipv6=ipv6,
        port=port,
    )
|
Accepts a thrift decoded endpoint and converts it to an Endpoint.
:param thrift_endpoint: thrift encoded endpoint
:type thrift_endpoint: thrift endpoint
:returns: decoded endpoint
:rtype: Encoding
|
def get_authority(config, metrics, rrset_channel, **kwargs):
    """Build and return a GCEAuthority client.

    A factory function that validates configuration and creates a
    proper GCEAuthority via its builder.

    Args:
        config (dict): GCEAuthority related configuration.
        metrics (obj): :interface:`IMetricRelay` implementation.
        rrset_channel (asyncio.Queue): Queue used for sending messages
            to the reconciler plugin.
        kwargs (dict): Additional keyword arguments to pass to the
            Authority.
    Returns:
        A :class:`GCEAuthority` instance.
    """
    return authority.GCEAuthorityBuilder(
        config, metrics, rrset_channel, **kwargs).build_authority()
|
Get a GCEAuthority client.
A factory function that validates configuration and creates a
proper GCEAuthority.
Args:
config (dict): GCEAuthority related configuration.
metrics (obj): :interface:`IMetricRelay` implementation.
rrset_channel (asyncio.Queue): Queue used for sending messages
to the reconciler plugin.
kw (dict): Additional keyword arguments to pass to the
Authority.
Returns:
A :class:`GCEAuthority` instance.
|
def slots_class_sealer(fields, defaults):
    """
    This sealer makes a container class that uses ``__slots__`` (it uses
    :func:`class_sealer` internally).

    The resulting class has a metaclass that forcibly sets ``__slots__`` on
    subclasses.

    :param fields: Sequence of field names used as the slots.
    :param defaults: Defaults passed through to :func:`class_sealer`.
    :returns: The sealed container class.
    """
    class __slots_meta__(type):
        def __new__(mcs, name, bases, namespace):
            # Force __slots__ onto any subclass that doesn't declare one,
            # otherwise instances would silently grow a __dict__ again.
            if "__slots__" not in namespace:
                namespace["__slots__"] = fields
            return type.__new__(mcs, name, bases, namespace)
    # Base class carrying the metaclass; empty __slots__ so the slots
    # themselves are declared by class_sealer / subclasses.
    class __slots_base__(_with_metaclass(__slots_meta__, object)):
        __slots__ = ()
        def __init__(self, *args, **kwargs):
            pass
    return class_sealer(fields, defaults, base=__slots_base__)
|
This sealer makes a container class that uses ``__slots__`` (it uses :func:`class_sealer` internally).
The resulting class has a metaclass that forcibly sets ``__slots__`` on subclasses.
|
def process_gene_interaction(self, limit):
    """
    The gene interaction file includes identified interactions,
    that are between two or more gene (products).
    In the case of interactions with >2 genes, this requires creating
    groups of genes that are involved in the interaction.
    From the wormbase help list: In the example WBInteraction000007779
    it would likely be misleading to suggest that lin-12 interacts with
    (suppresses in this case) smo-1 ALONE or that lin-12 suppresses let-60
    ALONE; the observation in the paper; see Table V in paper PMID:15990876
    was that a lin-12 allele (heterozygous lin-12(n941/+)) could suppress
    the "multivulva" phenotype induced synthetically by simultaneous
    perturbation of BOTH smo-1 (by RNAi) AND let-60 (by the n2021 allele).
    So this is necessarily a three-gene interaction.
    Therefore, we can create groups of genes based on their "status" of
    Effector | Effected.
    Status: IN PROGRESS

    NOTE: currently only pairwise (exactly 2 participant) interactions are
    emitted; rows with any other participant count are skipped.

    :param limit: maximum number of data lines to process (ignored in
        test mode; None means no limit)
    :return: None
    """
    raw = '/'.join((self.rawdir, self.files['gene_interaction']['file']))
    if self.test_mode:
        graph = self.testgraph
    else:
        graph = self.graph
    model = Model(graph)
    LOG.info("Processing gene interaction associations")
    line_counter = 0
    with gzip.open(raw, 'rb') as csvfile:
        filereader = csv.reader(
            io.TextIOWrapper(csvfile, newline=""), delimiter='\t',
            quotechar="'")
        for row in filereader:
            line_counter += 1
            # header/comment rows begin with '#'
            if re.match(r'#', ''.join(row)):
                continue
            # first 5 columns are fixed; the remainder describe
            # participants, 3 columns per participant
            (interaction_num, interaction_type, interaction_subtype,
             summary, citation) = row[0:5]
            # print(row)
            interaction_id = 'WormBase:'+interaction_num

            # TODO deal with subtypes
            # NOTE(review): unrecognized types leave interaction_type_id
            # as None, which still flows into the association below
            interaction_type_id = None
            if interaction_type == 'Genetic':
                interaction_type_id = self.globaltt['genetically interacts with']
            elif interaction_type == 'Physical':
                interaction_type_id = self.globaltt['molecularly_interacts_with']
            elif interaction_type == 'Regulatory':
                interaction_type_id = self.globaltt['regulates']
            else:
                LOG.info(
                    "An interaction type I don't understand %s", interaction_type)
            # true division: equals 2.0 for pairwise rows, which still
            # compares equal to the int 2
            num_interactors = (len(row) - 5) / 3
            if num_interactors != 2:
                LOG.info(
                    "Skipping interactions with !=2 participants:\n %s",
                    str(row))
                continue

            gene_a_id = 'WormBase:'+row[5]
            gene_b_id = 'WormBase:'+row[8]

            if self.test_mode \
                    and gene_a_id not in self.test_ids['gene'] \
                    and gene_b_id not in self.test_ids['gene']:
                continue

            assoc = InteractionAssoc(
                graph, self.name, gene_a_id, gene_b_id, interaction_type_id)
            assoc.set_association_id(interaction_id)
            assoc.add_association_to_graph()
            assoc_id = assoc.get_association_id()
            # citation is not a pmid or WBref - get this some other way
            model.addDescription(assoc_id, summary)
            if not self.test_mode and limit is not None and line_counter > limit:
                break
    return
|
The gene interaction file includes identified interactions,
that are between two or more gene (products).
In the case of interactions with >2 genes, this requires creating
groups of genes that are involved in the interaction.
From the wormbase help list: In the example WBInteraction000007779
it would likely be misleading to suggest that lin-12 interacts with
(suppresses in this case) smo-1 ALONE or that lin-12 suppresses let-60
ALONE; the observation in the paper; see Table V in paper PMID:15990876
was that a lin-12 allele (heterozygous lin-12(n941/+)) could suppress
the "multivulva" phenotype induced synthetically by simultaneous
perturbation of BOTH smo-1 (by RNAi) AND let-60 (by the n2021 allele).
So this is necessarily a three-gene interaction.
Therefore, we can create groups of genes based on their "status" of
Effector | Effected.
Status: IN PROGRESS
:param limit:
:return:
|
def _read_unquote(ctx: ReaderContext) -> LispForm:
    """Read an unquoted form and handle any special logic of unquoting.

    Unquoted forms can take two, well... forms:

    `~form` is read as `(unquote form)` and any nested forms are read
    literally and passed along to the compiler untouched.

    `~@form` is read as `(unquote-splicing form)` which tells the compiler
    to splice in the contents of a sequential form such as a list or
    vector into the final compiled form. This helps macro writers create
    longer forms such as function calls, function bodies, or data structures
    with the contents of another collection they have."""
    leading = ctx.reader.advance()
    assert leading == "~"

    with ctx.unquoted():
        # A following '@' selects the splicing variant; consume it so the
        # next read starts at the actual form.
        splicing = ctx.reader.peek() == "@"
        if splicing:
            ctx.reader.advance()
        marker = _UNQUOTE_SPLICING if splicing else _UNQUOTE
        inner = _read_next_consuming_comment(ctx)
        return llist.l(marker, inner)
|
Read an unquoted form and handle any special logic of unquoting.
Unquoted forms can take two, well... forms:
`~form` is read as `(unquote form)` and any nested forms are read
literally and passed along to the compiler untouched.
`~@form` is read as `(unquote-splicing form)` which tells the compiler
to splice in the contents of a sequential form such as a list or
vector into the final compiled form. This helps macro writers create
longer forms such as function calls, function bodies, or data structures
with the contents of another collection they have.
|
def mv_normal_cov_like(x, mu, C):
    R"""
    Multivariate normal log-likelihood parameterized by a covariance
    matrix.

    .. math::
        f(x \mid \pi, C) = \frac{1}{(2\pi|C|)^{1/2}} \exp\left\{ -\frac{1}{2} (x-\mu)^{\prime}C^{-1}(x-\mu) \right\}

    :Parameters:
      - `x` : (n,k)
      - `mu` : (k) Location parameter.
      - `C` : (k,k) Positive definite covariance matrix.

    .. seealso:: :func:`mv_normal_like`, :func:`mv_normal_chol_like`
    """
    # TODO: Vectorize in Fortran
    if np.ndim(x) <= 1:
        # Single observation: evaluate directly.
        return flib.cov_mvnorm(x, mu, C)
    # Multiple observations: sum the per-row log-likelihoods.
    return np.sum([flib.cov_mvnorm(row, mu, C) for row in x])
|
R"""
Multivariate normal log-likelihood parameterized by a covariance
matrix.
.. math::
f(x \mid \pi, C) = \frac{1}{(2\pi|C|)^{1/2}} \exp\left\{ -\frac{1}{2} (x-\mu)^{\prime}C^{-1}(x-\mu) \right\}
:Parameters:
- `x` : (n,k)
- `mu` : (k) Location parameter.
- `C` : (k,k) Positive definite covariance matrix.
.. seealso:: :func:`mv_normal_like`, :func:`mv_normal_chol_like`
|
def reset(self):
    """
    Reset all fields of this object to class defaults
    """
    # Snapshot the items so the dict is not consulted while we reassign.
    for name, value in list(self.__dict__.items()):
        # Attributes with a leading underscore are considered internal
        # and are left alone.
        if name.startswith("_"):
            continue
        # Truthy values are replaced with a fresh default instance of
        # their own type; falsy values (None, 0, "", empty containers)
        # are kept as-is, matching the original short-circuit behavior.
        setattr(self, name, value.__class__() if value else value)
|
Reset all fields of this object to class defaults
|
def probe(self, ipaddr=None):
    """ Probe given address for bulb. """
    # No explicit target means probe everything via broadcast.
    target = self._broadcast_addr if ipaddr is None else ipaddr
    self._send_command({"payloadtype": PayloadType.GET,
                        "target": target})
|
Probe given address for bulb.
|
def check_packed_data(self, ds):
    """
    8.1 Simple packing may be achieved through the use of the optional NUG defined attributes scale_factor and
    add_offset. After the data values of a variable have been read, they are to be multiplied by the scale_factor,
    and have add_offset added to them.

    The units of a variable should be representative of the unpacked data.

    If the scale_factor and add_offset attributes are of the same data type as the associated variable, the unpacked
    data is assumed to be of the same data type as the packed data. However, if the scale_factor and add_offset
    attributes are of a different data type from the variable (containing the packed data) then the unpacked data
    should match the type of these attributes, which must both be of type float or both be of type double. An additional
    restriction in this case is that the variable containing the packed data must be of type byte, short or int. It is
    not advised to unpack an int into a float as there is a potential precision loss.

    When data to be packed contains missing values the attributes that indicate missing values (_FillValue, valid_min,
    valid_max, valid_range) must be of the same data type as the packed data.

    :param netCDF4.Dataset ds: An open netCDF dataset
    :rtype: list
    :return: List of results
    """
    ret_val = []
    for name, var in ds.variables.items():
        add_offset = getattr(var, 'add_offset', None)
        scale_factor = getattr(var, 'scale_factor', None)
        # NOTE(review): a numeric 0 scale_factor/add_offset is falsy and is
        # skipped here along with absent attributes; kept to preserve
        # existing behavior.
        if not (add_offset or scale_factor):
            continue

        valid = True
        reasoning = []

        # if only one of these attributes is defined, assume they
        # are the same type (value doesn't matter here)
        if not add_offset:
            add_offset = scale_factor
        if not scale_factor:
            scale_factor = add_offset

        if type(add_offset) != type(scale_factor):
            valid = False
            reasoning.append("Attributes add_offset and scale_factor have different data type.")
        elif type(scale_factor) != var.dtype.type:
            # Check both attributes are type float or double
            if not isinstance(scale_factor, (float, np.floating)):
                valid = False
                reasoning.append("Attributes add_offset and scale_factor are not of type float or double.")
            else:
                # Check variable type is byte, short or int.
                # np.int (an alias for the builtin int) was removed in
                # NumPy 1.24; use the builtin plus the sized integer
                # scalar types instead.
                if var.dtype.type not in (int, np.int8, np.int16,
                                          np.int32, np.int64):
                    valid = False
                    reasoning.append("Variable is not of type byte, short, or int.")

        result = Result(BaseCheck.MEDIUM, valid, self.section_titles['8.1'], reasoning)
        ret_val.append(result)
        reasoning = []

        valid = True
        # Attributes that mark missing data must have exactly the same
        # type as the packed variable itself.
        if hasattr(var, "_FillValue"):
            if var._FillValue.dtype.type != var.dtype.type:
                valid = False
                reasoning.append("Type of %s:_FillValue attribute (%s) does not match variable type (%s)" %
                                 (name, var._FillValue.dtype.type, var.dtype.type))
        if hasattr(var, "valid_min"):
            if var.valid_min.dtype.type != var.dtype.type:
                valid = False
                # fixed: message was missing the ':' separator present in
                # the sibling messages
                reasoning.append("Type of %s:valid_min attribute (%s) does not match variable type (%s)" %
                                 (name, var.valid_min.dtype.type, var.dtype.type))
        if hasattr(var, "valid_max"):
            if var.valid_max.dtype.type != var.dtype.type:
                valid = False
                reasoning.append("Type of %s:valid_max attribute (%s) does not match variable type (%s)" %
                                 (name, var.valid_max.dtype.type, var.dtype.type))
        if hasattr(var, "valid_range"):
            if var.valid_range.dtype.type != var.dtype.type:
                valid = False
                reasoning.append("Type of %s:valid_range attribute (%s) does not match variable type (%s)" %
                                 (name, var.valid_range.dtype.type, var.dtype.type))

        result = Result(BaseCheck.MEDIUM,
                        valid,
                        self.section_titles['8.1'],
                        reasoning)
        ret_val.append(result)

    return ret_val
|
8.1 Simple packing may be achieved through the use of the optional NUG defined attributes scale_factor and
add_offset. After the data values of a variable have been read, they are to be multiplied by the scale_factor,
and have add_offset added to them.
The units of a variable should be representative of the unpacked data.
If the scale_factor and add_offset attributes are of the same data type as the associated variable, the unpacked
data is assumed to be of the same data type as the packed data. However, if the scale_factor and add_offset
attributes are of a different data type from the variable (containing the packed data) then the unpacked data
should match the type of these attributes, which must both be of type float or both be of type double. An additional
restriction in this case is that the variable containing the packed data must be of type byte, short or int. It is
not advised to unpack an int into a float as there is a potential precision loss.
When data to be packed contains missing values the attributes that indicate missing values (_FillValue, valid_min,
valid_max, valid_range) must be of the same data type as the packed data.
:param netCDF4.Dataset ds: An open netCDF dataset
:rtype: list
:return: List of results
|
def get_last_content(request, page_id):
    """Get the latest content for a particular type"""
    content_type = request.GET.get('content_type')
    language_id = request.GET.get('language_id')
    page = get_object_or_404(Page, pk=page_id)
    tmpl = template.loader.get_template(page.get_template())
    for placeholder in get_placeholders(page.get_template()):
        if placeholder.name != content_type:
            continue
        ctx = RequestContext(request, {
            'current_page': page,
            'lang': language_id
        })
        # Rendering must happen while the template is bound to the context.
        with ctx.bind_template(tmpl.template):
            return HttpResponse(placeholder.render(ctx))
    # No placeholder matched the requested content type.
    raise Http404
|
Get the latest content for a particular type
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.