code
stringlengths 75
104k
| docstring
stringlengths 1
46.9k
|
|---|---|
def people_findByEmail(email):
"""Returns User object."""
method = 'flickr.people.findByEmail'
data = _doget(method, find_email=email)
user = User(data.rsp.user.id, username=data.rsp.user.username.text)
return user
|
Returns User object.
|
def _init_groups(self):
"""
初始化group数据
:return:
"""
for group_id, conf in self.group_conf.items():
self.parent_input_dict[group_id] = Queue(conf.get('input_max_size', 0))
self.parent_output_dict[group_id] = Queue(conf.get('output_max_size', 0))
|
初始化group数据
:return:
|
def notch(self, frequency, type='iir', filtfilt=True, **kwargs):
"""Notch out a frequency in this `TimeSeries`.
Parameters
----------
frequency : `float`, `~astropy.units.Quantity`
frequency (default in Hertz) at which to apply the notch
type : `str`, optional
type of filter to apply, currently only 'iir' is supported
**kwargs
other keyword arguments to pass to `scipy.signal.iirdesign`
Returns
-------
notched : `TimeSeries`
a notch-filtered copy of the input `TimeSeries`
See Also
--------
TimeSeries.filter
for details on the filtering method
scipy.signal.iirdesign
for details on the IIR filter design method
"""
zpk = filter_design.notch(frequency, self.sample_rate.value,
type=type, **kwargs)
return self.filter(*zpk, filtfilt=filtfilt)
|
Notch out a frequency in this `TimeSeries`.
Parameters
----------
frequency : `float`, `~astropy.units.Quantity`
frequency (default in Hertz) at which to apply the notch
type : `str`, optional
type of filter to apply, currently only 'iir' is supported
**kwargs
other keyword arguments to pass to `scipy.signal.iirdesign`
Returns
-------
notched : `TimeSeries`
a notch-filtered copy of the input `TimeSeries`
See Also
--------
TimeSeries.filter
for details on the filtering method
scipy.signal.iirdesign
for details on the IIR filter design method
|
def start(self, *_):
""" reading box configurations and starting timers to start/monitor/kill processes """
try:
box_configurations = self.bc_dao.run_query(QUERY_PROCESSES_FOR_BOX_ID(self.box_id))
for box_config in box_configurations:
handler = RepeatTimer(TRIGGER_INTERVAL, self.manage_process, args=[box_config.process_name])
self.thread_handlers[box_config.process_name] = handler
handler.start()
self.logger.info('Started Supervisor Thread for {0}, triggering every {1} seconds'
.format(box_config.process_name, TRIGGER_INTERVAL))
except LookupError as e:
self.logger.error('Supervisor failed to start because of: {0}'.format(e))
|
reading box configurations and starting timers to start/monitor/kill processes
|
def isemptyfile(filepath):
"""Determine if the file both exists and isempty
Args:
filepath (str, path): file path
Returns:
bool
"""
exists = os.path.exists(safepath(filepath))
if exists:
filesize = os.path.getsize(safepath(filepath))
return filesize == 0
else:
return False
|
Determine if the file both exists and isempty
Args:
filepath (str, path): file path
Returns:
bool
|
def ensure(self, connection, func, *args, **kwargs):
"""Perform an operation until success
Repeats in the face of connection errors, pursuant to retry policy.
"""
channel = None
while 1:
try:
if channel is None:
channel = connection.channel()
return func(channel, *args, **kwargs), channel
except (connection.connection_errors, IOError):
self._call_errback()
channel = self.connect(connection)
|
Perform an operation until success
Repeats in the face of connection errors, pursuant to retry policy.
|
def nn(self, x, k=1, eps=0, p=2, distance_upper_bound=np.inf):
"""
Query the tree for nearest neighbors
Parameters
----------
x : array_like, last dimension self.m
An array of points to query.
k : integer
The number of nearest neighbors to return.
eps : nonnegative float
Return approximate nearest neighbors; the kth returned value
is guaranteed to be no further than (1+eps) times the
distance to the real kth nearest neighbor.
p : float, 1<=p<=infinity
Which Minkowski p-norm to use.
1 is the sum-of-absolute-values "Manhattan" distance
2 is the usual Euclidean distance
infinity is the maximum-coordinate-difference distance
distance_upper_bound : nonnegative float
Return only neighbors within this distance. This is used to prune
tree searches, so if you are doing a series of nearest-neighbor
queries, it may help to supply the distance to the nearest neighbor
of the most recent point.
Returns
-------
d : float or array of floats
The distances to the nearest neighbors.
If x has shape tuple+(self.m,), then d has shape tuple if
k is one, or tuple+(k,) if k is larger than one. Missing
neighbors (e.g. when k > n or distance_upper_bound is
given) are indicated with infinite distances. If k is None,
then d is an object array of shape tuple, containing lists
of distances. In either case the hits are sorted by distance
(nearest first).
i : integer or array of integers
The locations of the neighbors in self.data. i is the same
shape as d.
"""
self.n, self.m = np.shape(self.get_data_x())
x = np.asarray(x)
if np.shape(x)[-1] != self.m:
raise ValueError("x must consist of vectors of length %d but has shape %s" % (self.m, np.shape(x)))
if p < 1:
raise ValueError("Only p-norms with 1<=p<=infinity permitted")
retshape = np.shape(x)[:-1]
if retshape != ():
if k is None:
dd = np.empty(retshape,dtype=np.object)
ii = np.empty(retshape,dtype=np.object)
elif k > 1:
dd = np.empty(retshape+(k,),dtype=np.float)
dd.fill(np.inf)
ii = np.empty(retshape+(k,),dtype=np.int)
ii.fill(self.n)
elif k == 1:
dd = np.empty(retshape,dtype=np.float)
dd.fill(np.inf)
ii = np.empty(retshape,dtype=np.int)
ii.fill(self.n)
else:
raise ValueError("Requested %s nearest neighbors; acceptable numbers are integers greater than or equal to one, or None")
for c in np.ndindex(retshape):
hits = self.__query(x[c], k=k, eps=eps, p=p, distance_upper_bound=distance_upper_bound)
if k is None:
dd[c] = [d for (d,i) in hits]
ii[c] = [i for (d,i) in hits]
elif k > 1:
for j in range(len(hits)):
dd[c+(j,)], ii[c+(j,)] = hits[j]
elif k == 1:
if len(hits) > 0:
dd[c], ii[c] = hits[0]
else:
dd[c] = np.inf
ii[c] = self.n
return dd, ii
else:
hits = self.__query(x, k=k, eps=eps, p=p, distance_upper_bound=distance_upper_bound)
if k is None:
return [d for (d,i) in hits], [i for (d,i) in hits]
elif k == 1:
if len(hits) > 0:
return hits[0]
else:
return np.inf, self.n
elif k > 1:
dd = np.empty(k,dtype=np.float)
dd.fill(np.inf)
ii = np.empty(k,dtype=np.int)
ii.fill(self.n)
for j in range(len(hits)):
dd[j], ii[j] = hits[j]
return dd, ii
else:
raise ValueError("Requested %s nearest neighbors; acceptable numbers are integers greater than or equal to one, or None")
|
Query the tree for nearest neighbors
Parameters
----------
x : array_like, last dimension self.m
An array of points to query.
k : integer
The number of nearest neighbors to return.
eps : nonnegative float
Return approximate nearest neighbors; the kth returned value
is guaranteed to be no further than (1+eps) times the
distance to the real kth nearest neighbor.
p : float, 1<=p<=infinity
Which Minkowski p-norm to use.
1 is the sum-of-absolute-values "Manhattan" distance
2 is the usual Euclidean distance
infinity is the maximum-coordinate-difference distance
distance_upper_bound : nonnegative float
Return only neighbors within this distance. This is used to prune
tree searches, so if you are doing a series of nearest-neighbor
queries, it may help to supply the distance to the nearest neighbor
of the most recent point.
Returns
-------
d : float or array of floats
The distances to the nearest neighbors.
If x has shape tuple+(self.m,), then d has shape tuple if
k is one, or tuple+(k,) if k is larger than one. Missing
neighbors (e.g. when k > n or distance_upper_bound is
given) are indicated with infinite distances. If k is None,
then d is an object array of shape tuple, containing lists
of distances. In either case the hits are sorted by distance
(nearest first).
i : integer or array of integers
The locations of the neighbors in self.data. i is the same
shape as d.
|
def compute_depth(self):
"""
Recursively computes true depth of the subtree. Should only
be needed for debugging. Unless something is wrong, the
depth field should reflect the correct depth of the subtree.
"""
left_depth = self.left_node.compute_depth() if self.left_node else 0
right_depth = self.right_node.compute_depth() if self.right_node else 0
return 1 + max(left_depth, right_depth)
|
Recursively computes true depth of the subtree. Should only
be needed for debugging. Unless something is wrong, the
depth field should reflect the correct depth of the subtree.
|
def __geomToPointList(self, geom):
""" converts a geometry object to a common.Geometry object """
if arcpyFound and isinstance(geom, arcpy.Polyline):
feature_geom = []
fPart = []
wkt = None
wkid = None
for part in geom:
fPart = []
for pnt in part:
if geom.spatialReference is None:
if self._wkid is None and self._wkt is not None:
wkt = self._wkt
else:
wkid = self._wkid
else:
wkid = geom.spatialReference.factoryCode
fPart.append(Point(coord=[pnt.X, pnt.Y],
wkid=wkid,
wkt=wkt,
z=pnt.Z, m=pnt.M))
feature_geom.append(fPart)
return feature_geom
|
converts a geometry object to a common.Geometry object
|
def load_step_specifications(self, file_name, short=False,
dataset_number=None):
""" Load a table that contains step-type definitions.
This function loads a file containing a specification for each step or
for each (cycle_number, step_number) combinations if short==False. The
step_cycle specifications that are allowed are stored in the variable
cellreader.list_of_step_types.
"""
dataset_number = self._validate_dataset_number(dataset_number)
if dataset_number is None:
self._report_empty_dataset()
return
# if short:
# # the table only consists of steps (not cycle,step pairs) assuming
# # that the step numbers uniquely defines step type (this is true
# # for arbin at least).
# raise NotImplementedError
step_specs = pd.read_csv(file_name, sep=prms.Reader.sep)
if "step" not in step_specs.columns:
self.logger.info("step col is missing")
raise IOError
if "type" not in step_specs.columns:
self.logger.info("type col is missing")
raise IOError
if not short and "cycle" not in step_specs.columns:
self.logger.info("cycle col is missing")
raise IOError
self.make_step_table(custom_step_definition=True,
step_specifications=step_specs,
short=short)
|
Load a table that contains step-type definitions.
This function loads a file containing a specification for each step or
for each (cycle_number, step_number) combinations if short==False. The
step_cycle specifications that are allowed are stored in the variable
cellreader.list_of_step_types.
|
def configure(self, cnf={}, **kw):
"""
Configure resources of the widget.
To get the list of options for this widget, call the method :meth:`~TickScale.keys`.
See :meth:`~TickScale.__init__` for a description of the widget specific option.
"""
kw.update(cnf)
reinit = False
if 'orient' in kw:
if kw['orient'] == 'vertical':
self._style_name = self._style_name.replace('Horizontal', 'Vertical')
if 'tickpos' not in kw:
self._tickpos = 'w'
else:
self._style_name = self._style_name.replace('Vertical', 'Horizontal')
if 'tickpos' not in kw:
self._tickpos = 's'
self.scale.configure(style=self._style_name)
reinit = True
if 'showvalue' in kw:
self._showvalue = bool(kw.pop('showvalue'))
reinit = True
if 'tickinterval' in kw:
self._tickinterval = kw.pop('tickinterval')
reinit = True
if 'tickpos' in kw:
tickpos = kw.pop('tickpos')
orient = kw.get('orient', str(self.cget('orient')))
if orient == 'vertical' and tickpos not in ['w', 'e']:
raise ValueError("For a vertical TickScale, 'tickpos' must be 'w' or 'e'.")
elif orient == 'horizontal' and tickpos not in ['n', 's']:
raise ValueError("For a horizontal TickScale, 'tickpos' must be 'n' or 's'.")
elif orient in ['vertical', 'horizontal']:
self._tickpos = tickpos
reinit = True
if 'labelpos' in kw:
labelpos = kw.pop('labelpos')
if labelpos not in ['w', 'e', 'n', 's']:
raise ValueError("'labelpos' must be 'n', 's', 'e' or 'w'.")
else:
self._labelpos = labelpos
reinit = True
if 'resolution' in kw:
try:
self._resolution = float(kw.pop('resolution'))
if self._resolution < 0:
raise ValueError("'resolution' must be non negative.")
except ValueError:
raise TypeError("'resolution' must be a float.")
if self._tickinterval != 0 and self._resolution > self._tickinterval:
self._tickinterval = self._resolution
reinit = True
if 'digits' in kw:
digits = kw.pop('digits')
if not isinstance(digits, int):
raise TypeError("'digits' must be an integer.")
elif digits < 0:
self._digits = digits
self._formatter = '{:g}'
reinit = True
else:
self._digits = digits
self._formatter = '{:.' + str(self._digits) + 'f}'
interv = self._get_precision(self._tickinterval)
resol = self._get_precision(self._resolution)
start = kw.get('from', kw.get('from_', self._start))
end = kw.get('to', self.scale.cget('to'))
from_ = self._get_precision(start)
to = self._get_precision(end)
d = max(interv, resol, from_, to)
if self._digits < d:
self._resolution = float('1e-{}'.format(self._digits))
self._tickinterval = round(self._tickinterval, self._digits)
if self._resolution > self._tickinterval:
self._tickinterval = self._resolution
kw['to'] = round(end, self._digits)
if 'from_' in kw:
del kw['from_']
kw['from'] = round(start, self._digits)
reinit = True
elif self._digits > 0:
start = kw.get('from', kw.get('from_', self._start))
end = kw.get('to', self.scale.cget('to'))
from_ = self._get_precision(start)
to = self._get_precision(end)
interv = self._get_precision(self._tickinterval)
resol = self._get_precision(self._resolution)
digits = max(self._digits, interv, resol, from_, to)
if digits != self._digits:
self._digits = digits
self._formatter = '{:.' + str(self._digits) + 'f}'
reinit = True
if 'variable' in kw:
self._var = kw['variable']
if not self._var:
self._var = tk.DoubleVar(self, self.get())
kw['variable'] = self._var
try:
self._var.trace_add('write', self._increment)
except AttributeError:
# backward compatibility
self._var.trace('w', self._increment)
self.scale.configure(**kw)
if 'from_' in kw or 'from' in kw or 'to' in kw:
self._extent = self.scale.cget('to') - self.scale.cget('from')
self._start = self.scale.cget('from')
reinit = True
if 'style' in kw:
self._style_name = kw['style']
if not self._style_name:
self._style_name = '%s.TScale' % (str(self.scale.cget('orient')).capitalize())
if reinit:
self._init()
if 'orient' in kw:
# needed after the reinitialization in case of orientation change
self._apply_style()
|
Configure resources of the widget.
To get the list of options for this widget, call the method :meth:`~TickScale.keys`.
See :meth:`~TickScale.__init__` for a description of the widget specific option.
|
def last_valid_index(self):
"""Returns index of last non-NaN/NULL value.
Return:
Scalar of index name.
"""
def last_valid_index_builder(df):
df.index = pandas.RangeIndex(len(df.index))
return df.apply(lambda df: df.last_valid_index())
func = self._build_mapreduce_func(last_valid_index_builder)
# We get the maximum from each column, then take the max of that to get
# last_valid_index. The `to_pandas()` here is just for a single value and
# `squeeze` will convert it to a scalar.
first_result = self._full_axis_reduce(0, func).max(axis=1).to_pandas().squeeze()
return self.index[first_result]
|
Returns index of last non-NaN/NULL value.
Return:
Scalar of index name.
|
def open(self, verbose):
"""
open the serial port using the configuration data
returns a reference to this instance
"""
# open a serial port
if verbose:
print('\nOpening Arduino Serial port %s ' % self.port_id)
try:
# in case the port is already open, let's close it and then
# reopen it
self.arduino.close()
time.sleep(1)
self.arduino.open()
time.sleep(1)
return self.arduino
except Exception:
# opened failed - will report back to caller
raise
|
open the serial port using the configuration data
returns a reference to this instance
|
def get_organisations(self, service_desk_id=None, start=0, limit=50):
"""
Returns a list of organizations in the Jira instance. If the user is not an agent,
the resource returns a list of organizations the user is a member of.
:param service_desk_id: OPTIONAL: str Get organizations from single Service Desk
:param start: OPTIONAL: int The starting index of the returned objects.
Base index: 0. See the Pagination section for more details.
:param limit: OPTIONAL: int The maximum number of users to return per page.
Default: 50. See the Pagination section for more details.
:return:
"""
url_without_sd_id = 'rest/servicedeskapi/organization'
url_with_sd_id = 'rest/servicedeskapi/servicedesk/{}/organization'.format(service_desk_id)
params = {}
if start is not None:
params['start'] = int(start)
if limit is not None:
params['limit'] = int(limit)
if service_desk_id is None:
return self.get(url_without_sd_id, headers=self.experimental_headers, params=params)
else:
return self.get(url_with_sd_id, headers=self.experimental_headers, params=params)
|
Returns a list of organizations in the Jira instance. If the user is not an agent,
the resource returns a list of organizations the user is a member of.
:param service_desk_id: OPTIONAL: str Get organizations from single Service Desk
:param start: OPTIONAL: int The starting index of the returned objects.
Base index: 0. See the Pagination section for more details.
:param limit: OPTIONAL: int The maximum number of users to return per page.
Default: 50. See the Pagination section for more details.
:return:
|
def _build_likelihood(self):
"""
This function computes the optimal density for v, q*(v), up to a constant
"""
# get the (marginals of) q(f): exactly predicting!
fmean, fvar = self._build_predict(self.X, full_cov=False)
return tf.reduce_sum(self.likelihood.variational_expectations(fmean, fvar, self.Y))
|
This function computes the optimal density for v, q*(v), up to a constant
|
def from_desmond(cls, path, **kwargs):
"""
Loads a topology from a Desmond DMS file located at `path`.
Arguments
---------
path : str
Path to a Desmond DMS file
"""
dms = DesmondDMSFile(path)
pos = kwargs.pop('positions', dms.getPositions())
return cls(master=dms, topology=dms.getTopology(), positions=pos, path=path,
**kwargs)
|
Loads a topology from a Desmond DMS file located at `path`.
Arguments
---------
path : str
Path to a Desmond DMS file
|
def from_packages(cls, parse_context, rev='', packages=None, **kwargs):
"""
:param list packages: The package import paths within the remote library; by default just the
root package will be available (equivalent to passing `packages=['']`).
:param string rev: Identifies which version of the remote library to download. This could be a
commit SHA (git), node id (hg), etc. If left unspecified the version will
default to the latest available. It's highly recommended to not accept the
default and instead pin the rev explicitly for repeatable builds.
"""
for pkg in packages or ('',):
cls.from_package(parse_context, pkg=pkg, rev=rev, **kwargs)
|
:param list packages: The package import paths within the remote library; by default just the
root package will be available (equivalent to passing `packages=['']`).
:param string rev: Identifies which version of the remote library to download. This could be a
commit SHA (git), node id (hg), etc. If left unspecified the version will
default to the latest available. It's highly recommended to not accept the
default and instead pin the rev explicitly for repeatable builds.
|
def _should_really_index(self, instance):
"""Return True if according to should_index the object should be indexed."""
if self._should_index_is_method:
is_method = inspect.ismethod(self.should_index)
try:
count_args = len(inspect.signature(self.should_index).parameters)
except AttributeError:
# noinspection PyDeprecation
count_args = len(inspect.getargspec(self.should_index).args)
if is_method or count_args is 1:
# bound method, call with instance
return self.should_index(instance)
else:
# unbound method, simply call without arguments
return self.should_index()
else:
# property/attribute/Field, evaluate as bool
attr_type = type(self.should_index)
if attr_type is DeferredAttribute:
attr_value = self.should_index.__get__(instance, None)
elif attr_type is str:
attr_value = getattr(instance, self.should_index)
elif attr_type is property:
attr_value = self.should_index.__get__(instance)
else:
raise AlgoliaIndexError('{} should be a boolean attribute or a method that returns a boolean.'.format(
self.should_index))
if type(attr_value) is not bool:
raise AlgoliaIndexError("%s's should_index (%s) should be a boolean" % (
instance.__class__.__name__, self.should_index))
return attr_value
|
Return True if according to should_index the object should be indexed.
|
def get_metrics(self, from_time=None, to_time=None, metrics=None,
ifs=[], storageIds=[], view=None):
"""
This endpoint is not supported as of v6. Use the timeseries API
instead. To get all metrics for a host with the timeseries API use
the query:
'select * where hostId = $HOST_ID'.
To get specific metrics for a host use a comma-separated list of
the metric names as follows:
'select $METRIC_NAME1, $METRIC_NAME2 where hostId = $HOST_ID'.
For more information see http://tiny.cloudera.com/tsquery_doc
@param from_time: A datetime; start of the period to query (optional).
@param to_time: A datetime; end of the period to query (default = now).
@param metrics: List of metrics to query (default = all).
@param ifs: network interfaces to query. Default all, use None to disable.
@param storageIds: storage IDs to query. Default all, use None to disable.
@param view: View to materialize ('full' or 'summary')
@return: List of metrics and their readings.
"""
params = { }
if ifs:
params['ifs'] = ifs
elif ifs is None:
params['queryNw'] = 'false'
if storageIds:
params['storageIds'] = storageIds
elif storageIds is None:
params['queryStorage'] = 'false'
return self._get_resource_root().get_metrics(self._path() + '/metrics',
from_time, to_time, metrics, view, params)
|
This endpoint is not supported as of v6. Use the timeseries API
instead. To get all metrics for a host with the timeseries API use
the query:
'select * where hostId = $HOST_ID'.
To get specific metrics for a host use a comma-separated list of
the metric names as follows:
'select $METRIC_NAME1, $METRIC_NAME2 where hostId = $HOST_ID'.
For more information see http://tiny.cloudera.com/tsquery_doc
@param from_time: A datetime; start of the period to query (optional).
@param to_time: A datetime; end of the period to query (default = now).
@param metrics: List of metrics to query (default = all).
@param ifs: network interfaces to query. Default all, use None to disable.
@param storageIds: storage IDs to query. Default all, use None to disable.
@param view: View to materialize ('full' or 'summary')
@return: List of metrics and their readings.
|
def interleaved_filename(file_path):
"""Return filename used to represent a set of paired-end files. Assumes Illumina-style naming
conventions where each file has _R1_ or _R2_ in its name."""
if not isinstance(file_path, tuple):
raise OneCodexException("Cannot get the interleaved filename without a tuple.")
if re.match(".*[._][Rr][12][_.].*", file_path[0]):
return re.sub("[._][Rr][12]", "", file_path[0])
else:
warnings.warn("Paired-end filenames do not match--are you sure they are correct?")
return file_path[0]
|
Return filename used to represent a set of paired-end files. Assumes Illumina-style naming
conventions where each file has _R1_ or _R2_ in its name.
|
def _which(executable, flags=os.X_OK, abspath_only=False, disallow_symlinks=False):
"""Borrowed from Twisted's :mod:twisted.python.proutils .
Search PATH for executable files with the given name.
On newer versions of MS-Windows, the PATHEXT environment variable will be
set to the list of file extensions for files considered executable. This
will normally include things like ".EXE". This fuction will also find files
with the given name ending with any of these extensions.
On MS-Windows the only flag that has any meaning is os.F_OK. Any other
flags will be ignored.
Note: This function does not help us prevent an attacker who can already
manipulate the environment's PATH settings from placing malicious code
higher in the PATH. It also does happily follows links.
:param str name: The name for which to search.
:param int flags: Arguments to L{os.access}.
:rtype: list
:returns: A list of the full paths to files found, in the order in which
they were found.
"""
def _can_allow(p):
if not os.access(p, flags):
return False
if abspath_only and not os.path.abspath(p):
log.warn('Ignoring %r (path is not absolute)', p)
return False
if disallow_symlinks and os.path.islink(p):
log.warn('Ignoring %r (path is a symlink)', p)
return False
return True
result = []
exts = filter(None, os.environ.get('PATHEXT', '').split(os.pathsep))
path = os.environ.get('PATH', None)
if path is None:
return []
for p in os.environ.get('PATH', '').split(os.pathsep):
p = os.path.join(p, executable)
if _can_allow(p):
result.append(p)
for e in exts:
pext = p + e
if _can_allow(pext):
result.append(pext)
return result
|
Borrowed from Twisted's :mod:twisted.python.proutils .
Search PATH for executable files with the given name.
On newer versions of MS-Windows, the PATHEXT environment variable will be
set to the list of file extensions for files considered executable. This
will normally include things like ".EXE". This fuction will also find files
with the given name ending with any of these extensions.
On MS-Windows the only flag that has any meaning is os.F_OK. Any other
flags will be ignored.
Note: This function does not help us prevent an attacker who can already
manipulate the environment's PATH settings from placing malicious code
higher in the PATH. It also does happily follows links.
:param str name: The name for which to search.
:param int flags: Arguments to L{os.access}.
:rtype: list
:returns: A list of the full paths to files found, in the order in which
they were found.
|
def start_commit(self, repo_name, branch=None, parent=None, description=None):
"""
Begins the process of committing data to a Repo. Once started you can
write to the Commit with PutFile and when all the data has been
written you must finish the Commit with FinishCommit. NOTE, data is
not persisted until FinishCommit is called. A Commit object is
returned.
Params:
* repo_name: The name of the repo.
* branch: A more convenient way to build linear chains of commits.
When a commit is started with a non-empty branch the value of branch
becomes an alias for the created Commit. This enables a more intuitive
access pattern. When the commit is started on a branch the previous
head of the branch is used as the parent of the commit.
* parent: Specifies the parent Commit, upon creation the new Commit
will appear identical to the parent Commit, data can safely be added
to the new commit without affecting the contents of the parent Commit.
You may pass "" as parentCommit in which case the new Commit will have
no parent and will initially appear empty.
* description: (optional) explanation of the commit for clarity.
"""
req = proto.StartCommitRequest(parent=proto.Commit(repo=proto.Repo(name=repo_name), id=parent), branch=branch,
description=description)
res = self.stub.StartCommit(req, metadata=self.metadata)
return res
|
Begins the process of committing data to a Repo. Once started you can
write to the Commit with PutFile and when all the data has been
written you must finish the Commit with FinishCommit. NOTE, data is
not persisted until FinishCommit is called. A Commit object is
returned.
Params:
* repo_name: The name of the repo.
* branch: A more convenient way to build linear chains of commits.
When a commit is started with a non-empty branch the value of branch
becomes an alias for the created Commit. This enables a more intuitive
access pattern. When the commit is started on a branch the previous
head of the branch is used as the parent of the commit.
* parent: Specifies the parent Commit, upon creation the new Commit
will appear identical to the parent Commit, data can safely be added
to the new commit without affecting the contents of the parent Commit.
You may pass "" as parentCommit in which case the new Commit will have
no parent and will initially appear empty.
* description: (optional) explanation of the commit for clarity.
|
def get_windzone(conn, geometry):
'Find windzone from map.'
# TODO@Günni
if geometry.geom_type in ['Polygon', 'MultiPolygon']:
coords = geometry.centroid
else:
coords = geometry
sql = """
SELECT zone FROM oemof_test.windzones
WHERE st_contains(geom, ST_PointFromText('{wkt}', 4326));
""".format(wkt=coords.wkt)
zone = conn.execute(sql).fetchone()
if zone is not None:
zone = zone[0]
else:
zone = 0
return zone
|
Find windzone from map.
|
def res_block(nf, dense:bool=False, norm_type:Optional[NormType]=NormType.Batch, bottle:bool=False, **conv_kwargs):
"Resnet block of `nf` features. `conv_kwargs` are passed to `conv_layer`."
norm2 = norm_type
if not dense and (norm_type==NormType.Batch): norm2 = NormType.BatchZero
nf_inner = nf//2 if bottle else nf
return SequentialEx(conv_layer(nf, nf_inner, norm_type=norm_type, **conv_kwargs),
conv_layer(nf_inner, nf, norm_type=norm2, **conv_kwargs),
MergeLayer(dense))
|
Resnet block of `nf` features. `conv_kwargs` are passed to `conv_layer`.
|
def get_tour_list(self):
"""
Inquire all tour list
:rtype: list
"""
resp = json.loads(urlopen(self.tour_list_url.format(1)).read().decode('utf-8'))
total_count = resp['response']['body']['totalCount']
# Get total count
resp = json.loads(urlopen(self.tour_list_url.format(total_count)).read().decode('utf-8'))
data = resp['response']['body']['items']['item']
# Extract data list
keychain = {
'contentid': ('content_id', None),
'contenttypeid': ('content_type_id', None),
'title': ('title', None),
'addr1': ('address', None),
'zipcode': ('zipcode', None),
'sigungucode': ('municipality', None),
'mapx': ('x', None),
'mapy': ('y', None),
'cat1': ('main_category', None),
'cat2': ('middle_category', None),
'cat3': ('small_category', None),
'readcount': ('views', 0),
'tel': ('tel', None),
'firstimage': ('image', None),
}
for tour in data:
_dict_key_changer(tour, keychain)
tour['creation_date'] = str(tour.pop('createdtime'))[:8] if 'createdtime' in tour else None
tour['modified_date'] = str(tour.pop('modifiedtime'))[:8] if 'modifiedtime' in tour else None
tour.pop('areacode', None)
tour.pop('addr2', None)
tour.pop('mlevel', None)
# Manufacture
return data
|
Inquire all tour list
:rtype: list
|
def fill_tree(self, tree, input_dict):
"""
fills a tree with nested parameters
Args:
tree: QtGui.QTreeView
parameters: dictionary or Parameter object
Returns:
"""
def add_element(item, key, value):
child_name = QtGui.QStandardItem(key)
child_name.setDragEnabled(False)
child_name.setSelectable(False)
child_name.setEditable(False)
if isinstance(value, dict):
for ket_child, value_child in value.items():
add_element(child_name, ket_child, value_child)
child_value = QtGui.QStandardItem('')
else:
child_value = QtGui.QStandardItem(str(value))
child_value.setData(value)
child_value.setDragEnabled(False)
child_value.setSelectable(False)
child_value.setEditable(False)
item.appendRow([child_name, child_value])
for index, (loaded_item, loaded_item_settings) in enumerate(input_dict.items()):
# print(index, loaded_item, loaded_item_settings)
item = QtGui.QStandardItem(loaded_item)
for key, value in loaded_item_settings['settings'].items():
add_element(item, key, value)
value = QtGui.QStandardItem('')
tree.model().appendRow([item, value])
if tree == self.tree_loaded:
item.setEditable(False)
tree.setFirstColumnSpanned(index, self.tree_infile.rootIndex(), True)
|
fills a tree with nested parameters
Args:
tree: QtGui.QTreeView
parameters: dictionary or Parameter object
Returns:
|
def from_string(cls, cl_function, dependencies=()):
"""Parse the given CL function into a SimpleCLFunction object.
Args:
cl_function (str): the function we wish to turn into an object
dependencies (list or tuple of CLLibrary): The list of CL libraries this function depends on
Returns:
SimpleCLFunction: the CL data type for this parameter declaration
"""
return_type, function_name, parameter_list, body = split_cl_function(cl_function)
return SimpleCLFunction(return_type, function_name, parameter_list, body, dependencies=dependencies)
|
Parse the given CL function into a SimpleCLFunction object.
Args:
cl_function (str): the function we wish to turn into an object
dependencies (list or tuple of CLLibrary): The list of CL libraries this function depends on
Returns:
SimpleCLFunction: the CL data type for this parameter declaration
|
def setStimRisefall(self):
"""Sets the Risefall of the StimulusModel's tone from values pulled from
this widget"""
rf = self.ui.risefallSpnbx.value()
self.tone.setRisefall(rf)
|
Sets the Risefall of the StimulusModel's tone from values pulled from
this widget
|
def log_level(self, subsystem, level, **kwargs):
r"""Changes the logging output of a running daemon.
.. code-block:: python
>>> c.log_level("path", "info")
{'Message': "Changed log level of 'path' to 'info'\n"}
Parameters
----------
subsystem : str
The subsystem logging identifier (Use ``"all"`` for all subsystems)
level : str
The desired logging level. Must be one of:
* ``"debug"``
* ``"info"``
* ``"warning"``
* ``"error"``
* ``"fatal"``
* ``"panic"``
Returns
-------
dict : Status message
"""
args = (subsystem, level)
return self._client.request('/log/level', args,
decoder='json', **kwargs)
|
r"""Changes the logging output of a running daemon.
.. code-block:: python
>>> c.log_level("path", "info")
{'Message': "Changed log level of 'path' to 'info'\n"}
Parameters
----------
subsystem : str
The subsystem logging identifier (Use ``"all"`` for all subsystems)
level : str
The desired logging level. Must be one of:
* ``"debug"``
* ``"info"``
* ``"warning"``
* ``"error"``
* ``"fatal"``
* ``"panic"``
Returns
-------
dict : Status message
|
def process_view(self, request, view_func, view_args, view_kwargs):
"""Run the profiler on _view_func_."""
profiler = getattr(request, 'profiler', None)
if profiler:
# Make sure profiler variables don't get passed into view_func
original_get = request.GET
request.GET = original_get.copy()
request.GET.pop('profile', None)
request.GET.pop('show_queries', None)
request.GET.pop('show_stats', None)
try:
return profiler.runcall(
view_func, request, *view_args, **view_kwargs
)
finally:
request.GET = original_get
|
Run the profiler on _view_func_.
|
def _wrapped_method_with_watch_fn(self, f, *args, **kwargs):
"""A wrapped method with a watch function.
When this method is called, it will call the underlying method with
the same arguments, *except* that if the ``watch`` argument isn't
:data:`None`, it will be replaced with a wrapper around that watch
function, so that the watch function will be called in the reactor
thread. This means that the watch function can safely use Twisted
APIs.
"""
bound_args = signature(f).bind(*args, **kwargs)
orig_watch = bound_args.arguments.get("watch")
if orig_watch is not None:
wrapped_watch = partial(self._call_in_reactor_thread, orig_watch)
wrapped_watch = wraps(orig_watch)(wrapped_watch)
bound_args.arguments["watch"] = wrapped_watch
return f(**bound_args.arguments)
|
A wrapped method with a watch function.
When this method is called, it will call the underlying method with
the same arguments, *except* that if the ``watch`` argument isn't
:data:`None`, it will be replaced with a wrapper around that watch
function, so that the watch function will be called in the reactor
thread. This means that the watch function can safely use Twisted
APIs.
|
def get_updates(self, offset=None, limit=None, timeout=20, allowed_updates=None):
"""
Use this method to receive incoming updates using long polling (wiki). An Array of Update objects is returned.
:param allowed_updates: Array of string. List the types of updates you want your bot to receive.
:param offset: Integer. Identifier of the first update to be returned.
:param limit: Integer. Limits the number of updates to be retrieved.
:param timeout: Integer. Timeout in seconds for long polling.
:return: array of Updates
"""
json_updates = apihelper.get_updates(self.token, offset, limit, timeout, allowed_updates)
ret = []
for ju in json_updates:
ret.append(types.Update.de_json(ju))
return ret
|
Use this method to receive incoming updates using long polling (wiki). An Array of Update objects is returned.
:param allowed_updates: Array of string. List the types of updates you want your bot to receive.
:param offset: Integer. Identifier of the first update to be returned.
:param limit: Integer. Limits the number of updates to be retrieved.
:param timeout: Integer. Timeout in seconds for long polling.
:return: array of Updates
|
def maybe_start_recording(tokens, index):
"""Return a new _RSTCommentBlockRecorder when its time to record."""
if tokens[index].type == TokenType.BeginRSTComment:
return _RSTCommentBlockRecorder(index, tokens[index].line)
return None
|
Return a new _RSTCommentBlockRecorder when its time to record.
|
def _set_lim_and_transforms(self):
"""Setup the key transforms for the axes."""
# Most of the transforms are set up correctly by LambertAxes
LambertAxes._set_lim_and_transforms(self)
# Transform for latitude ticks. These are typically unused, but just
# in case we need them...
yaxis_stretch = Affine2D().scale(4 * self.horizon, 1.0)
yaxis_stretch = yaxis_stretch.translate(-self.horizon, 0.0)
# These are identical to LambertAxes._set_lim_and_transforms, but we
# need to update things to reflect the new "yaxis_stretch"
yaxis_space = Affine2D().scale(1.0, 1.1)
self._yaxis_transform = \
yaxis_stretch + \
self.transData
yaxis_text_base = \
yaxis_stretch + \
self.transProjection + \
(yaxis_space + \
self.transAffine + \
self.transAxes)
self._yaxis_text1_transform = \
yaxis_text_base + \
Affine2D().translate(-8.0, 0.0)
self._yaxis_text2_transform = \
yaxis_text_base + \
Affine2D().translate(8.0, 0.0)
|
Setup the key transforms for the axes.
|
def main(args=None):
"""Main command-line interface entrypoint.
Runs the given subcommand or argument that were specified. If not given a
``args`` parameter, assumes the arguments are passed on the command-line.
Args:
args (list): list of command-line arguments
Returns:
Zero on success, non-zero otherwise.
"""
if args is None:
args = sys.argv[1:]
parser = create_parser()
args = parser.parse_args(args)
if args.verbose >= 2:
level = logging.DEBUG
elif args.verbose >= 1:
level = logging.INFO
else:
level = logging.WARNING
logging.basicConfig(level=level)
try:
args.command(args)
except pylink.JLinkException as e:
sys.stderr.write('Error: %s%s' % (str(e), os.linesep))
return 1
return 0
|
Main command-line interface entrypoint.
Runs the given subcommand or argument that were specified. If not given a
``args`` parameter, assumes the arguments are passed on the command-line.
Args:
args (list): list of command-line arguments
Returns:
Zero on success, non-zero otherwise.
|
def disable_beacons(self):
'''
Enable beacons
'''
self.opts['beacons']['enabled'] = False
# Fire the complete event back along with updated list of beacons
evt = salt.utils.event.get_event('minion', opts=self.opts)
evt.fire_event({'complete': True, 'beacons': self.opts['beacons']},
tag='/salt/minion/minion_beacons_disabled_complete')
return True
|
Enable beacons
|
def check(text):
"""Check the text."""
err = "uncomparables.misc"
msg = "Comparison of an uncomparable: '{}' is not comparable."
comparators = [
"most",
"more",
"less",
"least",
"very",
"quite",
"largely",
"extremely",
"increasingly",
"kind of",
"mildly"
]
uncomparables = [
"absolute",
"adequate",
"chief",
"complete",
"correct",
"devoid",
"entire",
"false",
"fatal",
"favorite",
"final",
"ideal",
"impossible",
"inevitable",
"infinite",
"irrevocable",
"main",
"manifest",
"only",
"paramount",
"perfect",
"perpetual",
"possible",
"preferable",
"principal",
"singular",
"stationary",
"sufficient",
"true",
"unanimous",
"unavoidable",
"unbroken",
"uniform",
"unique",
"universal",
"void",
"whole",
]
exceptions = [
("more", "perfect"),
("more", "possible") # FIXME
]
all = ["\\b" + i[0] + "\s" + i[1] + "[\W$]" for i in itertools.product(
comparators, uncomparables) if i not in exceptions]
occ = re.finditer("|".join(all), text.lower())
return [(o.start(), o.end(), err, msg.format(o.group(0)), None)
for o in occ]
|
Check the text.
|
def _get_command(classes):
"""Associates each command class with command depending on setup.cfg
"""
commands = {}
setup_file = os.path.join(
os.path.abspath(os.path.join(os.path.dirname(__file__), '../..')),
'setup.cfg')
for line in open(setup_file, 'r'):
for cl in classes:
if cl in line:
commands[cl] = line.split(' = ')[0].strip().replace('_', ' ')
return commands
|
Associates each command class with command depending on setup.cfg
|
def groups_remove_owner(self, room_id, user_id, **kwargs):
"""Removes the role of owner from a user in the current Group."""
return self.__call_api_post('groups.removeOwner', roomId=room_id, userId=user_id, kwargs=kwargs)
|
Removes the role of owner from a user in the current Group.
|
def _create_input_transactions(self, addy):
# type: (Address) -> None
"""
Creates transactions for the specified input address.
"""
self._transactions.append(ProposedTransaction(
address=addy,
tag=self.tag,
# Spend the entire address balance; if necessary, we will
# add a change transaction to the bundle.
value=-addy.balance,
))
# Signatures require additional transactions to store, due to
# transaction length limit.
# Subtract 1 to account for the transaction we just added.
for _ in range(addy.security_level - 1):
self._transactions.append(ProposedTransaction(
address=addy,
tag=self.tag,
# Note zero value; this is a meta transaction.
value=0,
))
|
Creates transactions for the specified input address.
|
def update_one(self, filter_, document, **kwargs):
"""update method
"""
self._valide_update_document(document)
return self.__collect.update_one(filter_, document, **kwargs)
|
update method
|
def get_pret_embs(self, word_dims=None):
"""Read pre-trained embedding file
Parameters
----------
word_dims : int or None
vector size. Use `None` for auto-infer
Returns
-------
numpy.ndarray
T x C numpy NDArray
"""
assert (self._pret_embeddings is not None), "No pretrained file provided."
pret_embeddings = gluonnlp.embedding.create(self._pret_embeddings[0], source=self._pret_embeddings[1])
embs = [None] * len(self._id2word)
for idx, vec in enumerate(pret_embeddings.idx_to_vec):
embs[idx] = vec.asnumpy()
if word_dims is None:
word_dims = len(pret_embeddings.idx_to_vec[0])
for idx, emb in enumerate(embs):
if emb is None:
embs[idx] = np.zeros(word_dims)
pret_embs = np.array(embs, dtype=np.float32)
return pret_embs / np.std(pret_embs)
|
Read pre-trained embedding file
Parameters
----------
word_dims : int or None
vector size. Use `None` for auto-infer
Returns
-------
numpy.ndarray
T x C numpy NDArray
|
def select_limit(self, table, cols='*', offset=0, limit=MAX_ROWS_PER_QUERY):
"""Run a select query with an offset and limit parameter."""
return self.fetch(self._select_limit_statement(table, cols, offset, limit))
|
Run a select query with an offset and limit parameter.
|
def calc_uniform_lim_glorot(inmaps, outmaps, kernel=(1, 1)):
r"""Calculates the lower bound and the upper bound of the uniform distribution proposed by Glorot et al.
.. math::
b &= \sqrt{\frac{6}{NK + M}}\\
a &= -b
Args:
inmaps (int): Map size of an input Variable, :math:`N`.
outmaps (int): Map size of an output Variable, :math:`M`.
kernel (:obj:`tuple` of :obj:`int`): Convolution kernel spatial shape.
In above definition, :math:`K` is the product of shape dimensions.
In Affine, the default value should be used.
Example:
.. code-block:: python
import nnabla as nn
import nnabla.parametric_functions as PF
import nnabla.initializer as I
x = nn.Variable([60,1,28,28])
lb,ub= I.calc_uniform_lim_glorot(x.shape[1],64)
w = I.UniformInitializer((lb,ub))
b = I.ConstantInitializer(0)
h = PF.convolution(x, 64, [3, 3], w_init=w, b_init=b, pad=[1, 1], name='conv')
References:
* `Glorot and Bengio. Understanding the difficulty of training deep
feedforward neural networks
<http://jmlr.org/proceedings/papers/v9/glorot10a/glorot10a.pdf>`_
"""
d = np.sqrt(6. / (np.prod(kernel) * inmaps + outmaps))
return -d, d
|
r"""Calculates the lower bound and the upper bound of the uniform distribution proposed by Glorot et al.
.. math::
b &= \sqrt{\frac{6}{NK + M}}\\
a &= -b
Args:
inmaps (int): Map size of an input Variable, :math:`N`.
outmaps (int): Map size of an output Variable, :math:`M`.
kernel (:obj:`tuple` of :obj:`int`): Convolution kernel spatial shape.
In above definition, :math:`K` is the product of shape dimensions.
In Affine, the default value should be used.
Example:
.. code-block:: python
import nnabla as nn
import nnabla.parametric_functions as PF
import nnabla.initializer as I
x = nn.Variable([60,1,28,28])
lb,ub= I.calc_uniform_lim_glorot(x.shape[1],64)
w = I.UniformInitializer((lb,ub))
b = I.ConstantInitializer(0)
h = PF.convolution(x, 64, [3, 3], w_init=w, b_init=b, pad=[1, 1], name='conv')
References:
* `Glorot and Bengio. Understanding the difficulty of training deep
feedforward neural networks
<http://jmlr.org/proceedings/papers/v9/glorot10a/glorot10a.pdf>`_
|
def get_types(self):
"""
Retrieve a set of all recognized content types for this
translator object.
"""
# Convert translators into a set of content types
content_types = set()
for name in self.translators:
content_types |= type_names[name]
return content_types
|
Retrieve a set of all recognized content types for this
translator object.
|
def coerce_value(type, value):
# type: (Any, Any) -> Union[List, Dict, int, float, bool, str, None]
"""Given a type and any value, return a runtime value coerced to match the type."""
if isinstance(type, GraphQLNonNull):
# Note: we're not checking that the result of coerceValue is
# non-null.
# We only call this function after calling isValidValue.
return coerce_value(type.of_type, value)
if value is None:
return None
if isinstance(type, GraphQLList):
item_type = type.of_type
if not isinstance(value, string_types) and isinstance(value, Iterable):
return [coerce_value(item_type, item) for item in value]
else:
return [coerce_value(item_type, value)]
if isinstance(type, GraphQLInputObjectType):
fields = type.fields
obj = {}
for field_name, field in fields.items():
if field_name not in value:
if field.default_value is not None:
field_value = field.default_value
obj[field.out_name or field_name] = field_value
else:
field_value = coerce_value(field.type, value.get(field_name))
obj[field.out_name or field_name] = field_value
return type.create_container(obj)
assert isinstance(type, (GraphQLScalarType, GraphQLEnumType)), "Must be input type"
return type.parse_value(value)
|
Given a type and any value, return a runtime value coerced to match the type.
|
def _converged(self):
"""Check convergence based on maximum absolute difference
Returns
-------
converged : boolean
Whether the parameter estimation converged.
max_diff : float
Maximum absolute difference between prior and posterior.
"""
prior = self.global_prior_[0:self.prior_size]
posterior = self.global_posterior_[0:self.prior_size]
diff = prior - posterior
max_diff = np.max(np.fabs(diff))
if self.verbose:
_, mse = self._mse_converged()
diff_ratio = np.sum(diff ** 2) / np.sum(posterior ** 2)
logger.info(
'htfa prior posterior max diff %f mse %f diff_ratio %f' %
((max_diff, mse, diff_ratio)))
if max_diff > self.threshold:
return False, max_diff
else:
return True, max_diff
|
Check convergence based on maximum absolute difference
Returns
-------
converged : boolean
Whether the parameter estimation converged.
max_diff : float
Maximum absolute difference between prior and posterior.
|
def getKwCtrlConf(self, kw, fmt='dict'):
""" return keyword's control configuration, followed after '!epics' notation
:param kw: keyword name
:param fmt: return format, 'raw', 'dict', 'json', default is 'dict'
"""
try:
confd = self.ctrlconf_dict[kw]
if fmt == 'dict':
retval = confd
else: # 'json' string for other options
retval = json.dumps(confd)
except KeyError:
# try to get from raw line string
self.getKw(kw)
if self.confstr_epics != '':
if fmt == 'dict':
retval = ast.literal_eval(self.confstr_epics)
elif fmt == 'json':
retval = json.dumps(ast.literal_eval(self.confstr_epics))
else: # raw string
retval = self.confstr_epics
else:
retval = None
return retval
|
return keyword's control configuration, followed after '!epics' notation
:param kw: keyword name
:param fmt: return format, 'raw', 'dict', 'json', default is 'dict'
|
def set_delegate(address=None, pubkey=None, secret=None):
"""Set delegate parameters. Call set_delegate with no arguments to clear."""
c.DELEGATE['ADDRESS'] = address
c.DELEGATE['PUBKEY'] = pubkey
c.DELEGATE['PASSPHRASE'] = secret
|
Set delegate parameters. Call set_delegate with no arguments to clear.
|
def save(self, data, xparent=None):
"""
Parses the element from XML to Python.
:param data | <variant>
xparent | <xml.etree.ElementTree.Element> || None
:return <xml.etree.ElementTree.Element>
"""
if xparent is not None:
elem = ElementTree.SubElement(xparent, 'dict')
else:
elem = ElementTree.Element('dict')
for key, value in sorted(data.items()):
xitem = ElementTree.SubElement(elem, 'item')
xitem.set('key', nstr(key))
XmlDataIO.toXml(value, xitem)
return elem
|
Parses the element from XML to Python.
:param data | <variant>
xparent | <xml.etree.ElementTree.Element> || None
:return <xml.etree.ElementTree.Element>
|
def add_data(self, address, data):
"""! @brief Add a chunk of data to be programmed.
The data may cross flash memory region boundaries, as long as the regions are contiguous.
@param self
@param address Integer address for where the first byte of _data_ should be written.
@param data A list of byte values to be programmed at the given address.
@return The FlashLoader instance is returned, to allow chaining further add_data()
calls or a call to commit().
@exception ValueError Raised when the address is not within a flash memory region.
@exception RuntimeError Raised if the flash memory region does not have a valid Flash
instance associated with it, which indicates that the target connect sequence did
not run successfully.
"""
while len(data):
# Look up flash region.
region = self._map.get_region_for_address(address)
if region is None:
raise ValueError("no memory region defined for address 0x%08x" % address)
if not region.is_flash:
raise ValueError("memory region at address 0x%08x is not flash" % address)
# Get our builder instance.
if region in self._builders:
builder = self._builders[region]
else:
if region.flash is None:
raise RuntimeError("flash memory region at address 0x%08x has no flash instance" % address)
builder = region.flash.get_flash_builder()
builder.log_performance = False
self._builders[region] = builder
# Add as much data to the builder as is contained by this region.
programLength = min(len(data), region.end - address + 1)
assert programLength != 0
builder.add_data(address, data[:programLength])
# Advance.
data = data[programLength:]
address += programLength
self._total_data_size += programLength
return self
|
! @brief Add a chunk of data to be programmed.
The data may cross flash memory region boundaries, as long as the regions are contiguous.
@param self
@param address Integer address for where the first byte of _data_ should be written.
@param data A list of byte values to be programmed at the given address.
@return The FlashLoader instance is returned, to allow chaining further add_data()
calls or a call to commit().
@exception ValueError Raised when the address is not within a flash memory region.
@exception RuntimeError Raised if the flash memory region does not have a valid Flash
instance associated with it, which indicates that the target connect sequence did
not run successfully.
|
def add_graph(self, graph):
"""Adds a `Graph` protocol buffer to the event file."""
event = event_pb2.Event(graph_def=graph.SerializeToString())
self._add_event(event, None)
|
Adds a `Graph` protocol buffer to the event file.
|
def tryload(self, cfgstr=None, on_error='raise'):
"""
Like load, but returns None if the load fails due to a cache miss.
Args:
on_error (str): How to handle non-io errors errors. Either raise,
which re-raises the exception, or clear which deletes the cache
and returns None.
"""
cfgstr = self._rectify_cfgstr(cfgstr)
if self.enabled:
try:
if self.verbose > 1:
self.log('[cacher] tryload fname={}'.format(self.fname))
return self.load(cfgstr)
except IOError:
if self.verbose > 0:
self.log('[cacher] ... {} cache miss'.format(self.fname))
except Exception:
if self.verbose > 0:
self.log('[cacher] ... failed to load')
if on_error == 'raise':
raise
elif on_error == 'clear':
self.clear(cfgstr)
return None
else:
raise KeyError('Unknown method on_error={}'.format(on_error))
else:
if self.verbose > 1:
self.log('[cacher] ... cache disabled: fname={}'.format(self.fname))
return None
|
Like load, but returns None if the load fails due to a cache miss.
Args:
on_error (str): How to handle non-io errors errors. Either raise,
which re-raises the exception, or clear which deletes the cache
and returns None.
|
def delete_namespaced_endpoints(self, name, namespace, **kwargs): # noqa: E501
"""delete_namespaced_endpoints # noqa: E501
delete Endpoints # noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.delete_namespaced_endpoints(name, namespace, async_req=True)
>>> result = thread.get()
:param async_req bool
:param str name: name of the Endpoints (required)
:param str namespace: object name and auth scope, such as for teams and projects (required)
:param str pretty: If 'true', then the output is pretty printed.
:param str dry_run: When present, indicates that modifications should not be persisted. An invalid or unrecognized dryRun directive will result in an error response and no further processing of the request. Valid values are: - All: all dry run stages will be processed
:param int grace_period_seconds: The duration in seconds before the object should be deleted. Value must be non-negative integer. The value zero indicates delete immediately. If this value is nil, the default grace period for the specified type will be used. Defaults to a per object value if not specified. zero means delete immediately.
:param bool orphan_dependents: Deprecated: please use the PropagationPolicy, this field will be deprecated in 1.7. Should the dependent objects be orphaned. If true/false, the \"orphan\" finalizer will be added to/removed from the object's finalizers list. Either this field or PropagationPolicy may be set, but not both.
:param str propagation_policy: Whether and how garbage collection will be performed. Either this field or OrphanDependents may be set, but not both. The default policy is decided by the existing finalizer set in the metadata.finalizers and the resource-specific default policy. Acceptable values are: 'Orphan' - orphan the dependents; 'Background' - allow the garbage collector to delete the dependents in the background; 'Foreground' - a cascading policy that deletes all dependents in the foreground.
:param V1DeleteOptions body:
:return: V1Status
If the method is called asynchronously,
returns the request thread.
"""
kwargs['_return_http_data_only'] = True
if kwargs.get('async_req'):
return self.delete_namespaced_endpoints_with_http_info(name, namespace, **kwargs) # noqa: E501
else:
(data) = self.delete_namespaced_endpoints_with_http_info(name, namespace, **kwargs) # noqa: E501
return data
|
delete_namespaced_endpoints # noqa: E501
delete Endpoints # noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.delete_namespaced_endpoints(name, namespace, async_req=True)
>>> result = thread.get()
:param async_req bool
:param str name: name of the Endpoints (required)
:param str namespace: object name and auth scope, such as for teams and projects (required)
:param str pretty: If 'true', then the output is pretty printed.
:param str dry_run: When present, indicates that modifications should not be persisted. An invalid or unrecognized dryRun directive will result in an error response and no further processing of the request. Valid values are: - All: all dry run stages will be processed
:param int grace_period_seconds: The duration in seconds before the object should be deleted. Value must be non-negative integer. The value zero indicates delete immediately. If this value is nil, the default grace period for the specified type will be used. Defaults to a per object value if not specified. zero means delete immediately.
:param bool orphan_dependents: Deprecated: please use the PropagationPolicy, this field will be deprecated in 1.7. Should the dependent objects be orphaned. If true/false, the \"orphan\" finalizer will be added to/removed from the object's finalizers list. Either this field or PropagationPolicy may be set, but not both.
:param str propagation_policy: Whether and how garbage collection will be performed. Either this field or OrphanDependents may be set, but not both. The default policy is decided by the existing finalizer set in the metadata.finalizers and the resource-specific default policy. Acceptable values are: 'Orphan' - orphan the dependents; 'Background' - allow the garbage collector to delete the dependents in the background; 'Foreground' - a cascading policy that deletes all dependents in the foreground.
:param V1DeleteOptions body:
:return: V1Status
If the method is called asynchronously,
returns the request thread.
|
def add_line(self, line, source, *lineno):
"""Add a line to the result"""
self.result.append(line, source, *lineno)
|
Add a line to the result
|
def skipgram_fasttext_batch(centers, contexts, num_tokens, subword_lookup,
dtype, index_dtype):
"""Create a batch for SG training objective with subwords."""
contexts = mx.nd.array(contexts[2], dtype=index_dtype)
data, row, col = subword_lookup(centers)
centers = mx.nd.array(centers, dtype=index_dtype)
centers_csr = mx.nd.sparse.csr_matrix(
(data, (row, col)), dtype=dtype,
shape=(len(centers), num_tokens)) # yapf: disable
return centers_csr, contexts, centers
|
Create a batch for SG training objective with subwords.
|
def Nu_vertical_cylinder_Eigenson_Morgan(Pr, Gr, turbulent=None):
r'''Calculates Nusselt number for natural convection around a vertical
isothermal cylinder according to the results of [1]_ correlated by [2]_,
presented in [3]_ and in more detail in [4]_.
.. math::
Nu_H = 0.48 Ra_H^{0.25},\; 10^{9} < Ra
Nu_H = 51.5 + 0.0000726 Ra_H^{0.63},\; 10^{9} < Ra < 1.69 \times 10^{10}
Nu_H = 0.148 Ra_H^{1/3} - 127.6 ,\; 1.69 \times 10^{10} < Ra
Parameters
----------
Pr : float
Prandtl number [-]
Gr : float
Grashof number [-]
turbulent : bool or None, optional
Whether or not to force the correlation to return the turbulent
result; will return the laminar regime if False; leave as None for
automatic selection
Returns
-------
Nu : float
Nusselt number, [-]
Notes
-----
Author presents results as appropriate for both flat plates and cylinders.
Height of 2.5 m with diameters of 2.4, 7.55, 15, 35, and 50 mm. Another
experiment of diameter 58 mm and length of 6.5 m was considered.
Cylinder of diameters 0.475 cm to 7.62 cm, L/D from 8 to 127.Transition
between ranges is not smooth. If outside of range, no warning is given.
Formulas are presented similarly in [3]_ and [4]_, but only [4]_ shows
the transition formula.
Examples
--------
>>> Nu_vertical_cylinder_Eigenson_Morgan(0.7, 2E10)
230.55946525499715
References
----------
.. [1] Eigenson L (1940). Les lois gouvernant la transmission de la chaleur
aux gaz biatomiques par les parois des cylindres verticaux dans le cas
de convection naturelle. Dokl Akad Nauk SSSR 26:440-444
.. [2] Morgan, V.T., The Overall Convective Heat Transfer from Smooth
Circular Cylinders, in Advances in Heat Transfer, eds. T.F. Irvin and
J.P. Hartnett, V 11, 199-264, 1975.
.. [3] Popiel, Czeslaw O. "Free Convection Heat Transfer from Vertical
Slender Cylinders: A Review." Heat Transfer Engineering 29, no. 6
(June 1, 2008): 521-36. doi:10.1080/01457630801891557.
.. [4] Boetcher, Sandra K. S. "Natural Convection Heat Transfer From
Vertical Cylinders." In Natural Convection from Circular Cylinders,
23-42. Springer, 2014.
'''
Ra = Pr*Gr
if turbulent or (Ra > 1.69E10 and turbulent is None):
return 0.148*Ra**(1/3.) - 127.6
elif 1E9 < Ra < 1.69E10 and turbulent is not False:
return 51.5 + 0.0000726*Ra**0.63
else:
return 0.48*Ra**0.25
|
r'''Calculates Nusselt number for natural convection around a vertical
isothermal cylinder according to the results of [1]_ correlated by [2]_,
presented in [3]_ and in more detail in [4]_.
.. math::
Nu_H = 0.48 Ra_H^{0.25},\; 10^{9} < Ra
Nu_H = 51.5 + 0.0000726 Ra_H^{0.63},\; 10^{9} < Ra < 1.69 \times 10^{10}
Nu_H = 0.148 Ra_H^{1/3} - 127.6 ,\; 1.69 \times 10^{10} < Ra
Parameters
----------
Pr : float
Prandtl number [-]
Gr : float
Grashof number [-]
turbulent : bool or None, optional
Whether or not to force the correlation to return the turbulent
result; will return the laminar regime if False; leave as None for
automatic selection
Returns
-------
Nu : float
Nusselt number, [-]
Notes
-----
Author presents results as appropriate for both flat plates and cylinders.
Height of 2.5 m with diameters of 2.4, 7.55, 15, 35, and 50 mm. Another
experiment of diameter 58 mm and length of 6.5 m was considered.
Cylinder of diameters 0.475 cm to 7.62 cm, L/D from 8 to 127.Transition
between ranges is not smooth. If outside of range, no warning is given.
Formulas are presented similarly in [3]_ and [4]_, but only [4]_ shows
the transition formula.
Examples
--------
>>> Nu_vertical_cylinder_Eigenson_Morgan(0.7, 2E10)
230.55946525499715
References
----------
.. [1] Eigenson L (1940). Les lois gouvernant la transmission de la chaleur
aux gaz biatomiques par les parois des cylindres verticaux dans le cas
de convection naturelle. Dokl Akad Nauk SSSR 26:440-444
.. [2] Morgan, V.T., The Overall Convective Heat Transfer from Smooth
Circular Cylinders, in Advances in Heat Transfer, eds. T.F. Irvin and
J.P. Hartnett, V 11, 199-264, 1975.
.. [3] Popiel, Czeslaw O. "Free Convection Heat Transfer from Vertical
Slender Cylinders: A Review." Heat Transfer Engineering 29, no. 6
(June 1, 2008): 521-36. doi:10.1080/01457630801891557.
.. [4] Boetcher, Sandra K. S. "Natural Convection Heat Transfer From
Vertical Cylinders." In Natural Convection from Circular Cylinders,
23-42. Springer, 2014.
|
def from_table(table, engine, limit=None):
"""
Select data in a database table and put into prettytable.
Create a :class:`prettytable.PrettyTable` from :class:`sqlalchemy.Table`.
**中文文档**
将数据表中的数据放入prettytable中.
"""
sql = select([table])
if limit is not None:
sql = sql.limit(limit)
result_proxy = engine.execute(sql)
return from_db_cursor(result_proxy.cursor)
|
Select data in a database table and put into prettytable.
Create a :class:`prettytable.PrettyTable` from :class:`sqlalchemy.Table`.
**中文文档**
将数据表中的数据放入prettytable中.
|
def _get_converter(self, convert_to=None):
'''see convert and save. This is a helper function that returns
the proper conversion function, but doesn't call it. We do this
so that in the case of convert, we do the conversion and return
a string. In the case of save, we save the recipe to file for the
user.
Parameters
==========
convert_to: a string either docker or singularity, if a different
Returns
=======
converter: the function to do the conversion
'''
conversion = self._get_conversion_type(convert_to)
# Perform conversion
if conversion == "singularity":
return self.docker2singularity
return self.singularity2docker
|
see convert and save. This is a helper function that returns
the proper conversion function, but doesn't call it. We do this
so that in the case of convert, we do the conversion and return
a string. In the case of save, we save the recipe to file for the
user.
Parameters
==========
convert_to: a string either docker or singularity, if a different
Returns
=======
converter: the function to do the conversion
|
def get_object(self):
"""
Get the object for previewing.
Raises a http404 error if the object is not found.
"""
obj = super(DeleteView, self).get_object()
if not obj:
raise http.Http404
return obj
|
Get the object for previewing.
Raises a http404 error if the object is not found.
|
def partition(self):
"""Partitions all tasks into groups of tasks. A group is
represented by a task_store object that indexes a sub-
set of tasks."""
step = int(math.ceil(self.num_tasks / float(self.partitions)))
if self.indices == None:
slice_ind = list(range(0, self.num_tasks, step))
for start in slice_ind:
yield self.__class__(self.partitions,
list(range(start, start + step)))
else:
slice_ind = list(range(0, len(self.indices), step))
for start in slice_ind:
if start + step <= len(self.indices):
yield self.__class__(self.partitions,
self.indices[start: start + step])
else:
yield self.__class__(self.partitions, self.indices[start:])
|
Partitions all tasks into groups of tasks. A group is
represented by a task_store object that indexes a sub-
set of tasks.
|
async def _connect_polling(self, url, headers, engineio_path):
"""Establish a long-polling connection to the Engine.IO server."""
if aiohttp is None: # pragma: no cover
self.logger.error('aiohttp not installed -- cannot make HTTP '
'requests!')
return
self.base_url = self._get_engineio_url(url, engineio_path, 'polling')
self.logger.info('Attempting polling connection to ' + self.base_url)
r = await self._send_request(
'GET', self.base_url + self._get_url_timestamp(), headers=headers)
if r is None:
self._reset()
raise exceptions.ConnectionError(
'Connection refused by the server')
if r.status != 200:
raise exceptions.ConnectionError(
'Unexpected status code {} in server response'.format(
r.status))
try:
p = payload.Payload(encoded_payload=await r.read())
except ValueError:
six.raise_from(exceptions.ConnectionError(
'Unexpected response from server'), None)
open_packet = p.packets[0]
if open_packet.packet_type != packet.OPEN:
raise exceptions.ConnectionError(
'OPEN packet not returned by server')
self.logger.info(
'Polling connection accepted with ' + str(open_packet.data))
self.sid = open_packet.data['sid']
self.upgrades = open_packet.data['upgrades']
self.ping_interval = open_packet.data['pingInterval'] / 1000.0
self.ping_timeout = open_packet.data['pingTimeout'] / 1000.0
self.current_transport = 'polling'
self.base_url += '&sid=' + self.sid
self.state = 'connected'
client.connected_clients.append(self)
await self._trigger_event('connect', run_async=False)
for pkt in p.packets[1:]:
await self._receive_packet(pkt)
if 'websocket' in self.upgrades and 'websocket' in self.transports:
# attempt to upgrade to websocket
if await self._connect_websocket(url, headers, engineio_path):
# upgrade to websocket succeeded, we're done here
return
self.ping_loop_task = self.start_background_task(self._ping_loop)
self.write_loop_task = self.start_background_task(self._write_loop)
self.read_loop_task = self.start_background_task(
self._read_loop_polling)
|
Establish a long-polling connection to the Engine.IO server.
|
async def handle_client_ping(self, client_addr, _: Ping):
""" Handle an Ping message. Pong the client """
await ZMQUtils.send_with_addr(self._client_socket, client_addr, Pong())
|
Handle an Ping message. Pong the client
|
def get_help_datapacks(module_name, server_prefix):
"""
Get the help datapacks for a module
Args:
module_name (str): The module to get help data for
server_prefix (str): The command prefix for this server
Returns:
datapacks (list): The help datapacks for the module
"""
_dir = os.path.realpath(
os.path.join(os.getcwd(), os.path.dirname(__file__)))
module_dir = "{}/../{}".format(_dir, module_name, "_help.json")
if os.path.isdir(module_dir):
module_help_path = "{}/{}".format(module_dir, "_help.json")
if os.path.isfile(module_help_path):
return helptools.get_help_datapacks(module_help_path, server_prefix)
else:
return [("Help", "{} does not have a help.json file".format(module_name), False)]
else:
return [("Help", "No module found called {}".format(module_name), False)]
|
Get the help datapacks for a module
Args:
module_name (str): The module to get help data for
server_prefix (str): The command prefix for this server
Returns:
datapacks (list): The help datapacks for the module
|
def walk(self, dispatcher, node):
"""
Walk through the node with a custom dispatcher for extraction of
details that are required.
"""
deferrable_handlers = {
Declare: self.declare,
Resolve: self.register_reference,
}
layout_handlers = {
PushScope: self.push_scope,
PopScope: self.pop_scope,
PushCatch: self.push_catch,
# should really be different, but given that the
# mechanism is within the same tree, the only difference
# would be sanity check which should have been tested in
# the first place in the primitives anyway.
PopCatch: self.pop_scope,
}
if not self.shadow_funcname:
layout_handlers[ResolveFuncName] = self.shadow_reference
local_dispatcher = Dispatcher(
definitions=dict(dispatcher),
token_handler=None,
layout_handlers=layout_handlers,
deferrable_handlers=deferrable_handlers,
)
return list(walk(local_dispatcher, node))
|
Walk through the node with a custom dispatcher for extraction of
details that are required.
|
def getLipdNames(D=None):
"""
Get a list of all LiPD names in the library
| Example
| names = lipd.getLipdNames(D)
:return list f_list: File list
"""
_names = []
try:
if not D:
print("Error: LiPD data not provided. Pass LiPD data into the function.")
else:
_names = D.keys()
except Exception:
pass
return _names
|
Get a list of all LiPD names in the library
| Example
| names = lipd.getLipdNames(D)
:return list f_list: File list
|
def _get_distance_scaling(self, C, mag, rhypo):
"""
Returns the distance scalig term
"""
return (C["a3"] * np.log(rhypo)) + (C["a4"] + C["a5"] * mag) * rhypo
|
Returns the distance scalig term
|
def from_rgb(r, g=None, b=None):
"""
Return the nearest xterm 256 color code from rgb input.
"""
c = r if isinstance(r, list) else [r, g, b]
best = {}
for index, item in enumerate(colors):
d = __distance(item, c)
if(not best or d <= best['distance']):
best = {'distance': d, 'index': index}
if 'index' in best:
return best['index']
else:
return 1
|
Return the nearest xterm 256 color code from rgb input.
|
def draw_address(canvas):
""" Draws the business address """
business_details = (
u'COMPANY NAME LTD',
u'STREET',
u'TOWN',
U'COUNTY',
U'POSTCODE',
U'COUNTRY',
u'',
u'',
u'Phone: +00 (0) 000 000 000',
u'Email: example@example.com',
u'Website: www.example.com',
u'Reg No: 00000000'
)
canvas.setFont('Helvetica', 9)
textobject = canvas.beginText(13 * cm, -2.5 * cm)
for line in business_details:
textobject.textLine(line)
canvas.drawText(textobject)
|
Draws the business address
|
def list_objects(self, query=None, limit=-1, offset=-1):
"""List of all objects in the database. Optinal parameter limit and
offset for pagination. A dictionary of key,value-pairs can be given as
addictional query condition for document properties.
Parameters
----------
query : Dictionary
Filter objects by property-value pairs defined by dictionary.
limit : int
Limit number of items in the result set
offset : int
Set offset in list (order as defined by object store)
Returns
-------
ObjectListing
"""
result = []
# Build the document query
doc = {'active' : True}
if not query is None:
for key in query:
doc[key] = query[key]
# Iterate over all objects in the MongoDB collection and add them to
# the result
coll = self.collection.find(doc).sort([('timestamp', pymongo.DESCENDING)])
count = 0
for document in coll:
# We are done if the limit is reached. Test first in case limit is
# zero.
if limit >= 0 and len(result) == limit:
break
if offset < 0 or count >= offset:
result.append(self.from_dict(document))
count += 1
return ObjectListing(result, offset, limit, coll.count())
|
List of all objects in the database. Optinal parameter limit and
offset for pagination. A dictionary of key,value-pairs can be given as
addictional query condition for document properties.
Parameters
----------
query : Dictionary
Filter objects by property-value pairs defined by dictionary.
limit : int
Limit number of items in the result set
offset : int
Set offset in list (order as defined by object store)
Returns
-------
ObjectListing
|
def get_info(pyfile):
'''Retrieve dunder values from a pyfile'''
info = {}
info_re = re.compile(r"^__(\w+)__ = ['\"](.*)['\"]")
with open(pyfile, 'r') as f:
for line in f.readlines():
match = info_re.search(line)
if match:
info[match.group(1)] = match.group(2)
return info
|
Retrieve dunder values from a pyfile
|
def render_html(root, options=0, extensions=None):
"""Render a given syntax tree as HTML.
Args:
root (Any): The reference to the root node of the syntax tree.
options (int): The cmark options.
extensions (Any): The reference to the syntax extensions, generally
from :func:`parser_get_syntax_extensions`
Returns:
str: The rendered HTML.
"""
if extensions is None:
extensions = _cmark.ffi.NULL
raw_result = _cmark.lib.cmark_render_html(
root, options, extensions)
return _cmark.ffi.string(raw_result).decode('utf-8')
|
Render a given syntax tree as HTML.
Args:
root (Any): The reference to the root node of the syntax tree.
options (int): The cmark options.
extensions (Any): The reference to the syntax extensions, generally
from :func:`parser_get_syntax_extensions`
Returns:
str: The rendered HTML.
|
def view(self, rec):
'''
View the page.
'''
kwd = {
'pager': '',
}
self.render('wiki_page/page_view.html',
postinfo=rec,
kwd=kwd,
author=rec.user_name,
format_date=tools.format_date,
userinfo=self.userinfo,
cfg=CMS_CFG)
|
View the page.
|
def set_env_info(self, env_state=None, env_id=None, episode_id=None, bump_past=None, fps=None):
"""Atomically set the environment state tracking variables.
"""
with self.cv:
if env_id is None:
env_id = self._env_id
if env_state is None:
env_state = self._env_state
if fps is None:
fps = self._fps
self.cv.notifyAll()
old_episode_id = self._episode_id
if self.primary:
current_id = parse_episode_id(self._episode_id)
# Bump when changing from resetting -> running
if bump_past is not None:
bump_past_id = parse_episode_id(bump_past)
current_id = max(bump_past_id+1, current_id+1)
elif env_state == 'resetting':
current_id += 1
self._episode_id = generate_episode_id(current_id)
assert self._fps or fps
elif episode_id is False:
# keep the same episode_id: this is just us proactive
# setting the state to resetting after a done=True
pass
else:
assert episode_id is not None, "No episode_id provided. This likely indicates a misbehaving server, which did not send an episode_id"
self._episode_id = episode_id
self._fps = fps
logger.info('[%s] Changing env_state: %s (env_id=%s) -> %s (env_id=%s) (episode_id: %s->%s, fps=%s)', self.label, self._env_state, self._env_id, env_state, env_id, old_episode_id, self._episode_id, self._fps)
self._env_state = env_state
if env_id is not None:
self._env_id = env_id
return self.env_info()
|
Atomically set the environment state tracking variables.
|
def __build_config_block(self, config_block_node):
"""parse `config_block` in each section
Args:
config_block_node (TreeNode): Description
Returns:
[line_node1, line_node2, ...]
"""
node_lists = []
for line_node in config_block_node:
if isinstance(line_node, pegnode.ConfigLine):
node_lists.append(self.__build_config(line_node))
elif isinstance(line_node, pegnode.OptionLine):
node_lists.append(self.__build_option(line_node))
elif isinstance(line_node, pegnode.ServerLine):
node_lists.append(
self.__build_server(line_node))
elif isinstance(line_node, pegnode.BindLine):
node_lists.append(
self.__build_bind(line_node))
elif isinstance(line_node, pegnode.AclLine):
node_lists.append(
self.__build_acl(line_node))
elif isinstance(line_node, pegnode.BackendLine):
node_lists.append(
self.__build_usebackend(line_node))
elif isinstance(line_node, pegnode.UserLine):
node_lists.append(
self.__build_user(line_node))
elif isinstance(line_node, pegnode.GroupLine):
node_lists.append(
self.__build_group(line_node))
else:
# may blank_line, comment_line
pass
return node_lists
|
parse `config_block` in each section
Args:
config_block_node (TreeNode): Description
Returns:
[line_node1, line_node2, ...]
|
def save(self, items):
'''
Save a series of items to a sequence.
Args:
items (tuple): The series of items to save into the sequence.
Returns:
The index of the first item
'''
rows = []
indx = self.indx
size = 0
tick = s_common.now()
for item in items:
byts = s_msgpack.en(item)
size += len(byts)
lkey = s_common.int64en(indx)
indx += 1
rows.append((lkey, byts))
self.slab.putmulti(rows, append=True, db=self.db)
took = s_common.now() - tick
origindx = self.indx
self.indx = indx
return {'indx': indx, 'size': size, 'count': len(items), 'time': tick, 'took': took}
return origindx
|
Save a series of items to a sequence.
Args:
items (tuple): The series of items to save into the sequence.
Returns:
The index of the first item
|
def get_mesh_dict(self):
"""Returns calculated mesh sampling phonons
Returns
-------
dict
keys: qpoints, weights, frequencies, eigenvectors, and
group_velocities
Each value for the corresponding key is explained as below.
qpoints: ndarray
q-points in reduced coordinates of reciprocal lattice
dtype='double'
shape=(ir-grid points, 3)
weights: ndarray
Geometric q-point weights. Its sum is the number of grid
points.
dtype='intc'
shape=(ir-grid points,)
frequencies: ndarray
Phonon frequencies at ir-grid points. Imaginary frequenies are
represented by negative real numbers.
dtype='double'
shape=(ir-grid points, bands)
eigenvectors: ndarray
Phonon eigenvectors at ir-grid points. See the data structure
at np.linalg.eigh.
dtype='complex'
shape=(ir-grid points, bands, bands)
group_velocities: ndarray
Phonon group velocities at ir-grid points.
dtype='double'
shape=(ir-grid points, bands, 3)
"""
if self._mesh is None:
msg = ("run_mesh has to be done.")
raise RuntimeError(msg)
retdict = {'qpoints': self._mesh.qpoints,
'weights': self._mesh.weights,
'frequencies': self._mesh.frequencies,
'eigenvectors': self._mesh.eigenvectors,
'group_velocities': self._mesh.group_velocities}
return retdict
|
Returns calculated mesh sampling phonons
Returns
-------
dict
keys: qpoints, weights, frequencies, eigenvectors, and
group_velocities
Each value for the corresponding key is explained as below.
qpoints: ndarray
q-points in reduced coordinates of reciprocal lattice
dtype='double'
shape=(ir-grid points, 3)
weights: ndarray
Geometric q-point weights. Its sum is the number of grid
points.
dtype='intc'
shape=(ir-grid points,)
frequencies: ndarray
Phonon frequencies at ir-grid points. Imaginary frequenies are
represented by negative real numbers.
dtype='double'
shape=(ir-grid points, bands)
eigenvectors: ndarray
Phonon eigenvectors at ir-grid points. See the data structure
at np.linalg.eigh.
dtype='complex'
shape=(ir-grid points, bands, bands)
group_velocities: ndarray
Phonon group velocities at ir-grid points.
dtype='double'
shape=(ir-grid points, bands, 3)
|
def logsumexp(arr, axis=0):
"""Computes the sum of arr assuming arr is in the log domain.
Returns log(sum(exp(arr))) while minimizing the possibility of
over/underflow.
Examples
--------
>>> import numpy as np
>>> from sklearn.utils.extmath import logsumexp
>>> a = np.arange(10)
>>> np.log(np.sum(np.exp(a)))
9.4586297444267107
>>> logsumexp(a)
9.4586297444267107
"""
if axis == 0:
pass
elif axis == 1:
arr = arr.T
else:
raise NotImplementedError
# Use the max to normalize, as with the log this is what accumulates
# the less errors
vmax = arr.max(axis=0)
out = da.log(da.sum(da.exp(arr - vmax), axis=0))
out += vmax
return out
|
Computes the sum of arr assuming arr is in the log domain.
Returns log(sum(exp(arr))) while minimizing the possibility of
over/underflow.
Examples
--------
>>> import numpy as np
>>> from sklearn.utils.extmath import logsumexp
>>> a = np.arange(10)
>>> np.log(np.sum(np.exp(a)))
9.4586297444267107
>>> logsumexp(a)
9.4586297444267107
|
def execute(self, input_data):
''' This worker computes meta data for any file type. '''
raw_bytes = input_data['sample']['raw_bytes']
self.meta['md5'] = hashlib.md5(raw_bytes).hexdigest()
self.meta['tags'] = input_data['tags']['tags']
self.meta['type_tag'] = input_data['sample']['type_tag']
with magic.Magic() as mag:
self.meta['file_type'] = mag.id_buffer(raw_bytes[:1024])
with magic.Magic(flags=magic.MAGIC_MIME_TYPE) as mag:
self.meta['mime_type'] = mag.id_buffer(raw_bytes[:1024])
with magic.Magic(flags=magic.MAGIC_MIME_ENCODING) as mag:
try:
self.meta['encoding'] = mag.id_buffer(raw_bytes[:1024])
except magic.MagicError:
self.meta['encoding'] = 'unknown'
self.meta['file_size'] = len(raw_bytes)
self.meta['filename'] = input_data['sample']['filename']
self.meta['import_time'] = input_data['sample']['import_time']
self.meta['customer'] = input_data['sample']['customer']
self.meta['length'] = input_data['sample']['length']
return self.meta
|
This worker computes meta data for any file type.
|
def start_session_if_none(self):
"""
Starts a session it is not yet initialized.
"""
if not (self._screen_id and self._session):
self.update_screen_id()
self._session = YouTubeSession(screen_id=self._screen_id)
|
Starts a session it is not yet initialized.
|
def s3_write(self, log, remote_log_location, append=True):
"""
Writes the log to the remote_log_location. Fails silently if no hook
was created.
:param log: the log to write to the remote_log_location
:type log: str
:param remote_log_location: the log's location in remote storage
:type remote_log_location: str (path)
:param append: if False, any existing log file is overwritten. If True,
the new log is appended to any existing logs.
:type append: bool
"""
if append and self.s3_log_exists(remote_log_location):
old_log = self.s3_read(remote_log_location)
log = '\n'.join([old_log, log]) if old_log else log
try:
self.hook.load_string(
log,
key=remote_log_location,
replace=True,
encrypt=configuration.conf.getboolean('core', 'ENCRYPT_S3_LOGS'),
)
except Exception:
self.log.exception('Could not write logs to %s', remote_log_location)
|
Writes the log to the remote_log_location. Fails silently if no hook
was created.
:param log: the log to write to the remote_log_location
:type log: str
:param remote_log_location: the log's location in remote storage
:type remote_log_location: str (path)
:param append: if False, any existing log file is overwritten. If True,
the new log is appended to any existing logs.
:type append: bool
|
def recommend_delete(self, num_iid, session):
'''taobao.item.recommend.delete 取消橱窗推荐一个商品
取消当前用户指定商品的橱窗推荐状态 这个Item所属卖家从传入的session中获取,需要session绑定'''
request = TOPRequest('taobao.item.recommend.delete')
request['num_iid'] = num_iid
self.create(self.execute(request, session)['item'])
return self
|
taobao.item.recommend.delete 取消橱窗推荐一个商品
取消当前用户指定商品的橱窗推荐状态 这个Item所属卖家从传入的session中获取,需要session绑定
|
def config(name='EMAIL_URL', default='console://'):
"""Returns a dictionary with EMAIL_* settings from EMAIL_URL."""
conf = {}
s = env(name, default)
if s:
conf = parse_email_url(s)
return conf
|
Returns a dictionary with EMAIL_* settings from EMAIL_URL.
|
def from_char(
cls, char, name=None, width=None, fill_char=None,
bounce=False, reverse=False, back_char=None, wrapper=None):
""" Create progress bar frames from a "moving" character.
The frames simulate movement of the character, from left to
right through empty space (`fill_char`).
Arguments:
char : Character to move across the bar.
name : Name for the new BarSet.
width : Width of the progress bar.
Default: 25
fill_char : Character to fill empty space.
Default: ' ' (space)
bounce : Whether the frames should simulate a bounce
from one side to another.
Default: False
reverse : Whether the character should start on the
right.
Default: False
back_char : Character to use when "bouncing" backward.
Default: `char`
"""
return cls(
cls._generate_move(
char,
width=width or cls.default_width,
fill_char=str(fill_char or cls.default_fill_char),
bounce=bounce,
reverse=reverse,
back_char=back_char,
),
name=name,
wrapper=wrapper or cls.default_wrapper,
)
|
Create progress bar frames from a "moving" character.
The frames simulate movement of the character, from left to
right through empty space (`fill_char`).
Arguments:
char : Character to move across the bar.
name : Name for the new BarSet.
width : Width of the progress bar.
Default: 25
fill_char : Character to fill empty space.
Default: ' ' (space)
bounce : Whether the frames should simulate a bounce
from one side to another.
Default: False
reverse : Whether the character should start on the
right.
Default: False
back_char : Character to use when "bouncing" backward.
Default: `char`
|
def _wkt(eivals, timescales, normalization, normalized_laplacian):
"""
Computes wave kernel trace from given eigenvalues, timescales, and normalization.
For precise definition, please refer to "NetLSD: Hearing the Shape of a Graph" by A. Tsitsulin, D. Mottin, P. Karras, A. Bronstein, E. Müller. Published at KDD'18.
Parameters
----------
eivals : numpy.ndarray
Eigenvalue vector
timescales : numpy.ndarray
Vector of discrete timesteps for the kernel computation
normalization : str or numpy.ndarray
Either 'empty', 'complete' or None.
If None or any ther value, return unnormalized wave kernel trace.
For the details how 'empty' and 'complete' are computed, please refer to the paper.
If np.ndarray, they are treated as exact normalization constants
normalized_laplacian: bool
Defines whether the eigenvalues came from the normalized Laplacian. It only affects 'complete' normalization.
Returns
-------
numpy.ndarray
Wave kernel trace signature
"""
nv = eivals.shape[0]
wkt = np.zeros(timescales.shape)
for idx, t in enumerate(timescales):
wkt[idx] = np.sum(np.exp(-1j * t * eivals))
if isinstance(normalization, np.ndarray):
return hkt / normalization
if normalization == 'empty' or normalization == True:
return wkt / nv
if normalization == 'complete':
if normalized_laplacian:
return wkt / (1 + (nv - 1) * np.cos(timescales))
else:
return wkt / (1 + (nv - 1) * np.cos(nv * timescales))
return wkt
|
Computes wave kernel trace from given eigenvalues, timescales, and normalization.
For precise definition, please refer to "NetLSD: Hearing the Shape of a Graph" by A. Tsitsulin, D. Mottin, P. Karras, A. Bronstein, E. Müller. Published at KDD'18.
Parameters
----------
eivals : numpy.ndarray
Eigenvalue vector
timescales : numpy.ndarray
Vector of discrete timesteps for the kernel computation
normalization : str or numpy.ndarray
Either 'empty', 'complete' or None.
If None or any ther value, return unnormalized wave kernel trace.
For the details how 'empty' and 'complete' are computed, please refer to the paper.
If np.ndarray, they are treated as exact normalization constants
normalized_laplacian: bool
Defines whether the eigenvalues came from the normalized Laplacian. It only affects 'complete' normalization.
Returns
-------
numpy.ndarray
Wave kernel trace signature
|
def normalize_locale(loc):
'''
Format a locale specifier according to the format returned by `locale -a`.
'''
comps = split_locale(loc)
comps['territory'] = comps['territory'].upper()
comps['codeset'] = comps['codeset'].lower().replace('-', '')
comps['charmap'] = ''
return join_locale(comps)
|
Format a locale specifier according to the format returned by `locale -a`.
|
def _GetStatus(self):
"""Retrieves status information.
Returns:
dict[str, object]: status attributes, indexed by name.
"""
if self._parser_mediator:
number_of_produced_events = (
self._parser_mediator.number_of_produced_events)
number_of_produced_sources = (
self._parser_mediator.number_of_produced_event_sources)
number_of_produced_warnings = (
self._parser_mediator.number_of_produced_warnings)
else:
number_of_produced_events = None
number_of_produced_sources = None
number_of_produced_warnings = None
if self._extraction_worker and self._parser_mediator:
last_activity_timestamp = max(
self._extraction_worker.last_activity_timestamp,
self._parser_mediator.last_activity_timestamp)
processing_status = self._extraction_worker.processing_status
else:
last_activity_timestamp = 0.0
processing_status = self._status
task_identifier = getattr(self._task, 'identifier', '')
if self._process_information:
used_memory = self._process_information.GetUsedMemory() or 0
else:
used_memory = 0
if self._memory_profiler:
self._memory_profiler.Sample('main', used_memory)
# XML RPC does not support integer values > 2 GiB so we format them
# as a string.
used_memory = '{0:d}'.format(used_memory)
status = {
'display_name': self._current_display_name,
'identifier': self._name,
'last_activity_timestamp': last_activity_timestamp,
'number_of_consumed_event_tags': None,
'number_of_consumed_events': self._number_of_consumed_events,
'number_of_consumed_sources': self._number_of_consumed_sources,
'number_of_consumed_warnings': None,
'number_of_produced_event_tags': None,
'number_of_produced_events': number_of_produced_events,
'number_of_produced_sources': number_of_produced_sources,
'number_of_produced_warnings': number_of_produced_warnings,
'processing_status': processing_status,
'task_identifier': task_identifier,
'used_memory': used_memory}
return status
|
Retrieves status information.
Returns:
dict[str, object]: status attributes, indexed by name.
|
def _spectrogram(y=None, S=None, n_fft=2048, hop_length=512, power=1,
win_length=None, window='hann', center=True, pad_mode='reflect'):
'''Helper function to retrieve a magnitude spectrogram.
This is primarily used in feature extraction functions that can operate on
either audio time-series or spectrogram input.
Parameters
----------
y : None or np.ndarray [ndim=1]
If provided, an audio time series
S : None or np.ndarray
Spectrogram input, optional
n_fft : int > 0
STFT window size
hop_length : int > 0
STFT hop length
power : float > 0
Exponent for the magnitude spectrogram,
e.g., 1 for energy, 2 for power, etc.
win_length : int <= n_fft [scalar]
Each frame of audio is windowed by `window()`.
The window will be of length `win_length` and then padded
with zeros to match `n_fft`.
If unspecified, defaults to ``win_length = n_fft``.
window : string, tuple, number, function, or np.ndarray [shape=(n_fft,)]
- a window specification (string, tuple, or number);
see `scipy.signal.get_window`
- a window function, such as `scipy.signal.hanning`
- a vector or array of length `n_fft`
.. see also:: `filters.get_window`
center : boolean
- If `True`, the signal `y` is padded so that frame
`t` is centered at `y[t * hop_length]`.
- If `False`, then frame `t` begins at `y[t * hop_length]`
pad_mode : string
If `center=True`, the padding mode to use at the edges of the signal.
By default, STFT uses reflection padding.
Returns
-------
S_out : np.ndarray [dtype=np.float32]
- If `S` is provided as input, then `S_out == S`
- Else, `S_out = |stft(y, ...)|**power`
n_fft : int > 0
- If `S` is provided, then `n_fft` is inferred from `S`
- Else, copied from input
'''
if S is not None:
# Infer n_fft from spectrogram shape
n_fft = 2 * (S.shape[0] - 1)
else:
# Otherwise, compute a magnitude spectrogram from input
S = np.abs(stft(y, n_fft=n_fft, hop_length=hop_length,
win_length=win_length, center=center,
window=window, pad_mode=pad_mode))**power
return S, n_fft
|
Helper function to retrieve a magnitude spectrogram.
This is primarily used in feature extraction functions that can operate on
either audio time-series or spectrogram input.
Parameters
----------
y : None or np.ndarray [ndim=1]
If provided, an audio time series
S : None or np.ndarray
Spectrogram input, optional
n_fft : int > 0
STFT window size
hop_length : int > 0
STFT hop length
power : float > 0
Exponent for the magnitude spectrogram,
e.g., 1 for energy, 2 for power, etc.
win_length : int <= n_fft [scalar]
Each frame of audio is windowed by `window()`.
The window will be of length `win_length` and then padded
with zeros to match `n_fft`.
If unspecified, defaults to ``win_length = n_fft``.
window : string, tuple, number, function, or np.ndarray [shape=(n_fft,)]
- a window specification (string, tuple, or number);
see `scipy.signal.get_window`
- a window function, such as `scipy.signal.hanning`
- a vector or array of length `n_fft`
.. see also:: `filters.get_window`
center : boolean
- If `True`, the signal `y` is padded so that frame
`t` is centered at `y[t * hop_length]`.
- If `False`, then frame `t` begins at `y[t * hop_length]`
pad_mode : string
If `center=True`, the padding mode to use at the edges of the signal.
By default, STFT uses reflection padding.
Returns
-------
S_out : np.ndarray [dtype=np.float32]
- If `S` is provided as input, then `S_out == S`
- Else, `S_out = |stft(y, ...)|**power`
n_fft : int > 0
- If `S` is provided, then `n_fft` is inferred from `S`
- Else, copied from input
|
def checkKey(self, credentials):
"""
Retrieve the keys of the user specified by the credentials, and check
if one matches the blob in the credentials.
"""
filename = self._keyfile
if not os.path.exists(filename):
return 0
lines = open(filename).xreadlines()
for l in lines:
l2 = l.split()
if len(l2) < 2:
continue
try:
if base64.decodestring(l2[1]) == credentials.blob:
return 1
except binascii.Error:
continue
return 0
|
Retrieve the keys of the user specified by the credentials, and check
if one matches the blob in the credentials.
|
def perp(weights):
r"""Calculate the normalized perplexity :math:`\mathcal{P}` of samples
with ``weights`` :math:`\omega_i`. :math:`\mathcal{P}=0` is
terrible and :math:`\mathcal{P}=1` is perfect.
.. math::
\mathcal{P} = exp(H) / N
where
.. math::
H = - \sum_{i=1}^N \bar{\omega}_i log ~ \bar{\omega}_i
.. math::
\bar{\omega}_i = \frac{\omega_i}{\sum_i \omega_i}
:param weights:
Vector-like array; the samples' weights
"""
# normalize weights
w = _np.asarray(weights) / _np.sum(weights)
# mask zero weights
w = _np.ma.MaskedArray(w, copy=False, mask=(w == 0))
# avoid NaN due to log(0) by log(1)=0
entr = - _np.sum( w * _np.log(w.filled(1.0)))
return _np.exp(entr) / len(w)
|
r"""Calculate the normalized perplexity :math:`\mathcal{P}` of samples
with ``weights`` :math:`\omega_i`. :math:`\mathcal{P}=0` is
terrible and :math:`\mathcal{P}=1` is perfect.
.. math::
\mathcal{P} = exp(H) / N
where
.. math::
H = - \sum_{i=1}^N \bar{\omega}_i log ~ \bar{\omega}_i
.. math::
\bar{\omega}_i = \frac{\omega_i}{\sum_i \omega_i}
:param weights:
Vector-like array; the samples' weights
|
def print_splits(cliques, next_cliques):
"""Print shifts for new forks."""
splits = 0
for i, clique in enumerate(cliques):
parent, _ = clique
# If this fork continues
if parent in next_cliques:
# If there is a new fork, print a split
if len(next_cliques[parent]) > 1:
print_split(i + splits, len(cliques) + splits)
splits += 1
|
Print shifts for new forks.
|
def read_ipx(self, length):
"""Read Internetwork Packet Exchange.
Structure of IPX header [RFC 1132]:
Octets Bits Name Description
0 0 ipx.cksum Checksum
2 16 ipx.len Packet Length (header includes)
4 32 ipx.count Transport Control (hop count)
5 40 ipx.type Packet Type
6 48 ipx.dst Destination Address
18 144 ipx.src Source Address
"""
if length is None:
length = len(self)
_csum = self._read_fileng(2)
_tlen = self._read_unpack(2)
_ctrl = self._read_unpack(1)
_type = self._read_unpack(1)
_dsta = self._read_ipx_address()
_srca = self._read_ipx_address()
ipx = dict(
chksum=_csum,
len=_tlen,
count=_ctrl,
type=TYPE.get(_type),
dst=_dsta,
src=_srca,
)
proto = ipx['type']
length = ipx['len'] - 30
ipx['packet'] = self._read_packet(header=30, payload=length)
return self._decode_next_layer(ipx, proto, length)
|
Read Internetwork Packet Exchange.
Structure of IPX header [RFC 1132]:
Octets Bits Name Description
0 0 ipx.cksum Checksum
2 16 ipx.len Packet Length (header includes)
4 32 ipx.count Transport Control (hop count)
5 40 ipx.type Packet Type
6 48 ipx.dst Destination Address
18 144 ipx.src Source Address
|
def maintenance_center(self, storage_disk_xml=None):
""" Collector for how many disk(s) are in NetApp maintenance center
For more information on maintenance center please see:
bit.ly/19G4ptr
"""
disk_in_maintenance = 0
for filer_disk in storage_disk_xml:
disk_status = filer_disk.find('disk-raid-info/container-type')
if disk_status.text == 'maintenance':
disk_in_maintenance += 1
self.push('maintenance_disk', 'disk', disk_in_maintenance)
|
Collector for how many disk(s) are in NetApp maintenance center
For more information on maintenance center please see:
bit.ly/19G4ptr
|
def build_template(
initial_template=None,
image_list=None,
iterations = 3,
gradient_step = 0.2,
**kwargs ):
"""
Estimate an optimal template from an input image_list
ANTsR function: N/A
Arguments
---------
initial_template : ANTsImage
initialization for the template building
image_list : ANTsImages
images from which to estimate template
iterations : integer
number of template building iterations
gradient_step : scalar
for shape update gradient
kwargs : keyword args
extra arguments passed to ants registration
Returns
-------
ANTsImage
Example
-------
>>> import ants
>>> image = ants.image_read( ants.get_ants_data('r16') , 'float')
>>> image2 = ants.image_read( ants.get_ants_data('r27') , 'float')
>>> image3 = ants.image_read( ants.get_ants_data('r85') , 'float')
>>> timage = ants.build_template( image_list = ( image, image2, image3 ) )
"""
wt = 1.0 / len( image_list )
if initial_template is None:
initial_template = image_list[ 0 ] * 0
for i in range( len( image_list ) ):
initial_template = initial_template + image_list[ i ] * wt
xavg = initial_template.clone()
for i in range( iterations ):
for k in range( len( image_list ) ):
w1 = registration( xavg, image_list[k],
type_of_transform='SyN', **kwargs )
if k == 0:
wavg = iio.image_read( w1['fwdtransforms'][0] ) * wt
xavgNew = w1['warpedmovout'] * wt
else:
wavg = wavg + iio.image_read( w1['fwdtransforms'][0] ) * wt
xavgNew = xavgNew + w1['warpedmovout'] * wt
print( wavg.abs().mean() )
wscl = (-1.0) * gradient_step
wavg = wavg * wscl
wavgfn = mktemp(suffix='.nii.gz')
iio.image_write(wavg, wavgfn)
xavg = apply_transforms( xavg, xavg, wavgfn )
return xavg
|
Estimate an optimal template from an input image_list
ANTsR function: N/A
Arguments
---------
initial_template : ANTsImage
initialization for the template building
image_list : ANTsImages
images from which to estimate template
iterations : integer
number of template building iterations
gradient_step : scalar
for shape update gradient
kwargs : keyword args
extra arguments passed to ants registration
Returns
-------
ANTsImage
Example
-------
>>> import ants
>>> image = ants.image_read( ants.get_ants_data('r16') , 'float')
>>> image2 = ants.image_read( ants.get_ants_data('r27') , 'float')
>>> image3 = ants.image_read( ants.get_ants_data('r85') , 'float')
>>> timage = ants.build_template( image_list = ( image, image2, image3 ) )
|
def create_server_app(provider, password=None, cache=True, cache_timeout=3600,
debug=False):
"""
Create a DAAP server, based around a Flask application. The server requires
a content provider, server name and optionally, a password. The content
provider should return raw object data.
Object responses can be cached. This may dramatically speed up connections
for multiple clients. However, this is only limited to objects, not file
servings.
Note: in case the server is mounted as a WSGI app, make sure the server
passes the authorization header.
"""
# Create Flask App
app = Flask(__name__, static_folder=None)
app.debug = debug
# Setup cache
if cache:
if type(cache) == bool:
cache = SimpleCache()
else:
# Assume is a user-provided cache with a get-set method.
pass
else:
cache = False
#
# Context-aware helpers and decorators
#
def daap_wsgi_app(func):
"""
WSGI middleware which will modify the environment and strip 'daap://'
from the path. This way, Flask can route the request properly.
"""
@wraps(func)
def _inner(environment, start_response):
if environment["PATH_INFO"].startswith("daap://") or \
environment["PATH_INFO"].startswith("http://"):
environment["PATH_INFO"] = "/" + \
environment["PATH_INFO"].split("/", 3)[3]
return func(environment, start_response)
return _inner
app.wsgi_app = daap_wsgi_app(app.wsgi_app)
def daap_trace(func):
"""
Utility method for tracing function calls. Helps debugging malicious
requests (e.g. protocol changes). Is only enabled when `debug` is True.
Normally, exceptions are caught by Flask and handled as Bad Requests.
Any debugging is therefore lost.
"""
# Do not apply when debug is False.
if not debug:
return func
@wraps(func)
def _inner(*args, **kwargs):
try:
start = time.time()
result = func(*args, **kwargs)
logger.debug(
"Request handling took %.6f seconds",
time.time() - start)
return result
except:
logger.exception(
"Caught exception before raising it to Flask.")
raise
return _inner
def daap_unpack_args(func):
"""
Strip query string arguments and add them to the method as keyword
arguments. Since the query string keys are defined, values will be
converted to their approriate format. An exception will be thrown in
case a requested argument is not available, or if the value could not
be converted.
"""
# Create a function specific mapping, only for arguments appearing in
# the function declaration.
args, _, _, _ = inspect.getargspec(func)
mappings = [mapping for mapping in QS_MAPPING if mapping[1] in args]
@wraps(func)
def _inner(*args, **kwargs):
for key, kwarg, casting in mappings:
kwargs[kwarg] = casting(request.args[key])
return func(*args, **kwargs)
return _inner
def daap_authenticate(func):
"""
Check authorization header, if authorization is given. Returns 401
response if the authentication failed.
"""
# Do not apply when no password is set
if not password:
return func
@wraps(func)
def _inner(*args, **kwargs):
auth = request.authorization
if not auth or not auth.password == password:
return Response(None, 401, {
"WWW-Authenticate": "Basic realm=\"%s\"" %
provider.server.name})
return func(*args, **kwargs)
return _inner
app.authenticate = daap_authenticate
def daap_cache_response(func):
"""
Cache object responses if the cache has been initialized. The cache key
is based on the request path and the semi-constant request arguments.
The response is caches for as long as possible, which should not be a
problem if the cache is cleared if the provider has new data.
"""
# Do not apply when cache is False.
if not cache:
return func
@wraps(func)
def _inner(*args, **kwargs):
# Create hash key via hashlib. We use MD5 since it is slightly
# faster than SHA1. Note that we don't require cryptographically
# strong hashes -- we just want to have a short and computationally
# unique key.
key = hashlib.md5()
# Add basic info
key.update(func.__name__)
key.update(request.path)
for k, v in request.args.iteritems():
if k not in QS_IGNORE_CACHE:
key.update(v)
# Hit the cache
key = key.digest()
value = cache.get(key)
if value is None:
value = func(*args, **kwargs)
cache.set(key, value, timeout=cache_timeout)
elif debug:
logger.debug("Loaded response from cache.")
return value
return _inner
#
# Request handlers
#
@app.after_request
def after_request(response):
"""
Append default response headers, independent of the return type.
"""
response.headers["DAAP-Server"] = provider.server.name
response.headers["Content-Language"] = "en_us"
response.headers["Accept-Ranges"] = "bytes"
return response
@app.route("/server-info", methods=["GET"])
@daap_trace
@daap_cache_response
def server_info():
"""
"""
data = responses.server_info(provider, provider.server.name, password)
return ObjectResponse(data)
@app.route("/content-codes", methods=["GET"])
@daap_trace
@daap_cache_response
def content_codes():
"""
"""
data = responses.content_codes(provider)
return ObjectResponse(data)
@app.route("/login", methods=["GET"])
@daap_trace
@daap_authenticate
def login():
"""
"""
session_id = provider.create_session(
user_agent=request.headers.get("User-Agent"),
remote_address=request.remote_addr,
client_version=request.headers.get(
"Client-DAAP-Version"))
data = responses.login(provider, session_id)
return ObjectResponse(data)
@app.route("/logout", methods=["GET"])
@daap_trace
@daap_authenticate
@daap_unpack_args
def logout(session_id):
"""
"""
provider.destroy_session(session_id)
return Response(None, status=204)
@app.route("/activity", methods=["GET"])
@daap_trace
@daap_authenticate
@daap_unpack_args
def activity(session_id):
"""
"""
return Response(None, status=200)
@app.route("/update", methods=["GET"])
@daap_trace
@daap_authenticate
@daap_unpack_args
def update(session_id, revision, delta):
"""
"""
revision = provider.get_next_revision(session_id, revision, delta)
data = responses.update(provider, revision)
return ObjectResponse(data)
@app.route("/fp-setup", methods=["POST"])
@daap_trace
@daap_authenticate
def fp_setup():
"""
Fairplay validation, as sent by iTunes 11+. It will be unlikely this
will be ever implemented.
"""
raise NotImplementedError("Fairplay not supported.")
@app.route("/databases", methods=["GET"])
@daap_trace
@daap_authenticate
@daap_cache_response
@daap_unpack_args
def databases(session_id, revision, delta):
"""
"""
new, old = provider.get_databases(session_id, revision, delta)
added, removed, is_update = utils.diff(new, old)
data = responses.databases(
provider, new, old, added, removed, is_update)
return ObjectResponse(data)
@app.route(
"/databases/<int:database_id>/items/<int:item_id>/extra_data/artwork",
methods=["GET"])
@daap_trace
@daap_unpack_args
def database_item_artwork(database_id, item_id, session_id):
"""
"""
data, mimetype, total_length = provider.get_artwork(
session_id, database_id, item_id)
# Setup response
response = Response(
data, 200, mimetype=mimetype,
direct_passthrough=not isinstance(data, basestring))
if total_length:
response.headers["Content-Length"] = total_length
return response
@app.route(
"/databases/<int:database_id>/groups/<int:group_id>/extra_data/"
"artwork", methods=["GET"])
@daap_trace
@daap_unpack_args
def database_group_artwork(database_id, group_id, session_id, revision,
delta):
"""
"""
raise NotImplemented("Groups not supported.")
@app.route(
"/databases/<int:database_id>/items/<int:item_id>.<suffix>",
methods=["GET"])
@daap_trace
@daap_unpack_args
def database_item(database_id, item_id, suffix, session_id):
"""
"""
range_header = request.headers.get("Range", None)
if range_header:
begin, end = http.parse_range_header(range_header).ranges[0]
data, mimetype, total_length = provider.get_item(
session_id, database_id, item_id, byte_range=(begin, end))
begin, end = (begin or 0), (end or total_length)
# Setup response
response = Response(
data, 206, mimetype=mimetype,
direct_passthrough=not isinstance(data, basestring))
# A streaming response with unknown content lenght, Range x-*
# as per RFC2616 section 14.16
if total_length <= 0:
response.headers["Content-Range"] = "bytes %d-%d/*" % (
begin, end - 1)
elif total_length > 0:
response.headers["Content-Range"] = "bytes %d-%d/%d" % (
begin, end - 1, total_length)
response.headers["Content-Length"] = end - begin
else:
data, mimetype, total_length = provider.get_item(
session_id, database_id, item_id)
# Setup response
response = Response(
data, 200, mimetype=mimetype,
direct_passthrough=not isinstance(data, basestring))
if total_length > 0:
response.headers["Content-Length"] = total_length
return response
@app.route("/databases/<int:database_id>/items", methods=["GET"])
@daap_trace
@daap_authenticate
@daap_cache_response
@daap_unpack_args
def database_items(database_id, session_id, revision, delta, type):
"""
"""
new, old = provider.get_items(session_id, database_id, revision, delta)
added, removed, is_update = utils.diff(new, old)
data = responses.items(provider, new, old, added, removed, is_update)
return ObjectResponse(data)
@app.route("/databases/<int:database_id>/containers", methods=["GET"])
@daap_trace
@daap_authenticate
@daap_cache_response
@daap_unpack_args
def database_containers(database_id, session_id, revision, delta):
"""
"""
new, old = provider.get_containers(
session_id, database_id, revision, delta)
added, removed, is_update = utils.diff(new, old)
data = responses.containers(
provider, new, old, added, removed, is_update)
return ObjectResponse(data)
@app.route("/databases/<int:database_id>/groups", methods=["GET"])
@daap_trace
@daap_authenticate
@daap_cache_response
@daap_unpack_args
def database_groups(database_id, session_id, revision, delta, type):
"""
"""
raise NotImplementedError("Groups not supported.")
@app.route(
"/databases/<int:database_id>/containers/<int:container_id>/items",
methods=["GET"])
@daap_trace
@daap_authenticate
@daap_cache_response
@daap_unpack_args
def database_container_item(database_id, container_id, session_id,
revision, delta):
"""
"""
new, old = provider.get_container_items(
session_id, database_id, container_id, revision, delta)
added, removed, is_update = utils.diff(new, old)
data = responses.container_items(
provider, new, old, added, removed, is_update)
return ObjectResponse(data)
# Return the app
return app
|
Create a DAAP server, based around a Flask application. The server requires
a content provider, server name and optionally, a password. The content
provider should return raw object data.
Object responses can be cached. This may dramatically speed up connections
for multiple clients. However, this is only limited to objects, not file
servings.
Note: in case the server is mounted as a WSGI app, make sure the server
passes the authorization header.
|
def drive_rotational_speed_rpm(self):
"""Gets set of rotational speed of the disks"""
drv_rot_speed_rpm = set()
for member in self._drives_list():
if member.rotation_speed_rpm is not None:
drv_rot_speed_rpm.add(member.rotation_speed_rpm)
return drv_rot_speed_rpm
|
Gets set of rotational speed of the disks
|
def start(self):
"""Start the background process."""
self._lc = LoopingCall(self._download)
# Run immediately, and then every 30 seconds:
self._lc.start(30, now=True)
|
Start the background process.
|
def sanity(request, sysmeta_pyxb):
"""Check that sysmeta_pyxb is suitable for creating a new object and matches the
uploaded sciobj bytes."""
_does_not_contain_replica_sections(sysmeta_pyxb)
_is_not_archived(sysmeta_pyxb)
_obsoleted_by_not_specified(sysmeta_pyxb)
if 'HTTP_VENDOR_GMN_REMOTE_URL' in request.META:
return
_has_correct_file_size(request, sysmeta_pyxb)
_is_supported_checksum_algorithm(sysmeta_pyxb)
_is_correct_checksum(request, sysmeta_pyxb)
|
Check that sysmeta_pyxb is suitable for creating a new object and matches the
uploaded sciobj bytes.
|
def _specialKeyEvent(key, upDown):
""" Helper method for special keys.
Source: http://stackoverflow.com/questions/11045814/emulate-media-key-press-on-mac
"""
assert upDown in ('up', 'down'), "upDown argument must be 'up' or 'down'"
key_code = special_key_translate_table[key]
ev = AppKit.NSEvent.otherEventWithType_location_modifierFlags_timestamp_windowNumber_context_subtype_data1_data2_(
Quartz.NSSystemDefined, # type
(0,0), # location
0xa00 if upDown == 'down' else 0xb00, # flags
0, # timestamp
0, # window
0, # ctx
8, # subtype
(key_code << 16) | ((0xa if upDown == 'down' else 0xb) << 8), # data1
-1 # data2
)
Quartz.CGEventPost(0, ev.CGEvent())
|
Helper method for special keys.
Source: http://stackoverflow.com/questions/11045814/emulate-media-key-press-on-mac
|
def run(self):
""" Filter job callback.
"""
from pyrocore import config
try:
config.engine.open()
# TODO: select view into items
items = []
self.run_filter(items)
except (error.LoggableError, xmlrpc.ERRORS) as exc:
self.LOG.warn(str(exc))
|
Filter job callback.
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.