code
stringlengths 75
104k
| docstring
stringlengths 1
46.9k
|
|---|---|
def _get_init_containers(self):
"""When using git to retrieve the DAGs, use the GitSync Init Container"""
# If we're using volume claims to mount the dags, no init container is needed
if self.kube_config.dags_volume_claim or \
self.kube_config.dags_volume_host or self.kube_config.dags_in_image:
return []
# Otherwise, define a git-sync init container
init_environment = [{
'name': 'GIT_SYNC_REPO',
'value': self.kube_config.git_repo
}, {
'name': 'GIT_SYNC_BRANCH',
'value': self.kube_config.git_branch
}, {
'name': 'GIT_SYNC_ROOT',
'value': self.kube_config.git_sync_root
}, {
'name': 'GIT_SYNC_DEST',
'value': self.kube_config.git_sync_dest
}, {
'name': 'GIT_SYNC_DEPTH',
'value': '1'
}, {
'name': 'GIT_SYNC_ONE_TIME',
'value': 'true'
}]
if self.kube_config.git_user:
init_environment.append({
'name': 'GIT_SYNC_USERNAME',
'value': self.kube_config.git_user
})
if self.kube_config.git_password:
init_environment.append({
'name': 'GIT_SYNC_PASSWORD',
'value': self.kube_config.git_password
})
volume_mounts = [{
'mountPath': self.kube_config.git_sync_root,
'name': self.dags_volume_name,
'readOnly': False
}]
if self.kube_config.git_ssh_key_secret_name:
volume_mounts.append({
'name': self.git_sync_ssh_secret_volume_name,
'mountPath': '/etc/git-secret/ssh',
'subPath': 'ssh'
})
init_environment.extend([
{
'name': 'GIT_SSH_KEY_FILE',
'value': '/etc/git-secret/ssh'
},
{
'name': 'GIT_SYNC_SSH',
'value': 'true'
}])
if self.kube_config.git_ssh_known_hosts_configmap_name:
volume_mounts.append({
'name': self.git_sync_ssh_known_hosts_volume_name,
'mountPath': '/etc/git-secret/known_hosts',
'subPath': 'known_hosts'
})
init_environment.extend([
{
'name': 'GIT_KNOWN_HOSTS',
'value': 'true'
},
{
'name': 'GIT_SSH_KNOWN_HOSTS_FILE',
'value': '/etc/git-secret/known_hosts'
}
])
else:
init_environment.append({
'name': 'GIT_KNOWN_HOSTS',
'value': 'false'
})
return [{
'name': self.kube_config.git_sync_init_container_name,
'image': self.kube_config.git_sync_container,
'securityContext': {'runAsUser': 65533}, # git-sync user
'env': init_environment,
'volumeMounts': volume_mounts
}]
|
When using git to retrieve the DAGs, use the GitSync Init Container
|
def operator_relocate(self, graph, solution, op_diff_round_digits, anim):
    """applies Relocate inter-route operator to solution

    Takes every node from every route and calculates savings when inserted
    into all possible positions in other routes. Insertion is done at
    position with max. saving and procedure starts over again with newly
    created graph as input. Stops when no improvement is found.

    Args
    ----
    graph: :networkx:`NetworkX Graph Obj< >`
        A NetworkX graph is used.
    solution: BaseSolution
        BaseSolution instance
    op_diff_round_digits: float
        Precision (floating point digits) for rounding route length differences.

        *Details*: In some cases when an exchange is performed on two routes with one node each,
        the difference between the both solutions (before and after the exchange) is not zero.
        This is due to internal rounding errors of float type. So the loop won't break
        (alternating between these two solutions), we need an additional criterion to avoid
        this behaviour: A threshold to handle values very close to zero as if they were zero
        (for a more detailed description of the matter see http://floating-point-gui.de or
        https://docs.python.org/3.5/tutorial/floatingpoint.html)
    anim: AnimationDing0
        AnimationDing0 object

    Returns
    -------
    LocalSearchSolution
        A solution (LocalSearchSolution class)

    Notes
    -----
    (Inner) Loop variables:

    * i: node that is checked for possible moves (position in the route `tour`, not node name)
    * j: node that precedes the insert position in target route (position in the route `target_tour`, not node name)

    Todo
    ----
    * Remove ugly nested loops, convert to more efficient matrix operations
    """
    # shorter var names for loop
    dm = graph._matrix  # distance matrix
    dn = graph._nodes   # node-name -> matrix-index lookup

    # Relocate: Search better solutions by checking possible node moves
    while True:
        # Best (most negative) total-length change found in this sweep.
        length_diff_best = 0

        for route in solution.routes():
            # exclude origin routes with single high-demand nodes (Load Areas)
            if len(route._nodes) == 1:
                if solution._problem._is_aggregated[str(route._nodes[0])]:
                    continue

            # create tour by adding depot at start and end
            # (route node at position i is tour[i+1])
            tour = [graph._depot] + route._nodes + [graph._depot]

            for target_route in solution.routes():
                # exclude (origin+target) routes with single high-demand nodes (Load Areas)
                if len(target_route._nodes) == 1:
                    if solution._problem._is_aggregated[str(target_route._nodes[0])]:
                        continue

                target_tour = [graph._depot] + target_route._nodes + [graph._depot]

                # relocating within the same route is not considered here
                if route == target_route:
                    continue

                n = len(route._nodes)
                # +1: the node may also be appended after the last target node
                nt = len(target_route._nodes)+1

                for i in range(0,n):
                    node = route._nodes[i]
                    for j in range(0,nt):
                        # only consider the move if the target route has capacity
                        if target_route.can_allocate([node]):
                            # Length delta of moving `node` (= tour[i+1]):
                            # remove edges (tour[i], node) and (node, tour[i+2]),
                            # add bridging edge (tour[i], tour[i+2]), and splice
                            # node in between target_tour[j] and target_tour[j+1].
                            length_diff = (-dm[dn[tour[i].name()]][dn[tour[i+1].name()]] -
                                           dm[dn[tour[i+1].name()]][dn[tour[i+2].name()]] +
                                           dm[dn[tour[i].name()]][dn[tour[i+2].name()]] +
                                           dm[dn[target_tour[j].name()]][dn[tour[i+1].name()]] +
                                           dm[dn[tour[i+1].name()]][dn[target_tour[j+1].name()]] -
                                           dm[dn[target_tour[j].name()]][dn[target_tour[j+1].name()]])

                            if length_diff < length_diff_best:
                                length_diff_best = length_diff
                                node_best, target_route_best, j_best = node, target_route, j

        # apply the single best move found in this sweep, then start over
        if length_diff_best < 0:
            # insert new node
            # NOTE(review): assumes Route.insert() also removes the node from
            # its origin route — confirm; otherwise the origin route keeps a
            # stale copy.
            target_route_best.insert([node_best], j_best)

            # remove empty routes from solution
            solution._routes = [route for route in solution._routes if route._nodes]
            if anim is not None:
                solution.draw_network(anim)

        # no improvement found (rounded to suppress float noise, see docstring)
        if round(length_diff_best, op_diff_round_digits) == 0:
            break

    return solution
|
applies Relocate inter-route operator to solution
Takes every node from every route and calculates savings when inserted
into all possible positions in other routes. Insertion is done at
position with max. saving and procedure starts over again with newly
created graph as input. Stops when no improvement is found.
Args
----
graph: :networkx:`NetworkX Graph Obj< >`
A NetworkX graph is used.
solution: BaseSolution
BaseSolution instance
op_diff_round_digits: float
Precision (floating point digits) for rounding route length differences.
*Details*: In some cases when an exchange is performed on two routes with one node each,
the difference between the both solutions (before and after the exchange) is not zero.
This is due to internal rounding errors of float type. So the loop won't break
(alternating between these two solutions), we need an additional criterion to avoid
this behaviour: A threshold to handle values very close to zero as if they were zero
(for a more detailed description of the matter see http://floating-point-gui.de or
https://docs.python.org/3.5/tutorial/floatingpoint.html)
anim: AnimationDing0
AnimationDing0 object
Returns
-------
LocalSearchSolution
A solution (LocalSearchSolution class)
Notes
-----
(Inner) Loop variables:
* i: node that is checked for possible moves (position in the route `tour`, not node name)
* j: node that precedes the insert position in target route (position in the route `target_tour`, not node name)
Todo
----
* Remove ugly nested loops, convert to more efficient matrix operations
|
def from_dict(cls, d):
    """
    Create an instance from a dictionary.

    :param cls: class to instantiate; must define ``dictionary_attributes``,
        a mapping of attribute name -> bool (True when the key is required).
    :param d: source dictionary to pull constructor arguments from.
    :raises DictConvertible.Error: when a required key is missing from ``d``.
    :return: a new ``cls`` built from the extracted keyword arguments.
    """
    assert isinstance(d, dict)
    init_args = dict()
    # .items() instead of the Python-2-only .iteritems(): dicts have no
    # iteritems() on Python 3, which made this method crash there.
    for key, is_required in cls.dictionary_attributes.items():
        try:
            init_args[key] = d[key]
        except KeyError:
            # optional keys may simply be absent; required ones are an error
            if is_required:
                raise DictConvertible.Error('missing key in dictionary', cls, missing_key=key)
    return cls(**init_args)
|
Create an instance from a dictionary.
|
def binary(self):
    """Load and return the path to the native engine binary.

    Extracts the packaged native engine shared library into a fresh
    temporary directory and returns the path of the extracted file.

    :returns: Filesystem path of the unpacked ``.so``.
    :raises BinaryLocationError: if reading the packaged resource or
        writing the library file fails.
    """
    lib_name = '{}.so'.format(NATIVE_ENGINE_MODULE)
    # A new temp dir per call, so concurrent extractions never clobber a
    # library another process may still be using.
    lib_path = os.path.join(safe_mkdtemp(), lib_name)
    try:
        with closing(pkg_resources.resource_stream(__name__, lib_name)) as input_fp:
            # NB: The header stripping code here must be coordinated with header insertion code in
            # build-support/bin/native/bootstrap_code.sh
            # The resource starts with two build-time metadata lines; consume
            # them before copying the raw binary payload.
            engine_version = input_fp.readline().decode('utf-8').strip()
            repo_version = input_fp.readline().decode('utf-8').strip()
            logger.debug('using {} built at {}'.format(engine_version, repo_version))
            with open(lib_path, 'wb') as output_fp:
                output_fp.write(input_fp.read())
    except (IOError, OSError) as e:
        # Wrap any I/O failure in the domain-specific error, keeping the cause.
        raise self.BinaryLocationError(
            "Error unpacking the native engine binary to path {}: {}".format(lib_path, e),
            e)
    return lib_path
|
Load and return the path to the native engine binary.
|
def add_download(self, info, future):
    """
    Hand off a download to the Downloads plugin, if it is present.

    Parameters
    ----------
    info : `~ginga.misc.Bunch.Bunch`
        A bunch of information about the URI as returned by
        `ginga.util.iohelper.get_fileinfo()`

    future : `~ginga.misc.Future.Future`
        A future that represents the future computation to be performed
        after downloading the file.  Resolving the future will trigger
        the computation.
    """
    # Without the plugin there is nowhere to hand the download off to.
    if not self.gpmon.has_plugin('Downloads'):
        self.show_error("Please activate the 'Downloads' plugin to"
                        " enable download functionality")
        return
    downloads = self.gpmon.get_plugin('Downloads')
    # Delegate on the GUI thread; the plugin manages the actual transfer.
    self.gui_do(downloads.add_download, info, future)
|
Hand off a download to the Downloads plugin, if it is present.
Parameters
----------
info : `~ginga.misc.Bunch.Bunch`
A bunch of information about the URI as returned by
`ginga.util.iohelper.get_fileinfo()`
future : `~ginga.misc.Future.Future`
A future that represents the future computation to be performed
after downloading the file. Resolving the future will trigger
the computation.
|
def upload_service_version(self, service_zip_file, mode='production', service_version='default', service_id=None, **kwargs):
    '''
    upload_service_version(self, service_zip_file, mode='production', service_version='default', service_id=None, **kwargs)

    Upload a service version to Opereto

    :Parameters:
    * *service_zip_file* (`string`) -- zip file location containing service and service specification
    * *mode* (`string`) -- production/development (default is production)
    * *service_version* (`string`) -- Service version (appended only in production mode)
    * *service_id* (`string`) -- Service Identifier

    :Keywords args:
    * *comment* (`string`) -- comment

    :Example:
    .. code-block:: python

       opereto_client.upload_service_version(service_zip_file=zip_action_file+'.zip', mode='production', service_version='111')
    '''
    files = {'service_file': open(service_zip_file, 'rb')}
    # Assemble the endpoint path from its optional segments.
    segments = ['/services/upload/%s' % mode]
    if mode == 'production':
        segments.append('/' + service_version)
    if service_id:
        segments.append('/' + service_id)
    url_suffix = ''.join(segments)
    if kwargs:
        # Remaining keyword arguments become the query string (e.g. comment).
        url_suffix = '%s?%s' % (url_suffix, urlencode(kwargs))
    return self._call_rest_api('post', url_suffix, files=files, error='Failed to upload service version')
|
upload_service_version(self, service_zip_file, mode='production', service_version='default', service_id=None, **kwargs)
Upload a service version to Opereto
:Parameters:
* *service_zip_file* (`string`) -- zip file location containing service and service specification
* *mode* (`string`) -- production/development (default is production)
* *service_version* (`string`) -- Service version
* *service_id* (`string`) -- Service Identifier
:Keywords args:
* *comment* (`string`) -- comment
:Example:
.. code-block:: python
opereto_client.upload_service_version(service_zip_file=zip_action_file+'.zip', mode='production', service_version='111')
|
def _justify(texts, max_len, mode='right'):
"""
Perform ljust, center, rjust against string or list-like
"""
if mode == 'left':
return [x.ljust(max_len) for x in texts]
elif mode == 'center':
return [x.center(max_len) for x in texts]
else:
return [x.rjust(max_len) for x in texts]
|
Perform ljust, center, rjust against string or list-like
|
def plot_info(self, dvs):
    '''
    Plots miscellaneous de-trending information on the data
    validation summary figure.

    :param dvs: A :py:class:`dvs.DVS` figure instance
    '''
    # The DVS title strip provides three axes: left, center, right.
    axl, axc, axr = dvs.title()
    # Center axis: target identifier (top) and raw -> de-trended CDPP (bottom).
    axc.annotate("%s %d" % (self._mission.IDSTRING, self.ID),
                 xy=(0.5, 0.5), xycoords='axes fraction',
                 ha='center', va='center', fontsize=18)
    axc.annotate(r"%.2f ppm $\rightarrow$ %.2f ppm" %
                 (self.cdppr, self.cdpp),
                 xy=(0.5, 0.2), xycoords='axes fraction',
                 ha='center', va='center', fontsize=8, color='k',
                 fontstyle='italic')
    # Left axis: mission/season/target name (top) and aperture info (bottom);
    # the aperture line includes the neighbor count when neighbors exist.
    axl.annotate("%s %s%02d: %s" %
                 (self.mission.upper(),
                  self._mission.SEASONCHAR, self.season, self.name),
                 xy=(0.5, 0.5), xycoords='axes fraction',
                 ha='center', va='center', fontsize=12,
                 color='k')
    axl.annotate(self.aperture_name if len(self.neighbors) == 0
                 else "%s, %d neighbors" %
                 (self.aperture_name, len(self.neighbors)),
                 xy=(0.5, 0.2), xycoords='axes fraction',
                 ha='center', va='center', fontsize=8, color='k',
                 fontstyle='italic')
    # Right axis: target magnitude (top); GP-only CDPP (bottom) only when it
    # is a valid positive value.
    axr.annotate("%s %.3f" % (self._mission.MAGSTRING, self.mag),
                 xy=(0.5, 0.5), xycoords='axes fraction',
                 ha='center', va='center', fontsize=12,
                 color='k')
    if not np.isnan(self.cdppg) and self.cdppg > 0:
        axr.annotate(r"GP %.3f ppm" % (self.cdppg),
                     xy=(0.5, 0.2), xycoords='axes fraction',
                     ha='center', va='center', fontsize=8, color='k',
                     fontstyle='italic')
|
Plots miscellaneous de-trending information on the data
validation summary figure.
:param dvs: A :py:class:`dvs.DVS` figure instance
|
def rupdate(source, target):
    '''Recursively update nested dictionaries.

    Unlike ``dict.update``, nested mappings in ``target`` are merged into
    the corresponding mappings in ``source`` instead of replacing them.
    see: http://stackoverflow.com/a/3233356/1289080

    :param source: dict updated in place (and returned).
    :param target: mapping whose entries are merged into ``source``.
    :return: the mutated ``source``.
    '''
    # .items() instead of the Python-2-only .iteritems(): dicts have no
    # iteritems() on Python 3, which made this function crash there.
    for key, value in target.items():
        if isinstance(value, Mapping):
            # Merge nested mappings recursively; missing keys start empty.
            source[key] = rupdate(source.get(key, {}), value)
        else:
            source[key] = value
    return source
|
recursively update nested dictionaries
see: http://stackoverflow.com/a/3233356/1289080
|
def get_file_metadata(self, secure_data_path, version=None):
    """Get just the metadata for a file, not the content.

    Issues a HEAD request for the secure file and returns the response
    headers; falls back to the "CURRENT" version when none is given.
    """
    version_id = version if version else "CURRENT"
    params = {'versionId': str(version_id)}
    url = self.cerberus_url + '/v1/secure-file/' + secure_data_path
    secret_resp = head_with_retry(url, params=params, headers=self.HEADERS)
    # Raise on any non-success status before exposing the headers.
    throw_if_bad_response(secret_resp)
    return secret_resp.headers
|
Get just the metadata for a file, not the content
|
def crossref_paths(self):
    """Just like crossrefs, but all the targets are munged to :all.

    :return: set of addresses with each crossref's repo/path and the
        default (":all") target.
    """
    # Set comprehension instead of set([list-comp]): skips building a
    # throwaway intermediate list (flake8-comprehensions C403).
    return {address.new(repo=x.repo, path=x.path) for x in self.crossrefs}
|
Just like crossrefs, but all the targets are munged to :all.
|
def add_method(self, loop, callback):
    """Add a coroutine function

    Args:
        loop: The :class:`event loop <asyncio.BaseEventLoop>` instance
            on which to schedule callbacks
        callback: The :term:`coroutine function` to add
    """
    # Key on the underlying function plus the id of its bound object so
    # the same method on different instances gets distinct entries.
    func, owner = get_method_vars(callback)
    key = (func, id(owner))
    self.event_loop_map[key] = loop
    self[key] = owner
|
Add a coroutine function
Args:
loop: The :class:`event loop <asyncio.BaseEventLoop>` instance
on which to schedule callbacks
callback: The :term:`coroutine function` to add
|
def gml_to_geojson(el):
    """Given an lxml Element of a GML geometry, returns a dict in GeoJSON format.

    Handles Point, LineString, Polygon and their Multi* variants in the
    ``urn:ogc:def:crs:EPSG::4326`` CRS; elements marked ``EPSG:4326`` are
    delegated to the GML v2 converter.

    :param el: lxml Element of a GML geometry.
    :returns: dict with GeoJSON ``type`` and ``coordinates`` keys.
    :raises NotImplementedError: for unsupported srsName values or tags.
    """
    # Plain "EPSG:4326" marks legacy GML v2 documents handled elsewhere;
    # anything else but the URN form (or no srsName) is unsupported.
    if el.get('srsName') not in ('urn:ogc:def:crs:EPSG::4326', None):
        if el.get('srsName') == 'EPSG:4326':
            return _gmlv2_to_geojson(el)
        else:
            raise NotImplementedError("Unrecognized srsName %s" % el.get('srsName'))
    # Strip the GML namespace to get the bare geometry tag name.
    tag = el.tag.replace('{%s}' % NS_GML, '')
    if tag == 'Point':
        # _reverse_gml_coords swaps GML's lat/lon into GeoJSON lon/lat order.
        coordinates = _reverse_gml_coords(el.findtext('{%s}pos' % NS_GML))[0]
    elif tag == 'LineString':
        coordinates = _reverse_gml_coords(el.findtext('{%s}posList' % NS_GML))
    elif tag == 'Polygon':
        # First ring is the exterior boundary; any further rings are holes.
        coordinates = []
        for ring in el.xpath('gml:exterior/gml:LinearRing/gml:posList', namespaces=NSMAP) \
                + el.xpath('gml:interior/gml:LinearRing/gml:posList', namespaces=NSMAP):
            coordinates.append(_reverse_gml_coords(ring.text))
    elif tag in ('MultiPoint', 'MultiLineString', 'MultiPolygon'):
        # e.g. 'MultiPoint' -> member tag 'pointMember', child tag 'Point';
        # recurse on each member and keep only its coordinates.
        single_type = tag[5:]
        member_tag = single_type[0].lower() + single_type[1:] + 'Member'
        coordinates = [
            gml_to_geojson(member)['coordinates']
            for member in el.xpath('gml:%s/gml:%s' % (member_tag, single_type), namespaces=NSMAP)
        ]
    else:
        raise NotImplementedError
    return {
        'type': tag,
        'coordinates': coordinates
    }
|
Given an lxml Element of a GML geometry, returns a dict in GeoJSON format.
|
def convert_entry_to_path(path):
    # type: (Dict[S, Union[S, bool, Tuple[S], List[S]]]) -> S
    """Convert a pipfile entry to a string"""
    # Guard clauses: reject non-mappings and mappings without a usable key.
    if not isinstance(path, Mapping):
        raise TypeError("expecting a mapping, received {0!r}".format(path))
    if "file" in path:
        # File URLs are translated into local filesystem paths.
        return vistir.path.url_to_path(path["file"])
    if "path" in path:
        return path["path"]
    raise ValueError("missing path-like entry in supplied mapping {0!r}".format(path))
|
Convert a pipfile entry to a string
|
def AddPathInfo(self, path_info):
    """Updates existing path information of the path record.

    The supplied path info must match this record's path type and
    components, and must not reuse an already recorded timestamp. A copy
    is stored; entries without a timestamp are stamped with "now".
    """
    if path_info.path_type != self._path_type:
        raise ValueError(
            "Incompatible path types: `%s` and `%s`" % (self._path_type, path_info.path_type))
    if path_info.components != self._components:
        raise ValueError(
            "Incompatible path components: `%s` and `%s`" % (self._components, path_info.components))
    if path_info.timestamp in self._path_infos:
        raise ValueError("PathInfo with timestamp %r was added before." %
                         path_info.timestamp)
    # Store a copy so later mutation of the caller's object has no effect.
    stored = path_info.Copy()
    if stored.timestamp is None:
        stored.timestamp = rdfvalue.RDFDatetime.Now()
    self._path_infos[stored.timestamp] = stored
|
Updates existing path information of the path record.
|
def main(args=None):
    """Call the CLI interface and wait for the result.

    :param args: optional list of command-line arguments; when ``None``
        the parser falls back to ``sys.argv[1:]``.
    """
    retcode = 0
    try:
        ci = CliInterface()
        # Bug fix: the `args` parameter was previously ignored — the parser
        # was always called with no arguments. parse_args(None) still reads
        # sys.argv[1:], so the default behavior is unchanged.
        parsed = ci.parser.parse_args(args)
        result = parsed.func(parsed)
        if result is not None:
            print(result)
    except Exception:
        # Top-level boundary: report the traceback and signal failure.
        retcode = 1
        traceback.print_exc()
    sys.exit(retcode)
|
Call the CLI interface and wait for the result.
|
def write_json(self, chunk, code=None, headers=None):
    """A convenient method that binds `chunk`, `code`, `headers` together.

    chunk could be any type of (str, dict, list); dicts and lists are
    JSON-encoded first. Optionally sets the response status code and any
    extra headers.
    """
    # Typo fix in the message ("cound" -> "could"); kept as an assert so the
    # raised exception type is unchanged for existing callers.
    assert chunk is not None, 'None could not be written in write_json'
    self.set_header("Content-Type", "application/json; charset=UTF-8")
    # Single isinstance call with a tuple instead of two chained checks.
    if isinstance(chunk, (dict, list)):
        chunk = self.json_encode(chunk)
    # convert chunk to utf8 before `RequestHandler.write()`
    # so that if any error occurs, we can catch and log it
    try:
        chunk = utf8(chunk)
    except Exception:
        app_log.error('chunk encoding error, repr: %s' % repr(chunk))
        raise_exc_info(sys.exc_info())
    self.write(chunk)
    # NOTE(review): status/headers are set after write(); in Tornado this is
    # valid until the response is flushed — confirm no flush happens earlier.
    if code:
        self.set_status(code)
    if headers:
        for k, v in headers.items():
            self.set_header(k, v)
|
A convenient method that binds `chunk`, `code`, `headers` together
chunk could be any type of (str, dict, list)
|
def distance_to_point(self, point):
    """
    Computes the absolute distance from the plane to the point

    :param point: Point for which distance is computed
    :return: Distance between the plane and the point
    """
    # Signed distance from the plane equation n.p + d; magnitude is the
    # absolute distance (np.dot is symmetric for 1-D vectors).
    signed_distance = np.dot(point, self.normal_vector) + self.d
    return np.abs(signed_distance)
|
Computes the absolute distance from the plane to the point
:param point: Point for which distance is computed
:return: Distance between the plane and the point
|
def create_file(self, path, message, content,
                branch=github.GithubObject.NotSet,
                committer=github.GithubObject.NotSet,
                author=github.GithubObject.NotSet):
    """Create a file in this repository.

    :calls: `PUT /repos/:owner/:repo/contents/:path <http://developer.github.com/v3/repos/contents#create-a-file>`_
    :param path: string, (required), path of the file in the repository
    :param message: string, (required), commit message
    :param content: string, (required), the actual data in the file
    :param branch: string, (optional), branch to create the commit on. Defaults to the default branch of the repository
    :param committer: InputGitAuthor, (optional), if no information is given the authenticated user's information will be used. You must specify both a name and email.
    :param author: InputGitAuthor, (optional), if omitted this will be filled in with committer information. If passed, you must specify both a name and email.
    :rtype: {
        'content': :class:`ContentFile <github.ContentFile.ContentFile>`:,
        'commit': :class:`Commit <github.Commit.Commit>`}
    """
    # Validate argument types up front so the REST call never sees bad input.
    assert isinstance(path, (str, unicode)), \
        'path must be str/unicode object'
    assert isinstance(message, (str, unicode)), \
        'message must be str/unicode object'
    assert isinstance(content, (str, unicode, bytes)), \
        'content must be a str/unicode object'
    assert branch is github.GithubObject.NotSet \
        or isinstance(branch, (str, unicode)), \
        'branch must be a str/unicode object'
    assert author is github.GithubObject.NotSet \
        or isinstance(author, github.InputGitAuthor), \
        'author must be a github.InputGitAuthor object'
    assert committer is github.GithubObject.NotSet \
        or isinstance(committer, github.InputGitAuthor), \
        'committer must be a github.InputGitAuthor object'
    # The contents API requires base64-encoded payloads: normalize text to
    # UTF-8 bytes, encode, and on Python 3 decode the base64 bytes back to
    # str so the value is JSON-serializable.
    if atLeastPython3:
        if isinstance(content, str):
            content = content.encode('utf-8')
        content = b64encode(content).decode('utf-8')
    else:
        if isinstance(content, unicode):
            content = content.encode('utf-8')
        content = b64encode(content)
    put_parameters = {'message': message, 'content': content}
    if branch is not github.GithubObject.NotSet:
        put_parameters['branch'] = branch
    # Only serialize author/committer when explicitly provided; the API
    # falls back to the authenticated user / committer info otherwise.
    if author is not github.GithubObject.NotSet:
        put_parameters["author"] = author._identity
    if committer is not github.GithubObject.NotSet:
        put_parameters["committer"] = committer._identity
    headers, data = self._requester.requestJsonAndCheck(
        "PUT",
        self.url + "/contents/" + urllib.quote(path),
        input=put_parameters
    )
    return {'content': github.ContentFile.ContentFile(self._requester, headers, data["content"], completed=False),
            'commit': github.Commit.Commit(self._requester, headers, data["commit"], completed=True)}
|
Create a file in this repository.
:calls: `PUT /repos/:owner/:repo/contents/:path <http://developer.github.com/v3/repos/contents#create-a-file>`_
:param path: string, (required), path of the file in the repository
:param message: string, (required), commit message
:param content: string, (required), the actual data in the file
:param branch: string, (optional), branch to create the commit on. Defaults to the default branch of the repository
:param committer: InputGitAuthor, (optional), if no information is given the authenticated user's information will be used. You must specify both a name and email.
:param author: InputGitAuthor, (optional), if omitted this will be filled in with committer information. If passed, you must specify both a name and email.
:rtype: {
'content': :class:`ContentFile <github.ContentFile.ContentFile>`:,
'commit': :class:`Commit <github.Commit.Commit>`}
|
def do_imageplaceholder(parser, token):
    """
    Parse the ``imageplaceholder`` template tag and build its node.
    """
    # parse_placeholder yields the placeholder name plus its keyword options.
    placeholder_name, options = parse_placeholder(parser, token)
    return ImagePlaceholderNode(placeholder_name, **options)
|
Method that parse the imageplaceholder template tag.
|
def exec_action(module, action, module_parameter=None, action_parameter=None, state_only=False):
    '''
    Execute an arbitrary action on a module.

    module
        name of the module to be executed
    action
        name of the module's action to be run
    module_parameter
        additional params passed to the defined module
    action_parameter
        additional params passed to the defined action
    state_only
        don't return any output but only the success/failure of the operation

    CLI Example (updating the ``php`` implementation used for ``apache2``):

    .. code-block:: bash

        salt '*' eselect.exec_action php update action_parameter='apache2'
    '''
    # --brief/--colour=no keep eselect's output terse and machine-parseable.
    out = __salt__['cmd.run'](
        'eselect --brief --colour=no {0} {1} {2} {3}'.format(
            module, module_parameter or '', action, action_parameter or ''),
        python_shell=False
    )
    out = out.strip().split('\n')
    # eselect reports failures on stdout, prefixed with '!!! Error'.
    if out[0].startswith('!!! Error'):
        return False
    if state_only:
        return True
    # NOTE(review): str.split('\n') never returns an empty list, so this
    # branch is unreachable; the single-empty-line check below is the one
    # that actually detects "no output".
    if not out:
        return False
    if len(out) == 1 and not out[0].strip():
        return False
    return out
|
Execute an arbitrary action on a module.
module
name of the module to be executed
action
name of the module's action to be run
module_parameter
additional params passed to the defined module
action_parameter
additional params passed to the defined action
state_only
don't return any output but only the success/failure of the operation
CLI Example (updating the ``php`` implementation used for ``apache2``):
.. code-block:: bash
salt '*' eselect.exec_action php update action_parameter='apache2'
|
def read(self, length, timeout=None):
    """Read up to `length` number of bytes from the serial port with an
    optional timeout.

    `timeout` can be positive for a timeout in seconds, 0 for a
    non-blocking read, or negative or None for a blocking read that will
    block until `length` number of bytes are read. Default is a blocking
    read.

    For a non-blocking or timeout-bound read, read() may return data whose
    length is less than or equal to the requested length.

    Args:
        length (int): length in bytes.
        timeout (int, float, None): timeout duration in seconds.

    Returns:
        bytes: data read.

    Raises:
        SerialError: if an I/O or OS error occurs.
    """
    import time  # local import: only needed for deadline bookkeeping

    # Bug fixes versus the previous version:
    #  * the timeout used to restart on every select() wakeup, so trickling
    #    data could make the call wait far longer than `timeout` in total;
    #    an absolute deadline bounds the overall wait instead.
    #  * a negative timeout is documented as "block forever" but was passed
    #    straight to select(), which raises ValueError; treat it like None.
    deadline = None
    if timeout is not None and timeout >= 0:
        deadline = time.monotonic() + timeout

    data = b""
    while True:
        if deadline is not None:
            remaining = max(deadline - time.monotonic(), 0.0)
            (rlist, _, _) = select.select([self._fd], [], [], remaining)
            # Timed out with nothing ready: return whatever was collected.
            if self._fd not in rlist:
                break
        try:
            data += os.read(self._fd, length - len(data))
        except OSError as e:
            raise SerialError(e.errno, "Reading serial port: " + e.strerror)
        if len(data) == length:
            break
    return data
|
Read up to `length` number of bytes from the serial port with an
optional timeout.
`timeout` can be positive for a timeout in seconds, 0 for a
non-blocking read, or negative or None for a blocking read that will
block until `length` number of bytes are read. Default is a blocking
read.
For a non-blocking or timeout-bound read, read() may return data whose
length is less than or equal to the requested length.
Args:
length (int): length in bytes.
timeout (int, float, None): timeout duration in seconds.
Returns:
bytes: data read.
Raises:
SerialError: if an I/O or OS error occurs.
|
def _external_request(self, method, url, *args, **kwargs):
    """
    Wrapper around ``requests.<method>`` that always sends our User-Agent
    header.  GET responses are cached per URL and served from the cache on
    repeat requests.
    """
    self.last_url = url
    # Serve repeated GETs straight from the per-instance cache.
    if method == 'get' and url in self.responses:
        return self.responses[url]
    ua_header = {'User-Agent': useragent}
    supplied_headers = kwargs.pop('headers', None)
    if supplied_headers:
        # Merge into the caller's dict (our UA wins on conflict).
        supplied_headers.update(ua_header)
        kwargs['headers'] = supplied_headers
    else:
        kwargs['headers'] = ua_header
    if self.timeout:
        # Propagate the timeout configured at construction time.
        kwargs['timeout'] = self.timeout
    start = datetime.datetime.now()
    response = getattr(requests, method)(url, verify=self.ssl_verify, *args, **kwargs)
    self.total_external_fetch_duration += datetime.datetime.now() - start
    if self.verbose:
        print("Got Response: %s (took %s)" % (url, (datetime.datetime.now() - start)))
    self.last_raw_response = response
    self.check_error(response)
    if method == 'get':
        # Only GETs are cacheable.
        self.responses[url] = response
    return response
|
Wrapper for requests.get with useragent automatically set.
Also, all requests and responses are cached.
|
def write_eps(matrix, version, out, scale=1, border=None, color='#000',
              background=None):
    """\
    Serializes the QR Code as EPS document.

    :param matrix: The matrix to serialize.
    :param int version: The (Micro) QR code version
    :param out: Filename or a file-like object supporting to write strings.
    :param scale: Indicates the size of a single module (default: 1 which
            corresponds to 1 point (1/72 inch) per module).
    :param int border: Integer indicating the size of the quiet zone.
            If set to ``None`` (default), the recommended border size
            will be used (``4`` for QR Codes, ``2`` for a Micro QR Codes).
    :param color: Color of the modules (default: black). The
            color can be provided as ``(R, G, B)`` tuple (this method
            accepts floats as R, G, B values), as web color name (like
            "red") or in hexadecimal format (``#RGB`` or ``#RRGGBB``).
    :param background: Optional background color (default: ``None`` = no
            background color). See `color` for valid values.
    """
    import textwrap

    def write_line(writemeth, content):
        """\
        Writes `content` and ``LF``.
        """
        # Postscript: Max. 255 characters per line
        for line in textwrap.wrap(content, 254):
            writemeth(line)
            writemeth('\n')

    def rgb_to_floats(clr):
        """\
        Converts the provided color into an acceptable format for Postscript's
        ``setrgbcolor``
        """
        def to_float(c):
            # Floats are taken as already-normalized channel values in 0..1.
            if isinstance(c, float):
                if not 0.0 <= c <= 1.0:
                    raise ValueError('Invalid color "{0}". Not in range 0 .. 1'
                                     .format(c))
                return c
            # Integer channels are scaled from 0..255; the int 1 passes through.
            return 1 / 255.0 * c if c != 1 else c
        return tuple([to_float(i) for i in colors.color_to_rgb(clr)])

    check_valid_scale(scale)
    check_valid_border(border)
    with writable(out, 'wt') as f:
        writeline = partial(write_line, f.write)
        border = get_border(version, border)
        width, height = get_symbol_size(version, scale, border)
        # Write common header
        writeline('%!PS-Adobe-3.0 EPSF-3.0')
        writeline('%%Creator: {0}'.format(CREATOR))
        writeline('%%CreationDate: {0}'.format(time.strftime("%Y-%m-%d %H:%M:%S")))
        writeline('%%DocumentData: Clean7Bit')
        writeline('%%BoundingBox: 0 0 {0} {1}'.format(width, height))
        # Write the shortcuts (relative moveto / relative lineto)
        writeline('/m {{ rmoveto }} bind def'.replace('{{', '{').replace('}}', '}'))
        writeline('/l {{ rlineto }} bind def'.replace('{{', '{').replace('}}', '}'))
        stroke_color_is_black = colors.color_is_black(color)
        stroke_color = color if stroke_color_is_black else rgb_to_floats(color)
        if background is not None:
            # Fill the whole clip area with the background color first.
            writeline('{0:f} {1:f} {2:f} setrgbcolor clippath fill'
                      .format(*rgb_to_floats(background)))
            if stroke_color_is_black:
                # Reset RGB color back to black iff stroke color is black
                # In case stroke color != black set the RGB color later
                writeline('0 0 0 setrgbcolor')
        if not stroke_color_is_black:
            writeline('{0:f} {1:f} {2:f} setrgbcolor'.format(*stroke_color))
        if scale != 1:
            writeline('{0} {0} scale'.format(scale))
        writeline('newpath')
        # Current pen position y-axis
        # Note: 0, 0 = lower left corner in PS coordinate system
        y = get_symbol_size(version, scale=1, border=0)[1] + border - .5  # .5 = linewidth / 2
        line_iter = matrix_to_lines(matrix, border, y, incby=-1)
        # EPS supports absolute coordinates as well, but relative coordinates
        # are more compact and IMO nicer; so the 1st coordinate is absolute, all
        # other coordinates are relative
        (x1, y1), (x2, y2) = next(line_iter)
        # First module row: absolute moveto, then a relative horizontal line.
        coord = ['{0} {1} moveto {2} 0 l'.format(x1, y1, x2 - x1)]
        append_coord = coord.append
        x = x2
        for (x1, y1), (x2, y2) in line_iter:
            # Subsequent segments are relative to the previous pen position.
            append_coord(' {0} {1} m {2} 0 l'.format(x1 - x, int(y1 - y), x2 - x1))
            x, y = x2, y2
        writeline(''.join(coord))
        writeline('stroke')
        writeline('%%EOF')
|
\
Serializes the QR Code as EPS document.
:param matrix: The matrix to serialize.
:param int version: The (Micro) QR code version
:param out: Filename or a file-like object supporting to write strings.
:param scale: Indicates the size of a single module (default: 1 which
corresponds to 1 point (1/72 inch) per module).
:param int border: Integer indicating the size of the quiet zone.
If set to ``None`` (default), the recommended border size
will be used (``4`` for QR Codes, ``2`` for a Micro QR Codes).
:param color: Color of the modules (default: black). The
color can be provided as ``(R, G, B)`` tuple (this method
accepts floats as R, G, B values), as web color name (like
"red") or in hexadecimal format (``#RGB`` or ``#RRGGBB``).
:param background: Optional background color (default: ``None`` = no
background color). See `color` for valid values.
|
def __insert_action(self, revision):
    """
    Handle the insert action type.

    Creates new document to be created in this collection.
    This allows you to stage a creation of an object.  The staged
    document in ``revision["patch"]`` is inserted with its ``_id``
    forced to the revision's ``master_id`` so the new object keeps its
    pre-assigned identity.

    :param dict revision: The revision dictionary
    :raises DocumentRevisionInsertFailed: if the insert does not resolve
        to a string id.
    """
    revision["patch"]["_id"] = ObjectId(revision.get("master_id"))
    # Coroutine-style generator: `yield` suspends until the asynchronous
    # insert resolves to the inserted document's id.
    insert_response = yield self.collection.insert(revision.get("patch"))
    # NOTE(review): assumes the driver returns the inserted id as a str on
    # success — confirm against the database driver in use.
    if not isinstance(insert_response, str):
        raise DocumentRevisionInsertFailed()
|
Handle the insert action type.
Creates new document to be created in this collection.
This allows you to stage a creation of an object
:param dict revision: The revision dictionary
|
def read_envvar_file(name, extension):
    """
    Read values from a file named by the environment variable
    ``<NAME>_CONFIG_FILE``.

    :param name: environment variable prefix to look for (without the
        ``_CONFIG_FILE`` suffix)
    :param extension: *(unused)*
    :return: a `.Configuration`, possibly `.NotConfigured`
    """
    config_file = environ.get('{}_config_file'.format(name).upper())
    # load the named file when the variable is set, otherwise fall back
    # to the empty configuration source
    return loadf(config_file) if config_file else NotConfigured
|
Read values from a file provided as a environment variable
``NAME_CONFIG_FILE``.
:param name: environment variable prefix to look for (without the
``_CONFIG_FILE``)
:param extension: *(unused)*
:return: a `.Configuration`, possibly `.NotConfigured`
|
def compactor_daemon(conf_file):
    """
    Run the compactor daemon.

    NOTE(review): ``compactor.compactor(conf)`` is presumably the daemon
    main loop and does not return until shutdown — confirm against the
    ``compactor`` module.

    :param conf_file: Name of the configuration file.
    """
    # Monkey-patch the stdlib for eventlet green threads before any
    # sockets/threads are created by the config or daemon code.
    eventlet.monkey_patch()
    conf = config.Config(conf_file=conf_file)
    compactor.compactor(conf)
|
Run the compactor daemon.
:param conf_file: Name of the configuration file.
|
def flatten(d, reducer='tuple', inverse=False):
    """Flatten dict-like object.

    Parameters
    ----------
    d: dict-like object
        The dict that will be flattened.
    reducer: {'tuple', 'path', function} (default: 'tuple')
        The key joining method. If a function is given, the function will be
        used to reduce.
        'tuple': The resulting key will be tuple of the original keys
        'path': Use ``os.path.join`` to join keys.
    inverse: bool (default: False)
        Whether you want invert the resulting key and value.

    Returns
    -------
    flat_dict: dict

    Raises
    ------
    ValueError
        If two entries flatten to the same key (or, with
        ``inverse=True``, share the same value).
    """
    if isinstance(reducer, str):
        reducer = REDUCER_DICT[reducer]
    flat_dict = {}

    def _flatten(mapping, parent=None):
        # Plain .items() replaces six.viewitems(): it iterates the same
        # key/value pairs and removes the reliance on the py2 shim.
        for key, value in mapping.items():
            flat_key = reducer(parent, key)
            if isinstance(value, Mapping):
                # Recurse into nested mappings, accumulating the joined key.
                _flatten(value, flat_key)
                continue
            if inverse:
                flat_key, value = value, flat_key
            if flat_key in flat_dict:
                raise ValueError("duplicated key '{}'".format(flat_key))
            flat_dict[flat_key] = value

    _flatten(d)
    return flat_dict
|
Flatten dict-like object.
Parameters
----------
d: dict-like object
The dict that will be flattened.
reducer: {'tuple', 'path', function} (default: 'tuple')
The key joining method. If a function is given, the function will be
used to reduce.
'tuple': The resulting key will be tuple of the original keys
'path': Use ``os.path.join`` to join keys.
inverse: bool (default: False)
Whether you want invert the resulting key and value.
Returns
-------
flat_dict: dict
|
def getSimilarTermsForExpression(self, body, contextId=None, posType=None, getFingerprint=None, startIndex=0, maxResults=10, sparsity=1.0):
    """Get similar terms for the contexts of an expression

    Thin delegation wrapper: forwards all arguments, plus this client's
    retina, to the expressions API endpoint.

    Args:
        body, ExpressionOperation: The JSON encoded expression to be evaluated (required)
        contextId, int: The identifier of a context (optional)
        posType, str: Part of speech (optional)
        getFingerprint, bool: Configure if the fingerprint should be returned as part of the results (optional)
        startIndex, int: The start-index for pagination (optional)
        maxResults, int: Max results per page (optional)
        sparsity, float: Sparsify the resulting expression to this percentage (optional)
    Returns:
        list of Term
    Raises:
        CorticalioException: if the request was not successful
    """
    return self._expressions.getSimilarTermsForExpressionContext(self._retina, body, contextId, posType, getFingerprint, startIndex, maxResults, sparsity)
|
Get similar terms for the contexts of an expression
Args:
body, ExpressionOperation: The JSON encoded expression to be evaluated (required)
contextId, int: The identifier of a context (optional)
posType, str: Part of speech (optional)
getFingerprint, bool: Configure if the fingerprint should be returned as part of the results (optional)
startIndex, int: The start-index for pagination (optional)
maxResults, int: Max results per page (optional)
sparsity, float: Sparsify the resulting expression to this percentage (optional)
Returns:
list of Term
Raises:
CorticalioException: if the request was not successful
|
def get_data(self, linesep=os.linesep):
    """
    Serialize the section into an in-memory buffer and return its bytes.

    :param linesep: line separator used when storing (default: ``os.linesep``)
    :return bytes
    """
    buffer = BytesIO()
    self.store(buffer, linesep)
    return buffer.getvalue()
|
Serialize the section and return it as bytes
:return bytes
|
def get(self, key, filepath):
    """Get configuration parameter.

    Reads the configuration parameter `key` from the configuration file
    given in `filepath` and displays it. `key` must follow the schema
    <section>.<option> .

    :param key: key to get
    :param filepath: configuration file
    """
    # Validate the request up front with guard clauses.
    if not filepath:
        raise RuntimeError("Configuration file not given")
    if not self.__check_config_key(key):
        raise RuntimeError("%s parameter does not exists" % key)
    if not os.path.isfile(filepath):
        raise RuntimeError("%s config file does not exist" % filepath)

    section, option = key.split('.')

    parser = configparser.SafeConfigParser()
    parser.read(filepath)

    try:
        value = parser.get(section, option)
        self.display('config.tmpl', key=key, option=value)
    except (configparser.NoSectionError, configparser.NoOptionError):
        # Missing section/option is not an error: simply show nothing.
        pass

    return CMD_SUCCESS
|
Get configuration parameter.
Reads 'key' configuration parameter from the configuration file given
in 'filepath'. Configuration parameter in 'key' must follow the schema
<section>.<option> .
:param key: key to get
:param filepath: configuration file
|
def get_applicable_overlays(self, error_bundle):
    """
    Given an error bundle, a list of overlays that are present in the
    current package or subpackage are returned.

    Scans the manifest's `content` instructions to work out which chrome
    path maps onto the current (sub)package, then collects every
    `overlay` instruction registered under that chrome path.

    :param error_bundle: bundle describing the package being validated;
        also used to report malformed overlay instructions.
    :return: set of package-relative overlay paths (may be empty)
    """
    content_paths = self.get_triples(subject='content')
    if not content_paths:
        return set()
    # Create some variables that will store where the applicable content
    # instruction path references and where it links to.
    chrome_path = ''
    content_root_path = '/'
    # Look through each of the listed packages and paths.
    for path in content_paths:
        chrome_name = path['predicate']
        if not path['object']:
            continue
        # Only the first whitespace-separated token is the location.
        path_location = path['object'].strip().split()[0]
        # Handle jarred paths differently.
        if path_location.startswith('jar:'):
            if not error_bundle.is_nested_package:
                continue
            # Parse out the JAR and it's location within the chrome.
            split_jar_url = path_location[4:].split('!', 2)
            # Ignore invalid/unsupported JAR URLs.
            if len(split_jar_url) != 2:
                continue
            # Unpack the JAR URL.
            jar_path, package_path = split_jar_url
            # Ignore the instruction if the JAR it points to doesn't match
            # up with the current subpackage tree.
            if jar_path != error_bundle.package_stack[0]:
                continue
            chrome_path = self._url_chunk_join(chrome_name, package_path)
            # content_root_path stays at the default: /
            break
        else:
            # If we're in a subpackage, a content instruction referring to
            # the root of the package obviously doesn't apply.
            if error_bundle.is_nested_package:
                continue
            chrome_path = self._url_chunk_join(chrome_name, 'content')
            content_root_path = '/%s/' % path_location.strip('/')
            break
    if not chrome_path:
        return set()
    applicable_overlays = set()
    chrome_path = 'chrome://%s' % self._url_chunk_join(chrome_path + '/')
    for overlay in self.get_triples(subject='overlay'):
        if not overlay['object']:
            error_bundle.error(
                err_id=('chromemanifest', 'get_applicable_overalys',
                        'object'),
                error='Overlay instruction missing a property.',
                description='When overlays are registered in a chrome '
                            'manifest file, they require a namespace and '
                            'a chrome URL at minimum.',
                filename=overlay['filename'],
                line=overlay['line'],
                context=self.context) #TODO(basta): Update this!
            continue
        overlay_url = overlay['object'].split()[0]
        # Keep only overlays registered beneath the matched chrome path,
        # and translate them back to package-relative paths.
        if overlay_url.startswith(chrome_path):
            overlay_relative_path = overlay_url[len(chrome_path):]
            applicable_overlays.add('/%s' %
                                    self._url_chunk_join(content_root_path,
                                                         overlay_relative_path))
    return applicable_overlays
|
Given an error bundle, a list of overlays that are present in the
current package or subpackage are returned.
|
def timestamp_to_microseconds(timestamp):
    """Convert a timestamp string into a microseconds value

    :param timestamp: timestamp string in the format expected by
        ``ISO_DATETIME_REGEX``
    :return time in microseconds since the Unix epoch
    """
    parsed = datetime.datetime.strptime(timestamp, ISO_DATETIME_REGEX)
    whole_seconds = calendar.timegm(parsed.timetuple())
    # timetuple() has only second resolution, so the sub-second part is
    # added back from the parsed datetime's microsecond field.
    return whole_seconds * 1e6 + parsed.microsecond
|
Convert a timestamp string into a microseconds value
:param timestamp
:return time in microseconds
|
def _map_relation(c, language='any'):
"""
Map related concept or collection, leaving out the relations.
:param c: the concept or collection to map
:param string language: Language to render the relation's label in
:rtype: :class:`dict`
"""
label = c.label(language)
return {
'id': c.id,
'type': c.type,
'uri': c.uri,
'label': label.label if label else None
}
|
Map related concept or collection, leaving out the relations.
:param c: the concept or collection to map
:param string language: Language to render the relation's label in
:rtype: :class:`dict`
|
def fix_deplist(deps):
    """ Turn a dependency list into lowercase, and make sure all entries
    that are just a string become a tuple of strings
    """
    normalized = []
    for dep in deps:
        if isinstance(dep, (list, tuple)):
            # already a group of alternatives: lowercase each entry
            normalized.append(tuple(entry.lower() for entry in dep))
        else:
            # bare string: wrap it in a one-element tuple
            normalized.append((dep.lower(),))
    return normalized
|
Turn a dependency list into lowercase, and make sure all entries
that are just a string become a tuple of strings
|
def jpl_horizons_ephemeris(
        log,
        objectId,
        mjd,
        obscode=500,
        verbose=False):
    """Given a known solar-system object ID (human-readable name, MPC number or MPC packed format) and one or more specific epochs, return the calculated ephemerides

    NOTE(review): this function uses Python 2 ``print`` statements and the
    ``grequests`` library; it will not run under Python 3 as-is.

    **Key Arguments:**
        - ``log`` -- logger
        - ``objectId`` -- human-readable name, MPC number or MPC packed format id of the solar-system object or list of names
        - ``mjd`` -- a single MJD, or a list of up to 10,000 MJDs to generate an ephemeris for
        - ``obscode`` -- the observatory code for the ephemeris generation. Default **500** (geocentric)
        - ``verbose`` -- return extra information with each ephemeris
    **Return:**
        - ``resultList`` -- a list of ordered dictionaries containing the returned ephemerides
    **Usage:**
        To generate an ephemeris for a single epoch, using ATLAS Haleakala as your observatory:
        .. code-block:: python
            from rockfinder import jpl_horizons_ephemeris
            eph = jpl_horizons_ephemeris(
                log=log,
                objectId=1,
                mjd=57916.,
                obscode='T05'
            )
        or to generate an ephemeris for multiple epochs:
        .. code-block:: python
            from rockfinder import jpl_horizons_ephemeris
            eph = jpl_horizons_ephemeris(
                log=log,
                objectId="ceres",
                mjd=[57916.1,57917.234,57956.34523]
                verbose=True
            )
        Note by passing `verbose=True` the essential ephemeris data is supplemented with some extra data
        It's also possible to pass in an array of object IDs:
        .. code-block:: python
            from rockfinder import jpl_horizons_ephemeris
            eph = jpl_horizons_ephemeris(
                log=log,
                objectId=[1,5,03547,"Shikoku","K10B11A"],
                mjd=[57916.1,57917.234,57956.34523]
            )
    """
    log.debug('starting the ``jpl_horizons_ephemeris`` function')
    # MAKE SURE MJDs ARE IN A LIST
    if not isinstance(mjd, list):
        mjd = [str(mjd)]
    # Horizons expects a single space-separated TLIST string.
    mjd = (" ").join(map(str, mjd))
    if not isinstance(objectId, list):
        objectList = [objectId]
    else:
        objectList = objectId
    # Column names matching the CSV columns Horizons returns for the
    # QUANTITIES requested below.
    keys = ["jd", "solar_presence", "lunar_presence", "ra_deg", "dec_deg", "ra_arcsec_per_hour", "dec_arcsec_per_hour", "apparent_mag", "surface_brightness", "heliocentric_distance", "heliocentric_motion", "observer_distance", "observer_motion",
            "sun_obs_target_angle", "apparent_motion_relative_to_sun", "sun_target_obs_angle", "ra_3sig_error", "dec_3sig_error", "true_anomaly_angle", "phase_angle", "phase_angle_bisector_long", "phase_angle_bisector_lat"]
    if verbose == True:
        order = ["requestId", "objectId", "mjd", "ra_deg", "dec_deg", "ra_3sig_error", "dec_3sig_error", "ra_arcsec_per_hour", "dec_arcsec_per_hour", "apparent_mag", "heliocentric_distance", "heliocentric_motion", "observer_distance", "observer_motion", "phase_angle", "true_anomaly_angle", "surface_brightness",
                 "sun_obs_target_angle", "sun_target_obs_angle", "apparent_motion_relative_to_sun", "phase_angle_bisector_long", "phase_angle_bisector_lat"]
    else:
        order = ["requestId", "objectId", "mjd", "ra_deg", "dec_deg", "ra_3sig_error", "dec_3sig_error", "ra_arcsec_per_hour",
                 "dec_arcsec_per_hour", "apparent_mag", "heliocentric_distance", "observer_distance", "phase_angle"]
    # Base query parameters for the Horizons batch CGI; COMMAND is filled
    # in per object below.
    params = {
        "COMMAND": "",
        "OBJ_DATA": "'NO'",
        "MAKE_EPHEM": "'YES'",
        "TABLE_TYPE": "'OBS'",
        "CENTER": "'%(obscode)s'" % locals(),
        "TLIST": mjd,
        "QUANTITIES": "'1,3,9,19,20,23,24,36,41,43'",
        "REF_SYSTEM": "'J2000'",
        "CAL_FORMAT": "'JD'",
        "ANG_FORMAT": "'DEG'",
        "APPARENT": "'REFRACTED'",
        "TIME_DIGITS": "'FRACSEC'",
        "TIME_ZONE": "'+00:00'",
        "RANGE_UNITS": "'AU'",
        "SUPPRESS_RANGE_RATE": "'NO'",
        "SKIP_DAYLT": "'YES'",
        "EXTRA_PREC": "'YES'",
        "CSV_FORMAT": "'YES'",
        "batch": "1",
    }
    resultList = []
    paramList = []
    for objectId in objectList:
        requestId = objectId
        # FIX THE COMMAND FOR NUMBERED OBJECTS
        try:
            thisId = int(objectId)
            objectId = "%(thisId)s" % locals()
        except Exception as e:
            pass
        theseparams = copy.deepcopy(params)
        theseparams["COMMAND"] = '"' + objectId + '"'
        paramList.append(theseparams)
        # TEST THE URL
        # try:
        #     import requests
        #     response = requests.get(
        #         url="https://ssd.jpl.nasa.gov/horizons_batch.cgi",
        #         params=theseparams,
        #     )
        #     content = response.content
        #     status_code = response.status_code
        #     print response.url
        # except requests.exceptions.RequestException:
        #     print('HTTP Request failed')
        #     sys.exit(0)
    # Fire all queries concurrently (size=1 keeps them sequential on the
    # wire, but still via grequests' map interface).
    rs = [grequests.get("https://ssd.jpl.nasa.gov/horizons_batch.cgi", params=p)
          for p in paramList]

    def exception_handler(request, exception):
        print "Request failed"
        print exception
    returns = grequests.map(rs, size=1, exception_handler=exception_handler)
    for result, requestId in zip(returns, objectList):
        r = result.content
        match = re.search(
            r'Target body name:\s(.*?)\{',
            r,
            flags=re.S  # re.S
        )
        if not match:
            log.warning(
                "Horizons could not find a match for `%(requestId)s`" % locals())
            # NOTE(review): this retry block looks like leftover debug code:
            # `theseparams` is the loop variable from the build loop above and
            # always refers to the LAST object's parameters (not the failed
            # request's), and `sys.exit(0)` on a transport error kills the
            # whole process — confirm intent before relying on this path.
            try:
                import requests
                response = requests.get(
                    url="https://ssd.jpl.nasa.gov/horizons_batch.cgi",
                    params=theseparams,
                )
                content = response.content
                status_code = response.status_code
                print response.url
            except requests.exceptions.RequestException:
                print('HTTP Request failed')
                sys.exit(0)
            # Emit a placeholder row so the caller still gets one entry per
            # requested object.
            objectDict = {}
            for k in keys:
                v = None
                objectDict[k] = v
            objectDict["objectId"] = requestId + " - NOT FOUND"
            objectDict["requestId"] = requestId
            objectDict["mjd"] = None
            orderDict = collections.OrderedDict({})
            for i in order:
                orderDict[i] = objectDict[i]
            resultList.append(orderDict)
            continue
        horizonsId = match.group(1).replace("(", "").replace(")", "").strip()
        # The ephemeris rows live between the $$SOE and $$EOE markers.
        match = re.search(
            r'\$\$SOE\n(.*?)\n\$\$EOE',
            r,
            flags=re.S  # re.S
        )
        keys2 = copy.deepcopy(keys)
        order2 = copy.deepcopy(order)
        # Surface brightness column is only present for some objects.
        if "S-brt," not in r:
            keys2.remove("surface_brightness")
            try:
                order2.remove("surface_brightness")
            except:
                pass
        lines = match.group(1).split("\n")
        for line in lines:
            vals = line.split(",")
            objectDict = {}
            for k, v in zip(keys2, vals):
                v = v.strip().replace("/", "")
                try:
                    v = float(v)
                except:
                    pass
                objectDict[k] = v
            # Horizons reports JD; convert to MJD for the caller.
            objectDict["mjd"] = objectDict["jd"] - 2400000.5
            objectDict["objectId"] = horizonsId
            objectDict["requestId"] = requestId
            orderDict = collections.OrderedDict({})
            for i in order2:
                orderDict[i] = objectDict[i]
            resultList.append(orderDict)
    log.debug('completed the ``jpl_horizons_ephemeris`` function')
    return resultList
|
Given a known solar-system object ID (human-readable name, MPC number or MPC packed format) and one or more specific epochs, return the calculated ephemerides
**Key Arguments:**
- ``log`` -- logger
- ``objectId`` -- human-readable name, MPC number or MPC packed format id of the solar-system object or list of names
- ``mjd`` -- a single MJD, or a list of up to 10,000 MJDs to generate an ephemeris for
- ``obscode`` -- the observatory code for the ephemeris generation. Default **500** (geocentric)
- ``verbose`` -- return extra information with each ephemeris
**Return:**
- ``resultList`` -- a list of ordered dictionaries containing the returned ephemerides
**Usage:**
To generate an ephemeris for a single epoch, using ATLAS Haleakala as your observatory:
.. code-block:: python
from rockfinder import jpl_horizons_ephemeris
eph = jpl_horizons_ephemeris(
log=log,
objectId=1,
mjd=57916.,
obscode='T05'
)
or to generate an ephemeris for multiple epochs:
.. code-block:: python
from rockfinder import jpl_horizons_ephemeris
eph = jpl_horizons_ephemeris(
log=log,
objectId="ceres",
mjd=[57916.1,57917.234,57956.34523]
verbose=True
)
Note by passing `verbose=True` the essential ephemeris data is supplemented with some extra data
It's also possible to pass in an array of object IDs:
.. code-block:: python
from rockfinder import jpl_horizons_ephemeris
eph = jpl_horizons_ephemeris(
log=log,
objectId=[1,5,03547,"Shikoku","K10B11A"],
mjd=[57916.1,57917.234,57956.34523]
)
|
def multiple_paths_parser(value):
    """Parses data_path argument.

    Parameters
    ----------
    value : str or list
        a string of data paths separated by the platform path separator
        (``os.path.pathsep``: ``:`` on POSIX, ``;`` on Windows), or a
        value that is already a list.

    Returns
    -------
    value : list
        a list of strings indicating each data paths.
    """
    # Non-string values are assumed to be pre-split and pass through.
    if not isinstance(value, six.string_types):
        return value
    return value.split(os.path.pathsep)
|
Parses data_path argument.
Parameters
----------
value : str
a string of data paths separated by ":".
Returns
-------
value : list
a list of strings indicating each data paths.
|
def array_equivalent(left, right, strict_nan=False):
    """
    True if two arrays, left and right, have equal non-NaN elements, and NaNs
    in corresponding locations. False otherwise. It is assumed that left and
    right are NumPy arrays of the same dtype. The behavior of this function
    (particularly with respect to NaNs) is not defined if the dtypes are
    different.

    Parameters
    ----------
    left, right : ndarrays
    strict_nan : bool, default False
        If True, consider NaN and None to be different.

    Returns
    -------
    b : bool
        Returns True if the arrays are equivalent.

    Examples
    --------
    >>> array_equivalent(
    ...     np.array([1, 2, np.nan]),
    ...     np.array([1, 2, np.nan]))
    True
    >>> array_equivalent(
    ...     np.array([1, np.nan, 2]),
    ...     np.array([1, 2, np.nan]))
    False
    """
    left, right = np.asarray(left), np.asarray(right)

    # shape compat
    if left.shape != right.shape:
        return False

    # Object arrays can contain None, NaN and NaT.
    # string dtypes must be come to this path for NumPy 1.7.1 compat
    if is_string_dtype(left) or is_string_dtype(right):

        if not strict_nan:
            # isna considers NaN and None to be equivalent.
            return lib.array_equivalent_object(
                ensure_object(left.ravel()), ensure_object(right.ravel()))

        # strict_nan: element-wise comparison distinguishing NaN from
        # None and NaT from everything else.
        for left_value, right_value in zip(left, right):
            if left_value is NaT and right_value is not NaT:
                return False

            elif isinstance(left_value, float) and np.isnan(left_value):
                if (not isinstance(right_value, float) or
                        not np.isnan(right_value)):
                    return False
            else:
                if left_value != right_value:
                    return False
        return True

    # NaNs can occur in float and complex arrays.
    if is_float_dtype(left) or is_complex_dtype(left):

        # empty
        if not (np.prod(left.shape) and np.prod(right.shape)):
            return True
        # equal where both equal, or both NaN at the same positions
        return ((left == right) | (isna(left) & isna(right))).all()

    # numpy will not allow this type of datetimelike vs integer comparison
    elif is_datetimelike_v_numeric(left, right):
        return False

    # M8/m8
    elif needs_i8_conversion(left) and needs_i8_conversion(right):
        if not is_dtype_equal(left.dtype, right.dtype):
            return False

        # compare datetimelike values on their integer representation
        left = left.view('i8')
        right = right.view('i8')

    # if we have structured dtypes, compare first
    if (left.dtype.type is np.void or
            right.dtype.type is np.void):
        if left.dtype != right.dtype:
            return False

    return np.array_equal(left, right)
|
True if two arrays, left and right, have equal non-NaN elements, and NaNs
in corresponding locations. False otherwise. It is assumed that left and
right are NumPy arrays of the same dtype. The behavior of this function
(particularly with respect to NaNs) is not defined if the dtypes are
different.
Parameters
----------
left, right : ndarrays
strict_nan : bool, default False
If True, consider NaN and None to be different.
Returns
-------
b : bool
Returns True if the arrays are equivalent.
Examples
--------
>>> array_equivalent(
... np.array([1, 2, np.nan]),
... np.array([1, 2, np.nan]))
True
>>> array_equivalent(
... np.array([1, np.nan, 2]),
... np.array([1, 2, np.nan]))
False
|
def stop(ctx, yes):
    """Stops the tensorboard deployment for project/experiment/experiment group if it exists.

    Uses [Caching](/references/polyaxon-cli/#caching)

    Examples: stopping project tensorboard

    \b
    ```bash
    $ polyaxon tensorboard stop
    ```

    Examples: stopping experiment group tensorboard

    \b
    ```bash
    $ polyaxon tensorboard -g 1 stop
    ```

    Examples: stopping experiment tensorboard

    \b
    ```bash
    $ polyaxon tensorboard -xp 112 stop
    ```
    """
    user, project_name = get_project_or_local(ctx.obj.get('project'))
    group = ctx.obj.get('group')
    experiment = ctx.obj.get('experiment')
    if experiment:
        obj = 'experiment `{}`'.format(experiment)
    elif group:
        obj = 'group `{}`'.format(group)
    else:
        obj = 'project `{}/{}`'.format(user, project_name)

    # BUGFIX: messages previously read "Are sure you want ..." and
    # "Existing without stopping tensorboard."
    if not yes and not click.confirm("Are you sure you want to stop tensorboard "
                                     "for {}".format(obj)):
        click.echo('Exiting without stopping tensorboard.')
        sys.exit(1)

    def _run_stop(call):
        # Shared success/error handling for the three stop variants
        # (previously triplicated verbatim).
        try:
            call()
            Printer.print_success('Tensorboard is being deleted')
        except (PolyaxonHTTPError, PolyaxonShouldExitError, PolyaxonClientException) as e:
            Printer.print_error('Could not stop tensorboard {}.'.format(obj))
            Printer.print_error('Error message `{}`.'.format(e))
            sys.exit(1)

    if experiment:
        _run_stop(lambda: PolyaxonClient().experiment.stop_tensorboard(
            username=user,
            project_name=project_name,
            experiment_id=experiment))
    elif group:
        _run_stop(lambda: PolyaxonClient().experiment_group.stop_tensorboard(
            username=user,
            project_name=project_name,
            group_id=group))
    else:
        _run_stop(lambda: PolyaxonClient().project.stop_tensorboard(
            username=user,
            project_name=project_name))
|
Stops the tensorboard deployment for project/experiment/experiment group if it exists.
Uses [Caching](/references/polyaxon-cli/#caching)
Examples: stopping project tensorboard
\b
```bash
$ polyaxon tensorboard stop
```
Examples: stopping experiment group tensorboard
\b
```bash
$ polyaxon tensorboard -g 1 stop
```
Examples: stopping experiment tensorboard
\b
```bash
$ polyaxon tensorboard -xp 112 stop
```
|
def _post_start(self):
"""Set stdout to non-blocking
VLC does not always return a newline when reading status so in order to
be lazy and still use the read API without caring about how much output
there is we switch stdout to nonblocking mode and just read a large
chunk of datin order to be lazy and still use the read API without
caring about how much output there is we switch stdout to nonblocking
mode and just read a large chunk of data.
"""
flags = fcntl.fcntl(self._process.stdout, fcntl.F_GETFL)
fcntl.fcntl(self._process.stdout, fcntl.F_SETFL, flags | os.O_NONBLOCK)
|
Set stdout to non-blocking
VLC does not always return a newline when reading status so in order to
be lazy and still use the read API without caring about how much output
there is we switch stdout to nonblocking mode and just read a large
chunk of data.
|
def paintEvent(self, event):
    """
    Paints the background for the dock toolbar.

    :param      event | <QPaintEvent>
    """
    x = 1
    y = 1
    w = self.width()
    h = self.height()

    clr_a = QColor(220, 220, 220)
    clr_b = QColor(190, 190, 190)

    grad = QLinearGradient()
    grad.setColorAt(0.0, clr_a)
    grad.setColorAt(0.6, clr_a)
    grad.setColorAt(1.0, clr_b)

    # adjust the coloring for the horizontal toolbar
    if self.position() & (self.Position.North | self.Position.South):
        h = self.minimumPixmapSize().height() + 6

        if self.position() == self.Position.South:
            y = self.height() - h
            grad.setStart(0, y)
            grad.setFinalStop(0, self.height())
        else:
            grad.setStart(0, 0)
            # BUGFIX: was grad.setFinalStart(0, h) — QLinearGradient has no
            # setFinalStart() method, so north-docked toolbars raised
            # AttributeError; the gradient end point is set via setFinalStop().
            grad.setFinalStop(0, h)

    # adjust the coloring for the vertical toolbar
    if self.position() & (self.Position.East | self.Position.West):
        w = self.minimumPixmapSize().width() + 6

        if self.position() == self.Position.West:
            x = self.width() - w
            grad.setStart(x, 0)
            grad.setFinalStop(self.width(), 0)
        else:
            grad.setStart(0, 0)
            grad.setFinalStop(w, 0)

    with XPainter(self) as painter:
        painter.fillRect(x, y, w, h, grad)

        # show the active action
        action = self.selectedAction()
        if action is not None and \
                not self.currentAction() and \
                not self._animating:
            for lbl in self.actionLabels():
                if lbl.action() != action:
                    continue

                geom = lbl.geometry()
                size = lbl.pixmapSize()

                # highlight rect hugs the label on the docked edge
                if self.position() == self.Position.North:
                    x = geom.left()
                    y = 0
                    w = geom.width()
                    h = size.height() + geom.top() + 2
                elif self.position() == self.Position.East:
                    x = 0
                    y = geom.top()
                    w = size.width() + geom.left() + 2
                    h = geom.height()

                painter.setPen(QColor(140, 140, 40))
                painter.setBrush(QColor(160, 160, 160))
                painter.drawRect(x, y, w, h)
                break
|
Paints the background for the dock toolbar.
:param event | <QPaintEvent>
|
def _drawForeground(self, scene, painter, rect):
    """
    Draws the foreground for a particular scene within the charts.

    :param      scene   | <XChartScene>
                painter | <QPainter>
                rect    | <QRectF>
    """
    # render over the scene's full rect rather than the exposed rect
    rect = scene.sceneRect()

    # only the chart view's scene gets the foreground decoration
    if scene != self.uiChartVIEW.scene():
        return

    self.renderer().drawForeground(painter,
                                   rect,
                                   self.showGrid(),
                                   self.showColumns(),
                                   self.showRows())
|
Draws the foreground for a particular scene within the charts.
:param scene | <XChartScene>
painter | <QPainter>
rect | <QRectF>
|
def find_guests(names, path=None):
    '''
    Return a dict of hosts and named guests

    path
        path to the container parent
        default: /var/lib/lxc (system default)

        .. versionadded:: 2015.8.0
    '''
    wanted = names.split(',')
    found = {}
    for data in _list_iter(path=path):
        host, states = next(six.iteritems(data))
        # states maps a state name to the guests currently in that state
        for state in states:
            for guest in states[state]:
                if guest in wanted:
                    found.setdefault(host, []).append(guest)
    return found
|
Return a dict of hosts and named guests
path
path to the container parent
default: /var/lib/lxc (system default)
.. versionadded:: 2015.8.0
|
def parse_stream(self, stream: BytesIO, context=None):
    """
    Parse some python object from the stream.

    :param stream: Stream from which the data is read and parsed.
    :param context: Optional context dictionary.
    """
    # normalize whatever was passed into a Context instance
    if context is None:
        ctx = Context()
    elif isinstance(context, Context):
        ctx = context
    else:
        ctx = Context(context)
    try:
        return self._parse_stream(stream, ctx)
    except Error:
        # library errors propagate unchanged
        raise
    except Exception as exc:
        # anything else is wrapped so callers see a single error type
        raise ParsingError(str(exc))
|
Parse some python object from the stream.
:param stream: Stream from which the data is read and parsed.
:param context: Optional context dictionary.
|
def verify(self):
    """
    Access the Verify Twilio Domain

    The domain client is created lazily on first access and cached on
    this instance for subsequent calls.

    :returns: Verify Twilio Domain
    :rtype: twilio.rest.verify.Verify
    """
    if self._verify is None:
        # Imported here rather than at module level so the domain module
        # is only loaded when actually used.
        from twilio.rest.verify import Verify
        self._verify = Verify(self)
    return self._verify
|
Access the Verify Twilio Domain
:returns: Verify Twilio Domain
:rtype: twilio.rest.verify.Verify
|
def normalize_so_name(name):
    """
    Handle different types of python installations
    """
    stem = os.path.splitext(name)[0]
    if "cpython" in name:
        # strip the ABI tag too, e.g. foo.cpython-37m-x86_64.so -> foo
        return os.path.splitext(stem)[0]
    # XXX: Special handling for Fedora python2 distribution
    # See: https://github.com/python-rope/rope/issues/211
    if name == "timemodule.so":
        return "time"
    return stem
|
Handle different types of python installations
|
def detect_branchings(self):
    """Detect all branchings up to `n_branchings`.

    Iteratively selects a segment and splits it, up to
    ``self.n_branchings`` times or until no segment qualifies.

    Writes Attributes
    -----------------
    segs : np.ndarray
        List of integer index arrays.
    segs_tips : np.ndarray
        List of indices of the tips of segments.
    """
    logg.m('    detect', self.n_branchings,
           'branching' + ('' if self.n_branchings == 1 else 's'))
    # a segment is a subset of points of the data set (defined by the
    # indices of the points in the segment)
    # initialize the search for branchings with a single segment,
    # that is, get the indices of the whole data set
    indices_all = np.arange(self._adata.shape[0], dtype=int)
    # let's keep a list of segments, the first segment to add is the
    # whole data set
    segs = [indices_all]
    # a segment can as well be defined by the two points that have maximal
    # distance in the segment, the "tips" of the segment
    #
    # the rest of the points in the segment is then defined by demanding
    # them to "be close to the line segment that connects the tips", that
    # is, for such a point, the normalized added distance to both tips is
    # smaller than one:
    #     (D[tips[0],i] + D[tips[1],i])/D[tips[0],tips[1] < 1
    # of course, this condition is fulfilled by the full cylindrical
    # subspace surrounding that line segment, where the radius of the
    # cylinder can be infinite
    #
    # if D denotes a euclidian distance matrix, a line segment is a linear
    # object, and the name "line" is justified. if we take the
    # diffusion-based distance matrix Dchosen, which approximates geodesic
    # distance, with "line", we mean the shortest path between two points,
    # which can be highly non-linear in the original space
    #
    # let us define the tips of the whole data set
    if False:  # this is safe, but not compatible with on-the-fly computation
        tips_all = np.array(np.unravel_index(np.argmax(self.distances_dpt), self.distances_dpt.shape))
    else:
        # cheaper two-pass heuristic: farthest point from the root (or
        # point 0), then the point farthest from that
        if self.iroot is not None:
            tip_0 = np.argmax(self.distances_dpt[self.iroot])
        else:
            tip_0 = np.argmax(self.distances_dpt[0])
        tips_all = np.array([tip_0, np.argmax(self.distances_dpt[tip_0])])
    # we keep a list of the tips of each segment
    segs_tips = [tips_all]
    segs_connects = [[]]
    segs_undecided = [True]
    segs_adjacency = [[]]
    logg.m('    do not consider groups with less than {} points for splitting'
           .format(self.min_group_size))
    for ibranch in range(self.n_branchings):
        iseg, tips3 = self.select_segment(segs, segs_tips, segs_undecided)
        if iseg == -1:
            logg.m('    partitioning converged')
            break
        logg.m('    branching {}:'.format(ibranch + 1),
               'split group', iseg)  # [third start end]
        # detect branching and update segs and segs_tips
        self.detect_branching(segs, segs_tips,
                              segs_connects,
                              segs_undecided,
                              segs_adjacency, iseg, tips3)
    # store as class members
    self.segs = segs
    self.segs_tips = segs_tips
    self.segs_undecided = segs_undecided
    # the following is a bit too much, but this allows easy storage
    self.segs_adjacency = sp.sparse.lil_matrix((len(segs), len(segs)), dtype=float)
    self.segs_connects = sp.sparse.lil_matrix((len(segs), len(segs)), dtype=int)
    for i, seg_adjacency in enumerate(segs_adjacency):
        self.segs_connects[i, seg_adjacency] = segs_connects[i]
    # adjacency weights are the dpt distances between connecting points
    for i in range(len(segs)):
        for j in range(len(segs)):
            self.segs_adjacency[i, j] = self.distances_dpt[self.segs_connects[i, j],
                                                           self.segs_connects[j, i]]
    self.segs_adjacency = self.segs_adjacency.tocsr()
    self.segs_connects = self.segs_connects.tocsr()
|
Detect all branchings up to `n_branchings`.
Writes Attributes
-----------------
segs : np.ndarray
List of integer index arrays.
segs_tips : np.ndarray
List of indices of the tips of segments.
|
def chr(self):
    """the reference chromosome. greedy return the first chromosome in exon array

    :return: chromosome (or None, with a warning, when no exons are present)
    :rtype: string
    """
    if not self.exons:
        sys.stderr.write("WARNING can't return chromsome with nothing here\n")
        return None
    # greedily use the first range's chromosome
    return self._rngs[0].chr
|
the reference chromosome. greedy return the first chromosome in exon array
:return: chromosome
:rtype: string
|
def _check_rel(attrs, rel_whitelist, rel_blacklist):
""" Check a link's relations against the whitelist or blacklist.
First, this will reject based on blacklist.
Next, if there is a whitelist, there must be at least one rel that matches.
To explicitly allow links without a rel you can add None to the whitelist
(e.g. ['in-reply-to',None])
"""
rels = attrs.get('rel', [None])
if rel_blacklist:
# Never return True for a link whose rel appears in the blacklist
for rel in rels:
if rel in rel_blacklist:
return False
if rel_whitelist:
# If there is a whitelist for rels, only return true for a rel that
# appears in it
for rel in rels:
if rel in rel_whitelist:
return True
# If there is a whitelist and we don't match, then reject
return False
return True
|
Check a link's relations against the whitelist or blacklist.
First, this will reject based on blacklist.
Next, if there is a whitelist, there must be at least one rel that matches.
To explicitly allow links without a rel you can add None to the whitelist
(e.g. ['in-reply-to',None])
|
def draw_line(self, img, pixmapper, pt1, pt2, colour, linewidth):
    '''draw a line on the image'''
    start = pixmapper(pt1)
    end = pixmapper(pt2)
    (width, height) = image_shape(img)
    (ret, start, end) = cv2.clipLine((0, 0, width, height), start, end)
    if ret is False:
        # Segment is entirely off-screen: record placeholder points only.
        if len(self._pix_points) == 0:
            self._pix_points.append(None)
        self._pix_points.append(None)
        return
    cv2.line(img, start, end, colour, linewidth)
    cv2.circle(img, end, linewidth*2, colour)
    if len(self._pix_points) == 0:
        self._pix_points.append(start)
    self._pix_points.append(end)
    if self.arrow:
        dx = end[0] - start[0]
        dy = end[1] - start[1]
        # Only decorate segments longer than 20 pixels with an arrow,
        # drawn at the midpoint and oriented along the segment.
        if (dx * dx + dy * dy) > 400:
            SlipArrow(self.key, self.layer,
                      (int(start[0] + dx / 2.0), int(start[1] + dy / 2.0)),
                      self.colour, self.linewidth,
                      math.atan2(dy, dx) + math.pi / 2.0).draw(img)
|
draw a line on the image
|
def load_models(*chain, **kwargs):
    """
    Decorator to load a chain of models from the given parameters. This works just like
    :func:`load_model` and accepts the same parameters, with some small differences.
    :param chain: The chain is a list of tuples of (``model``, ``attributes``, ``parameter``).
        Lists and tuples can be used interchangeably. All retrieved instances are passed as
        parameters to the decorated function
    :param permission: Same as in :func:`load_model`, except
        :meth:`~coaster.sqlalchemy.PermissionMixin.permissions` is called on every instance
        in the chain and the retrieved permissions are passed as the second parameter to the
        next instance in the chain. This allows later instances to revoke permissions granted
        by earlier instances. As an example, if a URL represents a hierarchy such as
        ``/<page>/<comment>``, the ``page`` can assign ``edit`` and ``delete`` permissions,
        while the ``comment`` can revoke ``edit`` and retain ``delete`` if the current user
        owns the page but not the comment
    In the following example, load_models loads a Folder with a name matching the name in the
    URL, then loads a Page with a matching name and with the just-loaded Folder as parent.
    If the Page provides a 'view' permission to the current user, the decorated
    function is called::
        @app.route('/<folder_name>/<page_name>')
        @load_models(
            (Folder, {'name': 'folder_name'}, 'folder'),
            (Page, {'name': 'page_name', 'parent': 'folder'}, 'page'),
            permission='view')
        def show_page(folder, page):
            return render_template('page.html', folder=folder, page=page)
    """
    def inner(f):
        @wraps(f)
        def decorated_function(*args, **kw):
            permissions = None
            permission_required = kwargs.get('permission')
            url_check_attributes = kwargs.get('urlcheck', [])
            # Normalise the permission spec to a set (a single string or any iterable).
            if isinstance(permission_required, six.string_types):
                permission_required = set([permission_required])
            elif permission_required is not None:
                permission_required = set(permission_required)
            # Instances loaded so far, keyed by parameter name; later links
            # in the chain may reference them in their attribute specs.
            result = {}
            for models, attributes, parameter in chain:
                if not isinstance(models, (list, tuple)):
                    models = (models,)
                item = None
                # Try each candidate model in order until one yields a row.
                for model in models:
                    query = model.query
                    url_check = False
                    url_check_paramvalues = {}
                    for k, v in attributes.items():
                        # A callable attribute spec computes the filter value
                        # from previously loaded results and view kwargs.
                        if callable(v):
                            query = query.filter_by(**{k: v(result, kw)})
                        else:
                            # Dotted names traverse attributes of an earlier result
                            # (e.g. 'folder.parent'); plain names look up an earlier
                            # result first, then the view kwargs.
                            if '.' in v:
                                first, attrs = v.split('.', 1)
                                val = result.get(first)
                                for attr in attrs.split('.'):
                                    val = getattr(val, attr)
                            else:
                                val = result.get(v, kw.get(v))
                            query = query.filter_by(**{k: val})
                            if k in url_check_attributes:
                                url_check = True
                                url_check_paramvalues[k] = (v, val)
                    item = query.first()
                    if item is not None:
                        # We found it, so don't look in additional models
                        break
                if item is None:
                    abort(404)
                if hasattr(item, 'redirect_view_args'):
                    # This item is a redirect object. Redirect to destination
                    view_args = dict(request.view_args)
                    view_args.update(item.redirect_view_args())
                    location = url_for(request.endpoint, **view_args)
                    if request.query_string:
                        location = location + u'?' + request.query_string.decode()
                    return redirect(location, code=307)
                if permission_required:
                    # Each link's permissions inherit from the previous link,
                    # letting later instances revoke earlier grants.
                    permissions = item.permissions(current_auth.actor, inherited=permissions)
                    addlperms = kwargs.get('addlperms') or []
                    if callable(addlperms):
                        addlperms = addlperms() or []
                    permissions.update(addlperms)
                if g: # XXX: Deprecated
                    g.permissions = permissions
                if request:
                    add_auth_attribute('permissions', permissions)
                if url_check and request.method == 'GET': # Only do urlcheck redirects on GET requests
                    url_redirect = False
                    view_args = None
                    # Redirect to the canonical URL when any checked attribute
                    # differs from the value that appeared in the URL.
                    for k, v in url_check_paramvalues.items():
                        uparam, uvalue = v
                        if getattr(item, k) != uvalue:
                            url_redirect = True
                            if view_args is None:
                                view_args = dict(request.view_args)
                            view_args[uparam] = getattr(item, k)
                    if url_redirect:
                        location = url_for(request.endpoint, **view_args)
                        if request.query_string:
                            location = location + u'?' + request.query_string.decode()
                        return redirect(location, code=302)
                # A 'g.'-prefixed parameter is also stored on flask.g.
                if parameter.startswith('g.'):
                    parameter = parameter[2:]
                    setattr(g, parameter, item)
                result[parameter] = item
            # Permissions from the final link decide access to the view.
            if permission_required and not permission_required & permissions:
                abort(403)
            if kwargs.get('kwargs'):
                return f(*args, kwargs=kw, **result)
            else:
                return f(*args, **result)
        return decorated_function
    return inner
|
Decorator to load a chain of models from the given parameters. This works just like
:func:`load_model` and accepts the same parameters, with some small differences.
:param chain: The chain is a list of tuples of (``model``, ``attributes``, ``parameter``).
Lists and tuples can be used interchangeably. All retrieved instances are passed as
parameters to the decorated function
:param permission: Same as in :func:`load_model`, except
:meth:`~coaster.sqlalchemy.PermissionMixin.permissions` is called on every instance
in the chain and the retrieved permissions are passed as the second parameter to the
next instance in the chain. This allows later instances to revoke permissions granted
by earlier instances. As an example, if a URL represents a hierarchy such as
``/<page>/<comment>``, the ``page`` can assign ``edit`` and ``delete`` permissions,
while the ``comment`` can revoke ``edit`` and retain ``delete`` if the current user
owns the page but not the comment
In the following example, load_models loads a Folder with a name matching the name in the
URL, then loads a Page with a matching name and with the just-loaded Folder as parent.
If the Page provides a 'view' permission to the current user, the decorated
function is called::
@app.route('/<folder_name>/<page_name>')
@load_models(
(Folder, {'name': 'folder_name'}, 'folder'),
(Page, {'name': 'page_name', 'parent': 'folder'}, 'page'),
permission='view')
def show_page(folder, page):
return render_template('page.html', folder=folder, page=page)
|
def verify_item_signature(signature_attribute, encrypted_item, verification_key, crypto_config):
    # type: (dynamodb_types.BINARY_ATTRIBUTE, dynamodb_types.ITEM, DelegatedKey, CryptoConfig) -> None
    """Verify the item signature.

    :param dict signature_attribute: Item signature DynamoDB attribute value
    :param dict encrypted_item: Encrypted DynamoDB item
    :param DelegatedKey verification_key: DelegatedKey to use to calculate the signature
    :param CryptoConfig crypto_config: Cryptographic configuration
    """
    # Pull the raw signature bytes out of the DynamoDB binary attribute.
    raw_signature = signature_attribute[Tag.BINARY.dynamodb_tag]
    # Recompute the canonical string-to-sign for the encrypted item.
    string_to_sign = _string_to_sign(
        item=encrypted_item,
        table_name=crypto_config.encryption_context.table_name,
        attribute_actions=crypto_config.attribute_actions,
    )
    # Delegate the actual verification to the key; raises on mismatch.
    verification_key.verify(
        algorithm=verification_key.algorithm,
        signature=raw_signature,
        data=string_to_sign,
    )
|
Verify the item signature.
:param dict signature_attribute: Item signature DynamoDB attribute value
:param dict encrypted_item: Encrypted DynamoDB item
:param DelegatedKey verification_key: DelegatedKey to use to calculate the signature
:param CryptoConfig crypto_config: Cryptographic configuration
|
def get_overall_services_health(self) -> str:
    """Get the aggregate health of all the services.

    Returns:
        str, "Healthy" when every individual service reports "Healthy",
        otherwise "Unhealthy".
    """
    statuses = self.get_services_health().values()
    all_healthy = all(state == "Healthy" for state in statuses)
    return "Healthy" if all_healthy else "Unhealthy"
|
Get the overall health of all the services.
Returns:
str, overall health status
|
def _wrap_handling(kwargs):
    """Run a queue handler after configuring logging for the queue."""
    _configure_logging(kwargs, extract=False)
    handler = kwargs['handler']
    graceful_exit = kwargs['graceful_exit']
    # Optionally install the SIGINT handler so the queue can drain
    # cleanly on interrupt, then start the main receive loop that
    # listens to the queue and writes messages to disk.
    if graceful_exit:
        sigint_handling.start()
    handler.run()
|
Starts running a queue handler and creates a log file for the queue.
|
def OnApprove(self, event):
    """File approve event handler.

    Asks the user to confirm trusting a file they did not create; on
    confirmation leaves safe mode (after which the file is executed)
    and reports the change in the status bar.
    """
    # Only meaningful while in safe mode; otherwise nothing to approve.
    if not self.main_window.safe_mode:
        return
    msg = _(u"You are going to approve and trust a file that\n"
            u"you have not created yourself.\n"
            u"After proceeding, the file is executed.\n \n"
            u"It may harm your system as any program can.\n"
            u"Please check all cells thoroughly before\nproceeding.\n \n"
            u"Proceed and sign this file as trusted?")
    short_msg = _("Security warning")
    # Explicit user confirmation is required before trusting the file.
    if self.main_window.interfaces.get_warning_choice(msg, short_msg):
        # Leave safe mode
        self.main_window.grid.actions.leave_safe_mode()
        # Display safe mode end in status bar
        statustext = _("Safe mode deactivated.")
        post_command_event(self.main_window, self.main_window.StatusBarMsg,
                           text=statustext)
|
File approve event handler
|
def to_dict(self):
    """Serialize the detailed proxy info.

    Returns:
        dict: A dict with four keys: ``addr``, ``protocol``,
            ``weight`` and ``last_checked``
    """
    return {
        'addr': self.addr,
        'protocol': self.protocol,
        'weight': self.weight,
        'last_checked': self.last_checked,
    }
|
convert detailed proxy info into a dict
Returns:
dict: A dict with four keys: ``addr``, ``protocol``,
``weight`` and ``last_checked``
|
def http_adapter_kwargs():
    """
    Provides Zenpy's default HTTPAdapter args for those users providing their own adapter.
    """
    # Transparently retry requests that are safe to retry, with the exception of 429. This is handled
    # in the Api._call_api() method.
    retry_statuses = [code for code in Retry.RETRY_AFTER_STATUS_CODES if code != 429]
    return {
        'max_retries': Retry(
            total=3,
            status_forcelist=retry_statuses,
            respect_retry_after_header=False
        )
    }
|
Provides Zenpy's default HTTPAdapter args for those users providing their own adapter.
|
def timeline_list(self, id, max_id=None, min_id=None, since_id=None, limit=None):
    """
    Fetches a timeline containing all the toots by users in a given list.
    Returns a list of `toot dicts`_.
    """
    list_id = self.__unpack_id(id)
    endpoint = 'list/{0}'.format(list_id)
    return self.timeline(endpoint, max_id=max_id, min_id=min_id,
                         since_id=since_id, limit=limit)
|
Fetches a timeline containing all the toots by users in a given list.
Returns a list of `toot dicts`_.
|
def parse(self, request):
    """ Parse request content.
    :return dict: parsed data.
    """
    # Only requests with a body are parsed.
    if request.method not in ('POST', 'PUT', 'PATCH'):
        return dict()
    content_type = self.determine_content(request)
    if not content_type:
        return dict()
    # Drop any parameters (e.g. "; charset=utf-8") from the media type.
    content_type = content_type.split(';', 1)[0].strip()
    parser = self._meta.parsers_dict.get(
        content_type, self._meta.default_parser)
    data = parser(self).parse(request)
    # A parser may hand back raw text; normalise that to an empty dict.
    return dict() if isinstance(data, basestring) else data
|
Parse request content.
:return dict: parsed data.
|
def _match_serializers_by_accept_headers(self, serializers,
                                         default_media_type):
    """Match serializer by `Accept` headers."""
    accepts = request.accept_mimetypes
    # No Accept headers at all: fall back to the default media type.
    if len(accepts) == 0:
        return serializers[default_media_type]
    # Pick the known serializer with the highest client-supplied quality.
    best_match = None
    best_q = -1
    saw_wildcard = False
    for mimetype, q in accepts:
        if q <= best_q:
            continue
        if mimetype == '*/*':
            saw_wildcard = True
        for candidate in serializers:
            if candidate in ('*/*', mimetype) and q > 0:
                best_q = q
                best_match = candidate
    # No direct match, but the client accepts anything: use the default.
    if best_match is None and saw_wildcard:
        best_match = default_media_type
    if best_match is not None:
        return serializers[best_match]
    return None
|
Match serializer by `Accept` headers.
|
def _connected(self, sock):
    """When the socket is writable, the socket is ready to be used."""
    logger.debug('socket connected, building protocol')
    # Build the protocol instance for this connection on our event loop.
    self.protocol = self.factory.build(self.loop)
    # NOTE(review): the `sock` argument is ignored; the connection wraps
    # self.sock instead -- presumably they are the same object; confirm.
    self.connection = Connection(self.loop, self.sock, self.addr,
                                 self.protocol, self)
    # The connector's job is done once the connection object exists.
    self.connector = None
    # Resolve the deferred so callers waiting on the connect proceed.
    self.connect_deferred.callback(self.protocol)
|
When the socket is writtable, the socket is ready to be used.
|
def check_permissions(self, request):
    """Permission checking for DRF.

    Collects the objects to check against (explicit perms objects, the
    view's single object, or the queryset), then either filters the
    queryset by permission (GET requests with permission_filter_queryset
    enabled) or denies the request when the user lacks the required
    permission.
    """
    objs = [None]
    if hasattr(self, 'get_perms_objects'):
        objs = self.get_perms_objects()
    else:
        if hasattr(self, 'get_object'):
            try:
                objs = [self.get_object()]
            except Http404:
                # A missing object is a genuine 404, not a permission issue.
                raise
            except Exception:
                # Was a bare `except:`, which also swallowed SystemExit and
                # KeyboardInterrupt. Any other failure to resolve a single
                # object falls back to queryset-based checking below.
                pass
        if objs == [None]:
            objs = self.get_queryset()
            if len(objs) == 0:
                objs = [None]
    if (hasattr(self, 'permission_filter_queryset') and
            self.permission_filter_queryset is not False and
            self.request.method == 'GET'):
        # Filter rather than deny: the response simply omits objects the
        # user may not see.
        if objs != [None]:
            self.perms_filter_queryset(objs)
    else:
        has_perm = check_perms(self.request.user,
                               self.get_permission_required(),
                               objs, self.request.method)
        if not has_perm:
            msg = self.get_permission_denied_message(
                default="Permission denied."
            )
            # The message hook may return a sequence; use its first entry.
            if isinstance(msg, Sequence):
                msg = msg[0]
            self.permission_denied(request, message=msg)
|
Permission checking for DRF.
|
def _write_rigid_information(xml_file, rigid_bodies):
"""Write rigid body information.
Parameters
----------
xml_file : file object
The file object of the hoomdxml file being written
rigid_bodies : list, len=n_particles
The rigid body that each particle belongs to (-1 for none)
"""
if not all(body is None for body in rigid_bodies):
xml_file.write('<body>\n')
for body in rigid_bodies:
if body is None:
body = -1
xml_file.write('{}\n'.format(int(body)))
xml_file.write('</body>\n')
|
Write rigid body information.
Parameters
----------
xml_file : file object
The file object of the hoomdxml file being written
rigid_bodies : list, len=n_particles
The rigid body that each particle belongs to (-1 for none)
|
def is_address_valid(self, address):
    """
    Determines if an address is a valid user mode address.
    @type address: int
    @param address: Memory address to query.
    @rtype: bool
    @return: C{True} if the address is a valid user mode address.
    @raise WindowsError: An exception is raised on error.
    """
    try:
        # Only the success/failure of the query matters here.
        self.mquery(address)
    except WindowsError as e:
        # ERROR_INVALID_PARAMETER means the address is simply not valid;
        # any other error is propagated to the caller.
        if e.winerror == win32.ERROR_INVALID_PARAMETER:
            return False
        raise
    return True
|
Determines if an address is a valid user mode address.
@type address: int
@param address: Memory address to query.
@rtype: bool
@return: C{True} if the address is a valid user mode address.
@raise WindowsError: An exception is raised on error.
|
def p_NonAnyType_domString(p):
    """NonAnyType : DOMString TypeSuffix"""
    # NOTE: the docstring above is a PLY (yacc) grammar production -- it is
    # parsed to build the rule table and must not be reworded.
    # Build a DOMString simple type and apply any trailing type suffix
    # collected in p[2].
    p[0] = helper.unwrapTypeSuffix(model.SimpleType(
        type=model.SimpleType.DOMSTRING), p[2])
|
NonAnyType : DOMString TypeSuffix
|
def gather_categories(imap, header, categories=None):
    """
    Find the user-specified categories in the map and create a dictionary to contain the
    relevant data for each type within the categories. Multiple categories will have their
    types combined such that each possible combination will have its own entry in the
    dictionary.

    :type imap: dict
    :param imap: The input mapping file data keyed by SampleID
    :type header: list
    :param header: The header line from the input mapping file. This will be searched for
                   the user-specified categories
    :type categories: list
    :param categories: The list of user-specified category column names from the mapping file
    :rtype: dict
    :return: A sorted dictionary keyed on the combinations of all the types found within
             the user-specified categories. Each entry will contain an empty DataCategory
             namedtuple. If no categories are specified, a single entry with the key
             'default' will be returned
    """
    # If no categories provided, return all SampleIDs
    if categories is None:
        return {"default": DataCategory(set(imap.keys()), {})}
    # Column indices of the plain (non-conditional) category names.
    cat_ids = [header.index(cat)
               for cat in categories if cat in header and "=" not in cat]
    table = OrderedDict()
    # Conditional categories, e.g. "Treatment=Control": column -> allowed values.
    conditions = defaultdict(set)
    for cat in categories:
        if "=" in cat and cat.split("=")[0] in header:
            cat_name = header[header.index(cat.split("=")[0])]
            conditions[cat_name].add(cat.split("=")[1])
    # If invalid categories or conditions identified, return all SampleIDs
    if not cat_ids and not conditions:
        return {"default": DataCategory(set(imap.keys()), {})}
    # If only category columns were given, group SampleIDs column-wise.
    if cat_ids and not conditions:
        for sid, row in imap.items():
            cat_name = "_".join([row[cid] for cid in cat_ids])
            if cat_name not in table:
                table[cat_name] = DataCategory(set(), {})
            table[cat_name].sids.add(sid)
        return table
    # Collect all condition column indices.
    cond_ids = set()
    for k in conditions:
        try:
            cond_ids.add(header.index(k))
        except ValueError:
            continue
    idx_to_test = set(cat_ids).union(cond_ids)
    # Column names and conditions were given: keep only SampleIDs whose row
    # satisfies every condition, keyed on the combined column values.
    # (The original used `assert`/`except AssertionError` for the membership
    # check and the final emptiness check, which breaks under `python -O`
    # because asserts are stripped.)
    for sid, row in imap.items():
        if all([row[header.index(c)] in conditions[c] for c in conditions]):
            key = "_".join([row[idx] for idx in idx_to_test])
            if key not in table:
                table[key] = DataCategory(set(), {})
            table[key].sids.add(sid)
    # No combination matched: fall back to all SampleIDs.
    if not table:
        return {"default": DataCategory(set(imap.keys()), {})}
    return table
|
Find the user specified categories in the map and create a dictionary to contain the
relevant data for each type within the categories. Multiple categories will have their
types combined such that each possible combination will have its own entry in the
dictionary.
:type imap: dict
:param imap: The input mapping file data keyed by SampleID
:type header: list
:param header: The header line from the input mapping file. This will be searched for
the user-specified categories
:type categories: list
:param categories: The list of user-specified category column name from mapping file
:rtype: dict
:return: A sorted dictionary keyed on the combinations of all the types found within
the user-specified categories. Each entry will contain an empty DataCategory
namedtuple. If no categories are specified, a single entry with the key
'default' will be returned
|
def active_editor_buffer(self):
    """ The active EditorBuffer or None. """
    tab = self.active_tab
    # Both the tab and its active window must be present (truthy).
    if tab and tab.active_window:
        return tab.active_window.editor_buffer
    return None
|
The active EditorBuffer or None.
|
def update_range(self, share_name, directory_name, file_name, data,
                 start_range, end_range, content_md5=None, timeout=None):
    '''
    Writes the bytes specified by the request body into the specified range.
    :param str share_name:
        Name of existing share.
    :param str directory_name:
        The path to the directory.
    :param str file_name:
        Name of existing file.
    :param bytes data:
        Content of the range.
    :param int start_range:
        Start of byte range to use for updating a section of the file.
        The range can be up to 4 MB in size.
        The start_range and end_range params are inclusive.
        Ex: start_range=0, end_range=511 will download first 512 bytes of file.
    :param int end_range:
        End of byte range to use for updating a section of the file.
        The range can be up to 4 MB in size.
        The start_range and end_range params are inclusive.
        Ex: start_range=0, end_range=511 will download first 512 bytes of file.
    :param str content_md5:
        An MD5 hash of the range content. This hash is used to
        verify the integrity of the range during transport. When this header
        is specified, the storage service compares the hash of the content
        that has arrived with the header value that was sent. If the two
        hashes do not match, the operation will fail with error code 400
        (Bad Request).
    :param int timeout:
        The timeout parameter is expressed in seconds.
    '''
    # Fail fast on missing required arguments.
    # NOTE(review): directory_name is not validated here -- presumably
    # None/empty means the share root; confirm against callers.
    _validate_not_none('share_name', share_name)
    _validate_not_none('file_name', file_name)
    _validate_not_none('data', data)
    # Build the PUT request for a range update (comp=range).
    request = HTTPRequest()
    request.method = 'PUT'
    request.host = self._get_host()
    request.path = _get_path(share_name, directory_name, file_name)
    request.query = [
        ('comp', 'range'),
        ('timeout', _int_to_str(timeout)),
    ]
    # 'x-ms-write: update' marks this as a write of the given range.
    request.headers = [
        ('Content-MD5', _to_str(content_md5)),
        ('x-ms-write', 'update'),
    ]
    # Validates the range bounds and adds the range header to the request.
    _validate_and_format_range_headers(
        request, start_range, end_range)
    request.body = _get_request_body_bytes_only('data', data)
    self._perform_request(request)
|
Writes the bytes specified by the request body into the specified range.
:param str share_name:
Name of existing share.
:param str directory_name:
The path to the directory.
:param str file_name:
Name of existing file.
:param bytes data:
Content of the range.
:param int start_range:
Start of byte range to use for updating a section of the file.
The range can be up to 4 MB in size.
The start_range and end_range params are inclusive.
Ex: start_range=0, end_range=511 will download first 512 bytes of file.
:param int end_range:
End of byte range to use for updating a section of the file.
The range can be up to 4 MB in size.
The start_range and end_range params are inclusive.
Ex: start_range=0, end_range=511 will download first 512 bytes of file.
:param str content_md5:
An MD5 hash of the range content. This hash is used to
verify the integrity of the range during transport. When this header
is specified, the storage service compares the hash of the content
that has arrived with the header value that was sent. If the two
hashes do not match, the operation will fail with error code 400
(Bad Request).
:param int timeout:
The timeout parameter is expressed in seconds.
|
def fetchChildren(self):
    """ Fetches and returns this item's children.

    The actual work is done by _fetchAllChildren. Descendant classes
    should typically override that method instead of this one.
    """
    assert self._canFetchChildren, "canFetchChildren must be True"
    try:
        return self._fetchAllChildren()
    finally:
        # Mark as fetched even when _fetchAllChildren raised, so a
        # failing fetch is not retried endlessly.
        self._canFetchChildren = False
|
Fetches children.
The actual work is done by _fetchAllChildren. Descendant classes should typically
override that method instead of this one.
|
def set_chassis_datacenter(location,
                           host=None,
                           admin_username=None,
                           admin_password=None):
    '''
    Set the datacenter location of the chassis.

    location
        The name of the datacenter to be set on the chassis.
    host
        The chassis host.
    admin_username
        The username used to access the chassis.
    admin_password
        The password used to access the chassis.

    CLI Example:

    .. code-block:: bash

        salt '*' dracr.set_chassis_datacenter datacenter-name host=111.222.333.444
            admin_username=root admin_password=secret
    '''
    # Delegate to the generic setter with the datacenter config keys.
    return set_general('cfgLocation', 'cfgLocationDatacenter', location,
                       host=host,
                       admin_username=admin_username,
                       admin_password=admin_password)
|
Set the location of the chassis.
location
The name of the datacenter to be set on the chassis.
host
The chassis host.
admin_username
The username used to access the chassis.
admin_password
The password used to access the chassis.
CLI Example:
.. code-block:: bash
salt '*' dracr.set_chassis_datacenter datacenter-name host=111.222.333.444
admin_username=root admin_password=secret
|
def pca(U, centre=False):
    """Compute the PCA basis for columns of input array `U`.

    Parameters
    ----------
    U : array_like
      2D data array with rows corresponding to different variables and
      columns corresponding to different observations
    centre : bool, optional (default False)
      Flag indicating whether to centre data
      (docstring previously said ``center``, which is not the actual
      keyword name)

    Returns
    -------
    B : ndarray
      A 2D array representing the PCA basis; each column is a PCA
      component. B.T is the analysis transform into the PCA
      representation, and B is the corresponding synthesis transform
    S : ndarray
      The eigenvalues of the PCA components
    C : ndarray or None
      None if centring is disabled, otherwise the mean of the data
      matrix subtracted in performing the centring
    """
    if centre:
        # Subtract the per-variable (row-wise) mean from each observation.
        C = np.mean(U, axis=1, keepdims=True)
        U = U - C
    else:
        C = None
    # The eigenvalues of the covariance are the squared singular values.
    B, S, _ = np.linalg.svd(U, full_matrices=False, compute_uv=True)
    return B, S**2, C
|
Compute the PCA basis for columns of input array `U`.
Parameters
----------
U : array_like
2D data array with rows corresponding to different variables and
columns corresponding to different observations
center : bool, optional (default False)
Flag indicating whether to centre data
Returns
-------
B : ndarray
A 2D array representing the PCA basis; each column is a PCA
component.
B.T is the analysis transform into the PCA representation, and B
is the corresponding synthesis transform
S : ndarray
The eigenvalues of the PCA components
C : ndarray or None
None if centering is disabled, otherwise the mean of the data
matrix subtracted in performing the centering
|
def CheckVlogArguments(filename, clean_lines, linenum, error):
    """Checks that VLOG() is only used for defining a logging level.

    For example, VLOG(2) is correct. VLOG(INFO), VLOG(WARNING), VLOG(ERROR),
    and VLOG(FATAL) are not.

    Args:
      filename: The name of the current file.
      clean_lines: A CleansedLines instance containing the file.
      linenum: The number of the line to check.
      error: The function to call with any errors found.
    """
    line = clean_lines.elided[linenum]
    match = Search(r'\bVLOG\((INFO|ERROR|WARNING|DFATAL|FATAL)\)', line)
    if match:
        error(filename, linenum, 'runtime/vlog', 5,
              'VLOG() should be used with numeric verbosity level. '
              'Use LOG() if you want symbolic severity levels.')
|
Checks that VLOG() is only used for defining a logging level.
For example, VLOG(2) is correct. VLOG(INFO), VLOG(WARNING), VLOG(ERROR), and
VLOG(FATAL) are not.
Args:
filename: The name of the current file.
clean_lines: A CleansedLines instance containing the file.
linenum: The number of the line to check.
error: The function to call with any errors found.
|
def get(self, name, default="", parent_search=False, multikeys_search=False, __settings_temp=None, __rank_recursion=0):
    """
    Look up a configuration value.

    ``name`` is either a plain key or a slash-separated path to the value
    (e.g. ``document/host/val``); a leading or trailing ``/`` is ignored.
    Path segments may carry a list index, e.g. ``data[0]``.

    ``default`` is the value returned when nothing is found.
    ``parent_search``: when True, a miss returns ``None`` so the caller can
    retry higher up the hierarchy (if "/document/host/val" yields nothing,
    look in "/document/val", then "/val").
    ``multikeys_search``: when True, the reverse-recursive search also
    tries dropping intermediate keys (e.g. /graphic/output/logo/enable
    also looks in /graphic/logo/enable).
    ``__settings_temp`` is internal: the sub-dictionary currently searched
    during recursion.
    ``__rank_recursion`` is internal: the recursion depth, used to allow
    one extra search from the path root.

    Example::

        value = self.settings("document/host/val", "my_default")
        value = self.settings("/document/host/val", "my_default")
    """
    # On the outermost call, start the search from the full settings.
    if __settings_temp is None:
        __settings_temp = self.settings
    # Normalise the path: strip a leading slash...
    if name.startswith("/"):
        name = name[1:]
    # ...and a trailing slash.
    if name.endswith("/"):
        name = name[:-1]
    # A slash in the name means this is a path: descend one level, recurse.
    if "/" in name:
        # First segment of the path.
        name_master = name.split("/")[0]
        # Extract an optional list index, e.g. "data[0]" -> 0.
        indice_master = -1
        indices_master = re.findall(r"\[\d+\]", name_master)
        if len(indices_master) > 0:
            try:
                indice_master = int(indices_master[0].replace("[", "").replace("]", ""))
            except ValueError:  # was a bare except
                pass
        # Strip the index from the segment name ("data[0]" -> "data").
        name_master = name_master.replace("[{}]".format(indice_master), "")
        # Unknown key at this level: nothing to find.
        if name_master not in __settings_temp.keys():
            return None
        # Fetch the sub-configuration for the next recursion level.
        if indice_master < 0:
            # The sub-configuration is not a list.
            __settings_temp = __settings_temp[name_master]
        else:
            # The sub-configuration is a list (JSON settings only!).
            __settings_temp = __settings_temp[name_master][indice_master] if self.is_json else __settings_temp[name]
        # Recurse on the remainder of the path.
        name_split = name.split("/")[1:]
        search_path = "/".join(name_split)
        return_value = self.get(
            search_path, default, parent_search, multikeys_search, __settings_temp, __rank_recursion + 1)
        # No value found: try the reverse recursion, dropping path levels.
        if len(name_split) > 1 and return_value is None:
            i = len(name_split)
            while i >= 0:
                # Move the search cursor up one level.
                i -= 1
                # Build the new path without the upper level.
                new_search_path = "/".join(name_split[i-len(name_split):])
                return_value = self.get(
                    new_search_path, default, parent_search, multikeys_search, __settings_temp, __rank_recursion + 1)
                # Stop after one try unless multi-key search is enabled.
                if not multikeys_search:
                    break
                # A value was found.
                if return_value is not None:
                    break
        # Still nothing and we are at the path root: retry with just the
        # leaf name against the full settings.
        if return_value is None and __rank_recursion == 0:
            name = name_split[-1]
            return_value = self.get(
                name, default, parent_search, multikeys_search, self.settings, 0)
        # Still no value: keep the default.
        if return_value is None:
            return_value = default
        return return_value
    # Plain key: extract an optional list index from the name.
    indice_master = -1
    indices_master = re.findall(r"\[\d+\]", name)
    if len(indices_master) > 0:
        try:
            indice_master = int(indices_master[0].replace("[", "").replace("]", ""))
        except ValueError:  # was a bare except
            pass
    # Strip the index from the name ("data[0]" -> "data").
    name = name.replace("[{}]".format(indice_master), "")
    # The key must be present at this level.
    if type(__settings_temp) is str or name not in __settings_temp.keys():
        # When parent_search is enabled, return None so the caller can
        # retry higher in the hierarchy.
        if parent_search:
            return None
        return default
    # Fetch the value.
    if indice_master < 0:
        # The value is not a list element.
        value = __settings_temp[name]
    else:
        # The value is a list element (JSON settings only!).
        value = __settings_temp[name][indice_master] if self.is_json else __settings_temp[name]
    # A stored None is not considered a valid value.
    if value is None:
        if parent_search:
            return None
        value = default
    # Trim surrounding whitespace on string values.
    if isinstance(value, str):
        value = value.strip()
    return value
|
Récupération d'une configuration
le paramètre ```name``` peut être soit un nom ou
un chemin vers la valeur (séparateur /)
```parent_search``` est le boolean qui indique si on doit
chercher la valeur dans la hiérarchie plus haute. Si la chaîne
"/document/host/val" retourne None, on recherche dans "/document/val"
puis dans "/val"
```multikeys_search``` indique si la recherche d'une clef non trouvabe
se fait sur les parents en multi clef
ie: /graphic/output/logo/enable va aussi chercher dans /graphic/logo/enable
```__settings_temp``` est le dictionnaire temporaire de
transmission récursif (intégrant les sous configurations)
```__rank_recursion``` défini le rang de récusion pour chercher aussi
depuis la racine du chemin en cas de récursion inverse
exemple :
valeur = self.settings("document/host/val", "mon_defaut")
valeur = self.settings("/document/host/val", "mon_defaut")
|
def getCandScoresMapFromSamplesFile(self, profile, sampleFileName):
    """
    Returns a dictionary that associates the integer representation of each candidate with the
    Bayesian utilities we approximate from the samples we generated into a file.

    :ivar Profile profile: A Profile object that represents an election profile.
    :ivar str sampleFileName: The name of the input file containing the sample data.
    """
    wmg = profile.getWmg(True)
    # Initialize our expected utilities, one accumulator per candidate.
    utilities = {cand: 0.0 for cand in wmg.keys()}
    numSamples = 0
    # Context manager guarantees the file is closed even if parsing raises
    # (the original left the handle open on any exception).
    with open(sampleFileName) as sampleFile:
        # Skip the meta-data lines, then the burn-in samples.
        for _ in range(SAMPLESFILEMETADATALINECOUNT + self.burnIn):
            sampleFile.readline()
        # Read n2*n1 lines but keep only every n1-th sample (thinning).
        for i in range(self.n2 * self.n1):
            line = sampleFile.readline()
            if i % self.n1 != 0:
                continue
            sample = json.loads(line)
            for cand in wmg.keys():
                utilities[cand] += self.utilityFunction.getUtility([cand], sample)
            numSamples += 1
    # Average the accumulated utilities over the retained samples.
    for key in utilities:
        utilities[key] /= numSamples
    return utilities
|
Returns a dictionary that associates the integer representation of each candidate with the
Bayesian utilities we approximate from the samples we generated into a file.
:ivar Profile profile: A Profile object that represents an election profile.
:ivar str sampleFileName: The name of the input file containing the sample data.
|
def update(self):
    """Refresh cached camera/base-station properties when the refresh interval has elapsed."""
    now = int(time.time())
    previous = self._last_refresh if self._last_refresh is not None else 0
    # Only hit the device APIs when the refresh window has passed.
    if now >= previous + self._refresh_rate:
        self.get_cameras_properties()
        self.get_ambient_sensor_data()
        self.get_camera_extended_properties()
        self._attrs = self._session.refresh_attributes(self.name)
        self._attrs = assert_is_dict(self._attrs)
        _LOGGER.debug("Called base station update of camera properties: "
                      "Scan Interval: %s, New Properties: %s",
                      self._refresh_rate, self.camera_properties)
|
Update object properties.
|
def base_prompt(self, prompt):
    """Extract the base prompt pattern."""
    # Nothing to extract from.
    if prompt is None:
        return None
    # Non-target devices keep their prompt untouched.
    if not self.device.is_target:
        return prompt
    dynamic = pattern_manager.pattern(self.platform, "prompt_dynamic", compiled=False)
    dynamic = dynamic.format(prompt="(?P<prompt>.*?)")
    match = re.search(dynamic, prompt)
    if not match:
        self.log("Unable to extract the base prompt")
        return prompt
    base = match.group("prompt") + "#"
    self.log("base prompt: {}".format(base))
    return base
|
Extract the base prompt pattern.
|
def updatePhysicalInterface(self, physicalInterfaceId, name, schemaId, description=None):
    """
    Update a physical interface.
    Parameters:
      - physicalInterfaceId (string)
      - name (string)
      - schemaId (string)
      - description (string, optional)
    Throws APIException on failure.
    """
    url = ApiClient.onePhysicalInterfacesUrl % (self.host, "/draft", physicalInterfaceId)
    payload = {"name": name, "schemaId": schemaId}
    if description:
        payload["description"] = description
    resp = requests.put(url, auth=self.credentials,
                        headers={"Content-Type": "application/json"},
                        data=json.dumps(payload), verify=self.verify)
    # Anything other than 200 is treated as an API failure.
    if resp.status_code != 200:
        raise ibmiotf.APIException(resp.status_code, "HTTP error updating physical interface", resp)
    self.logger.debug("physical interface updated")
    return resp.json()
|
Update a physical interface.
Parameters:
- physicalInterfaceId (string)
- name (string)
- schemaId (string)
- description (string, optional)
Throws APIException on failure.
|
def dump_children(self, f, indent=''):
    """Write every child of this section to the file-like object *f*."""
    child_indent = indent + '  '
    for child in self.__order:
        child.dump(f, child_indent)
|
Dump the children of the current section to a file-like object
|
def init_app(self, app):
    """
    Initialize this class with the specified :class:`flask.Flask` application
    :param app: The Flask application.
    """
    self.__app = app
    # Defer tracer setup until the app serves its first request.
    self.__app.before_first_request(self.__setup)
    enabled = self.__app.config.get('TRACE_ENABLED', self.tracer.enabled)
    self.tracer.enabled = enabled
|
Initialize this class with the specified :class:`flask.Flask` application
:param app: The Flask application.
|
def sendPassword(self, password):
    """send password"""
    # DES keys are exactly 8 bytes: zero-pad, then truncate.
    key = (password + '\0' * 8)[:8]
    cipher = RFBDes(key)
    encrypted = cipher.encrypt(self._challenge)
    self.transport.write(encrypted)
|
send password
|
def restart(name, runas=None):
    '''
    Unloads and reloads a launchd service. Raises an error if the service
    fails to reload

    :param str name: Service label, file name, or full path

    :param str runas: User to run launchctl commands

    :return: ``True`` if successful
    :rtype: bool

    CLI Example:

    .. code-block:: bash

        salt '*' service.restart org.cups.cupsd
    '''
    # Disabled services are left alone; stop()/start() raise on failure,
    # which propagates to the caller.
    if not enabled(name):
        return True
    stop(name, runas=runas)
    start(name, runas=runas)
    return True
|
Unloads and reloads a launchd service. Raises an error if the service
fails to reload
:param str name: Service label, file name, or full path
:param str runas: User to run launchctl commands
:return: ``True`` if successful
:rtype: bool
CLI Example:
.. code-block:: bash
salt '*' service.restart org.cups.cupsd
|
def is_streamable(self):
    """Returns True if the artist is streamable."""
    info = self._request(self.ws_prefix + ".getInfo", True)
    streamable = _extract(info, "streamable")
    return bool(_number(streamable))
|
Returns True if the artist is streamable.
|
def connect(self, config):
    """Connect to database with given configuration, which may be a dict or
    a path to a pymatgen-db configuration.
    """
    # str and dict are disjoint, so branch order does not matter.
    if isinstance(config, dict):
        return dbutil.get_database(settings=config)
    if isinstance(config, str):
        return dbutil.get_database(config_file=config)
    raise ValueError("Configuration, '{}', must be a path to "
                     "a configuration file or dict".format(config))
|
Connect to database with given configuration, which may be a dict or
a path to a pymatgen-db configuration.
|
def zero_pad(data, count, right=True):
    """
    Pad a 1D array with zeros so the result is at least ``count`` long.

    Parameters
    --------
    data : (n,)
      1D array
    count : int
      Minimum length of result array
    right : bool
      If True, zeros are prepended so ``data`` occupies the end of the
      result; if False, zeros are appended so ``data`` occupies the start.

    Returns
    --------
    padded : (m,)
      1D array where m >= count
    """
    if len(data) == 0:
        # Nothing to keep: result is all zeros.
        return np.zeros(count)
    elif len(data) < count:
        padded = np.zeros(count)
        if right:
            padded[-len(data):] = data
        else:
            padded[:len(data)] = data
        return padded
    else:
        # Already long enough: return as ndarray, avoiding a copy when possible.
        return np.asanyarray(data)
|
Parameters
--------
data : (n,)
1D array
count : int
Minimum length of result array
Returns
--------
padded : (m,)
1D array where m >= count
|
def smoothing_window(data, window=(1, 1, 1)):
    """ This is a smoothing functionality so we can fix misclassifications.
    It will run a sliding window of form [border, smoothing, border] on the
    signal and if the border elements are the same it will change the
    smooth elements to match the border. An example would be for a window
    of [2, 1, 2] we have the following elements [1, 1, 0, 1, 1], this will
    transform it into [1, 1, 1, 1, 1]. So if the border elements match it
    will transform the middle (smoothing) into the same as the border.

    Note: ``data`` is modified in place (and also returned).

    :param data array: One-dimensional numpy array.
    :param window array: Used to define the [border, smoothing, border]
        regions (3 elements).
    :return data array: The smoothed version of the original data.
    """
    # Total number of elements one window position covers.
    span = window[0] + window[1] + window[2]
    # "+ 1" so the final window position (ending exactly at len(data)) is
    # visited; the original range(len(data) - span) skipped it, making the
    # docstring's own [1, 1, 0, 1, 1] example a no-op.
    for i in range(len(data) - span + 1):
        left_from = i
        left_to = i + window[0]
        right_from = left_to + window[1]
        right_to = right_from + window[2]
        if np.all(data[left_from:left_to] == data[right_from:right_to]):
            data[left_from:right_to] = data[left_from]
    return data
|
This is a smoothing functionality so we can fix misclassifications.
It will run a sliding window of form [border, smoothing, border] on the
signal and if the border elements are the same it will change the
smooth elements to match the border. An example would be for a window
of [2, 1, 2] we have the following elements [1, 1, 0, 1, 1], this will
transform it into [1, 1, 1, 1, 1]. So if the border elements match it
will transform the middle (smoothing) into the same as the border.
:param data array: One-dimensional array.
:param window array: Used to define the [border, smoothing, border]
regions.
:return data array: The smoothed version of the original data.
|
def from_string(dir_string):
    '''Returns the correct constant for a given string.

    @raises InvalidDirectionError
    '''
    normalized = dir_string.upper()
    for constant in (UP, DOWN, LEFT, RIGHT):
        if normalized == constant:
            return constant
    raise InvalidDirectionError(dir_string)
|
Returns the correct constant for a given string.
@raises InvalidDirectionError
|
def dict_has_all_keys(self, keys):
    """
    Build a boolean SArray from an SArray of dictionaries: each output
    element is True when the corresponding input dictionary contains
    every one of the given keys. Fails on SArrays whose data type is not
    ``dict``.

    Parameters
    ----------
    keys : list
        A list of key values to check each dictionary against.

    Returns
    -------
    out : SArray
        A SArray of int type, where each element indicates whether the
        input SArray element contains all keys in the input list.

    See Also
    --------
    dict_has_any_keys

    Examples
    --------
    >>> sa = turicreate.SArray([{"this":1, "is":5, "dog":7},
                                {"this": 2, "are": 1, "cat": 5}])
    >>> sa.dict_has_all_keys(["is", "this"])
    dtype: int
    Rows: 2
    [1, 0]
    """
    # A single non-iterable key is promoted to a one-element list.
    key_list = keys if _is_non_string_iterable(keys) else [keys]
    with cython_context():
        return SArray(_proxy=self.__proxy__.dict_has_all_keys(key_list))
|
Create a boolean SArray by checking the keys of an SArray of
dictionaries. An element of the output SArray is True if the
corresponding input element's dictionary has all of the given keys.
Fails on SArrays whose data type is not ``dict``.
Parameters
----------
keys : list
A list of key values to check each dictionary against.
Returns
-------
out : SArray
A SArray of int type, where each element indicates whether the
input SArray element contains all keys in the input list.
See Also
--------
dict_has_any_keys
Examples
--------
>>> sa = turicreate.SArray([{"this":1, "is":5, "dog":7},
{"this": 2, "are": 1, "cat": 5}])
>>> sa.dict_has_all_keys(["is", "this"])
dtype: int
Rows: 2
[1, 0]
|
def auto_model_name_recognize(model_name):
    """
    Convert a hyphenated model name to CamelCase, e.g. ``site-user`` -> ``SiteUser``.

    Only the first character of each segment is upper-cased; the rest of the
    segment is left untouched (so ``myDB-admin`` -> ``MyDBAdmin``).

    :param model_name: hyphen-separated model name
    :return: CamelCase class-style name
    """
    parts = model_name.split('-')
    # Skip empty segments (e.g. "site--user" or a trailing '-') that would
    # otherwise raise IndexError on part[0].
    return ''.join('%s%s' % (part[0].upper(), part[1:]) for part in parts if part)
|
自动将 site-user 识别成 SiteUser
:param model_name:
:return:
|
def _describe_tree(self, prefix, with_transform):
"""Helper function to actuall construct the tree"""
extra = ': "%s"' % self.name if self.name is not None else ''
if with_transform:
extra += (' [%s]' % self.transform.__class__.__name__)
output = ''
if len(prefix) > 0:
output += prefix[:-3]
output += ' +--'
output += '%s%s\n' % (self.__class__.__name__, extra)
n_children = len(self.children)
for ii, child in enumerate(self.children):
sub_prefix = prefix + (' ' if ii+1 == n_children else ' |')
output += child._describe_tree(sub_prefix, with_transform)
return output
|
Helper function to actually construct the tree
|
def date_added(self, date_added):
    """
    Updates the security labels date_added

    Args:
        date_added: Converted to %Y-%m-%dT%H:%M:%SZ date format
    """
    formatted = self._utils.format_datetime(date_added, date_format='%Y-%m-%dT%H:%M:%SZ')
    # Keep the local copy and the outgoing request in sync.
    self._data['dateAdded'] = formatted
    request = self._base_request
    request['dateAdded'] = formatted
    return self._tc_requests.update(request, owner=self.owner)
|
Updates the security labels date_added
Args:
date_added: Converted to %Y-%m-%dT%H:%M:%SZ date format
|
def _validate_data(self, data: dict):
    """
    Validates data against provider schema. Raises :class:`~notifiers.exceptions.BadArguments` if relevant

    :param data: Data to validate
    :raises: :class:`~notifiers.exceptions.BadArguments`
    """
    log.debug("validating provided data")
    error = best_match(self.validator.iter_errors(data))
    if not error:
        return
    # Prefer a provider-defined custom message (schema key "error_<validator>")
    # over the generic jsonschema message.
    custom_error_key = f"error_{error.validator}"
    message = error.schema.get(custom_error_key) or error.message
    raise BadArguments(validation_error=message, provider=self.name, data=data)
|
Validates data against provider schema. Raises :class:`~notifiers.exceptions.BadArguments` if relevant
:param data: Data to validate
:raises: :class:`~notifiers.exceptions.BadArguments`
|
def _flatten_dicts(self, dicts):
"""Flatten a dict
:param dicts: Flatten a dict
:type dicts: list(dict)
"""
d = dict()
list_of_dicts = [d.get() for d in dicts or []]
return {k: v for d in list_of_dicts for k, v in d.items()}
|
Flatten a dict
:param dicts: Flatten a dict
:type dicts: list(dict)
|
def process_call(self, i2c_addr, register, value, force=None):
    """
    Executes a SMBus Process Call, sending a 16-bit value and receiving a 16-bit response

    :param i2c_addr: i2c address
    :type i2c_addr: int
    :param register: Register to read/write to
    :type register: int
    :param value: Word value to transmit
    :type value: int
    :param force:
    :type force: Boolean
    :rtype: int
    """
    # Point the bus at the target device before issuing the transaction.
    self._set_address(i2c_addr, force=force)
    request = i2c_smbus_ioctl_data.create(
        read_write=I2C_SMBUS_WRITE, command=register, size=I2C_SMBUS_PROC_CALL
    )
    request.data.contents.word = value
    # The kernel overwrites the word in place with the device's response.
    ioctl(self.fd, I2C_SMBUS, request)
    return request.data.contents.word
|
Executes a SMBus Process Call, sending a 16-bit value and receiving a 16-bit response
:param i2c_addr: i2c address
:type i2c_addr: int
:param register: Register to read/write to
:type register: int
:param value: Word value to transmit
:type value: int
:param force:
:type force: Boolean
:rtype: int
|
def _get_enterprise_admin_users_batch(self, start, end):
    """
    Returns a batched queryset of User objects.
    """
    LOGGER.info('Fetching new batch of enterprise admin users from indexes: %s to %s', start, end)
    admins = User.objects.filter(groups__name=ENTERPRISE_DATA_API_ACCESS_GROUP, is_staff=False)
    # Slicing the queryset applies LIMIT/OFFSET at the database level.
    return admins[start:end]
|
Returns a batched queryset of User objects.
|
def text_antialias(self, flag=True):
    """text antialias

    :param flag: True or False. (default is True)
    :type flag: bool
    """
    self.drawer.append(pgmagick.DrawableTextAntialias(flag))
|
text antialias
:param flag: True or False. (default is True)
:type flag: bool
|
def show_vcs_output_vcs_nodes_vcs_node_info_node_fabric_state(self, **kwargs):
    """Auto Generated Code
    """
    show_vcs = ET.Element("show_vcs")
    config = show_vcs
    # Build the nested path output/vcs-nodes/vcs-node-info/node-fabric-state.
    parent = show_vcs
    for tag in ("output", "vcs-nodes", "vcs-node-info"):
        parent = ET.SubElement(parent, tag)
    leaf = ET.SubElement(parent, "node-fabric-state")
    leaf.text = kwargs.pop('node_fabric_state')
    callback = kwargs.pop('callback', self._callback)
    return callback(config)
|
Auto Generated Code
|
def read(self, filename, min_length, slide_sec=0, buffer=0):
    """
    Parse the science segments from the segwizard output contained in file.

    @param filename: input text file containing a list of science segments generated by
    segwizard.
    @param min_length: only append science segments that are longer than min_length.
    @param slide_sec: Slide each ScienceSegment by::

      delta > 0:
        [s,e] -> [s+delta,e].
      delta < 0:
        [s,e] -> [s,e-delta].

    @param buffer: shrink the ScienceSegment::

      [s,e] -> [s+buffer,e-buffer]
    """
    self.__filename = filename
    comment = re.compile(r'\A#')
    for line in open(filename):
        # Skip comment lines and segments shorter than min_length.
        if comment.match(line) or int(line.split()[3]) < min_length:
            continue
        seg_id, start, end, duration = map(int, line.split())
        # slide the data if doing a background estimation
        if slide_sec > 0:
            start += slide_sec
        elif slide_sec < 0:
            end += slide_sec
            # NOTE(review): duration is only adjusted for negative slides,
            # matching the original code — confirm whether positive slides
            # should shrink duration too.
            duration -= abs(slide_sec)
        # add a buffer
        if buffer > 0:
            start += buffer
            end -= buffer
            duration -= 2*abs(buffer)
        self.__sci_segs.append(ScienceSegment(tuple([seg_id, start, end, duration])))
|
Parse the science segments from the segwizard output contained in file.
@param filename: input text file containing a list of science segments generated by
segwizard.
@param min_length: only append science segments that are longer than min_length.
@param slide_sec: Slide each ScienceSegment by::
delta > 0:
[s,e] -> [s+delta,e].
delta < 0:
[s,e] -> [s,e-delta].
@param buffer: shrink the ScienceSegment::
[s,e] -> [s+buffer,e-buffer]
|
def get(self, prefix, url, schema_version=None):
    """ Get the cached object """
    # Caching disabled entirely.
    if not self.cache_dir:
        return None
    path = self._get_cache_file(prefix, url)
    try:
        with open(path, 'rb') as fh:
            cached = pickle.load(fh)
        # Reject entries written under a different schema version.
        if schema_version and schema_version != cached.schema:
            LOGGER.debug("Cache get %s %s: Wanted schema %d, got %d",
                         prefix, url,
                         schema_version, cached.schema)
            return None
        return cached
    except FileNotFoundError:
        # A cache miss is normal; fall through to None.
        pass
    except Exception:  # pylint:disable=broad-except
        _, msg, _ = sys.exc_info()
        LOGGER.warning("Cache get %s %s failed: %s", prefix, url, msg)
    return None
|
Get the cached object
|
def related_linkage_states_and_scoped_variables(self, state_ids, scoped_variables):
    """Collect this state's transitions and data flows related to a set of child states.

    Each transition/data flow is classified relative to the child states in
    ``state_ids`` (and, for data flows, additionally the scoped variables of
    *this* state listed in ``scoped_variables``):

    - ``'enclosed'``: both endpoints lie inside the given set
    - ``'ingoing'``:  the destination lies inside the set (origin outside)
    - ``'outgoing'``: the origin lies inside the set (destination outside)

    :param state_ids: collection of child state ids defining the set
    :param scoped_variables: collection of scoped-variable keys of this state
        that count as internal endpoints for data flows
    :return: tuple ``(related_transitions, related_data_flows)``, each a dict
        with keys ``'enclosed'``, ``'ingoing'`` and ``'outgoing'``
    """
    # find all related transitions
    related_transitions = {'enclosed': [], 'ingoing': [], 'outgoing': []}
    for t in self.transitions.values():
        # check if internal of new hierarchy state
        if t.from_state in state_ids and t.to_state in state_ids:
            related_transitions['enclosed'].append(t)
        elif t.to_state in state_ids:
            related_transitions['ingoing'].append(t)
        elif t.from_state in state_ids:
            related_transitions['outgoing'].append(t)
    # find all related data flows
    related_data_flows = {'enclosed': [], 'ingoing': [], 'outgoing': []}
    for df in self.data_flows.values():
        # check if internal of new hierarchy state
        # NOTE: 'and' binds tighter than 'or' — each 'or' arm below is a
        # complete "enclosed" condition on its own: state-to-state, or
        # state-to-own-scoped-variable, or own-scoped-variable-to-state.
        if df.from_state in state_ids and df.to_state in state_ids or \
                df.from_state in state_ids and self.state_id == df.to_state and df.to_key in scoped_variables or \
                self.state_id == df.from_state and df.from_key in scoped_variables and df.to_state in state_ids:
            related_data_flows['enclosed'].append(df)
        elif df.to_state in state_ids or \
                self.state_id == df.to_state and df.to_key in scoped_variables:
            related_data_flows['ingoing'].append(df)
        elif df.from_state in state_ids or \
                self.state_id == df.from_state and df.from_key in scoped_variables:
            related_data_flows['outgoing'].append(df)
    return related_transitions, related_data_flows
|
TODO: document
|
def rmfile(path):
    """Ensure file deleted also on *Windows* where read-only files need special treatment."""
    if not osp.isfile(path):
        return
    if is_win:
        # Windows refuses to delete read-only files; lift the flag first.
        os.chmod(path, 0o777)
    os.remove(path)
|
Ensure file deleted also on *Windows* where read-only files need special treatment.
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.