def get(self, resource, **params):
"""
Generic TeleSign REST API GET handler.
:param resource: The partial resource URI to perform the request against, as a string.
:param params: Body params to perform the GET request with, as a dictionary.
:return: The RestClient Response object.
"""
return self._execute(self.session.get, 'GET', resource, **params)
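A minimal usage sketch, assuming a configured RestClient of the kind this method belongs to; the credentials, resource path, and ucid parameter below are placeholders, not guaranteed endpoints:
# Hypothetical usage; 'customer_id'/'api_key' and the resource are illustrative.
client = RestClient('customer_id', 'api_key')
response = client.get('/v1/phoneid/15555551234', ucid='BACS')
print(response.status_code)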
|
def to_UNIXtime(timeobject):
"""
Returns the UNIXtime corresponding to the time value conveyed by the
specified object, which can be either a UNIXtime, a
``datetime.datetime`` object or an ISO8601-formatted string in the format
    ``YYYY-MM-DD HH:MM:SS+00``.
:param timeobject: the object conveying the time value
:type timeobject: int, ``datetime.datetime`` or ISO8601-formatted
string
:returns: an int UNIXtime
:raises: *TypeError* when bad argument types are provided, *ValueError*
when negative UNIXtimes are provided
"""
if isinstance(timeobject, int):
if timeobject < 0:
raise ValueError("The time value is a negative number")
return timeobject
elif isinstance(timeobject, datetime):
return _datetime_to_UNIXtime(timeobject)
elif isinstance(timeobject, str):
return _ISO8601_to_UNIXtime(timeobject)
else:
raise TypeError('The time value must be expressed either by an int ' \
'UNIX time, a datetime.datetime object or an ' \
'ISO8601-formatted string')
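For illustration, the three accepted input forms below should all yield the same UNIX time (the private helpers are assumed to live in the same module):
from datetime import datetime

# Each call is expected to return 1577836800 (2020-01-01 00:00:00 UTC).
to_UNIXtime(1577836800)                  # int UNIX time passes straight through
to_UNIXtime(datetime(2020, 1, 1))        # handled by _datetime_to_UNIXtime
to_UNIXtime('2020-01-01 00:00:00+00')    # handled by _ISO8601_to_UNIXtime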
|
async def play(self):
"""
    Starts playback from Lavalink.
"""
if self.repeat and self.current is not None:
self.queue.append(self.current)
self.current = None
self.position = 0
self._paused = False
if not self.queue:
await self.stop()
else:
self._is_playing = True
if self.shuffle:
track = self.queue.pop(randrange(len(self.queue)))
else:
track = self.queue.pop(0)
self.current = track
log.debug("Assigned current.")
await self.node.play(self.channel.guild.id, track)
|
def set_canvas_properties(self, canvas, x_title=None, y_title=None, x_lim=None, y_lim=None, x_labels=True, y_labels=True):
"""!
@brief Set properties for specified canvas.
    @param[in] canvas (uint): Index of the canvas whose properties should be changed.
@param[in] x_title (string): Title for X axis, if 'None', then nothing is displayed.
@param[in] y_title (string): Title for Y axis, if 'None', then nothing is displayed.
@param[in] x_lim (list): Defines borders of X axis like [from, to], for example [0, 3.14], if 'None' then
borders are calculated automatically.
@param[in] y_lim (list): Defines borders of Y axis like [from, to], if 'None' then borders are calculated
automatically.
@param[in] x_labels (bool): If True then labels of X axis are displayed.
@param[in] y_labels (bool): If True then labels of Y axis are displayed.
"""
    self.__canvases[canvas] = canvas_descr(x_title, y_title, x_lim, y_lim, x_labels, y_labels)
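A hedged usage sketch; `visualizer` is an assumed instance of the class that owns this method, created with at least one canvas:
# Canvas index 0 and the titles are illustrative values.
visualizer.set_canvas_properties(0, x_title='t', y_title='amplitude',
                                 x_lim=[0, 3.14], y_labels=False)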
|
def get_users_with_permission(obj, permission):
"""Return users with specific permission on object.
:param obj: Object to return users for
:param permission: Permission codename
"""
user_model = get_user_model()
return user_model.objects.filter(
userobjectpermission__object_pk=obj.pk,
userobjectpermission__permission__codename=permission,
).distinct()
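A sketch assuming object-level permissions stored in django-guardian-style UserObjectPermission rows; the model instance and codename below are hypothetical:
# `document` is a hypothetical model instance carrying object permissions.
editors = get_users_with_permission(document, 'change_document')
for user in editors:
    print(user.username)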
|
def _folder_item_duedate(self, analysis_brain, item):
"""Set the analysis' due date to the item passed in.
:param analysis_brain: Brain that represents an analysis
:param item: analysis' dictionary counterpart that represents a row
"""
# Note that if the analysis is a Reference Analysis, `getDueDate`
# returns the date when the ReferenceSample expires. If the analysis is
# a duplicate, `getDueDate` returns the due date of the source analysis
due_date = analysis_brain.getDueDate
if not due_date:
return None
due_date_str = self.ulocalized_time(due_date, long_format=0)
item['DueDate'] = due_date_str
# If the Analysis is late/overdue, display an icon
capture_date = analysis_brain.getResultCaptureDate
capture_date = capture_date or DateTime()
if capture_date > due_date:
# The analysis is late or overdue
img = get_image('late.png', title=t(_("Late Analysis")),
width='16px', height='16px')
item['replace']['DueDate'] = '{} {}'.format(due_date_str, img)
|
def get_assets_by_provider(self, resource_id=None):
"""Gets an ``AssetList`` from the given provider.
In plenary mode, the returned list contains all known assets or
an error results. Otherwise, the returned list may contain only
those assets that are accessible through this session.
arg: resource_id (osid.id.Id): a resource ``Id``
return: (osid.repository.AssetList) - the returned ``Asset
list``
raise: NullArgument - ``resource_id`` is ``null``
raise: OperationFailed - unable to complete request
raise: PermissionDenied - authorization failure
*compliance: mandatory -- This method must be implemented.*
"""
return AssetList(self._provider_session.get_assets_by_provider(resource_id),
self._config_map)
|
def introspect_operation(self, operation):
"""
Introspects an entire operation, returning::
* the method name (to expose to the user)
* the API name (used server-side)
* docs
* introspected information about the parameters
* information about the output
:param operation: The operation to introspect
:type operation: A <botocore.operation.Operation> object
:returns: A dict of information
"""
return {
'method_name': operation.py_name,
'api_name': operation.name,
'docs': self.convert_docs(operation.documentation),
'params': self.parse_params(operation.params),
'output': operation.output,
}
|
def _request(self, endpoint, method, data=None, **kwargs):
"""
    Method to handle both GET and POST requests.
:param endpoint: Endpoint of the API.
:param method: Method of HTTP request.
:param data: POST DATA for the request.
:param kwargs: Other keyword arguments.
:return: Response for the request.
"""
final_url = self.url + endpoint
if not self._is_authenticated:
raise LoginRequired
rq = self.session
if method == 'get':
request = rq.get(final_url, **kwargs)
else:
request = rq.post(final_url, data, **kwargs)
request.raise_for_status()
request.encoding = 'utf_8'
if len(request.text) == 0:
data = json.loads('{}')
else:
try:
data = json.loads(request.text)
except ValueError:
data = request.text
return data
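A minimal sketch exercising both branches, assuming an already-authenticated client instance (the endpoint paths are illustrative):
# GET branch: keyword arguments are forwarded to session.get.
version = client._request('/version/api', 'get')
# POST branch: the third positional argument becomes the request body.
client._request('/command/pause', 'post', data={'hash': 'abc123'})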
|
def recall():
"""RECALL Section 9.3.18a"""
a = TpPd(pd=0x3)
b = MessageType(mesType=0xb) # 00001011
c = RecallType()
d = Facility()
packet = a / b / c / d
return packet
|
def remove(self, method_or_response=None, url=None):
"""
Removes a response previously added using ``add()``, identified
either by a response object inheriting ``BaseResponse`` or
``method`` and ``url``. Removes all matching responses.
>>> response.add(responses.GET, 'http://example.org')
>>> response.remove(responses.GET, 'http://example.org')
"""
if isinstance(method_or_response, BaseResponse):
response = method_or_response
else:
response = BaseResponse(method=method_or_response, url=url)
while response in self._matches:
self._matches.remove(response)
|
def namedb_get_name(cur, name, current_block, include_expired=False, include_history=True, only_registered=True):
"""
Get a name and all of its history. Note: will return a revoked name
Return the name + history on success
Return None if the name doesn't exist, or is expired (NOTE: will return a revoked name)
"""
if not include_expired:
unexpired_fragment, unexpired_args = namedb_select_where_unexpired_names(current_block, only_registered=only_registered)
select_query = "SELECT name_records.* FROM name_records JOIN namespaces ON name_records.namespace_id = namespaces.namespace_id " + \
"WHERE name = ? AND " + unexpired_fragment + ";"
args = (name, ) + unexpired_args
else:
select_query = "SELECT * FROM name_records WHERE name = ?;"
args = (name,)
# log.debug(namedb_format_query(select_query, args))
name_rows = namedb_query_execute( cur, select_query, args )
name_row = name_rows.fetchone()
if name_row is None:
# no such name
return None
name_rec = {}
name_rec.update( name_row )
if include_history:
name_history = namedb_get_history( cur, name )
name_rec['history'] = name_history
return name_rec
|
def threshold(self, scalars, vmin=None, vmax=None, useCells=False):
"""
Extracts cells where scalar value satisfies threshold criterion.
:param scalars: name of the scalars array.
:type scalars: str, list
:param float vmin: minimum value of the scalar
:param float vmax: maximum value of the scalar
:param bool useCells: if `True`, assume array scalars refers to cells.
.. hint:: |mesh_threshold| |mesh_threshold.py|_
"""
if utils.isSequence(scalars):
self.addPointScalars(scalars, "threshold")
scalars = "threshold"
elif self.scalars(scalars) is None:
colors.printc("~times No scalars found with name", scalars, c=1)
exit()
thres = vtk.vtkThreshold()
thres.SetInputData(self.poly)
if useCells:
asso = vtk.vtkDataObject.FIELD_ASSOCIATION_CELLS
else:
asso = vtk.vtkDataObject.FIELD_ASSOCIATION_POINTS
thres.SetInputArrayToProcess(0, 0, 0, asso, scalars)
if vmin is None and vmax is not None:
thres.ThresholdByLower(vmax)
elif vmax is None and vmin is not None:
thres.ThresholdByUpper(vmin)
else:
thres.ThresholdBetween(vmin, vmax)
thres.Update()
gf = vtk.vtkGeometryFilter()
gf.SetInputData(thres.GetOutput())
gf.Update()
return self.updateMesh(gf.GetOutput())
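A hedged usage sketch, assuming a mesh object of this class carrying a point scalar array (the array name is illustrative):
# Keep cells/points whose 'elevation' scalar is at least 0.5.
cropped = mesh.threshold('elevation', vmin=0.5)
# Keep only the band between 0.2 and 0.8.
band = mesh.threshold('elevation', vmin=0.2, vmax=0.8)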
|
def topWindow():
"""
Returns the very top window for all Qt purposes.
:return <QWidget> || None
"""
import_qt(globals())
window = QtGui.QApplication.instance().activeWindow()
if not window:
return None
parent = window.parent()
while parent:
window = parent
parent = window.parent()
return window
|
def make_helix(aa, axis_distance, z_shift, phi, splay, off_plane):
"""Builds a helix for a given set of parameters."""
start = numpy.array([axis_distance, 0 + z_shift, 0])
end = numpy.array([axis_distance, (aa * 1.52) + z_shift, 0])
mid = (start + end) / 2
helix = Helix.from_start_and_end(start, end, aa=aa)
helix.rotate(splay, (0, 0, 1), mid)
helix.rotate(off_plane, (1, 0, 0), mid)
helix.rotate(phi, helix.axis.unit_tangent, helix.helix_start)
return helix
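An illustrative call; the parameter values are arbitrary, and the angle units (degrees here) are an assumption based on ISAMBARD-style rotate APIs:
# 18 residues, 5 units from the bundle axis, modest splay and off-plane tilt.
helix = make_helix(aa=18, axis_distance=5.0, z_shift=0.0,
                   phi=0.0, splay=10.0, off_plane=5.0)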
|
def _first_stoppoint(self, irsb, extra_stop_points=None):
"""
Enumerate the imarks in the block. If any of them (after the first one) are at a stop point, returns the address
of the stop point. None is returned otherwise.
"""
if self._stop_points is None and extra_stop_points is None and self.project is None:
return None
first_imark = True
for stmt in irsb.statements:
if type(stmt) is pyvex.stmt.IMark: # pylint: disable=unidiomatic-typecheck
addr = stmt.addr + stmt.delta
if not first_imark:
if self.is_stop_point(addr, extra_stop_points):
# could this part be moved by pyvex?
return addr
if stmt.delta != 0 and self.is_stop_point(stmt.addr, extra_stop_points):
return addr
first_imark = False
return None
|
def get_vm_host_info(hostip, auth, url):
"""
    Takes the IP address of a hypervisor host, resolves its device ID, and issues a RESTful call to HP IMC.
    :param hostip: IP address of the hypervisor host, as an int or string
:param auth: requests auth object #usually auth.creds from auth pyhpeimc.auth.class
:param url: base url of IMC RS interface #usually auth.url from pyhpeimc.auth.authclass
    :return: Dictionary containing the information for the target VM host
:rtype: dict
>>> from pyhpeimc.auth import *
>>> from pyhpeimc.plat.vrm import *
>>> auth = IMCAuth("http://", "10.101.0.203", "8080", "admin", "admin")
>>> host_info = get_vm_host_info('10.101.0.6', auth.creds, auth.url)
>>> assert type(host_info) is dict
>>> assert len(host_info) == 10
>>> assert 'cpuFeg' in host_info
>>> assert 'cpuNum' in host_info
>>> assert 'devId' in host_info
>>> assert 'devIp' in host_info
>>> assert 'diskSize' in host_info
>>> assert 'memory' in host_info
>>> assert 'parentDevId' in host_info
>>> assert 'porductFlag' in host_info
>>> assert 'serverName' in host_info
>>> assert 'vendor' in host_info
"""
hostId = get_dev_details(hostip, auth, url)['id']
get_vm_host_info_url = "/imcrs/vrm/host?hostId=" + str(hostId)
f_url = url + get_vm_host_info_url
payload = None
r = requests.get(f_url, auth=auth,
headers=HEADERS) # creates the URL using the payload variable as the contents
# print(r.status_code)
try:
if r.status_code == 200:
if len(r.text) > 0:
return json.loads(r.text)
elif r.status_code == 204:
print("Device is not a supported Hypervisor")
return "Device is not a supported Hypervisor"
except requests.exceptions.RequestException as e:
        return "Error:\n" + str(e) + " get_vm_host_info: An error has occurred"
|
def get_portchannel_info_by_intf_output_lacp_actor_priority(self, **kwargs):
"""Auto Generated Code
"""
config = ET.Element("config")
get_portchannel_info_by_intf = ET.Element("get_portchannel_info_by_intf")
config = get_portchannel_info_by_intf
output = ET.SubElement(get_portchannel_info_by_intf, "output")
lacp = ET.SubElement(output, "lacp")
actor_priority = ET.SubElement(lacp, "actor-priority")
actor_priority.text = kwargs.pop('actor_priority')
callback = kwargs.pop('callback', self._callback)
return callback(config)
|
def cal_gpa(grades):
"""
    Compute the per-course average grade point and the GPA from an array of grades.
    The algorithm may not match the school's exactly; the results are for reference only.
    :param grades: the grade array returned by :meth:`models.StudentSession.get_my_achievements`
    :return: a tuple containing the per-course average grade point and the GPA
    """
    # total number of courses
    courses_sum = len(grades)
    # sum of course grade points
    points_sum = 0
    # sum of credits
    credit_sum = 0
    # sum of (course credit x course grade point)
gpa_points_sum = 0
for grade in grades:
point = get_point(grade.get('补考成绩') or grade['成绩'])
credit = float(grade['学分'])
points_sum += point
credit_sum += credit
gpa_points_sum += credit * point
ave_point = points_sum / courses_sum
gpa = gpa_points_sum / credit_sum
return round(ave_point, 5), round(gpa, 5)
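A worked sketch, assuming get_point maps a score string to a grade point; the dict keys mirror the ones read above:
# Hypothetical input records.
grades = [
    {'成绩': '90', '补考成绩': None, '学分': '3'},
    {'成绩': '75', '补考成绩': None, '学分': '2'},
]
ave_point, gpa = cal_gpa(grades)  # the GPA weights each point by its credits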
|
def getSchema(cls):
"""
return all persistent class attributes
"""
schema = []
for name, atr in cls.__attributes__:
atr = atr.__get__(None, cls)
if isinstance(atr, SQLAttribute):
schema.append((name, atr))
cls.getSchema = staticmethod(lambda schema=schema: schema)
return schema
|
def remove_imaginary_terms(pauli_sums: PauliSum) -> PauliSum:
"""
Remove the imaginary component of each term in a Pauli sum.
:param pauli_sums: The Pauli sum to process.
:return: a purely Hermitian Pauli sum.
"""
if not isinstance(pauli_sums, PauliSum):
raise TypeError("not a pauli sum. please give me one")
new_term = sI(0) * 0.0
for term in pauli_sums:
new_term += term_with_coeff(term, term.coefficient.real)
return new_term
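A short sketch with pyquil's Pauli constructors; the coefficients are chosen so the imaginary parts visibly drop out:
from pyquil.paulis import sX, sY

psum = (1.0 + 2.0j) * sX(0) + (0.5 - 1.0j) * sX(0) * sY(1)
hermitian = remove_imaginary_terms(psum)
# hermitian keeps (1.0)*X0 and (0.5)*X0*Y1, plus the zero identity seed term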
|
def _lint(self):
"""Run linter in a subprocess."""
command = self._get_command()
process = subprocess.run(command, stdout=subprocess.PIPE, # nosec
stderr=subprocess.PIPE)
LOG.info('Finished %s', ' '.join(command))
stdout, stderr = self._get_output_lines(process)
return self._linter.parse(stdout), self._parse_stderr(stderr)
|
def get_empty_dimension(**kwargs):
"""
Returns a dimension object initialized with empty values
"""
dimension = JSONObject(Dimension())
dimension.id = None
dimension.name = ''
dimension.description = ''
dimension.project_id = None
dimension.units = []
return dimension
|
def propertySearch(self, pid, getall=0):
"""
Searches this 'GameTree' for nodes containing matching properties.
Returns a 'GameTree' containing the matched node(s). Arguments:
- pid : string -- ID of properties to search for.
- getall : boolean -- Set to true (1) to return all 'Node''s that
match, or to false (0) to return only the first match."""
matches = []
for n in self:
        if pid in n:  # dict-style membership test (has_key was removed in Python 3)
matches.append(n)
if not getall:
break
else: # getall or not matches:
for v in self.variations:
matches = matches + v.propertySearch(pid, getall)
if not getall and matches:
break
return GameTree(matches)
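An illustrative sketch for an SGF game tree of this style; `game` is a placeholder, and 'C' is the standard SGF comment property:
# Collect every node that carries a comment.
commented = game.propertySearch('C', getall=1)
# Or stop at the first match only.
first = game.propertySearch('C', getall=0)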
|
def get_dataframe(self, force_computation=False):
"""
Preprocesses then transforms the return of fetch().
Args:
force_computation (bool, optional) : Defaults to False. If set to True, forces the computation of DataFrame at each call.
Returns:
pandas.DataFrame: Preprocessed and transformed DataFrame.
"""
# returns df if it was already computed
if self.df is not None and not force_computation: return self.df
self.df = self.fetch(self.context)
    # compute df = transform(preprocess(df))
self.df = self.preprocess(self.df)
self.transform(self.df)
return self.df
|
def _check_accessed_members(self, node, accessed):
"""check that accessed members are defined"""
# XXX refactor, probably much simpler now that E0201 is in type checker
excs = ("AttributeError", "Exception", "BaseException")
for attr, nodes in accessed.items():
try:
# is it a class attribute ?
node.local_attr(attr)
# yes, stop here
continue
except astroid.NotFoundError:
pass
# is it an instance attribute of a parent class ?
try:
next(node.instance_attr_ancestors(attr))
# yes, stop here
continue
except StopIteration:
pass
# is it an instance attribute ?
try:
defstmts = node.instance_attr(attr)
except astroid.NotFoundError:
pass
else:
# filter out augment assignment nodes
defstmts = [stmt for stmt in defstmts if stmt not in nodes]
if not defstmts:
# only augment assignment for this node, no-member should be
# triggered by the typecheck checker
continue
# filter defstmts to only pick the first one when there are
# several assignments in the same scope
scope = defstmts[0].scope()
defstmts = [
stmt
for i, stmt in enumerate(defstmts)
if i == 0 or stmt.scope() is not scope
]
# if there are still more than one, don't attempt to be smarter
# than we can be
if len(defstmts) == 1:
defstmt = defstmts[0]
# check that if the node is accessed in the same method as
# it's defined, it's accessed after the initial assignment
frame = defstmt.frame()
lno = defstmt.fromlineno
for _node in nodes:
if (
_node.frame() is frame
and _node.fromlineno < lno
and not astroid.are_exclusive(
_node.statement(), defstmt, excs
)
):
self.add_message(
"access-member-before-definition",
node=_node,
args=(attr, lno),
)
|
def modules():
'''
Return list of static and shared modules (``apachectl -M``)
CLI Example:
.. code-block:: bash
salt '*' apache.modules
'''
cmd = '{0} -M'.format(_detect_os())
ret = {}
ret['static'] = []
ret['shared'] = []
out = __salt__['cmd.run'](cmd).splitlines()
for line in out:
comps = line.split()
if not comps:
continue
if '(static)' in line:
ret['static'].append(comps[0])
if '(shared)' in line:
ret['shared'].append(comps[0])
return ret
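The returned structure looks roughly like the following; the actual module names depend on the local Apache build:
# Illustrative return value only:
# {'static': ['core_module', 'so_module', 'http_module'],
#  'shared': ['ssl_module', 'rewrite_module']}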
|
def read_storage_class(self, name, **kwargs):
"""
read the specified StorageClass
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.read_storage_class(name, async_req=True)
>>> result = thread.get()
:param async_req bool
:param str name: name of the StorageClass (required)
:param str pretty: If 'true', then the output is pretty printed.
:param bool exact: Should the export be exact. Exact export maintains cluster-specific fields like 'Namespace'. Deprecated. Planned for removal in 1.18.
:param bool export: Should this value be exported. Export strips fields that a user can not specify. Deprecated. Planned for removal in 1.18.
:return: V1StorageClass
If the method is called asynchronously,
returns the request thread.
"""
kwargs['_return_http_data_only'] = True
if kwargs.get('async_req'):
return self.read_storage_class_with_http_info(name, **kwargs)
else:
(data) = self.read_storage_class_with_http_info(name, **kwargs)
return data
|
def metar(data: MetarData, units: Units) -> str:
"""
Convert MetarData into a string for text-to-speech
"""
speech = []
if data.wind_direction and data.wind_speed:
speech.append(wind(data.wind_direction, data.wind_speed,
data.wind_gust, data.wind_variable_direction,
units.wind_speed))
if data.visibility:
speech.append(visibility(data.visibility, units.visibility))
if data.temperature:
speech.append(temperature('Temperature', data.temperature, units.temperature))
if data.dewpoint:
speech.append(temperature('Dew point', data.dewpoint, units.temperature))
if data.altimeter:
speech.append(altimeter(data.altimeter, units.altimeter))
if data.other:
speech.append(other(data.other))
speech.append(translate.clouds(data.clouds,
units.altitude).replace(' - Reported AGL', ''))
return ('. '.join([l for l in speech if l])).replace(',', '.')
|
def get_success_enrollment_message(cls, users, enrolled_in):
"""
Create message for the users who were enrolled in a course or program.
Args:
users: An iterable of users who were successfully enrolled
enrolled_in (str): A string identifier for the course or program the users were enrolled in
Returns:
tuple: A 2-tuple containing a message type and message text
"""
enrolled_count = len(users)
return (
'success',
ungettext(
'{enrolled_count} learner was enrolled in {enrolled_in}.',
'{enrolled_count} learners were enrolled in {enrolled_in}.',
enrolled_count,
).format(
enrolled_count=enrolled_count,
enrolled_in=enrolled_in,
)
)
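A sketch of the plural handling; the class name is hypothetical, since only the classmethod body is shown above:
msg_type, msg = EnrollmentView.get_success_enrollment_message(  # assumed class
    users=['alice@example.com', 'bob@example.com'],
    enrolled_in='Demo Course',
)
# msg_type == 'success'; msg == '2 learners were enrolled in Demo Course.'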
|
def get_route_to(self, destination='', protocol=''):
"""Return route details to a specific destination, learned from a certain protocol."""
# Note, it should be possible to query the FIB:
# "<show><routing><fib></fib></routing></show>"
    # to add information to this getter
routes = {}
if destination:
destination = "<destination>{0}</destination>".format(destination)
if protocol:
protocol = "<type>{0}</type>".format(protocol)
cmd = "<show><routing><route>{0}{1}</route></routing></show>".format(protocol, destination)
try:
self.device.op(cmd=cmd)
routes_table_xml = xmltodict.parse(self.device.xml_root())
routes_table_json = json.dumps(routes_table_xml['response']['result']['entry'])
routes_table = json.loads(routes_table_json)
except (AttributeError, KeyError):
routes_table = []
if isinstance(routes_table, dict):
routes_table = [routes_table]
for route in routes_table:
d = {
'current_active': False,
'last_active': False,
'age': -1,
'next_hop': u'',
'protocol': u'',
'outgoing_interface': u'',
'preference': -1,
'inactive_reason': u'',
'routing_table': u'default',
'selected_next_hop': False,
'protocol_attributes': {}
}
destination = route['destination']
flags = route['flags']
if 'A' in flags:
d['current_active'] = True
else:
d['current_active'] = False
if 'C' in flags:
d['protocol'] = "connect"
if 'S' in flags:
d['protocol'] = "static"
if 'R' in flags:
d['protocol'] = "rip"
if 'O' in flags:
d['protocol'] = "ospf"
if 'B' in flags:
d['protocol'] = "bgp"
if 'H' in flags:
d['protocol'] = "host"
if route['age'] is not None:
d['age'] = int(route['age'])
if route['nexthop'] is not None:
d['next_hop'] = route['nexthop']
if route['interface'] is not None:
d['outgoing_interface'] = route['interface']
if route['metric'] is not None:
d['preference'] = int(route['metric'])
if route['virtual-router'] is not None:
d['routing_table'] = route['virtual-router']
if destination not in routes.keys():
routes[destination] = []
routes[destination].append(d)
return routes
|
def copy(self):
"""
Creates a copy of itself
"""
return Particle(copy(self.position),
copy(self.velocity),
self.fitness)
|
def parallel_bulk(
client,
actions,
thread_count=4,
chunk_size=500,
max_chunk_bytes=100 * 1024 * 1024,
queue_size=4,
expand_action_callback=expand_action,
*args,
**kwargs
):
"""
Parallel version of the bulk helper run in multiple threads at once.
:arg client: instance of :class:`~elasticsearch.Elasticsearch` to use
:arg actions: iterator containing the actions
:arg thread_count: size of the threadpool to use for the bulk requests
:arg chunk_size: number of docs in one chunk sent to es (default: 500)
:arg max_chunk_bytes: the maximum size of the request in bytes (default: 100MB)
:arg raise_on_error: raise ``BulkIndexError`` containing errors (as `.errors`)
from the execution of the last chunk when some occur. By default we raise.
:arg raise_on_exception: if ``False`` then don't propagate exceptions from
call to ``bulk`` and just report the items that failed as failed.
:arg expand_action_callback: callback executed on each action passed in,
should return a tuple containing the action line and the data line
(`None` if data line should be omitted).
:arg queue_size: size of the task queue between the main thread (producing
chunks to send) and the processing threads.
"""
# Avoid importing multiprocessing unless parallel_bulk is used
# to avoid exceptions on restricted environments like App Engine
from multiprocessing.pool import ThreadPool
actions = map(expand_action_callback, actions)
class BlockingPool(ThreadPool):
def _setup_queues(self):
super(BlockingPool, self)._setup_queues()
# The queue must be at least the size of the number of threads to
# prevent hanging when inserting sentinel values during teardown.
self._inqueue = Queue(max(queue_size, thread_count))
self._quick_put = self._inqueue.put
pool = BlockingPool(thread_count)
try:
for result in pool.imap(
lambda bulk_chunk: list(
_process_bulk_chunk(
client, bulk_chunk[1], bulk_chunk[0], *args, **kwargs
)
),
_chunk_actions(
actions, chunk_size, max_chunk_bytes, client.transport.serializer
),
):
for item in result:
yield item
finally:
pool.close()
pool.join()
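A typical consumption sketch; note the generator must be iterated for any indexing to happen (the index name and documents are illustrative):
from elasticsearch import Elasticsearch

es = Elasticsearch()
docs = ({'_index': 'my-index', '_source': {'value': i}} for i in range(1000))
for ok, item in parallel_bulk(es, docs, thread_count=4, chunk_size=250):
    if not ok:
        print('failed:', item)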
|
def circles(st, layer, axis, ax=None, talpha=1.0, cedge='white', cface='white'):
"""
    Plots a set of circles corresponding to a slice through the platonic
    structure. Copied from twoslice_overlay, with comments and made standalone.
    Inputs
    ------
    st : state object; provides the particle positions, radii, and image shape.
    layer : Which layer of the slice to use.
    axis : The axis of the slice through the image, 0, 1, or 2.
    ax : plt.axis instance; a new figure is created if None.
    talpha : Alpha of the drawn circles.
    cedge : edge color
    cface : face color
"""
pos = st.obj_get_positions()
rad = st.obj_get_radii()
shape = st.ishape.shape.tolist()
shape.pop(axis) #shape is now the shape of the image
if ax is None:
fig = plt.figure()
axisbg = 'white' if cface == 'black' else 'black'
sx, sy = ((1,shape[1]/float(shape[0])) if shape[0] > shape[1] else
(shape[0]/float(shape[1]), 1))
ax = fig.add_axes((0,0, sx, sy), axisbg=axisbg)
# get the index of the particles we want to include
particles = np.arange(len(pos))[np.abs(pos[:,axis] - layer) < rad]
# for each of these particles display the effective radius
# in the proper place
scale = 1.0 #np.max(shape).astype('float')
for i in particles:
p = pos[i].copy()
r = 2*np.sqrt(rad[i]**2 - (p[axis] - layer)**2)
#CIRCLE IS IN FIGURE COORDINATES!!!
if axis==0:
ix = 1; iy = 2
elif axis == 1:
ix = 0; iy = 2
elif axis==2:
ix = 0; iy = 1
c = Circle((p[ix]/scale, p[iy]/scale), radius=r/2/scale, fc=cface,
ec=cedge, alpha=talpha)
ax.add_patch(c)
# plt.axis([0,1,0,1])
plt.axis('equal') #circles not ellipses
return ax
|
def _syspath(dev):
'''
Full SysFS path of a device
'''
dev = _devbase(dev)
dev = re.sub(r'^([vhs][a-z]+)([0-9]+)', r'\1/\1\2', dev)
# name = re.sub(r'^([a-z]+)(?<!(bcache|md|dm))([0-9]+)', r'\1/\1\2', name)
return os.path.join('/sys/block/', dev)
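A quick illustration of the rewrite, assuming _devbase strips the '/dev/' prefix:
_syspath('/dev/sda1')   # -> '/sys/block/sda/sda1'
_syspath('vdb')         # -> '/sys/block/vdb' (no partition digits to split)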
|
def update_note(note, **kwargs):
"""
Update a note
"""
note_i = _get_note(note.id)
if note.ref_key != note_i.ref_key:
raise HydraError("Cannot convert a %s note to a %s note. Please create a new note instead."%(note_i.ref_key, note.ref_key))
note_i.set_ref(note.ref_key, note.ref_id)
note_i.value = note.value
db.DBSession.flush()
return note_i
|
def server_list(endpoint_id):
"""
Executor for `globus endpoint server list`
"""
# raises usage error on shares for us
endpoint, server_list = get_endpoint_w_server_list(endpoint_id)
if server_list == "S3": # not GCS -- this is an S3 endpoint
server_list = {"s3_url": endpoint["s3_url"]}
fields = [("S3 URL", "s3_url")]
text_format = FORMAT_TEXT_RECORD
else: # regular GCS host endpoint
fields = (
("ID", "id"),
("URI", lambda s: (s["uri"] or "none (Globus Connect Personal)")),
)
text_format = FORMAT_TEXT_TABLE
formatted_print(server_list, text_format=text_format, fields=fields)
|
def add_hotkey(control, key, func, id = None):
"""
Add a global hotkey bound to control via id that should call func.
control: The control to bind to.
key: The hotkey to use.
func: The func to call.
    id: The new ID to use (defaults to creating a new ID).
"""
if win32con is None:
raise RuntimeError('win32con is not available.')
logger.debug('Adding hotkey "%s" to control %s to call %s.', key, control, func)
modifiers, keycode = str_to_key(key, key_table = win32con, accel_format = 'MOD_%s', key_format = 'VK_%s', key_transpositions = {'CTRL': 'CONTROL'})
id = get_id(id)
control.Bind(wx.EVT_HOTKEY, func, id = id)
l = _hotkeys.get(control, [])
l.append([key, id])
_hotkeys[control] = l
return control.RegisterHotKey(id, modifiers, keycode)
|
def modify_product(self, product_id, name=None, description=None, attributes={}):
'''
modify_product(self, product_id, name=None, description=None, attributes={})
Modify an existing product
:Parameters:
* *product_id* (`string`) -- identifier of an existing product
* *name* (`string`) -- name of the product
* *description* (`string`) -- product description
* *attributes* (`object`) -- product attributes to modify
'''
request_data = {'id': product_id}
    if name:
        request_data['name'] = name
    if description:
        request_data['description'] = description
    if attributes:
        request_data['attributes'] = attributes
return self._call_rest_api('post', '/products', data=request_data, error='Failed to modify a new product')
|
def name_scope(name=None):
"""
This decorator wraps a function so that it runs inside a TensorFlow
name scope. The name is given by the `name` option; if this is None,
then the name of the function will be used.
```
>>> @name_scope()
>>> def foo(...):
>>> # now runs inside scope "foo"
>>> @name_scope('bar')
>>> def baz(...):
>>> # now runs inside scope "bar", not "baz"
```
"""
def name_scope_wrapper_decorator(method):
@functools.wraps(method)
def name_scope_wrapper(*args, **kwargs):
scope_name = name if name is not None else method.__name__
with tf.name_scope(scope_name):
return method(*args, **kwargs)
return name_scope_wrapper
return name_scope_wrapper_decorator
|
def build_vep_string(vep_info, vep_columns):
"""
    Build a properly formatted VEP string.
Take a list with vep annotations and build a new vep string
Args:
vep_info (list): A list with vep annotation dictionaries
vep_columns (list): A list with the vep column names found in the
header of the vcf
Returns:
string: A string with the proper vep annotations
"""
logger = getLogger(__name__)
logger.debug("Building vep string from {0}".format(vep_info))
logger.debug("Found vep headers {0}".format(vep_columns))
vep_strings = []
for vep_annotation in vep_info:
try:
vep_info_list = [
vep_annotation[vep_key] for vep_key in vep_columns
]
except KeyError:
raise SyntaxError("Vep entry does not correspond to vep headers")
vep_strings.append('|'.join(vep_info_list))
return ','.join(vep_strings)
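A worked sketch with hypothetical annotations and column order:
vep_columns = ['Allele', 'Consequence', 'Gene']
vep_info = [
    {'Allele': 'A', 'Consequence': 'missense_variant', 'Gene': 'BRCA1'},
    {'Allele': 'A', 'Consequence': 'intron_variant', 'Gene': 'BRCA1'},
]
build_vep_string(vep_info, vep_columns)
# -> 'A|missense_variant|BRCA1,A|intron_variant|BRCA1'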
|
def _attempting(self, text):
"""Keeps track of the furthest point in the source code the parser has reached to this point."""
consumed = len(self.original_text) - len(text)
self.most_consumed = max(consumed, self.most_consumed)
|
def Pn(x):
    """Calculate Legendre polynomials P0 to P28 and return them
in a dictionary ``Pn``.
:param float x: argument to calculate Legendre polynomials
:return Pn: dictionary which contains order of Legendre polynomials
(from 0 to 28) as keys and the corresponding evaluation
of Legendre polynomials as values.
:rtype: dict
"""
Pn = {}
Pn['0'] = P0(x)
Pn['1'] = P1(x)
Pn['2'] = P2(x)
Pn['3'] = P3(x)
Pn['4'] = P4(x)
Pn['5'] = P5(x)
Pn['6'] = P6(x)
Pn['8'] = P8(x)
Pn['10'] = P10(x)
Pn['12'] = P12(x)
Pn['14'] = P14(x)
Pn['16'] = P16(x)
Pn['18'] = P18(x)
Pn['20'] = P20(x)
Pn['22'] = P22(x)
Pn['24'] = P24(x)
Pn['26'] = P26(x)
Pn['28'] = P28(x)
return Pn
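A quick sanity check against the closed form P2(x) = (3*x**2 - 1) / 2, assuming the P0..P28 helpers are defined alongside this function:
legendre = Pn(0.5)
legendre['2']   # expected -0.125, since (3 * 0.25 - 1) / 2 = -0.125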
|
def get_rsa_pub_key(path):
'''
Read a public key off the disk.
'''
log.debug('salt.crypt.get_rsa_pub_key: Loading public key')
if HAS_M2:
with salt.utils.files.fopen(path, 'rb') as f:
data = f.read().replace(b'RSA ', b'')
bio = BIO.MemoryBuffer(data)
key = RSA.load_pub_key_bio(bio)
else:
with salt.utils.files.fopen(path) as f:
key = RSA.importKey(f.read())
return key
|
def from_config(cls, cp, **kwargs):
r"""Initializes an instance of this class from the given config file.
Parameters
----------
cp : WorkflowConfigParser
Config file parser to read.
\**kwargs :
All additional keyword arguments are passed to the class. Any
        provided keyword will override what is in the config file.
"""
args = cls._init_args_from_config(cp)
args['low_frequency_cutoff'] = low_frequency_cutoff_from_config(cp)
args['high_frequency_cutoff'] = high_frequency_cutoff_from_config(cp)
# get any other keyword arguments provided in the model section
ignore_args = ['name', 'low-frequency-cutoff', 'high-frequency-cutoff']
args.update(cls.extra_args_from_config(cp, "model",
skip_args=ignore_args))
args.update(kwargs)
return cls(**args)
|
def on_connection_open(self, connection):
"""This method is called by pika once the connection to RabbitMQ has
been established.
:type connection: pika.TornadoConnection
"""
LOGGER.debug('Connection opened')
connection.add_on_connection_blocked_callback(
self.on_connection_blocked)
connection.add_on_connection_unblocked_callback(
self.on_connection_unblocked)
connection.add_backpressure_callback(self.on_back_pressure_detected)
self.channel = self._open_channel()
|
def get_string_plus_property_value(value):
# type: (Any) -> Optional[List[str]]
"""
    Converts a string or a list of strings into a list of strings
:param value: A string or a list of strings
:return: A list of strings or None
"""
if value:
if isinstance(value, str):
return [value]
if isinstance(value, list):
return value
if isinstance(value, tuple):
return list(value)
return None
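The behavior is easiest to see by example:
get_string_plus_property_value('a')          # -> ['a']
get_string_plus_property_value(['a', 'b'])   # -> ['a', 'b']
get_string_plus_property_value(('a', 'b'))   # -> ['a', 'b']
get_string_plus_property_value(None)         # -> None
get_string_plus_property_value(42)           # -> None (unsupported type)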
|
def _curvelength(self, x0, y0, x1, y1, x2, y2, x3, y3, n=20):
""" Returns the length of the spline.
Integrates the estimated length of the cubic bezier spline defined by x0, y0, ... x3, y3,
    by adding the lengths of linear line segments between points at t.
The number of points is defined by n
(n=10 would add the lengths of lines between 0.0 and 0.1, between 0.1 and 0.2, and so on).
The default n=20 is fine for most cases, usually resulting in a deviation of less than 0.01.
"""
# Originally from nodebox-gl
length = 0
xi = x0
yi = y0
for i in range(n):
t = 1.0 * (i + 1) / n
pt_x, pt_y, pt_c1x, pt_c1y, pt_c2x, pt_c2y = \
self._curvepoint(t, x0, y0, x1, y1, x2, y2, x3, y3)
c = sqrt(pow(abs(xi - pt_x), 2) + pow(abs(yi - pt_y), 2))
length += c
xi = pt_x
yi = pt_y
return length
|
def split_sequence_as_iterable(self, values):
"""Group sequence into iterables
Parameters
----------
values : iterable of length equal to keys
iterable of values to be grouped
Yields
------
iterable of items in values
Notes
-----
    This is the preferred method if values has random access, but we don't want it completely in memory.
Like a big memory mapped file, for instance
"""
s = iter(self.index.sorter)
for c in self.count:
yield (values[i] for i in itertools.islice(s, int(c)))
|
def from_grpc_status(status_code, message, **kwargs):
"""Create a :class:`GoogleAPICallError` from a :class:`grpc.StatusCode`.
Args:
status_code (grpc.StatusCode): The gRPC status code.
message (str): The exception message.
kwargs: Additional arguments passed to the :class:`GoogleAPICallError`
constructor.
Returns:
GoogleAPICallError: An instance of the appropriate subclass of
:class:`GoogleAPICallError`.
"""
error_class = exception_class_for_grpc_status(status_code)
error = error_class(message, **kwargs)
if error.grpc_status_code is None:
error.grpc_status_code = status_code
return error
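A brief sketch with a real grpc status code; treat the concrete subclass as illustrative of the mapping:
import grpc

error = from_grpc_status(grpc.StatusCode.NOT_FOUND, 'resource missing')
print(type(error).__name__)   # expected: 'NotFound'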
|
def clean(self):
"""
Get the .txt file from within the many-layered
directory structure, then delete the directories.
"""
logger.info('Cleaning up archive')
shutil.move(
os.path.join(
self.data_dir,
'var/IRS/data/scripts/pofd/download/FullDataFile.txt'
),
self.final_path
)
shutil.rmtree(os.path.join(self.data_dir, 'var'))
os.remove(self.zip_path)
|
def update_submit_s3_uri(estimator, job_name):
    Updates the S3 URI of the framework source directory in the given estimator.
Args:
estimator (sagemaker.estimator.Framework): The Framework estimator to update.
job_name (str): The new job name included in the submit S3 URI
Returns:
str: The updated S3 URI of framework source directory
"""
if estimator.uploaded_code is None:
return
pattern = r'(?<=/)[^/]+?(?=/source/sourcedir.tar.gz)'
# update the S3 URI with the latest training job.
# s3://path/old_job/source/sourcedir.tar.gz will become s3://path/new_job/source/sourcedir.tar.gz
submit_uri = estimator.uploaded_code.s3_prefix
submit_uri = re.sub(pattern, job_name, submit_uri)
script_name = estimator.uploaded_code.script_name
estimator.uploaded_code = fw_utils.UploadedCode(submit_uri, script_name)
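The regex is easiest to see in isolation; only the job-name path segment before /source/sourcedir.tar.gz is replaced:
import re

pattern = r'(?<=/)[^/]+?(?=/source/sourcedir.tar.gz)'
re.sub(pattern, 'new_job', 's3://bucket/old_job/source/sourcedir.tar.gz')
# -> 's3://bucket/new_job/source/sourcedir.tar.gz'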
|
def p_LD_reg_val(p):
""" asm : LD reg8 COMMA expr
| LD reg8 COMMA pexpr
| LD reg16 COMMA expr
| LD reg8_hl COMMA expr
| LD A COMMA expr
| LD SP COMMA expr
| LD reg8i COMMA expr
"""
s = 'LD %s,N' % p[2]
if p[2] in REGS16:
s += 'N'
p[0] = Asm(p.lineno(1), s, p[4])
|
def scheduler(ctx, xmlrpc, xmlrpc_host, xmlrpc_port,
inqueue_limit, delete_time, active_tasks, loop_limit, fail_pause_num,
scheduler_cls, threads, get_object=False):
"""
Run Scheduler, only one scheduler is allowed.
"""
g = ctx.obj
Scheduler = load_cls(None, None, scheduler_cls)
kwargs = dict(taskdb=g.taskdb, projectdb=g.projectdb, resultdb=g.resultdb,
newtask_queue=g.newtask_queue, status_queue=g.status_queue,
out_queue=g.scheduler2fetcher, data_path=g.get('data_path', 'data'))
if threads:
kwargs['threads'] = int(threads)
scheduler = Scheduler(**kwargs)
scheduler.INQUEUE_LIMIT = inqueue_limit
scheduler.DELETE_TIME = delete_time
scheduler.ACTIVE_TASKS = active_tasks
scheduler.LOOP_LIMIT = loop_limit
scheduler.FAIL_PAUSE_NUM = fail_pause_num
g.instances.append(scheduler)
if g.get('testing_mode') or get_object:
return scheduler
if xmlrpc:
utils.run_in_thread(scheduler.xmlrpc_run, port=xmlrpc_port, bind=xmlrpc_host)
scheduler.run()
|
def delete_model_translation(self, request, translation):
"""
Hook for deleting a translation.
This calls :func:`get_translation_objects` to collect all related objects for the translation.
By default, that includes the translations for inline objects.
"""
master = translation.master
for qs in self.get_translation_objects(request, translation.language_code, obj=master, inlines=self.delete_inline_translations):
if isinstance(qs, (tuple, list)):
# The objects are deleted one by one.
# This triggers the post_delete signals and such.
for obj in qs:
obj.delete()
else:
# Also delete translations of inlines which the user has access to.
            # This doesn't trigger signals, just like a regular queryset.delete().
qs.delete()
|
def is_floating(self):
"""Returns whether this is a (non-quantized, real) floating point type."""
return (
self.is_numpy_compatible and np.issubdtype(self.as_numpy_dtype, np.floating)
) or self.base_dtype == bfloat16
|
def getstats(self, save=True, filename=None, samples=None, subset=None, ablation_time=False):
"""
Return pandas dataframe of all sample statistics.
"""
slst = []
if samples is not None:
subset = self.make_subset(samples)
samples = self._get_samples(subset)
for s in self.stats_calced:
for nm in [n for n in samples if self.srm_identifier
not in n]:
if self.stats[nm][s].ndim == 2:
                # make multi-index
reps = np.arange(self.stats[nm][s].shape[-1])
ss = np.array([s] * reps.size)
nms = np.array([nm] * reps.size)
                # make sub-dataframe
stdf = pd.DataFrame(self.stats[nm][s].T,
columns=self.stats[nm]['analytes'],
index=[ss, nms, reps])
stdf.index.set_names(['statistic', 'sample', 'rep'],
inplace=True)
else:
stdf = pd.DataFrame(self.stats[nm][s],
index=self.stats[nm]['analytes'],
columns=[[s], [nm]]).T
stdf.index.set_names(['statistic', 'sample'],
inplace=True)
slst.append(stdf)
out = pd.concat(slst)
if ablation_time:
ats = self.ablation_times(samples=samples, subset=subset)
ats['statistic'] = 'nanmean'
ats.set_index('statistic', append=True, inplace=True)
ats = ats.reorder_levels(['statistic', 'sample', 'rep'])
out = out.join(ats)
out.drop(self.internal_standard, 1, inplace=True)
if save:
if filename is None:
filename = 'stat_export.csv'
out.to_csv(self.export_dir + '/' + filename)
self.stats_df = out
return out
|
def handle(self, argv=None):
"""
Main function.
Parses command, load settings and dispatches accordingly.
"""
desc = ('Zappa - Deploy Python applications to AWS Lambda'
' and API Gateway.\n')
parser = argparse.ArgumentParser(description=desc)
parser.add_argument(
'-v', '--version', action='version',
version=pkg_resources.get_distribution("zappa").version,
help='Print the zappa version'
)
parser.add_argument(
'--color', default='auto', choices=['auto','never','always']
)
env_parser = argparse.ArgumentParser(add_help=False)
me_group = env_parser.add_mutually_exclusive_group()
all_help = ('Execute this command for all of our defined '
'Zappa stages.')
me_group.add_argument('--all', action='store_true', help=all_help)
me_group.add_argument('stage_env', nargs='?')
group = env_parser.add_argument_group()
group.add_argument(
'-a', '--app_function', help='The WSGI application function.'
)
group.add_argument(
'-s', '--settings_file', help='The path to a Zappa settings file.'
)
group.add_argument(
'-q', '--quiet', action='store_true', help='Silence all output.'
)
# https://github.com/Miserlou/Zappa/issues/407
# Moved when 'template' command added.
# Fuck Terraform.
group.add_argument(
'-j', '--json', action='store_true', help='Make the output of this command be machine readable.'
)
# https://github.com/Miserlou/Zappa/issues/891
group.add_argument(
'--disable_progress', action='store_true', help='Disable progress bars.'
)
##
# Certify
##
subparsers = parser.add_subparsers(title='subcommands', dest='command')
cert_parser = subparsers.add_parser(
'certify', parents=[env_parser],
help='Create and install SSL certificate'
)
cert_parser.add_argument(
'--manual', action='store_true',
        help=("Gets new Let's Encrypt certificates, but prints them to console. "
"Does not update API Gateway domains.")
)
cert_parser.add_argument(
'-y', '--yes', action='store_true', help='Auto confirm yes.'
)
##
# Deploy
##
deploy_parser = subparsers.add_parser(
'deploy', parents=[env_parser], help='Deploy application.'
)
deploy_parser.add_argument(
'-z', '--zip', help='Deploy Lambda with specific local or S3 hosted zip package'
)
##
# Init
##
init_parser = subparsers.add_parser('init', help='Initialize Zappa app.')
##
# Package
##
package_parser = subparsers.add_parser(
'package', parents=[env_parser], help='Build the application zip package locally.'
)
package_parser.add_argument(
'-o', '--output', help='Name of file to output the package to.'
)
##
# Template
##
template_parser = subparsers.add_parser(
'template', parents=[env_parser], help='Create a CloudFormation template for this API Gateway.'
)
template_parser.add_argument(
'-l', '--lambda-arn', required=True, help='ARN of the Lambda function to template to.'
)
template_parser.add_argument(
'-r', '--role-arn', required=True, help='ARN of the Role to template with.'
)
template_parser.add_argument(
'-o', '--output', help='Name of file to output the template to.'
)
##
# Invocation
##
invoke_parser = subparsers.add_parser(
'invoke', parents=[env_parser],
help='Invoke remote function.'
)
invoke_parser.add_argument(
'--raw', action='store_true',
help=('When invoking remotely, invoke this python as a string,'
' not as a modular path.')
)
invoke_parser.add_argument(
'--no-color', action='store_true',
help=("Don't color the output")
)
invoke_parser.add_argument('command_rest')
##
# Manage
##
manage_parser = subparsers.add_parser(
'manage',
help='Invoke remote Django manage.py commands.'
)
rest_help = ("Command in the form of <env> <command>. <env> is not "
"required if --all is specified")
manage_parser.add_argument('--all', action='store_true', help=all_help)
manage_parser.add_argument('command_rest', nargs='+', help=rest_help)
manage_parser.add_argument(
'--no-color', action='store_true',
help=("Don't color the output")
)
# This is explicitly added here because this is the only subcommand that doesn't inherit from env_parser
# https://github.com/Miserlou/Zappa/issues/1002
manage_parser.add_argument(
'-s', '--settings_file', help='The path to a Zappa settings file.'
)
##
# Rollback
##
def positive_int(s):
""" Ensure an arg is positive """
i = int(s)
if i < 0:
msg = "This argument must be positive (got {})".format(s)
raise argparse.ArgumentTypeError(msg)
return i
rollback_parser = subparsers.add_parser(
'rollback', parents=[env_parser],
help='Rollback deployed code to a previous version.'
)
rollback_parser.add_argument(
'-n', '--num-rollback', type=positive_int, default=1,
help='The number of versions to rollback.'
)
##
# Scheduling
##
subparsers.add_parser(
'schedule', parents=[env_parser],
help='Schedule functions to occur at regular intervals.'
)
##
# Status
##
status_parser = subparsers.add_parser(
'status', parents=[env_parser],
help='Show deployment status and event schedules.'
)
##
# Log Tailing
##
tail_parser = subparsers.add_parser(
'tail', parents=[env_parser], help='Tail deployment logs.'
)
tail_parser.add_argument(
'--no-color', action='store_true',
help="Don't color log tail output."
)
tail_parser.add_argument(
'--http', action='store_true',
help='Only show HTTP requests in tail output.'
)
tail_parser.add_argument(
'--non-http', action='store_true',
help='Only show non-HTTP requests in tail output.'
)
tail_parser.add_argument(
'--since', type=str, default="100000s",
help="Only show lines since a certain timeframe."
)
tail_parser.add_argument(
'--filter', type=str, default="",
help="Apply a filter pattern to the logs."
)
tail_parser.add_argument(
'--force-color', action='store_true',
help='Force coloring log tail output even if coloring support is not auto-detected. (example: piping)'
)
tail_parser.add_argument(
'--disable-keep-open', action='store_true',
help="Exit after printing the last available log, rather than keeping the log open."
)
##
# Undeploy
##
undeploy_parser = subparsers.add_parser(
'undeploy', parents=[env_parser], help='Undeploy application.'
)
undeploy_parser.add_argument(
'--remove-logs', action='store_true',
help=('Removes log groups of api gateway and lambda task'
' during the undeployment.'),
)
undeploy_parser.add_argument(
'-y', '--yes', action='store_true', help='Auto confirm yes.'
)
##
# Unschedule
##
subparsers.add_parser('unschedule', parents=[env_parser],
help='Unschedule functions.')
##
# Updating
##
update_parser = subparsers.add_parser(
'update', parents=[env_parser], help='Update deployed application.'
)
update_parser.add_argument(
'-z', '--zip', help='Update Lambda with specific local or S3 hosted zip package'
)
update_parser.add_argument(
        '-n', '--no-upload', action='store_true', help="Update configuration where appropriate, but don't upload new code"
)
##
# Debug
##
subparsers.add_parser(
'shell', parents=[env_parser], help='A debug shell with a loaded Zappa object.'
)
argcomplete.autocomplete(parser)
args = parser.parse_args(argv)
self.vargs = vars(args)
if args.color == 'never':
disable_click_colors()
elif args.color == 'always':
#TODO: Support aggressive coloring like "--force-color" on all commands
pass
elif args.color == 'auto':
pass
# Parse the input
# NOTE(rmoe): Special case for manage command
# The manage command can't have both stage_env and command_rest
# arguments. Since they are both positional arguments argparse can't
# differentiate the two. This causes problems when used with --all.
# (e.g. "manage --all showmigrations admin" argparse thinks --all has
# been specified AND that stage_env='showmigrations')
# By having command_rest collect everything but --all we can split it
# apart here instead of relying on argparse.
if not args.command:
parser.print_help()
return
if args.command == 'manage' and not self.vargs.get('all'):
self.stage_env = self.vargs['command_rest'].pop(0)
else:
self.stage_env = self.vargs.get('stage_env')
if args.command == 'package':
self.load_credentials = False
self.command = args.command
self.disable_progress = self.vargs.get('disable_progress')
if self.vargs.get('quiet'):
self.silence()
# We don't have any settings yet, so make those first!
# (Settings-based interactions will fail
# before a project has been initialized.)
if self.command == 'init':
self.init()
return
# Make sure there isn't a new version available
if not self.vargs.get('json'):
self.check_for_update()
# Load and Validate Settings File
self.load_settings_file(self.vargs.get('settings_file'))
# Should we execute this for all stages, or just one?
all_stages = self.vargs.get('all')
stages = []
if all_stages: # All stages!
stages = self.zappa_settings.keys()
else: # Just one env.
if not self.stage_env:
# If there's only one stage defined in the settings,
# use that as the default.
if len(self.zappa_settings.keys()) == 1:
stages.append(list(self.zappa_settings.keys())[0])
else:
parser.error("Please supply a stage to interact with.")
else:
stages.append(self.stage_env)
for stage in stages:
try:
self.dispatch_command(self.command, stage)
except ClickException as e:
# Discussion on exit codes: https://github.com/Miserlou/Zappa/issues/407
e.show()
sys.exit(e.exit_code)
|
Main function.
Parses the command, loads settings, and dispatches accordingly.
|
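The manage-command special case described in the NOTE above reduces to popping the stage off the front of command_rest; a minimal, self-contained reproduction with a toy parser (names here are illustrative, not Zappa's own):

import argparse

parser = argparse.ArgumentParser()
sub = parser.add_subparsers(dest='command')
manage = sub.add_parser('manage')
manage.add_argument('--all', action='store_true')
manage.add_argument('command_rest', nargs='+')

args = parser.parse_args(['manage', 'dev', 'showmigrations', 'admin'])
vargs = vars(args)
if args.command == 'manage' and not vargs.get('all'):
    stage_env = vargs['command_rest'].pop(0)   # 'dev' is the stage
    print(stage_env, vargs['command_rest'])    # dev ['showmigrations', 'admin']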
def serviceQueues(self, limit=None):
"""
Process `limit` number of messages in the inBox.
:param limit: the maximum number of messages to process
:return: the number of messages successfully processed
"""
# TODO should handle SuspiciousNode here
r = self.dequeue_pre_prepares()
r += self.inBoxRouter.handleAllSync(self.inBox, limit)
r += self.send_3pc_batch()
r += self._serviceActions()
return r
|
Process `limit` number of messages in the inBox.
:param limit: the maximum number of messages to process
:return: the number of messages successfully processed
|
def dereplicate_seqs(seqs_fp,
output_fp,
min_size=2,
use_log=False,
threads=1):
"""Dereplicate FASTA sequences and remove singletons using VSEARCH.
Parameters
----------
seqs_fp : string
filepath to FASTA sequence file
output_fp : string
file path to dereplicated sequences (FASTA format)
min_size : integer, optional
discard sequences with an abundance value smaller
than integer
use_log: boolean, optional
save the vsearch logfile as well (to output_fp.log)
default=False
threads : int, optional
number of threads to use (0 for all available)
"""
logger = logging.getLogger(__name__)
logger.info('dereplicate seqs file %s' % seqs_fp)
log_name = "%s.log" % output_fp
params = ['vsearch', '--derep_fulllength', seqs_fp,
'--output', output_fp, '--sizeout',
'--fasta_width', '0', '--minuniquesize', str(min_size),
'--quiet', '--threads', str(threads)]
if use_log:
params.extend(['--log', log_name])
sout, serr, res = _system_call(params)
    if res != 0:
logger.error('Problem running vsearch dereplication on file %s' %
seqs_fp)
logger.debug('parameters used:\n%s' % params)
logger.debug('stdout: %s' % sout)
logger.debug('stderr: %s' % serr)
return
|
Dereplicate FASTA sequences and remove singletons using VSEARCH.
Parameters
----------
seqs_fp : string
filepath to FASTA sequence file
output_fp : string
file path to dereplicated sequences (FASTA format)
min_size : integer, optional
discard sequences with an abundance value smaller
than integer
use_log: boolean, optional
save the vsearch logfile as well (to output_fp.log)
default=False
threads : int, optional
number of threads to use (0 for all available)
|
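_system_call is defined elsewhere in the module; a plausible minimal stand-in built on subprocess (an assumption about its contract, inferred from how its return values are used above):

import subprocess

def _system_call(params):
    """Run a command list; return (stdout, stderr, returncode)."""
    proc = subprocess.Popen(params, stdout=subprocess.PIPE,
                            stderr=subprocess.PIPE,
                            universal_newlines=True)
    sout, serr = proc.communicate()
    return sout, serr, proc.returncode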
def from_spec(spec, kwargs=None):
"""
Creates a layer from a specification dict.
"""
layer = util.get_object(
obj=spec,
predefined_objects=tensorforce.core.networks.layers,
kwargs=kwargs
)
assert isinstance(layer, Layer)
return layer
|
Creates a layer from a specification dict.
|
def lines(self):
"""
Array of all the lines.
"""
# Cache, because this one is reused very often.
if self._cache.lines is None:
self._cache.lines = _ImmutableLineList(self.text.split('\n'))
return self._cache.lines
|
Array of all the lines.
|
def get_rsn_ie(defcipher, defauth, data):
"""http://git.kernel.org/cgit/linux/kernel/git/jberg/iw.git/tree/scan.c?id=v3.17#n441.
Positional arguments:
defcipher -- default cipher if not in data (string).
defauth -- default authentication suites if not in data (string).
data -- bytearray data to read.
Returns:
Dict.
"""
answers = dict()
answers['version'] = data[0] + (data[1] << 8)
data = data[2:]
if len(data) < 4:
answers['group_cipher'] = answers['pairwise_ciphers'] = defcipher
return answers
answers['group_cipher'] = get_cipher(data)
data = data[4:]
if len(data) < 2:
answers['pairwise_ciphers'] = defcipher
return answers
count = data[0] | (data[1] << 8)
if 2 + (count * 4) > len(data):
answers['bogus tail data'] = data
return answers
answers['pairwise_ciphers'] = ' '.join(get_cipher(data[2 + (i * 4):]) for i in range(count))
data = data[2 + (count * 4):]
if len(data) < 2:
answers['authentication_suites'] = defauth
return answers
count = data[0] | (data[1] << 8)
if 2 + (count * 4) > len(data):
answers['bogus tail data'] = data
return answers
answers['authentication_suites'] = ' '.join(get_auth(data[2 + (i * 4):]) for i in range(count))
data = data[2 + (count * 4):]
if len(data) >= 2:
capa = data[0] | (data[1] << 8)
answers['rsn_ie_capabilities'] = list()
if capa & 0x0001:
answers['rsn_ie_capabilities'].append('PreAuth')
if capa & 0x0002:
answers['rsn_ie_capabilities'].append('NoPairwise')
case = {0: '1-PTKSA-RC', 1: '2-PTKSA-RC', 2: '4-PTKSA-RC', 3: '16-PTKSA-RC'}.get((capa & 0x000c) >> 2)
if case:
answers['rsn_ie_capabilities'].append(case)
case = {0: '1-GTKSA-RC', 1: '2-GTKSA-RC', 2: '4-GTKSA-RC', 3: '16-GTKSA-RC'}.get((capa & 0x0030) >> 4)
if case:
answers['rsn_ie_capabilities'].append(case)
if capa & 0x0040:
answers['rsn_ie_capabilities'].append('MFP-required')
if capa & 0x0080:
answers['rsn_ie_capabilities'].append('MFP-capable')
if capa & 0x0200:
answers['rsn_ie_capabilities'].append('Peerkey-enabled')
if capa & 0x0400:
answers['rsn_ie_capabilities'].append('SPP-AMSDU-capable')
if capa & 0x0800:
answers['rsn_ie_capabilities'].append('SPP-AMSDU-required')
answers['rsn_ie_capabilities'].append('(0x{0:04x})'.format(capa))
data = data[2:]
invalid = False
if len(data) >= 2:
pmkid_count = data[0] | (data[1] << 8)
if len(data) >= 2 + 16 * pmkid_count:
answers['PMKIDs'] = pmkid_count
data = data[2 + 16 * pmkid_count:]
else:
invalid = True
if len(data) >= 4 and not invalid:
answers['Group mgmt cipher suite'] = get_cipher(data)
data = data[4:]
if data:
answers['* bogus tail data ({0})'.format(len(data))] = ' '.join(format(x, '02x') for x in data)
return answers
|
http://git.kernel.org/cgit/linux/kernel/git/jberg/iw.git/tree/scan.c?id=v3.17#n441.
Positional arguments:
defcipher -- default cipher if not in data (string).
defauth -- default authentication suites if not in data (string).
data -- bytearray data to read.
Returns:
Dict.
|
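The capability-bitfield step at the end of get_rsn_ie can be exercised in isolation; a small sketch with a hypothetical two-byte field (masks copied from the function body):

data = bytearray([0xcc, 0x00])       # hypothetical capability bytes
capa = data[0] | (data[1] << 8)      # little-endian 16-bit value -> 0x00cc
caps = []
ptksa = {0: '1-PTKSA-RC', 1: '2-PTKSA-RC', 2: '4-PTKSA-RC', 3: '16-PTKSA-RC'}
caps.append(ptksa[(capa & 0x000c) >> 2])   # bits 2-3 -> '16-PTKSA-RC'
if capa & 0x0040:
    caps.append('MFP-required')
if capa & 0x0080:
    caps.append('MFP-capable')
caps.append('(0x{0:04x})'.format(capa))
print(' '.join(caps))                # 16-PTKSA-RC MFP-required MFP-capable (0x00cc)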
def root(self):
""" Property provides access to root object in CFB. """
sector = self.header.directory_sector_start
position = (sector + 1) << self.header.sector_shift
return RootEntry(self, position)
|
Property provides access to root object in CFB.
|
def songs_iter(self, *, continuation_token=None, export_type=1):
"""Get a paged iterator of Music Library songs.
Parameters:
continuation_token (str, Optional): The token of the page to return.
Default: Not sent to get first page.
export_type (int, Optional): The type of tracks to return. 1 for all tracks, 2 for promotional and purchased.
Default: ``1``
Yields:
list: Song dicts.
"""
def track_info_to_dict(track_info):
return dict(
(field.name, value)
for field, value in track_info.ListFields()
)
while True:
response = self._call(
mm_calls.ExportIDs,
self.uploader_id,
continuation_token=continuation_token,
export_type=export_type
)
items = [
track_info_to_dict(track_info)
for track_info in response.body.download_track_info
]
if items:
yield items
continuation_token = response.body.continuation_token
if not continuation_token:
break
|
Get a paged iterator of Music Library songs.
Parameters:
continuation_token (str, Optional): The token of the page to return.
Default: Not sent to get first page.
export_type (int, Optional): The type of tracks to return. 1 for all tracks, 2 for promotional and purchased.
Default: ``1``
Yields:
list: Song dicts.
|
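The continuation-token loop in songs_iter follows a standard pagination pattern; a minimal self-contained sketch where fetch_page is a hypothetical stand-in for the mm_calls.ExportIDs call:

def fetch_page(token=None):
    # Fake three-page API: returns (items, next_token); '' means no more pages.
    pages = {None: (['a', 'b'], 't1'), 't1': (['c'], 't2'), 't2': (['d'], '')}
    return pages[token]

def paged_iter():
    token = None
    while True:
        items, token = fetch_page(token)
        if items:
            yield items
        if not token:
            break

for page in paged_iter():
    print(page)   # ['a', 'b'], then ['c'], then ['d']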
def _cdata_io(self, action, data, ctype, frames):
"""Call one of libsndfile's read/write functions."""
assert ctype in _ffi_types.values()
self._check_if_closed()
if self.seekable():
curr = self.tell()
func = getattr(_snd, 'sf_' + action + 'f_' + ctype)
frames = func(self._file, data, frames)
_error_check(self._errorcode)
if self.seekable():
self.seek(curr + frames, SEEK_SET) # Update read & write position
return frames
|
Call one of libsndfile's read/write functions.
|
def set_attributes(self, **kwargs):
"""
Set a group of attributes (parameters and members). Calls
`setp` directly, so kwargs can include more than just the
parameter value (e.g., bounds, free, etc.).
"""
self.clear_derived()
kwargs = dict(kwargs)
for name, value in kwargs.items():
            # Warn if the parameter is not found
            try:
                self.getp(name)
            except KeyError:
                print("Warning: %s does not have attribute %s" %
                      (type(self), name))
# Set attributes
try:
self.setp(name, clear_derived=False, **value)
except TypeError:
try:
self.setp(name, clear_derived=False, *value)
except (TypeError, KeyError):
try:
self.setp(name, clear_derived=False, value=value)
except (TypeError, KeyError):
self.__setattr__(name, value)
            # pop this attribute off the list of missing properties
self._missing.pop(name, None)
# Check to make sure we got all the required properties
if self._missing:
raise ValueError(
"One or more required properties are missing ",
self._missing.keys())
|
Set a group of attributes (parameters and members). Calls
`setp` directly, so kwargs can include more than just the
parameter value (e.g., bounds, free, etc.).
|
def pairdists(alignment, subs_model, alpha=None, ncat=4, tolerance=1e-6, verbose=False):
""" Load an alignment, calculate all pairwise distances and variances
model parameter must be a Substitution model type from phylo_utils """
# Check
if not isinstance(subs_model, phylo_utils.models.Model):
raise ValueError("Can't handle this model: {}".format(model))
if alpha is None:
alpha = 1.0
ncat = 1
# Set up markov model
tm = TransitionMatrix(subs_model)
gamma_rates = discrete_gamma(alpha, ncat)
partials = alignment_to_partials(alignment)
seqnames = alignment.get_names()
nseq = len(seqnames)
distances = np.zeros((nseq, nseq))
variances = np.zeros((nseq, nseq))
# Check the model has the appropriate size
if not subs_model.size == partials[seqnames[0]].shape[1]:
raise ValueError("Model {} expects {} states, but the alignment has {}".format(model.name,
model.size,
partials[seqnames[0]].shape[1]))
nodes = [phylo_utils.likelihood.LnlModel(tm) for seq in range(nseq)]
for node, header in zip(nodes, seqnames):
node.set_partials(partials[header]) # retrieve partial likelihoods from partials dictionary
for i, j in itertools.combinations(range(nseq), 2):
brlen, var = brent_optimise(nodes[i], nodes[j], verbose=verbose)
distances[i, j] = distances[j, i] = brlen
variances[i, j] = variances[j, i] = var
dm = DistanceMatrix.from_array(distances, names=seqnames)
vm = DistanceMatrix.from_array(variances, names=seqnames)
return dm, vm
|
Load an alignment, calculate all pairwise distances and variances
model parameter must be a Substitution model type from phylo_utils
|
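The symmetric fill over itertools.combinations in pairdists can be shown on its own; the placeholder value below stands in for the optimised branch length from brent_optimise:

import itertools
import numpy as np

nseq = 3
distances = np.zeros((nseq, nseq))
for i, j in itertools.combinations(range(nseq), 2):
    brlen = float(i + j)   # placeholder for brent_optimise(nodes[i], nodes[j])
    distances[i, j] = distances[j, i] = brlen
print(distances)
# [[0. 1. 2.]
#  [1. 0. 3.]
#  [2. 3. 0.]]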
def prep_hla(work_dir, sample, calls, hlas, normal_bam, tumor_bam):
"""Convert HLAs into ABSOLUTE format for use with LOHHLA.
    LOHHLA hard codes names to hla_a, hla_b, hla_c, so inputs need to be renamed to match.
"""
work_dir = utils.safe_makedir(os.path.join(work_dir, sample, "inputs"))
hla_file = os.path.join(work_dir, "%s-hlas.txt" % sample)
with open(calls) as in_handle:
with open(hla_file, "w") as out_handle:
next(in_handle) # header
for line in in_handle:
_, _, a, _, _ = line.strip().split(",")
a1, a2 = a.split(";")
out_handle.write(get_hla_choice(name_to_absolute(a1), hlas, normal_bam, tumor_bam) + "\n")
out_handle.write(get_hla_choice(name_to_absolute(a2), hlas, normal_bam, tumor_bam) + "\n")
return hla_file
|
Convert HLAs into ABSOLUTE format for use with LOHHLA.
LOHHLA hard codes names to hla_a, hla_b, hla_c, so inputs need to be renamed to match.
|
def evaluation_get(self, service_staff_id, start_date, end_date, session):
        '''taobao.wangwang.eservice.evaluation.get Customer service evaluation statistics
        Given an operator ID, returns the daily "customer service evaluation statistics" of
        each queried account ID within the specified date range. Notes:
        - 1. If the operator ID equals the queried ID, the queried ID's statistics are returned.
        - 2. If the operator is a group administrator, they may query the statistics of every sub-account in their group.
        - 3. If the operator is the primary account, they may query the statistics of every sub-account.
        - 4. Multiple queried IDs may be given, separated by ",", with at most 30 IDs.
        - 5. The interval between the start date and the end date may not exceed 7 days.
        - 6. Data older than 90 days cannot be queried.
        - 7. Records for the current day cannot be queried.'''
request = TOPRequest('taobao.wangwang.eservice.evaluation.get')
request['service_staff_id'] = service_staff_id
request['start_date'] = start_date
request['end_date'] = end_date
self.create(self.execute(request, session))
return self.staff_eval_stat_on_days
|
taobao.wangwang.eservice.evaluation.get Customer service evaluation statistics
Given an operator ID, returns the daily "customer service evaluation statistics" of
each queried account ID within the specified date range. Notes:
- 1. If the operator ID equals the queried ID, the queried ID's statistics are returned.
- 2. If the operator is a group administrator, they may query the statistics of every sub-account in their group.
- 3. If the operator is the primary account, they may query the statistics of every sub-account.
- 4. Multiple queried IDs may be given, separated by ",", with at most 30 IDs.
- 5. The interval between the start date and the end date may not exceed 7 days.
- 6. Data older than 90 days cannot be queried.
- 7. Records for the current day cannot be queried.
|
def genealogic_types(self):
""" Get genealogic types
Returns:
Returns a list of all parent types
"""
types = []
parent = self
while parent:
types.append(parent.rest_name)
parent = parent.parent_object
return types
|
Get genealogic types
Returns:
Returns a list of all parent types
|
def _delete(self, namespace, stream, start_id, end_time, configuration):
"""
Delete events with id > `start_id` and end_time <= `end_time`.
"""
start_id_event = Event(start_id)
end_id_event = Event(uuid_from_kronos_time(end_time,
_type=UUIDType.HIGHEST))
stream_events = self.db[namespace][stream]
# Find the interval our events belong to.
lo = bisect.bisect_left(stream_events, start_id_event)
if lo + 1 > len(stream_events):
return 0, []
if stream_events[lo] == start_id_event:
lo += 1
hi = bisect.bisect_right(stream_events, end_id_event)
del stream_events[lo:hi]
return max(0, hi - lo), []
|
Delete events with id > `start_id` and end_time <= `end_time`.
|
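A minimal sketch of the bisect-based range deletion above, on a plain sorted list of integers instead of Event objects:

import bisect

events = [1, 3, 5, 7, 9]
start, end = 3, 7                        # delete ids > 3 with end_time <= 7
lo = bisect.bisect_left(events, start)
if lo < len(events) and events[lo] == start:
    lo += 1                              # make the lower bound exclusive
hi = bisect.bisect_right(events, end)
del events[lo:hi]
print(events, max(0, hi - lo))           # [1, 3, 9] 2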
def delete_lbaas_member(self, lbaas_member, lbaas_pool):
"""Deletes the specified lbaas_member."""
return self.delete(self.lbaas_member_path % (lbaas_pool, lbaas_member))
|
Deletes the specified lbaas_member.
|
def json_minify(string, strip_space=True): # pragma: no cover
"""Removes whitespace from json strings, returning the string
"""
in_string = False
in_multi = False
in_single = False
new_str = []
index = 0
for match in re.finditer(TOKENIZER, string):
if not (in_multi or in_single):
tmp = string[index:match.start()]
if not in_string and strip_space:
# replace white space as defined in standard
tmp = re.sub('[ \t\n\r]+', '', tmp)
new_str.append(tmp)
index = match.end()
val = match.group()
if val == '"' and not (in_multi or in_single):
escaped = END_SLASHES_RE.search(string, 0, match.start())
# start of string or unescaped quote character to end string
if not in_string or (escaped is None or len(escaped.group()) % 2 == 0):
in_string = not in_string
index -= 1 # include " character in next catch
elif not (in_string or in_multi or in_single):
if val == '/*':
in_multi = True
elif val == '//':
in_single = True
elif val == '*/' and in_multi and not (in_string or in_single):
in_multi = False
elif val in '\r\n' and not (in_multi or in_string) and in_single:
in_single = False
elif not ((in_multi or in_single) or (val in ' \r\n\t' and strip_space)):
new_str.append(val)
new_str.append(string[index:])
return ''.join(new_str)
|
Removes whitespace from json strings, returning the string
|
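A usage sketch, assuming json_minify is imported from the module above (TOKENIZER and END_SLASHES_RE are module-level regexes defined alongside it):

raw = '''
{
    // a line comment
    "key": "value", /* a block comment */
    "text": "spaces inside strings survive"
}
'''
print(json_minify(raw))
# {"key":"value","text":"spaces inside strings survive"}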
def removeFriend(self, friend_id=None):
"""
        Removes a specified friend from your friend list.
        :param friend_id: The ID of the friend that you want to remove
        :return: True when the removal succeeds, False otherwise.
"""
payload = {"friend_id": friend_id, "unref": "none", "confirm": "Confirm"}
r = self._post(self.req_url.REMOVE_FRIEND, payload)
query = parse_qs(urlparse(r.url).query)
if "err" not in query:
log.debug("Remove was successful!")
return True
else:
log.warning("Error while removing friend")
return False
|
Removes a specified friend from your friend list.
:param friend_id: The ID of the friend that you want to remove
:return: True when the removal succeeds, False otherwise.
|
def create(obj: PersistedObject, obj_type: Type[T], errors: Dict[Type, Exception]):
"""
Helper method provided because we actually can't put that in the constructor, it creates a bug in Nose tests
https://github.com/nose-devs/nose/issues/725
:param obj:
:param errors: a dictionary of the errors raised for each alternate type tried
:return:
"""
e = NoParserFoundForUnionType('{obj} cannot be parsed as a {typ} because no parser could be found for any of '
'the alternate types. Caught exceptions: {errs}'
''.format(obj=obj, typ=get_pretty_type_str(obj_type), errs=errors))
# save the errors
e.errors = errors
return e
|
Helper method provided because we actually can't put that in the constructor, it creates a bug in Nose tests
https://github.com/nose-devs/nose/issues/725
:param obj:
:param errors: a dictionary of the errors raised for each alternate type tried
:return:
|
def delete_joined_table_sql(qualified_name, removing_qualified_name, primary_key):
"""SQL statement for a joined delete from.
Generate SQL statement for deleting the intersection of rows between
both tables from table referenced by tablename.
"""
condition_template = 't.{}=d.{}'
where_clause = ' AND '.join(condition_template.format(pkey, pkey)
for pkey in primary_key)
delete_statement = (
'DELETE FROM {table} t'
' USING {delete_table} d'
' WHERE {where_clause}').format(table=qualified_name,
delete_table=removing_qualified_name,
where_clause=where_clause)
return delete_statement
|
SQL statement for a joined delete from.
Generate SQL statement for deleting the intersection of rows between
both tables from table referenced by tablename.
|
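For illustration, with a hypothetical two-column primary key the builder yields:

sql = delete_joined_table_sql('public.orders', 'public.orders_to_delete',
                              ['id', 'region'])
print(sql)
# DELETE FROM public.orders t USING public.orders_to_delete d WHERE t.id=d.id AND t.region=d.region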
def parse_colors(path):
"""Parse the given color files.
Supported are:
* .txt for X11 colors
* .json for colornames
"""
if path.endswith(".txt"):
return parse_rgb_txt_file(path)
elif path.endswith(".json"):
return parse_json_color_file(path)
raise TypeError("colorful only supports .txt and .json files for colors")
|
Parse the given color files.
Supported are:
* .txt for X11 colors
* .json for colornames
|
def has_option(self, option_name):
"""
Check that an option exists.
:param str option_name: The name of the option to check.
        :return: True if the option exists in the configuration.
:rtype: bool
"""
if self.prefix:
option_name = self.prefix + self.seperator + option_name
item_names = option_name.split(self.seperator)
node = self._storage
for item_name in item_names:
if node is None:
return False
if not item_name in node:
return False
node = node[item_name]
return True
|
Check that an option exists.
:param str option_name: The name of the option to check.
:return: True if the option exists in the configuration.
:rtype: bool
|
def derive_from_seed(self, offset):
""" Derive private key using "generate_from_seed" method.
Here, the key itself serves as a `seed`, and `offset`
is expected to be a sha256 digest.
"""
seed = int(hexlify(bytes(self)).decode("ascii"), 16)
z = int(hexlify(offset).decode("ascii"), 16)
order = ecdsa.SECP256k1.order
secexp = (seed + z) % order
secret = "%0x" % secexp
if len(secret) < 64: # left-pad with zeroes
secret = ("0" * (64-len(secret))) + secret
return PrivateKey(secret, prefix=self.pubkey.prefix)
|
Derive private key using "generate_from_seed" method.
Here, the key itself serves as a `seed`, and `offset`
is expected to be a sha256 digest.
|
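The arithmetic in derive_from_seed is modular addition over the curve order followed by zero-padding to 64 hex digits; a standalone sketch with toy inputs (the order constant is secp256k1's group order, normally taken from ecdsa.SECP256k1.order):

order = 0xFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFEBAAEDCE6AF48A03BBFD25E8CD0364141
seed = 0x1234        # hypothetical key material as an int
z = 0xABCD           # hypothetical sha256-derived offset as an int
secexp = (seed + z) % order
secret = "%0x" % secexp
if len(secret) < 64:                  # left-pad with zeroes to 32 bytes
    secret = ("0" * (64 - len(secret))) + secret
print(secret)        # 60 zeroes followed by 'be01'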
def __shouldSysExit(self, iteration):
"""
Checks to see if the model should exit based on the exitAfter dummy
parameter
"""
if self._exitAfter is None \
or iteration < self._exitAfter:
return False
results = self._jobsDAO.modelsGetFieldsForJob(self._jobID, ['params'])
modelIDs = [e[0] for e in results]
modelNums = [json.loads(e[1][0])['structuredParams']['__model_num'] for e in results]
        sameModelNumbers = [x for x in zip(modelIDs, modelNums)
                            if x[1] == self.modelIndex]
        firstModelID = min(modelID for modelID, _ in sameModelNumbers)
return firstModelID == self._modelID
|
Checks to see if the model should exit based on the exitAfter dummy
parameter
|
def items_to_extract(self, offset=0, length=None):
"""
Return an iterable of specific items to extract.
As a side-effect, set self.items_to_extract_length.
:param offset: where to start extracting
:param length: how many to extract
        :return: An iterable of the specific items to extract
"""
endoffset = length and offset + length
qs = self.origin_data()[offset:endoffset]
self.items_to_extract_length = qs.count()
return qs
|
Return an iterable of specific items to extract.
As a side-effect, set self.items_to_extract_length.
:param offset: where to start extracting
:param length: how many to extract
:return: An iterable of the specific items to extract
|
def team_absent(name, profile="github", **kwargs):
'''
Ensure a team is absent.
Example:
.. code-block:: yaml
    ensure team test is absent in github:
github.team_absent:
- name: 'test'
The following parameters are required:
name
This is the name of the team in the organization.
.. versionadded:: 2016.11.0
'''
ret = {
'name': name,
'changes': {},
'result': None,
'comment': ''
}
target = __salt__['github.get_team'](name, profile=profile, **kwargs)
if not target:
ret['comment'] = 'Team {0} does not exist'.format(name)
ret['result'] = True
return ret
else:
if __opts__['test']:
ret['comment'] = "Team {0} will be deleted".format(name)
ret['result'] = None
return ret
result = __salt__['github.remove_team'](name, profile=profile, **kwargs)
if result:
ret['comment'] = 'Deleted team {0}'.format(name)
ret['changes'].setdefault('old', 'Team {0} exists'.format(name))
ret['changes'].setdefault('new', 'Team {0} deleted'.format(name))
ret['result'] = True
else:
ret['comment'] = 'Failed to delete {0}'.format(name)
ret['result'] = False
return ret
|
Ensure a team is absent.
Example:
.. code-block:: yaml
ensure team test is absent in github:
github.team_absent:
- name: 'test'
The following parameters are required:
name
This is the name of the team in the organization.
.. versionadded:: 2016.11.0
|
def findCaller(self, stack_info=False, callers=0):
"""
Find the stack frame of the caller so that we can note the source
file name, line number and function name. Not only ignore this class's
and logger's source, but also as many callers as requested.
"""
f = logging.currentframe()
#On some versions of IronPython, currentframe() returns None if
#IronPython isn't run with -X:Frames.
if f is not None:
if callers > 0:
# yes we can!
co = f.f_code
logger.debug2("%s:%s", co.co_filename, co.co_name)
f = f.f_back
rv = "(unknown file)", 0, "(unknown function)", None
countdown = callers
while hasattr(f, "f_code"):
co = f.f_code
filename = os.path.normcase(co.co_filename)
if callers > 0:
# yes we can!
logger.debug2("%s:%s", co.co_filename, co.co_name)
            if filename in (_srcfile, logging._srcfile):
f = f.f_back
continue
if countdown > 0:
f = f.f_back
countdown -= 1
continue
sinfo = None
if stack_info:
sio = io.StringIO()
sio.write('Stack (most recent call last):\n')
traceback.print_stack(f, file=sio)
sinfo = sio.getvalue()
if sinfo[-1] == '\n':
sinfo = sinfo[:-1]
sio.close()
rv = (co.co_filename, f.f_lineno, co.co_name, sinfo)
break
return rv
|
Find the stack frame of the caller so that we can note the source
file name, line number and function name. Not only ignore this class's
and logger's source, but also as many callers as requested.
|
def _call_lrem(self, command, count, value, *args, **kwargs):
"""
If count is 0, we remove all elements equal to value, so we know we have
        nothing to index, and this value to deindex. Otherwise, we don't
        know how many elements will remain in the list, so we have to do a full
        deindex/reindex. So do it carefully.
"""
if not count:
if self.indexable:
self.deindex([value])
return self._traverse_command(command, count, value, *args, **kwargs)
else:
return self._reset(command, count, value, *args, **kwargs)
|
If count is 0, we remove all elements equal to value, so we know we have
nothing to index, and this value to deindex. Otherwise, we don't
know how many elements will remain in the list, so we have to do a full
deindex/reindex. So do it carefully.
|
def ellplot (mjr, mnr, pa):
"""Utility for debugging."""
_ellcheck (mjr, mnr, pa)
import omega as om
th = np.linspace (0, 2 * np.pi, 200)
x, y = ellpoint (mjr, mnr, pa, th)
return om.quickXY (x, y, 'mjr=%f mnr=%f pa=%f' %
(mjr, mnr, pa * 180 / np.pi))
|
Utility for debugging.
|
def members(self, as_set=False):
"""Return the set members tuple/frozenset."""
if as_set:
return frozenset(map(self._members.__getitem__, self._indexes()))
return tuple(map(self._members.__getitem__, self._indexes()))
|
Return the set members tuple/frozenset.
|
def get_context_from_xlsx(self):
"""
Get context from an Excel file
"""
if re.search('^(http|https)://', self.project.CONTEXT_SOURCE_FILE):
resp = requests.get(self.project.CONTEXT_SOURCE_FILE)
content = resp.content
else:
try:
            with open(self.project.CONTEXT_SOURCE_FILE, 'rb') as xlsxfile:
content = xlsxfile.read()
except IOError:
filepath = "%s/%s" % (
os.path.abspath(self.path),
self.project.CONTEXT_SOURCE_FILE)
            with open(filepath, 'rb') as xlsxfile:
content = xlsxfile.read()
data = process_xlsx(content)
if 'values' in data:
data = copy_global_values(data)
return data
|
Get context from an Excel file
|
def update_repository(self, repository_form):
"""Updates an existing repository.
arg: repository_form (osid.repository.RepositoryForm): the
form containing the elements to be updated
raise: IllegalState - ``repository_form`` already used in an
update transaction
raise: InvalidArgument - the form contains an invalid value
raise: NullArgument - ``repository_form`` is ``null``
raise: OperationFailed - unable to complete request
raise: PermissionDenied - authorization failure
raise: Unsupported - ``repository_form`` did not originate from
``get_repository_form_for_update()``
*compliance: mandatory -- This method must be implemented.*
"""
# Implemented from template for
# osid.resource.BinAdminSession.update_bin_template
if self._catalog_session is not None:
return self._catalog_session.update_catalog(catalog_form=repository_form)
collection = JSONClientValidated('repository',
collection='Repository',
runtime=self._runtime)
if not isinstance(repository_form, ABCRepositoryForm):
        raise errors.InvalidArgument('argument type is not a RepositoryForm')
if not repository_form.is_for_update():
        raise errors.InvalidArgument('the RepositoryForm is for create only, not update')
try:
if self._forms[repository_form.get_id().get_identifier()] == UPDATED:
raise errors.IllegalState('repository_form already used in an update transaction')
except KeyError:
raise errors.Unsupported('repository_form did not originate from this session')
if not repository_form.is_valid():
raise errors.InvalidArgument('one or more of the form elements is invalid')
collection.save(repository_form._my_map) # save is deprecated - change to replace_one
self._forms[repository_form.get_id().get_identifier()] = UPDATED
# Note: this is out of spec. The OSIDs don't require an object to be returned
return objects.Repository(osid_object_map=repository_form._my_map, runtime=self._runtime, proxy=self._proxy)
|
Updates an existing repository.
arg: repository_form (osid.repository.RepositoryForm): the
form containing the elements to be updated
raise: IllegalState - ``repository_form`` already used in an
update transaction
raise: InvalidArgument - the form contains an invalid value
raise: NullArgument - ``repository_form`` is ``null``
raise: OperationFailed - unable to complete request
raise: PermissionDenied - authorization failure
raise: Unsupported - ``repository_form`` did not originate from
``get_repository_form_for_update()``
*compliance: mandatory -- This method must be implemented.*
|
def as_list(self):
"return some attributes as a list"
netloc = ''
if self.vpath_connector:
netloc = '(('+self.vpath_connector+'))'
elif self.authority:
netloc = self.authority
else:
netloc = self.netloc
return [
self.scheme,
netloc,
self.path,
self.query,
'',
]
|
return some attributes as a list
|
def grab_zipped_url(zipped_url, ensure=True, appname='utool',
download_dir=None, force_commonprefix=True, cleanup=False,
redownload=False, spoof=False):
r"""
downloads and unzips the url
Args:
        zipped_url (str): url which must be either a .zip or a .tar.gz file
        ensure (bool): eager evaluation if True (default = True)
appname (str): (default = 'utool')
download_dir (str): containing downloading directory
force_commonprefix (bool): (default = True)
cleanup (bool): (default = False)
redownload (bool): (default = False)
spoof (bool): (default = False)
CommandLine:
python -m utool.util_grabdata --exec-grab_zipped_url --show
Example:
>>> # DISABLE_DOCTEST
>>> from utool.util_grabdata import * # NOQA
>>> import utool as ut
>>> zipped_url = '?'
>>> ensure = True
>>> appname = 'utool'
>>> download_dir = None
>>> force_commonprefix = True
>>> cleanup = False
>>> redownload = False
>>> spoof = False
>>> result = grab_zipped_url(zipped_url, ensure, appname, download_dir,
>>> force_commonprefix, cleanup, redownload,
>>> spoof)
>>> print(result)
Examples:
>>> # DISABLE_DOCTEST
>>> from utool.util_grabdata import * # NOQA
>>> zipped_url = 'https://lev.cs.rpi.edu/public/data/testdata.zip'
>>> zipped_url = 'http://www.spam.com/eggs/data.zip'
"""
zipped_url = clean_dropbox_link(zipped_url)
zip_fname = split(zipped_url)[1]
data_name = split_archive_ext(zip_fname)[0]
# Download zipfile to
if download_dir is None:
download_dir = util_cplat.get_app_cache_dir(appname)
# Zipfile should unzip to:
data_dir = join(download_dir, data_name)
if ensure or redownload:
if redownload:
util_path.remove_dirs(data_dir)
util_path.ensurepath(download_dir)
if not exists(data_dir) or redownload:
# Download and unzip testdata
zip_fpath = realpath(join(download_dir, zip_fname))
#print('[utool] Downloading archive %s' % zip_fpath)
if not exists(zip_fpath) or redownload:
download_url(zipped_url, zip_fpath, spoof=spoof)
unarchive_file(zip_fpath, force_commonprefix)
if cleanup:
util_path.delete(zip_fpath) # Cleanup
if cleanup:
util_path.assert_exists(data_dir)
return util_path.unixpath(data_dir)
|
r"""
downloads and unzips the url
Args:
zipped_url (str): url which must be either a .zip or a .tar.gz file
ensure (bool): eager evaluation if True (default = True)
appname (str): (default = 'utool')
download_dir (str): containing downloading directory
force_commonprefix (bool): (default = True)
cleanup (bool): (default = False)
redownload (bool): (default = False)
spoof (bool): (default = False)
CommandLine:
python -m utool.util_grabdata --exec-grab_zipped_url --show
Example:
>>> # DISABLE_DOCTEST
>>> from utool.util_grabdata import * # NOQA
>>> import utool as ut
>>> zipped_url = '?'
>>> ensure = True
>>> appname = 'utool'
>>> download_dir = None
>>> force_commonprefix = True
>>> cleanup = False
>>> redownload = False
>>> spoof = False
>>> result = grab_zipped_url(zipped_url, ensure, appname, download_dir,
>>> force_commonprefix, cleanup, redownload,
>>> spoof)
>>> print(result)
Examples:
>>> # DISABLE_DOCTEST
>>> from utool.util_grabdata import * # NOQA
>>> zipped_url = 'https://lev.cs.rpi.edu/public/data/testdata.zip'
>>> zipped_url = 'http://www.spam.com/eggs/data.zip'
|
def _diff_and_summarize(from_csv, to_csv, index_columns, stream=sys.stdout,
sep=',', ignored_columns=None, significance=None):
"""
Print a summary of the difference between the two files.
"""
from_records = list(records.load(from_csv, sep=sep))
to_records = records.load(to_csv, sep=sep)
diff = patch.create(from_records, to_records, index_columns, ignored_columns)
if significance is not None:
diff = patch.filter_significance(diff, significance)
_summarize_diff(diff, len(from_records), stream=stream)
exit_code = (EXIT_SAME
if patch.is_empty(diff)
else EXIT_DIFFERENT)
sys.exit(exit_code)
|
Print a summary of the difference between the two files.
|
def refreshResults( self ):
"""
Joins together the queries from the fixed system, the search, and the
query builder to generate a query for the browser to display.
"""
if ( self.currentMode() == XOrbBrowserWidget.Mode.Detail ):
self.refreshDetails()
elif ( self.currentMode() == XOrbBrowserWidget.Mode.Card ):
self.refreshCards()
else:
self.refreshThumbnails()
|
Joins together the queries from the fixed system, the search, and the
query builder to generate a query for the browser to display.
|
def _set_collector_ip(self, v, load=False):
"""
Setter method for collector_ip, mapped from YANG variable /telemetry/collector/collector_ip (container)
If this variable is read-only (config: false) in the
source YANG file, then _set_collector_ip is considered as a private
method. Backends looking to populate this variable should
do so via calling thisObj._set_collector_ip() directly.
"""
if hasattr(v, "_utype"):
v = v._utype(v)
try:
t = YANGDynClass(v,base=collector_ip.collector_ip, is_container='container', presence=False, yang_name="collector-ip", rest_name="ip", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, extensions={u'tailf-common': {u'info': u'Collector IP Address Configuration', u'cli-incomplete-no': None, u'cli-sequence-commands': None, u'cli-incomplete-command': None, u'alt-name': u'ip', u'cli-flatten-container': None}}, namespace='urn:brocade.com:mgmt:brocade-telemetry', defining_module='brocade-telemetry', yang_type='container', is_config=True)
except (TypeError, ValueError):
raise ValueError({
'error-string': """collector_ip must be of a type compatible with container""",
'defined-type': "container",
'generated-type': """YANGDynClass(base=collector_ip.collector_ip, is_container='container', presence=False, yang_name="collector-ip", rest_name="ip", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, extensions={u'tailf-common': {u'info': u'Collector IP Address Configuration', u'cli-incomplete-no': None, u'cli-sequence-commands': None, u'cli-incomplete-command': None, u'alt-name': u'ip', u'cli-flatten-container': None}}, namespace='urn:brocade.com:mgmt:brocade-telemetry', defining_module='brocade-telemetry', yang_type='container', is_config=True)""",
})
self.__collector_ip = t
if hasattr(self, '_set'):
self._set()
|
Setter method for collector_ip, mapped from YANG variable /telemetry/collector/collector_ip (container)
If this variable is read-only (config: false) in the
source YANG file, then _set_collector_ip is considered as a private
method. Backends looking to populate this variable should
do so via calling thisObj._set_collector_ip() directly.
|
def is_dec(ip):
"""Return true if the IP address is in decimal notation."""
try:
dec = int(str(ip))
except ValueError:
return False
if dec > 4294967295 or dec < 0:
return False
return True
|
Return true if the IP address is in decimal notation.
|
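A quick usage sketch of the bounds check in is_dec (values chosen for illustration):

print(is_dec('3232235777'))   # True  (192.168.1.1 as a decimal integer)
print(is_dec('4294967296'))   # False (exceeds the 32-bit range)
print(is_dec('10.0.0.1'))     # False (not an integer at all)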
def check_membership(self, groups):
''' Allows for objects with no required groups '''
if not groups or groups == ['']:
return True
if self.request.user.is_superuser:
return True
user_groups = self.request.user.groups.values_list("name", flat=True)
return set(groups).intersection(set(user_groups))
|
Allows for objects with no required groups
|
def connect(self, output_port, input_port):
"""
Connect two :class:`.Effect` instances in this pedalboard.
        For this, it is necessary to inform the origin output port and the destination input port::
>>> pedalboard.append(driver)
>>> pedalboard.append(reverb)
>>> driver_output = driver.outputs[0]
>>> reverb_input = reverb.inputs[0]
>>> Connection(driver_output, reverb_input) in driver.connections
False
>>> pedalboard.connect(driver_output, reverb_input)
>>> Connection(driver_output, reverb_input) in driver.connections
True
:param Port output_port: Effect output port
:param Port input_port: Effect input port
"""
ConnectionClass = output_port.connection_class
self.connections.append(ConnectionClass(output_port, input_port))
|
Connect two :class:`.Effect` instances in this pedalboard.
For this, it is necessary to inform the origin output port and the destination input port::
>>> pedalboard.append(driver)
>>> pedalboard.append(reverb)
>>> driver_output = driver.outputs[0]
>>> reverb_input = reverb.inputs[0]
>>> Connection(driver_output, reverb_input) in driver.connections
False
>>> pedalboard.connect(driver_output, reverb_input)
>>> Connection(driver_output, reverb_input) in driver.connections
True
:param Port output_port: Effect output port
:param Port input_port: Effect input port
|
def get_hla(sample, cromwell_dir, hla_glob):
"""Retrieve HLA calls and input fastqs for a sample.
"""
hla_dir = glob.glob(os.path.join(cromwell_dir, hla_glob, "align", sample, "hla"))[0]
fastq = os.path.join(hla_dir, "OptiType-HLA-A_B_C-input.fq")
calls = os.path.join(hla_dir, "%s-optitype.csv" % sample)
return fastq, calls
|
Retrieve HLA calls and input fastqs for a sample.
|
def set_max_image_pixels(self, pixels):
"""Set PIL limit on pixel size of images to load if non-zero.
WARNING: This is a global setting in PIL, it is
not local to this manipulator instance!
Setting a value here will not only set the given limit but
also convert the PIL "DecompressionBombWarning" into an
error. Thus setting a moderate limit sets a hard limit on
image size loaded, setting a very large limit will have the
effect of disabling the warning.
"""
        if pixels:
Image.MAX_IMAGE_PIXELS = pixels
Image.warnings.simplefilter(
'error', Image.DecompressionBombWarning)
|
Set PIL limit on pixel size of images to load if non-zero.
WARNING: This is a global setting in PIL, it is
not local to this manipulator instance!
Setting a value here will not only set the given limit but
also convert the PIL "DecompressionBombWarning" into an
error. Thus setting a moderate limit sets a hard limit on
image size loaded, setting a very large limit will have the
effect of disabling the warning.
|
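Because the limit is process-global, callers may want to save and restore it around a risky load; a hedged sketch assuming Pillow is installed (the file name is hypothetical):

from PIL import Image
import warnings

previous = Image.MAX_IMAGE_PIXELS
Image.MAX_IMAGE_PIXELS = 10_000_000    # hard-fail on anything larger
warnings.simplefilter('error', Image.DecompressionBombWarning)
try:
    img = Image.open('suspect.png')    # raises on oversized images
finally:
    Image.MAX_IMAGE_PIXELS = previous  # restore the global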
def _compute_stddevs(self, C, mag, rjb, imt, stddev_types):
"""
Compute total standard deviation, equations 5 and 6, page 48.
"""
# aleatory uncertainty
sigma_ale_m = np.interp(mag, [5.0, 5.5, 8.0],
[C['m50'], C['m55'], C['m80']])
sigma_ale_rjb = np.interp(rjb, [5.0, 20.0], [C['r5'], C['r20']])
sigma_ale = np.sqrt(sigma_ale_m ** 2 + sigma_ale_rjb ** 2)
# epistemic uncertainty
if imt.period < 1:
sigma_epi = 0.36 + 0.07 * (mag - 6)
else:
sigma_epi = 0.34 + 0.06 * (mag - 6)
sigma_total = np.sqrt(sigma_ale ** 2 + sigma_epi ** 2)
stddevs = []
for _ in stddev_types:
stddevs.append(sigma_total)
return stddevs
|
Compute total standard deviation, equations 5 and 6, page 48.
|
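The piecewise-linear interpolation of the aleatory magnitude term can be checked in isolation; the coefficient values below are hypothetical:

import numpy as np

m50, m55, m80 = 0.5, 0.4, 0.3   # hypothetical C['m50'], C['m55'], C['m80']
sigma_ale_m = np.interp(6.0, [5.0, 5.5, 8.0], [m50, m55, m80])
# 6.0 sits 0.5/2.5 of the way from 5.5 to 8.0: 0.4 + 0.2 * (0.3 - 0.4) = 0.38
print(sigma_ale_m)   # 0.38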