| Unnamed: 0 (int64, 0–389k) | code (string, 26–79.6k chars) | docstring (string, 1–46.9k chars) |
|---|---|---|
373,700
|
def get(self, resource, **params):
return self._execute(self.session.get, 'GET', resource, **params)
|
Generic TeleSign REST API GET handler.
:param resource: The partial resource URI to perform the request against, as a string.
:param params: Body params to perform the GET request with, as a dictionary.
:return: The RestClient Response object.
|
373,701
|
def to_UNIXtime(timeobject):
if isinstance(timeobject, int):
if timeobject < 0:
raise ValueError("The time value is a negative number")
return timeobject
elif isinstance(timeobject, datetime):
return _datetime_to_UNIXtime(timeobject)
elif isinstance(timeobject, str):
return _ISO8601_to_UNIXtime(timeobject)
else:
raise TypeError('The time value must be expressed either by an int '
'UNIX time, a datetime.datetime object or an '
'ISO8601-formatted string')
|
Returns the UNIXtime corresponding to the time value conveyed by the
specified object, which can be either a UNIXtime, a
``datetime.datetime`` object or an ISO8601-formatted string in the format
``YYYY-MM-DD HH:MM:SS+00``.
:param timeobject: the object conveying the time value
:type timeobject: int, ``datetime.datetime`` or ISO8601-formatted
string
:returns: an int UNIXtime
:raises: *TypeError* when bad argument types are provided, *ValueError*
when negative UNIXtimes are provided
|
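A minimal usage sketch showing the three accepted input forms (hypothetical values; the module's `_datetime_to_UNIXtime` and `_ISO8601_to_UNIXtime` helpers are assumed to behave as documented):

```python
from datetime import datetime

# All three calls should yield the same UNIX timestamp, 1577836800:
to_UNIXtime(1577836800)                     # already a UNIXtime
to_UNIXtime(datetime(2020, 1, 1, 0, 0, 0))  # datetime.datetime (UTC assumed)
to_UNIXtime('2020-01-01 00:00:00+00')       # ISO8601-formatted string
```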
373,702
|
async def play(self):
if self.repeat and self.current is not None:
self.queue.append(self.current)
self.current = None
self.position = 0
self._paused = False
if not self.queue:
await self.stop()
else:
self._is_playing = True
if self.shuffle:
track = self.queue.pop(randrange(len(self.queue)))
else:
track = self.queue.pop(0)
self.current = track
log.debug("Assigned current.")
await self.node.play(self.channel.guild.id, track)
|
Starts playback from lavalink.
|
373,703
|
def set_canvas_properties(self, canvas, x_title=None, y_title=None, x_lim=None, y_lim=None, x_labels=True, y_labels=True):
self.__canvases[canvas] = canvas_descr(x_title, y_title, x_lim, y_lim, x_labels, y_labels)
|
!
@brief Set properties for specified canvas.
@param[in] canvas (uint): Index of the canvas whose properties should be changed.
@param[in] x_title (string): Title for X axis, if 'None', then nothing is displayed.
@param[in] y_title (string): Title for Y axis, if 'None', then nothing is displayed.
@param[in] x_lim (list): Defines borders of X axis like [from, to], for example [0, 3.14], if 'None' then
borders are calculated automatically.
@param[in] y_lim (list): Defines borders of Y axis like [from, to], if 'None' then borders are calculated
automatically.
@param[in] x_labels (bool): If True then labels of X axis are displayed.
@param[in] y_labels (bool): If True then labels of Y axis are displayed.
|
373,704
|
def get_users_with_permission(obj, permission):
user_model = get_user_model()
return user_model.objects.filter(
userobjectpermission__object_pk=obj.pk,
userobjectpermission__permission__codename=permission,
).distinct()
|
Return users with specific permission on object.
:param obj: Object to return users for
:param permission: Permission codename
|
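A hypothetical Django shell usage, assuming django-guardian-style `UserObjectPermission` rows exist for an `article` object:

```python
# Who may edit this article? 'change_article' is a hypothetical codename.
editors = get_users_with_permission(article, 'change_article')
print([user.username for user in editors])
```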
373,705
|
def _folder_item_duedate(self, analysis_brain, item):
due_date = analysis_brain.getDueDate
if not due_date:
return None
due_date_str = self.ulocalized_time(due_date, long_format=0)
item['DueDate'] = due_date_str
capture_date = analysis_brain.getResultCaptureDate
capture_date = capture_date or DateTime()
if capture_date > due_date:
img = get_image('late.png', title=t(_("Late Analysis")),
width='16', height='16')
item['replace']['DueDate'] = '{} {}'.format(due_date_str, img)
|
Set the analysis' due date to the item passed in.
:param analysis_brain: Brain that represents an analysis
:param item: analysis' dictionary counterpart that represents a row
|
373,706
|
def get_assets_by_provider(self, resource_id=None):
return AssetList(self._provider_session.get_assets_by_provider(resource_id),
self._config_map)
|
Gets an ``AssetList`` from the given provider.
In plenary mode, the returned list contains all known assets or
an error results. Otherwise, the returned list may contain only
those assets that are accessible through this session.
arg: resource_id (osid.id.Id): a resource ``Id``
return: (osid.repository.AssetList) - the returned ``Asset
list``
raise: NullArgument - ``resource_id`` is ``null``
raise: OperationFailed - unable to complete request
raise: PermissionDenied - authorization failure
*compliance: mandatory -- This method must be implemented.*
|
373,707
|
def introspect_operation(self, operation):
return {
'method_name': operation.py_name,
'api_name': operation.name,
'docs': self.convert_docs(operation.documentation),
'params': self.parse_params(operation.params),
'output': operation.output,
}
|
Introspects an entire operation, returning::
* the method name (to expose to the user)
* the API name (used server-side)
* docs
* introspected information about the parameters
* information about the output
:param operation: The operation to introspect
:type operation: A <botocore.operation.Operation> object
:returns: A dict of information
|
373,708
|
def _request(self, endpoint, method, data=None, **kwargs):
final_url = self.url + endpoint
if not self._is_authenticated:
raise LoginRequired
rq = self.session
if method == 'get':
request = rq.get(final_url, **kwargs)
else:
request = rq.post(final_url, data, **kwargs)
request.raise_for_status()
request.encoding = 'utf_8'
if len(request.text) == 0:
data = json.loads('{}')
else:
try:
data = json.loads(request.text)
except ValueError:
data = request.text
return data
|
Method to handle both GET and POST requests.
:param endpoint: Endpoint of the API.
:param method: Method of HTTP request.
:param data: POST DATA for the request.
:param kwargs: Other keyword arguments.
:return: Response for the request.
|
373,709
|
def recall():
a = TpPd(pd=0x3)
b = MessageType(mesType=0xb)
c = RecallType()
d = Facility()
packet = a / b / c / d
return packet
|
RECALL Section 9.3.18a
|
373,710
|
def remove(self, method_or_response=None, url=None):
if isinstance(method_or_response, BaseResponse):
response = method_or_response
else:
response = BaseResponse(method=method_or_response, url=url)
while response in self._matches:
self._matches.remove(response)
|
Removes a response previously added using ``add()``, identified
either by a response object inheriting ``BaseResponse`` or
``method`` and ``url``. Removes all matching responses.
>>> responses.add(responses.GET, 'http://example.org')
>>> responses.remove(responses.GET, 'http://example.org')
|
373,711
|
def namedb_get_name(cur, name, current_block, include_expired=False, include_history=True, only_registered=True):
if not include_expired:
unexpired_fragment, unexpired_args = namedb_select_where_unexpired_names(current_block, only_registered=only_registered)
select_query = "SELECT name_records.* FROM name_records JOIN namespaces ON name_records.namespace_id = namespaces.namespace_id " + \
"WHERE name = ? AND " + unexpired_fragment + ";"
args = (name, ) + unexpired_args
else:
select_query = "SELECT * FROM name_records WHERE name = ?;"
args = (name,)
name_rows = namedb_query_execute( cur, select_query, args )
name_row = name_rows.fetchone()
if name_row is None:
return None
name_rec = {}
name_rec.update( name_row )
if include_history:
name_history = namedb_get_history( cur, name )
name_rec['history'] = name_history
return name_rec
|
Get a name and all of its history. Note: this will return a revoked name.
Return the name + history on success.
Return None if the name doesn't exist or is expired.
|
373,712
|
def threshold(self, scalars, vmin=None, vmax=None, useCells=False):
if utils.isSequence(scalars):
self.addPointScalars(scalars, "threshold")
scalars = "threshold"
elif self.scalars(scalars) is None:
colors.printc("~times No scalars found with name", scalars, c=1)
exit()
thres = vtk.vtkThreshold()
thres.SetInputData(self.poly)
if useCells:
asso = vtk.vtkDataObject.FIELD_ASSOCIATION_CELLS
else:
asso = vtk.vtkDataObject.FIELD_ASSOCIATION_POINTS
thres.SetInputArrayToProcess(0, 0, 0, asso, scalars)
if vmin is None and vmax is not None:
thres.ThresholdByLower(vmax)
elif vmax is None and vmin is not None:
thres.ThresholdByUpper(vmin)
else:
thres.ThresholdBetween(vmin, vmax)
thres.Update()
gf = vtk.vtkGeometryFilter()
gf.SetInputData(thres.GetOutput())
gf.Update()
return self.updateMesh(gf.GetOutput())
|
Extracts cells where scalar value satisfies threshold criterion.
:param scalars: name of the scalars array.
:type scalars: str, list
:param float vmin: minimum value of the scalar
:param float vmax: maximum value of the scalar
:param bool useCells: if `True`, assume array scalars refers to cells.
.. hint:: |mesh_threshold| |mesh_threshold.py|_
|
373,713
|
def topWindow():
import_qt(globals())
window = QtGui.QApplication.instance().activeWindow()
if not window:
return None
parent = window.parent()
while parent:
window = parent
parent = window.parent()
return window
|
Returns the very top window for all Qt purposes.
:return <QWidget> || None
|
373,714
|
def make_helix(aa, axis_distance, z_shift, phi, splay, off_plane):
start = numpy.array([axis_distance, 0 + z_shift, 0])
end = numpy.array([axis_distance, (aa * 1.52) + z_shift, 0])
mid = (start + end) / 2
helix = Helix.from_start_and_end(start, end, aa=aa)
helix.rotate(splay, (0, 0, 1), mid)
helix.rotate(off_plane, (1, 0, 0), mid)
helix.rotate(phi, helix.axis.unit_tangent, helix.helix_start)
return helix
|
Builds a helix for a given set of parameters.
|
373,715
|
def _first_stoppoint(self, irsb, extra_stop_points=None):
if self._stop_points is None and extra_stop_points is None and self.project is None:
return None
first_imark = True
for stmt in irsb.statements:
if type(stmt) is pyvex.stmt.IMark:
addr = stmt.addr + stmt.delta
if not first_imark:
if self.is_stop_point(addr, extra_stop_points):
return addr
if stmt.delta != 0 and self.is_stop_point(stmt.addr, extra_stop_points):
return addr
first_imark = False
return None
|
Enumerate the imarks in the block. If any of them (after the first one) are at a stop point, returns the address
of the stop point. None is returned otherwise.
|
373,716
|
def get_vm_host_info(hostip, auth, url):
hostId = get_dev_details(hostip, auth, url)['id']
get_vm_host_info_url = "/imcrs/vrm/host?hostId=" + str(hostId)
f_url = url + get_vm_host_info_url
payload = None
r = requests.get(f_url, auth=auth,
headers=HEADERS)
try:
if r.status_code == 200:
if len(r.text) > 0:
return json.loads(r.text)
elif r.status_code == 204:
print("Device is not a supported Hypervisor")
return "Device is not a supported Hypervisor"
except requests.exceptions.RequestException as e:
return "Error:\n" + str(e) + " get_vm_host_info: An Error has occured"
|
Function takes the host IP of a Hypervisor, looks up its hostId, and issues a RESTful call to HP IMC.
:param hostip: int or string of the IP address of the Hypervisor host
:param auth: requests auth object #usually auth.creds from auth pyhpeimc.auth.class
:param url: base url of IMC RS interface #usually auth.url from pyhpeimc.auth.authclass
:return: Dictionary containing the information for the target VM host
:rtype: dict
>>> from pyhpeimc.auth import *
>>> from pyhpeimc.plat.vrm import *
>>> auth = IMCAuth("http://", "10.101.0.203", "8080", "admin", "admin")
>>> host_info = get_vm_host_info('10.101.0.6', auth.creds, auth.url)
>>> assert type(host_info) is dict
>>> assert len(host_info) == 10
>>> assert 'cpuFeg' in host_info
>>> assert 'cpuNum' in host_info
>>> assert 'devId' in host_info
>>> assert 'devIp' in host_info
>>> assert 'diskSize' in host_info
>>> assert 'memory' in host_info
>>> assert 'parentDevId' in host_info
>>> assert 'porductFlag' in host_info
>>> assert 'serverName' in host_info
>>> assert 'vendor' in host_info
|
373,717
|
def get_portchannel_info_by_intf_output_lacp_actor_priority(self, **kwargs):
config = ET.Element("config")
get_portchannel_info_by_intf = ET.Element("get_portchannel_info_by_intf")
config = get_portchannel_info_by_intf
output = ET.SubElement(get_portchannel_info_by_intf, "output")
lacp = ET.SubElement(output, "lacp")
actor_priority = ET.SubElement(lacp, "actor-priority")
actor_priority.text = kwargs.pop('actor_priority')
callback = kwargs.pop('callback', self._callback)
return callback(config)
|
Auto Generated Code
|
373,718
|
def cal_gpa(grades):
courses_sum = len(grades)
points_sum = 0
credit_sum = 0
gpa_points_sum = 0
for grade in grades:
point = get_point(grade.get() or grade[])
credit = float(grade[])
points_sum += point
credit_sum += credit
gpa_points_sum += credit * point
ave_point = points_sum / courses_sum
gpa = gpa_points_sum / credit_sum
return round(ave_point, 5), round(gpa, 5)
|
Compute the average grade point and GPA from a list of grade records. The algorithm may differ from the school's official one; results are for reference only.
:param grades: the grade list returned by :meth:`models.StudentSession.get_my_achievements`
:return: a tuple of (average grade point, GPA)
|
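The two formulas can be checked by hand; a small sketch with hypothetical grade points and credits (the `get_point` mapping from raw scores is assumed):

```python
points = [4.0, 3.0]   # hypothetical per-course grade points
credits = [3.0, 2.0]  # hypothetical per-course credits

ave_point = sum(points) / len(points)                             # 3.5
gpa = sum(c * p for c, p in zip(credits, points)) / sum(credits)  # 3.6
print(round(ave_point, 5), round(gpa, 5))
```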
373,719
|
def getSchema(cls):
schema = []
for name, atr in cls.__attributes__:
atr = atr.__get__(None, cls)
if isinstance(atr, SQLAttribute):
schema.append((name, atr))
cls.getSchema = staticmethod(lambda schema=schema: schema)
return schema
|
return all persistent class attributes
|
373,720
|
def remove_imaginary_terms(pauli_sums: PauliSum) -> PauliSum:
if not isinstance(pauli_sums, PauliSum):
raise TypeError("not a pauli sum. please give me one")
new_term = sI(0) * 0.0
for term in pauli_sums:
new_term += term_with_coeff(term, term.coefficient.real)
return new_term
|
Remove the imaginary component of each term in a Pauli sum.
:param pauli_sums: The Pauli sum to process.
:return: a purely Hermitian Pauli sum.
|
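A hypothetical pyQuil usage sketch (`sZ` is assumed to be imported from `pyquil.paulis` alongside `sI` and `term_with_coeff`):

```python
from pyquil.paulis import sZ

ps = (1 + 2j) * sZ(0) + (0.5 - 1j) * sZ(1)
hermitian = remove_imaginary_terms(ps)
# Coefficients are now 1.0 and 0.5; the imaginary parts are dropped.
```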
373,721
|
def _lint(self):
command = self._get_command()
process = subprocess.run(command, stdout=subprocess.PIPE,
stderr=subprocess.PIPE)
LOG.info('%s', ' '.join(command))
stdout, stderr = self._get_output_lines(process)
return self._linter.parse(stdout), self._parse_stderr(stderr)
|
Run linter in a subprocess.
|
373,722
|
def get_empty_dimension(**kwargs):
dimension = JSONObject(Dimension())
dimension.id = None
dimension.name = ''
dimension.description = ''
dimension.project_id = None
dimension.units = []
return dimension
|
Returns a dimension object initialized with empty values
|
373,723
|
def propertySearch(self, pid, getall=0):
matches = []
for n in self:
if pid in n:
matches.append(n)
if not getall:
break
else:
for v in self.variations:
matches = matches + v.propertySearch(pid, getall)
if not getall and matches:
break
return GameTree(matches)
|
Searches this 'GameTree' for nodes containing matching properties.
Returns a 'GameTree' containing the matched node(s). Arguments:
- pid : string -- ID of properties to search for.
- getall : boolean -- Set to true (1) to return all 'Node''s that
match, or to false (0) to return only the first match.
|
373,724
|
def get_dataframe(self, force_computation=False):
if self.df is not None and not force_computation: return self.df
self.df = self.fetch(self.context)
self.df = self.preprocess(self.df)
self.transform(self.df)
return self.df
|
Preprocesses then transforms the return of fetch().
Args:
force_computation (bool, optional) : Defaults to False. If set to True, forces the computation of DataFrame at each call.
Returns:
pandas.DataFrame: Preprocessed and transformed DataFrame.
|
373,725
|
def _check_accessed_members(self, node, accessed):
excs = ("AttributeError", "Exception", "BaseException")
for attr, nodes in accessed.items():
try:
node.local_attr(attr)
continue
except astroid.NotFoundError:
pass
try:
next(node.instance_attr_ancestors(attr))
continue
except StopIteration:
pass
try:
defstmts = node.instance_attr(attr)
except astroid.NotFoundError:
pass
else:
defstmts = [stmt for stmt in defstmts if stmt not in nodes]
if not defstmts:
continue
scope = defstmts[0].scope()
defstmts = [
stmt
for i, stmt in enumerate(defstmts)
if i == 0 or stmt.scope() is not scope
]
defstmt = defstmts[0]
frame = defstmt.frame()
lno = defstmt.fromlineno
for _node in nodes:
if (
_node.frame() is frame
and _node.fromlineno < lno
and not astroid.are_exclusive(
_node.statement(), defstmt, excs
)
):
self.add_message(
"access-member-before-definition",
node=_node,
args=(attr, lno),
)
|
check that accessed members are defined
|
373,726
|
def modules():
cmd = '{0} -M'.format(_detect_os())
ret = {}
ret['static'] = []
ret['shared'] = []
out = __salt__['cmd.run'](cmd).splitlines()
for line in out:
comps = line.split()
if not comps:
continue
if '(static)' in line:
ret['static'].append(comps[0])
if '(shared)' in line:
ret['shared'].append(comps[0])
return ret
|
Return list of static and shared modules (``apachectl -M``)
CLI Example:
.. code-block:: bash
salt '*' apache.modules
|
373,727
|
def read_storage_class(self, name, **kwargs):
kwargs['_return_http_data_only'] = True
if kwargs.get('async_req'):
return self.read_storage_class_with_http_info(name, **kwargs)
else:
(data) = self.read_storage_class_with_http_info(name, **kwargs)
return data
|
read the specified StorageClass
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.read_storage_class(name, async_req=True)
>>> result = thread.get()
:param async_req bool
:param str name: name of the StorageClass (required)
:param str pretty: If 'true', then the output is pretty printed.
:param bool exact: Should the export be exact. Exact export maintains cluster-specific fields like 'Namespace'. Deprecated. Planned for removal in 1.18.
:param bool export: Should this value be exported. Export strips fields that a user can not specify. Deprecated. Planned for removal in 1.18.
:return: V1StorageClass
If the method is called asynchronously,
returns the request thread.
|
373,728
|
def metar(data: MetarData, units: Units) -> str:
speech = []
if data.wind_direction and data.wind_speed:
speech.append(wind(data.wind_direction, data.wind_speed,
data.wind_gust, data.wind_variable_direction,
units.wind_speed))
if data.visibility:
speech.append(visibility(data.visibility, units.visibility))
if data.temperature:
speech.append(temperature('Temperature', data.temperature, units.temperature))
if data.dewpoint:
speech.append(temperature('Dew point', data.dewpoint, units.temperature))
if data.altimeter:
speech.append(altimeter(data.altimeter, units.altimeter))
if data.other:
speech.append(other(data.other))
speech.append(translate.clouds(data.clouds,
units.altitude).replace(, ))
return (.join([l for l in speech if l])).replace(, )
|
Convert MetarData into a string for text-to-speech
|
373,729
|
def get_success_enrollment_message(cls, users, enrolled_in):
enrolled_count = len(users)
return (
'success',
ungettext(
'{enrolled_count} learner was enrolled in {enrolled_in}.',
'{enrolled_count} learners were enrolled in {enrolled_in}.',
enrolled_count,
).format(
enrolled_count=enrolled_count,
enrolled_in=enrolled_in,
)
)
|
Create message for the users who were enrolled in a course or program.
Args:
users: An iterable of users who were successfully enrolled
enrolled_in (str): A string identifier for the course or program the users were enrolled in
Returns:
tuple: A 2-tuple containing a message type and message text
|
373,730
|
def get_route_to(self, destination='', protocol=''):
routes = {}
if destination:
destination = "<destination>{0}</destination>".format(destination)
if protocol:
protocol = "<type>{0}</type>".format(protocol)
cmd = "<show><routing><route>{0}{1}</route></routing></show>".format(protocol, destination)
try:
self.device.op(cmd=cmd)
routes_table_xml = xmltodict.parse(self.device.xml_root())
routes_table_json = json.dumps(routes_table_xml[][][])
routes_table = json.loads(routes_table_json)
except (AttributeError, KeyError):
routes_table = []
if isinstance(routes_table, dict):
routes_table = [routes_table]
for route in routes_table:
d = {
: False,
: False,
: -1,
: u,
: u,
: u,
: -1,
: u,
: u,
: False,
: {}
}
destination = route[]
flags = route[]
if in flags:
d[] = True
else:
d[] = False
if in flags:
d[] = "connect"
if in flags:
d[] = "static"
if in flags:
d[] = "rip"
if in flags:
d[] = "rip"
if in flags:
d[] = "ospf"
if in flags:
d[] = "bgp"
if in flags:
d[] = "host"
if route[] is not None:
d[] = int(route[])
if route[] is not None:
d[] = route[]
if route[] is not None:
d[] = route[]
if route[] is not None:
d[] = int(route[])
if route[] is not None:
d[] = route[]
if destination not in routes.keys():
routes[destination] = []
routes[destination].append(d)
return routes
|
Return route details to a specific destination, learned from a certain protocol.
|
373,731
|
def copy(self):
return Particle(copy(self.position),
copy(self.velocity),
self.fitness)
|
Creates a copy of itself
|
373,732
|
def parallel_bulk(
client,
actions,
thread_count=4,
chunk_size=500,
max_chunk_bytes=100 * 1024 * 1024,
queue_size=4,
expand_action_callback=expand_action,
*args,
**kwargs
):
from multiprocessing.pool import ThreadPool
from queue import Queue  # used by BlockingPool below
actions = map(expand_action_callback, actions)
class BlockingPool(ThreadPool):
def _setup_queues(self):
super(BlockingPool, self)._setup_queues()
self._inqueue = Queue(max(queue_size, thread_count))
self._quick_put = self._inqueue.put
pool = BlockingPool(thread_count)
try:
for result in pool.imap(
lambda bulk_chunk: list(
_process_bulk_chunk(
client, bulk_chunk[1], bulk_chunk[0], *args, **kwargs
)
),
_chunk_actions(
actions, chunk_size, max_chunk_bytes, client.transport.serializer
),
):
for item in result:
yield item
finally:
pool.close()
pool.join()
|
Parallel version of the bulk helper run in multiple threads at once.
:arg client: instance of :class:`~elasticsearch.Elasticsearch` to use
:arg actions: iterator containing the actions
:arg thread_count: size of the threadpool to use for the bulk requests
:arg chunk_size: number of docs in one chunk sent to es (default: 500)
:arg max_chunk_bytes: the maximum size of the request in bytes (default: 100MB)
:arg raise_on_error: raise ``BulkIndexError`` containing errors (as `.errors`)
from the execution of the last chunk when some occur. By default we raise.
:arg raise_on_exception: if ``False`` then don't propagate exceptions from
call to ``bulk`` and just report the items that failed as failed.
:arg expand_action_callback: callback executed on each action passed in,
should return a tuple containing the action line and the data line
(`None` if data line should be omitted).
:arg queue_size: size of the task queue between the main thread (producing
chunks to send) and the processing threads.
|
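A hypothetical usage sketch (assumes a reachable Elasticsearch cluster on localhost; index and document shapes are made up):

```python
from elasticsearch import Elasticsearch

es = Elasticsearch()
docs = ({'_index': 'test-index', '_source': {'n': i}} for i in range(1000))
for ok, info in parallel_bulk(es, docs, thread_count=4, chunk_size=250):
    if not ok:
        print('indexing failed:', info)
```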
373,733
|
def circles(st, layer, axis, ax=None, talpha=1.0, cedge=, cface=):
pos = st.obj_get_positions()
rad = st.obj_get_radii()
shape = st.ishape.shape.tolist()
shape.pop(axis)
if ax is None:
fig = plt.figure()
axisbg = if cface == else
sx, sy = ((1,shape[1]/float(shape[0])) if shape[0] > shape[1] else
(shape[0]/float(shape[1]), 1))
ax = fig.add_axes((0,0, sx, sy), axisbg=axisbg)
particles = np.arange(len(pos))[np.abs(pos[:,axis] - layer) < rad]
scale = 1.0
for i in particles:
p = pos[i].copy()
r = 2*np.sqrt(rad[i]**2 - (p[axis] - layer)**2)
if axis==0:
ix = 1; iy = 2
elif axis == 1:
ix = 0; iy = 2
elif axis==2:
ix = 0; iy = 1
c = Circle((p[ix]/scale, p[iy]/scale), radius=r/2/scale, fc=cface,
ec=cedge, alpha=talpha)
ax.add_patch(c)
plt.axis('equal')
return ax
|
Plots a set of circles corresponding to a slice through the platonic
structure. Adapted from twoslice_overlay, with comments added and made standalone.
Inputs
------
pos : array of particle positions; [N,3]
rad : array of particle radii; [N]
ax : plt.axis instance
layer : Which layer of the slice to use.
axis : The slice of the image, 0, 1, or 2.
cedge : edge color
cface : face color
talpha : alpha (opacity) of the drawn circles
|
373,734
|
def _syspath(dev):
dev = _devbase(dev)
dev = re.sub(r, r, dev)
return os.path.join(, dev)
|
Full SysFS path of a device
|
373,735
|
def update_note(note, **kwargs):
note_i = _get_note(note.id)
if note.ref_key != note_i.ref_key:
raise HydraError("Cannot convert a %s note to a %s note. Please create a new note instead."%(note_i.ref_key, note.ref_key))
note_i.set_ref(note.ref_key, note.ref_id)
note_i.value = note.value
db.DBSession.flush()
return note_i
|
Update a note
|
373,736
|
def server_list(endpoint_id):
endpoint, server_list = get_endpoint_w_server_list(endpoint_id)
if server_list == "S3":
server_list = {"s3_url": endpoint["s3_url"]}
fields = [("S3 URL", "s3_url")]
text_format = FORMAT_TEXT_RECORD
else:
fields = (
("ID", "id"),
("URI", lambda s: (s["uri"] or "none (Globus Connect Personal)")),
)
text_format = FORMAT_TEXT_TABLE
formatted_print(server_list, text_format=text_format, fields=fields)
|
Executor for `globus endpoint server list`
|
373,737
|
def add_hotkey(control, key, func, id = None):
if win32con is None:
raise RuntimeError()
logger.debug(, key, control, func)
modifiers, keycode = str_to_key(key, key_table = win32con, accel_format = , key_format = , key_transpositions = {: })
id = get_id(id)
control.Bind(wx.EVT_HOTKEY, func, id = id)
l = _hotkeys.get(control, [])
l.append([key, id])
_hotkeys[control] = l
return control.RegisterHotKey(id, modifiers, keycode)
|
Add a global hotkey bound to control via id that should call func.
control: The control to bind to.
key: The hotkey to use.
func: The func to call.
id: The new ID to use (defaults to creating a new ID).
|
373,738
|
def modify_product(self, product_id, name=None, description=None, attributes={}):
request_data = {'id': product_id}
if name: request_data['name'] = name
if description: request_data['description'] = description
if attributes: request_data['attributes'] = attributes
return self._call_rest_api(, , data=request_data, error=)
|
modify_product(self, product_id, name=None, description=None, attributes={})
Modify an existing product
:Parameters:
* *product_id* (`string`) -- identifier of an existing product
* *name* (`string`) -- name of the product
* *description* (`string`) -- product description
* *attributes* (`object`) -- product attributes to modify
|
373,739
|
def name_scope(name=None):
def name_scope_wrapper_decorator(method):
@functools.wraps(method)
def name_scope_wrapper(*args, **kwargs):
scope_name = name if name is not None else method.__name__
with tf.name_scope(scope_name):
return method(*args, **kwargs)
return name_scope_wrapper
return name_scope_wrapper_decorator
|
This decorator wraps a function so that it runs inside a TensorFlow
name scope. The name is given by the `name` option; if this is None,
then the name of the function will be used.
```
>>> @name_scope()
>>> def foo(...):
>>> # now runs inside scope "foo"
>>> @name_scope('bar')
>>> def baz(...):
>>> # now runs inside scope "bar", not "baz"
```
|
373,740
|
def build_vep_string(vep_info, vep_columns):
logger = getLogger(__name__)
logger.debug("Building vep string from {0}".format(vep_info))
logger.debug("Found vep headers {0}".format(vep_columns))
vep_strings = []
for vep_annotation in vep_info:
try:
vep_info_list = [
vep_annotation[vep_key] for vep_key in vep_columns
]
except KeyError:
raise SyntaxError("Vep entry does not correspond to vep headers")
vep_strings.append('|'.join(vep_info_list))
return ','.join(vep_strings)
|
Build a properly formatted VEP string.
Take a list with vep annotations and build a new vep string
Args:
vep_info (list): A list with vep annotation dictionaries
vep_columns (list): A list with the vep column names found in the
header of the vcf
Returns:
string: A string with the proper vep annotations
|
373,741
|
def _attempting(self, text):
consumed = len(self.original_text) - len(text)
self.most_consumed = max(consumed, self.most_consumed)
|
Keeps track of the furthest point in the source code the parser has reached to this point.
|
373,742
|
def Pn(x):
Pn = {}
Pn[0] = P0(x)
Pn[1] = P1(x)
Pn[2] = P2(x)
Pn[3] = P3(x)
Pn[4] = P4(x)
Pn[5] = P5(x)
Pn[6] = P6(x)
Pn[8] = P8(x)
Pn[10] = P10(x)
Pn[12] = P12(x)
Pn[14] = P14(x)
Pn[16] = P16(x)
Pn[18] = P18(x)
Pn[20] = P20(x)
Pn[22] = P22(x)
Pn[24] = P24(x)
Pn[26] = P26(x)
Pn[28] = P28(x)
return Pn
|
Calculate Legendre polynomials P0 to P28 and return them
in a dictionary ``Pn``.
:param float x: argument to calculate Legendre polynomials
:return Pn: dictionary which contains order of Legendre polynomials
(from 0 to 28) as keys and the corresponding evaluation
of Legendre polynomials as values.
:rtype: dict
|
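A quick sanity check, assuming the `P0` … `P28` helpers are defined: every Legendre polynomial evaluates to 1 at x = 1, so all values in the returned dictionary should be 1:

```python
pn = Pn(1.0)
assert all(abs(value - 1.0) < 1e-12 for value in pn.values())
```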
373,743
|
def get_rsa_pub_key(path):
log.debug('salt.crypt.get_rsa_pub_key: Loading public key')
if HAS_M2:
with salt.utils.files.fopen(path, 'rb') as f:
data = f.read().replace(b'RSA ', b'')
bio = BIO.MemoryBuffer(data)
key = RSA.load_pub_key_bio(bio)
else:
with salt.utils.files.fopen(path) as f:
key = RSA.importKey(f.read())
return key
|
Read a public key off the disk.
|
373,744
|
def from_config(cls, cp, **kwargs):
args = cls._init_args_from_config(cp)
args['low_frequency_cutoff'] = low_frequency_cutoff_from_config(cp)
args['high_frequency_cutoff'] = high_frequency_cutoff_from_config(cp)
ignore_args = ['name', 'low-frequency-cutoff', 'high-frequency-cutoff']
args.update(cls.extra_args_from_config(cp, "model",
skip_args=ignore_args))
args.update(kwargs)
return cls(**args)
|
r"""Initializes an instance of this class from the given config file.
Parameters
----------
cp : WorkflowConfigParser
Config file parser to read.
\**kwargs :
All additional keyword arguments are passed to the class. Any
provided keyword will over ride what is in the config file.
|
373,745
|
def on_connection_open(self, connection):
LOGGER.debug('Connection opened')
connection.add_on_connection_blocked_callback(
self.on_connection_blocked)
connection.add_on_connection_unblocked_callback(
self.on_connection_unblocked)
connection.add_backpressure_callback(self.on_back_pressure_detected)
self.channel = self._open_channel()
|
This method is called by pika once the connection to RabbitMQ has
been established.
:type connection: pika.TornadoConnection
|
373,746
|
def get_string_plus_property_value(value):
if value:
if isinstance(value, str):
return [value]
if isinstance(value, list):
return value
if isinstance(value, tuple):
return list(value)
return None
|
Converts a string, list of strings, or tuple of strings into a list of strings
:param value: A string, a list of strings, or a tuple of strings
:return: A list of strings, or None if value is falsy
|
373,747
|
def _curvelength(self, x0, y0, x1, y1, x2, y2, x3, y3, n=20):
length = 0
xi = x0
yi = y0
for i in range(n):
t = 1.0 * (i + 1) / n
pt_x, pt_y, pt_c1x, pt_c1y, pt_c2x, pt_c2y = \
self._curvepoint(t, x0, y0, x1, y1, x2, y2, x3, y3)
c = sqrt(pow(abs(xi - pt_x), 2) + pow(abs(yi - pt_y), 2))
length += c
xi = pt_x
yi = pt_y
return length
|
Returns the length of the spline.
Integrates the estimated length of the cubic bezier spline defined by x0, y0, ... x3, y3,
by adding the lengths of straight line segments between points at t.
The number of points is defined by n
(n=10 would add the lengths of lines between 0.0 and 0.1, between 0.1 and 0.2, and so on).
The default n=20 is fine for most cases, usually resulting in a deviation of less than 0.01.
|
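As a sanity check (hypothetical call; `path` stands for an object exposing this method and `_curvepoint`), collinear control points reduce the cubic to a straight segment, so the estimate should approach the true length:

```python
# A degenerate "curve" along the x-axis from (0, 0) to (100, 0):
length = path._curvelength(0, 0, 25, 0, 75, 0, 100, 0)  # ~= 100.0
# Increasing n tightens the estimate for genuinely curved splines.
```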
373,748
|
def split_sequence_as_iterable(self, values):
print(self.count)
s = iter(self.index.sorter)
for c in self.count:
yield (values[i] for i in itertools.islice(s, int(c)))
|
Group sequence into iterables
Parameters
----------
values : iterable of length equal to keys
iterable of values to be grouped
Yields
------
iterable of items in values
Notes
-----
This is the preferred method if values has random access, but we don't want it completely in memory.
Like a big memory mapped file, for instance
|
373,749
|
def from_grpc_status(status_code, message, **kwargs):
error_class = exception_class_for_grpc_status(status_code)
error = error_class(message, **kwargs)
if error.grpc_status_code is None:
error.grpc_status_code = status_code
return error
|
Create a :class:`GoogleAPICallError` from a :class:`grpc.StatusCode`.
Args:
status_code (grpc.StatusCode): The gRPC status code.
message (str): The exception message.
kwargs: Additional arguments passed to the :class:`GoogleAPICallError`
constructor.
Returns:
GoogleAPICallError: An instance of the appropriate subclass of
:class:`GoogleAPICallError`.
|
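A hypothetical usage sketch (requires the `grpc` and `google-api-core` packages):

```python
import grpc
from google.api_core import exceptions

err = from_grpc_status(grpc.StatusCode.NOT_FOUND, 'no such entity')
assert isinstance(err, exceptions.NotFound)
assert err.grpc_status_code == grpc.StatusCode.NOT_FOUND
```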
373,750
|
def clean(self):
logger.info()
shutil.move(
os.path.join(
self.data_dir,
),
self.final_path
)
shutil.rmtree(os.path.join(self.data_dir, ))
os.remove(self.zip_path)
|
Get the .txt file from within the many-layered
directory structure, then delete the directories.
|
373,751
|
def update_submit_s3_uri(estimator, job_name):
if estimator.uploaded_code is None:
return
pattern = r
submit_uri = estimator.uploaded_code.s3_prefix
submit_uri = re.sub(pattern, job_name, submit_uri)
script_name = estimator.uploaded_code.script_name
estimator.uploaded_code = fw_utils.UploadedCode(submit_uri, script_name)
|
Updates the S3 URI of the framework source directory in the given estimator.
Args:
estimator (sagemaker.estimator.Framework): The Framework estimator to update.
job_name (str): The new job name included in the submit S3 URI
Returns:
str: The updated S3 URI of framework source directory
|
373,752
|
def p_LD_reg_val(p):
s = % p[2]
if p[2] in REGS16:
s +=
p[0] = Asm(p.lineno(1), s, p[4])
|
asm : LD reg8 COMMA expr
| LD reg8 COMMA pexpr
| LD reg16 COMMA expr
| LD reg8_hl COMMA expr
| LD A COMMA expr
| LD SP COMMA expr
| LD reg8i COMMA expr
|
373,753
|
def scheduler(ctx, xmlrpc, xmlrpc_host, xmlrpc_port,
inqueue_limit, delete_time, active_tasks, loop_limit, fail_pause_num,
scheduler_cls, threads, get_object=False):
g = ctx.obj
Scheduler = load_cls(None, None, scheduler_cls)
kwargs = dict(taskdb=g.taskdb, projectdb=g.projectdb, resultdb=g.resultdb,
newtask_queue=g.newtask_queue, status_queue=g.status_queue,
out_queue=g.scheduler2fetcher, data_path=g.get('data_path', 'data'))
if threads:
kwargs['threads'] = int(threads)
scheduler = Scheduler(**kwargs)
scheduler.INQUEUE_LIMIT = inqueue_limit
scheduler.DELETE_TIME = delete_time
scheduler.ACTIVE_TASKS = active_tasks
scheduler.LOOP_LIMIT = loop_limit
scheduler.FAIL_PAUSE_NUM = fail_pause_num
g.instances.append(scheduler)
if g.get('testing_mode') or get_object:
return scheduler
if xmlrpc:
utils.run_in_thread(scheduler.xmlrpc_run, port=xmlrpc_port, bind=xmlrpc_host)
scheduler.run()
|
Run Scheduler, only one scheduler is allowed.
|
373,754
|
def delete_model_translation(self, request, translation):
master = translation.master
for qs in self.get_translation_objects(request, translation.language_code, obj=master, inlines=self.delete_inline_translations):
if isinstance(qs, (tuple, list)):
for obj in qs:
obj.delete()
else:
qs.delete()
|
Hook for deleting a translation.
This calls :func:`get_translation_objects` to collect all related objects for the translation.
By default, that includes the translations for inline objects.
|
373,755
|
def is_floating(self):
return (
self.is_numpy_compatible and np.issubdtype(self.as_numpy_dtype, np.floating)
) or self.base_dtype == bfloat16
|
Returns whether this is a (non-quantized, real) floating point type.
|
373,756
|
def getstats(self, save=True, filename=None, samples=None, subset=None, ablation_time=False):
slst = []
if samples is not None:
subset = self.make_subset(samples)
samples = self._get_samples(subset)
for s in self.stats_calced:
for nm in [n for n in samples if self.srm_identifier
not in n]:
if self.stats[nm][s].ndim == 2:
reps = np.arange(self.stats[nm][s].shape[-1])
ss = np.array([s] * reps.size)
nms = np.array([nm] * reps.size)
stdf = pd.DataFrame(self.stats[nm][s].T,
columns=self.stats[nm][],
index=[ss, nms, reps])
stdf.index.set_names([, , ],
inplace=True)
else:
stdf = pd.DataFrame(self.stats[nm][s],
index=self.stats[nm][],
columns=[[s], [nm]]).T
stdf.index.set_names([, ],
inplace=True)
slst.append(stdf)
out = pd.concat(slst)
if ablation_time:
ats = self.ablation_times(samples=samples, subset=subset)
ats[] =
ats.set_index(, append=True, inplace=True)
ats = ats.reorder_levels([, , ])
out = out.join(ats)
out.drop(self.internal_standard, 1, inplace=True)
if save:
if filename is None:
filename =
out.to_csv(self.export_dir + + filename)
self.stats_df = out
return out
|
Return pandas dataframe of all sample statistics.
|
373,757
|
def handle(self, argv=None):
desc = (
)
parser = argparse.ArgumentParser(description=desc)
parser.add_argument(
, , action=,
version=pkg_resources.get_distribution("zappa").version,
help=
)
parser.add_argument(
, default=, choices=[,,]
)
env_parser = argparse.ArgumentParser(add_help=False)
me_group = env_parser.add_mutually_exclusive_group()
all_help = (
)
me_group.add_argument(, action=, help=all_help)
me_group.add_argument(, nargs=)
group = env_parser.add_argument_group()
group.add_argument(
, , help=
)
group.add_argument(
, , help=
)
group.add_argument(
, , action=, help=
)
group.add_argument(
, , action=, help=
)
group.add_argument(
, action=, help=
)
subparsers = parser.add_subparsers(title=, dest=)
cert_parser = subparsers.add_parser(
, parents=[env_parser],
help=
)
cert_parser.add_argument(
, action=,
help=("Gets new Let-y--yesstore_trueAuto confirm yes.deployDeploy application.-z--zipDeploy Lambda with specific local or S3 hosted zip packageinitInitialize Zappa app.packageBuild the application zip package locally.-o--outputName of file to output the package to.templateCreate a CloudFormation template for this API Gateway.-l--lambda-arnARN of the Lambda function to template to.-r--role-arnARN of the Role to template with.-o--outputName of file to output the template to.invokeInvoke remote function.--rawstore_trueWhen invoking remotely, invoke this python as a string, not as a modular path.--no-colorstore_truet color the output")
)
invoke_parser.add_argument()
manage_parser = subparsers.add_parser(
,
help=
)
rest_help = ("Command in the form of <env> <command>. <env> is not "
"required if --all is specified")
manage_parser.add_argument(, action=, help=all_help)
manage_parser.add_argument(, nargs=, help=rest_help)
manage_parser.add_argument(
, action=,
help=("Dont inherit from env_parser
manage_parser.add_argument(
, , help=
)
def positive_int(s):
i = int(s)
if i < 0:
msg = "This argument must be positive (got {})".format(s)
raise argparse.ArgumentTypeError(msg)
return i
rollback_parser = subparsers.add_parser(
, parents=[env_parser],
help=
)
rollback_parser.add_argument(
, , type=positive_int, default=1,
help=
)
subparsers.add_parser(
, parents=[env_parser],
help=
)
status_parser = subparsers.add_parser(
, parents=[env_parser],
help=
)
tail_parser = subparsers.add_parser(
, parents=[env_parser], help=
)
tail_parser.add_argument(
, action=,
help="Don--httpstore_trueOnly show HTTP requests in tail output.--non-httpstore_trueOnly show non-HTTP requests in tail output.--since--filter--force-colorstore_trueForce coloring log tail output even if coloring support is not auto-detected. (example: piping)--disable-keep-openstore_trueundeployUndeploy application.--remove-logsstore_trueRemoves log groups of api gateway and lambda task during the undeployment.-y--yesstore_trueAuto confirm yes.unscheduleUnschedule functions.updateUpdate deployed application.-z--zipUpdate Lambda with specific local or S3 hosted zip package-n--no-uploadt upload new code"
)
subparsers.add_parser(
, parents=[env_parser], help=
)
argcomplete.autocomplete(parser)
args = parser.parse_args(argv)
self.vargs = vars(args)
if args.color == :
disable_click_colors()
elif args.color == :
pass
elif args.color == :
pass
if not args.command:
parser.print_help()
return
if args.command == and not self.vargs.get():
self.stage_env = self.vargs[].pop(0)
else:
self.stage_env = self.vargs.get()
if args.command == :
self.load_credentials = False
self.command = args.command
self.disable_progress = self.vargs.get()
if self.vargs.get():
self.silence()
if not self.vargs.get():
self.check_for_update()
self.load_settings_file(self.vargs.get())
all_stages = self.vargs.get()
stages = []
if all_stages:
stages = self.zappa_settings.keys()
else:
if not self.stage_env:
if len(self.zappa_settings.keys()) == 1:
stages.append(list(self.zappa_settings.keys())[0])
else:
parser.error("Please supply a stage to interact with.")
else:
stages.append(self.stage_env)
for stage in stages:
try:
self.dispatch_command(self.command, stage)
except ClickException as e:
e.show()
sys.exit(e.exit_code)
|
Main function.
Parses command, load settings and dispatches accordingly.
|
373,758
|
def serviceQueues(self, limit=None):
r = self.dequeue_pre_prepares()
r += self.inBoxRouter.handleAllSync(self.inBox, limit)
r += self.send_3pc_batch()
r += self._serviceActions()
return r
|
Process `limit` number of messages in the inBox.
:param limit: the maximum number of messages to process
:return: the number of messages successfully processed
|
373,759
|
def dereplicate_seqs(seqs_fp,
output_fp,
min_size=2,
use_log=False,
threads=1):
logger = logging.getLogger(__name__)
logger.info('dereplicate seqs file %s' % seqs_fp)
log_name = "%s.log" % output_fp
params = ['vsearch', '--derep_fulllength', seqs_fp,
'--output', output_fp, '--sizeout',
'--fasta_width', '0', '--minuniquesize', str(min_size),
'--quiet', '--threads', str(threads)]
if use_log:
params.extend(['--log', log_name])
sout, serr, res = _system_call(params)
if not res == 0:
logger.error( %
seqs_fp)
logger.debug( % params)
logger.debug( % sout)
logger.debug( % serr)
return
|
Dereplicate FASTA sequences and remove singletons using VSEARCH.
Parameters
----------
seqs_fp : string
filepath to FASTA sequence file
output_fp : string
file path to dereplicated sequences (FASTA format)
min_size : integer, optional
discard sequences with an abundance value smaller
than integer
use_log: boolean, optional
save the vsearch logfile as well (to output_fp.log)
default=False
threads : int, optional
number of threads to use (0 for all available)
|
373,760
|
def from_spec(spec, kwargs=None):
layer = util.get_object(
obj=spec,
predefined_objects=tensorforce.core.networks.layers,
kwargs=kwargs
)
assert isinstance(layer, Layer)
return layer
|
Creates a layer from a specification dict.
|
373,761
|
def lines(self):
if self._cache.lines is None:
self._cache.lines = _ImmutableLineList(self.text.split('\n'))
return self._cache.lines
|
Array of all the lines.
|
373,762
|
def get_rsn_ie(defcipher, defauth, data):
answers = dict()
answers[] = data[0] + (data[1] << 8)
data = data[2:]
if len(data) < 4:
answers[] = answers[] = defcipher
return answers
answers[] = get_cipher(data)
data = data[4:]
if len(data) < 2:
answers[] = defcipher
return answers
count = data[0] | (data[1] << 8)
if 2 + (count * 4) > len(data):
answers[] = data
return answers
answers[] = .join(get_cipher(data[2 + (i * 4):]) for i in range(count))
data = data[2 + (count * 4):]
if len(data) < 2:
answers[] = defauth
return answers
count = data[0] | (data[1] << 8)
if 2 + (count * 4) > len(data):
answers[] = data
return answers
answers[] = .join(get_auth(data[2 + (i * 4):]) for i in range(count))
data = data[2 + (count * 4):]
if len(data) >= 2:
capa = data[0] | (data[1] << 8)
answers[] = list()
if capa & 0x0001:
answers[].append()
if capa & 0x0002:
answers[].append()
case = {0: , 1: , 2: , 3: }.get((capa & 0x000c) >> 2)
if case:
answers[].append(case)
case = {0: , 1: , 2: , 3: }.get((capa & 0x0030) >> 4)
if case:
answers[].append(case)
if capa & 0x0040:
answers[].append()
if capa & 0x0080:
answers[].append()
if capa & 0x0200:
answers[].append()
if capa & 0x0400:
answers[].append()
if capa & 0x0800:
answers[].append()
answers[].append(.format(capa))
data = data[2:]
invalid = False
if len(data) >= 2:
pmkid_count = data[0] | (data[1] << 8)
if len(data) >= 2 + 16 * pmkid_count:
answers[] = pmkid_count
data = data[2 + 16 * pmkid_count:]
else:
invalid = True
if len(data) >= 4 and not invalid:
answers[] = get_cipher(data)
data = data[4:]
if data:
answers[.format(len(data))] = .join(format(x, ) for x in data)
return answers
|
http://git.kernel.org/cgit/linux/kernel/git/jberg/iw.git/tree/scan.c?id=v3.17#n441.
Positional arguments:
defcipher -- default cipher if not in data (string).
defauth -- default authentication suites if not in data (string).
data -- bytearray data to read.
Returns:
Dict.
|
373,763
|
def root(self):
sector = self.header.directory_sector_start
position = (sector + 1) << self.header.sector_shift
return RootEntry(self, position)
|
Property provides access to root object in CFB.
|
373,764
|
def songs_iter(self, *, continuation_token=None, export_type=1):
def track_info_to_dict(track_info):
return dict(
(field.name, value)
for field, value in track_info.ListFields()
)
while True:
response = self._call(
mm_calls.ExportIDs,
self.uploader_id,
continuation_token=continuation_token,
export_type=export_type
)
items = [
track_info_to_dict(track_info)
for track_info in response.body.download_track_info
]
if items:
yield items
continuation_token = response.body.continuation_token
if not continuation_token:
break
|
Get a paged iterator of Music Library songs.
Parameters:
continuation_token (str, Optional): The token of the page to return.
Default: Not sent to get first page.
export_type (int, Optional): The type of tracks to return. 1 for all tracks, 2 for promotional and purchased.
Default: ``1``
Yields:
list: Song dicts.
|
373,765
|
def _cdata_io(self, action, data, ctype, frames):
assert ctype in _ffi_types.values()
self._check_if_closed()
if self.seekable():
curr = self.tell()
func = getattr(_snd, 'sf_' + action + 'f_' + ctype)
frames = func(self._file, data, frames)
_error_check(self._errorcode)
if self.seekable():
self.seek(curr + frames, SEEK_SET)
return frames
|
Call one of libsndfile's read/write functions.
|
373,766
|
def set_attributes(self, **kwargs):
self.clear_derived()
kwargs = dict(kwargs)
for name, value in kwargs.items():
try:
self.getp(name)
except KeyError:
print ("Warning: %s does not have attribute %s" %
(type(self), name))
try:
self.setp(name, clear_derived=False, **value)
except TypeError:
try:
self.setp(name, clear_derived=False, *value)
except (TypeError, KeyError):
try:
self.setp(name, clear_derived=False, value=value)
except (TypeError, KeyError):
self.__setattr__(name, value)
self._missing.pop(name, None)
if self._missing:
raise ValueError(
"One or more required properties are missing ",
self._missing.keys())
|
Set a group of attributes (parameters and members). Calls
`setp` directly, so kwargs can include more than just the
parameter value (e.g., bounds, free, etc.).
|
373,767
|
def pairdists(alignment, subs_model, alpha=None, ncat=4, tolerance=1e-6, verbose=False):
if not isinstance(subs_model, phylo_utils.models.Model):
raise ValueError("Can't handle this model: {}".format(model))
if alpha is None:
alpha = 1.0
ncat = 1
tm = TransitionMatrix(subs_model)
gamma_rates = discrete_gamma(alpha, ncat)
partials = alignment_to_partials(alignment)
seqnames = alignment.get_names()
nseq = len(seqnames)
distances = np.zeros((nseq, nseq))
variances = np.zeros((nseq, nseq))
if not subs_model.size == partials[seqnames[0]].shape[1]:
raise ValueError("Model {} expects {} states, but the alignment has {}".format(model.name,
model.size,
partials[seqnames[0]].shape[1]))
nodes = [phylo_utils.likelihood.LnlModel(tm) for seq in range(nseq)]
for node, header in zip(nodes, seqnames):
node.set_partials(partials[header])
for i, j in itertools.combinations(range(nseq), 2):
brlen, var = brent_optimise(nodes[i], nodes[j], verbose=verbose)
distances[i, j] = distances[j, i] = brlen
variances[i, j] = variances[j, i] = var
dm = DistanceMatrix.from_array(distances, names=seqnames)
vm = DistanceMatrix.from_array(variances, names=seqnames)
return dm, vm
|
Load an alignment, calculate all pairwise distances and variances.
The subs_model parameter must be a substitution model type from phylo_utils.
|
373,768
|
def prep_hla(work_dir, sample, calls, hlas, normal_bam, tumor_bam):
work_dir = utils.safe_makedir(os.path.join(work_dir, sample, "inputs"))
hla_file = os.path.join(work_dir, "%s-hlas.txt" % sample)
with open(calls) as in_handle:
with open(hla_file, "w") as out_handle:
next(in_handle)
for line in in_handle:
_, _, a, _, _ = line.strip().split(",")
a1, a2 = a.split(";")
out_handle.write(get_hla_choice(name_to_absolute(a1), hlas, normal_bam, tumor_bam) + "\n")
out_handle.write(get_hla_choice(name_to_absolute(a2), hlas, normal_bam, tumor_bam) + "\n")
return hla_file
|
Convert HLAs into ABSOLUTE format for use with LOHHLA.
LOHHLA hard codes names to hla_a, hla_b, hla_c, so the names need to be converted into that format.
|
373,769
|
def evaluation_get(self, service_staff_id, start_date, end_date, session):
request = TOPRequest('taobao.wangwang.eservice.evaluation.get')
request['service_staff_id'] = service_staff_id
request['start_date'] = start_date
request['end_date'] = end_date
self.create(self.execute(request, session))
return self.staff_eval_stat_on_days
|
taobao.wangwang.eservice.evaluation.get — customer service evaluation statistics
Given an operator ID, returns the daily "customer service evaluation statistics" for each queried account within the specified date range. Notes:
- 1. If the operator ID equals the queried ID, the queried ID's own statistics are returned.
- 2. If the operator is a group administrator, they may query the statistics of every sub-account in their group.
- 3. If the operator is the main account, they may query the statistics of all sub-accounts.
- 4. Multiple queried IDs may be given, separated by ","; at most 30 IDs.
- 5. The interval between the start date and the end date must not exceed 7 days.
- 6. Data older than 90 days cannot be queried.
- 7. The current day's records cannot be queried.
|
373,770
|
def genealogic_types(self):
types = []
parent = self
while parent:
types.append(parent.rest_name)
parent = parent.parent_object
return types
|
Get genealogic types
Returns:
Returns a list of all parent types
|
373,771
|
def _delete(self, namespace, stream, start_id, end_time, configuration):
start_id_event = Event(start_id)
end_id_event = Event(uuid_from_kronos_time(end_time,
_type=UUIDType.HIGHEST))
stream_events = self.db[namespace][stream]
lo = bisect.bisect_left(stream_events, start_id_event)
if lo + 1 > len(stream_events):
return 0, []
if stream_events[lo] == start_id_event:
lo += 1
hi = bisect.bisect_right(stream_events, end_id_event)
del stream_events[lo:hi]
return max(0, hi - lo), []
|
Delete events with id > `start_id` and end_time <= `end_time`.
|
373,772
|
def delete_lbaas_member(self, lbaas_member, lbaas_pool):
return self.delete(self.lbaas_member_path % (lbaas_pool, lbaas_member))
|
Deletes the specified lbaas_member.
|
373,773
|
def json_minify(string, strip_space=True):
in_string = False
in_multi = False
in_single = False
new_str = []
index = 0
for match in re.finditer(TOKENIZER, string):
if not (in_multi or in_single):
tmp = string[index:match.start()]
if not in_string and strip_space:
tmp = re.sub('[ \t\n\r]+', '', tmp)
new_str.append(tmp)
index = match.end()
val = match.group()
if val == '"' and not (in_multi or in_single):
escaped = END_SLASHES_RE.search(string, 0, match.start())
if not in_string or (escaped is None or len(escaped.group()) % 2 == 0):
in_string = not in_string
index -= 1
elif not (in_string or in_multi or in_single):
if val == '/*':
in_multi = True
elif val == '//':
in_single = True
elif val == '*/' and in_multi and not (in_string or in_single):
in_multi = False
elif val in '\r\n' and not (in_multi or in_string) and in_single:
in_single = False
elif not ((in_multi or in_single) or (val in ' \r\n\t' and strip_space)):
new_str.append(val)
new_str.append(string[index:])
return ''.join(new_str)
|
Removes whitespace from json strings, returning the string
|
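A usage sketch (the module-level `TOKENIZER` and `END_SLASHES_RE` regexes are assumed to be defined as in the original library):

```python
raw = '''{
    // a line comment
    "key": "value /* kept: inside a string */"  /* stripped */
}'''
print(json_minify(raw))
# -> {"key":"value /* kept: inside a string */"}
```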
373,774
|
def removeFriend(self, friend_id=None):
payload = {"friend_id": friend_id, "unref": "none", "confirm": "Confirm"}
r = self._post(self.req_url.REMOVE_FRIEND, payload)
query = parse_qs(urlparse(r.url).query)
if "err" not in query:
log.debug("Remove was successful!")
return True
else:
log.warning("Error while removing friend")
return False
|
Removes a specified friend from your friend list
:param friend_id: The ID of the friend that you want to remove
:return: False if the removal was unsuccessful, True when successful.
|
373,775
|
def create(obj: PersistedObject, obj_type: Type[T], errors: Dict[Type, Exception]):
e = NoParserFoundForUnionType(
.format(obj=obj, typ=get_pretty_type_str(obj_type), errs=errors))
e.errors = errors
return e
|
Helper method provided because this can't go in the constructor; putting it there creates a bug in Nose tests
https://github.com/nose-devs/nose/issues/725
:param obj:
:param errors: a dictionary of the errors raised for each alternate type tried
:return:
|
373,776
|
def delete_joined_table_sql(qualified_name, removing_qualified_name, primary_key):
condition_template =
where_clause = .join(condition_template.format(pkey, pkey)
for pkey in primary_key)
delete_statement = (
).format(table=qualified_name,
delete_table=removing_qualified_name,
where_clause=where_clause)
return delete_statement
|
SQL statement for a joined delete from.
Generate SQL statement for deleting the intersection of rows between
both tables from table referenced by tablename.
|
373,777
|
def parse_colors(path):
if path.endswith(".txt"):
return parse_rgb_txt_file(path)
elif path.endswith(".json"):
return parse_json_color_file(path)
raise TypeError("colorful only supports .txt and .json files for colors")
|
Parse the given color file.
Supported are:
* .txt for X11 colors
* .json for colornames
|
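Dispatch is purely extension-based; a hypothetical usage sketch:

```python
rgb_colors = parse_colors('rgb.txt')       # parsed as X11 rgb.txt
json_colors = parse_colors('colors.json')  # parsed as colornames JSON
parse_colors('colors.yaml')                # raises TypeError
```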
373,778
|
def has_option(self, option_name):
if self.prefix:
option_name = self.prefix + self.seperator + option_name
item_names = option_name.split(self.seperator)
node = self._storage
for item_name in item_names:
if node is None:
return False
if item_name not in node:
return False
node = node[item_name]
return True
|
Check that an option exists.
:param str option_name: The name of the option to check.
:return: True if the option exists in the configuration.
:rtype: bool
|
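A hypothetical usage sketch, assuming a config object whose `_storage` holds nested dicts, with `'.'` as the separator and no prefix:

```python
# config._storage == {'server': {'listen': {'port': 8080}}}
config.has_option('server.listen.port')  # True
config.has_option('server.tls')          # False
```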
373,779
|
def derive_from_seed(self, offset):
seed = int(hexlify(bytes(self)).decode("ascii"), 16)
z = int(hexlify(offset).decode("ascii"), 16)
order = ecdsa.SECP256k1.order
secexp = (seed + z) % order
secret = "%0x" % secexp
if len(secret) < 64:
secret = ("0" * (64-len(secret))) + secret
return PrivateKey(secret, prefix=self.pubkey.prefix)
|
Derive private key using "generate_from_seed" method.
Here, the key itself serves as a `seed`, and `offset`
is expected to be a sha256 digest.
|
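A hypothetical usage sketch (`pk` stands for an existing PrivateKey instance):

```python
import hashlib

# The offset must be a sha256 digest; the derivation is deterministic,
# so the same offset always yields the same child key.
offset = hashlib.sha256(b'child-key-0').digest()
child = pk.derive_from_seed(offset)
```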
373,780
|
def __shouldSysExit(self, iteration):
if self._exitAfter is None \
or iteration < self._exitAfter:
return False
results = self._jobsDAO.modelsGetFieldsForJob(self._jobID, [])
modelIDs = [e[0] for e in results]
modelNums = [json.loads(e[1][0])[][] for e in results]
sameModelNumbers = filter(lambda x: x[1] == self.modelIndex,
zip(modelIDs, modelNums))
firstModelID = min(zip(*sameModelNumbers)[0])
return firstModelID == self._modelID
|
Checks to see if the model should exit based on the exitAfter dummy
parameter
|
373,781
|
def items_to_extract(self, offset=0, length=None):
endoffset = length and offset + length
qs = self.origin_data()[offset:endoffset]
self.items_to_extract_length = qs.count()
return qs
|
Return an iterable of specific items to extract.
As a side-effect, set self.items_to_extract_length.
:param offset: where to start extracting
:param length: how many to extract
:return: An iterable of the specific items to extract.
|
373,782
|
def team_absent(name, profile="github", **kwargs):
ret = {
'name': name,
'changes': {},
'result': None,
'comment': ''
}
target = __salt__['github.get_team'](name, profile=profile, **kwargs)
if not target:
ret['comment'] = 'Team {0} does not exist'.format(name)
ret['result'] = True
return ret
else:
if __opts__['test']:
ret['comment'] = "Team {0} will be deleted".format(name)
ret['result'] = None
return ret
result = __salt__['github.remove_team'](name, profile=profile, **kwargs)
if result:
ret['comment'] = 'Deleted team {0}'.format(name)
ret['changes'].setdefault('old', 'Team {0} exists'.format(name))
ret['changes'].setdefault('new', 'Team {0} deleted'.format(name))
ret['result'] = True
else:
ret['comment'] = 'Failed to delete team {0}'.format(name)
ret['result'] = False
return ret
|
Ensure a team is absent.
Example:
.. code-block:: yaml
ensure team test is absent in github:
github.team_absent:
- name: 'test'
The following parameters are required:
name
This is the name of the team in the organization.
.. versionadded:: 2016.11.0
|
373,783
|
def findCaller(self, stack_info=False, callers=0):
f = logging.currentframe()
sinfo = sinfo[:-1]
sio.close()
rv = (co.co_filename, f.f_lineno, co.co_name, sinfo)
break
return rv
|
Find the stack frame of the caller so that we can note the source
file name, line number and function name. Not only ignore this class's
and logger's source, but also as many callers as requested.
|
373,784
|
def _call_lrem(self, command, count, value, *args, **kwargs):
if not count:
if self.indexable:
self.deindex([value])
return self._traverse_command(command, count, value, *args, **kwargs)
else:
return self._reset(command, count, value, *args, **kwargs)
|
If count is 0, we remove all elements equal to value, so we know we have
nothing to index, and this value to deindex. Otherwise, we don't
know how many elements will remain in the list, so we have to do a full
deindex/reindex. So do it carefully.
|
373,785
|
def ellplot (mjr, mnr, pa):
_ellcheck (mjr, mnr, pa)
import omega as om
th = np.linspace (0, 2 * np.pi, 200)
x, y = ellpoint (mjr, mnr, pa, th)
return om.quickXY (x, y, 'mjr=%f mnr=%f pa=%f' %
(mjr, mnr, pa * 180 / np.pi))
|
Utility for debugging.
|
373,786
|
def members(self, as_set=False):
if as_set:
return frozenset(map(self._members.__getitem__, self._indexes()))
return tuple(map(self._members.__getitem__, self._indexes()))
|
Return the set members tuple/frozenset.
|
373,787
|
def get_context_from_xlsx(self):
if re.search(r'^https?://', self.project.CONTEXT_SOURCE_FILE):
resp = requests.get(self.project.CONTEXT_SOURCE_FILE)
content = resp.content
else:
try:
with open(self.project.CONTEXT_SOURCE_FILE) as xlsxfile:
content = xlsxfile.read()
except IOError:
filepath = "%s/%s" % (
os.path.abspath(self.path),
self.project.CONTEXT_SOURCE_FILE)
with open(filepath) as xlsxfile:
content = xlsxfile.read()
data = process_xlsx(content)
if 'values' in data:
data = copy_global_values(data)
return data
|
Get context from an Excel file
|
373,788
|
def update_repository(self, repository_form):
if self._catalog_session is not None:
return self._catalog_session.update_catalog(catalog_form=repository_form)
collection = JSONClientValidated('repository',
collection='Repository',
runtime=self._runtime)
if not isinstance(repository_form, ABCRepositoryForm):
raise errors.InvalidArgument()
if not repository_form.is_for_update():
raise errors.InvalidArgument()
try:
if self._forms[repository_form.get_id().get_identifier()] == UPDATED:
raise errors.IllegalState()
except KeyError:
raise errors.Unsupported()
if not repository_form.is_valid():
raise errors.InvalidArgument()
collection.save(repository_form._my_map)
self._forms[repository_form.get_id().get_identifier()] = UPDATED
return objects.Repository(osid_object_map=repository_form._my_map, runtime=self._runtime, proxy=self._proxy)
|
Updates an existing repository.
arg: repository_form (osid.repository.RepositoryForm): the
form containing the elements to be updated
raise: IllegalState - ``repository_form`` already used in an
update transaction
raise: InvalidArgument - the form contains an invalid value
raise: NullArgument - ``repository_form`` is ``null``
raise: OperationFailed - unable to complete request
raise: PermissionDenied - authorization failure
raise: Unsupported - ``repository_form`` did not originate from
``get_repository_form_for_update()``
*compliance: mandatory -- This method must be implemented.*
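A minimal usage sketch of the form-based update cycle this method enforces
(the session variable and attribute value are illustrative):
.. code-block:: python
    # Fetch a form bound to an existing repository, edit it, then commit.
    form = admin_session.get_repository_form_for_update(repository_id)
    form.display_name = 'Course Images'  # hypothetical metadata change
    updated = admin_session.update_repository(form)
    # Submitting the same form again raises IllegalState; a form that did
    # not come from get_repository_form_for_update() raises Unsupported.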
|
373,789
|
def as_list(self):
    "return some attributes as a list"
    netloc = ''
    if self.vpath_connector:
        # Delimiters reconstructed; the original wrapped the connector in
        # literal markers that were stripped from this dump.
        netloc = '[' + self.vpath_connector + ']'
    elif self.authority:
        netloc = self.authority
    else:
        netloc = self.netloc
    return [
        self.scheme,
        netloc,
        self.path,
        self.query,
        '',  # fragment slot, left empty in the original
    ]
|
return some attributes as a list
|
373,790
|
def grab_zipped_url(zipped_url, ensure=True, appname='utool',
                    download_dir=None, force_commonprefix=True, cleanup=False,
                    redownload=False, spoof=False):
zipped_url = clean_dropbox_link(zipped_url)
zip_fname = split(zipped_url)[1]
data_name = split_archive_ext(zip_fname)[0]
if download_dir is None:
download_dir = util_cplat.get_app_cache_dir(appname)
data_dir = join(download_dir, data_name)
if ensure or redownload:
if redownload:
util_path.remove_dirs(data_dir)
util_path.ensurepath(download_dir)
if not exists(data_dir) or redownload:
zip_fpath = realpath(join(download_dir, zip_fname))
if not exists(zip_fpath) or redownload:
download_url(zipped_url, zip_fpath, spoof=spoof)
unarchive_file(zip_fpath, force_commonprefix)
if cleanup:
util_path.delete(zip_fpath)
if cleanup:
util_path.assert_exists(data_dir)
return util_path.unixpath(data_dir)
|
r"""
downloads and unzips the url
Args:
zipped_url (str): url which must be either a .zip or a .tar.gz file
ensure (bool): eager evaluation if True(default = True)
appname (str): (default = 'utool')
download_dir (str): directory to download into (default: the app cache dir)
force_commonprefix (bool): (default = True)
cleanup (bool): (default = False)
redownload (bool): (default = False)
spoof (bool): (default = False)
CommandLine:
python -m utool.util_grabdata --exec-grab_zipped_url --show
Example:
>>> # DISABLE_DOCTEST
>>> from utool.util_grabdata import * # NOQA
>>> import utool as ut
>>> zipped_url = '?'
>>> ensure = True
>>> appname = 'utool'
>>> download_dir = None
>>> force_commonprefix = True
>>> cleanup = False
>>> redownload = False
>>> spoof = False
>>> result = grab_zipped_url(zipped_url, ensure, appname, download_dir,
>>> force_commonprefix, cleanup, redownload,
>>> spoof)
>>> print(result)
Examples:
>>> # DISABLE_DOCTEST
>>> from utool.util_grabdata import * # NOQA
>>> zipped_url = 'https://lev.cs.rpi.edu/public/data/testdata.zip'
>>> zipped_url = 'http://www.spam.com/eggs/data.zip'
|
373,791
|
def _diff_and_summarize(from_csv, to_csv, index_columns, stream=sys.stdout,
                        sep=',', ignored_columns=None, significance=None):
from_records = list(records.load(from_csv, sep=sep))
to_records = records.load(to_csv, sep=sep)
diff = patch.create(from_records, to_records, index_columns, ignored_columns)
if significance is not None:
diff = patch.filter_significance(diff, significance)
_summarize_diff(diff, len(from_records), stream=stream)
exit_code = (EXIT_SAME
if patch.is_empty(diff)
else EXIT_DIFFERENT)
sys.exit(exit_code)
|
Print a summary of the difference between the two files.
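A minimal stand-in for the records/patch pipeline above, showing the same
key-indexed comparison and exit-code convention with only the stdlib (file
names and the key column are illustrative):
.. code-block:: python
    import csv
    import sys
    def diff_by_key(from_path, to_path, key):
        # Index both files by `key`, then compare the row dicts.
        def load(path):
            with open(path, newline='') as f:
                return {row[key]: row for row in csv.DictReader(f)}
        a, b = load(from_path), load(to_path)
        added = b.keys() - a.keys()
        removed = a.keys() - b.keys()
        changed = {k for k in a.keys() & b.keys() if a[k] != b[k]}
        return added, removed, changed
    added, removed, changed = diff_by_key('before.csv', 'after.csv', 'id')
    sys.exit(0 if not (added or removed or changed) else 1)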
|
373,792
|
def refreshResults( self ):
if ( self.currentMode() == XOrbBrowserWidget.Mode.Detail ):
self.refreshDetails()
elif ( self.currentMode() == XOrbBrowserWidget.Mode.Card ):
self.refreshCards()
else:
self.refreshThumbnails()
|
Joins together the queries from the fixed system, the search, and the
query builder to generate a query for the browser to display.
|
373,793
|
def _set_collector_ip(self, v, load=False):
    if hasattr(v, "_utype"):
        v = v._utype(v)
    try:
        # Namespace/module strings reconstructed from the Brocade telemetry
        # YANG model; the CLI extension strings were stripped from this dump
        # and are not reconstructed here.
        t = YANGDynClass(v, base=collector_ip.collector_ip,
                         is_container='container', presence=False,
                         yang_name="collector-ip", rest_name="ip",
                         parent=self, path_helper=self._path_helper,
                         extmethods=self._extmethods, register_paths=True,
                         extensions=None,
                         namespace='urn:brocade.com:mgmt:brocade-telemetry',
                         defining_module='brocade-telemetry',
                         yang_type='container', is_config=True)
    except (TypeError, ValueError):
        raise ValueError({
            'error-string': """collector_ip must be of a type compatible with container""",
            'defined-type': "container",
            'generated-type': """YANGDynClass(base=collector_ip.collector_ip, ...)""",
        })
    self.__collector_ip = t
    if hasattr(self, '_set'):
        self._set()
|
Setter method for collector_ip, mapped from YANG variable /telemetry/collector/collector_ip (container)
If this variable is read-only (config: false) in the
source YANG file, then _set_collector_ip is considered as a private
method. Backends looking to populate this variable should
do so via calling thisObj._set_collector_ip() directly.
|
373,794
|
def is_dec(ip):
try:
dec = int(str(ip))
except ValueError:
return False
if dec > 4294967295 or dec < 0:
return False
return True
|
Return true if the IP address is in decimal notation.
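For example (inputs chosen for illustration):
.. code-block:: python
    is_dec('3232235777')  # True  -- the decimal form of 192.168.1.1
    is_dec('4294967296')  # False -- one past the 32-bit maximum
    is_dec('10.0.0.1')    # False -- int() rejects dotted-quad strings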
|
373,795
|
def check_membership(self, groups):
    # No required groups means everyone passes.
    if not groups:
        return True
    if self.request.user.is_superuser:
        return True
    user_groups = self.request.user.groups.values_list("name", flat=True)
    return set(groups).intersection(set(user_groups))
|
Return True when no groups are required or the user is a superuser;
otherwise return the (possibly empty) intersection of the required
groups and the user's groups.
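A minimal sketch of gating a Django class-based view with this helper
(``GroupAccessMixin`` stands in for whatever class defines
``check_membership``; group names are illustrative):
.. code-block:: python
    from django.core.exceptions import PermissionDenied
    from django.views.generic import TemplateView
    class ReportView(GroupAccessMixin, TemplateView):
        template_name = 'report.html'
        def dispatch(self, request, *args, **kwargs):
            # check_membership returns True, or a set that is empty on failure.
            if not self.check_membership(['analysts', 'admins']):
                raise PermissionDenied
            return super().dispatch(request, *args, **kwargs)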
|
373,796
|
def connect(self, output_port, input_port):
ConnectionClass = output_port.connection_class
self.connections.append(ConnectionClass(output_port, input_port))
|
Connect two :class:`.Effect` instances in this pedalboard.
To do so, pass the originating output port and the destination input port::
>>> pedalboard.append(driver)
>>> pedalboard.append(reverb)
>>> driver_output = driver.outputs[0]
>>> reverb_input = reverb.inputs[0]
>>> Connection(driver_output, reverb_input) in driver.connections
False
>>> pedalboard.connect(driver_output, reverb_input)
>>> Connection(driver_output, reverb_input) in driver.connections
True
:param Port output_port: Effect output port
:param Port input_port: Effect input port
|
373,797
|
def get_hla(sample, cromwell_dir, hla_glob):
hla_dir = glob.glob(os.path.join(cromwell_dir, hla_glob, "align", sample, "hla"))[0]
fastq = os.path.join(hla_dir, "OptiType-HLA-A_B_C-input.fq")
calls = os.path.join(hla_dir, "%s-optitype.csv" % sample)
return fastq, calls
|
Retrieve HLA calls and input fastqs for a sample.
|
373,798
|
def set_max_image_pixels(self, pixels):
    if pixels:
        Image.MAX_IMAGE_PIXELS = pixels
        # Escalate the bomb warning to an error, per the docstring.
        Image.warnings.simplefilter(
            'error', Image.DecompressionBombWarning)
|
Set PIL limit on pixel size of images to load if non-zero.
WARNING: This is a global setting in PIL, it is
not local to this manipulator instance!
Setting a value here will not only set the given limit but
also convert the PIL "DecompressionBombWarning" into an
error. Thus setting a moderate limit sets a hard limit on
image size loaded, setting a very large limit will have the
effect of disabling the warning.
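A minimal standalone sketch of the same Pillow knobs (the limit and file name
are illustrative):
.. code-block:: python
    import warnings
    from PIL import Image
    Image.MAX_IMAGE_PIXELS = 50_000_000  # ~50 MP cap; global to the process
    warnings.simplefilter('error', Image.DecompressionBombWarning)
    try:
        Image.open('huge_scan.tiff')  # hypothetical oversized file
    except Image.DecompressionBombWarning:
        print('refused: image exceeds the configured pixel limit')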
|
373,799
|
def _compute_stddevs(self, C, mag, rjb, imt, stddev_types):
    # NOTE: coefficient column names were stripped from this dump; the
    # keys below are reconstructed placeholders, not the original names.
    sigma_ale_m = np.interp(mag, [5.0, 5.5, 8.0],
                            [C['sig_M5.0'], C['sig_M5.5'], C['sig_M8.0']])
    sigma_ale_rjb = np.interp(rjb, [5.0, 20.0], [C['sig_R5'], C['sig_R20']])
    sigma_ale = np.sqrt(sigma_ale_m ** 2 + sigma_ale_rjb ** 2)
    if imt.period < 1:
        sigma_epi = 0.36 + 0.07 * (mag - 6)
    else:
        sigma_epi = 0.34 + 0.06 * (mag - 6)
    sigma_total = np.sqrt(sigma_ale ** 2 + sigma_epi ** 2)
    stddevs = []
    for _ in stddev_types:
        stddevs.append(sigma_total)
    return stddevs
|
Compute total standard deviation, equations 5 and 6, page 48.
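In equation form, the combination implemented above is:
.. math::
    \sigma_{ale} = \sqrt{\sigma_{ale,M}^{2} + \sigma_{ale,R_{jb}}^{2}},
    \qquad
    \sigma_{total} = \sqrt{\sigma_{ale}^{2} + \sigma_{epi}^{2}},
with
.. math::
    \sigma_{epi} =
    \begin{cases}
    0.36 + 0.07\,(M - 6), & T < 1\ \mathrm{s} \\
    0.34 + 0.06\,(M - 6), & T \ge 1\ \mathrm{s}
    \end{cases}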
|