text stringlengths 89 104k | code_tokens list | avg_line_len float64 7.91 980 | score float64 0 630 |
|---|---|---|---|
def query(self, model):
'''Return a query for ``model`` when it needs to be indexed.
'''
session = self.router.session()
fields = tuple((f.name for f in model._meta.scalarfields
if f.type == 'text'))
qs = session.query(model).load_only(*fields)
for related in self.get_related_fields(model):
qs = qs.load_related(related)
return qs | [
"def",
"query",
"(",
"self",
",",
"model",
")",
":",
"session",
"=",
"self",
".",
"router",
".",
"session",
"(",
")",
"fields",
"=",
"tuple",
"(",
"(",
"f",
".",
"name",
"for",
"f",
"in",
"model",
".",
"_meta",
".",
"scalarfields",
"if",
"f",
".",
"type",
"==",
"'text'",
")",
")",
"qs",
"=",
"session",
".",
"query",
"(",
"model",
")",
".",
"load_only",
"(",
"*",
"fields",
")",
"for",
"related",
"in",
"self",
".",
"get_related_fields",
"(",
"model",
")",
":",
"qs",
"=",
"qs",
".",
"load_related",
"(",
"related",
")",
"return",
"qs"
] | 42.3 | 15.7 |
def handle_cable(cable, handler, standalone=True):
"""\
Emits event from the provided `cable` to the handler.
`cable`
A cable object.
`handler`
A ICableHandler instance.
`standalone`
Indicates if a `start` and `end` event should be
issued (default: ``True``).
If `standalone` is set to ``False``, no ``handler.start()``
and ``handler.end()`` event will be issued.
"""
def datetime(dt):
date, time = dt.split(u' ')
if len(time) == 5:
time += u':00'
time += u'Z'
return u'T'.join([date, time])
if standalone:
handler.start()
handler.start_cable(cable.reference_id, cable.canonical_id)
for iri in cable.wl_uris:
handler.handle_wikileaks_iri(iri)
handler.handle_creation_datetime(datetime(cable.created))
if cable.released:
handler.handle_release_date(cable.released[:10])
if cable.nondisclosure_deadline:
handler.handle_nondisclosure_deadline(cable.nondisclosure_deadline)
if cable.transmission_id:
handler.handle_transmission_id(cable.transmission_id)
if cable.subject:
handler.handle_subject(cable.subject)
if cable.summary:
handler.handle_summary(cable.summary)
if cable.comment:
handler.handle_comment(cable.comment)
handler.handle_header(cable.header)
handler.handle_content(cable.content)
handler.handle_origin(cable.origin)
handler.handle_classification(cable.classification)
handler.handle_partial(cable.partial)
for cat in cable.classification_categories:
handler.handle_classification_category(cat)
for classificationist in cable.classificationists:
handler.handle_classificationist(classificationist)
for signer in cable.signers:
handler.handle_signer(signer)
for tag in cable.tags:
handler.handle_tag(tag)
for iri in cable.media_uris:
handler.handle_media_iri(iri)
for rec in cable.recipients:
handler.handle_recipient(rec)
for rec in cable.info_recipients:
handler.handle_info_recipient(rec)
for ref in cable.references:
handler.handle_reference(ref)
handler.end_cable()
if standalone:
handler.end() | [
"def",
"handle_cable",
"(",
"cable",
",",
"handler",
",",
"standalone",
"=",
"True",
")",
":",
"def",
"datetime",
"(",
"dt",
")",
":",
"date",
",",
"time",
"=",
"dt",
".",
"split",
"(",
"u' '",
")",
"if",
"len",
"(",
"time",
")",
"==",
"5",
":",
"time",
"+=",
"u':00'",
"time",
"+=",
"u'Z'",
"return",
"u'T'",
".",
"join",
"(",
"[",
"date",
",",
"time",
"]",
")",
"if",
"standalone",
":",
"handler",
".",
"start",
"(",
")",
"handler",
".",
"start_cable",
"(",
"cable",
".",
"reference_id",
",",
"cable",
".",
"canonical_id",
")",
"for",
"iri",
"in",
"cable",
".",
"wl_uris",
":",
"handler",
".",
"handle_wikileaks_iri",
"(",
"iri",
")",
"handler",
".",
"handle_creation_datetime",
"(",
"datetime",
"(",
"cable",
".",
"created",
")",
")",
"if",
"cable",
".",
"released",
":",
"handler",
".",
"handle_release_date",
"(",
"cable",
".",
"released",
"[",
":",
"10",
"]",
")",
"if",
"cable",
".",
"nondisclosure_deadline",
":",
"handler",
".",
"handle_nondisclosure_deadline",
"(",
"cable",
".",
"nondisclosure_deadline",
")",
"if",
"cable",
".",
"transmission_id",
":",
"handler",
".",
"handle_transmission_id",
"(",
"cable",
".",
"transmission_id",
")",
"if",
"cable",
".",
"subject",
":",
"handler",
".",
"handle_subject",
"(",
"cable",
".",
"subject",
")",
"if",
"cable",
".",
"summary",
":",
"handler",
".",
"handle_summary",
"(",
"cable",
".",
"summary",
")",
"if",
"cable",
".",
"comment",
":",
"handler",
".",
"handle_comment",
"(",
"cable",
".",
"comment",
")",
"handler",
".",
"handle_header",
"(",
"cable",
".",
"header",
")",
"handler",
".",
"handle_content",
"(",
"cable",
".",
"content",
")",
"handler",
".",
"handle_origin",
"(",
"cable",
".",
"origin",
")",
"handler",
".",
"handle_classification",
"(",
"cable",
".",
"classification",
")",
"handler",
".",
"handle_partial",
"(",
"cable",
".",
"partial",
")",
"for",
"cat",
"in",
"cable",
".",
"classification_categories",
":",
"handler",
".",
"handle_classification_category",
"(",
"cat",
")",
"for",
"classificationist",
"in",
"cable",
".",
"classificationists",
":",
"handler",
".",
"handle_classificationist",
"(",
"classificationist",
")",
"for",
"signer",
"in",
"cable",
".",
"signers",
":",
"handler",
".",
"handle_signer",
"(",
"signer",
")",
"for",
"tag",
"in",
"cable",
".",
"tags",
":",
"handler",
".",
"handle_tag",
"(",
"tag",
")",
"for",
"iri",
"in",
"cable",
".",
"media_uris",
":",
"handler",
".",
"handle_media_iri",
"(",
"iri",
")",
"for",
"rec",
"in",
"cable",
".",
"recipients",
":",
"handler",
".",
"handle_recipient",
"(",
"rec",
")",
"for",
"rec",
"in",
"cable",
".",
"info_recipients",
":",
"handler",
".",
"handle_info_recipient",
"(",
"rec",
")",
"for",
"ref",
"in",
"cable",
".",
"references",
":",
"handler",
".",
"handle_reference",
"(",
"ref",
")",
"handler",
".",
"end_cable",
"(",
")",
"if",
"standalone",
":",
"handler",
".",
"end",
"(",
")"
] | 35.564516 | 12.516129 |
def getElementsByAttr(self, attrName, attrValue):
'''
getElementsByAttr - Search children of this tag for tags with an attribute name/value pair
@param attrName - Attribute name (lowercase)
@param attrValue - Attribute value
@return - TagCollection of matching elements
'''
elements = []
for child in self.children:
if child.getAttribute(attrName) == attrValue:
elements.append(child)
elements += child.getElementsByAttr(attrName, attrValue)
return TagCollection(elements) | [
"def",
"getElementsByAttr",
"(",
"self",
",",
"attrName",
",",
"attrValue",
")",
":",
"elements",
"=",
"[",
"]",
"for",
"child",
"in",
"self",
".",
"children",
":",
"if",
"child",
".",
"getAttribute",
"(",
"attrName",
")",
"==",
"attrValue",
":",
"elements",
".",
"append",
"(",
"child",
")",
"elements",
"+=",
"child",
".",
"getElementsByAttr",
"(",
"attrName",
",",
"attrValue",
")",
"return",
"TagCollection",
"(",
"elements",
")"
] | 39.2 | 21.333333 |
def use_args(
self,
argmap,
req=None,
locations=core.Parser.DEFAULT_LOCATIONS,
as_kwargs=False,
validate=None,
error_status_code=None,
error_headers=None,
):
"""Decorator that injects parsed arguments into a view callable.
Supports the *Class-based View* pattern where `request` is saved as an instance
attribute on a view class.
:param dict argmap: Either a `marshmallow.Schema`, a `dict`
of argname -> `marshmallow.fields.Field` pairs, or a callable
which accepts a request and returns a `marshmallow.Schema`.
:param req: The request object to parse. Pulled off of the view by default.
:param tuple locations: Where on the request to search for values.
:param bool as_kwargs: Whether to insert arguments as keyword arguments.
:param callable validate: Validation function that receives the dictionary
of parsed arguments. If the function returns ``False``, the parser
will raise a :exc:`ValidationError`.
:param int error_status_code: Status code passed to error handler functions when
a `ValidationError` is raised.
:param dict error_headers: Headers passed to error handler functions when a
a `ValidationError` is raised.
"""
locations = locations or self.locations
# Optimization: If argmap is passed as a dictionary, we only need
# to generate a Schema once
if isinstance(argmap, collections.Mapping):
argmap = core.dict2schema(argmap, self.schema_class)()
def decorator(func):
@functools.wraps(func)
def wrapper(obj, *args, **kwargs):
# The first argument is either `self` or `request`
try: # get self.request
request = req or obj.request
except AttributeError: # first arg is request
request = obj
# NOTE: At this point, argmap may be a Schema, callable, or dict
parsed_args = self.parse(
argmap,
req=request,
locations=locations,
validate=validate,
error_status_code=error_status_code,
error_headers=error_headers,
)
if as_kwargs:
kwargs.update(parsed_args)
return func(obj, *args, **kwargs)
else:
return func(obj, parsed_args, *args, **kwargs)
wrapper.__wrapped__ = func
return wrapper
return decorator | [
"def",
"use_args",
"(",
"self",
",",
"argmap",
",",
"req",
"=",
"None",
",",
"locations",
"=",
"core",
".",
"Parser",
".",
"DEFAULT_LOCATIONS",
",",
"as_kwargs",
"=",
"False",
",",
"validate",
"=",
"None",
",",
"error_status_code",
"=",
"None",
",",
"error_headers",
"=",
"None",
",",
")",
":",
"locations",
"=",
"locations",
"or",
"self",
".",
"locations",
"# Optimization: If argmap is passed as a dictionary, we only need",
"# to generate a Schema once",
"if",
"isinstance",
"(",
"argmap",
",",
"collections",
".",
"Mapping",
")",
":",
"argmap",
"=",
"core",
".",
"dict2schema",
"(",
"argmap",
",",
"self",
".",
"schema_class",
")",
"(",
")",
"def",
"decorator",
"(",
"func",
")",
":",
"@",
"functools",
".",
"wraps",
"(",
"func",
")",
"def",
"wrapper",
"(",
"obj",
",",
"*",
"args",
",",
"*",
"*",
"kwargs",
")",
":",
"# The first argument is either `self` or `request`",
"try",
":",
"# get self.request",
"request",
"=",
"req",
"or",
"obj",
".",
"request",
"except",
"AttributeError",
":",
"# first arg is request",
"request",
"=",
"obj",
"# NOTE: At this point, argmap may be a Schema, callable, or dict",
"parsed_args",
"=",
"self",
".",
"parse",
"(",
"argmap",
",",
"req",
"=",
"request",
",",
"locations",
"=",
"locations",
",",
"validate",
"=",
"validate",
",",
"error_status_code",
"=",
"error_status_code",
",",
"error_headers",
"=",
"error_headers",
",",
")",
"if",
"as_kwargs",
":",
"kwargs",
".",
"update",
"(",
"parsed_args",
")",
"return",
"func",
"(",
"obj",
",",
"*",
"args",
",",
"*",
"*",
"kwargs",
")",
"else",
":",
"return",
"func",
"(",
"obj",
",",
"parsed_args",
",",
"*",
"args",
",",
"*",
"*",
"kwargs",
")",
"wrapper",
".",
"__wrapped__",
"=",
"func",
"return",
"wrapper",
"return",
"decorator"
] | 43.311475 | 19.52459 |
def process_memory_map(attrs=None, where=None):
'''
Return process_memory_map information from osquery
CLI Example:
.. code-block:: bash
salt '*' osquery.process_memory_map
'''
if __grains__['os_family'] in ['RedHat', 'Debian']:
return _osquery_cmd(table='process_memory_map', attrs=attrs, where=where)
return {'result': False, 'comment': 'Only available on Red Hat or Debian based systems.'} | [
"def",
"process_memory_map",
"(",
"attrs",
"=",
"None",
",",
"where",
"=",
"None",
")",
":",
"if",
"__grains__",
"[",
"'os_family'",
"]",
"in",
"[",
"'RedHat'",
",",
"'Debian'",
"]",
":",
"return",
"_osquery_cmd",
"(",
"table",
"=",
"'process_memory_map'",
",",
"attrs",
"=",
"attrs",
",",
"where",
"=",
"where",
")",
"return",
"{",
"'result'",
":",
"False",
",",
"'comment'",
":",
"'Only available on Red Hat or Debian based systems.'",
"}"
] | 32.846154 | 27.615385 |
def delete_project(self, project):
"""
Deletes all versions of a project. First class, maps to Scrapyd's
delete project endpoint.
"""
url = self._build_url(constants.DELETE_PROJECT_ENDPOINT)
data = {
'project': project,
}
self.client.post(url, data=data, timeout=self.timeout)
return True | [
"def",
"delete_project",
"(",
"self",
",",
"project",
")",
":",
"url",
"=",
"self",
".",
"_build_url",
"(",
"constants",
".",
"DELETE_PROJECT_ENDPOINT",
")",
"data",
"=",
"{",
"'project'",
":",
"project",
",",
"}",
"self",
".",
"client",
".",
"post",
"(",
"url",
",",
"data",
"=",
"data",
",",
"timeout",
"=",
"self",
".",
"timeout",
")",
"return",
"True"
] | 32.909091 | 16.181818 |
def transition(self, duration, brightness=None):
""" Transition wrapper.
Short-circuit transition if necessary.
:param duration: Duration of transition.
:param brightness: Transition to this brightness.
"""
if duration == 0:
if brightness is not None:
self.brightness = brightness
return
if brightness != self.brightness:
self._transition(duration, brightness) | [
"def",
"transition",
"(",
"self",
",",
"duration",
",",
"brightness",
"=",
"None",
")",
":",
"if",
"duration",
"==",
"0",
":",
"if",
"brightness",
"is",
"not",
"None",
":",
"self",
".",
"brightness",
"=",
"brightness",
"return",
"if",
"brightness",
"!=",
"self",
".",
"brightness",
":",
"self",
".",
"_transition",
"(",
"duration",
",",
"brightness",
")"
] | 32.642857 | 12.357143 |
def initinfo(self) -> Tuple[Union[float, int, bool], bool]:
"""The actual initial value of the given parameter.
Some |Parameter| subclasses define another value for class
attribute `INIT` than |None| to provide a default value.
Let's define a parameter test class and prepare a function for
initialising it and connecting the resulting instance to a
|SubParameters| object:
>>> from hydpy.core.parametertools import Parameter, SubParameters
>>> class Test(Parameter):
... NDIM = 0
... TYPE = float
... TIME = None
... INIT = 2.0
>>> class SubGroup(SubParameters):
... CLASSES = (Test,)
>>> def prepare():
... subpars = SubGroup(None)
... test = Test(subpars)
... test.__hydpy__connect_variable2subgroup__()
... return test
By default, making use of the `INIT` attribute is disabled:
>>> test = prepare()
>>> test
test(?)
Enable it through setting |Options.usedefaultvalues| to |True|:
>>> from hydpy import pub
>>> pub.options.usedefaultvalues = True
>>> test = prepare()
>>> test
test(2.0)
When no `INIT` attribute is defined, enabling
|Options.usedefaultvalues| has no effect, of course:
>>> del Test.INIT
>>> test = prepare()
>>> test
test(?)
For time-dependent parameter values, the `INIT` attribute is assumed
to be related to a |Parameterstep| of one day:
>>> test.parameterstep = '2d'
>>> test.simulationstep = '12h'
>>> Test.INIT = 2.0
>>> Test.TIME = True
>>> test = prepare()
>>> test
test(4.0)
>>> test.value
1.0
"""
init = self.INIT
if (init is not None) and hydpy.pub.options.usedefaultvalues:
with Parameter.parameterstep('1d'):
return self.apply_timefactor(init), True
return variabletools.TYPE2MISSINGVALUE[self.TYPE], False | [
"def",
"initinfo",
"(",
"self",
")",
"->",
"Tuple",
"[",
"Union",
"[",
"float",
",",
"int",
",",
"bool",
"]",
",",
"bool",
"]",
":",
"init",
"=",
"self",
".",
"INIT",
"if",
"(",
"init",
"is",
"not",
"None",
")",
"and",
"hydpy",
".",
"pub",
".",
"options",
".",
"usedefaultvalues",
":",
"with",
"Parameter",
".",
"parameterstep",
"(",
"'1d'",
")",
":",
"return",
"self",
".",
"apply_timefactor",
"(",
"init",
")",
",",
"True",
"return",
"variabletools",
".",
"TYPE2MISSINGVALUE",
"[",
"self",
".",
"TYPE",
"]",
",",
"False"
] | 32.0625 | 20.40625 |
def dump_to_stream(self, cnf, stream, **kwargs):
"""
Dump config 'cnf' to a file or file-like object 'stream'.
:param cnf: Shell variables data to dump
:param stream: Shell script file or file like object
:param kwargs: backend-specific optional keyword parameters :: dict
"""
for key, val in anyconfig.compat.iteritems(cnf):
stream.write("%s='%s'%s" % (key, val, os.linesep)) | [
"def",
"dump_to_stream",
"(",
"self",
",",
"cnf",
",",
"stream",
",",
"*",
"*",
"kwargs",
")",
":",
"for",
"key",
",",
"val",
"in",
"anyconfig",
".",
"compat",
".",
"iteritems",
"(",
"cnf",
")",
":",
"stream",
".",
"write",
"(",
"\"%s='%s'%s\"",
"%",
"(",
"key",
",",
"val",
",",
"os",
".",
"linesep",
")",
")"
] | 43.6 | 17.4 |
def getInfo(self):
"""
Returns a DevInfo instance, a named tuple with the following items:
- bustype: one of BUS_USB, BUS_HIL, BUS_BLUETOOTH or BUS_VIRTUAL
- vendor: device's vendor number
- product: device's product number
"""
devinfo = _hidraw_devinfo()
self._ioctl(_HIDIOCGRAWINFO, devinfo, True)
return DevInfo(devinfo.bustype, devinfo.vendor, devinfo.product) | [
"def",
"getInfo",
"(",
"self",
")",
":",
"devinfo",
"=",
"_hidraw_devinfo",
"(",
")",
"self",
".",
"_ioctl",
"(",
"_HIDIOCGRAWINFO",
",",
"devinfo",
",",
"True",
")",
"return",
"DevInfo",
"(",
"devinfo",
".",
"bustype",
",",
"devinfo",
".",
"vendor",
",",
"devinfo",
".",
"product",
")"
] | 42.7 | 13.9 |
def update(self, roomId, title=None, **request_parameters):
"""Update details for a room, by ID.
Args:
roomId(basestring): The room ID.
title(basestring): A user-friendly name for the room.
**request_parameters: Additional request parameters (provides
support for parameters that may be added in the future).
Returns:
Room: A Room object with the updated Webex Teams room details.
Raises:
TypeError: If the parameter types are incorrect.
ApiError: If the Webex Teams cloud returns an error.
"""
check_type(roomId, basestring, may_be_none=False)
check_type(roomId, basestring)
put_data = dict_from_items_with_values(
request_parameters,
title=title,
)
# API request
json_data = self._session.put(API_ENDPOINT + '/' + roomId,
json=put_data)
# Return a room object created from the response JSON data
return self._object_factory(OBJECT_TYPE, json_data) | [
"def",
"update",
"(",
"self",
",",
"roomId",
",",
"title",
"=",
"None",
",",
"*",
"*",
"request_parameters",
")",
":",
"check_type",
"(",
"roomId",
",",
"basestring",
",",
"may_be_none",
"=",
"False",
")",
"check_type",
"(",
"roomId",
",",
"basestring",
")",
"put_data",
"=",
"dict_from_items_with_values",
"(",
"request_parameters",
",",
"title",
"=",
"title",
",",
")",
"# API request",
"json_data",
"=",
"self",
".",
"_session",
".",
"put",
"(",
"API_ENDPOINT",
"+",
"'/'",
"+",
"roomId",
",",
"json",
"=",
"put_data",
")",
"# Return a room object created from the response JSON data",
"return",
"self",
".",
"_object_factory",
"(",
"OBJECT_TYPE",
",",
"json_data",
")"
] | 34.83871 | 23.580645 |
def hpx_to_axes(h, npix):
""" Generate a sequence of bin edge vectors corresponding to the
axes of a HPX object."""
x = h.ebins
z = np.arange(npix[-1] + 1)
return x, z | [
"def",
"hpx_to_axes",
"(",
"h",
",",
"npix",
")",
":",
"x",
"=",
"h",
".",
"ebins",
"z",
"=",
"np",
".",
"arange",
"(",
"npix",
"[",
"-",
"1",
"]",
"+",
"1",
")",
"return",
"x",
",",
"z"
] | 26 | 16.285714 |
def solve_one(self, expr, constrain=False):
"""
Concretize a symbolic :class:`~manticore.core.smtlib.expression.Expression` into
one solution.
:param manticore.core.smtlib.Expression expr: Symbolic value to concretize
:param bool constrain: If True, constrain expr to concretized value
:return: Concrete value
:rtype: int
"""
expr = self.migrate_expression(expr)
value = self._solver.get_value(self._constraints, expr)
if constrain:
self.constrain(expr == value)
#Include forgiveness here
if isinstance(value, bytearray):
value = bytes(value)
return value | [
"def",
"solve_one",
"(",
"self",
",",
"expr",
",",
"constrain",
"=",
"False",
")",
":",
"expr",
"=",
"self",
".",
"migrate_expression",
"(",
"expr",
")",
"value",
"=",
"self",
".",
"_solver",
".",
"get_value",
"(",
"self",
".",
"_constraints",
",",
"expr",
")",
"if",
"constrain",
":",
"self",
".",
"constrain",
"(",
"expr",
"==",
"value",
")",
"#Include forgiveness here",
"if",
"isinstance",
"(",
"value",
",",
"bytearray",
")",
":",
"value",
"=",
"bytes",
"(",
"value",
")",
"return",
"value"
] | 37.5 | 16.611111 |
def show_network(self):
"""!
@brief Shows structure of the network: neurons and connections between them.
"""
dimension = len(self.__location[0])
if (dimension != 3) and (dimension != 2):
raise NameError('Network that is located in different from 2-d and 3-d dimensions can not be represented')
(fig, axes) = self.__create_surface(dimension)
for i in range(0, self.__num_osc, 1):
if dimension == 2:
axes.plot(self.__location[i][0], self.__location[i][1], 'bo')
for j in range(i, self.__num_osc, 1): # draw connection between two points only one time
if self.__weights[i][j] > 0.0:
axes.plot([self.__location[i][0], self.__location[j][0]], [self.__location[i][1], self.__location[j][1]], 'b-', linewidth = 0.5)
elif dimension == 3:
axes.scatter(self.__location[i][0], self.__location[i][1], self.__location[i][2], c = 'b', marker = 'o')
for j in range(i, self.__num_osc, 1): # draw connection between two points only one time
if self.__weights[i][j] > 0.0:
axes.plot([self.__location[i][0], self.__location[j][0]], [self.__location[i][1], self.__location[j][1]], [self.__location[i][2], self.__location[j][2]], 'b-', linewidth = 0.5)
plt.grid()
plt.show() | [
"def",
"show_network",
"(",
"self",
")",
":",
"dimension",
"=",
"len",
"(",
"self",
".",
"__location",
"[",
"0",
"]",
")",
"if",
"(",
"dimension",
"!=",
"3",
")",
"and",
"(",
"dimension",
"!=",
"2",
")",
":",
"raise",
"NameError",
"(",
"'Network that is located in different from 2-d and 3-d dimensions can not be represented'",
")",
"(",
"fig",
",",
"axes",
")",
"=",
"self",
".",
"__create_surface",
"(",
"dimension",
")",
"for",
"i",
"in",
"range",
"(",
"0",
",",
"self",
".",
"__num_osc",
",",
"1",
")",
":",
"if",
"dimension",
"==",
"2",
":",
"axes",
".",
"plot",
"(",
"self",
".",
"__location",
"[",
"i",
"]",
"[",
"0",
"]",
",",
"self",
".",
"__location",
"[",
"i",
"]",
"[",
"1",
"]",
",",
"'bo'",
")",
"for",
"j",
"in",
"range",
"(",
"i",
",",
"self",
".",
"__num_osc",
",",
"1",
")",
":",
"# draw connection between two points only one time\r",
"if",
"self",
".",
"__weights",
"[",
"i",
"]",
"[",
"j",
"]",
">",
"0.0",
":",
"axes",
".",
"plot",
"(",
"[",
"self",
".",
"__location",
"[",
"i",
"]",
"[",
"0",
"]",
",",
"self",
".",
"__location",
"[",
"j",
"]",
"[",
"0",
"]",
"]",
",",
"[",
"self",
".",
"__location",
"[",
"i",
"]",
"[",
"1",
"]",
",",
"self",
".",
"__location",
"[",
"j",
"]",
"[",
"1",
"]",
"]",
",",
"'b-'",
",",
"linewidth",
"=",
"0.5",
")",
"elif",
"dimension",
"==",
"3",
":",
"axes",
".",
"scatter",
"(",
"self",
".",
"__location",
"[",
"i",
"]",
"[",
"0",
"]",
",",
"self",
".",
"__location",
"[",
"i",
"]",
"[",
"1",
"]",
",",
"self",
".",
"__location",
"[",
"i",
"]",
"[",
"2",
"]",
",",
"c",
"=",
"'b'",
",",
"marker",
"=",
"'o'",
")",
"for",
"j",
"in",
"range",
"(",
"i",
",",
"self",
".",
"__num_osc",
",",
"1",
")",
":",
"# draw connection between two points only one time\r",
"if",
"self",
".",
"__weights",
"[",
"i",
"]",
"[",
"j",
"]",
">",
"0.0",
":",
"axes",
".",
"plot",
"(",
"[",
"self",
".",
"__location",
"[",
"i",
"]",
"[",
"0",
"]",
",",
"self",
".",
"__location",
"[",
"j",
"]",
"[",
"0",
"]",
"]",
",",
"[",
"self",
".",
"__location",
"[",
"i",
"]",
"[",
"1",
"]",
",",
"self",
".",
"__location",
"[",
"j",
"]",
"[",
"1",
"]",
"]",
",",
"[",
"self",
".",
"__location",
"[",
"i",
"]",
"[",
"2",
"]",
",",
"self",
".",
"__location",
"[",
"j",
"]",
"[",
"2",
"]",
"]",
",",
"'b-'",
",",
"linewidth",
"=",
"0.5",
")",
"plt",
".",
"grid",
"(",
")",
"plt",
".",
"show",
"(",
")"
] | 53.392857 | 35.357143 |
def auto_data_specs(self):
"""
Data specs in the sub-study class that are not explicitly provided
in the name map
"""
for spec in self.study_class.data_specs():
if spec.name not in self._name_map:
yield spec | [
"def",
"auto_data_specs",
"(",
"self",
")",
":",
"for",
"spec",
"in",
"self",
".",
"study_class",
".",
"data_specs",
"(",
")",
":",
"if",
"spec",
".",
"name",
"not",
"in",
"self",
".",
"_name_map",
":",
"yield",
"spec"
] | 33.5 | 12 |
def clearFilter(self):
"""
Clear all filters.
"""
if self._dataFrameOriginal is not None:
self.layoutAboutToBeChanged.emit()
self._dataFrame = self._dataFrameOriginal
self._dataFrameOriginal = None
self.layoutChanged.emit() | [
"def",
"clearFilter",
"(",
"self",
")",
":",
"if",
"self",
".",
"_dataFrameOriginal",
"is",
"not",
"None",
":",
"self",
".",
"layoutAboutToBeChanged",
".",
"emit",
"(",
")",
"self",
".",
"_dataFrame",
"=",
"self",
".",
"_dataFrameOriginal",
"self",
".",
"_dataFrameOriginal",
"=",
"None",
"self",
".",
"layoutChanged",
".",
"emit",
"(",
")"
] | 32.777778 | 7 |
def _draw_number(screen, x_offset, y_offset, number, style='class:clock',
transparent=False):
" Write number at position. "
fg = Char(' ', 'class:clock')
bg = Char(' ', '')
for y, row in enumerate(_numbers[number]):
screen_row = screen.data_buffer[y + y_offset]
for x, n in enumerate(row):
if n == '#':
screen_row[x + x_offset] = fg
elif not transparent:
screen_row[x + x_offset] = bg | [
"def",
"_draw_number",
"(",
"screen",
",",
"x_offset",
",",
"y_offset",
",",
"number",
",",
"style",
"=",
"'class:clock'",
",",
"transparent",
"=",
"False",
")",
":",
"fg",
"=",
"Char",
"(",
"' '",
",",
"'class:clock'",
")",
"bg",
"=",
"Char",
"(",
"' '",
",",
"''",
")",
"for",
"y",
",",
"row",
"in",
"enumerate",
"(",
"_numbers",
"[",
"number",
"]",
")",
":",
"screen_row",
"=",
"screen",
".",
"data_buffer",
"[",
"y",
"+",
"y_offset",
"]",
"for",
"x",
",",
"n",
"in",
"enumerate",
"(",
"row",
")",
":",
"if",
"n",
"==",
"'#'",
":",
"screen_row",
"[",
"x",
"+",
"x_offset",
"]",
"=",
"fg",
"elif",
"not",
"transparent",
":",
"screen_row",
"[",
"x",
"+",
"x_offset",
"]",
"=",
"bg"
] | 36.769231 | 12.769231 |
def _set_int(self, commands, name):
"""
set integer value from commands
"""
if name in commands:
try:
value = int(commands[name])
setattr(self, name, value)
except ValueError:
pass | [
"def",
"_set_int",
"(",
"self",
",",
"commands",
",",
"name",
")",
":",
"if",
"name",
"in",
"commands",
":",
"try",
":",
"value",
"=",
"int",
"(",
"commands",
"[",
"name",
"]",
")",
"setattr",
"(",
"self",
",",
"name",
",",
"value",
")",
"except",
"ValueError",
":",
"pass"
] | 27.5 | 7.7 |
def deleted(self):
'Return datetime.datetime or None if the file isnt deleted'
_d = self.folder.attrib.get('deleted', None)
if _d is None: return None
return dateutil.parser.parse(str(_d)) | [
"def",
"deleted",
"(",
"self",
")",
":",
"_d",
"=",
"self",
".",
"folder",
".",
"attrib",
".",
"get",
"(",
"'deleted'",
",",
"None",
")",
"if",
"_d",
"is",
"None",
":",
"return",
"None",
"return",
"dateutil",
".",
"parser",
".",
"parse",
"(",
"str",
"(",
"_d",
")",
")"
] | 43.2 | 14.4 |
def provide_label(self):
"""The name and shape of label provided by this iterator"""
return [(k, tuple([self.batch_size] + list(v.shape[1:]))) for k, v in self.label] | [
"def",
"provide_label",
"(",
"self",
")",
":",
"return",
"[",
"(",
"k",
",",
"tuple",
"(",
"[",
"self",
".",
"batch_size",
"]",
"+",
"list",
"(",
"v",
".",
"shape",
"[",
"1",
":",
"]",
")",
")",
")",
"for",
"k",
",",
"v",
"in",
"self",
".",
"label",
"]"
] | 60 | 21.666667 |
def endpoint_delete(service, region=None, profile=None, interface=None, **connection_args):
'''
Delete endpoints of an Openstack service
CLI Examples:
.. code-block:: bash
salt 'v2' keystone.endpoint_delete nova [region=RegionOne]
salt 'v3' keystone.endpoint_delete nova interface=admin [region=RegionOne]
'''
kstone = auth(profile, **connection_args)
endpoint = endpoint_get(service, region, profile, interface, **connection_args)
if not endpoint or 'Error' in endpoint:
return {'Error': 'Could not find any endpoints for the service'}
kstone.endpoints.delete(endpoint['id'])
endpoint = endpoint_get(service, region, profile, interface, **connection_args)
if not endpoint or 'Error' in endpoint:
return True | [
"def",
"endpoint_delete",
"(",
"service",
",",
"region",
"=",
"None",
",",
"profile",
"=",
"None",
",",
"interface",
"=",
"None",
",",
"*",
"*",
"connection_args",
")",
":",
"kstone",
"=",
"auth",
"(",
"profile",
",",
"*",
"*",
"connection_args",
")",
"endpoint",
"=",
"endpoint_get",
"(",
"service",
",",
"region",
",",
"profile",
",",
"interface",
",",
"*",
"*",
"connection_args",
")",
"if",
"not",
"endpoint",
"or",
"'Error'",
"in",
"endpoint",
":",
"return",
"{",
"'Error'",
":",
"'Could not find any endpoints for the service'",
"}",
"kstone",
".",
"endpoints",
".",
"delete",
"(",
"endpoint",
"[",
"'id'",
"]",
")",
"endpoint",
"=",
"endpoint_get",
"(",
"service",
",",
"region",
",",
"profile",
",",
"interface",
",",
"*",
"*",
"connection_args",
")",
"if",
"not",
"endpoint",
"or",
"'Error'",
"in",
"endpoint",
":",
"return",
"True"
] | 38.45 | 27.05 |
def _link_rels(obj, fields=None, save=False, overwrite=False):
"""Populate any database related fields (ForeignKeyField, OneToOneField) that have `_get`ters to populate them with"""
if not fields:
meta = obj._meta
fields = [f.name for f in meta.fields if hasattr(f, 'do_related_class') and not f.primary_key and hasattr(meta, '_get_' + f.name) and hasattr(meta, '_' + f.name)]
for field in fields:
# skip fields if they contain non-null data and `overwrite` option wasn't set
if not overwrite and not isinstance(getattr(obj, field, None), NoneType):
# print 'skipping %s which already has a value of %s' % (field, getattr(obj, field, None))
continue
if hasattr(obj, field):
setattr(obj, field, getattr(obj, '_' + field, None))
if save:
obj.save()
return obj | [
"def",
"_link_rels",
"(",
"obj",
",",
"fields",
"=",
"None",
",",
"save",
"=",
"False",
",",
"overwrite",
"=",
"False",
")",
":",
"if",
"not",
"fields",
":",
"meta",
"=",
"obj",
".",
"_meta",
"fields",
"=",
"[",
"f",
".",
"name",
"for",
"f",
"in",
"meta",
".",
"fields",
"if",
"hasattr",
"(",
"f",
",",
"'do_related_class'",
")",
"and",
"not",
"f",
".",
"primary_key",
"and",
"hasattr",
"(",
"meta",
",",
"'_get_'",
"+",
"f",
".",
"name",
")",
"and",
"hasattr",
"(",
"meta",
",",
"'_'",
"+",
"f",
".",
"name",
")",
"]",
"for",
"field",
"in",
"fields",
":",
"# skip fields if they contain non-null data and `overwrite` option wasn't set",
"if",
"not",
"overwrite",
"and",
"not",
"isinstance",
"(",
"getattr",
"(",
"obj",
",",
"field",
",",
"None",
")",
",",
"NoneType",
")",
":",
"# print 'skipping %s which already has a value of %s' % (field, getattr(obj, field, None))",
"continue",
"if",
"hasattr",
"(",
"obj",
",",
"field",
")",
":",
"setattr",
"(",
"obj",
",",
"field",
",",
"getattr",
"(",
"obj",
",",
"'_'",
"+",
"field",
",",
"None",
")",
")",
"if",
"save",
":",
"obj",
".",
"save",
"(",
")",
"return",
"obj"
] | 56.466667 | 32.2 |
def ex6_2(n):
"""
Generate a triangle pulse as described in Example 6-2
of Chapter 6.
You need to supply an index array n that covers at least [-2, 5].
The function returns the hard-coded signal of the example.
Parameters
----------
n : time index ndarray covering at least -2 to +5.
Returns
-------
x : ndarray of signal samples in x
Examples
--------
>>> import numpy as np
>>> import matplotlib.pyplot as plt
>>> from sk_dsp_comm import sigsys as ss
>>> n = np.arange(-5,8)
>>> x = ss.ex6_2(n)
>>> plt.stem(n,x) # creates a stem plot of x vs n
"""
x = np.zeros(len(n))
for k, nn in enumerate(n):
if nn >= -2 and nn <= 5:
x[k] = 8 - nn
return x | [
"def",
"ex6_2",
"(",
"n",
")",
":",
"x",
"=",
"np",
".",
"zeros",
"(",
"len",
"(",
"n",
")",
")",
"for",
"k",
",",
"nn",
"in",
"enumerate",
"(",
"n",
")",
":",
"if",
"nn",
">=",
"-",
"2",
"and",
"nn",
"<=",
"5",
":",
"x",
"[",
"k",
"]",
"=",
"8",
"-",
"nn",
"return",
"x"
] | 25 | 19.466667 |
def get_function(rule, domain, normalize, **parameters):
"""
Create a quadrature function and set default parameter values.
Args:
rule (str):
Name of quadrature rule defined in ``QUAD_FUNCTIONS``.
domain (Dist, numpy.ndarray):
Defines ``lower`` and ``upper`` that is passed quadrature rule. If
``Dist``, ``domain`` is renamed to ``dist`` and also
passed.
normalize (bool):
In the case of distributions, the abscissas and weights are not
tailored to a distribution beyond matching the bounds. If True, the
samples are normalized multiplying the weights with the density of
the distribution evaluated at the abscissas and normalized
afterwards to sum to one.
parameters (:py:data:typing.Any):
Redefining of the parameter defaults. Only add parameters that the
quadrature rule expect.
Returns:
(:py:data:typing.Callable):
Function that can be called only using argument ``order``.
"""
from ...distributions.baseclass import Dist
if isinstance(domain, Dist):
lower, upper = domain.range()
parameters["dist"] = domain
else:
lower, upper = numpy.array(domain)
parameters["lower"] = lower
parameters["upper"] = upper
quad_function = QUAD_FUNCTIONS[rule]
parameters_spec = inspect.getargspec(quad_function)[0]
parameters_spec = {key: None for key in parameters_spec}
del parameters_spec["order"]
for key in parameters_spec:
if key in parameters:
parameters_spec[key] = parameters[key]
def _quad_function(order, *args, **kws):
"""Implementation of quadrature function."""
params = parameters_spec.copy()
params.update(kws)
abscissas, weights = quad_function(order, *args, **params)
# normalize if prudent:
if rule in UNORMALIZED_QUADRATURE_RULES and normalize:
if isinstance(domain, Dist):
if len(domain) == 1:
weights *= domain.pdf(abscissas).flatten()
else:
weights *= domain.pdf(abscissas)
weights /= numpy.sum(weights)
return abscissas, weights
return _quad_function | [
"def",
"get_function",
"(",
"rule",
",",
"domain",
",",
"normalize",
",",
"*",
"*",
"parameters",
")",
":",
"from",
".",
".",
".",
"distributions",
".",
"baseclass",
"import",
"Dist",
"if",
"isinstance",
"(",
"domain",
",",
"Dist",
")",
":",
"lower",
",",
"upper",
"=",
"domain",
".",
"range",
"(",
")",
"parameters",
"[",
"\"dist\"",
"]",
"=",
"domain",
"else",
":",
"lower",
",",
"upper",
"=",
"numpy",
".",
"array",
"(",
"domain",
")",
"parameters",
"[",
"\"lower\"",
"]",
"=",
"lower",
"parameters",
"[",
"\"upper\"",
"]",
"=",
"upper",
"quad_function",
"=",
"QUAD_FUNCTIONS",
"[",
"rule",
"]",
"parameters_spec",
"=",
"inspect",
".",
"getargspec",
"(",
"quad_function",
")",
"[",
"0",
"]",
"parameters_spec",
"=",
"{",
"key",
":",
"None",
"for",
"key",
"in",
"parameters_spec",
"}",
"del",
"parameters_spec",
"[",
"\"order\"",
"]",
"for",
"key",
"in",
"parameters_spec",
":",
"if",
"key",
"in",
"parameters",
":",
"parameters_spec",
"[",
"key",
"]",
"=",
"parameters",
"[",
"key",
"]",
"def",
"_quad_function",
"(",
"order",
",",
"*",
"args",
",",
"*",
"*",
"kws",
")",
":",
"\"\"\"Implementation of quadrature function.\"\"\"",
"params",
"=",
"parameters_spec",
".",
"copy",
"(",
")",
"params",
".",
"update",
"(",
"kws",
")",
"abscissas",
",",
"weights",
"=",
"quad_function",
"(",
"order",
",",
"*",
"args",
",",
"*",
"*",
"params",
")",
"# normalize if prudent:",
"if",
"rule",
"in",
"UNORMALIZED_QUADRATURE_RULES",
"and",
"normalize",
":",
"if",
"isinstance",
"(",
"domain",
",",
"Dist",
")",
":",
"if",
"len",
"(",
"domain",
")",
"==",
"1",
":",
"weights",
"*=",
"domain",
".",
"pdf",
"(",
"abscissas",
")",
".",
"flatten",
"(",
")",
"else",
":",
"weights",
"*=",
"domain",
".",
"pdf",
"(",
"abscissas",
")",
"weights",
"/=",
"numpy",
".",
"sum",
"(",
"weights",
")",
"return",
"abscissas",
",",
"weights",
"return",
"_quad_function"
] | 37.616667 | 17.65 |
def veoh_download(url, output_dir = '.', merge = False, info_only = False, **kwargs):
'''Get item_id'''
if re.match(r'http://www.veoh.com/watch/\w+', url):
item_id = match1(url, r'http://www.veoh.com/watch/(\w+)')
elif re.match(r'http://www.veoh.com/m/watch.php\?v=\.*', url):
item_id = match1(url, r'http://www.veoh.com/m/watch.php\?v=(\w+)')
else:
raise NotImplementedError('Cannot find item ID')
veoh_download_by_id(item_id, output_dir = '.', merge = False, info_only = info_only, **kwargs) | [
"def",
"veoh_download",
"(",
"url",
",",
"output_dir",
"=",
"'.'",
",",
"merge",
"=",
"False",
",",
"info_only",
"=",
"False",
",",
"*",
"*",
"kwargs",
")",
":",
"if",
"re",
".",
"match",
"(",
"r'http://www.veoh.com/watch/\\w+'",
",",
"url",
")",
":",
"item_id",
"=",
"match1",
"(",
"url",
",",
"r'http://www.veoh.com/watch/(\\w+)'",
")",
"elif",
"re",
".",
"match",
"(",
"r'http://www.veoh.com/m/watch.php\\?v=\\.*'",
",",
"url",
")",
":",
"item_id",
"=",
"match1",
"(",
"url",
",",
"r'http://www.veoh.com/m/watch.php\\?v=(\\w+)'",
")",
"else",
":",
"raise",
"NotImplementedError",
"(",
"'Cannot find item ID'",
")",
"veoh_download_by_id",
"(",
"item_id",
",",
"output_dir",
"=",
"'.'",
",",
"merge",
"=",
"False",
",",
"info_only",
"=",
"info_only",
",",
"*",
"*",
"kwargs",
")"
] | 58.777778 | 29.888889 |
def make_association(self, record):
"""
contstruct the association
:param record:
:return: modeled association of genotype to mammalian phenotype
"""
model = Model(self.graph)
record['relation']['id'] = self.resolve("has phenotype")
# define the triple
gene = record['subject']['id']
relation = record['relation']['id']
phenotype = record['object']['id']
# instantiate the association
g2p_assoc = Assoc(self.graph, self.name, sub=gene, obj=phenotype, pred=relation)
# add the references
references = record['evidence']['has_supporting_reference']
# created RGDRef prefix in curie map to route to proper reference URL in RGD
references = [
x.replace('RGD', 'RGDRef') if 'PMID' not in x else x for x in references]
if len(references) > 0:
# make first ref in list the source
g2p_assoc.add_source(identifier=references[0])
ref_model = Reference(
self.graph, references[0],
self.globaltt['publication']
)
ref_model.addRefToGraph()
if len(references) > 1:
# create equivalent source for any other refs in list
# This seems to be specific to this source and
# there could be non-equivalent references in this list
for ref in references[1:]:
model.addSameIndividual(sub=references[0], obj=ref)
# add the date created on
g2p_assoc.add_date(date=record['date'])
g2p_assoc.add_evidence(self.resolve(record['evidence']['type'])) # ?set where?
g2p_assoc.add_association_to_graph()
return | [
"def",
"make_association",
"(",
"self",
",",
"record",
")",
":",
"model",
"=",
"Model",
"(",
"self",
".",
"graph",
")",
"record",
"[",
"'relation'",
"]",
"[",
"'id'",
"]",
"=",
"self",
".",
"resolve",
"(",
"\"has phenotype\"",
")",
"# define the triple",
"gene",
"=",
"record",
"[",
"'subject'",
"]",
"[",
"'id'",
"]",
"relation",
"=",
"record",
"[",
"'relation'",
"]",
"[",
"'id'",
"]",
"phenotype",
"=",
"record",
"[",
"'object'",
"]",
"[",
"'id'",
"]",
"# instantiate the association",
"g2p_assoc",
"=",
"Assoc",
"(",
"self",
".",
"graph",
",",
"self",
".",
"name",
",",
"sub",
"=",
"gene",
",",
"obj",
"=",
"phenotype",
",",
"pred",
"=",
"relation",
")",
"# add the references",
"references",
"=",
"record",
"[",
"'evidence'",
"]",
"[",
"'has_supporting_reference'",
"]",
"# created RGDRef prefix in curie map to route to proper reference URL in RGD",
"references",
"=",
"[",
"x",
".",
"replace",
"(",
"'RGD'",
",",
"'RGDRef'",
")",
"if",
"'PMID'",
"not",
"in",
"x",
"else",
"x",
"for",
"x",
"in",
"references",
"]",
"if",
"len",
"(",
"references",
")",
">",
"0",
":",
"# make first ref in list the source",
"g2p_assoc",
".",
"add_source",
"(",
"identifier",
"=",
"references",
"[",
"0",
"]",
")",
"ref_model",
"=",
"Reference",
"(",
"self",
".",
"graph",
",",
"references",
"[",
"0",
"]",
",",
"self",
".",
"globaltt",
"[",
"'publication'",
"]",
")",
"ref_model",
".",
"addRefToGraph",
"(",
")",
"if",
"len",
"(",
"references",
")",
">",
"1",
":",
"# create equivalent source for any other refs in list",
"# This seems to be specific to this source and",
"# there could be non-equivalent references in this list",
"for",
"ref",
"in",
"references",
"[",
"1",
":",
"]",
":",
"model",
".",
"addSameIndividual",
"(",
"sub",
"=",
"references",
"[",
"0",
"]",
",",
"obj",
"=",
"ref",
")",
"# add the date created on",
"g2p_assoc",
".",
"add_date",
"(",
"date",
"=",
"record",
"[",
"'date'",
"]",
")",
"g2p_assoc",
".",
"add_evidence",
"(",
"self",
".",
"resolve",
"(",
"record",
"[",
"'evidence'",
"]",
"[",
"'type'",
"]",
")",
")",
"# ?set where?",
"g2p_assoc",
".",
"add_association_to_graph",
"(",
")",
"return"
] | 37.777778 | 19.2 |
def _openssl_key_iv(passphrase, salt):
"""
Returns a (key, iv) tuple that can be used in AES symmetric encryption
from a *passphrase* (a byte or unicode string) and *salt* (a byte array).
"""
def _openssl_kdf(req):
if hasattr(passphrase, 'encode'):
passwd = passphrase.encode('ascii', 'ignore')
else:
passwd = passphrase
prev = b''
while req > 0:
digest = hashes.Hash(hashes.MD5(), backend=default_backend())
digest.update(prev + passwd + salt)
prev = digest.finalize()
req -= IV_BLOCK_SIZE
yield prev
assert passphrase is not None
assert salt is not None
# AES key: 32 bytes, IV: 16 bytes
mat = b''.join([x for x in _openssl_kdf(32 + IV_BLOCK_SIZE)])
return mat[0:32], mat[32:32 + IV_BLOCK_SIZE] | [
"def",
"_openssl_key_iv",
"(",
"passphrase",
",",
"salt",
")",
":",
"def",
"_openssl_kdf",
"(",
"req",
")",
":",
"if",
"hasattr",
"(",
"passphrase",
",",
"'encode'",
")",
":",
"passwd",
"=",
"passphrase",
".",
"encode",
"(",
"'ascii'",
",",
"'ignore'",
")",
"else",
":",
"passwd",
"=",
"passphrase",
"prev",
"=",
"b''",
"while",
"req",
">",
"0",
":",
"digest",
"=",
"hashes",
".",
"Hash",
"(",
"hashes",
".",
"MD5",
"(",
")",
",",
"backend",
"=",
"default_backend",
"(",
")",
")",
"digest",
".",
"update",
"(",
"prev",
"+",
"passwd",
"+",
"salt",
")",
"prev",
"=",
"digest",
".",
"finalize",
"(",
")",
"req",
"-=",
"IV_BLOCK_SIZE",
"yield",
"prev",
"assert",
"passphrase",
"is",
"not",
"None",
"assert",
"salt",
"is",
"not",
"None",
"# AES key: 32 bytes, IV: 16 bytes",
"mat",
"=",
"b''",
".",
"join",
"(",
"[",
"x",
"for",
"x",
"in",
"_openssl_kdf",
"(",
"32",
"+",
"IV_BLOCK_SIZE",
")",
"]",
")",
"return",
"mat",
"[",
"0",
":",
"32",
"]",
",",
"mat",
"[",
"32",
":",
"32",
"+",
"IV_BLOCK_SIZE",
"]"
] | 37.772727 | 13.954545 |
def _set_suffix_links(self):
'''
Sets all suffix links in all nodes in this trie.
'''
self._suffix_links_set = True
for current, parent in self.bfs():
# skip the root node
if parent is None:
continue
current.longest_prefix = parent.longest_prefix
if parent.has_value:
current.longest_prefix = parent
# the root doesn't get a suffix link
# also, skip previously set suffix links
if current.has_suffix:
continue
# current is not the root and has no suffix
# set current's suffix to parent's suffix
suffix = parent
while True:
if not suffix.has_suffix:
current.suffix = self.root
break
else:
suffix = suffix.suffix
if current.uplink in suffix:
current.suffix = suffix[current.uplink]
break
# now find the dict_suffix value
suffix = current.suffix
while not suffix.has_value and suffix.has_suffix:
suffix = suffix.suffix
if suffix.has_value:
current.dict_suffix = suffix | [
"def",
"_set_suffix_links",
"(",
"self",
")",
":",
"self",
".",
"_suffix_links_set",
"=",
"True",
"for",
"current",
",",
"parent",
"in",
"self",
".",
"bfs",
"(",
")",
":",
"# skip the root node",
"if",
"parent",
"is",
"None",
":",
"continue",
"current",
".",
"longest_prefix",
"=",
"parent",
".",
"longest_prefix",
"if",
"parent",
".",
"has_value",
":",
"current",
".",
"longest_prefix",
"=",
"parent",
"# the root doesn't get a suffix link",
"# also, skip previously set suffix links",
"if",
"current",
".",
"has_suffix",
":",
"continue",
"# current is not the root and has no suffix",
"# set current's suffix to parent's suffix",
"suffix",
"=",
"parent",
"while",
"True",
":",
"if",
"not",
"suffix",
".",
"has_suffix",
":",
"current",
".",
"suffix",
"=",
"self",
".",
"root",
"break",
"else",
":",
"suffix",
"=",
"suffix",
".",
"suffix",
"if",
"current",
".",
"uplink",
"in",
"suffix",
":",
"current",
".",
"suffix",
"=",
"suffix",
"[",
"current",
".",
"uplink",
"]",
"break",
"# now find the dict_suffix value",
"suffix",
"=",
"current",
".",
"suffix",
"while",
"not",
"suffix",
".",
"has_value",
"and",
"suffix",
".",
"has_suffix",
":",
"suffix",
"=",
"suffix",
".",
"suffix",
"if",
"suffix",
".",
"has_value",
":",
"current",
".",
"dict_suffix",
"=",
"suffix"
] | 37.676471 | 11.264706 |
def get_client_parameters(username: str, ip_address: str, user_agent: str) -> dict:
"""
Get query parameters for filtering AccessAttempt queryset.
This method returns a dict that guarantees iteration order for keys and values,
and can so be used in e.g. the generation of hash keys or other deterministic functions.
"""
filter_kwargs = dict()
if settings.AXES_ONLY_USER_FAILURES:
# 1. Only individual usernames can be tracked with parametrization
filter_kwargs['username'] = username
else:
if settings.AXES_LOCK_OUT_BY_COMBINATION_USER_AND_IP:
# 2. A combination of username and IP address can be used as well
filter_kwargs['username'] = username
filter_kwargs['ip_address'] = ip_address
else:
# 3. Default case is to track the IP address only, which is the most secure option
filter_kwargs['ip_address'] = ip_address
if settings.AXES_USE_USER_AGENT:
# 4. The HTTP User-Agent can be used to track e.g. one browser
filter_kwargs['user_agent'] = user_agent
return filter_kwargs | [
"def",
"get_client_parameters",
"(",
"username",
":",
"str",
",",
"ip_address",
":",
"str",
",",
"user_agent",
":",
"str",
")",
"->",
"dict",
":",
"filter_kwargs",
"=",
"dict",
"(",
")",
"if",
"settings",
".",
"AXES_ONLY_USER_FAILURES",
":",
"# 1. Only individual usernames can be tracked with parametrization",
"filter_kwargs",
"[",
"'username'",
"]",
"=",
"username",
"else",
":",
"if",
"settings",
".",
"AXES_LOCK_OUT_BY_COMBINATION_USER_AND_IP",
":",
"# 2. A combination of username and IP address can be used as well",
"filter_kwargs",
"[",
"'username'",
"]",
"=",
"username",
"filter_kwargs",
"[",
"'ip_address'",
"]",
"=",
"ip_address",
"else",
":",
"# 3. Default case is to track the IP address only, which is the most secure option",
"filter_kwargs",
"[",
"'ip_address'",
"]",
"=",
"ip_address",
"if",
"settings",
".",
"AXES_USE_USER_AGENT",
":",
"# 4. The HTTP User-Agent can be used to track e.g. one browser",
"filter_kwargs",
"[",
"'user_agent'",
"]",
"=",
"user_agent",
"return",
"filter_kwargs"
] | 41.259259 | 25.037037 |
def convert_vectored_io_slice_to_final_path_name(local_path, ase):
# type: (pathlib.Path,
# blobxfer.models.azure.StorageEntity) -> pathlib.Path
"""Convert vectored io slice to final path name
:param pathlib.Path local_path: local path
:param blobxfer.models.azure.StorageEntity ase: Storage Entity
:rtype: pathlib.Path
:return: converted final path
"""
name = blobxfer.models.metadata.\
remove_vectored_io_slice_suffix_from_name(
local_path.name, ase.vectored_io.slice_id)
_tmp = list(local_path.parts[:-1])
_tmp.append(name)
return pathlib.Path(*_tmp) | [
"def",
"convert_vectored_io_slice_to_final_path_name",
"(",
"local_path",
",",
"ase",
")",
":",
"# type: (pathlib.Path,",
"# blobxfer.models.azure.StorageEntity) -> pathlib.Path",
"name",
"=",
"blobxfer",
".",
"models",
".",
"metadata",
".",
"remove_vectored_io_slice_suffix_from_name",
"(",
"local_path",
".",
"name",
",",
"ase",
".",
"vectored_io",
".",
"slice_id",
")",
"_tmp",
"=",
"list",
"(",
"local_path",
".",
"parts",
"[",
":",
"-",
"1",
"]",
")",
"_tmp",
".",
"append",
"(",
"name",
")",
"return",
"pathlib",
".",
"Path",
"(",
"*",
"_tmp",
")"
] | 44.666667 | 11.733333 |
def return_max_phrase(run, idx, dictionary):
"""
Finds the maximal phrase in the run starting from the given index. It uses the dictionary to find sequences of ids
that can be merged into a phrase.
:param run: a run of ids
:param idx: the position in the run to start looking for a merge sequence
:param dictionary: the dictionary to use to determine if a merge id is present. This should be a dictionary of
dictionaries. Each inner dictionary is a continuation of a mergable run. The end of a run is donated by a None key
in a dictionary. The value associated with the None key is the integer of the merge id.
:return: phrase_id or None, index after the phrase_id or the current index if no phrase was found.
"""
if idx < len(run) and run[idx] in dictionary:
id = run[idx]
rv, rv_idx = PhraseDictionary.return_max_phrase(run, idx + 1, dictionary[id])
if rv is not None:
return rv, rv_idx
if None in dictionary:
return dictionary[None], idx
else:
return None, None | [
"def",
"return_max_phrase",
"(",
"run",
",",
"idx",
",",
"dictionary",
")",
":",
"if",
"idx",
"<",
"len",
"(",
"run",
")",
"and",
"run",
"[",
"idx",
"]",
"in",
"dictionary",
":",
"id",
"=",
"run",
"[",
"idx",
"]",
"rv",
",",
"rv_idx",
"=",
"PhraseDictionary",
".",
"return_max_phrase",
"(",
"run",
",",
"idx",
"+",
"1",
",",
"dictionary",
"[",
"id",
"]",
")",
"if",
"rv",
"is",
"not",
"None",
":",
"return",
"rv",
",",
"rv_idx",
"if",
"None",
"in",
"dictionary",
":",
"return",
"dictionary",
"[",
"None",
"]",
",",
"idx",
"else",
":",
"return",
"None",
",",
"None"
] | 53.857143 | 28.809524 |
def compensate_system_time_change(self, difference): # pragma: no cover,
# not with unit tests
"""If a system time change occurs we have to update
properties time related to reflect change
:param difference: difference between new time and old time
:type difference:
:return: None
"""
# We only need to change some value
for prop in ('last_notification', 'last_state_change', 'last_hard_state_change'):
val = getattr(self, prop) # current value
# Do not go below 1970 :)
val = max(0, val + difference) # diff may be negative
setattr(self, prop, val) | [
"def",
"compensate_system_time_change",
"(",
"self",
",",
"difference",
")",
":",
"# pragma: no cover,",
"# not with unit tests",
"# We only need to change some value",
"for",
"prop",
"in",
"(",
"'last_notification'",
",",
"'last_state_change'",
",",
"'last_hard_state_change'",
")",
":",
"val",
"=",
"getattr",
"(",
"self",
",",
"prop",
")",
"# current value",
"# Do not go below 1970 :)",
"val",
"=",
"max",
"(",
"0",
",",
"val",
"+",
"difference",
")",
"# diff may be negative",
"setattr",
"(",
"self",
",",
"prop",
",",
"val",
")"
] | 43.933333 | 16.866667 |
def reset_index(self, level=None, drop=False, name=None, inplace=False):
"""
Generate a new DataFrame or Series with the index reset.
This is useful when the index needs to be treated as a column, or
when the index is meaningless and needs to be reset to the default
before another operation.
Parameters
----------
level : int, str, tuple, or list, default optional
For a Series with a MultiIndex, only remove the specified levels
from the index. Removes all levels by default.
drop : bool, default False
Just reset the index, without inserting it as a column in
the new DataFrame.
name : object, optional
The name to use for the column containing the original Series
values. Uses ``self.name`` by default. This argument is ignored
when `drop` is True.
inplace : bool, default False
Modify the Series in place (do not create a new object).
Returns
-------
Series or DataFrame
When `drop` is False (the default), a DataFrame is returned.
The newly created columns will come first in the DataFrame,
followed by the original Series values.
When `drop` is True, a `Series` is returned.
In either case, if ``inplace=True``, no value is returned.
See Also
--------
DataFrame.reset_index: Analogous function for DataFrame.
Examples
--------
>>> s = pd.Series([1, 2, 3, 4], name='foo',
... index=pd.Index(['a', 'b', 'c', 'd'], name='idx'))
Generate a DataFrame with default index.
>>> s.reset_index()
idx foo
0 a 1
1 b 2
2 c 3
3 d 4
To specify the name of the new column use `name`.
>>> s.reset_index(name='values')
idx values
0 a 1
1 b 2
2 c 3
3 d 4
To generate a new Series with the default set `drop` to True.
>>> s.reset_index(drop=True)
0 1
1 2
2 3
3 4
Name: foo, dtype: int64
To update the Series in place, without generating a new one
set `inplace` to True. Note that it also requires ``drop=True``.
>>> s.reset_index(inplace=True, drop=True)
>>> s
0 1
1 2
2 3
3 4
Name: foo, dtype: int64
The `level` parameter is interesting for Series with a multi-level
index.
>>> arrays = [np.array(['bar', 'bar', 'baz', 'baz']),
... np.array(['one', 'two', 'one', 'two'])]
>>> s2 = pd.Series(
... range(4), name='foo',
... index=pd.MultiIndex.from_arrays(arrays,
... names=['a', 'b']))
To remove a specific level from the Index, use `level`.
>>> s2.reset_index(level='a')
a foo
b
one bar 0
two bar 1
one baz 2
two baz 3
If `level` is not set, all levels are removed from the Index.
>>> s2.reset_index()
a b foo
0 bar one 0
1 bar two 1
2 baz one 2
3 baz two 3
"""
inplace = validate_bool_kwarg(inplace, 'inplace')
if drop:
new_index = ibase.default_index(len(self))
if level is not None:
if not isinstance(level, (tuple, list)):
level = [level]
level = [self.index._get_level_number(lev) for lev in level]
if len(level) < self.index.nlevels:
new_index = self.index.droplevel(level)
if inplace:
self.index = new_index
# set name if it was passed, otherwise, keep the previous name
self.name = name or self.name
else:
return self._constructor(self._values.copy(),
index=new_index).__finalize__(self)
elif inplace:
raise TypeError('Cannot reset_index inplace on a Series '
'to create a DataFrame')
else:
df = self.to_frame(name)
return df.reset_index(level=level, drop=drop) | [
"def",
"reset_index",
"(",
"self",
",",
"level",
"=",
"None",
",",
"drop",
"=",
"False",
",",
"name",
"=",
"None",
",",
"inplace",
"=",
"False",
")",
":",
"inplace",
"=",
"validate_bool_kwarg",
"(",
"inplace",
",",
"'inplace'",
")",
"if",
"drop",
":",
"new_index",
"=",
"ibase",
".",
"default_index",
"(",
"len",
"(",
"self",
")",
")",
"if",
"level",
"is",
"not",
"None",
":",
"if",
"not",
"isinstance",
"(",
"level",
",",
"(",
"tuple",
",",
"list",
")",
")",
":",
"level",
"=",
"[",
"level",
"]",
"level",
"=",
"[",
"self",
".",
"index",
".",
"_get_level_number",
"(",
"lev",
")",
"for",
"lev",
"in",
"level",
"]",
"if",
"len",
"(",
"level",
")",
"<",
"self",
".",
"index",
".",
"nlevels",
":",
"new_index",
"=",
"self",
".",
"index",
".",
"droplevel",
"(",
"level",
")",
"if",
"inplace",
":",
"self",
".",
"index",
"=",
"new_index",
"# set name if it was passed, otherwise, keep the previous name",
"self",
".",
"name",
"=",
"name",
"or",
"self",
".",
"name",
"else",
":",
"return",
"self",
".",
"_constructor",
"(",
"self",
".",
"_values",
".",
"copy",
"(",
")",
",",
"index",
"=",
"new_index",
")",
".",
"__finalize__",
"(",
"self",
")",
"elif",
"inplace",
":",
"raise",
"TypeError",
"(",
"'Cannot reset_index inplace on a Series '",
"'to create a DataFrame'",
")",
"else",
":",
"df",
"=",
"self",
".",
"to_frame",
"(",
"name",
")",
"return",
"df",
".",
"reset_index",
"(",
"level",
"=",
"level",
",",
"drop",
"=",
"drop",
")"
] | 33.152672 | 22.78626 |
def parse_tuple(self, simplified=False, with_condexpr=True,
extra_end_rules=None, explicit_parentheses=False):
"""Works like `parse_expression` but if multiple expressions are
delimited by a comma a :class:`~jinja2.nodes.Tuple` node is created.
This method could also return a regular expression instead of a tuple
if no commas where found.
The default parsing mode is a full tuple. If `simplified` is `True`
only names and literals are parsed. The `no_condexpr` parameter is
forwarded to :meth:`parse_expression`.
Because tuples do not require delimiters and may end in a bogus comma
an extra hint is needed that marks the end of a tuple. For example
for loops support tuples between `for` and `in`. In that case the
`extra_end_rules` is set to ``['name:in']``.
`explicit_parentheses` is true if the parsing was triggered by an
expression in parentheses. This is used to figure out if an empty
tuple is a valid expression or not.
"""
lineno = self.stream.current.lineno
if simplified:
parse = lambda: self.parse_primary(with_postfix=False)
elif with_condexpr:
parse = self.parse_expression
else:
parse = lambda: self.parse_expression(with_condexpr=False)
args = []
is_tuple = False
while 1:
if args:
self.stream.expect('comma')
if self.is_tuple_end(extra_end_rules):
break
args.append(parse())
if self.stream.current.type == 'comma':
is_tuple = True
else:
break
lineno = self.stream.current.lineno
if not is_tuple:
if args:
return args[0]
# if we don't have explicit parentheses, an empty tuple is
# not a valid expression. This would mean nothing (literally
# nothing) in the spot of an expression would be an empty
# tuple.
if not explicit_parentheses:
self.fail('Expected an expression, got \'%s\'' %
describe_token(self.stream.current))
return nodes.Tuple(args, 'load', lineno=lineno) | [
"def",
"parse_tuple",
"(",
"self",
",",
"simplified",
"=",
"False",
",",
"with_condexpr",
"=",
"True",
",",
"extra_end_rules",
"=",
"None",
",",
"explicit_parentheses",
"=",
"False",
")",
":",
"lineno",
"=",
"self",
".",
"stream",
".",
"current",
".",
"lineno",
"if",
"simplified",
":",
"parse",
"=",
"lambda",
":",
"self",
".",
"parse_primary",
"(",
"with_postfix",
"=",
"False",
")",
"elif",
"with_condexpr",
":",
"parse",
"=",
"self",
".",
"parse_expression",
"else",
":",
"parse",
"=",
"lambda",
":",
"self",
".",
"parse_expression",
"(",
"with_condexpr",
"=",
"False",
")",
"args",
"=",
"[",
"]",
"is_tuple",
"=",
"False",
"while",
"1",
":",
"if",
"args",
":",
"self",
".",
"stream",
".",
"expect",
"(",
"'comma'",
")",
"if",
"self",
".",
"is_tuple_end",
"(",
"extra_end_rules",
")",
":",
"break",
"args",
".",
"append",
"(",
"parse",
"(",
")",
")",
"if",
"self",
".",
"stream",
".",
"current",
".",
"type",
"==",
"'comma'",
":",
"is_tuple",
"=",
"True",
"else",
":",
"break",
"lineno",
"=",
"self",
".",
"stream",
".",
"current",
".",
"lineno",
"if",
"not",
"is_tuple",
":",
"if",
"args",
":",
"return",
"args",
"[",
"0",
"]",
"# if we don't have explicit parentheses, an empty tuple is",
"# not a valid expression. This would mean nothing (literally",
"# nothing) in the spot of an expression would be an empty",
"# tuple.",
"if",
"not",
"explicit_parentheses",
":",
"self",
".",
"fail",
"(",
"'Expected an expression, got \\'%s\\''",
"%",
"describe_token",
"(",
"self",
".",
"stream",
".",
"current",
")",
")",
"return",
"nodes",
".",
"Tuple",
"(",
"args",
",",
"'load'",
",",
"lineno",
"=",
"lineno",
")"
] | 41.888889 | 21.537037 |
def limit_x(
self,
limit_lower = None, # float
limit_upper = None # float
):
"""
get or set x limits of the current axes
x_min, x_max = limit_x() # return the current limit_x
limit_x(x_min, x_max) # set the limit_x to x_min, x_max
"""
if limit_lower is None and limit_upper is None:
return self._limit_x
elif hasattr(limit_lower, "__iter__"):
self._limit_x = limit_lower[:2]
else:
self._limit_x = [limit_lower, limit_upper]
if self._limit_x[0] == self._limit_x[1]:
self._limit_x[1] += 1
self._limit_x[0] -= self.mod_x
self._limit_x[1] += self.mod_x | [
"def",
"limit_x",
"(",
"self",
",",
"limit_lower",
"=",
"None",
",",
"# float",
"limit_upper",
"=",
"None",
"# float",
")",
":",
"if",
"limit_lower",
"is",
"None",
"and",
"limit_upper",
"is",
"None",
":",
"return",
"self",
".",
"_limit_x",
"elif",
"hasattr",
"(",
"limit_lower",
",",
"\"__iter__\"",
")",
":",
"self",
".",
"_limit_x",
"=",
"limit_lower",
"[",
":",
"2",
"]",
"else",
":",
"self",
".",
"_limit_x",
"=",
"[",
"limit_lower",
",",
"limit_upper",
"]",
"if",
"self",
".",
"_limit_x",
"[",
"0",
"]",
"==",
"self",
".",
"_limit_x",
"[",
"1",
"]",
":",
"self",
".",
"_limit_x",
"[",
"1",
"]",
"+=",
"1",
"self",
".",
"_limit_x",
"[",
"0",
"]",
"-=",
"self",
".",
"mod_x",
"self",
".",
"_limit_x",
"[",
"1",
"]",
"+=",
"self",
".",
"mod_x"
] | 30.478261 | 15.695652 |
def get_cell_shift(flow_model):
"""Get flow direction induced cell shift dict.
Args:
flow_model: Currently, "TauDEM", "ArcGIS", and "Whitebox" are supported.
"""
assert flow_model.lower() in FlowModelConst.d8_deltas
return FlowModelConst.d8_deltas.get(flow_model.lower()) | [
"def",
"get_cell_shift",
"(",
"flow_model",
")",
":",
"assert",
"flow_model",
".",
"lower",
"(",
")",
"in",
"FlowModelConst",
".",
"d8_deltas",
"return",
"FlowModelConst",
".",
"d8_deltas",
".",
"get",
"(",
"flow_model",
".",
"lower",
"(",
")",
")"
] | 45.285714 | 17.714286 |
def computeMatchProbabilityOmega(k, bMax, theta, nTrials=100):
"""
The Omega match probability estimates the probability of matching when
both vectors have exactly b components in common. This function computes
this probability for b=1 to bMax.
For each value of b this function:
1) Creates nTrials instances of Xw(b) which are vectors with b components
where each component is uniform in [-1/k, 1/k].
2) Creates nTrials instances of Xi(b) which are vectors with b components
where each component is uniform in [0, 2/k].
3) Does every possible dot product of Xw(b) dot Xi(b), i.e. nTrials * nTrials
dot products.
4) Counts the fraction of cases where Xw(b) dot Xi(b) >= theta
Returns an array with bMax entries, where each entry contains the
probability computed in 4).
"""
omegaProb = np.zeros(bMax+1)
for b in range(1, bMax+1):
xwb = getSparseTensor(b, b, nTrials, fixedRange=1.0/k)
xib = getSparseTensor(b, b, nTrials, onlyPositive=True, fixedRange=2.0/k)
r = xwb.matmul(xib.t())
numMatches = ((r >= theta).sum()).item()
omegaProb[b] = numMatches / float(nTrials * nTrials)
print(omegaProb)
return omegaProb | [
"def",
"computeMatchProbabilityOmega",
"(",
"k",
",",
"bMax",
",",
"theta",
",",
"nTrials",
"=",
"100",
")",
":",
"omegaProb",
"=",
"np",
".",
"zeros",
"(",
"bMax",
"+",
"1",
")",
"for",
"b",
"in",
"range",
"(",
"1",
",",
"bMax",
"+",
"1",
")",
":",
"xwb",
"=",
"getSparseTensor",
"(",
"b",
",",
"b",
",",
"nTrials",
",",
"fixedRange",
"=",
"1.0",
"/",
"k",
")",
"xib",
"=",
"getSparseTensor",
"(",
"b",
",",
"b",
",",
"nTrials",
",",
"onlyPositive",
"=",
"True",
",",
"fixedRange",
"=",
"2.0",
"/",
"k",
")",
"r",
"=",
"xwb",
".",
"matmul",
"(",
"xib",
".",
"t",
"(",
")",
")",
"numMatches",
"=",
"(",
"(",
"r",
">=",
"theta",
")",
".",
"sum",
"(",
")",
")",
".",
"item",
"(",
")",
"omegaProb",
"[",
"b",
"]",
"=",
"numMatches",
"/",
"float",
"(",
"nTrials",
"*",
"nTrials",
")",
"print",
"(",
"omegaProb",
")",
"return",
"omegaProb"
] | 32.714286 | 24.657143 |
def validate_enum_attribute(self, attribute: str,
candidates: Set[Union[str, int, float]]) -> None:
""" Validates that the attribute value is among the candidates """
self.add_errors(
validate_enum_attribute(self.fully_qualified_name, self._spec, attribute, candidates)) | [
"def",
"validate_enum_attribute",
"(",
"self",
",",
"attribute",
":",
"str",
",",
"candidates",
":",
"Set",
"[",
"Union",
"[",
"str",
",",
"int",
",",
"float",
"]",
"]",
")",
"->",
"None",
":",
"self",
".",
"add_errors",
"(",
"validate_enum_attribute",
"(",
"self",
".",
"fully_qualified_name",
",",
"self",
".",
"_spec",
",",
"attribute",
",",
"candidates",
")",
")"
] | 65.2 | 24.8 |
def _prepare_b_jkl_mn(readout_povm, pauli_basis, pre_channel_ops, post_channel_ops, rho0):
    """
    Prepare the coefficient matrix for process tomography. This function uses sparse matrices
    for much greater efficiency. The coefficient matrix is defined as:
    .. math::
            B_{(jkl)(mn)}=\sum_{r,q}\pi_{jr}(\mathcal{R}_{k})_{rm} (\mathcal{R}_{l})_{nq} (\rho_0)_q
    where :math:`\mathcal{R}_{k}` is the transfer matrix of the quantum map corresponding to the
    k-th pre-measurement channel, while :math:`\mathcal{R}_{l}` is the transfer matrix of the l-th
    state preparation process. We also require the overlap
    between the (generalized) Pauli basis ops and the projection operators
    :math:`\pi_{jl}:=\sbraket{\Pi_j}{P_l} = \tr{\Pi_j P_l}`.
    See the grove documentation on tomography for detailed information.
    :param DiagonalPOVM readout_povm: The POVM corresponding to the readout plus classifier.
    :param OperatorBasis pauli_basis: The (generalized) Pauli basis employed in the estimation.
    :param list pre_channel_ops: The state preparation channel operators as `qutip.Qobj`
    :param list post_channel_ops: The pre-measurement (post circuit) channel operators as `qutip.Qobj`
    :param qutip.Qobj rho0: The initial state as a density matrix.
    :return: The coefficient matrix necessary to set up the binomial state tomography problem.
    :rtype: scipy.sparse.csr_matrix
    """
    # Measurement-side coefficients (POVM overlaps folded with each
    # post-circuit channel's transfer matrix); reuse the state-tomography helper.
    c_jk_m = state_tomography._prepare_c_jk_m(readout_povm, pauli_basis, post_channel_ops)
    # Transfer matrix of each state-preparation channel, expressed in the Pauli basis.
    pre_channel_transfer_matrices = [pauli_basis.transfer_matrix(qt.to_super(ek))
                                     for ek in pre_channel_ops]
    # Initial state rho0 as a coefficient vector in the same Pauli basis.
    rho0_q = pauli_basis.project_op(rho0)
    # These next lines hide some very serious (sparse-)matrix index magic,
    # basically we exploit the same index math as in `qutip.sprepost()`
    # i.e., if a matrix X is linearly mapped `X -> A.dot(X).dot(B)`
    # then this can be rewritten as
    # `np.kron(B.T, A).dot(X.T.ravel()).reshape((B.shape[1], A.shape[0])).T`
    # The extra matrix transpose operations are necessary because numpy by default
    # uses row-major storage, whereas these operations are conventionally defined for column-major
    # storage.
    d_ln = spvstack([(rlnq * rho0_q).T for rlnq in pre_channel_transfer_matrices]).tocoo()
    # Kronecker product combines the preparation-side (d_ln) and
    # measurement-side (c_jk_m) coefficients into the final B matrix.
    b_jkl_mn = spkron(d_ln, c_jk_m).real
    return b_jkl_mn
"def",
"_prepare_b_jkl_mn",
"(",
"readout_povm",
",",
"pauli_basis",
",",
"pre_channel_ops",
",",
"post_channel_ops",
",",
"rho0",
")",
":",
"c_jk_m",
"=",
"state_tomography",
".",
"_prepare_c_jk_m",
"(",
"readout_povm",
",",
"pauli_basis",
",",
"post_channel_ops",
")",
"pre_channel_transfer_matrices",
"=",
"[",
"pauli_basis",
".",
"transfer_matrix",
"(",
"qt",
".",
"to_super",
"(",
"ek",
")",
")",
"for",
"ek",
"in",
"pre_channel_ops",
"]",
"rho0_q",
"=",
"pauli_basis",
".",
"project_op",
"(",
"rho0",
")",
"# These next lines hide some very serious (sparse-)matrix index magic,",
"# basically we exploit the same index math as in `qutip.sprepost()`",
"# i.e., if a matrix X is linearly mapped `X -> A.dot(X).dot(B)`",
"# then this can be rewritten as",
"# `np.kron(B.T, A).dot(X.T.ravel()).reshape((B.shape[1], A.shape[0])).T`",
"# The extra matrix transpose operations are necessary because numpy by default",
"# uses row-major storage, whereas these operations are conventionally defined for column-major",
"# storage.",
"d_ln",
"=",
"spvstack",
"(",
"[",
"(",
"rlnq",
"*",
"rho0_q",
")",
".",
"T",
"for",
"rlnq",
"in",
"pre_channel_transfer_matrices",
"]",
")",
".",
"tocoo",
"(",
")",
"b_jkl_mn",
"=",
"spkron",
"(",
"d_ln",
",",
"c_jk_m",
")",
".",
"real",
"return",
"b_jkl_mn"
] | 57.560976 | 34.97561 |
def from_rotation_matrix(rot, nonorthogonal=True):
    """Convert input 3x3 rotation matrix to unit quaternion
    By default, if scipy.linalg is available, this function uses
    Bar-Itzhack's algorithm to allow for non-orthogonal matrices.
    [J. Guidance, Vol. 23, No. 6, p. 1085 <http://dx.doi.org/10.2514/2.4654>]
    This will almost certainly be quite a bit slower than simpler versions,
    though it will be more robust to numerical errors in the rotation matrix.
    Also note that Bar-Itzhack uses some pretty weird conventions. The last
    component of the quaternion appears to represent the scalar, and the
    quaternion itself is conjugated relative to the convention used
    throughout this module.
    If scipy.linalg is not available or if the optional
    `nonorthogonal` parameter is set to `False`, this function falls
    back to the possibly faster, but less robust, algorithm of Markley
    [J. Guidance, Vol. 31, No. 2, p. 440
    <http://dx.doi.org/10.2514/1.31730>].
    Parameters
    ----------
    rot: (...Nx3x3) float array
        Each 3x3 matrix represents a rotation by multiplying (from the left)
        a column vector to produce a rotated column vector. Note that this
        input may actually have ndims>3; it is just assumed that the last
        two dimensions have size 3, representing the matrix.
    nonorthogonal: bool, optional
        If scipy.linalg is available, use the more robust algorithm of
        Bar-Itzhack. Default value is True.
    Returns
    -------
    q: array of quaternions
        Unit quaternions resulting in rotations corresponding to input
        rotations. Output shape is rot.shape[:-2].
    Raises
    ------
    LinAlgError
        If any of the eigenvalue solutions does not converge
    """
    try:
        from scipy import linalg
    except ImportError:
        linalg = False
    # np.asarray avoids a copy when possible; unlike np.array(..., copy=False)
    # it does not raise under NumPy 2.0 when a copy is actually required.
    rot = np.asarray(rot)
    shape = rot.shape[:-2]
    if linalg and nonorthogonal:
        from operator import mul
        from functools import reduce
        # Build Bar-Itzhack's symmetric 4x4 K3 matrix for every input matrix;
        # its largest-eigenvalue eigenvector encodes the quaternion.
        K3 = np.empty(shape+(4, 4))
        K3[..., 0, 0] = (rot[..., 0, 0] - rot[..., 1, 1] - rot[..., 2, 2])/3.0
        K3[..., 0, 1] = (rot[..., 1, 0] + rot[..., 0, 1])/3.0
        K3[..., 0, 2] = (rot[..., 2, 0] + rot[..., 0, 2])/3.0
        K3[..., 0, 3] = (rot[..., 1, 2] - rot[..., 2, 1])/3.0
        K3[..., 1, 0] = K3[..., 0, 1]
        K3[..., 1, 1] = (rot[..., 1, 1] - rot[..., 0, 0] - rot[..., 2, 2])/3.0
        K3[..., 1, 2] = (rot[..., 2, 1] + rot[..., 1, 2])/3.0
        K3[..., 1, 3] = (rot[..., 2, 0] - rot[..., 0, 2])/3.0
        K3[..., 2, 0] = K3[..., 0, 2]
        K3[..., 2, 1] = K3[..., 1, 2]
        K3[..., 2, 2] = (rot[..., 2, 2] - rot[..., 0, 0] - rot[..., 1, 1])/3.0
        K3[..., 2, 3] = (rot[..., 0, 1] - rot[..., 1, 0])/3.0
        K3[..., 3, 0] = K3[..., 0, 3]
        K3[..., 3, 1] = K3[..., 1, 3]
        K3[..., 3, 2] = K3[..., 2, 3]
        K3[..., 3, 3] = (rot[..., 0, 0] + rot[..., 1, 1] + rot[..., 2, 2])/3.0
        if not shape:
            q = zero.copy()
            # NOTE(review): scipy.linalg.eigh removed the 'eigvals' keyword in
            # SciPy 1.8 (replaced by subset_by_index); confirm supported range.
            eigvals, eigvecs = linalg.eigh(K3.T, eigvals=(3, 3))
            q.components[0] = eigvecs[-1]
            q.components[1:] = -eigvecs[:-1].flatten()
            return q
        else:
            # np.float was removed in NumPy 1.24; the builtin float is the
            # equivalent (float64) dtype.
            q = np.empty(shape+(4,), dtype=float)
            for flat_index in range(reduce(mul, shape)):
                multi_index = np.unravel_index(flat_index, shape)
                eigvals, eigvecs = linalg.eigh(K3[multi_index], eigvals=(3, 3))
                q[multi_index+(0,)] = eigvecs[-1]
                q[multi_index+(slice(1,None),)] = -eigvecs[:-1].flatten()
            return as_quat_array(q)
    else:  # No scipy.linalg or not `nonorthogonal`
        # Markley's method: pick, per matrix, the numerically best-conditioned
        # of four equivalent formulas (indexed by the largest diagonal term).
        diagonals = np.empty(shape+(4,))
        diagonals[..., 0] = rot[..., 0, 0]
        diagonals[..., 1] = rot[..., 1, 1]
        diagonals[..., 2] = rot[..., 2, 2]
        diagonals[..., 3] = rot[..., 0, 0] + rot[..., 1, 1] + rot[..., 2, 2]
        indices = np.argmax(diagonals, axis=-1)
        q = diagonals  # reuse storage space
        indices_i = (indices == 0)
        if np.any(indices_i):
            if indices_i.shape == ():
                indices_i = Ellipsis
            rot_i = rot[indices_i, :, :]
            q[indices_i, 0] = rot_i[..., 2, 1] - rot_i[..., 1, 2]
            q[indices_i, 1] = 1 + rot_i[..., 0, 0] - rot_i[..., 1, 1] - rot_i[..., 2, 2]
            q[indices_i, 2] = rot_i[..., 0, 1] + rot_i[..., 1, 0]
            q[indices_i, 3] = rot_i[..., 0, 2] + rot_i[..., 2, 0]
        indices_i = (indices == 1)
        if np.any(indices_i):
            if indices_i.shape == ():
                indices_i = Ellipsis
            rot_i = rot[indices_i, :, :]
            q[indices_i, 0] = rot_i[..., 0, 2] - rot_i[..., 2, 0]
            q[indices_i, 1] = rot_i[..., 1, 0] + rot_i[..., 0, 1]
            q[indices_i, 2] = 1 - rot_i[..., 0, 0] + rot_i[..., 1, 1] - rot_i[..., 2, 2]
            q[indices_i, 3] = rot_i[..., 1, 2] + rot_i[..., 2, 1]
        indices_i = (indices == 2)
        if np.any(indices_i):
            if indices_i.shape == ():
                indices_i = Ellipsis
            rot_i = rot[indices_i, :, :]
            q[indices_i, 0] = rot_i[..., 1, 0] - rot_i[..., 0, 1]
            q[indices_i, 1] = rot_i[..., 2, 0] + rot_i[..., 0, 2]
            q[indices_i, 2] = rot_i[..., 2, 1] + rot_i[..., 1, 2]
            q[indices_i, 3] = 1 - rot_i[..., 0, 0] - rot_i[..., 1, 1] + rot_i[..., 2, 2]
        indices_i = (indices == 3)
        if np.any(indices_i):
            if indices_i.shape == ():
                indices_i = Ellipsis
            rot_i = rot[indices_i, :, :]
            q[indices_i, 0] = 1 + rot_i[..., 0, 0] + rot_i[..., 1, 1] + rot_i[..., 2, 2]
            q[indices_i, 1] = rot_i[..., 2, 1] - rot_i[..., 1, 2]
            q[indices_i, 2] = rot_i[..., 0, 2] - rot_i[..., 2, 0]
            q[indices_i, 3] = rot_i[..., 1, 0] - rot_i[..., 0, 1]
        # Normalize to unit quaternions before converting.
        q /= np.linalg.norm(q, axis=-1)[..., np.newaxis]
        return as_quat_array(q)
"def",
"from_rotation_matrix",
"(",
"rot",
",",
"nonorthogonal",
"=",
"True",
")",
":",
"try",
":",
"from",
"scipy",
"import",
"linalg",
"except",
"ImportError",
":",
"linalg",
"=",
"False",
"rot",
"=",
"np",
".",
"array",
"(",
"rot",
",",
"copy",
"=",
"False",
")",
"shape",
"=",
"rot",
".",
"shape",
"[",
":",
"-",
"2",
"]",
"if",
"linalg",
"and",
"nonorthogonal",
":",
"from",
"operator",
"import",
"mul",
"from",
"functools",
"import",
"reduce",
"K3",
"=",
"np",
".",
"empty",
"(",
"shape",
"+",
"(",
"4",
",",
"4",
")",
")",
"K3",
"[",
"...",
",",
"0",
",",
"0",
"]",
"=",
"(",
"rot",
"[",
"...",
",",
"0",
",",
"0",
"]",
"-",
"rot",
"[",
"...",
",",
"1",
",",
"1",
"]",
"-",
"rot",
"[",
"...",
",",
"2",
",",
"2",
"]",
")",
"/",
"3.0",
"K3",
"[",
"...",
",",
"0",
",",
"1",
"]",
"=",
"(",
"rot",
"[",
"...",
",",
"1",
",",
"0",
"]",
"+",
"rot",
"[",
"...",
",",
"0",
",",
"1",
"]",
")",
"/",
"3.0",
"K3",
"[",
"...",
",",
"0",
",",
"2",
"]",
"=",
"(",
"rot",
"[",
"...",
",",
"2",
",",
"0",
"]",
"+",
"rot",
"[",
"...",
",",
"0",
",",
"2",
"]",
")",
"/",
"3.0",
"K3",
"[",
"...",
",",
"0",
",",
"3",
"]",
"=",
"(",
"rot",
"[",
"...",
",",
"1",
",",
"2",
"]",
"-",
"rot",
"[",
"...",
",",
"2",
",",
"1",
"]",
")",
"/",
"3.0",
"K3",
"[",
"...",
",",
"1",
",",
"0",
"]",
"=",
"K3",
"[",
"...",
",",
"0",
",",
"1",
"]",
"K3",
"[",
"...",
",",
"1",
",",
"1",
"]",
"=",
"(",
"rot",
"[",
"...",
",",
"1",
",",
"1",
"]",
"-",
"rot",
"[",
"...",
",",
"0",
",",
"0",
"]",
"-",
"rot",
"[",
"...",
",",
"2",
",",
"2",
"]",
")",
"/",
"3.0",
"K3",
"[",
"...",
",",
"1",
",",
"2",
"]",
"=",
"(",
"rot",
"[",
"...",
",",
"2",
",",
"1",
"]",
"+",
"rot",
"[",
"...",
",",
"1",
",",
"2",
"]",
")",
"/",
"3.0",
"K3",
"[",
"...",
",",
"1",
",",
"3",
"]",
"=",
"(",
"rot",
"[",
"...",
",",
"2",
",",
"0",
"]",
"-",
"rot",
"[",
"...",
",",
"0",
",",
"2",
"]",
")",
"/",
"3.0",
"K3",
"[",
"...",
",",
"2",
",",
"0",
"]",
"=",
"K3",
"[",
"...",
",",
"0",
",",
"2",
"]",
"K3",
"[",
"...",
",",
"2",
",",
"1",
"]",
"=",
"K3",
"[",
"...",
",",
"1",
",",
"2",
"]",
"K3",
"[",
"...",
",",
"2",
",",
"2",
"]",
"=",
"(",
"rot",
"[",
"...",
",",
"2",
",",
"2",
"]",
"-",
"rot",
"[",
"...",
",",
"0",
",",
"0",
"]",
"-",
"rot",
"[",
"...",
",",
"1",
",",
"1",
"]",
")",
"/",
"3.0",
"K3",
"[",
"...",
",",
"2",
",",
"3",
"]",
"=",
"(",
"rot",
"[",
"...",
",",
"0",
",",
"1",
"]",
"-",
"rot",
"[",
"...",
",",
"1",
",",
"0",
"]",
")",
"/",
"3.0",
"K3",
"[",
"...",
",",
"3",
",",
"0",
"]",
"=",
"K3",
"[",
"...",
",",
"0",
",",
"3",
"]",
"K3",
"[",
"...",
",",
"3",
",",
"1",
"]",
"=",
"K3",
"[",
"...",
",",
"1",
",",
"3",
"]",
"K3",
"[",
"...",
",",
"3",
",",
"2",
"]",
"=",
"K3",
"[",
"...",
",",
"2",
",",
"3",
"]",
"K3",
"[",
"...",
",",
"3",
",",
"3",
"]",
"=",
"(",
"rot",
"[",
"...",
",",
"0",
",",
"0",
"]",
"+",
"rot",
"[",
"...",
",",
"1",
",",
"1",
"]",
"+",
"rot",
"[",
"...",
",",
"2",
",",
"2",
"]",
")",
"/",
"3.0",
"if",
"not",
"shape",
":",
"q",
"=",
"zero",
".",
"copy",
"(",
")",
"eigvals",
",",
"eigvecs",
"=",
"linalg",
".",
"eigh",
"(",
"K3",
".",
"T",
",",
"eigvals",
"=",
"(",
"3",
",",
"3",
")",
")",
"q",
".",
"components",
"[",
"0",
"]",
"=",
"eigvecs",
"[",
"-",
"1",
"]",
"q",
".",
"components",
"[",
"1",
":",
"]",
"=",
"-",
"eigvecs",
"[",
":",
"-",
"1",
"]",
".",
"flatten",
"(",
")",
"return",
"q",
"else",
":",
"q",
"=",
"np",
".",
"empty",
"(",
"shape",
"+",
"(",
"4",
",",
")",
",",
"dtype",
"=",
"np",
".",
"float",
")",
"for",
"flat_index",
"in",
"range",
"(",
"reduce",
"(",
"mul",
",",
"shape",
")",
")",
":",
"multi_index",
"=",
"np",
".",
"unravel_index",
"(",
"flat_index",
",",
"shape",
")",
"eigvals",
",",
"eigvecs",
"=",
"linalg",
".",
"eigh",
"(",
"K3",
"[",
"multi_index",
"]",
",",
"eigvals",
"=",
"(",
"3",
",",
"3",
")",
")",
"q",
"[",
"multi_index",
"+",
"(",
"0",
",",
")",
"]",
"=",
"eigvecs",
"[",
"-",
"1",
"]",
"q",
"[",
"multi_index",
"+",
"(",
"slice",
"(",
"1",
",",
"None",
")",
",",
")",
"]",
"=",
"-",
"eigvecs",
"[",
":",
"-",
"1",
"]",
".",
"flatten",
"(",
")",
"return",
"as_quat_array",
"(",
"q",
")",
"else",
":",
"# No scipy.linalg or not `nonorthogonal`",
"diagonals",
"=",
"np",
".",
"empty",
"(",
"shape",
"+",
"(",
"4",
",",
")",
")",
"diagonals",
"[",
"...",
",",
"0",
"]",
"=",
"rot",
"[",
"...",
",",
"0",
",",
"0",
"]",
"diagonals",
"[",
"...",
",",
"1",
"]",
"=",
"rot",
"[",
"...",
",",
"1",
",",
"1",
"]",
"diagonals",
"[",
"...",
",",
"2",
"]",
"=",
"rot",
"[",
"...",
",",
"2",
",",
"2",
"]",
"diagonals",
"[",
"...",
",",
"3",
"]",
"=",
"rot",
"[",
"...",
",",
"0",
",",
"0",
"]",
"+",
"rot",
"[",
"...",
",",
"1",
",",
"1",
"]",
"+",
"rot",
"[",
"...",
",",
"2",
",",
"2",
"]",
"indices",
"=",
"np",
".",
"argmax",
"(",
"diagonals",
",",
"axis",
"=",
"-",
"1",
")",
"q",
"=",
"diagonals",
"# reuse storage space",
"indices_i",
"=",
"(",
"indices",
"==",
"0",
")",
"if",
"np",
".",
"any",
"(",
"indices_i",
")",
":",
"if",
"indices_i",
".",
"shape",
"==",
"(",
")",
":",
"indices_i",
"=",
"Ellipsis",
"rot_i",
"=",
"rot",
"[",
"indices_i",
",",
":",
",",
":",
"]",
"q",
"[",
"indices_i",
",",
"0",
"]",
"=",
"rot_i",
"[",
"...",
",",
"2",
",",
"1",
"]",
"-",
"rot_i",
"[",
"...",
",",
"1",
",",
"2",
"]",
"q",
"[",
"indices_i",
",",
"1",
"]",
"=",
"1",
"+",
"rot_i",
"[",
"...",
",",
"0",
",",
"0",
"]",
"-",
"rot_i",
"[",
"...",
",",
"1",
",",
"1",
"]",
"-",
"rot_i",
"[",
"...",
",",
"2",
",",
"2",
"]",
"q",
"[",
"indices_i",
",",
"2",
"]",
"=",
"rot_i",
"[",
"...",
",",
"0",
",",
"1",
"]",
"+",
"rot_i",
"[",
"...",
",",
"1",
",",
"0",
"]",
"q",
"[",
"indices_i",
",",
"3",
"]",
"=",
"rot_i",
"[",
"...",
",",
"0",
",",
"2",
"]",
"+",
"rot_i",
"[",
"...",
",",
"2",
",",
"0",
"]",
"indices_i",
"=",
"(",
"indices",
"==",
"1",
")",
"if",
"np",
".",
"any",
"(",
"indices_i",
")",
":",
"if",
"indices_i",
".",
"shape",
"==",
"(",
")",
":",
"indices_i",
"=",
"Ellipsis",
"rot_i",
"=",
"rot",
"[",
"indices_i",
",",
":",
",",
":",
"]",
"q",
"[",
"indices_i",
",",
"0",
"]",
"=",
"rot_i",
"[",
"...",
",",
"0",
",",
"2",
"]",
"-",
"rot_i",
"[",
"...",
",",
"2",
",",
"0",
"]",
"q",
"[",
"indices_i",
",",
"1",
"]",
"=",
"rot_i",
"[",
"...",
",",
"1",
",",
"0",
"]",
"+",
"rot_i",
"[",
"...",
",",
"0",
",",
"1",
"]",
"q",
"[",
"indices_i",
",",
"2",
"]",
"=",
"1",
"-",
"rot_i",
"[",
"...",
",",
"0",
",",
"0",
"]",
"+",
"rot_i",
"[",
"...",
",",
"1",
",",
"1",
"]",
"-",
"rot_i",
"[",
"...",
",",
"2",
",",
"2",
"]",
"q",
"[",
"indices_i",
",",
"3",
"]",
"=",
"rot_i",
"[",
"...",
",",
"1",
",",
"2",
"]",
"+",
"rot_i",
"[",
"...",
",",
"2",
",",
"1",
"]",
"indices_i",
"=",
"(",
"indices",
"==",
"2",
")",
"if",
"np",
".",
"any",
"(",
"indices_i",
")",
":",
"if",
"indices_i",
".",
"shape",
"==",
"(",
")",
":",
"indices_i",
"=",
"Ellipsis",
"rot_i",
"=",
"rot",
"[",
"indices_i",
",",
":",
",",
":",
"]",
"q",
"[",
"indices_i",
",",
"0",
"]",
"=",
"rot_i",
"[",
"...",
",",
"1",
",",
"0",
"]",
"-",
"rot_i",
"[",
"...",
",",
"0",
",",
"1",
"]",
"q",
"[",
"indices_i",
",",
"1",
"]",
"=",
"rot_i",
"[",
"...",
",",
"2",
",",
"0",
"]",
"+",
"rot_i",
"[",
"...",
",",
"0",
",",
"2",
"]",
"q",
"[",
"indices_i",
",",
"2",
"]",
"=",
"rot_i",
"[",
"...",
",",
"2",
",",
"1",
"]",
"+",
"rot_i",
"[",
"...",
",",
"1",
",",
"2",
"]",
"q",
"[",
"indices_i",
",",
"3",
"]",
"=",
"1",
"-",
"rot_i",
"[",
"...",
",",
"0",
",",
"0",
"]",
"-",
"rot_i",
"[",
"...",
",",
"1",
",",
"1",
"]",
"+",
"rot_i",
"[",
"...",
",",
"2",
",",
"2",
"]",
"indices_i",
"=",
"(",
"indices",
"==",
"3",
")",
"if",
"np",
".",
"any",
"(",
"indices_i",
")",
":",
"if",
"indices_i",
".",
"shape",
"==",
"(",
")",
":",
"indices_i",
"=",
"Ellipsis",
"rot_i",
"=",
"rot",
"[",
"indices_i",
",",
":",
",",
":",
"]",
"q",
"[",
"indices_i",
",",
"0",
"]",
"=",
"1",
"+",
"rot_i",
"[",
"...",
",",
"0",
",",
"0",
"]",
"+",
"rot_i",
"[",
"...",
",",
"1",
",",
"1",
"]",
"+",
"rot_i",
"[",
"...",
",",
"2",
",",
"2",
"]",
"q",
"[",
"indices_i",
",",
"1",
"]",
"=",
"rot_i",
"[",
"...",
",",
"2",
",",
"1",
"]",
"-",
"rot_i",
"[",
"...",
",",
"1",
",",
"2",
"]",
"q",
"[",
"indices_i",
",",
"2",
"]",
"=",
"rot_i",
"[",
"...",
",",
"0",
",",
"2",
"]",
"-",
"rot_i",
"[",
"...",
",",
"2",
",",
"0",
"]",
"q",
"[",
"indices_i",
",",
"3",
"]",
"=",
"rot_i",
"[",
"...",
",",
"1",
",",
"0",
"]",
"-",
"rot_i",
"[",
"...",
",",
"0",
",",
"1",
"]",
"q",
"/=",
"np",
".",
"linalg",
".",
"norm",
"(",
"q",
",",
"axis",
"=",
"-",
"1",
")",
"[",
"...",
",",
"np",
".",
"newaxis",
"]",
"return",
"as_quat_array",
"(",
"q",
")"
] | 42.934307 | 20.014599 |
def get_hwclock():
    '''
    Get current hardware clock setting (UTC or localtime)
    CLI Example:
    .. code-block:: bash
        salt '*' timezone.get_hwclock
    '''
    # Prefer timedatectl where available (systemd hosts); otherwise fall back
    # to the per-distro configuration files inspected below.
    if salt.utils.path.which('timedatectl'):
        ret = _timedatectl()
        # Look for the "RTC in local TZ: yes/no" line in the timedatectl output.
        for line in (x.strip() for x in ret['stdout'].splitlines()):
            if 'rtc in local tz' in line.lower():
                try:
                    if line.split(':')[-1].strip().lower() == 'yes':
                        return 'localtime'
                    else:
                        return 'UTC'
                except IndexError:
                    pass
        msg = ('Failed to parse timedatectl output: {0}\n'
               'Please file an issue with SaltStack').format(ret['stdout'])
        raise CommandExecutionError(msg)
    else:
        os_family = __grains__['os_family']
        # These families record the hwclock mode in /etc/adjtime.
        for family in ('RedHat', 'Suse', 'NILinuxRT'):
            if family in os_family:
                return _get_adjtime_timezone()
        if 'Debian' in __grains__['os_family']:
            # Original way to look up hwclock on Debian-based systems
            try:
                with salt.utils.files.fopen('/etc/default/rcS', 'r') as fp_:
                    for line in fp_:
                        line = salt.utils.stringutils.to_unicode(line)
                        if re.match(r'^\s*#', line):
                            continue
                        if 'UTC=' in line:
                            is_utc = line.rstrip('\n').split('=')[-1].lower()
                            if is_utc == 'yes':
                                return 'UTC'
                            else:
                                return 'localtime'
            except IOError as exc:
                # NOTE(review): 'exc' is unused; a missing/unreadable rcS file
                # deliberately falls through to the adjtime lookup below.
                pass
            # Since Wheezy
            return _get_adjtime_timezone()
        if 'Gentoo' in __grains__['os_family']:
            # Older Gentoo keeps the setting in /etc/conf.d/hwclock when no
            # /etc/adjtime exists.
            if not os.path.exists('/etc/adjtime'):
                offset_file = '/etc/conf.d/hwclock'
                try:
                    with salt.utils.files.fopen(offset_file, 'r') as fp_:
                        for line in fp_:
                            line = salt.utils.stringutils.to_unicode(line)
                            if line.startswith('clock='):
                                line = line.rstrip('\n')
                                line = line.split('=')[-1].strip('\'"')
                                if line == 'UTC':
                                    return line
                                if line == 'local':
                                    return 'LOCAL'
                        raise CommandExecutionError(
                            'Correct offset value not found in {0}'
                            .format(offset_file)
                        )
                except IOError as exc:
                    raise CommandExecutionError(
                        'Problem reading offset file {0}: {1}'
                        .format(offset_file, exc.strerror)
                    )
            return _get_adjtime_timezone()
        if 'Solaris' in __grains__['os_family']:
            # Solaris: a zone_info=GMT entry in /etc/rtc_config means UTC;
            # a missing file also defaults to UTC.
            offset_file = '/etc/rtc_config'
            try:
                with salt.utils.files.fopen(offset_file, 'r') as fp_:
                    for line in fp_:
                        line = salt.utils.stringutils.to_unicode(line)
                        if line.startswith('zone_info=GMT'):
                            return 'UTC'
                    return 'localtime'
            except IOError as exc:
                if exc.errno == errno.ENOENT:
                    # offset file does not exist
                    return 'UTC'
                raise CommandExecutionError(
                    'Problem reading offset file {0}: {1}'
                    .format(offset_file, exc.strerror)
                )
        if 'AIX' in __grains__['os_family']:
            # AIX: look for TZ=UTC in /etc/environment; a missing file
            # defaults to UTC.
            offset_file = '/etc/environment'
            try:
                with salt.utils.files.fopen(offset_file, 'r') as fp_:
                    for line in fp_:
                        line = salt.utils.stringutils.to_unicode(line)
                        if line.startswith('TZ=UTC'):
                            return 'UTC'
                    return 'localtime'
            except IOError as exc:
                if exc.errno == errno.ENOENT:
                    # offset file does not exist
                    return 'UTC'
                raise CommandExecutionError(
                    'Problem reading offset file {0}: {1}'
                    .format(offset_file, exc.strerror)
                )
"def",
"get_hwclock",
"(",
")",
":",
"if",
"salt",
".",
"utils",
".",
"path",
".",
"which",
"(",
"'timedatectl'",
")",
":",
"ret",
"=",
"_timedatectl",
"(",
")",
"for",
"line",
"in",
"(",
"x",
".",
"strip",
"(",
")",
"for",
"x",
"in",
"ret",
"[",
"'stdout'",
"]",
".",
"splitlines",
"(",
")",
")",
":",
"if",
"'rtc in local tz'",
"in",
"line",
".",
"lower",
"(",
")",
":",
"try",
":",
"if",
"line",
".",
"split",
"(",
"':'",
")",
"[",
"-",
"1",
"]",
".",
"strip",
"(",
")",
".",
"lower",
"(",
")",
"==",
"'yes'",
":",
"return",
"'localtime'",
"else",
":",
"return",
"'UTC'",
"except",
"IndexError",
":",
"pass",
"msg",
"=",
"(",
"'Failed to parse timedatectl output: {0}\\n'",
"'Please file an issue with SaltStack'",
")",
".",
"format",
"(",
"ret",
"[",
"'stdout'",
"]",
")",
"raise",
"CommandExecutionError",
"(",
"msg",
")",
"else",
":",
"os_family",
"=",
"__grains__",
"[",
"'os_family'",
"]",
"for",
"family",
"in",
"(",
"'RedHat'",
",",
"'Suse'",
",",
"'NILinuxRT'",
")",
":",
"if",
"family",
"in",
"os_family",
":",
"return",
"_get_adjtime_timezone",
"(",
")",
"if",
"'Debian'",
"in",
"__grains__",
"[",
"'os_family'",
"]",
":",
"# Original way to look up hwclock on Debian-based systems",
"try",
":",
"with",
"salt",
".",
"utils",
".",
"files",
".",
"fopen",
"(",
"'/etc/default/rcS'",
",",
"'r'",
")",
"as",
"fp_",
":",
"for",
"line",
"in",
"fp_",
":",
"line",
"=",
"salt",
".",
"utils",
".",
"stringutils",
".",
"to_unicode",
"(",
"line",
")",
"if",
"re",
".",
"match",
"(",
"r'^\\s*#'",
",",
"line",
")",
":",
"continue",
"if",
"'UTC='",
"in",
"line",
":",
"is_utc",
"=",
"line",
".",
"rstrip",
"(",
"'\\n'",
")",
".",
"split",
"(",
"'='",
")",
"[",
"-",
"1",
"]",
".",
"lower",
"(",
")",
"if",
"is_utc",
"==",
"'yes'",
":",
"return",
"'UTC'",
"else",
":",
"return",
"'localtime'",
"except",
"IOError",
"as",
"exc",
":",
"pass",
"# Since Wheezy",
"return",
"_get_adjtime_timezone",
"(",
")",
"if",
"'Gentoo'",
"in",
"__grains__",
"[",
"'os_family'",
"]",
":",
"if",
"not",
"os",
".",
"path",
".",
"exists",
"(",
"'/etc/adjtime'",
")",
":",
"offset_file",
"=",
"'/etc/conf.d/hwclock'",
"try",
":",
"with",
"salt",
".",
"utils",
".",
"files",
".",
"fopen",
"(",
"offset_file",
",",
"'r'",
")",
"as",
"fp_",
":",
"for",
"line",
"in",
"fp_",
":",
"line",
"=",
"salt",
".",
"utils",
".",
"stringutils",
".",
"to_unicode",
"(",
"line",
")",
"if",
"line",
".",
"startswith",
"(",
"'clock='",
")",
":",
"line",
"=",
"line",
".",
"rstrip",
"(",
"'\\n'",
")",
"line",
"=",
"line",
".",
"split",
"(",
"'='",
")",
"[",
"-",
"1",
"]",
".",
"strip",
"(",
"'\\'\"'",
")",
"if",
"line",
"==",
"'UTC'",
":",
"return",
"line",
"if",
"line",
"==",
"'local'",
":",
"return",
"'LOCAL'",
"raise",
"CommandExecutionError",
"(",
"'Correct offset value not found in {0}'",
".",
"format",
"(",
"offset_file",
")",
")",
"except",
"IOError",
"as",
"exc",
":",
"raise",
"CommandExecutionError",
"(",
"'Problem reading offset file {0}: {1}'",
".",
"format",
"(",
"offset_file",
",",
"exc",
".",
"strerror",
")",
")",
"return",
"_get_adjtime_timezone",
"(",
")",
"if",
"'Solaris'",
"in",
"__grains__",
"[",
"'os_family'",
"]",
":",
"offset_file",
"=",
"'/etc/rtc_config'",
"try",
":",
"with",
"salt",
".",
"utils",
".",
"files",
".",
"fopen",
"(",
"offset_file",
",",
"'r'",
")",
"as",
"fp_",
":",
"for",
"line",
"in",
"fp_",
":",
"line",
"=",
"salt",
".",
"utils",
".",
"stringutils",
".",
"to_unicode",
"(",
"line",
")",
"if",
"line",
".",
"startswith",
"(",
"'zone_info=GMT'",
")",
":",
"return",
"'UTC'",
"return",
"'localtime'",
"except",
"IOError",
"as",
"exc",
":",
"if",
"exc",
".",
"errno",
"==",
"errno",
".",
"ENOENT",
":",
"# offset file does not exist",
"return",
"'UTC'",
"raise",
"CommandExecutionError",
"(",
"'Problem reading offset file {0}: {1}'",
".",
"format",
"(",
"offset_file",
",",
"exc",
".",
"strerror",
")",
")",
"if",
"'AIX'",
"in",
"__grains__",
"[",
"'os_family'",
"]",
":",
"offset_file",
"=",
"'/etc/environment'",
"try",
":",
"with",
"salt",
".",
"utils",
".",
"files",
".",
"fopen",
"(",
"offset_file",
",",
"'r'",
")",
"as",
"fp_",
":",
"for",
"line",
"in",
"fp_",
":",
"line",
"=",
"salt",
".",
"utils",
".",
"stringutils",
".",
"to_unicode",
"(",
"line",
")",
"if",
"line",
".",
"startswith",
"(",
"'TZ=UTC'",
")",
":",
"return",
"'UTC'",
"return",
"'localtime'",
"except",
"IOError",
"as",
"exc",
":",
"if",
"exc",
".",
"errno",
"==",
"errno",
".",
"ENOENT",
":",
"# offset file does not exist",
"return",
"'UTC'",
"raise",
"CommandExecutionError",
"(",
"'Problem reading offset file {0}: {1}'",
".",
"format",
"(",
"offset_file",
",",
"exc",
".",
"strerror",
")",
")"
] | 40.288288 | 15.945946 |
def tool_classpath_from_products(products, key, scope):
  """Get a classpath for the tool previously registered under key in the given scope.

  :param products: The products of the current pants run.
  :type products: :class:`pants.goal.products.Products`
  :param string key: The key the tool configuration was registered under.
  :param string scope: The scope the tool configuration was registered under.
  :returns: A list of paths.
  :rtype: list
  """
  callbacks_by_scope = products.get_data('jvm_build_tools_classpath_callbacks') or {}
  scoped_callbacks = callbacks_by_scope.get(scope, {})
  callback = scoped_callbacks.get(key)
  if not callback:
    raise TaskError('No bootstrap callback registered for {key} in {scope}'
                    .format(key=key, scope=scope))
  return callback()
"def",
"tool_classpath_from_products",
"(",
"products",
",",
"key",
",",
"scope",
")",
":",
"callback_product_map",
"=",
"products",
".",
"get_data",
"(",
"'jvm_build_tools_classpath_callbacks'",
")",
"or",
"{",
"}",
"callback",
"=",
"callback_product_map",
".",
"get",
"(",
"scope",
",",
"{",
"}",
")",
".",
"get",
"(",
"key",
")",
"if",
"not",
"callback",
":",
"raise",
"TaskError",
"(",
"'No bootstrap callback registered for {key} in {scope}'",
".",
"format",
"(",
"key",
"=",
"key",
",",
"scope",
"=",
"scope",
")",
")",
"return",
"callback",
"(",
")"
] | 48.9375 | 22.1875 |
def footprints_from_address(address, distance, footprint_type='building', retain_invalid=False):
    """
    Download footprints around an address.

    The address is geocoded to a latitude-longitude point, then all
    footprints within `distance` meters north, south, east, and west of
    that point are retrieved.

    Parameters
    ----------
    address : string
        the address to geocode to a lat-long point
    distance : numeric
        distance in meters
    footprint_type : string
        type of footprint to be downloaded. OSM tag key e.g. 'building', 'landuse', 'place', etc.
    retain_invalid : bool
        if False discard any footprints with an invalid geometry

    Returns
    -------
    GeoDataFrame
    """
    # turn the address string into a (lat, lon) coordinate
    center_point = geocode(query=address)
    # delegate to the point-based downloader
    return footprints_from_point(center_point, distance,
                                 footprint_type=footprint_type,
                                 retain_invalid=retain_invalid)
"def",
"footprints_from_address",
"(",
"address",
",",
"distance",
",",
"footprint_type",
"=",
"'building'",
",",
"retain_invalid",
"=",
"False",
")",
":",
"# geocode the address string to a (lat, lon) point",
"point",
"=",
"geocode",
"(",
"query",
"=",
"address",
")",
"# get footprints within distance of this point",
"return",
"footprints_from_point",
"(",
"point",
",",
"distance",
",",
"footprint_type",
"=",
"footprint_type",
",",
"retain_invalid",
"=",
"retain_invalid",
")"
] | 32.407407 | 24.851852 |
def timeout_at(clock, coro=None, *args):
    '''Run a coroutine with a cancellation deadline at absolute time clock.

    When coro is given, it is executed and its result returned; if the
    deadline passes first, the calling task receives a cancellation
    request and a TaskTimeout exception is raised.  When coro is omitted,
    the return value is an asynchronous context manager that applies the
    same absolute deadline to a block of statements.

    Nested timeouts compose: if an outer timeout expires first,
    TimeoutCancellationError is raised instead of TaskTimeout.  If an
    inner timeout expires and its TaskTimeout escapes unhandled, an
    UncaughtTimeoutError is raised in the outer timeout.
    '''
    if not coro:
        return TimeoutAfter(clock, absolute=True)
    return _timeout_after_func(clock, True, coro, args)
"def",
"timeout_at",
"(",
"clock",
",",
"coro",
"=",
"None",
",",
"*",
"args",
")",
":",
"if",
"coro",
":",
"return",
"_timeout_after_func",
"(",
"clock",
",",
"True",
",",
"coro",
",",
"args",
")",
"return",
"TimeoutAfter",
"(",
"clock",
",",
"absolute",
"=",
"True",
")"
] | 42.45 | 24.95 |
def build(templates=None, schemes=None, base_output_dir=None):
    """Main build function to initiate building process.

    Renders every scheme against every template group and writes the
    results under the output directory.

    Parameters:
        templates: optional list of template directories; defaults to all
            discovered template directories.
        schemes: optional scheme selection passed to get_scheme_files.
        base_output_dir: output directory; defaults to ./output.

    Raises:
        LookupError: if no template or scheme is available to work with.
        PermissionError: if the output directory is not writable.
    """
    template_dirs = templates or get_template_dirs()
    scheme_files = get_scheme_files(schemes)
    base_output_dir = base_output_dir or rel_to_cwd('output')
    # need at least one template and one scheme to do anything useful
    if not template_dirs or not scheme_files:
        raise LookupError('No templates or schemes found to build with.')
    # exist_ok replaces the old try/except FileExistsError; it still raises
    # if the path exists but is not a directory, which is the right behavior
    os.makedirs(base_output_dir, exist_ok=True)
    if not os.access(base_output_dir, os.W_OK):
        raise PermissionError(
            'No write access to output directory "{}".'.format(base_output_dir))
    templates = [TemplateGroup(path) for path in template_dirs]
    build_from_job_list(scheme_files, templates, base_output_dir)
    print('Finished building process.')
"def",
"build",
"(",
"templates",
"=",
"None",
",",
"schemes",
"=",
"None",
",",
"base_output_dir",
"=",
"None",
")",
":",
"template_dirs",
"=",
"templates",
"or",
"get_template_dirs",
"(",
")",
"scheme_files",
"=",
"get_scheme_files",
"(",
"schemes",
")",
"base_output_dir",
"=",
"base_output_dir",
"or",
"rel_to_cwd",
"(",
"'output'",
")",
"# raise LookupError if there is not at least one template or scheme",
"# to work with",
"if",
"not",
"template_dirs",
"or",
"not",
"scheme_files",
":",
"raise",
"LookupError",
"# raise PermissionError if user has no write acces for $base_output_dir",
"try",
":",
"os",
".",
"makedirs",
"(",
"base_output_dir",
")",
"except",
"FileExistsError",
":",
"pass",
"if",
"not",
"os",
".",
"access",
"(",
"base_output_dir",
",",
"os",
".",
"W_OK",
")",
":",
"raise",
"PermissionError",
"templates",
"=",
"[",
"TemplateGroup",
"(",
"path",
")",
"for",
"path",
"in",
"template_dirs",
"]",
"build_from_job_list",
"(",
"scheme_files",
",",
"templates",
",",
"base_output_dir",
")",
"print",
"(",
"'Finished building process.'",
")"
] | 34.916667 | 21.291667 |
def run_plasmid_extractor(self):
        """
        Create and run the plasmid extractor system call
        """
        logging.info('Extracting plasmids')
        # Define the system call
        extract_command = 'PlasmidExtractor.py -i {inf} -o {outf} -p {plasdb} -d {db} -t {cpus} -nc' \
            .format(inf=self.path,
                    outf=self.plasmid_output,
                    plasdb=os.path.join(self.plasmid_db, 'plasmid_db.fasta'),
                    db=self.plasmid_db,
                    cpus=self.cpus)
        # Only attempt to extract plasmids if the report doesn't already exist
        if not os.path.isfile(self.plasmid_report):
            # Run the system calls
            out, err = run_subprocess(extract_command)
            # Acquire thread lock, and write the logs to file
            self.threadlock.acquire()
            try:
                write_to_logfile(extract_command, extract_command, self.logfile)
                write_to_logfile(out, err, self.logfile)
            finally:
                # always release, even if logging raises, so other threads
                # cannot deadlock on the shared lock
                self.threadlock.release()
"def",
"run_plasmid_extractor",
"(",
"self",
")",
":",
"logging",
".",
"info",
"(",
"'Extracting plasmids'",
")",
"# Define the system call",
"extract_command",
"=",
"'PlasmidExtractor.py -i {inf} -o {outf} -p {plasdb} -d {db} -t {cpus} -nc'",
".",
"format",
"(",
"inf",
"=",
"self",
".",
"path",
",",
"outf",
"=",
"self",
".",
"plasmid_output",
",",
"plasdb",
"=",
"os",
".",
"path",
".",
"join",
"(",
"self",
".",
"plasmid_db",
",",
"'plasmid_db.fasta'",
")",
",",
"db",
"=",
"self",
".",
"plasmid_db",
",",
"cpus",
"=",
"self",
".",
"cpus",
")",
"# Only attempt to extract plasmids if the report doesn't already exist",
"if",
"not",
"os",
".",
"path",
".",
"isfile",
"(",
"self",
".",
"plasmid_report",
")",
":",
"# Run the system calls",
"out",
",",
"err",
"=",
"run_subprocess",
"(",
"extract_command",
")",
"# Acquire thread lock, and write the logs to file",
"self",
".",
"threadlock",
".",
"acquire",
"(",
")",
"write_to_logfile",
"(",
"extract_command",
",",
"extract_command",
",",
"self",
".",
"logfile",
")",
"write_to_logfile",
"(",
"out",
",",
"err",
",",
"self",
".",
"logfile",
")",
"self",
".",
"threadlock",
".",
"release",
"(",
")"
] | 47.47619 | 14.047619 |
def start(self):
        """
        Start process.
        :returns: itself
        :rtype: OutputExecutor
        .. note::
            Process will be considered started, when defined banner will appear
            in process output.
        """
        super(OutputExecutor, self).start()
        # get a polling object
        # NOTE: select.poll() is not available on Windows (POSIX-only API)
        self.poll_obj = select.poll()
        # register a file descriptor
        # POLLIN because we will wait for data to read
        self.poll_obj.register(self.output(), select.POLLIN)
        try:
            # blocks until _wait_for_output reports the banner was seen
            self.wait_for(self._wait_for_output)
            # unregister the file descriptor and delete the polling object
            self.poll_obj.unregister(self.output())
        finally:
            # always drop the poll object, even if waiting timed out/raised
            del self.poll_obj
        return self
"def",
"start",
"(",
"self",
")",
":",
"super",
"(",
"OutputExecutor",
",",
"self",
")",
".",
"start",
"(",
")",
"# get a polling object",
"self",
".",
"poll_obj",
"=",
"select",
".",
"poll",
"(",
")",
"# register a file descriptor",
"# POLLIN because we will wait for data to read",
"self",
".",
"poll_obj",
".",
"register",
"(",
"self",
".",
"output",
"(",
")",
",",
"select",
".",
"POLLIN",
")",
"try",
":",
"self",
".",
"wait_for",
"(",
"self",
".",
"_wait_for_output",
")",
"# unregister the file descriptor and delete the polling object",
"self",
".",
"poll_obj",
".",
"unregister",
"(",
"self",
".",
"output",
"(",
")",
")",
"finally",
":",
"del",
"self",
".",
"poll_obj",
"return",
"self"
] | 25.827586 | 21.068966 |
def get(datasets_identifiers, identifier_type='hid', history_id=None):
    """
    Given the history_id that is displayed to the user, this function will
    download the file[s] from the history and stores them under /import/
    Return value[s] are the path[s] to the dataset[s] stored under /import/

    NOTE(review): despite the plural wording above, only the path of the
    LAST identifier in ``datasets_identifiers`` is returned, and an empty
    identifier list raises NameError (``file_path`` never bound) — confirm
    whether callers rely on the single-string return before changing it.
    """
    # Fall back to the HISTORY_ID environment variable when not given
    history_id = history_id or os.environ['HISTORY_ID']
    # The object version of bioblend is too slow in retrieving all datasets from a history
    # fallback to the non-object path
    gi = get_galaxy_connection(history_id=history_id, obj=False)
    for dataset_identifier in datasets_identifiers:
        file_path = '/import/%s' % dataset_identifier
        log.debug('Downloading gx=%s history=%s dataset=%s', gi, history_id, dataset_identifier)
        # Cache the file requests. E.g. in the example of someone doing something
        # silly like a get() for a Galaxy file in a for-loop, wouldn't want to
        # re-download every time and add that overhead.
        if not os.path.exists(file_path):
            hc = HistoryClient(gi)
            dc = DatasetClient(gi)
            history = hc.show_history(history_id, contents=True)
            # map the chosen identifier type ('hid' or e.g. 'name') to dataset id
            datasets = {ds[identifier_type]: ds['id'] for ds in history}
            if identifier_type == 'hid':
                # hids are integers in the history listing
                dataset_identifier = int(dataset_identifier)
            dc.download_dataset(datasets[dataset_identifier], file_path=file_path, use_default_filename=False)
        else:
            log.debug('Cached, not re-downloading')
    return file_path
"def",
"get",
"(",
"datasets_identifiers",
",",
"identifier_type",
"=",
"'hid'",
",",
"history_id",
"=",
"None",
")",
":",
"history_id",
"=",
"history_id",
"or",
"os",
".",
"environ",
"[",
"'HISTORY_ID'",
"]",
"# The object version of bioblend is to slow in retrieving all datasets from a history",
"# fallback to the non-object path",
"gi",
"=",
"get_galaxy_connection",
"(",
"history_id",
"=",
"history_id",
",",
"obj",
"=",
"False",
")",
"for",
"dataset_identifier",
"in",
"datasets_identifiers",
":",
"file_path",
"=",
"'/import/%s'",
"%",
"dataset_identifier",
"log",
".",
"debug",
"(",
"'Downloading gx=%s history=%s dataset=%s'",
",",
"gi",
",",
"history_id",
",",
"dataset_identifier",
")",
"# Cache the file requests. E.g. in the example of someone doing something",
"# silly like a get() for a Galaxy file in a for-loop, wouldn't want to",
"# re-download every time and add that overhead.",
"if",
"not",
"os",
".",
"path",
".",
"exists",
"(",
"file_path",
")",
":",
"hc",
"=",
"HistoryClient",
"(",
"gi",
")",
"dc",
"=",
"DatasetClient",
"(",
"gi",
")",
"history",
"=",
"hc",
".",
"show_history",
"(",
"history_id",
",",
"contents",
"=",
"True",
")",
"datasets",
"=",
"{",
"ds",
"[",
"identifier_type",
"]",
":",
"ds",
"[",
"'id'",
"]",
"for",
"ds",
"in",
"history",
"}",
"if",
"identifier_type",
"==",
"'hid'",
":",
"dataset_identifier",
"=",
"int",
"(",
"dataset_identifier",
")",
"dc",
".",
"download_dataset",
"(",
"datasets",
"[",
"dataset_identifier",
"]",
",",
"file_path",
"=",
"file_path",
",",
"use_default_filename",
"=",
"False",
")",
"else",
":",
"log",
".",
"debug",
"(",
"'Cached, not re-downloading'",
")",
"return",
"file_path"
] | 54.107143 | 23.75 |
def add_or_replace_filter(self, new_filter):
        """Replace a null/placeholder filter outright, or append the new
        filter to the existing semicolon-separated list (skipping duplicates)."""
        current = self.filter
        if current.lower() in self._FILTERS_TO_REPLACE:
            self.filter = new_filter
            return
        if new_filter not in current.split(";"):
            self.filter = ";".join([current, new_filter])
"def",
"add_or_replace_filter",
"(",
"self",
",",
"new_filter",
")",
":",
"if",
"self",
".",
"filter",
".",
"lower",
"(",
")",
"in",
"self",
".",
"_FILTERS_TO_REPLACE",
":",
"self",
".",
"filter",
"=",
"new_filter",
"elif",
"new_filter",
"not",
"in",
"self",
".",
"filter",
".",
"split",
"(",
"\";\"",
")",
":",
"self",
".",
"filter",
"=",
"\";\"",
".",
"join",
"(",
"[",
"self",
".",
"filter",
",",
"new_filter",
"]",
")"
] | 52.142857 | 8.142857 |
def traverse(fn, expr, type=ir.Expr, container=Stack):
    """Generic expression-tree traversal helper.

    Parameters
    ----------
    fn : Callable[[ir.Expr], Tuple[Union[Boolean, Iterable], Any]]
        Applied to every visited expression; must return a two-tuple whose
        first element steers the traversal (``proceed``, ``halt``, or an
        iterable of children) and whose second element, when not None, is
        yielded to the caller.
    expr : ir.Expr
        The traversable expression, or an iterable of expressions.
    type : Type
        Only instances of this type are traversed.
    container : Union[Stack, Queue], default Stack
        Determines the traversal order.
    """
    roots = expr if isinstance(expr, collections.abc.Iterable) else [expr]
    pending = container(node for node in roots if isinstance(node, type))
    visited = set()
    while pending:
        node = pending.get()
        node_op = node.op()
        # each op is processed at most once
        if node_op in visited:
            continue
        visited.add(node_op)
        control, result = fn(node)
        if result is not None:
            yield result
        if control is halt:
            continue
        if control is proceed:
            children = node_op.flat_args()
        elif isinstance(control, collections.abc.Iterable):
            children = control
        else:
            raise TypeError(
                'First item of the returned tuple must be '
                'an instance of boolean or iterable'
            )
        pending.extend(
            child for child in pending.visitor(children)
            if isinstance(child, type)
        )
"def",
"traverse",
"(",
"fn",
",",
"expr",
",",
"type",
"=",
"ir",
".",
"Expr",
",",
"container",
"=",
"Stack",
")",
":",
"args",
"=",
"expr",
"if",
"isinstance",
"(",
"expr",
",",
"collections",
".",
"abc",
".",
"Iterable",
")",
"else",
"[",
"expr",
"]",
"todo",
"=",
"container",
"(",
"arg",
"for",
"arg",
"in",
"args",
"if",
"isinstance",
"(",
"arg",
",",
"type",
")",
")",
"seen",
"=",
"set",
"(",
")",
"while",
"todo",
":",
"expr",
"=",
"todo",
".",
"get",
"(",
")",
"op",
"=",
"expr",
".",
"op",
"(",
")",
"if",
"op",
"in",
"seen",
":",
"continue",
"else",
":",
"seen",
".",
"add",
"(",
"op",
")",
"control",
",",
"result",
"=",
"fn",
"(",
"expr",
")",
"if",
"result",
"is",
"not",
"None",
":",
"yield",
"result",
"if",
"control",
"is",
"not",
"halt",
":",
"if",
"control",
"is",
"proceed",
":",
"args",
"=",
"op",
".",
"flat_args",
"(",
")",
"elif",
"isinstance",
"(",
"control",
",",
"collections",
".",
"abc",
".",
"Iterable",
")",
":",
"args",
"=",
"control",
"else",
":",
"raise",
"TypeError",
"(",
"'First item of the returned tuple must be '",
"'an instance of boolean or iterable'",
")",
"todo",
".",
"extend",
"(",
"arg",
"for",
"arg",
"in",
"todo",
".",
"visitor",
"(",
"args",
")",
"if",
"isinstance",
"(",
"arg",
",",
"type",
")",
")"
] | 32.826087 | 20.413043 |
def js_reverse_inline(context):
    """
    Render a javascript snippet that can generate URLs from the names given
    to those URLs, suitable for inlining into a template.
    """
    if 'request' in context:
        urlconf = getattr(context['request'], 'urlconf', None)
    else:
        urlconf = None
    resolver = get_resolver(urlconf)
    return mark_safe(generate_js(resolver))
"def",
"js_reverse_inline",
"(",
"context",
")",
":",
"if",
"'request'",
"in",
"context",
":",
"default_urlresolver",
"=",
"get_resolver",
"(",
"getattr",
"(",
"context",
"[",
"'request'",
"]",
",",
"'urlconf'",
",",
"None",
")",
")",
"else",
":",
"default_urlresolver",
"=",
"get_resolver",
"(",
"None",
")",
"return",
"mark_safe",
"(",
"generate_js",
"(",
"default_urlresolver",
")",
")"
] | 37.8 | 15.4 |
def hincrby(self, hashkey, attribute, increment=1):
        """Emulate hincrby.

        Increment the integer value of ``attribute`` in the hash stored at
        ``hashkey`` by ``increment`` (default 1), mirroring Redis HINCRBY.
        """
        # NOTE: `long` is the Python 2 integer type, so this path is Python 2
        # only (or relies on a `long` alias defined elsewhere in the module).
        return self._hincrby(hashkey, attribute, 'HINCRBY', long, increment)
"def",
"hincrby",
"(",
"self",
",",
"hashkey",
",",
"attribute",
",",
"increment",
"=",
"1",
")",
":",
"return",
"self",
".",
"_hincrby",
"(",
"hashkey",
",",
"attribute",
",",
"'HINCRBY'",
",",
"long",
",",
"increment",
")"
] | 39.25 | 21.75 |
def _parse_config(self):
"""Parses the mlag global configuration
Returns:
dict: A dict object that is intended to be merged into the
resource dict
"""
config = self.get_block('mlag configuration')
cfg = dict()
cfg.update(self._parse_domain_id(config))
cfg.update(self._parse_local_interface(config))
cfg.update(self._parse_peer_address(config))
cfg.update(self._parse_peer_link(config))
cfg.update(self._parse_shutdown(config))
return dict(config=cfg) | [
"def",
"_parse_config",
"(",
"self",
")",
":",
"config",
"=",
"self",
".",
"get_block",
"(",
"'mlag configuration'",
")",
"cfg",
"=",
"dict",
"(",
")",
"cfg",
".",
"update",
"(",
"self",
".",
"_parse_domain_id",
"(",
"config",
")",
")",
"cfg",
".",
"update",
"(",
"self",
".",
"_parse_local_interface",
"(",
"config",
")",
")",
"cfg",
".",
"update",
"(",
"self",
".",
"_parse_peer_address",
"(",
"config",
")",
")",
"cfg",
".",
"update",
"(",
"self",
".",
"_parse_peer_link",
"(",
"config",
")",
")",
"cfg",
".",
"update",
"(",
"self",
".",
"_parse_shutdown",
"(",
"config",
")",
")",
"return",
"dict",
"(",
"config",
"=",
"cfg",
")"
] | 36.933333 | 14.4 |
def mode(self):
        """
        The currently selected mode of the port.

        Reading returns the mode string reported by the driver; writing sets
        the mode. Generally, changing the mode causes any sensor or motor
        devices associated with the port to be removed and new ones loaded,
        though this depends on the individual driver implementing this class.
        """
        cached_attr, current_mode = self.get_attr_string(self._mode, 'mode')
        self._mode = cached_attr
        return current_mode
"def",
"mode",
"(",
"self",
")",
":",
"self",
".",
"_mode",
",",
"value",
"=",
"self",
".",
"get_attr_string",
"(",
"self",
".",
"_mode",
",",
"'mode'",
")",
"return",
"value"
] | 47.555556 | 24 |
def quote_value(value):
    """
    Convert a Python value to the equivalent MySQL literal.

    Mostly delegates directly to the mysql lib, but some exceptions exist.
    Dispatch order below matters: e.g. text is handled before the generic
    ``__iter__`` check, and data/number checks precede the datetime ones.
    """
    try:
        # NOTE(review): `== None` (not `is None`) is presumably deliberate so
        # that library Null objects overriding __eq__ also match — confirm.
        if value == None:
            return SQL_NULL
        elif isinstance(value, SQL):
            # already SQL; re-quote its template/params as a unit
            return quote_sql(value.template, value.param)
        elif is_text(value):
            # escape each character via ESCAPE_DCT and wrap in single quotes
            return SQL("'" + "".join(ESCAPE_DCT.get(c, c) for c in value) + "'")
        elif is_data(value):
            # mappings are serialized to JSON, then quoted as text
            return quote_value(json_encode(value))
        elif is_number(value):
            return SQL(text_type(value))
        elif isinstance(value, datetime):
            # microsecond-precision datetime literal via str_to_date
            return SQL("str_to_date('" + value.strftime("%Y%m%d%H%M%S.%f") + "', '%Y%m%d%H%i%s.%f')")
        elif isinstance(value, Date):
            return SQL("str_to_date('" + value.format("%Y%m%d%H%M%S.%f") + "', '%Y%m%d%H%i%s.%f')")
        elif hasattr(value, '__iter__'):
            # any remaining iterable is serialized to JSON, then quoted
            return quote_value(json_encode(value))
        else:
            # last resort: stringify, then quote as text
            return quote_value(text_type(value))
    except Exception as e:
        Log.error("problem quoting SQL {{value}}", value=repr(value), cause=e)
"def",
"quote_value",
"(",
"value",
")",
":",
"try",
":",
"if",
"value",
"==",
"None",
":",
"return",
"SQL_NULL",
"elif",
"isinstance",
"(",
"value",
",",
"SQL",
")",
":",
"return",
"quote_sql",
"(",
"value",
".",
"template",
",",
"value",
".",
"param",
")",
"elif",
"is_text",
"(",
"value",
")",
":",
"return",
"SQL",
"(",
"\"'\"",
"+",
"\"\"",
".",
"join",
"(",
"ESCAPE_DCT",
".",
"get",
"(",
"c",
",",
"c",
")",
"for",
"c",
"in",
"value",
")",
"+",
"\"'\"",
")",
"elif",
"is_data",
"(",
"value",
")",
":",
"return",
"quote_value",
"(",
"json_encode",
"(",
"value",
")",
")",
"elif",
"is_number",
"(",
"value",
")",
":",
"return",
"SQL",
"(",
"text_type",
"(",
"value",
")",
")",
"elif",
"isinstance",
"(",
"value",
",",
"datetime",
")",
":",
"return",
"SQL",
"(",
"\"str_to_date('\"",
"+",
"value",
".",
"strftime",
"(",
"\"%Y%m%d%H%M%S.%f\"",
")",
"+",
"\"', '%Y%m%d%H%i%s.%f')\"",
")",
"elif",
"isinstance",
"(",
"value",
",",
"Date",
")",
":",
"return",
"SQL",
"(",
"\"str_to_date('\"",
"+",
"value",
".",
"format",
"(",
"\"%Y%m%d%H%M%S.%f\"",
")",
"+",
"\"', '%Y%m%d%H%i%s.%f')\"",
")",
"elif",
"hasattr",
"(",
"value",
",",
"'__iter__'",
")",
":",
"return",
"quote_value",
"(",
"json_encode",
"(",
"value",
")",
")",
"else",
":",
"return",
"quote_value",
"(",
"text_type",
"(",
"value",
")",
")",
"except",
"Exception",
"as",
"e",
":",
"Log",
".",
"error",
"(",
"\"problem quoting SQL {{value}}\"",
",",
"value",
"=",
"repr",
"(",
"value",
")",
",",
"cause",
"=",
"e",
")"
] | 42.153846 | 16.923077 |
def _compute_term3(self, C, rrup):
"""
This computes the term f3 in equation 34, page 1021 but corrected
according to the erratum.
"""
f3 = np.zeros_like(rrup)
idx_between_70_130 = (rrup > 70) & (rrup <= 130)
idx_greater_130 = rrup > 130
f3[idx_between_70_130] = (
C['c9'] * (np.log(rrup[idx_between_70_130]) - np.log(70))
)
f3[idx_greater_130] = (
C['c9'] * (np.log(rrup[idx_greater_130]) - np.log(70)) +
C['c10'] * (np.log(rrup[idx_greater_130]) - np.log(130))
)
return f3 | [
"def",
"_compute_term3",
"(",
"self",
",",
"C",
",",
"rrup",
")",
":",
"f3",
"=",
"np",
".",
"zeros_like",
"(",
"rrup",
")",
"idx_between_70_130",
"=",
"(",
"rrup",
">",
"70",
")",
"&",
"(",
"rrup",
"<=",
"130",
")",
"idx_greater_130",
"=",
"rrup",
">",
"130",
"f3",
"[",
"idx_between_70_130",
"]",
"=",
"(",
"C",
"[",
"'c9'",
"]",
"*",
"(",
"np",
".",
"log",
"(",
"rrup",
"[",
"idx_between_70_130",
"]",
")",
"-",
"np",
".",
"log",
"(",
"70",
")",
")",
")",
"f3",
"[",
"idx_greater_130",
"]",
"=",
"(",
"C",
"[",
"'c9'",
"]",
"*",
"(",
"np",
".",
"log",
"(",
"rrup",
"[",
"idx_greater_130",
"]",
")",
"-",
"np",
".",
"log",
"(",
"70",
")",
")",
"+",
"C",
"[",
"'c10'",
"]",
"*",
"(",
"np",
".",
"log",
"(",
"rrup",
"[",
"idx_greater_130",
"]",
")",
"-",
"np",
".",
"log",
"(",
"130",
")",
")",
")",
"return",
"f3"
] | 29.55 | 20.95 |
def ls_packages(self):
        """
        List packages in this store.

        Returns a list of ``(package_name, tag, hash)`` tuples, one row per
        tag, with a single empty-string tag for untagged hashes.
        """
        root = os.path.join(self._path, self.PKG_DIR)
        if not os.path.isdir(root):
            return []
        listing = []
        for team in sub_dirs(root):
            for user in sub_dirs(self.team_path(team)):
                for pkg in sub_dirs(self.user_path(team, user)):
                    pkgpath = self.package_path(team, user, pkg)
                    contents_dir = os.path.join(pkgpath, PackageStore.CONTENTS_DIR)
                    tags_dir = os.path.join(pkgpath, PackageStore.TAGS_DIR)
                    # start with every known hash mapped to an empty tag list
                    hash_to_tags = {pkghash: [] for pkghash in sub_files(contents_dir)}
                    for tag in sub_files(tags_dir):
                        with open(os.path.join(tags_dir, tag), 'r') as tagfile:
                            hash_to_tags[tagfile.read()].append(tag)
                    for pkghash, tags in hash_to_tags.items():
                        # add teams here if any other than DEFAULT_TEAM should be hidden.
                        team_prefix = '' if team in (DEFAULT_TEAM,) else team + ':'
                        fullpkg = "{team}{owner}/{pkg}".format(team=team_prefix, owner=user, pkg=pkg)
                        # Display a separate full line per tag like Docker;
                        # untagged hashes get one row with an empty tag.
                        for tag in (tags or [""]):
                            listing.append((fullpkg, str(tag), pkghash))
        return listing
"def",
"ls_packages",
"(",
"self",
")",
":",
"packages",
"=",
"[",
"]",
"pkgdir",
"=",
"os",
".",
"path",
".",
"join",
"(",
"self",
".",
"_path",
",",
"self",
".",
"PKG_DIR",
")",
"if",
"not",
"os",
".",
"path",
".",
"isdir",
"(",
"pkgdir",
")",
":",
"return",
"[",
"]",
"for",
"team",
"in",
"sub_dirs",
"(",
"pkgdir",
")",
":",
"for",
"user",
"in",
"sub_dirs",
"(",
"self",
".",
"team_path",
"(",
"team",
")",
")",
":",
"for",
"pkg",
"in",
"sub_dirs",
"(",
"self",
".",
"user_path",
"(",
"team",
",",
"user",
")",
")",
":",
"pkgpath",
"=",
"self",
".",
"package_path",
"(",
"team",
",",
"user",
",",
"pkg",
")",
"pkgmap",
"=",
"{",
"h",
":",
"[",
"]",
"for",
"h",
"in",
"sub_files",
"(",
"os",
".",
"path",
".",
"join",
"(",
"pkgpath",
",",
"PackageStore",
".",
"CONTENTS_DIR",
")",
")",
"}",
"for",
"tag",
"in",
"sub_files",
"(",
"os",
".",
"path",
".",
"join",
"(",
"pkgpath",
",",
"PackageStore",
".",
"TAGS_DIR",
")",
")",
":",
"with",
"open",
"(",
"os",
".",
"path",
".",
"join",
"(",
"pkgpath",
",",
"PackageStore",
".",
"TAGS_DIR",
",",
"tag",
")",
",",
"'r'",
")",
"as",
"tagfile",
":",
"pkghash",
"=",
"tagfile",
".",
"read",
"(",
")",
"pkgmap",
"[",
"pkghash",
"]",
".",
"append",
"(",
"tag",
")",
"for",
"pkghash",
",",
"tags",
"in",
"pkgmap",
".",
"items",
"(",
")",
":",
"# add teams here if any other than DEFAULT_TEAM should be hidden.",
"team_token",
"=",
"''",
"if",
"team",
"in",
"(",
"DEFAULT_TEAM",
",",
")",
"else",
"team",
"+",
"':'",
"fullpkg",
"=",
"\"{team}{owner}/{pkg}\"",
".",
"format",
"(",
"team",
"=",
"team_token",
",",
"owner",
"=",
"user",
",",
"pkg",
"=",
"pkg",
")",
"# Add an empty string tag for untagged hashes",
"displaytags",
"=",
"tags",
"if",
"tags",
"else",
"[",
"\"\"",
"]",
"# Display a separate full line per tag like Docker",
"for",
"tag",
"in",
"displaytags",
":",
"packages",
".",
"append",
"(",
"(",
"fullpkg",
",",
"str",
"(",
"tag",
")",
",",
"pkghash",
")",
")",
"return",
"packages"
] | 53.857143 | 24.714286 |
def new(cls, freeform_builder, x, y):
        """Return a new _LineSegment object ending at point *(x, y)*.

        Both *x* and *y* are rounded to the nearest integer before use.
        """
        x_rounded = int(round(x))
        y_rounded = int(round(y))
        return cls(freeform_builder, x_rounded, y_rounded)
"def",
"new",
"(",
"cls",
",",
"freeform_builder",
",",
"x",
",",
"y",
")",
":",
"return",
"cls",
"(",
"freeform_builder",
",",
"int",
"(",
"round",
"(",
"x",
")",
")",
",",
"int",
"(",
"round",
"(",
"y",
")",
")",
")"
] | 42.333333 | 16.666667 |
def _credssp_processor(self, context):
        """
        Coroutine implementing the CredSSP authentication state machine.

        Each ``yield`` hands a prepared request (with the next CredSSP token
        in its header) back to the caller and receives the corresponding HTTP
        response. Raises Exception when the remote host rejects any stage of
        the handshake or the final request still returns 401.
        """
        http_response = (yield)
        credssp_context = self._get_credssp_header(http_response)
        if credssp_context is None:
            raise Exception('The remote host did not respond with a \'www-authenticate\' header containing '
                            '\'CredSSP\' as an available authentication mechanism')
        # 1. First, secure the channel with a TLS Handshake
        if not credssp_context:
            self.tls_connection = SSL.Connection(self.tls_context)
            self.tls_connection.set_connect_state()
        # drive the memory-BIO handshake: each WantReadError means more
        # handshake bytes must be exchanged with the server via HTTP headers
        while True:
            try:
                self.tls_connection.do_handshake()
            except SSL.WantReadError:
                http_response = yield self._set_credssp_header(http_response.request, self.tls_connection.bio_read(4096))
                credssp_context = self._get_credssp_header(http_response)
                if credssp_context is None or not credssp_context:
                    raise Exception('The remote host rejected the CredSSP TLS handshake')
                self.tls_connection.bio_write(credssp_context)
            else:
                break
        # add logging to display the negotiated cipher (move to a function)
        openssl_lib = _util.binding.lib
        ffi = _util.binding.ffi
        cipher = openssl_lib.SSL_get_current_cipher(self.tls_connection._ssl)
        cipher_name = ffi.string( openssl_lib.SSL_CIPHER_get_name(cipher))
        log.debug("Negotiated TLS Cipher: %s", cipher_name)
        # 2. Send an TSRequest containing an NTLM Negotiate Request
        context_generator = context.initialize_security_context()
        negotiate_token = context_generator.send(None)
        log.debug("NTLM Type 1: %s", AsHex(negotiate_token))
        ts_request = TSRequest()
        ts_request['negoTokens'] = negotiate_token
        self.tls_connection.send(ts_request.getData())
        http_response = yield self._set_credssp_header(http_response.request, self.tls_connection.bio_read(4096))
        # Extract and decrypt the encoded TSRequest response struct from the Negotiate header
        authenticate_header = self._get_credssp_header(http_response)
        if not authenticate_header or authenticate_header is None:
            raise Exception("The remote host rejected the CredSSP negotiation token")
        self.tls_connection.bio_write(authenticate_header)
        # 3. NTLM Challenge Response and Server Public Key Validation
        ts_request = TSRequest()
        ts_request.fromString(self.tls_connection.recv(8192))
        challenge_token = ts_request['negoTokens']
        log.debug("NTLM Type 2: %s", AsHex(challenge_token))
        server_cert = self.tls_connection.get_peer_certificate()
        # not using channel bindings
        #certificate_digest = base64.b16decode(server_cert.digest('SHA256').replace(':', ''))
        ## channel_binding_structure = gss_channel_bindings_struct()
        ## channel_binding_structure['application_data'] = "tls-server-end-point:" + certificate_digest
        public_key = HttpCredSSPAuth._get_rsa_public_key(server_cert)
        # The _RSAPublicKey must be 'wrapped' using the negotiated GSSAPI mechanism and send to the server along with
        # the final SPNEGO token. This step of the CredSSP protocol is designed to thwart 'man-in-the-middle' attacks
        # Build and encrypt the response to the server
        ts_request = TSRequest()
        type3= context_generator.send(challenge_token)
        log.debug("NTLM Type 3: %s", AsHex(type3))
        ts_request['negoTokens'] = type3
        public_key_encrypted, signature = context.wrap_message(public_key)
        ts_request['pubKeyAuth'] = signature + public_key_encrypted
        self.tls_connection.send(ts_request.getData())
        enc_type3 = self.tls_connection.bio_read(8192)
        http_response = yield self._set_credssp_header(http_response.request, enc_type3)
        # TLS decrypt the response, then ASN decode and check the error code
        auth_response = self._get_credssp_header(http_response)
        if not auth_response or auth_response is None:
            raise Exception("The remote host rejected the challenge response")
        self.tls_connection.bio_write(auth_response)
        ts_request = TSRequest()
        ts_request.fromString(self.tls_connection.recv(8192))
        # TODO: determine how to validate server certificate here
        #a = ts_request['pubKeyAuth']
        # print ":".join("{:02x}".format(ord(c)) for c in a)
        # 4. Send the Credentials to be delegated, these are encrypted with both NTLM v2 and then by TLS
        tsp = TSPasswordCreds()
        tsp['domain'] = self.password_authenticator.get_domain()
        tsp['username'] = self.password_authenticator.get_username()
        tsp['password'] = self.password_authenticator.get_password()
        tsc = TSCredentials()
        tsc['type'] = 1
        tsc['credentials'] = tsp.getData()
        ts_request = TSRequest()
        encrypted, signature = context.wrap_message(tsc.getData())
        ts_request['authInfo'] = signature + encrypted
        self.tls_connection.send(ts_request.getData())
        token = self.tls_connection.bio_read(8192)
        # replay the original request body, now encrypted over the TLS channel
        http_response.request.body = self.body
        http_response = yield self._set_credssp_header(self._encrypt(http_response.request, self.tls_connection), token)
        if http_response.status_code == 401:
            raise Exception('Authentication Failed')
"def",
"_credssp_processor",
"(",
"self",
",",
"context",
")",
":",
"http_response",
"=",
"(",
"yield",
")",
"credssp_context",
"=",
"self",
".",
"_get_credssp_header",
"(",
"http_response",
")",
"if",
"credssp_context",
"is",
"None",
":",
"raise",
"Exception",
"(",
"'The remote host did not respond with a \\'www-authenticate\\' header containing '",
"'\\'CredSSP\\' as an available authentication mechanism'",
")",
"# 1. First, secure the channel with a TLS Handshake",
"if",
"not",
"credssp_context",
":",
"self",
".",
"tls_connection",
"=",
"SSL",
".",
"Connection",
"(",
"self",
".",
"tls_context",
")",
"self",
".",
"tls_connection",
".",
"set_connect_state",
"(",
")",
"while",
"True",
":",
"try",
":",
"self",
".",
"tls_connection",
".",
"do_handshake",
"(",
")",
"except",
"SSL",
".",
"WantReadError",
":",
"http_response",
"=",
"yield",
"self",
".",
"_set_credssp_header",
"(",
"http_response",
".",
"request",
",",
"self",
".",
"tls_connection",
".",
"bio_read",
"(",
"4096",
")",
")",
"credssp_context",
"=",
"self",
".",
"_get_credssp_header",
"(",
"http_response",
")",
"if",
"credssp_context",
"is",
"None",
"or",
"not",
"credssp_context",
":",
"raise",
"Exception",
"(",
"'The remote host rejected the CredSSP TLS handshake'",
")",
"self",
".",
"tls_connection",
".",
"bio_write",
"(",
"credssp_context",
")",
"else",
":",
"break",
"# add logging to display the negotiated cipher (move to a function)",
"openssl_lib",
"=",
"_util",
".",
"binding",
".",
"lib",
"ffi",
"=",
"_util",
".",
"binding",
".",
"ffi",
"cipher",
"=",
"openssl_lib",
".",
"SSL_get_current_cipher",
"(",
"self",
".",
"tls_connection",
".",
"_ssl",
")",
"cipher_name",
"=",
"ffi",
".",
"string",
"(",
"openssl_lib",
".",
"SSL_CIPHER_get_name",
"(",
"cipher",
")",
")",
"log",
".",
"debug",
"(",
"\"Negotiated TLS Cipher: %s\"",
",",
"cipher_name",
")",
"# 2. Send an TSRequest containing an NTLM Negotiate Request",
"context_generator",
"=",
"context",
".",
"initialize_security_context",
"(",
")",
"negotiate_token",
"=",
"context_generator",
".",
"send",
"(",
"None",
")",
"log",
".",
"debug",
"(",
"\"NTLM Type 1: %s\"",
",",
"AsHex",
"(",
"negotiate_token",
")",
")",
"ts_request",
"=",
"TSRequest",
"(",
")",
"ts_request",
"[",
"'negoTokens'",
"]",
"=",
"negotiate_token",
"self",
".",
"tls_connection",
".",
"send",
"(",
"ts_request",
".",
"getData",
"(",
")",
")",
"http_response",
"=",
"yield",
"self",
".",
"_set_credssp_header",
"(",
"http_response",
".",
"request",
",",
"self",
".",
"tls_connection",
".",
"bio_read",
"(",
"4096",
")",
")",
"# Extract and decrypt the encoded TSRequest response struct from the Negotiate header",
"authenticate_header",
"=",
"self",
".",
"_get_credssp_header",
"(",
"http_response",
")",
"if",
"not",
"authenticate_header",
"or",
"authenticate_header",
"is",
"None",
":",
"raise",
"Exception",
"(",
"\"The remote host rejected the CredSSP negotiation token\"",
")",
"self",
".",
"tls_connection",
".",
"bio_write",
"(",
"authenticate_header",
")",
"# NTLM Challenge Response and Server Public Key Validation",
"ts_request",
"=",
"TSRequest",
"(",
")",
"ts_request",
".",
"fromString",
"(",
"self",
".",
"tls_connection",
".",
"recv",
"(",
"8192",
")",
")",
"challenge_token",
"=",
"ts_request",
"[",
"'negoTokens'",
"]",
"log",
".",
"debug",
"(",
"\"NTLM Type 2: %s\"",
",",
"AsHex",
"(",
"challenge_token",
")",
")",
"server_cert",
"=",
"self",
".",
"tls_connection",
".",
"get_peer_certificate",
"(",
")",
"# not using channel bindings",
"#certificate_digest = base64.b16decode(server_cert.digest('SHA256').replace(':', ''))",
"## channel_binding_structure = gss_channel_bindings_struct()",
"## channel_binding_structure['application_data'] = \"tls-server-end-point:\" + certificate_digest",
"public_key",
"=",
"HttpCredSSPAuth",
".",
"_get_rsa_public_key",
"(",
"server_cert",
")",
"# The _RSAPublicKey must be 'wrapped' using the negotiated GSSAPI mechanism and send to the server along with",
"# the final SPNEGO token. This step of the CredSSP protocol is designed to thwart 'man-in-the-middle' attacks",
"# Build and encrypt the response to the server",
"ts_request",
"=",
"TSRequest",
"(",
")",
"type3",
"=",
"context_generator",
".",
"send",
"(",
"challenge_token",
")",
"log",
".",
"debug",
"(",
"\"NTLM Type 3: %s\"",
",",
"AsHex",
"(",
"type3",
")",
")",
"ts_request",
"[",
"'negoTokens'",
"]",
"=",
"type3",
"public_key_encrypted",
",",
"signature",
"=",
"context",
".",
"wrap_message",
"(",
"public_key",
")",
"ts_request",
"[",
"'pubKeyAuth'",
"]",
"=",
"signature",
"+",
"public_key_encrypted",
"self",
".",
"tls_connection",
".",
"send",
"(",
"ts_request",
".",
"getData",
"(",
")",
")",
"enc_type3",
"=",
"self",
".",
"tls_connection",
".",
"bio_read",
"(",
"8192",
")",
"http_response",
"=",
"yield",
"self",
".",
"_set_credssp_header",
"(",
"http_response",
".",
"request",
",",
"enc_type3",
")",
"# TLS decrypt the response, then ASN decode and check the error code",
"auth_response",
"=",
"self",
".",
"_get_credssp_header",
"(",
"http_response",
")",
"if",
"not",
"auth_response",
"or",
"auth_response",
"is",
"None",
":",
"raise",
"Exception",
"(",
"\"The remote host rejected the challenge response\"",
")",
"self",
".",
"tls_connection",
".",
"bio_write",
"(",
"auth_response",
")",
"ts_request",
"=",
"TSRequest",
"(",
")",
"ts_request",
".",
"fromString",
"(",
"self",
".",
"tls_connection",
".",
"recv",
"(",
"8192",
")",
")",
"# TODO: determine how to validate server certificate here",
"#a = ts_request['pubKeyAuth']",
"# print \":\".join(\"{:02x}\".format(ord(c)) for c in a)",
"# 4. Send the Credentials to be delegated, these are encrypted with both NTLM v2 and then by TLS",
"tsp",
"=",
"TSPasswordCreds",
"(",
")",
"tsp",
"[",
"'domain'",
"]",
"=",
"self",
".",
"password_authenticator",
".",
"get_domain",
"(",
")",
"tsp",
"[",
"'username'",
"]",
"=",
"self",
".",
"password_authenticator",
".",
"get_username",
"(",
")",
"tsp",
"[",
"'password'",
"]",
"=",
"self",
".",
"password_authenticator",
".",
"get_password",
"(",
")",
"tsc",
"=",
"TSCredentials",
"(",
")",
"tsc",
"[",
"'type'",
"]",
"=",
"1",
"tsc",
"[",
"'credentials'",
"]",
"=",
"tsp",
".",
"getData",
"(",
")",
"ts_request",
"=",
"TSRequest",
"(",
")",
"encrypted",
",",
"signature",
"=",
"context",
".",
"wrap_message",
"(",
"tsc",
".",
"getData",
"(",
")",
")",
"ts_request",
"[",
"'authInfo'",
"]",
"=",
"signature",
"+",
"encrypted",
"self",
".",
"tls_connection",
".",
"send",
"(",
"ts_request",
".",
"getData",
"(",
")",
")",
"token",
"=",
"self",
".",
"tls_connection",
".",
"bio_read",
"(",
"8192",
")",
"http_response",
".",
"request",
".",
"body",
"=",
"self",
".",
"body",
"http_response",
"=",
"yield",
"self",
".",
"_set_credssp_header",
"(",
"self",
".",
"_encrypt",
"(",
"http_response",
".",
"request",
",",
"self",
".",
"tls_connection",
")",
",",
"token",
")",
"if",
"http_response",
".",
"status_code",
"==",
"401",
":",
"raise",
"Exception",
"(",
"'Authentication Failed'",
")"
] | 48.699115 | 26.557522 |
def sortDictList(dictList, **kwargs):
    '''
    Sort a list of dicts on one or more keys, earlier keys taking priority.

    Keyword Args:
        cond_keys (list): required; key names to compare, highest priority first
        eq_func (callable): optional equality test, defaults to ``==``
        gt_func (callable): optional greater-than test, defaults to ``>``
        reverse (bool): optional; sort descending when True (default False)
        lt_func (callable): accepted for backward compatibility but unused

    students = [
        {'name':'john','class':'A', 'year':15},
        {'name':'jane','class':'B', 'year':12},
        {'name':'dave','class':'B', 'year':10}
    ]
    rslt = sortDictList(students,cond_keys=['name','class','year'])
    pobj(rslt)
    rslt = sortDictList(students,cond_keys=['class','year','name'])
    pobj(rslt)
    rslt = sortDictList(students,cond_keys=['year','name','class'])
    pobj(rslt)
    '''
    # kwargs.get replaces the previous repetitive if/else extraction blocks.
    eq_func = kwargs.get('eq_func', lambda a, b: a == b)
    gt_func = kwargs.get('gt_func', lambda a, b: a > b)
    # NOTE: 'lt_func' was historically accepted but never used by the
    # comparison below; it remains accepted (and ignored) for compatibility.
    reverse = kwargs.get('reverse', False)
    keys = kwargs['cond_keys']

    def cmp_dict(d1, d2):
        # Compare two dicts key by key; the first non-equal key decides.
        for key in keys:
            if eq_func(d1[key], d2[key]):
                continue
            return 1 if gt_func(d1[key], d2[key]) else -1
        return 0

    # sorted() returns a new list; the input list is left untouched.
    return sorted(dictList, key=functools.cmp_to_key(cmp_dict), reverse=reverse)
"def",
"sortDictList",
"(",
"dictList",
",",
"*",
"*",
"kwargs",
")",
":",
"def",
"default_eq_func",
"(",
"value1",
",",
"value2",
")",
":",
"cond",
"=",
"(",
"value1",
"==",
"value2",
")",
"return",
"(",
"cond",
")",
"def",
"default_gt_func",
"(",
"value1",
",",
"value2",
")",
":",
"cond",
"=",
"(",
"value1",
">",
"value2",
")",
"return",
"(",
"cond",
")",
"def",
"default_lt_func",
"(",
"value1",
",",
"value2",
")",
":",
"cond",
"=",
"(",
"value1",
"<",
"value2",
")",
"return",
"(",
"cond",
")",
"if",
"(",
"'eq_func'",
"in",
"kwargs",
")",
":",
"eq_func",
"=",
"kwargs",
"[",
"'eq_func'",
"]",
"else",
":",
"eq_func",
"=",
"default_eq_func",
"if",
"(",
"'gt_func'",
"in",
"kwargs",
")",
":",
"gt_func",
"=",
"kwargs",
"[",
"'gt_func'",
"]",
"else",
":",
"gt_func",
"=",
"default_gt_func",
"if",
"(",
"'lt_func'",
"in",
"kwargs",
")",
":",
"lt_func",
"=",
"kwargs",
"[",
"'lt_func'",
"]",
"else",
":",
"lt_func",
"=",
"default_lt_func",
"if",
"(",
"'reverse'",
"in",
"kwargs",
")",
":",
"reverse",
"=",
"kwargs",
"[",
"'reverse'",
"]",
"else",
":",
"reverse",
"=",
"False",
"keys",
"=",
"kwargs",
"[",
"'cond_keys'",
"]",
"def",
"cmp_dict",
"(",
"d1",
",",
"d2",
")",
":",
"'''\n '''",
"length",
"=",
"keys",
".",
"__len__",
"(",
")",
"for",
"i",
"in",
"range",
"(",
"0",
",",
"length",
")",
":",
"key",
"=",
"keys",
"[",
"i",
"]",
"cond",
"=",
"eq_func",
"(",
"d1",
"[",
"key",
"]",
",",
"d2",
"[",
"key",
"]",
")",
"if",
"(",
"cond",
")",
":",
"pass",
"else",
":",
"cond",
"=",
"gt_func",
"(",
"d1",
"[",
"key",
"]",
",",
"d2",
"[",
"key",
"]",
")",
"if",
"(",
"cond",
")",
":",
"return",
"(",
"1",
")",
"else",
":",
"return",
"(",
"-",
"1",
")",
"return",
"(",
"0",
")",
"ndl",
"=",
"dictList",
"ndl",
"=",
"sorted",
"(",
"ndl",
",",
"key",
"=",
"functools",
".",
"cmp_to_key",
"(",
"cmp_dict",
")",
",",
"reverse",
"=",
"reverse",
")",
"return",
"(",
"ndl",
")"
] | 30.257576 | 17.621212 |
def find(self,
         signature = None, order = 0,
         since = None, until = None,
         offset = None, limit = None):
        """
        Retrieve crash dumps from the database.

        Results may be filtered by signature and by a timestamp range,
        sorted by timestamp, and paged with an offset/limit pair so a
        large database can be read without consuming too much memory.

        @see: L{find_by_example}
        @type  signature: object
        @param signature: (Optional) Return only crashes matching
            this signature. See L{Crash.signature} for more details.
        @type  order: int
        @param order: (Optional) Sort by timestamp.
            If C{== 0}, results are not sorted.
            If C{> 0}, results are sorted from older to newer.
            If C{< 0}, results are sorted from newer to older.
        @type  since: datetime
        @param since: (Optional) Return only the crashes after and
            including this date and time.
        @type  until: datetime
        @param until: (Optional) Return only the crashes before this date
            and time, not including it.
        @type  offset: int
        @param offset: (Optional) Skip the first I{offset} results.
        @type  limit: int
        @param limit: (Optional) Return at most I{limit} results.
        @rtype:  list(L{Crash})
        @return: List of Crash objects.
        """
        # Swap reversed time bounds instead of silently returning nothing.
        if since and until and since > until:
            warnings.warn("CrashDAO.find() got the 'since' and 'until'"
                          " arguments reversed, corrected automatically.")
            since, until = until, since
        # A limit of exactly zero can never return rows; skip the round trip.
        if limit is not None and not limit:
            warnings.warn("CrashDAO.find() was set a limit of 0 results,"
                          " returning without executing a query.")
            return []
        # Assemble the query from the optional filters.
        query = self._session.query(CrashDTO)
        if signature is not None:
            query = query.filter(
                CrashDTO.signature == pickle.dumps(signature, protocol = 0))
        if since:
            query = query.filter(CrashDTO.timestamp >= since)
        if until:
            query = query.filter(CrashDTO.timestamp < until)
        if order > 0:
            query = query.order_by(asc(CrashDTO.timestamp))
        elif order < 0:
            query = query.order_by(desc(CrashDTO.timestamp))
        else:
            # Default ordering is by row ID, to get consistent results.
            # Some database engines also require an ordering whenever
            # offsets are used.
            query = query.order_by(asc(CrashDTO.id))
        if offset:
            query = query.offset(offset)
        if limit:
            query = query.limit(limit)
        # Run the query and convert each row into a Crash object.
        try:
            return [row.toCrash() for row in query.all()]
        except NoResultFound:
            return []
"def",
"find",
"(",
"self",
",",
"signature",
"=",
"None",
",",
"order",
"=",
"0",
",",
"since",
"=",
"None",
",",
"until",
"=",
"None",
",",
"offset",
"=",
"None",
",",
"limit",
"=",
"None",
")",
":",
"# Validate the parameters.",
"if",
"since",
"and",
"until",
"and",
"since",
">",
"until",
":",
"warnings",
".",
"warn",
"(",
"\"CrashDAO.find() got the 'since' and 'until'\"",
"\" arguments reversed, corrected automatically.\"",
")",
"since",
",",
"until",
"=",
"until",
",",
"since",
"if",
"limit",
"is",
"not",
"None",
"and",
"not",
"limit",
":",
"warnings",
".",
"warn",
"(",
"\"CrashDAO.find() was set a limit of 0 results,\"",
"\" returning without executing a query.\"",
")",
"return",
"[",
"]",
"# Build the SQL query.",
"query",
"=",
"self",
".",
"_session",
".",
"query",
"(",
"CrashDTO",
")",
"if",
"signature",
"is",
"not",
"None",
":",
"sig_pickled",
"=",
"pickle",
".",
"dumps",
"(",
"signature",
",",
"protocol",
"=",
"0",
")",
"query",
"=",
"query",
".",
"filter",
"(",
"CrashDTO",
".",
"signature",
"==",
"sig_pickled",
")",
"if",
"since",
":",
"query",
"=",
"query",
".",
"filter",
"(",
"CrashDTO",
".",
"timestamp",
">=",
"since",
")",
"if",
"until",
":",
"query",
"=",
"query",
".",
"filter",
"(",
"CrashDTO",
".",
"timestamp",
"<",
"until",
")",
"if",
"order",
":",
"if",
"order",
">",
"0",
":",
"query",
"=",
"query",
".",
"order_by",
"(",
"asc",
"(",
"CrashDTO",
".",
"timestamp",
")",
")",
"else",
":",
"query",
"=",
"query",
".",
"order_by",
"(",
"desc",
"(",
"CrashDTO",
".",
"timestamp",
")",
")",
"else",
":",
"# Default ordering is by row ID, to get consistent results.",
"# Also some database engines require ordering when using offsets.",
"query",
"=",
"query",
".",
"order_by",
"(",
"asc",
"(",
"CrashDTO",
".",
"id",
")",
")",
"if",
"offset",
":",
"query",
"=",
"query",
".",
"offset",
"(",
"offset",
")",
"if",
"limit",
":",
"query",
"=",
"query",
".",
"limit",
"(",
"limit",
")",
"# Execute the SQL query and convert the results.",
"try",
":",
"return",
"[",
"dto",
".",
"toCrash",
"(",
")",
"for",
"dto",
"in",
"query",
".",
"all",
"(",
")",
"]",
"except",
"NoResultFound",
":",
"return",
"[",
"]"
] | 37.164557 | 20.734177 |
def execute(filename, formatted_name):
    """Move a file to its metadata-generated name.

    :param str filename: absolute path and filename of the original file
    :param str formatted_name: absolute path and new filename
    """
    # Skip the rename when the destination already exists, unless the
    # user explicitly enabled overwriting.
    if (os.path.isfile(formatted_name)
            and not cfg.CONF.overwrite_file_enabled):
        LOG.info('File %s already exists not forcefully moving %s',
                 formatted_name, filename)
        return
    LOG.info('renaming [%s] to [%s]', filename, formatted_name)
    # A dry run logs the intent but never touches the filesystem.
    if not cfg.CONF.dryrun:
        shutil.move(filename, formatted_name)
"def",
"execute",
"(",
"filename",
",",
"formatted_name",
")",
":",
"if",
"os",
".",
"path",
".",
"isfile",
"(",
"formatted_name",
")",
":",
"# If the destination exists, skip rename unless overwrite enabled",
"if",
"not",
"cfg",
".",
"CONF",
".",
"overwrite_file_enabled",
":",
"LOG",
".",
"info",
"(",
"'File %s already exists not forcefully moving %s'",
",",
"formatted_name",
",",
"filename",
")",
"return",
"LOG",
".",
"info",
"(",
"'renaming [%s] to [%s]'",
",",
"filename",
",",
"formatted_name",
")",
"if",
"not",
"cfg",
".",
"CONF",
".",
"dryrun",
":",
"shutil",
".",
"move",
"(",
"filename",
",",
"formatted_name",
")"
] | 39.235294 | 18.411765 |
def transform_data(function, input_data):
    ''' a function to apply a function to each value in a nested dictionary

    :param function: callable function with a single input of any datatype
    :param input_data: dictionary or list with nested data to transform
    :return: dictionary or list with data transformed by function
    :raises ValueError: if input_data cannot be deep-copied (e.g. it
        contains module datatypes)
    '''
    # construct a copy so the caller's structure is never mutated
    from copy import deepcopy
    try:
        output_data = deepcopy(input_data)
    except Exception as exc:
        # Narrowed from a bare except; chain the original failure so the
        # real cause is visible in the traceback.
        raise ValueError('transform_data() input_data argument cannot contain module datatypes.') from exc
    # walk over the nested data and apply the function to each leaf value
    for dot_path, value in walk_data(input_data):
        current_endpoint = output_data
        segment_list = segment_path(dot_path)
        if not segment_list:
            continue
        segment = None
        for i, raw_segment in enumerate(segment_list):
            # numeric segments index sequences, anything else keys a mapping
            try:
                segment = int(raw_segment)
            except (TypeError, ValueError):
                segment = raw_segment
            # descend until the final segment, which receives the
            # transformed value below
            if i + 1 < len(segment_list):
                current_endpoint = current_endpoint[segment]
        current_endpoint[segment] = function(value)
    return output_data
"def",
"transform_data",
"(",
"function",
",",
"input_data",
")",
":",
"# construct copy",
"try",
":",
"from",
"copy",
"import",
"deepcopy",
"output_data",
"=",
"deepcopy",
"(",
"input_data",
")",
"except",
":",
"raise",
"ValueError",
"(",
"'transform_data() input_data argument cannot contain module datatypes.'",
")",
"# walk over data and apply function",
"for",
"dot_path",
",",
"value",
"in",
"walk_data",
"(",
"input_data",
")",
":",
"current_endpoint",
"=",
"output_data",
"segment_list",
"=",
"segment_path",
"(",
"dot_path",
")",
"segment",
"=",
"None",
"if",
"segment_list",
":",
"for",
"i",
"in",
"range",
"(",
"len",
"(",
"segment_list",
")",
")",
":",
"try",
":",
"segment",
"=",
"int",
"(",
"segment_list",
"[",
"i",
"]",
")",
"except",
":",
"segment",
"=",
"segment_list",
"[",
"i",
"]",
"if",
"i",
"+",
"1",
"==",
"len",
"(",
"segment_list",
")",
":",
"pass",
"else",
":",
"current_endpoint",
"=",
"current_endpoint",
"[",
"segment",
"]",
"current_endpoint",
"[",
"segment",
"]",
"=",
"function",
"(",
"value",
")",
"return",
"output_data"
] | 34.382353 | 21.205882 |
def remove_actor(self, actor, reset_camera=False):
        """
        Remove an actor from every renderer of the Plotter.

        Parameters
        ----------
        actor : vtk.vtkActor
            Actor that has previously been added to the Renderer.

        reset_camera : bool, optional
            Resets camera so all actors can be seen.

        Returns
        -------
        success : bool
            ``True`` once the actor has been dispatched to every renderer.
        """
        for rend in self.renderers:
            rend.remove_actor(actor, reset_camera)
        return True
"def",
"remove_actor",
"(",
"self",
",",
"actor",
",",
"reset_camera",
"=",
"False",
")",
":",
"for",
"renderer",
"in",
"self",
".",
"renderers",
":",
"renderer",
".",
"remove_actor",
"(",
"actor",
",",
"reset_camera",
")",
"return",
"True"
] | 27.52381 | 17.809524 |
def momentum(data, period):
    """
    Momentum indicator.

    Formula:
    DATA[idx] - DATA[idx + 1 - period] for every index from period - 1
    onward; earlier indices are filled in as non-computable values.
    """
    catch_errors.check_for_period_error(data, period)
    deltas = [data[idx] - data[idx - (period - 1)]
              for idx in range(period - 1, len(data))]
    return fill_for_noncomputable_vals(data, deltas)
"def",
"momentum",
"(",
"data",
",",
"period",
")",
":",
"catch_errors",
".",
"check_for_period_error",
"(",
"data",
",",
"period",
")",
"momentum",
"=",
"[",
"data",
"[",
"idx",
"]",
"-",
"data",
"[",
"idx",
"+",
"1",
"-",
"period",
"]",
"for",
"idx",
"in",
"range",
"(",
"period",
"-",
"1",
",",
"len",
"(",
"data",
")",
")",
"]",
"momentum",
"=",
"fill_for_noncomputable_vals",
"(",
"data",
",",
"momentum",
")",
"return",
"momentum"
] | 25.916667 | 21.25 |
def binary(self, new_binary):
        """Set the location of the browser binary from either a path
        string or a ``FirefoxBinary`` instance.
        """
        if isinstance(new_binary, FirefoxBinary):
            self._binary = new_binary
        else:
            # Coerce a plain path string into a FirefoxBinary wrapper.
            self._binary = FirefoxBinary(new_binary)
"def",
"binary",
"(",
"self",
",",
"new_binary",
")",
":",
"if",
"not",
"isinstance",
"(",
"new_binary",
",",
"FirefoxBinary",
")",
":",
"new_binary",
"=",
"FirefoxBinary",
"(",
"new_binary",
")",
"self",
".",
"_binary",
"=",
"new_binary"
] | 34.75 | 10.75 |
def _main(self, client, copy_source, bucket, key, upload_id, part_number,
extra_args, callbacks, size):
"""
:param client: The client to use when calling PutObject
:param copy_source: The CopySource parameter to use
:param bucket: The name of the bucket to upload to
:param key: The name of the key to upload to
:param upload_id: The id of the upload
:param part_number: The number representing the part of the multipart
upload
:param extra_args: A dictionary of any extra arguments that may be
used in the upload.
:param callbacks: List of callbacks to call after copy part
:param size: The size of the transfer. This value is passed into
the callbacks
:rtype: dict
:returns: A dictionary representing a part::
{'Etag': etag_value, 'PartNumber': part_number}
This value can be appended to a list to be used to complete
the multipart upload.
"""
response = client.upload_part_copy(
CopySource=copy_source, Bucket=bucket, Key=key,
UploadId=upload_id, PartNumber=part_number, **extra_args)
for callback in callbacks:
callback(bytes_transferred=size)
etag = response['CopyPartResult']['ETag']
return {'ETag': etag, 'PartNumber': part_number} | [
"def",
"_main",
"(",
"self",
",",
"client",
",",
"copy_source",
",",
"bucket",
",",
"key",
",",
"upload_id",
",",
"part_number",
",",
"extra_args",
",",
"callbacks",
",",
"size",
")",
":",
"response",
"=",
"client",
".",
"upload_part_copy",
"(",
"CopySource",
"=",
"copy_source",
",",
"Bucket",
"=",
"bucket",
",",
"Key",
"=",
"key",
",",
"UploadId",
"=",
"upload_id",
",",
"PartNumber",
"=",
"part_number",
",",
"*",
"*",
"extra_args",
")",
"for",
"callback",
"in",
"callbacks",
":",
"callback",
"(",
"bytes_transferred",
"=",
"size",
")",
"etag",
"=",
"response",
"[",
"'CopyPartResult'",
"]",
"[",
"'ETag'",
"]",
"return",
"{",
"'ETag'",
":",
"etag",
",",
"'PartNumber'",
":",
"part_number",
"}"
] | 44.16129 | 18.870968 |
def validate_public_key(value):
    """
    Check that the given value is a valid RSA Public key in either PEM or
    OpenSSH format.  If neither loader accepts it, raises
    ``django.core.exceptions.ValidationError``.
    """
    last_error = None
    for loader in (load_pem_public_key, load_ssh_public_key):
        try:
            loader(value.encode('utf-8'), default_backend())
            return
        except Exception as error:
            # Remember the failure; the last one is reported if every
            # loader rejects the key.
            last_error = error
    raise ValidationError('Public key is invalid: %s' % last_error)
"def",
"validate_public_key",
"(",
"value",
")",
":",
"is_valid",
"=",
"False",
"exc",
"=",
"None",
"for",
"load",
"in",
"(",
"load_pem_public_key",
",",
"load_ssh_public_key",
")",
":",
"if",
"not",
"is_valid",
":",
"try",
":",
"load",
"(",
"value",
".",
"encode",
"(",
"'utf-8'",
")",
",",
"default_backend",
"(",
")",
")",
"is_valid",
"=",
"True",
"except",
"Exception",
"as",
"e",
":",
"exc",
"=",
"e",
"if",
"not",
"is_valid",
":",
"raise",
"ValidationError",
"(",
"'Public key is invalid: %s'",
"%",
"exc",
")"
] | 31.833333 | 20.722222 |
def accepted(self):
        """Successfully close the widget and emit the loaded model.

        This method is also a `SLOT`.  The dialog closes when the `ok`
        button is pressed; if a `DataFrame` was loaded, a `DataFrameModel`
        wrapping a copy of it is emitted through the `load` signal
        together with the source filename.
        """
        table_model = self._previewTableView.model()
        if table_model is not None:
            frame_copy = table_model.dataFrame().copy()
            self.load.emit(DataFrameModel(frame_copy), self._filename)
            print("Emitted model for {}".format(self._filename))
        self._resetWidgets()
        self.accept()
"def",
"accepted",
"(",
"self",
")",
":",
"model",
"=",
"self",
".",
"_previewTableView",
".",
"model",
"(",
")",
"if",
"model",
"is",
"not",
"None",
":",
"df",
"=",
"model",
".",
"dataFrame",
"(",
")",
".",
"copy",
"(",
")",
"dfModel",
"=",
"DataFrameModel",
"(",
"df",
")",
"self",
".",
"load",
".",
"emit",
"(",
"dfModel",
",",
"self",
".",
"_filename",
")",
"print",
"(",
"(",
"\"Emitted model for {}\"",
".",
"format",
"(",
"self",
".",
"_filename",
")",
")",
")",
"self",
".",
"_resetWidgets",
"(",
")",
"self",
".",
"accept",
"(",
")"
] | 37.625 | 15.875 |
def send_animation(
        self,
        chat_id: Union[int, str],
        animation: str,
        caption: str = "",
        parse_mode: str = "",
        duration: int = 0,
        width: int = 0,
        height: int = 0,
        thumb: str = None,
        disable_notification: bool = None,
        reply_to_message_id: int = None,
        reply_markup: Union[
            "pyrogram.InlineKeyboardMarkup",
            "pyrogram.ReplyKeyboardMarkup",
            "pyrogram.ReplyKeyboardRemove",
            "pyrogram.ForceReply"
        ] = None,
        progress: callable = None,
        progress_args: tuple = ()
    ) -> Union["pyrogram.Message", None]:
        """Use this method to send animation files (animation or H.264/MPEG-4 AVC video without sound).
        Args:
            chat_id (``int`` | ``str``):
                Unique identifier (int) or username (str) of the target chat.
                For your personal cloud (Saved Messages) you can simply use "me" or "self".
                For a contact that exists in your Telegram address book you can use his phone number (str).
            animation (``str``):
                Animation to send.
                Pass a file_id as string to send an animation that exists on the Telegram servers,
                pass an HTTP URL as a string for Telegram to get an animation from the Internet, or
                pass a file path as string to upload a new animation that exists on your local machine.
            caption (``str``, *optional*):
                Animation caption, 0-1024 characters.
            parse_mode (``str``, *optional*):
                Use :obj:`MARKDOWN <pyrogram.ParseMode.MARKDOWN>` or :obj:`HTML <pyrogram.ParseMode.HTML>`
                if you want Telegram apps to show bold, italic, fixed-width text or inline URLs in your caption.
                Defaults to Markdown.
            duration (``int``, *optional*):
                Duration of sent animation in seconds.
            width (``int``, *optional*):
                Animation width.
            height (``int``, *optional*):
                Animation height.
            thumb (``str``, *optional*):
                Thumbnail of the animation file sent.
                The thumbnail should be in JPEG format and less than 200 KB in size.
                A thumbnail's width and height should not exceed 90 pixels.
                Thumbnails can't be reused and can be only uploaded as a new file.
            disable_notification (``bool``, *optional*):
                Sends the message silently.
                Users will receive a notification with no sound.
            reply_to_message_id (``int``, *optional*):
                If the message is a reply, ID of the original message.
            reply_markup (:obj:`InlineKeyboardMarkup` | :obj:`ReplyKeyboardMarkup` | :obj:`ReplyKeyboardRemove` | :obj:`ForceReply`, *optional*):
                Additional interface options. An object for an inline keyboard, custom reply keyboard,
                instructions to remove reply keyboard or to force a reply from the user.
            progress (``callable``, *optional*):
                Pass a callback function to view the upload progress.
                The function must take *(client, current, total, \*args)* as positional arguments (look at the section
                below for a detailed description).
            progress_args (``tuple``, *optional*):
                Extra custom arguments for the progress callback function. Useful, for example, if you want to pass
                a chat_id and a message_id in order to edit a message with the updated progress.
        Other Parameters:
            client (:obj:`Client <pyrogram.Client>`):
                The Client itself, useful when you want to call other API methods inside the callback function.
            current (``int``):
                The amount of bytes uploaded so far.
            total (``int``):
                The size of the file.
            *args (``tuple``, *optional*):
                Extra custom arguments as defined in the *progress_args* parameter.
                You can either keep *\*args* or add every single extra argument in your function signature.
        Returns:
            On success, the sent :obj:`Message <pyrogram.Message>` is returned.
            In case the upload is deliberately stopped with :meth:`stop_transmission`, None is returned instead.
        Raises:
            :class:`RPCError <pyrogram.RPCError>` in case of a Telegram RPC error.
        """
        file = None
        # Caption parser: HTML only when explicitly requested, Markdown otherwise.
        style = self.html if parse_mode.lower() == "html" else self.markdown
        try:
            # Case 1: "animation" is a local file path -> upload it.
            if os.path.exists(animation):
                thumb = None if thumb is None else self.save_file(thumb)
                file = self.save_file(animation, progress=progress, progress_args=progress_args)
                media = types.InputMediaUploadedDocument(
                    mime_type=self.guess_mime_type(animation) or "video/mp4",
                    file=file,
                    thumb=thumb,
                    attributes=[
                        types.DocumentAttributeVideo(
                            supports_streaming=True,
                            duration=duration,
                            w=width,
                            h=height
                        ),
                        types.DocumentAttributeFilename(file_name=os.path.basename(animation)),
                        types.DocumentAttributeAnimated()
                    ]
                )
            # Case 2: an HTTP URL -> let Telegram fetch it.
            elif animation.startswith("http"):
                media = types.InputMediaDocumentExternal(
                    url=animation
                )
            # Case 3: assume a Telegram file_id and unpack it.
            else:
                try:
                    decoded = utils.decode(animation)
                    # Two file_id layouts exist; the longer one carries extra
                    # fields — TODO confirm the exact layout against utils.decode.
                    fmt = "<iiqqqqi" if len(decoded) > 24 else "<iiqq"
                    unpacked = struct.unpack(fmt, decoded)
                except (AssertionError, binascii.Error, struct.error):
                    raise FileIdInvalid from None
                else:
                    # NOTE(review): 10 appears to be the media type code this
                    # method accepts — confirm against BaseClient.MEDIA_TYPE_ID.
                    if unpacked[0] != 10:
                        media_type = BaseClient.MEDIA_TYPE_ID.get(unpacked[0], None)
                        if media_type:
                            raise FileIdInvalid("The file_id belongs to a {}".format(media_type))
                        else:
                            raise FileIdInvalid("Unknown media type: {}".format(unpacked[0]))
                    media = types.InputMediaDocument(
                        id=types.InputDocument(
                            id=unpacked[2],
                            access_hash=unpacked[3],
                            file_reference=b""
                        )
                    )
            # Send, re-uploading any missing file parts until the server
            # accepts the message.
            while True:
                try:
                    r = self.send(
                        functions.messages.SendMedia(
                            peer=self.resolve_peer(chat_id),
                            media=media,
                            silent=disable_notification or None,
                            reply_to_msg_id=reply_to_message_id,
                            random_id=self.rnd_id(),
                            reply_markup=reply_markup.write() if reply_markup else None,
                            **style.parse(caption)
                        )
                    )
                except FilePartMissing as e:
                    # The server reported a missing chunk; re-send just that part.
                    self.save_file(animation, file_id=file.id, file_part=e.x)
                else:
                    # Extract the sent message from the resulting updates.
                    for i in r.updates:
                        if isinstance(i, (types.UpdateNewMessage, types.UpdateNewChannelMessage)):
                            return pyrogram.Message._parse(
                                self, i.message,
                                {i.id: i for i in r.users},
                                {i.id: i for i in r.chats}
                            )
        except BaseClient.StopTransmission:
            # The user aborted the upload on purpose; signal it with None.
            return None
"def",
"send_animation",
"(",
"self",
",",
"chat_id",
":",
"Union",
"[",
"int",
",",
"str",
"]",
",",
"animation",
":",
"str",
",",
"caption",
":",
"str",
"=",
"\"\"",
",",
"parse_mode",
":",
"str",
"=",
"\"\"",
",",
"duration",
":",
"int",
"=",
"0",
",",
"width",
":",
"int",
"=",
"0",
",",
"height",
":",
"int",
"=",
"0",
",",
"thumb",
":",
"str",
"=",
"None",
",",
"disable_notification",
":",
"bool",
"=",
"None",
",",
"reply_to_message_id",
":",
"int",
"=",
"None",
",",
"reply_markup",
":",
"Union",
"[",
"\"pyrogram.InlineKeyboardMarkup\"",
",",
"\"pyrogram.ReplyKeyboardMarkup\"",
",",
"\"pyrogram.ReplyKeyboardRemove\"",
",",
"\"pyrogram.ForceReply\"",
"]",
"=",
"None",
",",
"progress",
":",
"callable",
"=",
"None",
",",
"progress_args",
":",
"tuple",
"=",
"(",
")",
")",
"->",
"Union",
"[",
"\"pyrogram.Message\"",
",",
"None",
"]",
":",
"file",
"=",
"None",
"style",
"=",
"self",
".",
"html",
"if",
"parse_mode",
".",
"lower",
"(",
")",
"==",
"\"html\"",
"else",
"self",
".",
"markdown",
"try",
":",
"if",
"os",
".",
"path",
".",
"exists",
"(",
"animation",
")",
":",
"thumb",
"=",
"None",
"if",
"thumb",
"is",
"None",
"else",
"self",
".",
"save_file",
"(",
"thumb",
")",
"file",
"=",
"self",
".",
"save_file",
"(",
"animation",
",",
"progress",
"=",
"progress",
",",
"progress_args",
"=",
"progress_args",
")",
"media",
"=",
"types",
".",
"InputMediaUploadedDocument",
"(",
"mime_type",
"=",
"self",
".",
"guess_mime_type",
"(",
"animation",
")",
"or",
"\"video/mp4\"",
",",
"file",
"=",
"file",
",",
"thumb",
"=",
"thumb",
",",
"attributes",
"=",
"[",
"types",
".",
"DocumentAttributeVideo",
"(",
"supports_streaming",
"=",
"True",
",",
"duration",
"=",
"duration",
",",
"w",
"=",
"width",
",",
"h",
"=",
"height",
")",
",",
"types",
".",
"DocumentAttributeFilename",
"(",
"file_name",
"=",
"os",
".",
"path",
".",
"basename",
"(",
"animation",
")",
")",
",",
"types",
".",
"DocumentAttributeAnimated",
"(",
")",
"]",
")",
"elif",
"animation",
".",
"startswith",
"(",
"\"http\"",
")",
":",
"media",
"=",
"types",
".",
"InputMediaDocumentExternal",
"(",
"url",
"=",
"animation",
")",
"else",
":",
"try",
":",
"decoded",
"=",
"utils",
".",
"decode",
"(",
"animation",
")",
"fmt",
"=",
"\"<iiqqqqi\"",
"if",
"len",
"(",
"decoded",
")",
">",
"24",
"else",
"\"<iiqq\"",
"unpacked",
"=",
"struct",
".",
"unpack",
"(",
"fmt",
",",
"decoded",
")",
"except",
"(",
"AssertionError",
",",
"binascii",
".",
"Error",
",",
"struct",
".",
"error",
")",
":",
"raise",
"FileIdInvalid",
"from",
"None",
"else",
":",
"if",
"unpacked",
"[",
"0",
"]",
"!=",
"10",
":",
"media_type",
"=",
"BaseClient",
".",
"MEDIA_TYPE_ID",
".",
"get",
"(",
"unpacked",
"[",
"0",
"]",
",",
"None",
")",
"if",
"media_type",
":",
"raise",
"FileIdInvalid",
"(",
"\"The file_id belongs to a {}\"",
".",
"format",
"(",
"media_type",
")",
")",
"else",
":",
"raise",
"FileIdInvalid",
"(",
"\"Unknown media type: {}\"",
".",
"format",
"(",
"unpacked",
"[",
"0",
"]",
")",
")",
"media",
"=",
"types",
".",
"InputMediaDocument",
"(",
"id",
"=",
"types",
".",
"InputDocument",
"(",
"id",
"=",
"unpacked",
"[",
"2",
"]",
",",
"access_hash",
"=",
"unpacked",
"[",
"3",
"]",
",",
"file_reference",
"=",
"b\"\"",
")",
")",
"while",
"True",
":",
"try",
":",
"r",
"=",
"self",
".",
"send",
"(",
"functions",
".",
"messages",
".",
"SendMedia",
"(",
"peer",
"=",
"self",
".",
"resolve_peer",
"(",
"chat_id",
")",
",",
"media",
"=",
"media",
",",
"silent",
"=",
"disable_notification",
"or",
"None",
",",
"reply_to_msg_id",
"=",
"reply_to_message_id",
",",
"random_id",
"=",
"self",
".",
"rnd_id",
"(",
")",
",",
"reply_markup",
"=",
"reply_markup",
".",
"write",
"(",
")",
"if",
"reply_markup",
"else",
"None",
",",
"*",
"*",
"style",
".",
"parse",
"(",
"caption",
")",
")",
")",
"except",
"FilePartMissing",
"as",
"e",
":",
"self",
".",
"save_file",
"(",
"animation",
",",
"file_id",
"=",
"file",
".",
"id",
",",
"file_part",
"=",
"e",
".",
"x",
")",
"else",
":",
"for",
"i",
"in",
"r",
".",
"updates",
":",
"if",
"isinstance",
"(",
"i",
",",
"(",
"types",
".",
"UpdateNewMessage",
",",
"types",
".",
"UpdateNewChannelMessage",
")",
")",
":",
"return",
"pyrogram",
".",
"Message",
".",
"_parse",
"(",
"self",
",",
"i",
".",
"message",
",",
"{",
"i",
".",
"id",
":",
"i",
"for",
"i",
"in",
"r",
".",
"users",
"}",
",",
"{",
"i",
".",
"id",
":",
"i",
"for",
"i",
"in",
"r",
".",
"chats",
"}",
")",
"except",
"BaseClient",
".",
"StopTransmission",
":",
"return",
"None"
] | 44.913793 | 24.074713 |
def overlaps(self, other):
        """Return True when this network shares at least one address
        with *other* (i.e. either endpoint of one lies inside the other)."""
        return (self.network_address in other
                or self.broadcast_address in other
                or other.network_address in self
                or other.broadcast_address in self)
"def",
"overlaps",
"(",
"self",
",",
"other",
")",
":",
"return",
"self",
".",
"network_address",
"in",
"other",
"or",
"(",
"self",
".",
"broadcast_address",
"in",
"other",
"or",
"(",
"other",
".",
"network_address",
"in",
"self",
"or",
"(",
"other",
".",
"broadcast_address",
"in",
"self",
")",
")",
")"
] | 47.166667 | 9.166667 |
def updateflags(self, flags):
        """
        Thin wrapper around build_update(flags=X). This only handles simple
        status changes; anything like a needinfo requestee needs to call
        build_update + update_bugs directly.

        :param flags: Dictionary of the form {"flagname": "status"}, example
            {"needinfo": "?", "devel_ack": "+"}
        """
        flaglist = [{"name": name, "status": status}
                    for name, status in flags.items()]
        update = self.bugzilla.build_update(flags=flaglist)
        return self.bugzilla.update_bugs([self.bug_id], update)
"def",
"updateflags",
"(",
"self",
",",
"flags",
")",
":",
"flaglist",
"=",
"[",
"]",
"for",
"key",
",",
"value",
"in",
"flags",
".",
"items",
"(",
")",
":",
"flaglist",
".",
"append",
"(",
"{",
"\"name\"",
":",
"key",
",",
"\"status\"",
":",
"value",
"}",
")",
"return",
"self",
".",
"bugzilla",
".",
"update_bugs",
"(",
"[",
"self",
".",
"bug_id",
"]",
",",
"self",
".",
"bugzilla",
".",
"build_update",
"(",
"flags",
"=",
"flaglist",
")",
")"
] | 42.285714 | 16.428571 |
def apply_changes(self, event):
        """
        applies the changes in the various attribute boxes of this object to all highlighted fit objects in the logger, these changes are reflected both in this object and in the Zeq_GUI parent object.
        @param: event -> the wx.ButtonEvent that triggered this function
        """
        # Read the edited values once; empty strings mean "leave unchanged".
        new_name = self.name_box.GetLineText(0)
        new_color = self.color_box.GetValue()
        new_tmin = self.tmin_box.GetValue()
        new_tmax = self.tmax_box.GetValue()
        next_i = -1
        changed_i = []
        # Walk every highlighted row of the fit logger list control;
        # GetNextSelected(-1) starts at the top and returns -1 when the
        # selection is exhausted.
        while True:
            next_i = self.logger.GetNextSelected(next_i)
            if next_i == -1:
                break
            specimen = self.fit_list[next_i][1]
            fit = self.fit_list[next_i][0]
            if new_name:
                # Only rename when the new name is unique for this specimen.
                if new_name not in [x.name for x in self.parent.pmag_results_data['specimens'][specimen]]: fit.name = new_name
            if new_color:
                fit.color = self.color_dict[new_color]
            #testing
            # not_both guards the single-bound branches below from running
            # again when both bounds were already applied together.
            not_both = True
            if new_tmin and new_tmax:
                if fit == self.parent.current_fit:
                    self.parent.tmin_box.SetStringSelection(new_tmin)
                    self.parent.tmax_box.SetStringSelection(new_tmax)
                # Recompute the PCA interpretation with both new bounds.
                fit.put(specimen,self.parent.COORDINATE_SYSTEM, self.parent.get_PCA_parameters(specimen,fit,new_tmin,new_tmax,self.parent.COORDINATE_SYSTEM,fit.PCA_type))
                not_both = False
            if new_tmin and not_both:
                if fit == self.parent.current_fit:
                    self.parent.tmin_box.SetStringSelection(new_tmin)
                # Recompute with the new lower bound only.
                fit.put(specimen,self.parent.COORDINATE_SYSTEM, self.parent.get_PCA_parameters(specimen,fit,new_tmin,fit.tmax,self.parent.COORDINATE_SYSTEM,fit.PCA_type))
            if new_tmax and not_both:
                if fit == self.parent.current_fit:
                    self.parent.tmax_box.SetStringSelection(new_tmax)
                # Recompute with the new upper bound only.
                fit.put(specimen,self.parent.COORDINATE_SYSTEM, self.parent.get_PCA_parameters(specimen,fit,fit.tmin,new_tmax,self.parent.COORDINATE_SYSTEM,fit.PCA_type))
            changed_i.append(next_i)
        # Refresh each changed logger row.  NOTE(review): a return value of
        # "s" presumably means the row was removed — confirm against
        # update_logger_entry — so later indices shift up by one.
        offset = 0
        for i in changed_i:
            i -= offset
            v = self.update_logger_entry(i)
            if v == "s":
                offset += 1
        self.parent.update_selection()
"def",
"apply_changes",
"(",
"self",
",",
"event",
")",
":",
"new_name",
"=",
"self",
".",
"name_box",
".",
"GetLineText",
"(",
"0",
")",
"new_color",
"=",
"self",
".",
"color_box",
".",
"GetValue",
"(",
")",
"new_tmin",
"=",
"self",
".",
"tmin_box",
".",
"GetValue",
"(",
")",
"new_tmax",
"=",
"self",
".",
"tmax_box",
".",
"GetValue",
"(",
")",
"next_i",
"=",
"-",
"1",
"changed_i",
"=",
"[",
"]",
"while",
"True",
":",
"next_i",
"=",
"self",
".",
"logger",
".",
"GetNextSelected",
"(",
"next_i",
")",
"if",
"next_i",
"==",
"-",
"1",
":",
"break",
"specimen",
"=",
"self",
".",
"fit_list",
"[",
"next_i",
"]",
"[",
"1",
"]",
"fit",
"=",
"self",
".",
"fit_list",
"[",
"next_i",
"]",
"[",
"0",
"]",
"if",
"new_name",
":",
"if",
"new_name",
"not",
"in",
"[",
"x",
".",
"name",
"for",
"x",
"in",
"self",
".",
"parent",
".",
"pmag_results_data",
"[",
"'specimens'",
"]",
"[",
"specimen",
"]",
"]",
":",
"fit",
".",
"name",
"=",
"new_name",
"if",
"new_color",
":",
"fit",
".",
"color",
"=",
"self",
".",
"color_dict",
"[",
"new_color",
"]",
"#testing",
"not_both",
"=",
"True",
"if",
"new_tmin",
"and",
"new_tmax",
":",
"if",
"fit",
"==",
"self",
".",
"parent",
".",
"current_fit",
":",
"self",
".",
"parent",
".",
"tmin_box",
".",
"SetStringSelection",
"(",
"new_tmin",
")",
"self",
".",
"parent",
".",
"tmax_box",
".",
"SetStringSelection",
"(",
"new_tmax",
")",
"fit",
".",
"put",
"(",
"specimen",
",",
"self",
".",
"parent",
".",
"COORDINATE_SYSTEM",
",",
"self",
".",
"parent",
".",
"get_PCA_parameters",
"(",
"specimen",
",",
"fit",
",",
"new_tmin",
",",
"new_tmax",
",",
"self",
".",
"parent",
".",
"COORDINATE_SYSTEM",
",",
"fit",
".",
"PCA_type",
")",
")",
"not_both",
"=",
"False",
"if",
"new_tmin",
"and",
"not_both",
":",
"if",
"fit",
"==",
"self",
".",
"parent",
".",
"current_fit",
":",
"self",
".",
"parent",
".",
"tmin_box",
".",
"SetStringSelection",
"(",
"new_tmin",
")",
"fit",
".",
"put",
"(",
"specimen",
",",
"self",
".",
"parent",
".",
"COORDINATE_SYSTEM",
",",
"self",
".",
"parent",
".",
"get_PCA_parameters",
"(",
"specimen",
",",
"fit",
",",
"new_tmin",
",",
"fit",
".",
"tmax",
",",
"self",
".",
"parent",
".",
"COORDINATE_SYSTEM",
",",
"fit",
".",
"PCA_type",
")",
")",
"if",
"new_tmax",
"and",
"not_both",
":",
"if",
"fit",
"==",
"self",
".",
"parent",
".",
"current_fit",
":",
"self",
".",
"parent",
".",
"tmax_box",
".",
"SetStringSelection",
"(",
"new_tmax",
")",
"fit",
".",
"put",
"(",
"specimen",
",",
"self",
".",
"parent",
".",
"COORDINATE_SYSTEM",
",",
"self",
".",
"parent",
".",
"get_PCA_parameters",
"(",
"specimen",
",",
"fit",
",",
"fit",
".",
"tmin",
",",
"new_tmax",
",",
"self",
".",
"parent",
".",
"COORDINATE_SYSTEM",
",",
"fit",
".",
"PCA_type",
")",
")",
"changed_i",
".",
"append",
"(",
"next_i",
")",
"offset",
"=",
"0",
"for",
"i",
"in",
"changed_i",
":",
"i",
"-=",
"offset",
"v",
"=",
"self",
".",
"update_logger_entry",
"(",
"i",
")",
"if",
"v",
"==",
"\"s\"",
":",
"offset",
"+=",
"1",
"self",
".",
"parent",
".",
"update_selection",
"(",
")"
] | 47.938776 | 26.591837 |
def vignetting(xy, f=100, alpha=0, rot=0, tilt=0, cx=50, cy=50):
    '''
    Vignetting equation using the KANG-WEISS-MODEL, see
    http://research.microsoft.com/en-us/um/people/sbkang/publications/eccv00.pdf

    f - focal length
    alpha - coefficient in the geometric vignetting factor
    tilt - tilt angle of a planar scene
    rot - rotation angle of a planar scene
    cx - image center, x
    cy - image center, y
    '''
    x, y = xy
    # radial distance from the image centre
    radius = ((x - cx) ** 2 + (y - cy) ** 2) ** 0.5
    # off-axis illumination factor
    off_axis = 1.0 / (1 + (radius / f) ** 2) ** 2
    # geometric vignetting factor (unity when alpha is zero)
    geometric = (1 - alpha * radius) if alpha != 0 else 1
    # tilt factor for a tilted planar scene (unity when there is no tilt)
    tilted = tiltFactor((x, y), f, tilt, rot, (cy, cx)) if tilt != 0 else 1
    return off_axis * geometric * tilted
"def",
"vignetting",
"(",
"xy",
",",
"f",
"=",
"100",
",",
"alpha",
"=",
"0",
",",
"rot",
"=",
"0",
",",
"tilt",
"=",
"0",
",",
"cx",
"=",
"50",
",",
"cy",
"=",
"50",
")",
":",
"x",
",",
"y",
"=",
"xy",
"# distance to image center:\r",
"dist",
"=",
"(",
"(",
"x",
"-",
"cx",
")",
"**",
"2",
"+",
"(",
"y",
"-",
"cy",
")",
"**",
"2",
")",
"**",
"0.5",
"# OFF_AXIS ILLUMINATION FACTOR:\r",
"A",
"=",
"1.0",
"/",
"(",
"1",
"+",
"(",
"dist",
"/",
"f",
")",
"**",
"2",
")",
"**",
"2",
"# GEOMETRIC FACTOR:\r",
"if",
"alpha",
"!=",
"0",
":",
"G",
"=",
"(",
"1",
"-",
"alpha",
"*",
"dist",
")",
"else",
":",
"G",
"=",
"1",
"# TILT FACTOR:\r",
"if",
"tilt",
"!=",
"0",
":",
"T",
"=",
"tiltFactor",
"(",
"(",
"x",
",",
"y",
")",
",",
"f",
",",
"tilt",
",",
"rot",
",",
"(",
"cy",
",",
"cx",
")",
")",
"else",
":",
"T",
"=",
"1",
"return",
"A",
"*",
"G",
"*",
"T"
] | 28.689655 | 19.931034 |
def pkginfo_to_metadata(egg_info_path, pkginfo_path):
"""
Convert .egg-info directory with PKG-INFO to the Metadata 1.3 aka
old-draft Metadata 2.0 format.
"""
pkg_info = read_pkg_info(pkginfo_path)
pkg_info.replace_header('Metadata-Version', '2.0')
requires_path = os.path.join(egg_info_path, 'requires.txt')
if os.path.exists(requires_path):
requires = open(requires_path).read()
for extra, reqs in pkg_resources.split_sections(requires):
condition = ''
if extra and ':' in extra: # setuptools extra:condition syntax
extra, condition = extra.split(':', 1)
if extra:
pkg_info['Provides-Extra'] = extra
if condition:
condition += " and "
condition += 'extra == %s' % repr(extra)
if condition:
condition = '; ' + condition
for new_req in convert_requirements(reqs):
pkg_info['Requires-Dist'] = new_req + condition
description = pkg_info['Description']
if description:
pkg_info.set_payload(dedent_description(pkg_info))
del pkg_info['Description']
return pkg_info | [
"def",
"pkginfo_to_metadata",
"(",
"egg_info_path",
",",
"pkginfo_path",
")",
":",
"pkg_info",
"=",
"read_pkg_info",
"(",
"pkginfo_path",
")",
"pkg_info",
".",
"replace_header",
"(",
"'Metadata-Version'",
",",
"'2.0'",
")",
"requires_path",
"=",
"os",
".",
"path",
".",
"join",
"(",
"egg_info_path",
",",
"'requires.txt'",
")",
"if",
"os",
".",
"path",
".",
"exists",
"(",
"requires_path",
")",
":",
"requires",
"=",
"open",
"(",
"requires_path",
")",
".",
"read",
"(",
")",
"for",
"extra",
",",
"reqs",
"in",
"pkg_resources",
".",
"split_sections",
"(",
"requires",
")",
":",
"condition",
"=",
"''",
"if",
"extra",
"and",
"':'",
"in",
"extra",
":",
"# setuptools extra:condition syntax",
"extra",
",",
"condition",
"=",
"extra",
".",
"split",
"(",
"':'",
",",
"1",
")",
"if",
"extra",
":",
"pkg_info",
"[",
"'Provides-Extra'",
"]",
"=",
"extra",
"if",
"condition",
":",
"condition",
"+=",
"\" and \"",
"condition",
"+=",
"'extra == %s'",
"%",
"repr",
"(",
"extra",
")",
"if",
"condition",
":",
"condition",
"=",
"'; '",
"+",
"condition",
"for",
"new_req",
"in",
"convert_requirements",
"(",
"reqs",
")",
":",
"pkg_info",
"[",
"'Requires-Dist'",
"]",
"=",
"new_req",
"+",
"condition",
"description",
"=",
"pkg_info",
"[",
"'Description'",
"]",
"if",
"description",
":",
"pkg_info",
".",
"set_payload",
"(",
"dedent_description",
"(",
"pkg_info",
")",
")",
"del",
"pkg_info",
"[",
"'Description'",
"]",
"return",
"pkg_info"
] | 39.5 | 14.7 |
def _get_policy_set(self, policy_set_id):
"""
Get a specific policy set by id.
"""
uri = self._get_policy_set_uri(guid=policy_set_id)
return self.service._get(uri) | [
"def",
"_get_policy_set",
"(",
"self",
",",
"policy_set_id",
")",
":",
"uri",
"=",
"self",
".",
"_get_policy_set_uri",
"(",
"guid",
"=",
"policy_set_id",
")",
"return",
"self",
".",
"service",
".",
"_get",
"(",
"uri",
")"
] | 33 | 3.666667 |
def pix2sky_vec(self, pixel, r, theta):
"""
Given and input position and vector in pixel coordinates, calculate
the equivalent position and vector in sky coordinates.
Parameters
----------
pixel : (int,int)
origin of vector in pixel coordinates
r : float
magnitude of vector in pixels
theta : float
angle of vector in degrees
Returns
-------
ra, dec : float
The (ra, dec) of the origin point (degrees).
r, pa : float
The magnitude and position angle of the vector (degrees).
"""
ra1, dec1 = self.pix2sky(pixel)
x, y = pixel
a = [x + r * np.cos(np.radians(theta)),
y + r * np.sin(np.radians(theta))]
locations = self.pix2sky(a)
ra2, dec2 = locations
a = gcd(ra1, dec1, ra2, dec2)
pa = bear(ra1, dec1, ra2, dec2)
return ra1, dec1, a, pa | [
"def",
"pix2sky_vec",
"(",
"self",
",",
"pixel",
",",
"r",
",",
"theta",
")",
":",
"ra1",
",",
"dec1",
"=",
"self",
".",
"pix2sky",
"(",
"pixel",
")",
"x",
",",
"y",
"=",
"pixel",
"a",
"=",
"[",
"x",
"+",
"r",
"*",
"np",
".",
"cos",
"(",
"np",
".",
"radians",
"(",
"theta",
")",
")",
",",
"y",
"+",
"r",
"*",
"np",
".",
"sin",
"(",
"np",
".",
"radians",
"(",
"theta",
")",
")",
"]",
"locations",
"=",
"self",
".",
"pix2sky",
"(",
"a",
")",
"ra2",
",",
"dec2",
"=",
"locations",
"a",
"=",
"gcd",
"(",
"ra1",
",",
"dec1",
",",
"ra2",
",",
"dec2",
")",
"pa",
"=",
"bear",
"(",
"ra1",
",",
"dec1",
",",
"ra2",
",",
"dec2",
")",
"return",
"ra1",
",",
"dec1",
",",
"a",
",",
"pa"
] | 31.6 | 14.866667 |
def _send_event_to_all(self, action, event):
"""
Send an event to all the client listening for notifications on all
projects
:param action: Action name
:param event: Event to send
"""
for project_listeners in self._listeners.values():
for listener in project_listeners:
listener.put_nowait((action, event, {})) | [
"def",
"_send_event_to_all",
"(",
"self",
",",
"action",
",",
"event",
")",
":",
"for",
"project_listeners",
"in",
"self",
".",
"_listeners",
".",
"values",
"(",
")",
":",
"for",
"listener",
"in",
"project_listeners",
":",
"listener",
".",
"put_nowait",
"(",
"(",
"action",
",",
"event",
",",
"{",
"}",
")",
")"
] | 35 | 13.909091 |
def convert_numpy_str_to_uint16(data):
""" Converts a numpy.unicode\_ to UTF-16 in numpy.uint16 form.
Convert a ``numpy.unicode_`` or an array of them (they are UTF-32
strings) to UTF-16 in the equivalent array of ``numpy.uint16``. The
conversion will throw an exception if any characters cannot be
converted to UTF-16. Strings are expanded along rows (across columns)
so a 2x3x4 array of 10 element strings will get turned into a 2x30x4
array of uint16's if every UTF-32 character converts easily to a
UTF-16 singlet, as opposed to a UTF-16 doublet.
Parameters
----------
data : numpy.unicode\_ or numpy.ndarray of numpy.unicode\_
The string or array of them to convert.
Returns
-------
array : numpy.ndarray of numpy.uint16
The result of the conversion.
Raises
------
UnicodeEncodeError
If a UTF-32 character has no UTF-16 representation.
See Also
--------
convert_numpy_str_to_uint32
convert_to_numpy_str
"""
# An empty string should be an empty uint16
if data.nbytes == 0:
return np.uint16([])
# We need to use the UTF-16 codec for our endianness. Using the
# right one means we don't have to worry about removing the BOM.
if sys.byteorder == 'little':
codec = 'UTF-16LE'
else:
codec = 'UTF-16BE'
# numpy.char.encode can do the conversion element wise. Then, we
# just have convert to uin16 with the appropriate dimensions. The
# dimensions are gotten from the shape of the converted data with
# the number of column increased by the number of words (pair of
# bytes) in the strings.
cdata = np.char.encode(np.atleast_1d(data), codec)
shape = list(cdata.shape)
shape[-1] *= (cdata.dtype.itemsize // 2)
return np.ndarray(shape=shape, dtype='uint16',
buffer=cdata.tostring()) | [
"def",
"convert_numpy_str_to_uint16",
"(",
"data",
")",
":",
"# An empty string should be an empty uint16",
"if",
"data",
".",
"nbytes",
"==",
"0",
":",
"return",
"np",
".",
"uint16",
"(",
"[",
"]",
")",
"# We need to use the UTF-16 codec for our endianness. Using the",
"# right one means we don't have to worry about removing the BOM.",
"if",
"sys",
".",
"byteorder",
"==",
"'little'",
":",
"codec",
"=",
"'UTF-16LE'",
"else",
":",
"codec",
"=",
"'UTF-16BE'",
"# numpy.char.encode can do the conversion element wise. Then, we",
"# just have convert to uin16 with the appropriate dimensions. The",
"# dimensions are gotten from the shape of the converted data with",
"# the number of column increased by the number of words (pair of",
"# bytes) in the strings.",
"cdata",
"=",
"np",
".",
"char",
".",
"encode",
"(",
"np",
".",
"atleast_1d",
"(",
"data",
")",
",",
"codec",
")",
"shape",
"=",
"list",
"(",
"cdata",
".",
"shape",
")",
"shape",
"[",
"-",
"1",
"]",
"*=",
"(",
"cdata",
".",
"dtype",
".",
"itemsize",
"//",
"2",
")",
"return",
"np",
".",
"ndarray",
"(",
"shape",
"=",
"shape",
",",
"dtype",
"=",
"'uint16'",
",",
"buffer",
"=",
"cdata",
".",
"tostring",
"(",
")",
")"
] | 34.924528 | 21.886792 |
def basic_auth_header(username, password):
'''generate a base64 encoded header to ask for a token. This means
base64 encoding a username and password and adding to the
Authorization header to identify the client.
Parameters
==========
username: the username
password: the password
'''
s = "%s:%s" % (username, password)
if sys.version_info[0] >= 3:
s = bytes(s, 'utf-8')
credentials = base64.b64encode(s).decode('utf-8')
else:
credentials = base64.b64encode(s)
auth = {"Authorization": "Basic %s" % credentials}
return auth | [
"def",
"basic_auth_header",
"(",
"username",
",",
"password",
")",
":",
"s",
"=",
"\"%s:%s\"",
"%",
"(",
"username",
",",
"password",
")",
"if",
"sys",
".",
"version_info",
"[",
"0",
"]",
">=",
"3",
":",
"s",
"=",
"bytes",
"(",
"s",
",",
"'utf-8'",
")",
"credentials",
"=",
"base64",
".",
"b64encode",
"(",
"s",
")",
".",
"decode",
"(",
"'utf-8'",
")",
"else",
":",
"credentials",
"=",
"base64",
".",
"b64encode",
"(",
"s",
")",
"auth",
"=",
"{",
"\"Authorization\"",
":",
"\"Basic %s\"",
"%",
"credentials",
"}",
"return",
"auth"
] | 32.105263 | 20.210526 |
def union(self, *others):
r"""Return a new multiset with all elements from the multiset and the others with maximal multiplicities.
>>> ms = Multiset('aab')
>>> sorted(ms.union('bc'))
['a', 'a', 'b', 'c']
You can also use the ``|`` operator for the same effect. However, the operator version
will only accept a set as other operator, not any iterable, to avoid errors.
>>> ms = Multiset('aab')
>>> sorted(ms | Multiset('aaa'))
['a', 'a', 'a', 'b']
For a variant of the operation which modifies the multiset in place see
:meth:`union_update`.
Args:
*others: The other sets to union the multiset with. Can also be any :class:`~typing.Iterable`\[~T]
or :class:`~typing.Mapping`\[~T, :class:`int`] which are then converted to :class:`Multiset`\[~T].
Returns:
The multiset resulting from the union.
"""
result = self.__copy__()
_elements = result._elements
_total = result._total
for other in map(self._as_mapping, others):
for element, multiplicity in other.items():
old_multiplicity = _elements.get(element, 0)
if multiplicity > old_multiplicity:
_elements[element] = multiplicity
_total += multiplicity - old_multiplicity
result._total = _total
return result | [
"def",
"union",
"(",
"self",
",",
"*",
"others",
")",
":",
"result",
"=",
"self",
".",
"__copy__",
"(",
")",
"_elements",
"=",
"result",
".",
"_elements",
"_total",
"=",
"result",
".",
"_total",
"for",
"other",
"in",
"map",
"(",
"self",
".",
"_as_mapping",
",",
"others",
")",
":",
"for",
"element",
",",
"multiplicity",
"in",
"other",
".",
"items",
"(",
")",
":",
"old_multiplicity",
"=",
"_elements",
".",
"get",
"(",
"element",
",",
"0",
")",
"if",
"multiplicity",
">",
"old_multiplicity",
":",
"_elements",
"[",
"element",
"]",
"=",
"multiplicity",
"_total",
"+=",
"multiplicity",
"-",
"old_multiplicity",
"result",
".",
"_total",
"=",
"_total",
"return",
"result"
] | 40.342857 | 22.742857 |
def extend(self, *args, **kwargs):
"""Extend current MultiDict with more values.
This method must be used instead of update.
"""
self._extend(args, kwargs, 'extend', self._extend_items) | [
"def",
"extend",
"(",
"self",
",",
"*",
"args",
",",
"*",
"*",
"kwargs",
")",
":",
"self",
".",
"_extend",
"(",
"args",
",",
"kwargs",
",",
"'extend'",
",",
"self",
".",
"_extend_items",
")"
] | 35.5 | 13.5 |
def handleHeader(self, key, value):
"""Handle header values."""
if key == 'CIMError':
self.CIMError = urllib.parse.unquote(value)
if key == 'PGErrorDetail':
self.PGErrorDetail = urllib.parse.unquote(value) | [
"def",
"handleHeader",
"(",
"self",
",",
"key",
",",
"value",
")",
":",
"if",
"key",
"==",
"'CIMError'",
":",
"self",
".",
"CIMError",
"=",
"urllib",
".",
"parse",
".",
"unquote",
"(",
"value",
")",
"if",
"key",
"==",
"'PGErrorDetail'",
":",
"self",
".",
"PGErrorDetail",
"=",
"urllib",
".",
"parse",
".",
"unquote",
"(",
"value",
")"
] | 35.428571 | 13.857143 |
def fix_for_specific_versions(self, rdf_data, to_replace):
"""
Fixes of the RDF before loading the graph. All of these are workaround
to circuvent known issues of the SPM and FSL exporters.
"""
# Load the graph as is so that we can query
g = self.parse(rdf_data)
# Retreive the exporter name and version
query = """
prefix nidm_spm_results_nidm: <http://purl.org/nidash/nidm#NIDM_0000168>
prefix nidm_nidmfsl: <http://purl.org/nidash/nidm#NIDM_0000167>
prefix nidm_softwareVersion: <http://purl.org/nidash/nidm#NIDM_0000122>
SELECT DISTINCT ?type ?version ?exp_act WHERE {
{?exporter a nidm_nidmfsl: .} UNION {?exporter a nidm_spm_results_nidm: .}.
?exporter a ?type ;
nidm_softwareVersion: ?version .
?exp_act prov:wasAssociatedWith ?exporter .
FILTER ( ?type NOT IN (prov:SoftwareAgent, prov:Agent))
}
"""
sd = g.query(query)
objects = dict()
if sd:
for row in sd:
argums = row.asdict()
if (argums['type'] == NIDM_SPM_RESULTS_NIDM and
(argums['version'].eq('12.6903') or
argums['version'].eq('12.575ac2c'))):
warnings.warn('Applying fixes for SPM exporter ' +
str(argums['version']))
# crypto namespace inconsistent with NIDM-Results spec
to_replace[('http://id.loc.gov/vocabulary/preservation/' +
'cryptographicHashFunctions/')] = (
'http://id.loc.gov/vocabulary/preservation/' +
'cryptographicHashFunctions#')
# Missing 'activity' attribute in qualified Generation
to_replace['a prov:Generation .'] = (
'a prov:Generation ; prov:activity <' +
str(argums['exp_act']) + '> .')
# Avoid confusion between attribute and
# class uncorrected p-value
# cf. https://github.com/incf-nidash/nidm/issues/421
to_replace[('@prefix nidm_PValueUncorrected: ' +
'<http://purl.org/nidash/nidm#NIDM_0000160>')] = (
'@prefix nidm_UncorrectedPValue: ' +
'<http://purl.org/nidash/nidm#NIDM_0000160>')
to_replace['nidm_PValueUncorrected'] = 'nidm_UncorrectedPValue'
if to_replace is not None:
for to_rep, replacement in to_replace.items():
rdf_data = rdf_data.replace(to_rep, replacement)
return rdf_data | [
"def",
"fix_for_specific_versions",
"(",
"self",
",",
"rdf_data",
",",
"to_replace",
")",
":",
"# Load the graph as is so that we can query",
"g",
"=",
"self",
".",
"parse",
"(",
"rdf_data",
")",
"# Retreive the exporter name and version",
"query",
"=",
"\"\"\"\nprefix nidm_spm_results_nidm: <http://purl.org/nidash/nidm#NIDM_0000168>\nprefix nidm_nidmfsl: <http://purl.org/nidash/nidm#NIDM_0000167>\nprefix nidm_softwareVersion: <http://purl.org/nidash/nidm#NIDM_0000122>\n\nSELECT DISTINCT ?type ?version ?exp_act WHERE {\n {?exporter a nidm_nidmfsl: .} UNION {?exporter a nidm_spm_results_nidm: .}.\n ?exporter a ?type ;\n nidm_softwareVersion: ?version .\n\n ?exp_act prov:wasAssociatedWith ?exporter .\n\n FILTER ( ?type NOT IN (prov:SoftwareAgent, prov:Agent))\n}\n \"\"\"",
"sd",
"=",
"g",
".",
"query",
"(",
"query",
")",
"objects",
"=",
"dict",
"(",
")",
"if",
"sd",
":",
"for",
"row",
"in",
"sd",
":",
"argums",
"=",
"row",
".",
"asdict",
"(",
")",
"if",
"(",
"argums",
"[",
"'type'",
"]",
"==",
"NIDM_SPM_RESULTS_NIDM",
"and",
"(",
"argums",
"[",
"'version'",
"]",
".",
"eq",
"(",
"'12.6903'",
")",
"or",
"argums",
"[",
"'version'",
"]",
".",
"eq",
"(",
"'12.575ac2c'",
")",
")",
")",
":",
"warnings",
".",
"warn",
"(",
"'Applying fixes for SPM exporter '",
"+",
"str",
"(",
"argums",
"[",
"'version'",
"]",
")",
")",
"# crypto namespace inconsistent with NIDM-Results spec",
"to_replace",
"[",
"(",
"'http://id.loc.gov/vocabulary/preservation/'",
"+",
"'cryptographicHashFunctions/'",
")",
"]",
"=",
"(",
"'http://id.loc.gov/vocabulary/preservation/'",
"+",
"'cryptographicHashFunctions#'",
")",
"# Missing 'activity' attribute in qualified Generation",
"to_replace",
"[",
"'a prov:Generation .'",
"]",
"=",
"(",
"'a prov:Generation ; prov:activity <'",
"+",
"str",
"(",
"argums",
"[",
"'exp_act'",
"]",
")",
"+",
"'> .'",
")",
"# Avoid confusion between attribute and",
"# class uncorrected p-value",
"# cf. https://github.com/incf-nidash/nidm/issues/421",
"to_replace",
"[",
"(",
"'@prefix nidm_PValueUncorrected: '",
"+",
"'<http://purl.org/nidash/nidm#NIDM_0000160>'",
")",
"]",
"=",
"(",
"'@prefix nidm_UncorrectedPValue: '",
"+",
"'<http://purl.org/nidash/nidm#NIDM_0000160>'",
")",
"to_replace",
"[",
"'nidm_PValueUncorrected'",
"]",
"=",
"'nidm_UncorrectedPValue'",
"if",
"to_replace",
"is",
"not",
"None",
":",
"for",
"to_rep",
",",
"replacement",
"in",
"to_replace",
".",
"items",
"(",
")",
":",
"rdf_data",
"=",
"rdf_data",
".",
"replace",
"(",
"to_rep",
",",
"replacement",
")",
"return",
"rdf_data"
] | 42.833333 | 22.1 |
def embeddedFileDel(self, name):
"""Delete embedded file by name."""
if self.isClosed or self.isEncrypted:
raise ValueError("operation illegal for closed / encrypted doc")
return _fitz.Document_embeddedFileDel(self, name) | [
"def",
"embeddedFileDel",
"(",
"self",
",",
"name",
")",
":",
"if",
"self",
".",
"isClosed",
"or",
"self",
".",
"isEncrypted",
":",
"raise",
"ValueError",
"(",
"\"operation illegal for closed / encrypted doc\"",
")",
"return",
"_fitz",
".",
"Document_embeddedFileDel",
"(",
"self",
",",
"name",
")"
] | 42.166667 | 17.666667 |
def parse_xml_node(self, node):
'''Parse an xml.dom Node object representing a message sending object
into this object.
'''
self._targets = []
for c in node.getElementsByTagNameNS(RTS_NS, 'targets'):
if c.getElementsByTagNameNS(RTS_NS, 'WaitTime'):
new_target = WaitTime()
elif c.getElementsByTagNameNS(RTS_NS, 'Preceding'):
new_target = Preceding()
else:
new_target = Condition()
new_target.parse_xml_node(c)
self._targets.append(new_target)
return self | [
"def",
"parse_xml_node",
"(",
"self",
",",
"node",
")",
":",
"self",
".",
"_targets",
"=",
"[",
"]",
"for",
"c",
"in",
"node",
".",
"getElementsByTagNameNS",
"(",
"RTS_NS",
",",
"'targets'",
")",
":",
"if",
"c",
".",
"getElementsByTagNameNS",
"(",
"RTS_NS",
",",
"'WaitTime'",
")",
":",
"new_target",
"=",
"WaitTime",
"(",
")",
"elif",
"c",
".",
"getElementsByTagNameNS",
"(",
"RTS_NS",
",",
"'Preceding'",
")",
":",
"new_target",
"=",
"Preceding",
"(",
")",
"else",
":",
"new_target",
"=",
"Condition",
"(",
")",
"new_target",
".",
"parse_xml_node",
"(",
"c",
")",
"self",
".",
"_targets",
".",
"append",
"(",
"new_target",
")",
"return",
"self"
] | 37.25 | 16.25 |
def get_policy(self):
"""
Returns an instance of :attr:`~policy_class`.
:return: An instance of the current policy class.
:rtype: dockermap.map.policy.base.BasePolicy
"""
if not self._policy:
self._policy = self.policy_class(self._maps, self._clients)
return self._policy | [
"def",
"get_policy",
"(",
"self",
")",
":",
"if",
"not",
"self",
".",
"_policy",
":",
"self",
".",
"_policy",
"=",
"self",
".",
"policy_class",
"(",
"self",
".",
"_maps",
",",
"self",
".",
"_clients",
")",
"return",
"self",
".",
"_policy"
] | 33.1 | 15.7 |
def from_tsv(cls, line, field_names, timezone_in_use=pytz.utc, database=None):
'''
Create a model instance from a tab-separated line. The line may or may not include a newline.
The `field_names` list must match the fields defined in the model, but does not have to include all of them.
- `line`: the TSV-formatted data.
- `field_names`: names of the model fields in the data.
- `timezone_in_use`: the timezone to use when parsing dates and datetimes.
- `database`: if given, sets the database that this instance belongs to.
'''
from six import next
values = iter(parse_tsv(line))
kwargs = {}
for name in field_names:
field = getattr(cls, name)
kwargs[name] = field.to_python(next(values), timezone_in_use)
obj = cls(**kwargs)
if database is not None:
obj.set_database(database)
return obj | [
"def",
"from_tsv",
"(",
"cls",
",",
"line",
",",
"field_names",
",",
"timezone_in_use",
"=",
"pytz",
".",
"utc",
",",
"database",
"=",
"None",
")",
":",
"from",
"six",
"import",
"next",
"values",
"=",
"iter",
"(",
"parse_tsv",
"(",
"line",
")",
")",
"kwargs",
"=",
"{",
"}",
"for",
"name",
"in",
"field_names",
":",
"field",
"=",
"getattr",
"(",
"cls",
",",
"name",
")",
"kwargs",
"[",
"name",
"]",
"=",
"field",
".",
"to_python",
"(",
"next",
"(",
"values",
")",
",",
"timezone_in_use",
")",
"obj",
"=",
"cls",
"(",
"*",
"*",
"kwargs",
")",
"if",
"database",
"is",
"not",
"None",
":",
"obj",
".",
"set_database",
"(",
"database",
")",
"return",
"obj"
] | 42.090909 | 26.454545 |
def span(self):
"""Return a contiguous range that is a superset of this range.
Returns:
A VersionRange object representing the span of this range. For
example, the span of "2+<4|6+<8" would be "2+<8".
"""
other = VersionRange(None)
bound = _Bound(self.bounds[0].lower, self.bounds[-1].upper)
other.bounds = [bound]
return other | [
"def",
"span",
"(",
"self",
")",
":",
"other",
"=",
"VersionRange",
"(",
"None",
")",
"bound",
"=",
"_Bound",
"(",
"self",
".",
"bounds",
"[",
"0",
"]",
".",
"lower",
",",
"self",
".",
"bounds",
"[",
"-",
"1",
"]",
".",
"upper",
")",
"other",
".",
"bounds",
"=",
"[",
"bound",
"]",
"return",
"other"
] | 36.181818 | 18.818182 |
def make_app(root, **kw):
'''
Utility for creating the Pecan application object. This function should
generally be called from the ``setup_app`` function in your project's
``app.py`` file.
:param root: A string representing a root controller object (e.g.,
"myapp.controller.root.RootController")
:param static_root: The relative path to a directory containing static
files. Serving static files is only enabled when
debug mode is set.
:param debug: A flag to enable debug mode. This enables the debug
middleware and serving static files.
:param wrap_app: A function or middleware class to wrap the Pecan app.
This must either be a wsgi middleware class or a
function that returns a wsgi application. This wrapper
is applied first before wrapping the application in
other middlewares such as Pecan's debug middleware.
This should be used if you want to use middleware to
perform authentication or intercept all requests before
they are routed to the root controller.
:param logging: A dictionary used to configure logging. This uses
``logging.config.dictConfig``.
All other keyword arguments are passed in to the Pecan app constructor.
:returns: a ``Pecan`` object.
'''
# Pass logging configuration (if it exists) on to the Python logging module
logging = kw.get('logging', {})
debug = kw.get('debug', False)
if logging:
if debug:
try:
#
# By default, Python 2.7+ silences DeprecationWarnings.
# However, if conf.app.debug is True, we should probably ensure
# that users see these types of warnings.
#
from logging import captureWarnings
captureWarnings(True)
warnings.simplefilter("default", DeprecationWarning)
except ImportError:
# No captureWarnings on Python 2.6, DeprecationWarnings are on
pass
if isinstance(logging, Config):
logging = logging.to_dict()
if 'version' not in logging:
logging['version'] = 1
load_logging_config(logging)
# Instantiate the WSGI app by passing **kw onward
app = Pecan(root, **kw)
# Optionally wrap the app in another WSGI app
wrap_app = kw.get('wrap_app', None)
if wrap_app:
app = wrap_app(app)
# Configuration for serving custom error messages
errors = kw.get('errors', getattr(conf.app, 'errors', {}))
if errors:
app = middleware.errordocument.ErrorDocumentMiddleware(app, errors)
# Included for internal redirect support
app = middleware.recursive.RecursiveMiddleware(app)
# When in debug mode, load exception debugging middleware
static_root = kw.get('static_root', None)
if debug:
debug_kwargs = getattr(conf, 'debug', {})
debug_kwargs.setdefault('context_injectors', []).append(
lambda environ: {
'request': environ.get('pecan.locals', {}).get('request')
}
)
app = DebugMiddleware(
app,
**debug_kwargs
)
# Support for serving static files (for development convenience)
if static_root:
app = middleware.static.StaticFileMiddleware(app, static_root)
elif static_root:
warnings.warn(
"`static_root` is only used when `debug` is True, ignoring",
RuntimeWarning
)
if hasattr(conf, 'requestviewer'):
warnings.warn(''.join([
"`pecan.conf.requestviewer` is deprecated. To apply the ",
"`RequestViewerHook` to your application, add it to ",
"`pecan.conf.app.hooks` or manually in your project's `app.py` ",
"file."]),
DeprecationWarning
)
return app | [
"def",
"make_app",
"(",
"root",
",",
"*",
"*",
"kw",
")",
":",
"# Pass logging configuration (if it exists) on to the Python logging module",
"logging",
"=",
"kw",
".",
"get",
"(",
"'logging'",
",",
"{",
"}",
")",
"debug",
"=",
"kw",
".",
"get",
"(",
"'debug'",
",",
"False",
")",
"if",
"logging",
":",
"if",
"debug",
":",
"try",
":",
"#",
"# By default, Python 2.7+ silences DeprecationWarnings.",
"# However, if conf.app.debug is True, we should probably ensure",
"# that users see these types of warnings.",
"#",
"from",
"logging",
"import",
"captureWarnings",
"captureWarnings",
"(",
"True",
")",
"warnings",
".",
"simplefilter",
"(",
"\"default\"",
",",
"DeprecationWarning",
")",
"except",
"ImportError",
":",
"# No captureWarnings on Python 2.6, DeprecationWarnings are on",
"pass",
"if",
"isinstance",
"(",
"logging",
",",
"Config",
")",
":",
"logging",
"=",
"logging",
".",
"to_dict",
"(",
")",
"if",
"'version'",
"not",
"in",
"logging",
":",
"logging",
"[",
"'version'",
"]",
"=",
"1",
"load_logging_config",
"(",
"logging",
")",
"# Instantiate the WSGI app by passing **kw onward",
"app",
"=",
"Pecan",
"(",
"root",
",",
"*",
"*",
"kw",
")",
"# Optionally wrap the app in another WSGI app",
"wrap_app",
"=",
"kw",
".",
"get",
"(",
"'wrap_app'",
",",
"None",
")",
"if",
"wrap_app",
":",
"app",
"=",
"wrap_app",
"(",
"app",
")",
"# Configuration for serving custom error messages",
"errors",
"=",
"kw",
".",
"get",
"(",
"'errors'",
",",
"getattr",
"(",
"conf",
".",
"app",
",",
"'errors'",
",",
"{",
"}",
")",
")",
"if",
"errors",
":",
"app",
"=",
"middleware",
".",
"errordocument",
".",
"ErrorDocumentMiddleware",
"(",
"app",
",",
"errors",
")",
"# Included for internal redirect support",
"app",
"=",
"middleware",
".",
"recursive",
".",
"RecursiveMiddleware",
"(",
"app",
")",
"# When in debug mode, load exception debugging middleware",
"static_root",
"=",
"kw",
".",
"get",
"(",
"'static_root'",
",",
"None",
")",
"if",
"debug",
":",
"debug_kwargs",
"=",
"getattr",
"(",
"conf",
",",
"'debug'",
",",
"{",
"}",
")",
"debug_kwargs",
".",
"setdefault",
"(",
"'context_injectors'",
",",
"[",
"]",
")",
".",
"append",
"(",
"lambda",
"environ",
":",
"{",
"'request'",
":",
"environ",
".",
"get",
"(",
"'pecan.locals'",
",",
"{",
"}",
")",
".",
"get",
"(",
"'request'",
")",
"}",
")",
"app",
"=",
"DebugMiddleware",
"(",
"app",
",",
"*",
"*",
"debug_kwargs",
")",
"# Support for serving static files (for development convenience)",
"if",
"static_root",
":",
"app",
"=",
"middleware",
".",
"static",
".",
"StaticFileMiddleware",
"(",
"app",
",",
"static_root",
")",
"elif",
"static_root",
":",
"warnings",
".",
"warn",
"(",
"\"`static_root` is only used when `debug` is True, ignoring\"",
",",
"RuntimeWarning",
")",
"if",
"hasattr",
"(",
"conf",
",",
"'requestviewer'",
")",
":",
"warnings",
".",
"warn",
"(",
"''",
".",
"join",
"(",
"[",
"\"`pecan.conf.requestviewer` is deprecated. To apply the \"",
",",
"\"`RequestViewerHook` to your application, add it to \"",
",",
"\"`pecan.conf.app.hooks` or manually in your project's `app.py` \"",
",",
"\"file.\"",
"]",
")",
",",
"DeprecationWarning",
")",
"return",
"app"
] | 39.019608 | 23.568627 |
def marshal_dict(
obj,
types,
method=None,
fields=None,
**m_kwargs
):
""" Recursively marshal a Python object to a dict
that can be passed to json.{dump,dumps}, a web client,
or a web server, document database, etc...
Args:
obj: object, It's members can be nested Python
objects which will be converted to dictionaries
types: tuple-of-types, The primitive types that can be
serialized
method: None-or-str, None to use 'marshal_dict' recursively,
or a str that corresponds to the name of a class method
to use. Any nested types that are not an instance of
@types must have this method defined.
fields: None-list-of-str, Explicitly marshal only these fields
m_kwargs: Keyword arguments to pass to @method
Returns:
dict
"""
has_slots, d = _get_dict(obj)
if fields:
for field in fields:
assert field in d
return {
k: v if isinstance(v, types) else (
getattr(v, method)(**m_kwargs)
if method
else marshal_dict(v, types)
)
for k, v in d.items()
if k in fields
}
excl = getattr(obj, '_marshal_exclude', [])
if (
has_slots
or
getattr(obj, '_marshal_only_init_args', False)
):
args = init_args(obj)
excl.extend([x for x in d if x not in args])
if getattr(obj, '_marshal_exclude_none', False):
excl.extend(k for k, v in d.items() if v is None)
else:
none_keys = getattr(obj, '_marshal_exclude_none_keys', [])
if none_keys:
excl.extend(x for x in none_keys if d.get(x) is None)
return {
k: v if isinstance(v, types) else (
getattr(v, method)(**m_kwargs)
if method
else marshal_dict(v, types)
)
for k, v in d.items()
if k not in excl
} | [
"def",
"marshal_dict",
"(",
"obj",
",",
"types",
",",
"method",
"=",
"None",
",",
"fields",
"=",
"None",
",",
"*",
"*",
"m_kwargs",
")",
":",
"has_slots",
",",
"d",
"=",
"_get_dict",
"(",
"obj",
")",
"if",
"fields",
":",
"for",
"field",
"in",
"fields",
":",
"assert",
"field",
"in",
"d",
"return",
"{",
"k",
":",
"v",
"if",
"isinstance",
"(",
"v",
",",
"types",
")",
"else",
"(",
"getattr",
"(",
"v",
",",
"method",
")",
"(",
"*",
"*",
"m_kwargs",
")",
"if",
"method",
"else",
"marshal_dict",
"(",
"v",
",",
"types",
")",
")",
"for",
"k",
",",
"v",
"in",
"d",
".",
"items",
"(",
")",
"if",
"k",
"in",
"fields",
"}",
"excl",
"=",
"getattr",
"(",
"obj",
",",
"'_marshal_exclude'",
",",
"[",
"]",
")",
"if",
"(",
"has_slots",
"or",
"getattr",
"(",
"obj",
",",
"'_marshal_only_init_args'",
",",
"False",
")",
")",
":",
"args",
"=",
"init_args",
"(",
"obj",
")",
"excl",
".",
"extend",
"(",
"[",
"x",
"for",
"x",
"in",
"d",
"if",
"x",
"not",
"in",
"args",
"]",
")",
"if",
"getattr",
"(",
"obj",
",",
"'_marshal_exclude_none'",
",",
"False",
")",
":",
"excl",
".",
"extend",
"(",
"k",
"for",
"k",
",",
"v",
"in",
"d",
".",
"items",
"(",
")",
"if",
"v",
"is",
"None",
")",
"else",
":",
"none_keys",
"=",
"getattr",
"(",
"obj",
",",
"'_marshal_exclude_none_keys'",
",",
"[",
"]",
")",
"if",
"none_keys",
":",
"excl",
".",
"extend",
"(",
"x",
"for",
"x",
"in",
"none_keys",
"if",
"d",
".",
"get",
"(",
"x",
")",
"is",
"None",
")",
"return",
"{",
"k",
":",
"v",
"if",
"isinstance",
"(",
"v",
",",
"types",
")",
"else",
"(",
"getattr",
"(",
"v",
",",
"method",
")",
"(",
"*",
"*",
"m_kwargs",
")",
"if",
"method",
"else",
"marshal_dict",
"(",
"v",
",",
"types",
")",
")",
"for",
"k",
",",
"v",
"in",
"d",
".",
"items",
"(",
")",
"if",
"k",
"not",
"in",
"excl",
"}"
] | 29.537313 | 21.80597 |
def dp990(self, value=None):
""" Corresponds to IDD Field `dp990`
Dew-point temperature corresponding to 90.0% annual cumulative
frequency of occurrence (cold conditions)
Args:
value (float): value for IDD Field `dp990`
Unit: C
if `value` is None it will not be checked against the
specification and is assumed to be a missing value
Raises:
ValueError: if `value` is not a valid value
"""
if value is not None:
try:
value = float(value)
except ValueError:
raise ValueError('value {} need to be of type float '
'for field `dp990`'.format(value))
self._dp990 = value | [
"def",
"dp990",
"(",
"self",
",",
"value",
"=",
"None",
")",
":",
"if",
"value",
"is",
"not",
"None",
":",
"try",
":",
"value",
"=",
"float",
"(",
"value",
")",
"except",
"ValueError",
":",
"raise",
"ValueError",
"(",
"'value {} need to be of type float '",
"'for field `dp990`'",
".",
"format",
"(",
"value",
")",
")",
"self",
".",
"_dp990",
"=",
"value"
] | 35.090909 | 20.090909 |
def withSize(cls, minimum, maximum):
"""Creates a subclass with value size constraint.
"""
class X(cls):
subtypeSpec = cls.subtypeSpec + constraint.ValueSizeConstraint(
minimum, maximum)
X.__name__ = cls.__name__
return X | [
"def",
"withSize",
"(",
"cls",
",",
"minimum",
",",
"maximum",
")",
":",
"class",
"X",
"(",
"cls",
")",
":",
"subtypeSpec",
"=",
"cls",
".",
"subtypeSpec",
"+",
"constraint",
".",
"ValueSizeConstraint",
"(",
"minimum",
",",
"maximum",
")",
"X",
".",
"__name__",
"=",
"cls",
".",
"__name__",
"return",
"X"
] | 28.2 | 17.6 |
def notify(self, message):
    """Push a JSON-serialized delivery record onto a redis list.

    TODO: Add code to lpush to redis stack
          rpop when stack hits size 'X'
    """
    record = {
        'payload': self.payload,
        'attempt': self.attempt,
        'success': self.success,
        'response_message': self.response_content,
        'hash_value': self.hash_value,
        'response_status': self.response.status_code,
        'notification': message,
        'created': timezone.now(),
    }
    serialized = json.dumps(record, cls=StandardJSONEncoder)
    list_key = make_key(self.event, self.owner.username, self.identifier)
    redis.lpush(list_key, serialized)
"def",
"notify",
"(",
"self",
",",
"message",
")",
":",
"data",
"=",
"dict",
"(",
"payload",
"=",
"self",
".",
"payload",
",",
"attempt",
"=",
"self",
".",
"attempt",
",",
"success",
"=",
"self",
".",
"success",
",",
"response_message",
"=",
"self",
".",
"response_content",
",",
"hash_value",
"=",
"self",
".",
"hash_value",
",",
"response_status",
"=",
"self",
".",
"response",
".",
"status_code",
",",
"notification",
"=",
"message",
",",
"created",
"=",
"timezone",
".",
"now",
"(",
")",
")",
"value",
"=",
"json",
".",
"dumps",
"(",
"data",
",",
"cls",
"=",
"StandardJSONEncoder",
")",
"key",
"=",
"make_key",
"(",
"self",
".",
"event",
",",
"self",
".",
"owner",
".",
"username",
",",
"self",
".",
"identifier",
")",
"redis",
".",
"lpush",
"(",
"key",
",",
"value",
")"
] | 37.888889 | 10.444444 |
def new_driver(browser_name, *args, **kwargs):
    """Instantiates a new WebDriver instance, determining class by environment variables"""
    if browser_name == FIREFOX:
        return webdriver.Firefox(*args, **kwargs)

    if browser_name == PHANTOMJS:
        # NOTE: positional *args are intentionally not forwarded here;
        # PhantomJS gets its own bundled executable path.
        binary = os.path.join(os.path.dirname(__file__),
                              'phantomjs/executable/phantomjs_64bit')
        phantom = webdriver.PhantomJS(executable_path=binary, **kwargs)
        # PhantomJS requires an explicit window size to render correctly.
        phantom.set_window_size(1280, 800)
        return phantom

    # Any other browser name falls through to a remote driver.
    return webdriver.Remote(*args, **kwargs)
"def",
"new_driver",
"(",
"browser_name",
",",
"*",
"args",
",",
"*",
"*",
"kwargs",
")",
":",
"if",
"browser_name",
"==",
"FIREFOX",
":",
"return",
"webdriver",
".",
"Firefox",
"(",
"*",
"args",
",",
"*",
"*",
"kwargs",
")",
"# elif options['local'] and options['browser_name'] == CHROME:",
"# return webdriver.Chrome(*args, **kwargs)",
"#",
"# elif options['local'] and options['browser_name'] == IE:",
"# return webdriver.Ie(*args, **kwargs)",
"#",
"# elif options['local'] and options['browser_name'] == OPERA:",
"# return webdriver.Opera(*args, **kwargs)",
"elif",
"browser_name",
"==",
"PHANTOMJS",
":",
"executable_path",
"=",
"os",
".",
"path",
".",
"join",
"(",
"os",
".",
"path",
".",
"dirname",
"(",
"__file__",
")",
",",
"'phantomjs/executable/phantomjs_64bit'",
")",
"driver",
"=",
"webdriver",
".",
"PhantomJS",
"(",
"executable_path",
"=",
"executable_path",
",",
"*",
"*",
"kwargs",
")",
"driver",
".",
"set_window_size",
"(",
"1280",
",",
"800",
")",
"# Set a default because phantom needs it",
"return",
"driver",
"else",
":",
"# remote",
"driver",
"=",
"webdriver",
".",
"Remote",
"(",
"*",
"args",
",",
"*",
"*",
"kwargs",
")",
"return",
"driver"
] | 44.25 | 22.875 |
def handle(self, key, value):
    """
    Processes a valid action info request

    @param key: The key that matched the request
    @param value: The value associated with the key
    """
    # Mirrors the stop handler: key layout is <prefix>:<spiderid>:<appid>:<crawlid>
    parts = key.split(":")
    spiderid = parts[1]
    appid = parts[2]
    crawlid = parts[3]

    # Acknowledge the expire in the log.
    extras = self.get_log_dict('expire', appid,
                               spiderid, crawlid=crawlid)
    self.logger.info("Expiring crawl found", extra=extras)

    # Blacklist this crawl so it does not propagate further.
    blacklist_key = spiderid + ":blacklist"
    blacklist_entry = '{appid}||{crawlid}'.format(appid=appid,
                                                 crawlid=crawlid)
    self.redis_conn.sadd(blacklist_key, blacklist_entry)

    # Everything stored in the queue is now expired.
    purged = self._purge_crawl(spiderid, appid, crawlid)

    # Build the acknowledgement payload.
    master = {
        'server_time': int(self.get_current_time()),
        'crawlid': crawlid,
        'spiderid': spiderid,
        'appid': appid,
        'total_expired': purged,
        'action': 'expired',
    }
    if self._send_to_kafka(master):
        master['success'] = True
        self.logger.info('Sent expired ack to kafka', extra=master)
    else:
        master['success'] = False
        self.logger.error('Failed to send expired ack to kafka',
                          extra=master)
"def",
"handle",
"(",
"self",
",",
"key",
",",
"value",
")",
":",
"# very similar to stop",
"# break down key",
"elements",
"=",
"key",
".",
"split",
"(",
"\":\"",
")",
"spiderid",
"=",
"elements",
"[",
"1",
"]",
"appid",
"=",
"elements",
"[",
"2",
"]",
"crawlid",
"=",
"elements",
"[",
"3",
"]",
"# log ack of expire",
"extras",
"=",
"self",
".",
"get_log_dict",
"(",
"'expire'",
",",
"appid",
",",
"spiderid",
",",
"crawlid",
"=",
"crawlid",
")",
"self",
".",
"logger",
".",
"info",
"(",
"\"Expiring crawl found\"",
",",
"extra",
"=",
"extras",
")",
"# add crawl to blacklist so it doesnt propagate",
"redis_key",
"=",
"spiderid",
"+",
"\":blacklist\"",
"value",
"=",
"'{appid}||{crawlid}'",
".",
"format",
"(",
"appid",
"=",
"appid",
",",
"crawlid",
"=",
"crawlid",
")",
"# add this to the blacklist set",
"self",
".",
"redis_conn",
".",
"sadd",
"(",
"redis_key",
",",
"value",
")",
"# everything stored in the queue is now expired",
"result",
"=",
"self",
".",
"_purge_crawl",
"(",
"spiderid",
",",
"appid",
",",
"crawlid",
")",
"# add result to our dict",
"master",
"=",
"{",
"}",
"master",
"[",
"'server_time'",
"]",
"=",
"int",
"(",
"self",
".",
"get_current_time",
"(",
")",
")",
"master",
"[",
"'crawlid'",
"]",
"=",
"crawlid",
"master",
"[",
"'spiderid'",
"]",
"=",
"spiderid",
"master",
"[",
"'appid'",
"]",
"=",
"appid",
"master",
"[",
"'total_expired'",
"]",
"=",
"result",
"master",
"[",
"'action'",
"]",
"=",
"'expired'",
"if",
"self",
".",
"_send_to_kafka",
"(",
"master",
")",
":",
"master",
"[",
"'success'",
"]",
"=",
"True",
"self",
".",
"logger",
".",
"info",
"(",
"'Sent expired ack to kafka'",
",",
"extra",
"=",
"master",
")",
"else",
":",
"master",
"[",
"'success'",
"]",
"=",
"False",
"self",
".",
"logger",
".",
"error",
"(",
"'Failed to send expired ack to kafka'",
",",
"extra",
"=",
"master",
")"
] | 35.288889 | 16.4 |
def main():
    """
    NAME
        download_magic.py
    DESCRIPTION
        unpacks a magic formatted smartbook .txt file from the MagIC database into the
        tab delimited MagIC format txt files for use with the MagIC-Py programs.
    SYNTAX
        download_magic.py command line options]
    INPUT
        takes either the upload.txt file created by upload_magic.py or a file
        downloaded from the MagIC database (http://earthref.org/MagIC)
    OPTIONS
        -h prints help message and quits
        -i allows interactive entry of filename
        -f FILE specifies input file name
        -sep write location data to separate subdirectories (Location_*), (default False)
        -O do not overwrite duplicate Location_* directories while downloading
        -DM data model (2 or 3, default 3)
    """
    # -h: print the help text above and exit immediately.
    if '-h' in sys.argv:
        print(main.__doc__)
        sys.exit()
    # NOTE(review): dir_path set here is unconditionally overwritten by the
    # get_named_arg("-WD") call below -- this early block looks redundant; confirm.
    if '-WD' in sys.argv:
        ind=sys.argv.index('-WD')
        dir_path=sys.argv[ind+1]
    # interactive entry
    if '-i' in sys.argv:
        infile=input("Magic txt file for unpacking? ")
        dir_path = '.'
        input_dir_path = '.'
    # non-interactive
    else:
        # -f is required when not running interactively.
        infile = pmag.get_named_arg("-f", reqd=True)
    # if -O flag is present, overwrite is False
    overwrite = pmag.get_flag_arg_from_sys("-O", true=False, false=True)
    # if -sep flag is present, sep is True
    sep = pmag.get_flag_arg_from_sys("-sep", true=True, false=False)
    data_model = pmag.get_named_arg("-DM", default_val=3, reqd=False)
    # Working directory and input directory both default to the current dir.
    dir_path = pmag.get_named_arg("-WD", default_val=".", reqd=False)
    input_dir_path = pmag.get_named_arg("-ID", default_val=".", reqd=False)
    #if '-ID' not in sys.argv and '-WD' in sys.argv:
    #    input_dir_path = dir_path
    # Neither -WD nor -ID given: infer the input directory from the file path.
    if "-WD" not in sys.argv and "-ID" not in sys.argv:
        input_dir_path = os.path.split(infile)[0]
        if not input_dir_path:
            input_dir_path = "."
    ipmag.download_magic(infile, dir_path, input_dir_path, overwrite, True, data_model, sep)
"def",
"main",
"(",
")",
":",
"if",
"'-h'",
"in",
"sys",
".",
"argv",
":",
"print",
"(",
"main",
".",
"__doc__",
")",
"sys",
".",
"exit",
"(",
")",
"if",
"'-WD'",
"in",
"sys",
".",
"argv",
":",
"ind",
"=",
"sys",
".",
"argv",
".",
"index",
"(",
"'-WD'",
")",
"dir_path",
"=",
"sys",
".",
"argv",
"[",
"ind",
"+",
"1",
"]",
"# interactive entry",
"if",
"'-i'",
"in",
"sys",
".",
"argv",
":",
"infile",
"=",
"input",
"(",
"\"Magic txt file for unpacking? \"",
")",
"dir_path",
"=",
"'.'",
"input_dir_path",
"=",
"'.'",
"# non-interactive",
"else",
":",
"infile",
"=",
"pmag",
".",
"get_named_arg",
"(",
"\"-f\"",
",",
"reqd",
"=",
"True",
")",
"# if -O flag is present, overwrite is False",
"overwrite",
"=",
"pmag",
".",
"get_flag_arg_from_sys",
"(",
"\"-O\"",
",",
"true",
"=",
"False",
",",
"false",
"=",
"True",
")",
"# if -sep flag is present, sep is True",
"sep",
"=",
"pmag",
".",
"get_flag_arg_from_sys",
"(",
"\"-sep\"",
",",
"true",
"=",
"True",
",",
"false",
"=",
"False",
")",
"data_model",
"=",
"pmag",
".",
"get_named_arg",
"(",
"\"-DM\"",
",",
"default_val",
"=",
"3",
",",
"reqd",
"=",
"False",
")",
"dir_path",
"=",
"pmag",
".",
"get_named_arg",
"(",
"\"-WD\"",
",",
"default_val",
"=",
"\".\"",
",",
"reqd",
"=",
"False",
")",
"input_dir_path",
"=",
"pmag",
".",
"get_named_arg",
"(",
"\"-ID\"",
",",
"default_val",
"=",
"\".\"",
",",
"reqd",
"=",
"False",
")",
"#if '-ID' not in sys.argv and '-WD' in sys.argv:",
"# input_dir_path = dir_path",
"if",
"\"-WD\"",
"not",
"in",
"sys",
".",
"argv",
"and",
"\"-ID\"",
"not",
"in",
"sys",
".",
"argv",
":",
"input_dir_path",
"=",
"os",
".",
"path",
".",
"split",
"(",
"infile",
")",
"[",
"0",
"]",
"if",
"not",
"input_dir_path",
":",
"input_dir_path",
"=",
"\".\"",
"ipmag",
".",
"download_magic",
"(",
"infile",
",",
"dir_path",
",",
"input_dir_path",
",",
"overwrite",
",",
"True",
",",
"data_model",
",",
"sep",
")"
] | 37 | 22.555556 |
def vel_disp(self, kwargs_mass, kwargs_light, kwargs_anisotropy, kwargs_apertur):
    """
    computes the averaged LOS velocity dispersion in the slit (convolved)

    :param kwargs_mass: mass model parameters (following lenstronomy lens model conventions)
    :param kwargs_light: deflector light parameters (following lenstronomy light model conventions)
    :param kwargs_anisotropy: anisotropy parameters, may vary according to anisotropy type chosen.
        We refer to the Anisotropy() class for details on the parameters.
    :param kwargs_apertur: Aperture parameters, may vary depending on aperture type chosen.
        We refer to the Aperture() class for details on the parameters.
    :return: integrated LOS velocity dispersion in units [km/s]
    """
    # Monte-Carlo average of sigma^2 over the configured number of draws.
    total_sigma2 = 0
    for _ in range(self._num_sampling):
        total_sigma2 += self.draw_one_sigma2(kwargs_mass, kwargs_light,
                                             kwargs_anisotropy, kwargs_apertur)
    mean_sigma2 = total_sigma2 / self._num_sampling
    # Unit conversion from arc seconds and deflections to a physical
    # velocity dispersion in (km/s); 2G corrects for the integral prefactor.
    mean_sigma2 *= 2 * const.G
    denom = const.arcsec ** 2 * self.cosmo.D_d ** 2 * const.Mpc
    return np.sqrt(mean_sigma2 / denom) / 1000.
"def",
"vel_disp",
"(",
"self",
",",
"kwargs_mass",
",",
"kwargs_light",
",",
"kwargs_anisotropy",
",",
"kwargs_apertur",
")",
":",
"sigma2_R_sum",
"=",
"0",
"for",
"i",
"in",
"range",
"(",
"0",
",",
"self",
".",
"_num_sampling",
")",
":",
"sigma2_R",
"=",
"self",
".",
"draw_one_sigma2",
"(",
"kwargs_mass",
",",
"kwargs_light",
",",
"kwargs_anisotropy",
",",
"kwargs_apertur",
")",
"sigma2_R_sum",
"+=",
"sigma2_R",
"sigma_s2_average",
"=",
"sigma2_R_sum",
"/",
"self",
".",
"_num_sampling",
"# apply unit conversion from arc seconds and deflections to physical velocity disperison in (km/s)",
"sigma_s2_average",
"*=",
"2",
"*",
"const",
".",
"G",
"# correcting for integral prefactor",
"return",
"np",
".",
"sqrt",
"(",
"sigma_s2_average",
"/",
"(",
"const",
".",
"arcsec",
"**",
"2",
"*",
"self",
".",
"cosmo",
".",
"D_d",
"**",
"2",
"*",
"const",
".",
"Mpc",
")",
")",
"/",
"1000."
] | 67.2 | 36.1 |
def query_by_account(self, account_id, end_time=None, start_time=None):
    """
    Query by account.

    List authentication events for a given account.

    :param account_id: required path parameter identifying the account
    :param start_time: optional beginning of the time range for events
    :param end_time: optional end of the time range for events
    """
    # Required path parameter.
    path = {"account_id": account_id}
    data = {}
    params = {}
    # Optional query parameters are only sent when supplied.
    if start_time is not None:
        params["start_time"] = start_time
    if end_time is not None:
        params["end_time"] = end_time
    self.logger.debug("GET /api/v1/audit/authentication/accounts/{account_id} with query params: {params} and form data: {data}".format(params=params, data=data, **path))
    return self.generic_request("GET", "/api/v1/audit/authentication/accounts/{account_id}".format(**path), data=data, params=params, no_data=True)
"def",
"query_by_account",
"(",
"self",
",",
"account_id",
",",
"end_time",
"=",
"None",
",",
"start_time",
"=",
"None",
")",
":",
"path",
"=",
"{",
"}",
"data",
"=",
"{",
"}",
"params",
"=",
"{",
"}",
"# REQUIRED - PATH - account_id\r",
"\"\"\"ID\"\"\"",
"path",
"[",
"\"account_id\"",
"]",
"=",
"account_id",
"# OPTIONAL - start_time\r",
"\"\"\"The beginning of the time range from which you want events.\"\"\"",
"if",
"start_time",
"is",
"not",
"None",
":",
"params",
"[",
"\"start_time\"",
"]",
"=",
"start_time",
"# OPTIONAL - end_time\r",
"\"\"\"The end of the time range from which you want events.\"\"\"",
"if",
"end_time",
"is",
"not",
"None",
":",
"params",
"[",
"\"end_time\"",
"]",
"=",
"end_time",
"self",
".",
"logger",
".",
"debug",
"(",
"\"GET /api/v1/audit/authentication/accounts/{account_id} with query params: {params} and form data: {data}\"",
".",
"format",
"(",
"params",
"=",
"params",
",",
"data",
"=",
"data",
",",
"*",
"*",
"path",
")",
")",
"return",
"self",
".",
"generic_request",
"(",
"\"GET\"",
",",
"\"/api/v1/audit/authentication/accounts/{account_id}\"",
".",
"format",
"(",
"*",
"*",
"path",
")",
",",
"data",
"=",
"data",
",",
"params",
"=",
"params",
",",
"no_data",
"=",
"True",
")"
] | 39.269231 | 23.307692 |
def assign_region_to_channels(channels, anat, parc_type='aparc', max_approx=3,
                              exclude_regions=None):
    """Assign a brain region based on the channel location.

    Parameters
    ----------
    channels : instance of wonambi.attr.chan.Channels
        channels to assign regions to
    anat : instance of wonambi.attr.anat.Freesurfer
        anatomical information taken from freesurfer.
    parc_type : str
        'aparc', 'aparc.a2009s', 'BA', 'BA.thresh', or 'aparc.DKTatlas40'
        'aparc.DKTatlas40' is only for recent freesurfer versions
    max_approx : int, optional
        approximation to define position of the electrode.
    exclude_regions : list of str or empty list
        do not report regions if they contain these substrings. None means
        that it does not exclude any region. For example, to exclude white
        matter regions and unknown regions you can use
        exclude_regions=('White', 'WM', 'Unknown')

    Returns
    -------
    instance of wonambi.attr.chan.Channels
        same instance as before, now Chan have attr 'region'
    """
    for chan in channels.chan:
        # Look up the region (and approximation level) at this channel's xyz.
        region, approximation = anat.find_brain_region(chan.xyz, parc_type,
                                                       max_approx,
                                                       exclude_regions)
        chan.attr.update({'region': region, 'approx': approximation})
    return channels
"def",
"assign_region_to_channels",
"(",
"channels",
",",
"anat",
",",
"parc_type",
"=",
"'aparc'",
",",
"max_approx",
"=",
"3",
",",
"exclude_regions",
"=",
"None",
")",
":",
"for",
"one_chan",
"in",
"channels",
".",
"chan",
":",
"one_region",
",",
"approx",
"=",
"anat",
".",
"find_brain_region",
"(",
"one_chan",
".",
"xyz",
",",
"parc_type",
",",
"max_approx",
",",
"exclude_regions",
")",
"one_chan",
".",
"attr",
".",
"update",
"(",
"{",
"'region'",
":",
"one_region",
",",
"'approx'",
":",
"approx",
"}",
")",
"return",
"channels"
] | 43.147059 | 20.676471 |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.