text stringlengths 89 104k | code_tokens list | avg_line_len float64 7.91 980 | score float64 0 630 |
|---|---|---|---|
def reduce_by_device(parallelism, data, reduce_fn):
"""Reduces data per device.
This can be useful, for example, if we want to all-reduce n tensors on k<n
devices (like during eval when we have only one device). We call
reduce_by_device() to first sum the tensors per device, then call our usual
all-reduce operation to create one sum per device, followed by
expand_by_device, to create the appropriate number of pointers to these
results. See all_reduce_ring() below for an example of how this is used.
Args:
parallelism: a expert_utils.Parallelism object
data: a list of Tensors with length parallelism.n
reduce_fn: a function taking a list of Tensors. e.g. tf.add_n
Returns:
device_parallelism: a Parallelism object with each device listed only once.
reduced_data: A list of Tensors, one per device.
"""
unique_devices = []
device_to_data = {}
for dev, datum in zip(parallelism.devices, data):
if dev not in device_to_data:
unique_devices.append(dev)
device_to_data[dev] = [datum]
else:
device_to_data[dev].append(datum)
device_parallelism = Parallelism(unique_devices)
grouped_data = [device_to_data[dev] for dev in unique_devices]
return device_parallelism, device_parallelism(reduce_fn, grouped_data) | [
"def",
"reduce_by_device",
"(",
"parallelism",
",",
"data",
",",
"reduce_fn",
")",
":",
"unique_devices",
"=",
"[",
"]",
"device_to_data",
"=",
"{",
"}",
"for",
"dev",
",",
"datum",
"in",
"zip",
"(",
"parallelism",
".",
"devices",
",",
"data",
")",
":",
... | 42.033333 | 21.766667 |
def _patch_stats_request(request):
'''If the request has no filter config, add one that should do what is
expected (include all items)
see: PE-11813
'''
filt = request.get('filter', {})
if not filt.get('config', None):
request['filter'] = filters.date_range('acquired',
gt='1970-01-01T00:00:00Z')
return request | [
"def",
"_patch_stats_request",
"(",
"request",
")",
":",
"filt",
"=",
"request",
".",
"get",
"(",
"'filter'",
",",
"{",
"}",
")",
"if",
"not",
"filt",
".",
"get",
"(",
"'config'",
",",
"None",
")",
":",
"request",
"[",
"'filter'",
"]",
"=",
"filters"... | 38.5 | 18.5 |
def frame_msg_ipc(body, header=None, raw_body=False): # pylint: disable=unused-argument
'''
Frame the given message with our wire protocol for IPC
For IPC, we don't need to be backwards compatible, so
use the more efficient "use_bin_type=True" on Python 3.
'''
framed_msg = {}
if header is None:
header = {}
framed_msg['head'] = header
framed_msg['body'] = body
if six.PY2:
return salt.utils.msgpack.dumps(framed_msg)
else:
return salt.utils.msgpack.dumps(framed_msg, use_bin_type=True) | [
"def",
"frame_msg_ipc",
"(",
"body",
",",
"header",
"=",
"None",
",",
"raw_body",
"=",
"False",
")",
":",
"# pylint: disable=unused-argument",
"framed_msg",
"=",
"{",
"}",
"if",
"header",
"is",
"None",
":",
"header",
"=",
"{",
"}",
"framed_msg",
"[",
"'hea... | 31.823529 | 25 |
def clean(input_string,
tag_dictionary=constants.SUPPORTED_TAGS):
"""
Sanitizes HTML. Tags not contained as keys in the tag_dictionary input are
removed, and child nodes are recursively moved to parent of removed node.
Attributes not contained as arguments in tag_dictionary are removed.
Doctype is set to <!DOCTYPE html>.
Args:
input_string (basestring): A (possibly unicode) string representing HTML.
tag_dictionary (Option[dict]): A dictionary with tags as keys and
attributes as values. This operates as a whitelist--i.e. if a tag
isn't contained, it will be removed. By default, this is set to
use the supported tags and attributes for the Amazon Kindle,
as found at https://kdp.amazon.com/help?topicId=A1JPUWCSD6F59O
Returns:
str: A (possibly unicode) string representing HTML.
Raises:
TypeError: Raised if input_string isn't a unicode string or string.
"""
try:
assert isinstance(input_string, basestring)
except AssertionError:
raise TypeError
root = BeautifulSoup(input_string, 'html.parser')
article_tag = root.find_all('article')
if article_tag:
root = article_tag[0]
stack = root.findAll(True, recursive=False)
while stack:
current_node = stack.pop()
child_node_list = current_node.findAll(True, recursive=False)
if current_node.name not in tag_dictionary.keys():
parent_node = current_node.parent
current_node.extract()
for n in child_node_list:
parent_node.append(n)
else:
attribute_dict = current_node.attrs
for attribute in attribute_dict.keys():
if attribute not in tag_dictionary[current_node.name]:
attribute_dict.pop(attribute)
stack.extend(child_node_list)
#wrap partial tree if necessary
if root.find_all('html') == []:
root = create_html_from_fragment(root)
# Remove img tags without src attribute
image_node_list = root.find_all('img')
for node in image_node_list:
if not node.has_attr('src'):
node.extract()
unformatted_html_unicode_string = unicode(root.prettify(encoding='utf-8',
formatter=EntitySubstitution.substitute_html),
encoding='utf-8')
# fix <br> tags since not handled well by default by bs4
unformatted_html_unicode_string = unformatted_html_unicode_string.replace('<br>', '<br/>')
# remove and replace with space since not handled well by certain e-readers
unformatted_html_unicode_string = unformatted_html_unicode_string.replace(' ', ' ')
return unformatted_html_unicode_string | [
"def",
"clean",
"(",
"input_string",
",",
"tag_dictionary",
"=",
"constants",
".",
"SUPPORTED_TAGS",
")",
":",
"try",
":",
"assert",
"isinstance",
"(",
"input_string",
",",
"basestring",
")",
"except",
"AssertionError",
":",
"raise",
"TypeError",
"root",
"=",
... | 45.590164 | 20.770492 |
def simxGetObjectSelection(clientID, operationMode):
'''
Please have a look at the function description/documentation in the V-REP user manual
'''
objectCount = ct.c_int()
objectHandles = ct.POINTER(ct.c_int)()
ret = c_GetObjectSelection(clientID, ct.byref(objectHandles), ct.byref(objectCount), operationMode)
newobj = []
if ret == 0:
for i in range(objectCount.value):
newobj.append(objectHandles[i])
return ret, newobj | [
"def",
"simxGetObjectSelection",
"(",
"clientID",
",",
"operationMode",
")",
":",
"objectCount",
"=",
"ct",
".",
"c_int",
"(",
")",
"objectHandles",
"=",
"ct",
".",
"POINTER",
"(",
"ct",
".",
"c_int",
")",
"(",
")",
"ret",
"=",
"c_GetObjectSelection",
"(",... | 33.428571 | 25.285714 |
def validate_periods(periods):
"""
If a `periods` argument is passed to the Datetime/Timedelta Array/Index
constructor, cast it to an integer.
Parameters
----------
periods : None, float, int
Returns
-------
periods : None or int
Raises
------
TypeError
if periods is None, float, or int
"""
if periods is not None:
if lib.is_float(periods):
periods = int(periods)
elif not lib.is_integer(periods):
raise TypeError('periods must be a number, got {periods}'
.format(periods=periods))
return periods | [
"def",
"validate_periods",
"(",
"periods",
")",
":",
"if",
"periods",
"is",
"not",
"None",
":",
"if",
"lib",
".",
"is_float",
"(",
"periods",
")",
":",
"periods",
"=",
"int",
"(",
"periods",
")",
"elif",
"not",
"lib",
".",
"is_integer",
"(",
"periods",... | 24.48 | 19.2 |
def create_variable(self, varname, vtype=None):
"""Create a tk variable.
If the variable was created previously return that instance.
"""
var_types = ('string', 'int', 'boolean', 'double')
vname = varname
var = None
type_from_name = 'string' # default type
if ':' in varname:
type_from_name, vname = varname.split(':')
# Fix incorrect order bug #33
if type_from_name not in (var_types):
# Swap order
type_from_name, vname = vname, type_from_name
if type_from_name not in (var_types):
raise Exception('Undefined variable type in "{0}"'.format(varname))
if vname in self.tkvariables:
var = self.tkvariables[vname]
else:
if vtype is None:
# get type from name
if type_from_name == 'int':
var = tkinter.IntVar()
elif type_from_name == 'boolean':
var = tkinter.BooleanVar()
elif type_from_name == 'double':
var = tkinter.DoubleVar()
else:
var = tkinter.StringVar()
else:
var = vtype()
self.tkvariables[vname] = var
return var | [
"def",
"create_variable",
"(",
"self",
",",
"varname",
",",
"vtype",
"=",
"None",
")",
":",
"var_types",
"=",
"(",
"'string'",
",",
"'int'",
",",
"'boolean'",
",",
"'double'",
")",
"vname",
"=",
"varname",
"var",
"=",
"None",
"type_from_name",
"=",
"'str... | 36.305556 | 14.222222 |
def split(self, fragment_height):
"""
Split an image into multiple fragments after fragment_height pixels
:param fragment_height: height of fragment
:return: list of PIL objects
"""
passes = int(math.ceil(self.height/fragment_height))
fragments = []
for n in range(0, passes):
left = 0
right = self.width
upper = n * fragment_height
lower = min((n + 1) * fragment_height, self.height)
box = (left, upper, right, lower)
fragments.append(self.img_original.crop(box))
return fragments | [
"def",
"split",
"(",
"self",
",",
"fragment_height",
")",
":",
"passes",
"=",
"int",
"(",
"math",
".",
"ceil",
"(",
"self",
".",
"height",
"/",
"fragment_height",
")",
")",
"fragments",
"=",
"[",
"]",
"for",
"n",
"in",
"range",
"(",
"0",
",",
"pass... | 35.882353 | 13.647059 |
def applications(self):
"""
Access the applications
:returns: twilio.rest.api.v2010.account.application.ApplicationList
:rtype: twilio.rest.api.v2010.account.application.ApplicationList
"""
if self._applications is None:
self._applications = ApplicationList(self._version, account_sid=self._solution['sid'], )
return self._applications | [
"def",
"applications",
"(",
"self",
")",
":",
"if",
"self",
".",
"_applications",
"is",
"None",
":",
"self",
".",
"_applications",
"=",
"ApplicationList",
"(",
"self",
".",
"_version",
",",
"account_sid",
"=",
"self",
".",
"_solution",
"[",
"'sid'",
"]",
... | 39.5 | 20.3 |
def parse_ics(icsfile):
"""Takes an icsfilename, parses it, and returns Events."""
events = []
cal = Calendar.from_ical(open(icsfile, 'rb').read())
for component in cal.walk('vevent'):
dtstart = component['dtstart'].dt
rrule = component['rrule']
freq, args = convert_rrule(rrule)
args['dtstart'] = dtstart
rrule = dateutil.rrule.rrule(freq, **args)
summary = vText.from_ical(component.get('summary', u''))
description = vText.from_ical(component.get('description', u''))
organizer = vText.from_ical(component.get('organizer', u''))
# TODO: Find an event id. If it's not there, then compose one
# with dtstart, summary, and organizer.
event_id = "::".join((str(dtstart), summary, organizer))
events.append(Event(event_id, rrule, summary, description))
return events | [
"def",
"parse_ics",
"(",
"icsfile",
")",
":",
"events",
"=",
"[",
"]",
"cal",
"=",
"Calendar",
".",
"from_ical",
"(",
"open",
"(",
"icsfile",
",",
"'rb'",
")",
".",
"read",
"(",
")",
")",
"for",
"component",
"in",
"cal",
".",
"walk",
"(",
"'vevent'... | 36 | 21.583333 |
def blacken(
c, line_length=79, folders=None, check=False, diff=False, find_opts=None
):
r"""
Run black on the current source tree (all ``.py`` files).
.. warning::
``black`` only runs on Python 3.6 or above. (However, it can be
executed against Python 2 compatible code.)
:param int line_length:
Line length argument. Default: ``79``.
:param list folders:
List of folders (or, on the CLI, an argument that can be given N times)
to search within for ``.py`` files. Default: ``["."]``. Honors the
``blacken.folders`` config option.
:param bool check:
Whether to run ``black --check``. Default: ``False``.
:param bool diff:
Whether to run ``black --diff``. Default: ``False``.
:param str find_opts:
Extra option string appended to the end of the internal ``find``
command. For example, skip a vendor directory with ``"-and -not -path
./vendor\*"``, add ``-mtime N``, or etc. Honors the
``blacken.find_opts`` config option.
.. versionadded:: 1.2
.. versionchanged:: 1.4
Added the ``find_opts`` argument.
"""
config = c.config.get("blacken", {})
default_folders = ["."]
configured_folders = config.get("folders", default_folders)
folders = folders or configured_folders
default_find_opts = ""
configured_find_opts = config.get("find_opts", default_find_opts)
find_opts = find_opts or configured_find_opts
black_command_line = "black -l {}".format(line_length)
if check:
black_command_line = "{} --check".format(black_command_line)
if diff:
black_command_line = "{} --diff".format(black_command_line)
if find_opts:
find_opts = " {}".format(find_opts)
else:
find_opts = ""
cmd = "find {} -name '*.py'{} | xargs {}".format(
" ".join(folders), find_opts, black_command_line
)
c.run(cmd, pty=True) | [
"def",
"blacken",
"(",
"c",
",",
"line_length",
"=",
"79",
",",
"folders",
"=",
"None",
",",
"check",
"=",
"False",
",",
"diff",
"=",
"False",
",",
"find_opts",
"=",
"None",
")",
":",
"config",
"=",
"c",
".",
"config",
".",
"get",
"(",
"\"blacken\"... | 35.735849 | 21.264151 |
def fit(self, sequences, y=None):
"""Fit the kcenters clustering on the data
Parameters
----------
sequences : list of array-like, each of shape [sequence_length, n_features]
A list of multivariate timeseries, or ``md.Trajectory``. Each
sequence may have a different length, but they all must have the
same number of features, or the same number of atoms if they are
``md.Trajectory``s.
Returns
-------
self
"""
MultiSequenceClusterMixin.fit(self, sequences)
self.cluster_center_indices_ = self._split_indices(self.cluster_center_indices_)
return self | [
"def",
"fit",
"(",
"self",
",",
"sequences",
",",
"y",
"=",
"None",
")",
":",
"MultiSequenceClusterMixin",
".",
"fit",
"(",
"self",
",",
"sequences",
")",
"self",
".",
"cluster_center_indices_",
"=",
"self",
".",
"_split_indices",
"(",
"self",
".",
"cluste... | 37.333333 | 24.944444 |
def is_repeated_suggestion(params, history):
"""
Parameters
----------
params : dict
Trial param set
history : list of 3-tuples
History of past function evaluations. Each element in history
should be a tuple `(params, score, status)`, where `params` is a
dict mapping parameter names to values
Returns
-------
is_repeated_suggestion : bool
"""
if any(params == hparams and hstatus == 'SUCCEEDED'
for hparams, hscore, hstatus in history):
return True
else:
return False | [
"def",
"is_repeated_suggestion",
"(",
"params",
",",
"history",
")",
":",
"if",
"any",
"(",
"params",
"==",
"hparams",
"and",
"hstatus",
"==",
"'SUCCEEDED'",
"for",
"hparams",
",",
"hscore",
",",
"hstatus",
"in",
"history",
")",
":",
"return",
"True",
"els... | 31.25 | 17.65 |
def update(self, mini_batch, num_sequences):
"""
Performs update on model.
:param mini_batch: Batch of experiences.
:param num_sequences: Number of sequences to process.
:return: Results of update.
"""
feed_dict = {self.model.dropout_rate: self.update_rate,
self.model.batch_size: num_sequences,
self.model.sequence_length: self.sequence_length}
if self.use_continuous_act:
feed_dict[self.model.true_action] = mini_batch['actions']. \
reshape([-1, self.brain.vector_action_space_size[0]])
else:
feed_dict[self.model.true_action] = mini_batch['actions'].reshape(
[-1, len(self.brain.vector_action_space_size)])
feed_dict[self.model.action_masks] = np.ones(
(num_sequences, sum(self.brain.vector_action_space_size)))
if self.use_vec_obs:
apparent_obs_size = self.brain.vector_observation_space_size * \
self.brain.num_stacked_vector_observations
feed_dict[self.model.vector_in] = mini_batch['vector_obs'] \
.reshape([-1,apparent_obs_size])
for i, _ in enumerate(self.model.visual_in):
visual_obs = mini_batch['visual_obs%d' % i]
feed_dict[self.model.visual_in[i]] = visual_obs
if self.use_recurrent:
feed_dict[self.model.memory_in] = np.zeros([num_sequences, self.m_size])
run_out = self._execute_model(feed_dict, self.update_dict)
return run_out | [
"def",
"update",
"(",
"self",
",",
"mini_batch",
",",
"num_sequences",
")",
":",
"feed_dict",
"=",
"{",
"self",
".",
"model",
".",
"dropout_rate",
":",
"self",
".",
"update_rate",
",",
"self",
".",
"model",
".",
"batch_size",
":",
"num_sequences",
",",
"... | 50.354839 | 20.225806 |
def near(self, key, point):
"""
增加查询条件,限制返回结果指定字段值的位置与给定地理位置临近。
:param key: 查询条件字段名
:param point: 需要查询的地理位置
:rtype: Query
"""
if point is None:
raise ValueError('near query does not accept None')
self._add_condition(key, '$nearSphere', point)
return self | [
"def",
"near",
"(",
"self",
",",
"key",
",",
"point",
")",
":",
"if",
"point",
"is",
"None",
":",
"raise",
"ValueError",
"(",
"'near query does not accept None'",
")",
"self",
".",
"_add_condition",
"(",
"key",
",",
"'$nearSphere'",
",",
"point",
")",
"ret... | 25.230769 | 16 |
def _stroke_simplification(self, pointlist):
"""The Douglas-Peucker line simplification takes a list of points as an
argument. It tries to simplifiy this list by removing as many points
as possible while still maintaining the overall shape of the stroke.
It does so by taking the first and the last point, connecting them
by a straight line and searchin for the point with the highest
distance. If that distance is bigger than 'epsilon', the point is
important and the algorithm continues recursively."""
# Find the point with the biggest distance
dmax = 0
index = 0
for i in range(1, len(pointlist)):
d = geometry.perpendicular_distance(pointlist[i],
pointlist[0],
pointlist[-1])
if d > dmax:
index = i
dmax = d
# If the maximum distance is bigger than the threshold 'epsilon', then
# simplify the pointlist recursively
if dmax >= self.epsilon:
# Recursive call
rec_results1 = self._stroke_simplification(pointlist[0:index])
rec_results2 = self._stroke_simplification(pointlist[index:])
result_list = rec_results1[:-1] + rec_results2
else:
result_list = [pointlist[0], pointlist[-1]]
return result_list | [
"def",
"_stroke_simplification",
"(",
"self",
",",
"pointlist",
")",
":",
"# Find the point with the biggest distance",
"dmax",
"=",
"0",
"index",
"=",
"0",
"for",
"i",
"in",
"range",
"(",
"1",
",",
"len",
"(",
"pointlist",
")",
")",
":",
"d",
"=",
"geomet... | 46.258065 | 21.967742 |
def read_value(hive, key, vname=None, use_32bit_registry=False):
r'''
Reads a registry value entry or the default value for a key. To read the
default value, don't pass ``vname``
Args:
hive (str): The name of the hive. Can be one of the following:
- HKEY_LOCAL_MACHINE or HKLM
- HKEY_CURRENT_USER or HKCU
- HKEY_USER or HKU
- HKEY_CLASSES_ROOT or HKCR
- HKEY_CURRENT_CONFIG or HKCC
key (str):
The key (looks like a path) to the value name.
vname (str):
The value name. These are the individual name/data pairs under the
key. If not passed, the key (Default) value will be returned.
use_32bit_registry (bool):
Accesses the 32bit portion of the registry on 64bit installations.
On 32bit machines this is ignored.
Returns:
dict: A dictionary containing the passed settings as well as the
value_data if successful. If unsuccessful, sets success to False.
bool: Returns False if the key is not found
If vname is not passed:
- Returns the first unnamed value (Default) as a string.
- Returns none if first unnamed value is empty.
CLI Example:
The following will get the value of the ``version`` value name in the
``HKEY_LOCAL_MACHINE\\SOFTWARE\\Salt`` key
.. code-block:: bash
salt '*' reg.read_value HKEY_LOCAL_MACHINE 'SOFTWARE\Salt' 'version'
CLI Example:
The following will get the default value of the
``HKEY_LOCAL_MACHINE\\SOFTWARE\\Salt`` key
.. code-block:: bash
salt '*' reg.read_value HKEY_LOCAL_MACHINE 'SOFTWARE\Salt'
'''
return __utils__['reg.read_value'](hive=hive,
key=key,
vname=vname,
use_32bit_registry=use_32bit_registry) | [
"def",
"read_value",
"(",
"hive",
",",
"key",
",",
"vname",
"=",
"None",
",",
"use_32bit_registry",
"=",
"False",
")",
":",
"return",
"__utils__",
"[",
"'reg.read_value'",
"]",
"(",
"hive",
"=",
"hive",
",",
"key",
"=",
"key",
",",
"vname",
"=",
"vname... | 32.661017 | 26.084746 |
def icon(self):
"""
Returns the icon filepath for this plugin.
:return <str>
"""
path = self._icon
if not path:
return ''
path = os.path.expandvars(os.path.expanduser(path))
if path.startswith('.'):
base_path = os.path.dirname(self.filepath())
path = os.path.abspath(os.path.join(base_path, path))
return path | [
"def",
"icon",
"(",
"self",
")",
":",
"path",
"=",
"self",
".",
"_icon",
"if",
"not",
"path",
":",
"return",
"''",
"path",
"=",
"os",
".",
"path",
".",
"expandvars",
"(",
"os",
".",
"path",
".",
"expanduser",
"(",
"path",
")",
")",
"if",
"path",
... | 26.0625 | 19.0625 |
def resolve_redirects_if_needed(self, uri):
"""
substitute with final uri after 303 redirects (if it's a www location!)
:param uri:
:return:
"""
if type(uri) == type("string") or type(uri) == type(u"unicode"):
if uri.startswith("www."): # support for lazy people
uri = "http://%s" % str(uri)
if uri.startswith("http://"):
# headers = "Accept: application/rdf+xml" # old way
headers = {'Accept': "application/rdf+xml"}
req = urllib2.Request(uri, headers=headers)
res = urllib2.urlopen(req)
uri = res.geturl()
else:
raise Exception("A URI must be in string format.")
return uri | [
"def",
"resolve_redirects_if_needed",
"(",
"self",
",",
"uri",
")",
":",
"if",
"type",
"(",
"uri",
")",
"==",
"type",
"(",
"\"string\"",
")",
"or",
"type",
"(",
"uri",
")",
"==",
"type",
"(",
"u\"unicode\"",
")",
":",
"if",
"uri",
".",
"startswith",
... | 36 | 19.714286 |
def from_file(filename, use_cores=True, thresh=1.e-4):
"""
Reads an xr-formatted file to create an Xr object.
Args:
filename (str): name of file to read from.
use_cores (bool): use core positions and discard shell
positions if set to True (default). Otherwise,
use shell positions and discard core positions.
thresh (float): relative threshold for consistency check
between cell parameters (lengths and angles) from
header information and cell vectors, respectively.
Returns:
xr (Xr): Xr object corresponding to the input
file.
"""
with zopen(filename, "rt") as f:
return Xr.from_string(
f.read(), use_cores=use_cores,
thresh=thresh) | [
"def",
"from_file",
"(",
"filename",
",",
"use_cores",
"=",
"True",
",",
"thresh",
"=",
"1.e-4",
")",
":",
"with",
"zopen",
"(",
"filename",
",",
"\"rt\"",
")",
"as",
"f",
":",
"return",
"Xr",
".",
"from_string",
"(",
"f",
".",
"read",
"(",
")",
",... | 41.142857 | 18.952381 |
def hash(obj, hash_name='md5', coerce_mmap=False):
""" Quick calculation of a hash to identify uniquely Python objects
containing numpy arrays.
Parameters
-----------
hash_name: 'md5' or 'sha1'
Hashing algorithm used. sha1 is supposedly safer, but md5 is
faster.
coerce_mmap: boolean
Make no difference between np.memmap and np.ndarray
"""
if 'numpy' in sys.modules:
hasher = NumpyHasher(hash_name=hash_name, coerce_mmap=coerce_mmap)
else:
hasher = Hasher(hash_name=hash_name)
return hasher.hash(obj) | [
"def",
"hash",
"(",
"obj",
",",
"hash_name",
"=",
"'md5'",
",",
"coerce_mmap",
"=",
"False",
")",
":",
"if",
"'numpy'",
"in",
"sys",
".",
"modules",
":",
"hasher",
"=",
"NumpyHasher",
"(",
"hash_name",
"=",
"hash_name",
",",
"coerce_mmap",
"=",
"coerce_m... | 37.3125 | 15.4375 |
def bezier_roots(coeffs):
r"""Compute polynomial roots from a polynomial in the Bernstein basis.
.. note::
This assumes the caller passes in a 1D array but does not check.
This takes the polynomial
.. math::
f(s) = \sum_{j = 0}^n b_{j, n} \cdot C_j.
and uses the variable :math:`\sigma = \frac{s}{1 - s}` to rewrite as
.. math::
\begin{align*}
f(s) &= (1 - s)^n \sum_{j = 0}^n \binom{n}{j} C_j \sigma^j \\
&= (1 - s)^n \sum_{j = 0}^n \widetilde{C_j} \sigma^j.
\end{align*}
Then it uses an eigenvalue solver to find the roots of
.. math::
g(\sigma) = \sum_{j = 0}^n \widetilde{C_j} \sigma^j
and convert them back into roots of :math:`f(s)` via
:math:`s = \frac{\sigma}{1 + \sigma}`.
For example, consider
.. math::
\begin{align*}
f_0(s) &= 2 (2 - s)(3 + s) \\
&= 12(1 - s)^2 + 11 \cdot 2s(1 - s) + 8 s^2
\end{align*}
First, we compute the companion matrix for
.. math::
g_0(\sigma) = 12 + 22 \sigma + 8 \sigma^2
.. testsetup:: bezier-roots0, bezier-roots1, bezier-roots2
import numpy as np
import numpy.linalg
from bezier._algebraic_intersection import bernstein_companion
from bezier._algebraic_intersection import bezier_roots
.. doctest:: bezier-roots0
>>> coeffs0 = np.asfortranarray([12.0, 11.0, 8.0])
>>> companion0, _, _ = bernstein_companion(coeffs0)
>>> companion0
array([[-2.75, -1.5 ],
[ 1. , 0. ]])
then take the eigenvalues of the companion matrix:
.. doctest:: bezier-roots0
>>> sigma_values0 = np.linalg.eigvals(companion0)
>>> sigma_values0
array([-2. , -0.75])
after transforming them, we have the roots of :math:`f(s)`:
.. doctest:: bezier-roots0
>>> sigma_values0 / (1.0 + sigma_values0)
array([ 2., -3.])
>>> bezier_roots(coeffs0)
array([ 2., -3.])
In cases where :math:`s = 1` is a root, the lead coefficient of
:math:`g` would be :math:`0`, so there is a reduction in the
companion matrix.
.. math::
\begin{align*}
f_1(s) &= 6 (s - 1)^2 (s - 3) (s - 5) \\
&= 90 (1 - s)^4 + 33 \cdot 4s(1 - s)^3 + 8 \cdot 6s^2(1 - s)^2
\end{align*}
.. doctest:: bezier-roots1
:options: +NORMALIZE_WHITESPACE
>>> coeffs1 = np.asfortranarray([90.0, 33.0, 8.0, 0.0, 0.0])
>>> companion1, degree1, effective_degree1 = bernstein_companion(
... coeffs1)
>>> companion1
array([[-2.75 , -1.875],
[ 1. , 0. ]])
>>> degree1
4
>>> effective_degree1
2
so the roots are a combination of the roots determined from
:math:`s = \frac{\sigma}{1 + \sigma}` and the number of factors
of :math:`(1 - s)` (i.e. the difference between the degree and
the effective degree):
.. doctest:: bezier-roots1
>>> bezier_roots(coeffs1)
array([3., 5., 1., 1.])
In some cases, a polynomial is represented with an "elevated" degree:
.. math::
\begin{align*}
f_2(s) &= 3 (s^2 + 1) \\
&= 3 (1 - s)^3 + 3 \cdot 3s(1 - s)^2 +
4 \cdot 3s^2(1 - s) + 6 s^3
\end{align*}
This results in a "point at infinity"
:math:`\sigma = -1 \Longleftrightarrow s = \infty`:
.. doctest:: bezier-roots2
>>> coeffs2 = np.asfortranarray([3.0, 3.0, 4.0, 6.0])
>>> companion2, _, _ = bernstein_companion(coeffs2)
>>> companion2
array([[-2. , -1.5, -0.5],
[ 1. , 0. , 0. ],
[ 0. , 1. , 0. ]])
>>> sigma_values2 = np.linalg.eigvals(companion2)
>>> sigma_values2
array([-1. +0.j , -0.5+0.5j, -0.5-0.5j])
so we drop any values :math:`\sigma` that are sufficiently close to
:math:`-1`:
.. doctest:: bezier-roots2
>>> expected2 = np.asfortranarray([1.0j, -1.0j])
>>> roots2 = bezier_roots(coeffs2)
>>> np.allclose(expected2, roots2, rtol=2e-15, atol=0.0)
True
Args:
coeffs (numpy.ndarray): A 1D array of coefficients in
the Bernstein basis.
Returns:
numpy.ndarray: A 1D array containing the roots.
"""
companion, degree, effective_degree = bernstein_companion(coeffs)
if effective_degree:
sigma_roots = np.linalg.eigvals(companion)
# Filter out `sigma = -1`, i.e. "points at infinity".
# We want the error ||(sigma - (-1))|| ~= 2^{-52}
to_keep = np.abs(sigma_roots + 1.0) > _SIGMA_THRESHOLD
sigma_roots = sigma_roots[to_keep]
s_vals = sigma_roots / (1.0 + sigma_roots)
else:
s_vals = np.empty((0,), order="F")
if effective_degree != degree:
delta = degree - effective_degree
s_vals = np.hstack([s_vals, [1] * delta])
return s_vals | [
"def",
"bezier_roots",
"(",
"coeffs",
")",
":",
"companion",
",",
"degree",
",",
"effective_degree",
"=",
"bernstein_companion",
"(",
"coeffs",
")",
"if",
"effective_degree",
":",
"sigma_roots",
"=",
"np",
".",
"linalg",
".",
"eigvals",
"(",
"companion",
")",
... | 27.941176 | 23.382353 |
def parse_add_loopback():
"""
Validate params when adding a loopback adapter
"""
class Add(argparse.Action):
def __call__(self, parser, args, values, option_string=None):
try:
ipaddress.IPv4Interface("{}/{}".format(values[1], values[2]))
except ipaddress.AddressValueError as e:
raise argparse.ArgumentTypeError("Invalid IP address: {}".format(e))
except ipaddress.NetmaskValueError as e:
raise argparse.ArgumentTypeError("Invalid subnet mask: {}".format(e))
setattr(args, self.dest, values)
return Add | [
"def",
"parse_add_loopback",
"(",
")",
":",
"class",
"Add",
"(",
"argparse",
".",
"Action",
")",
":",
"def",
"__call__",
"(",
"self",
",",
"parser",
",",
"args",
",",
"values",
",",
"option_string",
"=",
"None",
")",
":",
"try",
":",
"ipaddress",
".",
... | 38.3125 | 21.6875 |
def update_token(self):
"""Request a new token and store it for future use"""
logger.info('updating token')
if None in self.credentials.values():
raise RuntimeError("You must provide an username and a password")
credentials = dict(auth=self.credentials)
url = self.test_url if self.test else self.url
response = requests.post(url + "auth",
json=credentials)
data = response.json()["response"]
if "error_id" in data and data["error_id"] == "NOAUTH":
raise BadCredentials()
if "error_code" in data and data["error_code"] == "RATE_EXCEEDED":
time.sleep(150)
return
if "error_code" in data or "error_id" in data:
raise AppNexusException(response)
self.token = data["token"]
self.save_token()
return self.token | [
"def",
"update_token",
"(",
"self",
")",
":",
"logger",
".",
"info",
"(",
"'updating token'",
")",
"if",
"None",
"in",
"self",
".",
"credentials",
".",
"values",
"(",
")",
":",
"raise",
"RuntimeError",
"(",
"\"You must provide an username and a password\"",
")",... | 44.15 | 12.8 |
def data(self, index, role = QtCore.Qt.DisplayRole):
"""Reimplemented from QtCore.QAbstractItemModel
The value gets validated and is red if validation fails
and green if it passes.
"""
if not index.isValid():
return None
if role == QtCore.Qt.DisplayRole or role == QtCore.Qt.EditRole:
if index.column() == 0:
p = index.internalPointer()
k = self.get_key(p, index.row())
return k
if index.column() == 1:
v = self.get_value(index)
if not isinstance(v, Section):
return self._val_to_str(v)
if index.column() == 2:
return self.get_configspec_str(index)
if role == QtCore.Qt.ForegroundRole:
if index.column() == 1:
v = self.get_value(index)
if not isinstance(v, Section):
spec = self.get_configspec_str(index)
if spec is None or isinstance(spec, Section):
return
try:
self._vld.check(spec, v)
except ValidateError:
return QtGui.QBrush(self._invalid_col)
else:
return QtGui.QBrush(self._valid_col) | [
"def",
"data",
"(",
"self",
",",
"index",
",",
"role",
"=",
"QtCore",
".",
"Qt",
".",
"DisplayRole",
")",
":",
"if",
"not",
"index",
".",
"isValid",
"(",
")",
":",
"return",
"None",
"if",
"role",
"==",
"QtCore",
".",
"Qt",
".",
"DisplayRole",
"or",... | 41.28125 | 11.21875 |
def _get_envs_from_ref_paths(self, refs):
'''
Return the names of remote refs (stripped of the remote name) and tags
which are map to the branches and tags.
'''
def _check_ref(env_set, rname):
'''
Add the appropriate saltenv(s) to the set
'''
if rname in self.saltenv_revmap:
env_set.update(self.saltenv_revmap[rname])
else:
if rname == self.base:
env_set.add('base')
elif not self.disable_saltenv_mapping:
env_set.add(rname)
use_branches = 'branch' in self.ref_types
use_tags = 'tag' in self.ref_types
ret = set()
if salt.utils.stringutils.is_hex(self.base):
# gitfs_base or per-saltenv 'base' may point to a commit ID, which
# would not show up in the refs. Make sure we include it.
ret.add('base')
for ref in salt.utils.data.decode(refs):
if ref.startswith('refs/'):
ref = ref[5:]
rtype, rname = ref.split('/', 1)
if rtype == 'remotes' and use_branches:
parted = rname.partition('/')
rname = parted[2] if parted[2] else parted[0]
_check_ref(ret, rname)
elif rtype == 'tags' and use_tags:
_check_ref(ret, rname)
return ret | [
"def",
"_get_envs_from_ref_paths",
"(",
"self",
",",
"refs",
")",
":",
"def",
"_check_ref",
"(",
"env_set",
",",
"rname",
")",
":",
"'''\n Add the appropriate saltenv(s) to the set\n '''",
"if",
"rname",
"in",
"self",
".",
"saltenv_revmap",
":",
... | 37.594595 | 15.378378 |
def xorc_constraint(v=0, sense="maximize"):
""" XOR (r as variable) custom constraint"""
assert v in [0,1], "v must be 0 or 1 instead of %s" % v.__repr__()
model, x, y, z = _init()
r = model.addVar("r", "B")
n = model.addVar("n", "I") # auxiliary
model.addCons(r+quicksum([x,y,z]) == 2*n)
model.addCons(x==v)
model.setObjective(r, sense=sense)
_optimize("Custom XOR (as variable)", model) | [
"def",
"xorc_constraint",
"(",
"v",
"=",
"0",
",",
"sense",
"=",
"\"maximize\"",
")",
":",
"assert",
"v",
"in",
"[",
"0",
",",
"1",
"]",
",",
"\"v must be 0 or 1 instead of %s\"",
"%",
"v",
".",
"__repr__",
"(",
")",
"model",
",",
"x",
",",
"y",
",",... | 41.5 | 8.9 |
def init(project_name):
    """
    Scaffold a minimal Flask project named *project_name* under the
    current working directory.
    """
    project_dir = os.path.join(os.getcwd(), project_name)
    start_init_info(project_dir)
    # Project root and its top-level helper files.
    _mkdir_p(project_dir)
    os.chdir(project_dir)
    init_code('manage.py', _manage_basic_code)
    init_code('requirement.txt', _requirement_code)
    # Application package: views, forms, package init, plus the
    # templates/ and static/ directories.
    app_dir = os.path.join(project_dir, 'app')
    _mkdir_p(app_dir)
    os.chdir(app_dir)
    init_code('views.py', _views_basic_code)
    init_code('forms.py', _forms_basic_code)
    init_code('__init__.py', _init_basic_code)
    create_templates_static_files(app_dir)
    init_done_info()
"def",
"init",
"(",
"project_name",
")",
":",
"# the destination path",
"dst_path",
"=",
"os",
".",
"path",
".",
"join",
"(",
"os",
".",
"getcwd",
"(",
")",
",",
"project_name",
")",
"start_init_info",
"(",
"dst_path",
")",
"# create dst path",
"_mkdir_p",
"... | 22.633333 | 18.633333 |
def get_vm_config_file(name, datacenter, placement, datastore,
                       service_instance=None):
    '''
    Queries the virtual machine config file and returns
    vim.host.DatastoreBrowser.SearchResults object on success None on failure
    name
        Name of the virtual machine
    datacenter
        Datacenter name
    placement
        Placement dict; must contain either a 'cluster' key (cluster name)
        or a 'host' key (ESXi host name) telling where to search
    datastore
        Datastore where the virtual machine files are stored
    service_instance
        Service instance (vim.ServiceInstance) of the vCenter.
        Default is None.
    '''
    # The datastore browser searches the directory named after the VM for
    # any VM configuration files.
    browser_spec = vim.host.DatastoreBrowser.SearchSpec()
    directory = name
    browser_spec.query = [vim.host.DatastoreBrowser.VmConfigQuery()]
    datacenter_object = salt.utils.vmware.get_datacenter(service_instance,
                                                         datacenter)
    # Resolve the search container: a cluster when given, otherwise the
    # single named ESXi host.
    if 'cluster' in placement:
        container_object = salt.utils.vmware.get_cluster(datacenter_object,
                                                         placement['cluster'])
    else:
        container_objects = salt.utils.vmware.get_hosts(
            service_instance,
            datacenter_name=datacenter,
            host_names=[placement['host']])
        if not container_objects:
            raise salt.exceptions.VMwareObjectRetrievalError(
                'ESXi host named \'{0}\' wasn\'t '
                'found.'.format(placement['host']))
        container_object = container_objects[0]
    # list of vim.host.DatastoreBrowser.SearchResults objects
    files = salt.utils.vmware.get_datastore_files(service_instance,
                                                  directory,
                                                  [datastore],
                                                  container_object,
                                                  browser_spec)
    # Exactly one config file is expected in the VM folder; more than one
    # is an error, zero means the VM config was not found.
    if files and len(files[0].file) > 1:
        raise salt.exceptions.VMwareMultipleObjectsError(
            'Multiple configuration files found in '
            'the same virtual machine folder')
    elif files and files[0].file:
        return files[0]
    else:
        return None
"def",
"get_vm_config_file",
"(",
"name",
",",
"datacenter",
",",
"placement",
",",
"datastore",
",",
"service_instance",
"=",
"None",
")",
":",
"browser_spec",
"=",
"vim",
".",
"host",
".",
"DatastoreBrowser",
".",
"SearchSpec",
"(",
")",
"directory",
"=",
... | 38.867925 | 21.698113 |
def create_md5(path):
    """Return the hexadecimal MD5 digest of the file at *path*."""
    digest = hashlib.md5()
    # Binary mode so Windows does not translate line endings.
    with open(path, "rb") as stream:
        # Stream the file in 8 KiB chunks instead of loading it whole.
        for chunk in iter(lambda: stream.read(8192), b""):
            digest.update(chunk)
    return digest.hexdigest()
"def",
"create_md5",
"(",
"path",
")",
":",
"m",
"=",
"hashlib",
".",
"md5",
"(",
")",
"# rb necessary to run correctly in windows.",
"with",
"open",
"(",
"path",
",",
"\"rb\"",
")",
"as",
"f",
":",
"while",
"True",
":",
"data",
"=",
"f",
".",
"read",
... | 27.583333 | 15.75 |
def handler(self):
        """Handler on explicitly closing the GUI window."""
        # Pause playback while the confirmation dialog is shown.
        self.pauseMovie()
        if tkMessageBox.askokcancel("Quit?", "Are you sure you want to quit?"):
            self.exitClient()
        else: # When the user presses cancel, resume playing.
            #self.playMovie()
            # NOTE: Python 2 print statement — this module predates Python 3.
            print "Playing Movie"
            # Resume consuming the RTP stream on a fresh background thread.
            threading.Thread(target=self.listenRtp).start()
            #self.playEvent = threading.Event()
            #self.playEvent.clear()
            self.sendRtspRequest(self.PLAY)
"def",
"handler",
"(",
"self",
")",
":",
"self",
".",
"pauseMovie",
"(",
")",
"if",
"tkMessageBox",
".",
"askokcancel",
"(",
"\"Quit?\"",
",",
"\"Are you sure you want to quit?\"",
")",
":",
"self",
".",
"exitClient",
"(",
")",
"else",
":",
"# When the user pr... | 38.25 | 14 |
def read_first_available_value(filename, field_name):
    """Reads the first assigned value of the given field in the CSV table.

    Returns None when the file does not exist, the field is absent, or
    every row has an empty/missing value for it.
    """
    if not os.path.exists(filename):
        return None
    # newline='' is the documented way to hand a file to the csv module on
    # Python 3; the original opened in 'rb', which csv.DictReader rejects
    # (it iterates bytes instead of text).
    with open(filename, newline='') as csvfile:
        reader = csv.DictReader(csvfile)
        for row in reader:
            value = row.get(field_name)
            if value:
                return value
    return None
"def",
"read_first_available_value",
"(",
"filename",
",",
"field_name",
")",
":",
"if",
"not",
"os",
".",
"path",
".",
"exists",
"(",
"filename",
")",
":",
"return",
"None",
"with",
"open",
"(",
"filename",
",",
"'rb'",
")",
"as",
"csvfile",
":",
"reade... | 33.25 | 9.166667 |
def _binaryconfig(path, conf, dtype=None, shape=None, credentials=None):
    """
    Collects parameters to use for binary series loading.

    Reads a JSON conf file (named *conf*) alongside *path*; explicit
    *dtype*/*shape* arguments override whatever the file provides.

    Returns (shape, dtype); raises ValueError when either is missing from
    both the conf file and the arguments.
    """
    import json
    from thunder.readers import get_file_reader, FileNotFoundError
    reader = get_file_reader(path)(credentials=credentials)
    try:
        buf = reader.read(path, filename=conf)
        params = json.loads(str(buf.decode('utf-8')))
    except FileNotFoundError:
        # A missing conf file is fine; rely entirely on the arguments.
        params = {}
    if dtype:
        params['dtype'] = dtype
    if shape:
        params['shape'] = shape
    # Membership tests directly on the dict (the original used the
    # redundant `not in params.keys()`).
    if 'dtype' not in params:
        raise ValueError('dtype not specified either in conf.json or as argument')
    if 'shape' not in params:
        raise ValueError('shape not specified either in conf.json or as argument')
    return params['shape'], params['dtype']
"def",
"_binaryconfig",
"(",
"path",
",",
"conf",
",",
"dtype",
"=",
"None",
",",
"shape",
"=",
"None",
",",
"credentials",
"=",
"None",
")",
":",
"import",
"json",
"from",
"thunder",
".",
"readers",
"import",
"get_file_reader",
",",
"FileNotFoundError",
"... | 29.814815 | 22.555556 |
def write_response(
        self, status_code: Union[int, constants.HttpStatusCode], *,
        headers: Optional[_HeaderType]=None
) -> "writers.HttpResponseWriter":
        """
        Write a response to the client.

        The status code is coerced to a constants.HttpStatusCode before it
        is handed to the delegate; the resulting writer is cached on
        self._writer and returned.
        """
        code = constants.HttpStatusCode(status_code)
        self._writer = self.__delegate.write_response(code, headers=headers)
        return self._writer
"def",
"write_response",
"(",
"self",
",",
"status_code",
":",
"Union",
"[",
"int",
",",
"constants",
".",
"HttpStatusCode",
"]",
",",
"*",
",",
"headers",
":",
"Optional",
"[",
"_HeaderType",
"]",
"=",
"None",
")",
"->",
"\"writers.HttpResponseWriter\"",
":... | 32.916667 | 12.25 |
def render(ont, query_ids, args):
    """
    Writes or displays graph

    Renders *ont* (restricted to *query_ids*) with the writer selected by
    args.to, optionally slimming the ontology first when args.slim
    contains 'm'.
    """
    if args.slim.find('m') > -1:
        logging.info("SLIMMING")
        # BUG FIX: the original read `g = get_minimal_subgraph(g, query_ids)`,
        # which raised UnboundLocalError (g was never bound) and discarded the
        # result. Slim the ontology that is actually rendered instead.
        ont = get_minimal_subgraph(ont, query_ids)
    w = GraphRenderer.create(args.to)
    if args.showdefs:
        w.config.show_text_definition = True
    if args.render:
        if 'd' in args.render:
            logging.info("Showing text defs")
            w.config.show_text_definition = True
    if args.outfile is not None:
        w.outfile = args.outfile
    w.write(ont, query_ids=query_ids, container_predicates=args.container_properties)
"def",
"render",
"(",
"ont",
",",
"query_ids",
",",
"args",
")",
":",
"if",
"args",
".",
"slim",
".",
"find",
"(",
"'m'",
")",
">",
"-",
"1",
":",
"logging",
".",
"info",
"(",
"\"SLIMMING\"",
")",
"g",
"=",
"get_minimal_subgraph",
"(",
"g",
",",
... | 34 | 10.117647 |
def emit(self, what, *args):
        ''' what can be either name of the op, or node, or a list of statements.'''
        # Dispatch on the type of `what`:
        #   * string -> opcode name, forwarded to the executor
        #   * list   -> sequence of statements, emitted in order (returns None)
        #   * dict   -> AST node: its 'type' field names the handler method on
        #     self, and ALL of the node's fields (including 'type') are passed
        #     as keyword arguments.
        # NOTE: `basestring` marks this as Python 2 code.
        if isinstance(what, basestring):
            return self.exe.emit(what, *args)
        elif isinstance(what, list):
            self._emit_statement_list(what)
        else:
            return getattr(self, what['type'])(**what)
"def",
"emit",
"(",
"self",
",",
"what",
",",
"*",
"args",
")",
":",
"if",
"isinstance",
"(",
"what",
",",
"basestring",
")",
":",
"return",
"self",
".",
"exe",
".",
"emit",
"(",
"what",
",",
"*",
"args",
")",
"elif",
"isinstance",
"(",
"what",
"... | 42.75 | 13.5 |
def unsubscribe(self, transform="", downlink=False):
        """Cancel a subscription previously created with subscribe().

        The same *transform* (and *downlink* flag) used when subscribing
        must be supplied so the correct subscription is removed::

            s.subscribe(callback, transform="if last")
            s.unsubscribe(transform="if last")
        """
        target = self.path + "/downlink" if downlink else self.path
        return self.db.unsubscribe(target, transform)
"def",
"unsubscribe",
"(",
"self",
",",
"transform",
"=",
"\"\"",
",",
"downlink",
"=",
"False",
")",
":",
"streampath",
"=",
"self",
".",
"path",
"if",
"downlink",
":",
"streampath",
"+=",
"\"/downlink\"",
"return",
"self",
".",
"db",
".",
"unsubscribe",
... | 40.083333 | 16.416667 |
def _confidence(matches, ext=None):
    """ Rough confidence based on string length and file extension"""
    scored = []
    for match in matches:
        ext_len = len(match.extension)
        # Longer magic extensions are more trustworthy (capped at 0.8);
        # an exact match with the caller-supplied extension scores 0.9.
        if ext == match.extension:
            score = 0.9
        elif ext_len > 9:
            score = 0.8
        else:
            score = float("0.{0}".format(ext_len))
        scored.append(
            PureMagicWithConfidence(confidence=score, **match._asdict()))
    scored.sort(key=lambda item: item.confidence, reverse=True)
    return scored
"def",
"_confidence",
"(",
"matches",
",",
"ext",
"=",
"None",
")",
":",
"results",
"=",
"[",
"]",
"for",
"match",
"in",
"matches",
":",
"con",
"=",
"(",
"0.8",
"if",
"len",
"(",
"match",
".",
"extension",
")",
">",
"9",
"else",
"float",
"(",
"\"... | 42.909091 | 15.909091 |
def healthy_services(self, role=None):
        '''
        Look up healthy services in the registry.
        A service is considered healthy if its 'last_heartbeat' was less than
        'ttl' seconds ago
        Args:
            role (str, optional): role name
        Returns:
            If `role` is supplied, returns list of healthy services for the
            given role, otherwise returns list of all healthy services. May
            return an empty list.
        '''
        try:
            query = self.rr.table(self.table)
            if role:
                # Narrow by role first, via the 'role' secondary index.
                query = query.get_all(role, index='role')
            # Server-side filter: heartbeat age (now - last_heartbeat) must be
            # below each service's own ttl; results come back least-loaded first.
            query = query.filter(
                lambda svc: r.now().sub(svc["last_heartbeat"]) < svc["ttl"] #.default(20.0)
            ).order_by("load")
            result = query.run()
            return result
        except r.ReqlNonExistenceError:
            # NOTE(review): presumably raised when a document lacks one of the
            # referenced fields (or the table/index is missing) — confirm.
            return []
"def",
"healthy_services",
"(",
"self",
",",
"role",
"=",
"None",
")",
":",
"try",
":",
"query",
"=",
"self",
".",
"rr",
".",
"table",
"(",
"self",
".",
"table",
")",
"if",
"role",
":",
"query",
"=",
"query",
".",
"get_all",
"(",
"role",
",",
"in... | 33.576923 | 21.346154 |
def get_db_state(working_dir):
    """
    Callback to the virtual chain state engine.
    Get a *read-only* handle to our state engine implementation
    (i.e. our name database).
    Note that in this implementation, the database
    handle returned will only support read-only operations by default.
    Attempts to save state with the handle will lead to program abort.
    Returns the handle on success
    Raises on error
    """
    # The original bound `impl = sys.modules[__name__]` and never used it;
    # that dead assignment has been removed.
    db_inst = BlockstackDB.get_readonly_instance(working_dir)
    assert db_inst, 'Failed to instantiate database handle'
    return db_inst
"def",
"get_db_state",
"(",
"working_dir",
")",
":",
"impl",
"=",
"sys",
".",
"modules",
"[",
"__name__",
"]",
"db_inst",
"=",
"BlockstackDB",
".",
"get_readonly_instance",
"(",
"working_dir",
")",
"assert",
"db_inst",
",",
"'Failed to instantiate database handle'",... | 35 | 17.588235 |
def _call(self, x):
"""Sum all values if indices are given multiple times."""
y = np.bincount(self._indices_flat, weights=x,
minlength=self.range.size)
out = y.reshape(self.range.shape)
if self.variant == 'dirac':
weights = getattr(self.range, 'cell_volume', 1.0)
elif self.variant == 'char_fun':
weights = 1.0
else:
raise RuntimeError('The variant "{!r}" is not yet supported'
''.format(self.variant))
if weights != 1.0:
out /= weights
return out | [
"def",
"_call",
"(",
"self",
",",
"x",
")",
":",
"y",
"=",
"np",
".",
"bincount",
"(",
"self",
".",
"_indices_flat",
",",
"weights",
"=",
"x",
",",
"minlength",
"=",
"self",
".",
"range",
".",
"size",
")",
"out",
"=",
"y",
".",
"reshape",
"(",
... | 31.578947 | 19.526316 |
def process(self, makeGlyphs=True, makeKerning=True, makeInfo=True):
        """Read the input document and generate all instances.

        The three flags select which parts of each instance (outlines,
        kerning, font info) are produced.
        """
        log = self.logger
        if log:
            log.info("Reading %s", self.path)
        self.readInstances(
            makeGlyphs=makeGlyphs,
            makeKerning=makeKerning,
            makeInfo=makeInfo,
        )
        self.reportProgress("done", 'stop')
"def",
"process",
"(",
"self",
",",
"makeGlyphs",
"=",
"True",
",",
"makeKerning",
"=",
"True",
",",
"makeInfo",
"=",
"True",
")",
":",
"if",
"self",
".",
"logger",
":",
"self",
".",
"logger",
".",
"info",
"(",
"\"Reading %s\"",
",",
"self",
".",
"pa... | 57.666667 | 19 |
def is_correct(self):
        """
        Check if the items list configuration is correct ::
        * check if duplicate items exist in the list and warn about this
        * set alias and display_name property for each item in the list if they do not exist
        * check each item in the list
        * log all previous warnings
        * log all previous errors
        :return: True if the configuration is correct, otherwise False
        :rtype: bool
        """
        # NOTE(review): the duplicate-detection and alias/display_name steps
        # promised by the docstring are not implemented here (the alias hook
        # below is commented out) — confirm against the class documentation.
        # we are ok at the beginning. Hope we are still ok at the end...
        valid = True
        # Better check individual items before displaying the global items list errors and warnings
        for i in self:
            # Alias and display_name hook hook
            # prop_name = getattr(self.__class__, 'name_property', None)
            # if prop_name and not getattr(i, 'alias', '') and hasattr(i, prop_name):
            #     setattr(i, 'alias', getattr(i, prop_name))
            # if prop_name and getattr(i, 'display_name', '') and hasattr(i, prop_name):
            #     setattr(i, 'display_name', getattr(i, prop_name))
            # Now other checks
            if not i.is_correct():
                valid = False
                i.add_error("Configuration in %s::%s is incorrect; from: %s"
                            % (i.my_type, i.get_name(), i.imported_from))
            # Bubble each item's own messages up to the list level.
            if i.configuration_errors:
                self.configuration_errors += i.configuration_errors
            if i.configuration_warnings:
                self.configuration_warnings += i.configuration_warnings
        # Raise all previous warnings
        if self.configuration_warnings:
            for msg in self.configuration_warnings:
                logger.warning("[items] %s", msg)
        # Raise all previous errors
        if self.configuration_errors:
            valid = False
            for msg in self.configuration_errors:
                logger.error("[items] %s", msg)
        return valid
"def",
"is_correct",
"(",
"self",
")",
":",
"# we are ok at the beginning. Hope we are still ok at the end...",
"valid",
"=",
"True",
"# Better check individual items before displaying the global items list errors and warnings",
"for",
"i",
"in",
"self",
":",
"# Alias and display_nam... | 40.395833 | 21.9375 |
def luminosities_of_galaxies_within_circles_in_units(self, radius : dim.Length, unit_luminosity='eps', exposure_time=None):
        """Compute the total luminosity of every galaxy in this plane within a circle of the given radius.

        One value is returned per galaxy, in the order of self.galaxies; the
        per-galaxy computation is delegated to
        galaxy.luminosity_within_circle_in_units.

        Parameters
        ----------
        radius : float
            The radius of the circle to compute the dimensionless mass within.
        unit_luminosity : str
            The units the luminosity is returned in (eps | counts).
        exposure_time : float
            The exposure time of the observation, which converts luminosity from electrons per second units to counts.
        """
        return [galaxy.luminosity_within_circle_in_units(
                    radius=radius, unit_luminosity=unit_luminosity,
                    kpc_per_arcsec=self.kpc_per_arcsec,
                    exposure_time=exposure_time)
                for galaxy in self.galaxies]
"def",
"luminosities_of_galaxies_within_circles_in_units",
"(",
"self",
",",
"radius",
":",
"dim",
".",
"Length",
",",
"unit_luminosity",
"=",
"'eps'",
",",
"exposure_time",
"=",
"None",
")",
":",
"return",
"list",
"(",
"map",
"(",
"lambda",
"galaxy",
":",
"ga... | 52.789474 | 28.842105 |
def get_offset_range(self, row_offset, column_offset):
        """
        Gets an object which represents a range that's offset from the specified range.
        The dimension of the returned range will match this range.
        If the resulting range is forced outside the bounds of the worksheet grid,
        an exception will be thrown.
        :param int row_offset: The number of rows (positive, negative, or 0)
         by which the range is to be offset.
        :param int column_offset: The number of columns (positive, negative, or 0)
         by which the range is to be offset.
        :return: Range
        """
        return self._get_range(
            'offset_range',
            rowOffset=row_offset,
            columnOffset=column_offset,
        )
"def",
"get_offset_range",
"(",
"self",
",",
"row_offset",
",",
"column_offset",
")",
":",
"return",
"self",
".",
"_get_range",
"(",
"'offset_range'",
",",
"rowOffset",
"=",
"row_offset",
",",
"columnOffset",
"=",
"column_offset",
")"
] | 54.923077 | 22.461538 |
def token_approve(self, spender_address, price, from_account):
        """
        Approve the passed address to spend the specified amount of tokens.
        :param spender_address: Account address, str
        :param price: Asset price, int
        :param from_account: Account address, str
        :return: bool — True when the transaction receipt reports success
        """
        # Normalise to a checksummed address, which web3 requires.
        if not Web3Provider.get_web3().isChecksumAddress(spender_address):
            spender_address = Web3Provider.get_web3().toChecksumAddress(spender_address)
        # Submit the token contract's approve(spender, amount) transaction,
        # unlocking the sender account with its passphrase.
        tx_hash = self.send_transaction(
            'approve',
            (spender_address,
             price),
            transact={'from': from_account.address,
                      'passphrase': from_account.password}
        )
        # Receipt status 1 indicates the transaction was mined successfully.
        return self.get_tx_receipt(tx_hash).status == 1
"def",
"token_approve",
"(",
"self",
",",
"spender_address",
",",
"price",
",",
"from_account",
")",
":",
"if",
"not",
"Web3Provider",
".",
"get_web3",
"(",
")",
".",
"isChecksumAddress",
"(",
"spender_address",
")",
":",
"spender_address",
"=",
"Web3Provider",
... | 38.25 | 19.25 |
def get_nameserver_detail_output_show_nameserver_nameserver_fabric_portname(self, **kwargs):
        """Auto Generated Code

        Builds the XML payload for the nameserver-fabric-portname leaf of a
        get-nameserver-detail operation and hands it to the callback.

        Required kwargs:
            nameserver_portid: key identifying the nameserver entry.
            nameserver_fabric_portname: fabric port name value.
        Optional kwargs:
            callback: callable applied to the built element tree
                (defaults to self._callback).
        """
        # The original also created an unused ET.Element("config") that was
        # immediately overwritten; that dead assignment has been removed.
        get_nameserver_detail = ET.Element("get_nameserver_detail")
        config = get_nameserver_detail
        output = ET.SubElement(get_nameserver_detail, "output")
        show_nameserver = ET.SubElement(output, "show-nameserver")
        nameserver_portid_key = ET.SubElement(show_nameserver, "nameserver-portid")
        nameserver_portid_key.text = kwargs.pop('nameserver_portid')
        nameserver_fabric_portname = ET.SubElement(show_nameserver, "nameserver-fabric-portname")
        nameserver_fabric_portname.text = kwargs.pop('nameserver_fabric_portname')
        callback = kwargs.pop('callback', self._callback)
        return callback(config)
"def",
"get_nameserver_detail_output_show_nameserver_nameserver_fabric_portname",
"(",
"self",
",",
"*",
"*",
"kwargs",
")",
":",
"config",
"=",
"ET",
".",
"Element",
"(",
"\"config\"",
")",
"get_nameserver_detail",
"=",
"ET",
".",
"Element",
"(",
"\"get_nameserver_de... | 54.8 | 24.6 |
def submit_task(rel_path, cache_string, buffer):
    """Put an upload job on the queue, and start the thread if required"""
    global upload_queue
    global upload_thread
    # A job is a (relative path, cache string, file data) tuple; the worker
    # thread pops these off the shared queue.
    upload_queue.put((rel_path, cache_string, buffer))
    # Lazily (re)start the single uploader thread: it may not have been
    # created yet, or it may have exited after draining the queue earlier.
    if upload_thread is None or not upload_thread.is_alive():
        upload_thread = UploaderThread()
        upload_thread.start()
"def",
"submit_task",
"(",
"rel_path",
",",
"cache_string",
",",
"buffer",
")",
":",
"global",
"upload_queue",
"global",
"upload_thread",
"upload_queue",
".",
"put",
"(",
"(",
"rel_path",
",",
"cache_string",
",",
"buffer",
")",
")",
"if",
"upload_thread",
"is... | 39.222222 | 14.111111 |
def _tower_loss(images, labels, num_classes, scope, reuse_variables=None):
  """Calculate the total loss on a single tower running the ImageNet model.
  We perform 'batch splitting'. This means that we cut up a batch across
  multiple GPU's. For instance, if the batch size = 32 and num_gpus = 2,
  then each tower will operate on an batch of 16 images.
  Args:
    images: Images. 4D tensor of size [batch_size, FLAGS.image_size,
      FLAGS.image_size, 3].
    labels: 1-D integer Tensor of [batch_size].
    num_classes: number of classes
    scope: unique prefix string identifying the ImageNet tower, e.g.
      'tower_0'.
    reuse_variables: passed to tf.variable_scope so later towers can share
      the variables created by the first one.
  Returns:
    Tensor of shape [] containing the total loss for a batch of data
  """
  # When fine-tuning a model, we do not restore the logits but instead we
  # randomly initialize the logits. The number of classes in the output of the
  # logit is the number of classes in specified Dataset.
  restore_logits = not FLAGS.fine_tune
  # Build inference Graph.
  with tf.variable_scope(tf.get_variable_scope(), reuse=reuse_variables):
    logits = inception.inference(images, num_classes, for_training=True,
                                 restore_logits=restore_logits,
                                 scope=scope)
  # Build the portion of the Graph calculating the losses. Note that we will
  # assemble the total_loss using a custom function below.
  split_batch_size = images.get_shape().as_list()[0]
  inception.loss(logits, labels, batch_size=split_batch_size)
  # Assemble all of the losses for the current tower only.
  losses = tf.get_collection(slim.losses.LOSSES_COLLECTION, scope)
  # Calculate the total loss for the current tower.
  regularization_losses = tf.get_collection(tf.GraphKeys.REGULARIZATION_LOSSES)
  total_loss = tf.add_n(losses + regularization_losses, name='total_loss')
  # Compute the moving average of all individual losses and the total loss.
  loss_averages = tf.train.ExponentialMovingAverage(0.9, name='avg')
  loss_averages_op = loss_averages.apply(losses + [total_loss])
  # Attach a scalar summmary to all individual losses and the total loss; do the
  # same for the averaged version of the losses.
  for l in losses + [total_loss]:
    # Remove 'tower_[0-9]/' from the name in case this is a multi-GPU training
    # session. This helps the clarity of presentation on TensorBoard.
    loss_name = re.sub('%s_[0-9]*/' % inception.TOWER_NAME, '', l.op.name)
    # Name each loss as '(raw)' and name the moving average version of the loss
    # as the original loss name.
    tf.summary.scalar(loss_name +' (raw)', l)
    tf.summary.scalar(loss_name, loss_averages.average(l))
  # The identity under control_dependencies forces the moving-average update
  # to run whenever the returned loss is evaluated.
  with tf.control_dependencies([loss_averages_op]):
    total_loss = tf.identity(total_loss)
  return total_loss
"def",
"_tower_loss",
"(",
"images",
",",
"labels",
",",
"num_classes",
",",
"scope",
",",
"reuse_variables",
"=",
"None",
")",
":",
"# When fine-tuning a model, we do not restore the logits but instead we",
"# randomly initialize the logits. The number of classes in the output of ... | 46.508475 | 24.983051 |
def phi_a(mass1, mass2, spin1x, spin1y, spin2x, spin2y):
    """ Returns the angle between the in-plane perpendicular spins."""
    # Azimuthal angle of the primary object's in-plane spin components.
    # (Primary/secondary selection is delegated to primary_spin /
    # secondary_spin — presumably by mass ordering; confirm there.)
    phi1 = phi_from_spinx_spiny(primary_spin(mass1, mass2, spin1x, spin2x),
                                primary_spin(mass1, mass2, spin1y, spin2y))
    phi2 = phi_from_spinx_spiny(secondary_spin(mass1, mass2, spin1x, spin2x),
                                secondary_spin(mass1, mass2, spin1y, spin2y))
    # Wrap the difference into [0, 2*pi).
    return (phi1 - phi2) % (2 * numpy.pi)
"def",
"phi_a",
"(",
"mass1",
",",
"mass2",
",",
"spin1x",
",",
"spin1y",
",",
"spin2x",
",",
"spin2y",
")",
":",
"phi1",
"=",
"phi_from_spinx_spiny",
"(",
"primary_spin",
"(",
"mass1",
",",
"mass2",
",",
"spin1x",
",",
"spin2x",
")",
",",
"primary_spin"... | 67.285714 | 23 |
def update_db(self, giver, receiverkarma):
        """
        Record a the giver of karma, the receiver of karma, and the karma
        amount. Typically the count will be 1, but it can be any positive or
        negative integer.
        """
        # One row per receiver; self-karma (giver == receiver) is skipped.
        for receiver in receiverkarma:
            if receiver != giver:
                # ude() presumably decodes/normalises the names — confirm.
                urow = KarmaStatsTable(
                    ude(giver), ude(receiver), receiverkarma[receiver])
                self.db.session.add(urow)
        # Single commit for the whole batch of rows.
        self.db.session.commit()
"def",
"update_db",
"(",
"self",
",",
"giver",
",",
"receiverkarma",
")",
":",
"for",
"receiver",
"in",
"receiverkarma",
":",
"if",
"receiver",
"!=",
"giver",
":",
"urow",
"=",
"KarmaStatsTable",
"(",
"ude",
"(",
"giver",
")",
",",
"ude",
"(",
"receiver"... | 37.846154 | 13.538462 |
def make_data(n,m):
    """make_data: prepare matrix of m times n random processing times"""
    # Keys are 1-based (i, j) pairs with i in 1..m and j in 1..n;
    # values are uniformly random integers in 1..10.
    return {(i, j): random.randint(1, 10)
            for i in range(1, m + 1)
            for j in range(1, n + 1)}
"def",
"make_data",
"(",
"n",
",",
"m",
")",
":",
"p",
"=",
"{",
"}",
"for",
"i",
"in",
"range",
"(",
"1",
",",
"m",
"+",
"1",
")",
":",
"for",
"j",
"in",
"range",
"(",
"1",
",",
"n",
"+",
"1",
")",
":",
"p",
"[",
"i",
",",
"j",
"]",
... | 30 | 14.857143 |
def isometric_build_atlased_mesh(script, BorderSize=0.1):
    """Isometric parameterization: Build Atlased Mesh
    This actually generates the UV mapping from the isometric parameterization

    Args:
        script: the filter script the generated XML is appended to.
        BorderSize (float): diamond-spacing ratio (0.01-0.5); see the
            filter tooltip text below for the full explanation.
    """
    # NOTE(review): the joined fragments below have no separating spaces
    # between XML attributes (e.g. ...name="BorderSize"value="..."), which
    # looks like malformed XML — confirm against the upstream filter writer
    # before changing, since this reproduces the original byte-for-byte.
    filter_xml = ''.join([
        '  <filter name="Iso Parametrization Build Atlased Mesh">\n',
        '    <Param name="BorderSize"',
        'value="%s"' % BorderSize,
        'description="BorderSize ratio"',
        'min="0.01"',
        'max="0.5"',
        'type="RichDynamicFloat"',
        'tooltip="This parameter controls the amount of space that must be left between each diamond when building the atlas. It directly affects how many triangle are splitted during this conversion. In abstract parametrization mesh triangles can naturally cross the triangles of the abstract domain, so when converting to a standard parametrization we must cut all the triangles that protrudes outside each diamond more than the specified threshold. The unit of the threshold is in percentage of the size of the diamond, the bigger the threshold the less triangles are splitted, but the more UV space is used (wasted)."',
        '/>\n',
        '  </filter>\n'])
    util.write_filter(script, filter_xml)
    return None
"def",
"isometric_build_atlased_mesh",
"(",
"script",
",",
"BorderSize",
"=",
"0.1",
")",
":",
"filter_xml",
"=",
"''",
".",
"join",
"(",
"[",
"' <filter name=\"Iso Parametrization Build Atlased Mesh\">\\n'",
",",
"' <Param name=\"BorderSize\"'",
",",
"'value=\"%s\"'",
... | 63.052632 | 46.315789 |
def get_parameter(value):
    """ attribute [section] ["*"] [CFWS] "=" value
    The CFWS is implied by the RFC but not made explicit in the BNF. This
    simplified form of the BNF from the RFC is made to conform with the RFC BNF
    through some extra checks. We do it this way because it makes both error
    recovery and working with the resulting parse tree easier.
    """
    # It is possible CFWS would also be implicitly allowed between the section
    # and the 'extended-attribute' marker (the '*') , but we've never seen that
    # in the wild and we will therefore ignore the possibility.
    param = Parameter()
    token, value = get_attribute(value)
    param.append(token)
    if not value or value[0] == ';':
        param.defects.append(errors.InvalidHeaderDefect("Parameter contains "
            "name ({}) but no value".format(token)))
        return param, value
    # Optional RFC 2231 section number ("*0", "*1", ...) and extended-value
    # marker ("*") between the attribute name and '='.
    if value[0] == '*':
        try:
            token, value = get_section(value)
            param.sectioned = True
            param.append(token)
        except errors.HeaderParseError:
            pass
        if not value:
            raise errors.HeaderParseError("Incomplete parameter")
        if value[0] == '*':
            param.append(ValueTerminal('*', 'extended-parameter-marker'))
            value = value[1:]
            param.extended = True
    if value[0] != '=':
        raise errors.HeaderParseError("Parameter not followed by '='")
    param.append(ValueTerminal('=', 'parameter-separator'))
    value = value[1:]
    leader = None
    if value and value[0] in CFWS_LEADER:
        token, value = get_cfws(value)
        param.append(token)
    remainder = None
    appendto = param
    if param.extended and value and value[0] == '"':
        # Now for some serious hackery to handle the common invalid case of
        # double quotes around an extended value. We also accept (with defect)
        # a value marked as encoded that isn't really.
        qstring, remainder = get_quoted_string(value)
        inner_value = qstring.stripped_value
        semi_valid = False
        if param.section_number == 0:
            if inner_value and inner_value[0] == "'":
                semi_valid = True
            else:
                token, rest = get_attrtext(inner_value)
                if rest and rest[0] == "'":
                    semi_valid = True
        else:
            try:
                token, rest = get_extended_attrtext(inner_value)
            # NOTE(review): bare except silently swallows everything here,
            # not just HeaderParseError — confirm this is intentional.
            except:
                pass
            else:
                if not rest:
                    semi_valid = True
        if semi_valid:
            param.defects.append(errors.InvalidHeaderDefect(
                "Quoted string value for extended parameter is invalid"))
            param.append(qstring)
            # Re-parent subsequent tokens inside the (emptied) quoted string.
            for t in qstring:
                if t.token_type == 'bare-quoted-string':
                    t[:] = []
                    appendto = t
                    break
            value = inner_value
        else:
            remainder = None
            param.defects.append(errors.InvalidHeaderDefect(
                "Parameter marked as extended but appears to have a "
                "quoted string value that is non-encoded"))
    if value and value[0] == "'":
        token = None
    else:
        token, value = get_value(value)
    if not param.extended or param.section_number > 0:
        if not value or value[0] != "'":
            appendto.append(token)
            if remainder is not None:
                assert not value, value
                value = remainder
            return param, value
        param.defects.append(errors.InvalidHeaderDefect(
            "Apparent initial-extended-value but attribute "
            "was not marked as extended or was not initial section"))
    if not value:
        # Assume the charset/lang is missing and the token is the value.
        param.defects.append(errors.InvalidHeaderDefect(
            "Missing required charset/lang delimiters"))
        appendto.append(token)
        if remainder is None:
            return param, value
    else:
        # Parse the RFC 2231 charset'lang'value triple.
        if token is not None:
            for t in token:
                if t.token_type == 'extended-attrtext':
                    break
            # NOTE(review): this comparison has no effect; it was presumably
            # meant to be an assignment (t.token_type = 'attrtext') — confirm
            # against upstream before changing.
            t.token_type == 'attrtext'
            appendto.append(t)
            param.charset = t.value
        if value[0] != "'":
            raise errors.HeaderParseError("Expected RFC2231 char/lang encoding "
                                          "delimiter, but found {!r}".format(value))
        appendto.append(ValueTerminal("'", 'RFC2231 delimiter'))
        value = value[1:]
        if value and value[0] != "'":
            token, value = get_attrtext(value)
            appendto.append(token)
            param.lang = token.value
            if not value or value[0] != "'":
                raise errors.HeaderParseError("Expected RFC2231 char/lang encoding "
                                              "delimiter, but found {}".format(value))
        appendto.append(ValueTerminal("'", 'RFC2231 delimiter'))
        value = value[1:]
    if remainder is not None:
        # Treat the rest of value as bare quoted string content.
        v = Value()
        while value:
            if value[0] in WSP:
                token, value = get_fws(value)
            else:
                token, value = get_qcontent(value)
            v.append(token)
        token = v
    else:
        token, value = get_value(value)
    appendto.append(token)
    if remainder is not None:
        assert not value, value
        value = remainder
    return param, value
"def",
"get_parameter",
"(",
"value",
")",
":",
"# It is possible CFWS would also be implicitly allowed between the section",
"# and the 'extended-attribute' marker (the '*') , but we've never seen that",
"# in the wild and we will therefore ignore the possibility.",
"param",
"=",
"Parameter",... | 39.594203 | 16.471014 |
def copy_with(self, **kwargs):
        """Return a copy with (a few) changed attributes.

        Keyword arguments name the attributes to replace; every other
        read-only attribute is carried over from this instance. This only
        works when the constructor accepts all read-only attributes as
        arguments.

        Raises TypeError for a keyword that is not a read-only attribute.
        """
        current = {}
        for name, descriptor in self.__class__.__dict__.items():
            if isinstance(descriptor, ReadOnlyAttribute):
                current[name] = descriptor.__get__(self)
        for name in kwargs:
            if name not in current:
                raise TypeError("Unknown attribute: %s" % name)
        current.update(kwargs)
        return self.__class__(**current)
"def",
"copy_with",
"(",
"self",
",",
"*",
"*",
"kwargs",
")",
":",
"attrs",
"=",
"{",
"}",
"for",
"key",
",",
"descriptor",
"in",
"self",
".",
"__class__",
".",
"__dict__",
".",
"items",
"(",
")",
":",
"if",
"isinstance",
"(",
"descriptor",
",",
"... | 43.588235 | 17.058824 |
def product_status(request, form):
    ''' Summarises the inventory status of the given items, grouping by
    invoice status. '''
    products = form.cleaned_data["product"]
    categories = form.cleaned_data["category"]
    # Items matching either the selected products or whole categories.
    items = commerce.ProductItem.objects.filter(
        Q(product__in=products) | Q(product__category__in=categories),
    ).select_related("cart", "product")
    # Aggregate per product, ordered by category order then product order.
    items = group_by_cart_status(
        items,
        ["product__category__order", "product__order"],
        ["product", "product__category__name", "product__name"],
    )
    headings = [
        "Product", "Paid", "Reserved", "Unreserved", "Refunded",
    ]
    data = []
    for item in items:
        # One row per product: "<category> - <product>" plus the four totals.
        data.append([
            "%s - %s" % (
                item["product__category__name"], item["product__name"]
            ),
            item["total_paid"],
            item["total_reserved"],
            item["total_unreserved"],
            item["total_refunded"],
        ])
    return ListReport("Inventory", headings, data)
"def",
"product_status",
"(",
"request",
",",
"form",
")",
":",
"products",
"=",
"form",
".",
"cleaned_data",
"[",
"\"product\"",
"]",
"categories",
"=",
"form",
".",
"cleaned_data",
"[",
"\"category\"",
"]",
"items",
"=",
"commerce",
".",
"ProductItem",
"."... | 29.205882 | 21.441176 |
def createPerson(self, nickname, vip=_NO_VIP):
        """
        Create a new L{Person} with the given name in this organizer.
        @type nickname: C{unicode}
        @param nickname: The value for the new person's C{name} attribute.
        @type vip: C{bool}
        @param vip: Value to set the created person's C{vip} attribute to
        (deprecated).
        @raise ValueError: if a person with this name already exists.
        @rtype: L{Person}
        """
        # Uniqueness check: any result from this query means the name is
        # taken, so entering the loop body at all raises immediately.
        for person in (self.store.query(
                Person, attributes.AND(
                    Person.name == nickname,
                    Person.organizer == self))):
            raise ValueError("Person with name %r exists already." % (nickname,))
        person = Person(
            store=self.store,
            created=extime.Time(),
            organizer=self,
            name=nickname)
        # Only touch person.vip when the caller actually supplied the
        # deprecated argument (the _NO_VIP sentinel marks "not given").
        if vip is not self._NO_VIP:
            warn(
                "Usage of Organizer.createPerson's 'vip' parameter"
                " is deprecated",
                category=DeprecationWarning)
            person.vip = vip
        self._callOnOrganizerPlugins('personCreated', person)
        return person
"def",
"createPerson",
"(",
"self",
",",
"nickname",
",",
"vip",
"=",
"_NO_VIP",
")",
":",
"for",
"person",
"in",
"(",
"self",
".",
"store",
".",
"query",
"(",
"Person",
",",
"attributes",
".",
"AND",
"(",
"Person",
".",
"name",
"==",
"nickname",
","... | 32.969697 | 17.818182 |
def translate_latex2unicode(text, kb_file=None):
"""Translate latex text to unicode.
This function will take given text, presumably containing LaTeX symbols,
and attempts to translate it to Unicode using the given or default KB
translation table located under
CFG_ETCDIR/bibconvert/KB/latex-to-unicode.kb.
The translated Unicode string will then be returned.
If the translation table and compiled regular expression object is not
previously generated in the current session, they will be.
:param text: a text presumably containing LaTeX symbols.
:type text: string
:param kb_file: full path to file containing latex2unicode translations.
Defaults to CFG_ETCDIR/bibconvert/KB/latex-to-unicode.kb
:type kb_file: string
:return: Unicode representation of translated text
:rtype: unicode
"""
if kb_file is None:
kb_file = get_kb_filename()
# First decode input text to Unicode
try:
text = decode_to_unicode(text)
except UnicodeDecodeError:
text = unicode(wash_for_utf8(text))
# Load translation table, if required
if CFG_LATEX_UNICODE_TRANSLATION_CONST == {}:
_load_latex2unicode_constants(kb_file)
# Find all matches and replace text
for match in CFG_LATEX_UNICODE_TRANSLATION_CONST['regexp_obj'] \
.finditer(text):
# If LaTeX style markers {, } and $ are before or after the
# matching text, it will replace those as well
text = re.sub("[\{\$]?%s[\}\$]?" % (re.escape(match.group()),),
CFG_LATEX_UNICODE_TRANSLATION_CONST[
'table'][match.group()],
text)
# Return Unicode representation of translated text
return text | [
"def",
"translate_latex2unicode",
"(",
"text",
",",
"kb_file",
"=",
"None",
")",
":",
"if",
"kb_file",
"is",
"None",
":",
"kb_file",
"=",
"get_kb_filename",
"(",
")",
"# First decode input text to Unicode",
"try",
":",
"text",
"=",
"decode_to_unicode",
"(",
"tex... | 40.348837 | 18.627907 |
def solve_full(z, Fval, DPhival, G, A):
M, N=G.shape
P, N=A.shape
"""Total number of inequality constraints"""
m=M
"""Primal variable"""
x=z[0:N]
"""Multiplier for equality constraints"""
nu=z[N:N+P]
"""Multiplier for inequality constraints"""
l=z[N+P:N+P+M]
"""Slacks"""
s=z[N+P+M:]
"""Dual infeasibility"""
rd = Fval[0:N]
"""Primal infeasibility"""
rp1 = Fval[N:N+P]
rp2 = Fval[N+P:N+P+M]
"""Centrality"""
rc = Fval[N+P+M:]
"""Sigma matrix"""
SIG = np.diag(l/s)
"""Condensed system"""
if issparse(DPhival):
if not issparse(A):
A = csr_matrix(A)
H = DPhival + mydot(G.T, mydot(SIG, G))
J = bmat([[H, A.T], [A, None]])
else:
if issparse(A):
A = A.toarray()
J = np.zeros((N+P, N+P))
J[0:N, 0:N] = DPhival + mydot(G.T, mydot(SIG, G))
J[0:N, N:] = A.T
J[N:, 0:N] = A
b1 = -rd - mydot(G.T, mydot(SIG, rp2)) + mydot(G.T, rc/s)
b2 = -rp1
b = np.hstack((b1, b2))
"""Prepare iterative solve via MINRES"""
sign = np.zeros(N+P)
sign[0:N/2] = 1.0
sign[N/2:] = -1.0
S = diags(sign, 0)
J_new = mydot(S, csr_matrix(J))
b_new = mydot(S, b)
dJ_new = np.abs(J_new.diagonal())
dPc = np.ones(J_new.shape[0])
ind = (dJ_new > 0.0)
dPc[ind] = 1.0/dJ_new[ind]
Pc = diags(dPc, 0)
dxnu, info = minres(J_new, b_new, tol=1e-8, M=Pc)
# dxnu = solve(J, b)
dx = dxnu[0:N]
dnu = dxnu[N:]
"""Obtain search directions for l and s"""
ds = -rp2 - mydot(G, dx)
dl = -mydot(SIG, ds) - rc/s
dz = np.hstack((dx, dnu, dl, ds))
return dz | [
"def",
"solve_full",
"(",
"z",
",",
"Fval",
",",
"DPhival",
",",
"G",
",",
"A",
")",
":",
"M",
",",
"N",
"=",
"G",
".",
"shape",
"P",
",",
"N",
"=",
"A",
".",
"shape",
"m",
"=",
"M",
"\"\"\"Primal variable\"\"\"",
"x",
"=",
"z",
"[",
"0",
":"... | 22.106667 | 18.733333 |
def queue(self, *args, **kwargs):
"""
A function to queue a RQ job, e.g.::
@rq.job(timeout=60)
def add(x, y):
return x + y
add.queue(1, 2, timeout=30)
:param \\*args: The positional arguments to pass to the queued job.
:param \\*\\*kwargs: The keyword arguments to pass to the queued job.
:param queue: Name of the queue to queue in, defaults to
queue of of job or :attr:`~flask_rq2.RQ.default_queue`.
:type queue: str
:param timeout: The job timeout in seconds.
If not provided uses the job's timeout or
:attr:`~flask_rq2.RQ.default_timeout`.
:type timeout: int
:param description: Description of the job.
:type description: str
:param result_ttl: The result TTL in seconds. If not provided
uses the job's result TTL or
:attr:`~flask_rq2.RQ.default_result_ttl`.
:type result_ttl: int
:param ttl: The job TTL in seconds. If not provided
uses the job's TTL or no TTL at all.
:type ttl: int
:param depends_on: A job instance or id that the new job depends on.
:type depends_on: ~flask_rq2.job.FlaskJob or str
:param job_id: A custom ID for the new job. Defaults to an
:mod:`UUID <uuid>`.
:type job_id: str
:param at_front: Whether or not the job is queued in front of all other
enqueued jobs.
:type at_front: bool
:param meta: Additional meta data about the job.
:type meta: dict
:return: An RQ job instance.
:rtype: ~flask_rq2.job.FlaskJob
"""
queue_name = kwargs.pop('queue', self.queue_name)
timeout = kwargs.pop('timeout', self.timeout)
result_ttl = kwargs.pop('result_ttl', self.result_ttl)
ttl = kwargs.pop('ttl', self.ttl)
depends_on = kwargs.pop('depends_on', self._depends_on)
job_id = kwargs.pop('job_id', None)
at_front = kwargs.pop('at_front', self._at_front)
meta = kwargs.pop('meta', self._meta)
description = kwargs.pop('description', self._description)
return self.rq.get_queue(queue_name).enqueue_call(
self.wrapped,
args=args,
kwargs=kwargs,
timeout=timeout,
result_ttl=result_ttl,
ttl=ttl,
depends_on=depends_on,
job_id=job_id,
at_front=at_front,
meta=meta,
description=description,
) | [
"def",
"queue",
"(",
"self",
",",
"*",
"args",
",",
"*",
"*",
"kwargs",
")",
":",
"queue_name",
"=",
"kwargs",
".",
"pop",
"(",
"'queue'",
",",
"self",
".",
"queue_name",
")",
"timeout",
"=",
"kwargs",
".",
"pop",
"(",
"'timeout'",
",",
"self",
"."... | 34.746667 | 20.293333 |
def schema(self):
""" The DQL query that will construct this table's schema """
attrs = self.attrs.copy()
parts = ["CREATE", "TABLE", self.name, "(%s," % self.hash_key.schema]
del attrs[self.hash_key.name]
if self.range_key:
parts.append(self.range_key.schema + ",")
del attrs[self.range_key.name]
if attrs:
attr_def = ", ".join([attr.schema for attr in itervalues(attrs)])
parts.append(attr_def + ",")
parts.append(
"THROUGHPUT (%d, %d))" % (self.read_throughput, self.write_throughput)
)
parts.extend([g.schema for g in itervalues(self.global_indexes)])
return " ".join(parts) + ";" | [
"def",
"schema",
"(",
"self",
")",
":",
"attrs",
"=",
"self",
".",
"attrs",
".",
"copy",
"(",
")",
"parts",
"=",
"[",
"\"CREATE\"",
",",
"\"TABLE\"",
",",
"self",
".",
"name",
",",
"\"(%s,\"",
"%",
"self",
".",
"hash_key",
".",
"schema",
"]",
"del"... | 41.705882 | 19.294118 |
def start(self, *args, **kwargs):
r"""Starts the internal task in the event loop.
Parameters
------------
\*args
The arguments to to use.
\*\*kwargs
The keyword arguments to use.
Raises
--------
RuntimeError
A task has already been launched.
Returns
---------
:class:`asyncio.Task`
The task that has been created.
"""
if self._task is not None:
raise RuntimeError('Task is already launched.')
if self._injected is not None:
args = (self._injected, *args)
self._task = self.loop.create_task(self._loop(*args, **kwargs))
return self._task | [
"def",
"start",
"(",
"self",
",",
"*",
"args",
",",
"*",
"*",
"kwargs",
")",
":",
"if",
"self",
".",
"_task",
"is",
"not",
"None",
":",
"raise",
"RuntimeError",
"(",
"'Task is already launched.'",
")",
"if",
"self",
".",
"_injected",
"is",
"not",
"None... | 24.62069 | 19.103448 |
def namespace_to_taxon() -> Dict[str, Node]:
"""
namespace to taxon mapping
"""
human_taxon = Node(
id='NCBITaxon:9606',
label='Homo sapiens'
)
return {
'MGI': Node(
id='NCBITaxon:10090',
label='Mus musculus'
),
'MONDO': human_taxon,
'OMIM': human_taxon,
'MONARCH': human_taxon,
'HGNC': human_taxon,
'FlyBase': Node(
id='NCBITaxon:7227',
label='Drosophila melanogaster'
),
'WormBase': Node(
id='NCBITaxon:6239',
label='Caenorhabditis elegans'
),
'ZFIN': Node(
id='NCBITaxon:7955',
label='Danio rerio'
)
} | [
"def",
"namespace_to_taxon",
"(",
")",
"->",
"Dict",
"[",
"str",
",",
"Node",
"]",
":",
"human_taxon",
"=",
"Node",
"(",
"id",
"=",
"'NCBITaxon:9606'",
",",
"label",
"=",
"'Homo sapiens'",
")",
"return",
"{",
"'MGI'",
":",
"Node",
"(",
"id",
"=",
"'NCB... | 23.966667 | 14.433333 |
def maximum_distance(value):
"""
:param value:
input string corresponding to a valid maximum distance
:returns:
a IntegrationDistance mapping
"""
dic = floatdict(value)
for trt, magdists in dic.items():
if isinstance(magdists, list): # could be a scalar otherwise
magdists.sort() # make sure the list is sorted by magnitude
for mag, dist in magdists: # validate the magnitudes
magnitude(mag)
return IntegrationDistance(dic) | [
"def",
"maximum_distance",
"(",
"value",
")",
":",
"dic",
"=",
"floatdict",
"(",
"value",
")",
"for",
"trt",
",",
"magdists",
"in",
"dic",
".",
"items",
"(",
")",
":",
"if",
"isinstance",
"(",
"magdists",
",",
"list",
")",
":",
"# could be a scalar other... | 36.071429 | 14.642857 |
def autoprefixCSS(sassPath):
'''
Take CSS file and automatically add browser prefixes with postCSS autoprefixer
'''
print("Autoprefixing CSS")
cssPath = os.path.splitext(sassPath)[0] + ".css"
command = "postcss --use autoprefixer --autoprefixer.browsers '> 5%' -o" + cssPath + " " + cssPath
subprocess.call(command, shell=True) | [
"def",
"autoprefixCSS",
"(",
"sassPath",
")",
":",
"print",
"(",
"\"Autoprefixing CSS\"",
")",
"cssPath",
"=",
"os",
".",
"path",
".",
"splitext",
"(",
"sassPath",
")",
"[",
"0",
"]",
"+",
"\".css\"",
"command",
"=",
"\"postcss --use autoprefixer --autoprefixer.... | 38.666667 | 27.111111 |
def apply_noise(self, noise_weights=None, uniform_amount=0.1):
"""
Add noise to every link in the network.
Can use either a ``uniform_amount`` or a ``noise_weight`` weight
profile. If ``noise_weight`` is set, ``uniform_amount`` will be
ignored.
Args:
noise_weights (list): a list of weight tuples
of form ``(float, float)`` corresponding to
``(amount, weight)`` describing the noise to be
added to each link in the graph
uniform_amount (float): the maximum amount of uniform noise
to be applied if ``noise_weights`` is not set
Returns: None
Example:
>>> from blur.markov.node import Node
>>> node_1 = Node('One')
>>> node_2 = Node('Two')
>>> node_1.add_link(node_1, 3)
>>> node_1.add_link(node_2, 5)
>>> node_2.add_link(node_1, 1)
>>> graph = Graph([node_1, node_2])
>>> for link in graph.node_list[0].link_list:
... print('{} {}'.format(link.target.value, link.weight))
One 3
Two 5
>>> graph.apply_noise()
>>> for link in graph.node_list[0].link_list:
... print('{} {}'.format(
... link.target.value, link.weight)) # doctest: +SKIP
One 3.154
Two 5.321
"""
# Main node loop
for node in self.node_list:
for link in node.link_list:
if noise_weights is not None:
noise_amount = round(weighted_rand(noise_weights), 3)
else:
noise_amount = round(random.uniform(
0, link.weight * uniform_amount), 3)
link.weight += noise_amount | [
"def",
"apply_noise",
"(",
"self",
",",
"noise_weights",
"=",
"None",
",",
"uniform_amount",
"=",
"0.1",
")",
":",
"# Main node loop",
"for",
"node",
"in",
"self",
".",
"node_list",
":",
"for",
"link",
"in",
"node",
".",
"link_list",
":",
"if",
"noise_weig... | 39.26087 | 17.565217 |
def mkdir_p(path_to_dir):
"""Make directory(ies).
This function behaves like mkdir -p.
Args:
path_to_dir (:obj:`str`): Path to the directory to make.
"""
try:
os.makedirs(path_to_dir)
except OSError as e: # Python >2.5
if e.errno == EEXIST and os.path.isdir(path_to_dir):
logger.debug(
"Directory %s already exists. Skipping." % path_to_dir)
else:
raise e | [
"def",
"mkdir_p",
"(",
"path_to_dir",
")",
":",
"try",
":",
"os",
".",
"makedirs",
"(",
"path_to_dir",
")",
"except",
"OSError",
"as",
"e",
":",
"# Python >2.5",
"if",
"e",
".",
"errno",
"==",
"EEXIST",
"and",
"os",
".",
"path",
".",
"isdir",
"(",
"p... | 27.4375 | 19.0625 |
def safe_filepath(file_path_name, dir_sep=None):
'''
Input the full path and filename, splits on directory separator and calls safe_filename_leaf for
each part of the path. dir_sep allows coder to force a directory separate to a particular character
.. versionadded:: 2017.7.2
:codeauthor: Damon Atkins <https://github.com/damon-atkins>
'''
if not dir_sep:
dir_sep = os.sep
# Normally if file_path_name or dir_sep is Unicode then the output will be Unicode
# This code ensure the output type is the same as file_path_name
if not isinstance(file_path_name, six.text_type) and isinstance(dir_sep, six.text_type):
dir_sep = dir_sep.encode('ascii') # This should not be executed under PY3
# splitdrive only set drive on windows platform
(drive, path) = os.path.splitdrive(file_path_name)
path = dir_sep.join([safe_filename_leaf(file_section) for file_section in path.rsplit(dir_sep)])
if drive:
path = dir_sep.join([drive, path])
return path | [
"def",
"safe_filepath",
"(",
"file_path_name",
",",
"dir_sep",
"=",
"None",
")",
":",
"if",
"not",
"dir_sep",
":",
"dir_sep",
"=",
"os",
".",
"sep",
"# Normally if file_path_name or dir_sep is Unicode then the output will be Unicode",
"# This code ensure the output type is th... | 47.809524 | 31.142857 |
def learn(epochs, verbose_epoch, X_train, y_train, test, learning_rate,
weight_decay, batch_size):
"""Trains the model and predicts on the test data set."""
net = get_net()
_ = train(net, X_train, y_train, epochs, verbose_epoch, learning_rate,
weight_decay, batch_size)
preds = net(X_test).asnumpy()
test['SalePrice'] = pd.Series(preds.reshape(1, -1)[0])
submission = pd.concat([test['Id'], test['SalePrice']], axis=1)
submission.to_csv('submission.csv', index=False) | [
"def",
"learn",
"(",
"epochs",
",",
"verbose_epoch",
",",
"X_train",
",",
"y_train",
",",
"test",
",",
"learning_rate",
",",
"weight_decay",
",",
"batch_size",
")",
":",
"net",
"=",
"get_net",
"(",
")",
"_",
"=",
"train",
"(",
"net",
",",
"X_train",
",... | 51.3 | 15.6 |
def load_snps(
self,
raw_data,
discrepant_snp_positions_threshold=100,
discrepant_genotypes_threshold=500,
save_output=False,
):
""" Load raw genotype data.
Parameters
----------
raw_data : list or str
path(s) to file(s) with raw genotype data
discrepant_snp_positions_threshold : int
threshold for discrepant SNP positions between existing data and data to be loaded,
a large value could indicate mismatched genome assemblies
discrepant_genotypes_threshold : int
threshold for discrepant genotype data between existing data and data to be loaded,
a large value could indicated mismatched individuals
save_output : bool
specifies whether to save discrepant SNP output to CSV files in the output directory
"""
if type(raw_data) is list:
for file in raw_data:
self._load_snps_helper(
file,
discrepant_snp_positions_threshold,
discrepant_genotypes_threshold,
save_output,
)
elif type(raw_data) is str:
self._load_snps_helper(
raw_data,
discrepant_snp_positions_threshold,
discrepant_genotypes_threshold,
save_output,
)
else:
raise TypeError("invalid filetype") | [
"def",
"load_snps",
"(",
"self",
",",
"raw_data",
",",
"discrepant_snp_positions_threshold",
"=",
"100",
",",
"discrepant_genotypes_threshold",
"=",
"500",
",",
"save_output",
"=",
"False",
",",
")",
":",
"if",
"type",
"(",
"raw_data",
")",
"is",
"list",
":",
... | 37.128205 | 17.641026 |
def feeling_lucky(cls, obj):
"""Tries to convert given object to an UTC timestamp is ms, based
on its type.
"""
if isinstance(obj, six.string_types):
return cls.from_str(obj)
elif isinstance(obj, six.integer_types) and obj <= MAX_POSIX_TIMESTAMP:
return cls.from_posix_timestamp(obj)
elif isinstance(obj, datetime):
return cls.from_datetime(obj)
else:
raise ValueError(
u"Don't know how to get timestamp from '{}'".format(obj)
) | [
"def",
"feeling_lucky",
"(",
"cls",
",",
"obj",
")",
":",
"if",
"isinstance",
"(",
"obj",
",",
"six",
".",
"string_types",
")",
":",
"return",
"cls",
".",
"from_str",
"(",
"obj",
")",
"elif",
"isinstance",
"(",
"obj",
",",
"six",
".",
"integer_types",
... | 39.071429 | 13.357143 |
def get_content_descendants_by_type(self, content_id, child_type, expand=None, start=None, limit=None,
callback=None):
"""
Returns the direct descendants of a piece of Content, limited to a single descendant type.
The {@link ContentType}(s) of the descendants returned is specified by the "type" path parameter in the request.
Currently the only supported descendants are comment descendants of non-comment Content.
:param content_id (string): A string containing the id of the content to retrieve descendants for
:param child_type (string): A {@link ContentType} to filter descendants on.
:param expand (string): OPTIONAL: A comma separated list of properties to expand on the descendants.
Default: Empty
:param start (int): OPTIONAL: The index of the first item within the result set that should be returned.
Default: 0.
:param limit (int): OPTIONAL: How many items should be returned after the start index.
Default: 25 or site limit.
:param callback: OPTIONAL: The callback to execute on the resulting data, before the method returns.
Default: None (no callback, raw data returned).
:return: The JSON data returned from the content/{id}/descendant/{type} endpoint, or the results of the
callback. Will raise requests.HTTPError on bad input, potentially.
"""
params = {}
if expand:
params["expand"] = expand
if start is not None:
params["start"] = int(start)
if limit is not None:
params["limit"] = int(limit)
return self._service_get_request("rest/api/content/{id}/descendant/{type}"
"".format(id=content_id, type=child_type), params=params, callback=callback) | [
"def",
"get_content_descendants_by_type",
"(",
"self",
",",
"content_id",
",",
"child_type",
",",
"expand",
"=",
"None",
",",
"start",
"=",
"None",
",",
"limit",
"=",
"None",
",",
"callback",
"=",
"None",
")",
":",
"params",
"=",
"{",
"}",
"if",
"expand"... | 63.966667 | 35.833333 |
def update(self, process_list):
"""Update the AMP"""
# Get the systemctl status
logger.debug('{}: Update stats using service {}'.format(self.NAME, self.get('service_cmd')))
try:
res = check_output(self.get('service_cmd').split(), stderr=STDOUT).decode('utf-8')
except OSError as e:
logger.debug('{}: Error while executing service ({})'.format(self.NAME, e))
else:
status = {'running': 0, 'stopped': 0, 'upstart': 0}
# For each line
for r in res.split('\n'):
# Split per space .*
l = r.split()
if len(l) < 4:
continue
if l[1] == '+':
status['running'] += 1
elif l[1] == '-':
status['stopped'] += 1
elif l[1] == '?':
status['upstart'] += 1
# Build the output (string) message
output = 'Services\n'
for k, v in iteritems(status):
output += '{}: {}\n'.format(k, v)
self.set_result(output, separator=' ')
return self.result() | [
"def",
"update",
"(",
"self",
",",
"process_list",
")",
":",
"# Get the systemctl status",
"logger",
".",
"debug",
"(",
"'{}: Update stats using service {}'",
".",
"format",
"(",
"self",
".",
"NAME",
",",
"self",
".",
"get",
"(",
"'service_cmd'",
")",
")",
")"... | 39.62069 | 15 |
def pexpire(self, name, time):
"""
Set an expire flag on key ``name`` for ``time`` milliseconds.
``time`` can be represented by an integer or a Python timedelta
object.
"""
if isinstance(time, datetime.timedelta):
time = int(time.total_seconds() * 1000)
return self.execute_command('PEXPIRE', name, time) | [
"def",
"pexpire",
"(",
"self",
",",
"name",
",",
"time",
")",
":",
"if",
"isinstance",
"(",
"time",
",",
"datetime",
".",
"timedelta",
")",
":",
"time",
"=",
"int",
"(",
"time",
".",
"total_seconds",
"(",
")",
"*",
"1000",
")",
"return",
"self",
".... | 40.444444 | 14.666667 |
def make_fileitem_peinfo_exports_dllname(dll_name, condition='is', negate=False, preserve_case=False):
"""
Create a node for FileItem/PEInfo/Exports/DllName
:return: A IndicatorItem represented as an Element node
"""
document = 'FileItem'
search = 'FileItem/PEInfo/Exports/DllName'
content_type = 'string'
content = dll_name
ii_node = ioc_api.make_indicatoritem_node(condition, document, search, content_type, content,
negate=negate, preserve_case=preserve_case)
return ii_node | [
"def",
"make_fileitem_peinfo_exports_dllname",
"(",
"dll_name",
",",
"condition",
"=",
"'is'",
",",
"negate",
"=",
"False",
",",
"preserve_case",
"=",
"False",
")",
":",
"document",
"=",
"'FileItem'",
"search",
"=",
"'FileItem/PEInfo/Exports/DllName'",
"content_type",... | 42.769231 | 23.846154 |
def _init_file(self, ti):
"""
Create log directory and give it correct permissions.
:param ti: task instance object
:return: relative log path of the given task instance
"""
# To handle log writing when tasks are impersonated, the log files need to
# be writable by the user that runs the Airflow command and the user
# that is impersonated. This is mainly to handle corner cases with the
# SubDagOperator. When the SubDagOperator is run, all of the operators
# run under the impersonated user and create appropriate log files
# as the impersonated user. However, if the user manually runs tasks
# of the SubDagOperator through the UI, then the log files are created
# by the user that runs the Airflow command. For example, the Airflow
# run command may be run by the `airflow_sudoable` user, but the Airflow
# tasks may be run by the `airflow` user. If the log files are not
# writable by both users, then it's possible that re-running a task
# via the UI (or vice versa) results in a permission error as the task
# tries to write to a log file created by the other user.
relative_path = self._render_filename(ti, ti.try_number)
full_path = os.path.join(self.local_base, relative_path)
directory = os.path.dirname(full_path)
# Create the log file and give it group writable permissions
# TODO(aoen): Make log dirs and logs globally readable for now since the SubDag
# operator is not compatible with impersonation (e.g. if a Celery executor is used
# for a SubDag operator and the SubDag operator has a different owner than the
# parent DAG)
if not os.path.exists(directory):
# Create the directory as globally writable using custom mkdirs
# as os.makedirs doesn't set mode properly.
mkdirs(directory, 0o777)
if not os.path.exists(full_path):
open(full_path, "a").close()
# TODO: Investigate using 444 instead of 666.
os.chmod(full_path, 0o666)
return full_path | [
"def",
"_init_file",
"(",
"self",
",",
"ti",
")",
":",
"# To handle log writing when tasks are impersonated, the log files need to",
"# be writable by the user that runs the Airflow command and the user",
"# that is impersonated. This is mainly to handle corner cases with the",
"# SubDagOperat... | 56.105263 | 24.842105 |
async def _dump_message_field(self, writer, msg, field, fvalue=None):
"""
Dumps a message field to the writer. Field is defined by the message field specification.
:param writer:
:param msg:
:param field:
:param fvalue:
:return:
"""
fname, ftype, params = field[0], field[1], field[2:]
fvalue = getattr(msg, fname, None) if fvalue is None else fvalue
await self.dump_field(writer, fvalue, ftype, params) | [
"async",
"def",
"_dump_message_field",
"(",
"self",
",",
"writer",
",",
"msg",
",",
"field",
",",
"fvalue",
"=",
"None",
")",
":",
"fname",
",",
"ftype",
",",
"params",
"=",
"field",
"[",
"0",
"]",
",",
"field",
"[",
"1",
"]",
",",
"field",
"[",
... | 36.923077 | 22.923077 |
def convert_to_merged_ids(self, id_run):
"""
Converts any identified phrases in the run into phrase_ids. The dictionary provides all acceptable phrases
:param id_run: a run of token ids
:param dictionary: a dictionary of acceptable phrases described as there component token ids
:return: a run of token and phrase ids.
"""
i = 0
rv = []
while i < len(id_run):
phrase_id, offset = self.max_phrase(id_run, i)
if phrase_id:
rv.append(phrase_id)
i = offset
else:
rv.append(id_run[i])
i += 1
return rv | [
"def",
"convert_to_merged_ids",
"(",
"self",
",",
"id_run",
")",
":",
"i",
"=",
"0",
"rv",
"=",
"[",
"]",
"while",
"i",
"<",
"len",
"(",
"id_run",
")",
":",
"phrase_id",
",",
"offset",
"=",
"self",
".",
"max_phrase",
"(",
"id_run",
",",
"i",
")",
... | 36.666667 | 18 |
def clean(text, cls=None, **kwargs):
"""Public facing function to clean ``text`` using the scrubber ``cls`` by
replacing all personal information with ``{{PLACEHOLDERS}}``.
"""
cls = cls or Scrubber
scrubber = cls()
return scrubber.clean(text, **kwargs) | [
"def",
"clean",
"(",
"text",
",",
"cls",
"=",
"None",
",",
"*",
"*",
"kwargs",
")",
":",
"cls",
"=",
"cls",
"or",
"Scrubber",
"scrubber",
"=",
"cls",
"(",
")",
"return",
"scrubber",
".",
"clean",
"(",
"text",
",",
"*",
"*",
"kwargs",
")"
] | 38.714286 | 9.285714 |
def secondary_mass(mass1, mass2):
"""Returns the smaller of mass1 and mass2 (s = secondary)."""
mass1, mass2, input_is_array = ensurearray(mass1, mass2)
if mass1.shape != mass2.shape:
raise ValueError("mass1 and mass2 must have same shape")
ms = copy.copy(mass2)
mask = mass1 < mass2
ms[mask] = mass1[mask]
return formatreturn(ms, input_is_array) | [
"def",
"secondary_mass",
"(",
"mass1",
",",
"mass2",
")",
":",
"mass1",
",",
"mass2",
",",
"input_is_array",
"=",
"ensurearray",
"(",
"mass1",
",",
"mass2",
")",
"if",
"mass1",
".",
"shape",
"!=",
"mass2",
".",
"shape",
":",
"raise",
"ValueError",
"(",
... | 41.555556 | 11.666667 |
def element(element, name, default=None):
"""
Returns the value of an element, or a default if it's not defined
:param element: The XML Element object
:type element: etree._Element
:param name: The name of the element to evaluate
:type name: str
:param default: The default value to return if the element is not defined
"""
element_value = element.find(name)
return element_value.text if element_value is not None else default | [
"def",
"element",
"(",
"element",
",",
"name",
",",
"default",
"=",
"None",
")",
":",
"element_value",
"=",
"element",
".",
"find",
"(",
"name",
")",
"return",
"element_value",
".",
"text",
"if",
"element_value",
"is",
"not",
"None",
"else",
"default"
] | 32.714286 | 18.571429 |
def _transform(self, inp):
"""Basic MD5 step transforming the digest based on the input.
Note that if the Mysterious Constants are arranged backwards
in little-endian order and decrypted with the DES they produce
OCCULT MESSAGES!
"""
a, b, c, d = A, B, C, D = self.A, self.B, self.C, self.D
# Round 1.
S11, S12, S13, S14 = 7, 12, 17, 22
a = XX(F, a, b, c, d, inp[ 0], S11, 0xD76AA478L) # 1
d = XX(F, d, a, b, c, inp[ 1], S12, 0xE8C7B756L) # 2
c = XX(F, c, d, a, b, inp[ 2], S13, 0x242070DBL) # 3
b = XX(F, b, c, d, a, inp[ 3], S14, 0xC1BDCEEEL) # 4
a = XX(F, a, b, c, d, inp[ 4], S11, 0xF57C0FAFL) # 5
d = XX(F, d, a, b, c, inp[ 5], S12, 0x4787C62AL) # 6
c = XX(F, c, d, a, b, inp[ 6], S13, 0xA8304613L) # 7
b = XX(F, b, c, d, a, inp[ 7], S14, 0xFD469501L) # 8
a = XX(F, a, b, c, d, inp[ 8], S11, 0x698098D8L) # 9
d = XX(F, d, a, b, c, inp[ 9], S12, 0x8B44F7AFL) # 10
c = XX(F, c, d, a, b, inp[10], S13, 0xFFFF5BB1L) # 11
b = XX(F, b, c, d, a, inp[11], S14, 0x895CD7BEL) # 12
a = XX(F, a, b, c, d, inp[12], S11, 0x6B901122L) # 13
d = XX(F, d, a, b, c, inp[13], S12, 0xFD987193L) # 14
c = XX(F, c, d, a, b, inp[14], S13, 0xA679438EL) # 15
b = XX(F, b, c, d, a, inp[15], S14, 0x49B40821L) # 16
# Round 2.
S21, S22, S23, S24 = 5, 9, 14, 20
a = XX(G, a, b, c, d, inp[ 1], S21, 0xF61E2562L) # 17
d = XX(G, d, a, b, c, inp[ 6], S22, 0xC040B340L) # 18
c = XX(G, c, d, a, b, inp[11], S23, 0x265E5A51L) # 19
b = XX(G, b, c, d, a, inp[ 0], S24, 0xE9B6C7AAL) # 20
a = XX(G, a, b, c, d, inp[ 5], S21, 0xD62F105DL) # 21
d = XX(G, d, a, b, c, inp[10], S22, 0x02441453L) # 22
c = XX(G, c, d, a, b, inp[15], S23, 0xD8A1E681L) # 23
b = XX(G, b, c, d, a, inp[ 4], S24, 0xE7D3FBC8L) # 24
a = XX(G, a, b, c, d, inp[ 9], S21, 0x21E1CDE6L) # 25
d = XX(G, d, a, b, c, inp[14], S22, 0xC33707D6L) # 26
c = XX(G, c, d, a, b, inp[ 3], S23, 0xF4D50D87L) # 27
b = XX(G, b, c, d, a, inp[ 8], S24, 0x455A14EDL) # 28
a = XX(G, a, b, c, d, inp[13], S21, 0xA9E3E905L) # 29
d = XX(G, d, a, b, c, inp[ 2], S22, 0xFCEFA3F8L) # 30
c = XX(G, c, d, a, b, inp[ 7], S23, 0x676F02D9L) # 31
b = XX(G, b, c, d, a, inp[12], S24, 0x8D2A4C8AL) # 32
# Round 3.
S31, S32, S33, S34 = 4, 11, 16, 23
a = XX(H, a, b, c, d, inp[ 5], S31, 0xFFFA3942L) # 33
d = XX(H, d, a, b, c, inp[ 8], S32, 0x8771F681L) # 34
c = XX(H, c, d, a, b, inp[11], S33, 0x6D9D6122L) # 35
b = XX(H, b, c, d, a, inp[14], S34, 0xFDE5380CL) # 36
a = XX(H, a, b, c, d, inp[ 1], S31, 0xA4BEEA44L) # 37
d = XX(H, d, a, b, c, inp[ 4], S32, 0x4BDECFA9L) # 38
c = XX(H, c, d, a, b, inp[ 7], S33, 0xF6BB4B60L) # 39
b = XX(H, b, c, d, a, inp[10], S34, 0xBEBFBC70L) # 40
a = XX(H, a, b, c, d, inp[13], S31, 0x289B7EC6L) # 41
d = XX(H, d, a, b, c, inp[ 0], S32, 0xEAA127FAL) # 42
c = XX(H, c, d, a, b, inp[ 3], S33, 0xD4EF3085L) # 43
b = XX(H, b, c, d, a, inp[ 6], S34, 0x04881D05L) # 44
a = XX(H, a, b, c, d, inp[ 9], S31, 0xD9D4D039L) # 45
d = XX(H, d, a, b, c, inp[12], S32, 0xE6DB99E5L) # 46
c = XX(H, c, d, a, b, inp[15], S33, 0x1FA27CF8L) # 47
b = XX(H, b, c, d, a, inp[ 2], S34, 0xC4AC5665L) # 48
# Round 4.
S41, S42, S43, S44 = 6, 10, 15, 21
a = XX(I, a, b, c, d, inp[ 0], S41, 0xF4292244L) # 49
d = XX(I, d, a, b, c, inp[ 7], S42, 0x432AFF97L) # 50
c = XX(I, c, d, a, b, inp[14], S43, 0xAB9423A7L) # 51
b = XX(I, b, c, d, a, inp[ 5], S44, 0xFC93A039L) # 52
a = XX(I, a, b, c, d, inp[12], S41, 0x655B59C3L) # 53
d = XX(I, d, a, b, c, inp[ 3], S42, 0x8F0CCC92L) # 54
c = XX(I, c, d, a, b, inp[10], S43, 0xFFEFF47DL) # 55
b = XX(I, b, c, d, a, inp[ 1], S44, 0x85845DD1L) # 56
a = XX(I, a, b, c, d, inp[ 8], S41, 0x6FA87E4FL) # 57
d = XX(I, d, a, b, c, inp[15], S42, 0xFE2CE6E0L) # 58
c = XX(I, c, d, a, b, inp[ 6], S43, 0xA3014314L) # 59
b = XX(I, b, c, d, a, inp[13], S44, 0x4E0811A1L) # 60
a = XX(I, a, b, c, d, inp[ 4], S41, 0xF7537E82L) # 61
d = XX(I, d, a, b, c, inp[11], S42, 0xBD3AF235L) # 62
c = XX(I, c, d, a, b, inp[ 2], S43, 0x2AD7D2BBL) # 63
b = XX(I, b, c, d, a, inp[ 9], S44, 0xEB86D391L) # 64
A = (A + a) & 0xffffffffL
B = (B + b) & 0xffffffffL
C = (C + c) & 0xffffffffL
D = (D + d) & 0xffffffffL
self.A, self.B, self.C, self.D = A, B, C, D | [
"def",
"_transform",
"(",
"self",
",",
"inp",
")",
":",
"a",
",",
"b",
",",
"c",
",",
"d",
"=",
"A",
",",
"B",
",",
"C",
",",
"D",
"=",
"self",
".",
"A",
",",
"self",
".",
"B",
",",
"self",
".",
"C",
",",
"self",
".",
"D",
"# Round 1.\r",... | 48.12 | 23.3 |
def to_native_units(self, motor):
"""
Return the native speed measurement required to achieve desired degrees-per-minute
"""
assert abs(self.degrees_per_minute) <= motor.max_dpm,\
"invalid degrees-per-minute: {} max DPM is {}, {} was requested".format(
motor, motor.max_dpm, self.degrees_per_minute)
return self.degrees_per_minute/motor.max_dpm * motor.max_speed | [
"def",
"to_native_units",
"(",
"self",
",",
"motor",
")",
":",
"assert",
"abs",
"(",
"self",
".",
"degrees_per_minute",
")",
"<=",
"motor",
".",
"max_dpm",
",",
"\"invalid degrees-per-minute: {} max DPM is {}, {} was requested\"",
".",
"format",
"(",
"motor",
",",
... | 52.375 | 21.375 |
def vnic_attach_to_network_distributed(nicspec, port_group, logger):
"""
Attach vNIC to a Distributed Port Group network
:param nicspec: <vim.vm.device.VirtualDeviceSpec>
:param port_group: <vim.dvs.DistributedVirtualPortgroup>
:param logger:
:return: updated 'nicspec'
"""
if nicspec and network_is_portgroup(port_group):
network_name = port_group.name
dvs_port_connection = vim.dvs.PortConnection()
dvs_port_connection.portgroupKey = port_group.key
dvs_port_connection.switchUuid = port_group.config.distributedVirtualSwitch.uuid
nicspec.device.backing = vim.vm.device.VirtualEthernetCard.DistributedVirtualPortBackingInfo()
nicspec.device.backing.port = dvs_port_connection
logger.debug(u"Assigning portgroup '{}' for vNIC".format(network_name))
else:
logger.warn(u"Cannot assigning portgroup for vNIC")
return nicspec | [
"def",
"vnic_attach_to_network_distributed",
"(",
"nicspec",
",",
"port_group",
",",
"logger",
")",
":",
"if",
"nicspec",
"and",
"network_is_portgroup",
"(",
"port_group",
")",
":",
"network_name",
"=",
"port_group",
".",
"name",
"dvs_port_connection",
"=",
"vim",
... | 44.5 | 24.318182 |
def log_p_blanket(self, beta):
    """ Creates complete Markov blanket for latent variables

    Parameters
    ----------
    beta : np.array
        Contains untransformed starting values for latent variables

    Returns
    ----------
    Markov blanket for latent variables
    """
    T = self.data.shape[0]
    states = np.zeros([self.state_no, T])
    # Each state series occupies a contiguous slice of beta after the first
    # z_no entries.
    for idx in range(self.state_no):
        start = self.z_no + T * idx
        states[idx, :] = beta[start:start + T]
    return np.append(self.evo_blanket(beta, states), self.markov_blanket(beta, states))
"def",
"log_p_blanket",
"(",
"self",
",",
"beta",
")",
":",
"states",
"=",
"np",
".",
"zeros",
"(",
"[",
"self",
".",
"state_no",
",",
"self",
".",
"data",
".",
"shape",
"[",
"0",
"]",
"]",
")",
"for",
"state_i",
"in",
"range",
"(",
"self",
".",
... | 38.705882 | 25.294118 |
def transform(self, data, test=False):
    '''Transform image data to latent space.

    Parameters
    ----------
    data : array-like shape (n_images, image_width, image_height,
        n_colors)
        Input numpy array of images, or an already-wrapped ``Variable``.
    test [optional] : bool
        Controls the test boolean for batch normalization.

    Returns
    -------
    latent_vec : array-like shape (n_images, latent_width)
    '''
    #make sure that data has the right shape.
    # Two symmetric branches: one normalizes a raw array, the other the
    # array inside an existing Variable (accessed through ``data.data``).
    if not type(data) == Variable:
        # Promote a single image (3d) to a batch of one.
        if len(data.shape) < 4:
            data = data[np.newaxis]
        if len(data.shape) != 4:
            raise TypeError("Invalid dimensions for image data. Dim = %s.\
             Must be 4d array." % str(data.shape))
        # Convert channels-last (NHWC) input to channels-first (NCHW).
        if data.shape[1] != self.color_channels:
            if data.shape[-1] == self.color_channels:
                data = data.transpose(0, 3, 1, 2)
            else:
                raise TypeError("Invalid dimensions for image data. Dim = %s"
                                % str(data.shape))
        data = Variable(data)
    else:
        # Same normalization, but on the underlying array of the Variable.
        if len(data.data.shape) < 4:
            data.data = data.data[np.newaxis]
        if len(data.data.shape) != 4:
            raise TypeError("Invalid dimensions for image data. Dim = %s.\
             Must be 4d array." % str(data.data.shape))
        if data.data.shape[1] != self.color_channels:
            if data.data.shape[-1] == self.color_channels:
                data.data = data.data.transpose(0, 3, 1, 2)
            else:
                # NOTE(review): this message formats ``data.shape`` while the
                # sibling branch above uses ``data.data.shape`` -- looks like
                # an inconsistency; confirm Variable exposes ``.shape`` here.
                raise TypeError("Invalid dimensions for image data. Dim = %s"
                                % str(data.shape))
    # Actual transformation.
    if self.flag_gpu:
        data.to_gpu()
    # _encode returns a tuple; [0] is presumably the latent mean -- TODO confirm.
    z = self._encode(data, test=test)[0]
    z.to_cpu()

    return z.data
"def",
"transform",
"(",
"self",
",",
"data",
",",
"test",
"=",
"False",
")",
":",
"#make sure that data has the right shape.",
"if",
"not",
"type",
"(",
"data",
")",
"==",
"Variable",
":",
"if",
"len",
"(",
"data",
".",
"shape",
")",
"<",
"4",
":",
"d... | 38.82 | 18.74 |
def get_index(binstr, end_index=160):
    """
    Return the 1-based position of the first '1' bit from the left in
    ``binstr``, or ``end_index`` when the string contains no '1'.

    :param binstr: string of '0'/'1' characters
    :param end_index: value returned when no '1' is present
    :return: int position (1-based), or ``end_index``
    """
    # Fix: drop the dead ``res = -1`` initialization and the redundant
    # temporary -- every path assigned ``res`` before returning it.
    try:
        return binstr.index('1') + 1
    except ValueError:
        # No '1' found: treat the word as all zeros.
        return end_index
"def",
"get_index",
"(",
"binstr",
",",
"end_index",
"=",
"160",
")",
":",
"res",
"=",
"-",
"1",
"try",
":",
"res",
"=",
"binstr",
".",
"index",
"(",
"'1'",
")",
"+",
"1",
"except",
"ValueError",
":",
"res",
"=",
"end_index",
"return",
"res"
] | 20.2 | 16.333333 |
def _url_params(size:str='>400*300', format:str='jpg') -> str:
    "Build Google Images Search Url params and return them as a string."
    # Map user-facing format names to Google's ``ift:`` tbs filter tokens.
    fmt_map = {'jpg': 'ift:jpg', 'gif': 'ift:gif', 'png': 'ift:png', 'bmp': 'ift:bmp',
               'svg': 'ift:svg', 'webp': 'webp', 'ico': 'ift:ico'}
    if size not in _img_sizes:
        raise RuntimeError(f"""Unexpected size argument value: {size}.
                    See `widgets.image_downloader._img_sizes` for supported sizes.""")
    if format not in fmt_map:
        raise RuntimeError(f"Unexpected image file format: {format}. Use jpg, gif, png, bmp, svg, webp, or ico.")
    return "&tbs=" + _img_sizes[size] + "," + fmt_map[format]
"def",
"_url_params",
"(",
"size",
":",
"str",
"=",
"'>400*300'",
",",
"format",
":",
"str",
"=",
"'jpg'",
")",
"->",
"str",
":",
"_fmts",
"=",
"{",
"'jpg'",
":",
"'ift:jpg'",
",",
"'gif'",
":",
"'ift:gif'",
",",
"'png'",
":",
"'ift:png'",
",",
"'bmp... | 71.777778 | 27.888889 |
def _get_record(self, ipnum):
    """
    Populate location dict for converted IP.
    Returns dict with numerous location properties (empty dict when the IP
    resolves to no record).

    :arg ipnum: Result of ip2long conversion
    """
    seek_country = self._seek_country(ipnum)
    # Seeking to the segment boundary means "no record for this IP".
    if seek_country == self._databaseSegments:
        return {}

    read_length = (2 * self._recordLength - 1) * self._databaseSegments
    # Lock around the shared file handle: seek+read must be atomic.
    try:
        self._lock.acquire()
        self._fp.seek(seek_country + read_length, os.SEEK_SET)
        buf = self._fp.read(const.FULL_RECORD_LENGTH)
    finally:
        self._lock.release()

    # On Python 3 decode once so the '\0' comparisons below work on str.
    if PY3 and type(buf) is bytes:
        buf = buf.decode(ENCODING)

    record = {
        'dma_code': 0,
        'area_code': 0,
        'metro_code': None,
        'postal_code': None
    }

    latitude = 0
    longitude = 0

    # Byte 0 is the country index shared by all four country lookup tables.
    char = ord(buf[0])
    record['country_code'] = const.COUNTRY_CODES[char]
    record['country_code3'] = const.COUNTRY_CODES3[char]
    record['country_name'] = const.COUNTRY_NAMES[char]
    record['continent'] = const.CONTINENT_NAMES[char]

    def read_data(buf, pos):
        # Scan a NUL-terminated field; None for an empty field.
        cur = pos
        while buf[cur] != '\0':
            cur += 1
        return cur, buf[pos:cur] if cur > pos else None

    # Three consecutive NUL-terminated strings follow the country byte.
    offset, record['region_code'] = read_data(buf, 1)
    offset, record['city'] = read_data(buf, offset + 1)
    offset, record['postal_code'] = read_data(buf, offset + 1)
    offset = offset + 1

    # Latitude/longitude are 3-byte little-endian fixed-point values.
    for j in range(3):
        latitude += (ord(buf[offset + j]) << (j * 8))

    for j in range(3):
        longitude += (ord(buf[offset + j + 3]) << (j * 8))

    # Stored scaled by 10000 and biased by +180 degrees.
    record['latitude'] = (latitude / 10000.0) - 180.0
    record['longitude'] = (longitude / 10000.0) - 180.0

    # City edition rev1 appends a combined DMA/area code field, US-only.
    if self._databaseType in (const.CITY_EDITION_REV1, const.CITY_EDITION_REV1_V6):
        if record['country_code'] == 'US':
            dma_area = 0
            for j in range(3):
                dma_area += ord(buf[offset + j + 6]) << (j * 8)

            # dma_area packs dma_code*1000 + area_code.
            record['dma_code'] = int(floor(dma_area / 1000))
            record['area_code'] = dma_area % 1000
            record['metro_code'] = const.DMA_MAP.get(record['dma_code'])

    params = (record['country_code'], record['region_code'])
    record['time_zone'] = time_zone_by_country_and_region(*params)

    return record
"def",
"_get_record",
"(",
"self",
",",
"ipnum",
")",
":",
"seek_country",
"=",
"self",
".",
"_seek_country",
"(",
"ipnum",
")",
"if",
"seek_country",
"==",
"self",
".",
"_databaseSegments",
":",
"return",
"{",
"}",
"read_length",
"=",
"(",
"2",
"*",
"se... | 33.416667 | 21.416667 |
def obj(self):
    """
    Return the wrapped value when one has been set (i.e. a chain is in
    progress); otherwise return the originally passed object.
    """
    current = self._wrapped
    if current is self.Null:
        return self.object
    return current
"def",
"obj",
"(",
"self",
")",
":",
"if",
"self",
".",
"_wrapped",
"is",
"not",
"self",
".",
"Null",
":",
"return",
"self",
".",
"_wrapped",
"else",
":",
"return",
"self",
".",
"object"
] | 27.888889 | 10.111111 |
def _extract_dir(self, dir_not_exists, output):
"""Extract the content of dvc tree file
Args:
self(object) - Repo class instance
dir_not_exists(bool) - flag for directory existence
output(object) - OutputLOCAL class instance
Returns:
dict - dictionary with keys - paths to file in .dvc/cache
values -checksums for that files
"""
if not dir_not_exists:
lst = output.dir_cache
return {i["relpath"]: i["md5"] for i in lst}
return {} | [
"def",
"_extract_dir",
"(",
"self",
",",
"dir_not_exists",
",",
"output",
")",
":",
"if",
"not",
"dir_not_exists",
":",
"lst",
"=",
"output",
".",
"dir_cache",
"return",
"{",
"i",
"[",
"\"relpath\"",
"]",
":",
"i",
"[",
"\"md5\"",
"]",
"for",
"i",
"in"... | 37.071429 | 14.928571 |
def replace_post_data_parameters(request, replacements):
    """
    Replace post data in request--either form data or json--according to
    replacements. The replacements should be a list of (key, value) pairs where
    the value can be any of:
    1. A simple replacement string value.
    2. None to remove the given header.
    3. A callable which accepts (key, value, request) and returns a string
       value or None.
    """
    replacements = dict(replacements)
    # Streaming bodies (BytesIO) are left untouched; only buffered POSTs
    # are rewritten.
    if request.method == 'POST' and not isinstance(request.body, BytesIO):
        if request.headers.get('Content-Type') == 'application/json':
            # JSON body: pop each matched key, let callables compute the
            # replacement, and drop the key when the replacement is None.
            json_data = json.loads(request.body.decode('utf-8'))
            for k, rv in replacements.items():
                if k in json_data:
                    ov = json_data.pop(k)
                    if callable(rv):
                        rv = rv(key=k, value=ov, request=request)
                    if rv is not None:
                        json_data[k] = rv
            request.body = json.dumps(json_data).encode('utf-8')
        else:
            # Form-encoded body: work on bytes throughout.
            if isinstance(request.body, text_type):
                request.body = request.body.encode('utf-8')
            # partition gives (key, b'=', value) triples per '&'-separated pair.
            splits = [p.partition(b'=') for p in request.body.split(b'&')]
            new_splits = []
            for k, sep, ov in splits:
                # NOTE(review): bytes.partition never returns None for the
                # separator (it returns b'' when '=' is absent), so this
                # branch looks unreachable -- the intent was probably
                # ``if not sep``; confirm against upstream.
                if sep is None:
                    new_splits.append((k, sep, ov))
                else:
                    rk = k.decode('utf-8')
                    if rk not in replacements:
                        new_splits.append((k, sep, ov))
                    else:
                        rv = replacements[rk]
                        if callable(rv):
                            rv = rv(key=rk, value=ov.decode('utf-8'),
                                    request=request)
                        # None replacement drops the parameter entirely.
                        if rv is not None:
                            new_splits.append((k, sep, rv.encode('utf-8')))
        # Reassemble, preserving pairs that had no '=' separator.
        request.body = b'&'.join(k if sep is None else b''.join([k, sep, v])
                                 for k, sep, v in new_splits)
    return request
"def",
"replace_post_data_parameters",
"(",
"request",
",",
"replacements",
")",
":",
"replacements",
"=",
"dict",
"(",
"replacements",
")",
"if",
"request",
".",
"method",
"==",
"'POST'",
"and",
"not",
"isinstance",
"(",
"request",
".",
"body",
",",
"BytesIO"... | 46.795455 | 15.204545 |
def mod_watch(name, **kwargs):
    '''
    Install, reinstall, or remove a package in response to a ``watch``
    requisite.

    .. note::
        This state exists to support special handling of the ``watch``
        :ref:`requisite <requisites>`. It should not be called directly.

        Parameters for this function should be set by the state being
        triggered.
    '''
    sfun = kwargs.pop('sfun', None)
    # Dispatch table from state-function name to the implementing state.
    dispatch = {
        'purged': purged,
        'latest': latest,
        'removed': removed,
        'installed': installed,
    }
    handler = dispatch.get(sfun)
    if handler is not None:
        return handler(name, **kwargs)
    # Unknown sfun: report failure in standard state-return form.
    return {'name': name,
            'changes': {},
            'comment': 'pkg.{0} does not work with the watch requisite'.format(sfun),
            'result': False}
"def",
"mod_watch",
"(",
"name",
",",
"*",
"*",
"kwargs",
")",
":",
"sfun",
"=",
"kwargs",
".",
"pop",
"(",
"'sfun'",
",",
"None",
")",
"mapfun",
"=",
"{",
"'purged'",
":",
"purged",
",",
"'latest'",
":",
"latest",
",",
"'removed'",
":",
"removed",
... | 34.904762 | 21.095238 |
def remove_option(self, mask):
    """Clear the query flag bits given in ``mask``.

    For example, to unset the tailable flag::

        cursor.remove_option(2)
    """
    if not isinstance(mask, int):
        raise TypeError("mask must be an int")
    self.__check_okay_to_chain()

    # Clearing the exhaust bit also turns off exhaust mode on this cursor.
    exhaust_bit = _QUERY_OPTIONS["exhaust"]
    if mask & exhaust_bit:
        self.__exhaust = False
    self.__query_flags = self.__query_flags & ~mask
    return self
"def",
"remove_option",
"(",
"self",
",",
"mask",
")",
":",
"if",
"not",
"isinstance",
"(",
"mask",
",",
"int",
")",
":",
"raise",
"TypeError",
"(",
"\"mask must be an int\"",
")",
"self",
".",
"__check_okay_to_chain",
"(",
")",
"if",
"mask",
"&",
"_QUERY_... | 27.8 | 13.133333 |
def execute(self, statements, exc=IntegrityError, rasie_as=ValueError):
    """Execute ``statements`` in a session, and perform a rollback on
    error. ``exc`` is a single exception object or a tuple of
    objects to be used in the except clause. The error message is
    re-raised as the exception specified by ``rasie_as`` (note: the
    parameter name is misspelled -- it should read ``raise_as`` -- but is
    kept as-is for backward compatibility with keyword callers).
    """
    Session = sessionmaker(bind=self.engine)
    session = Session()
    try:
        for statement in statements:
            session.execute(statement)
    except exc as err:
        # Roll back the whole batch and surface the message as rasie_as.
        session.rollback()
        raise rasie_as(str(err))
    else:
        # Commit only when every statement succeeded.
        session.commit()
    finally:
        session.close()
"def",
"execute",
"(",
"self",
",",
"statements",
",",
"exc",
"=",
"IntegrityError",
",",
"rasie_as",
"=",
"ValueError",
")",
":",
"Session",
"=",
"sessionmaker",
"(",
"bind",
"=",
"self",
".",
"engine",
")",
"session",
"=",
"Session",
"(",
")",
"try",
... | 33.095238 | 18.142857 |
def gather(self, *futures):
    """Gather futures/coroutines into a single aggregate future.

    The combined future is stored on ``self.futures`` and the number of
    gathered items on ``self.ft_count``; nothing is returned.

    .. code-block:: python

        >>> loop = Loop(return_exceptions=True)
        >>> loop.gather(do_something(), do_something_else())
        >>> with loop as l:
        ...     result = l.run_until_complete()

    :param futures: One or more coroutines or futures.
    :type futures: asyncio.Future, coroutine
    :return: None; the aggregate future is stored in ``self.futures``
    """
    # Fix: the old annotation ``Union[asyncio.Future, asyncio.coroutine]``
    # is evaluated at definition time and ``asyncio.coroutine`` was removed
    # in Python 3.11, which made importing this module fail there.
    import sys  # local import: used only for the version check below

    self.ft_count = len(futures)
    kwargs = {'return_exceptions': self.return_exceptions}
    if sys.version_info < (3, 10):
        # asyncio.gather() dropped the ``loop`` parameter in Python 3.10;
        # on older versions keep attaching to the configured loop.
        kwargs['loop'] = self.loop
    self.futures = asyncio.gather(*futures, **kwargs)
"def",
"gather",
"(",
"self",
",",
"*",
"futures",
":",
"Union",
"[",
"asyncio",
".",
"Future",
",",
"asyncio",
".",
"coroutine",
"]",
")",
":",
"self",
".",
"ft_count",
"=",
"len",
"(",
"futures",
")",
"self",
".",
"futures",
"=",
"asyncio",
".",
... | 33.147059 | 19.441176 |
def weave_on(advices, pointcut=None, ctx=None, depth=1, ttl=None):
    """Decorator for weaving advices on a callable target.

    :param advices: advices to weave on the decorated target.
    :param pointcut: condition for weaving advices on the joinpoint; its
        meaning depends on its type:

        - NoneType: advices are weaved on target.
        - str: target name is compared to pointcut regex.
        - function: called with target in parameter, if True, advices will
          be weaved on target.
    :param ctx: target ctx (instance or class).
    :param depth: class weaving depth.
    :type depth: int
    :param ttl: time-to-live forwarded to ``weave``.
    """
    def _decorator(target):
        """Weave the configured advices onto target, returning it unchanged."""
        weave(
            target=target, advices=advices, pointcut=pointcut,
            ctx=ctx, depth=depth, ttl=ttl
        )
        return target

    return _decorator
"def",
"weave_on",
"(",
"advices",
",",
"pointcut",
"=",
"None",
",",
"ctx",
"=",
"None",
",",
"depth",
"=",
"1",
",",
"ttl",
"=",
"None",
")",
":",
"def",
"__weave",
"(",
"target",
")",
":",
"\"\"\"Internal weave function.\"\"\"",
"weave",
"(",
"target"... | 30.586207 | 20.206897 |
def minute(self):
    """
    Extract the "minute" part from a date column.

    :returns: a single-column H2OFrame containing the "minute" part from the source frame.
    """
    out = H2OFrame._expr(expr=ExprNode("minute", self), cache=self._ex._cache)
    # When the cached column types are valid, every column becomes "int".
    if out._ex._cache.types_valid():
        out._ex._cache.types = dict.fromkeys(self._ex._cache.types, "int")
    return out
"def",
"minute",
"(",
"self",
")",
":",
"fr",
"=",
"H2OFrame",
".",
"_expr",
"(",
"expr",
"=",
"ExprNode",
"(",
"\"minute\"",
",",
"self",
")",
",",
"cache",
"=",
"self",
".",
"_ex",
".",
"_cache",
")",
"if",
"fr",
".",
"_ex",
".",
"_cache",
".",... | 40.5 | 23.7 |
def _join_keys_v1(left, right):
"""
Join two keys into a format separable by using _split_keys_v1.
"""
if left.endswith(':') or '::' in left:
raise ValueError("Can't join a left string ending in ':' or containing '::'")
return u"{}::{}".format(_encode_v1(left), _encode_v1(right)) | [
"def",
"_join_keys_v1",
"(",
"left",
",",
"right",
")",
":",
"if",
"left",
".",
"endswith",
"(",
"':'",
")",
"or",
"'::'",
"in",
"left",
":",
"raise",
"ValueError",
"(",
"\"Can't join a left string ending in ':' or containing '::'\"",
")",
"return",
"u\"{}::{}\"",... | 43.142857 | 15.142857 |
def summarize_dataframe(self):
    """Summarize default dataframe for this cohort using a hash function.

    Useful for confirming the version of data used in various reports, e.g. ipynbs
    """
    if self.dataframe_hash:
        return(self.dataframe_hash)
    else:
        # NOTE(review): apparent bug -- ``df`` is computed but never used,
        # and ``self.dataframe_hash`` is never assigned here, so this branch
        # returns the same falsy value that routed us into it. A hashing
        # step over ``df`` seems to be missing; confirm intended behavior
        # before fixing.
        df = self._as_dataframe_unmodified()
        return(self.dataframe_hash)
"def",
"summarize_dataframe",
"(",
"self",
")",
":",
"if",
"self",
".",
"dataframe_hash",
":",
"return",
"(",
"self",
".",
"dataframe_hash",
")",
"else",
":",
"df",
"=",
"self",
".",
"_as_dataframe_unmodified",
"(",
")",
"return",
"(",
"self",
".",
"datafr... | 41.555556 | 11.333333 |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.