text stringlengths 89 104k | code_tokens list | avg_line_len float64 7.91 980 | score float64 0 630 |
|---|---|---|---|
def remove_job_resolver(self, job_resolver):
"""Remove job_resolver from the list of job resolvers.
Keyword arguments:
job_resolver -- Function reference of the job resolver to be removed.
"""
for i, r in enumerate(self.job_resolvers()):
if job_resolver == r:
del self._job_resolvers[i] | [
"def",
"remove_job_resolver",
"(",
"self",
",",
"job_resolver",
")",
":",
"for",
"i",
",",
"r",
"in",
"enumerate",
"(",
"self",
".",
"job_resolvers",
"(",
")",
")",
":",
"if",
"job_resolver",
"==",
"r",
":",
"del",
"self",
".",
"_job_resolvers",
"[",
"... | 38.555556 | 12.888889 |
def sum(self, array, role = None):
"""
Return the sum of ``array`` for the members of the entity.
``array`` must have the dimension of the number of persons in the simulation
If ``role`` is provided, only the entity member with the given role are taken into account.
Example:
>>> salaries = household.members('salary', '2018-01') # e.g. [2000, 1500, 0, 0, 0]
>>> household.sum(salaries)
>>> array([3500])
"""
self.entity.check_role_validity(role)
self.members.check_array_compatible_with_entity(array)
if role is not None:
role_filter = self.members.has_role(role)
return np.bincount(
self.members_entity_id[role_filter],
weights = array[role_filter],
minlength = self.count)
else:
return np.bincount(self.members_entity_id, weights = array) | [
"def",
"sum",
"(",
"self",
",",
"array",
",",
"role",
"=",
"None",
")",
":",
"self",
".",
"entity",
".",
"check_role_validity",
"(",
"role",
")",
"self",
".",
"members",
".",
"check_array_compatible_with_entity",
"(",
"array",
")",
"if",
"role",
"is",
"n... | 39.125 | 22.125 |
def assign_properties(thing):
"""Assign properties to an object.
When creating something via a post request (e.g. a node), you can pass the
properties of the object in the request. This function gets those values
from the request and fills in the relevant columns of the table.
"""
for p in range(5):
property_name = "property" + str(p + 1)
property = request_parameter(parameter=property_name, optional=True)
if property:
setattr(thing, property_name, property)
session.commit() | [
"def",
"assign_properties",
"(",
"thing",
")",
":",
"for",
"p",
"in",
"range",
"(",
"5",
")",
":",
"property_name",
"=",
"\"property\"",
"+",
"str",
"(",
"p",
"+",
"1",
")",
"property",
"=",
"request_parameter",
"(",
"parameter",
"=",
"property_name",
",... | 38 | 21.785714 |
def get_object(self):
"""
Get the object we are working with. Makes sure
get_queryset is called even when in add mode.
"""
if not self.force_add and self.kwargs.get(self.slug_url_kwarg, None):
return super(FormView, self).get_object()
else:
self.queryset = self.get_queryset()
return None | [
"def",
"get_object",
"(",
"self",
")",
":",
"if",
"not",
"self",
".",
"force_add",
"and",
"self",
".",
"kwargs",
".",
"get",
"(",
"self",
".",
"slug_url_kwarg",
",",
"None",
")",
":",
"return",
"super",
"(",
"FormView",
",",
"self",
")",
".",
"get_ob... | 29.916667 | 19.25 |
def _load_recursive(self, shape, gen):
"""Recursively create a multidimensional array (as lists of lists)
from a bit generator.
"""
if len(shape) > 0:
ans = []
for i in range(shape[0]):
ans.append(self._load_recursive(shape[1:], gen))
else:
fields = []
for code, length in self.format:
field = None
raw = gen.send(length)
if code == 'u':
field = raw
elif code == 'i':
field = raw
# Interpret as 2's complement
if field >= 1 << (length - 1):
field -= 1 << length
elif code == 'b':
field = bool(raw)
elif code == 'c':
field = six.int2byte(raw)
elif code == 'f':
if length == 32:
field = _np.uint32(raw).view(_np.float32)
elif length == 64:
field = _np.uint64(raw).view(_np.float64)
else:
raise ValueError('unhandled float length {0}'.format((code, length)))
else:
raise ValueError('unhandled format {0}'.format((code, length)))
fields.append(field)
if len(fields) == 1:
ans = fields[0]
else:
ans = tuple(fields)
return ans | [
"def",
"_load_recursive",
"(",
"self",
",",
"shape",
",",
"gen",
")",
":",
"if",
"len",
"(",
"shape",
")",
">",
"0",
":",
"ans",
"=",
"[",
"]",
"for",
"i",
"in",
"range",
"(",
"shape",
"[",
"0",
"]",
")",
":",
"ans",
".",
"append",
"(",
"self... | 38.205128 | 12.282051 |
def _blocks_to_samples(sig_data, n_samp, fmt):
"""
Convert uint8 blocks into signal samples for unaligned dat formats.
Parameters
----------
sig_data : numpy array
The uint8 data blocks.
n_samp : int
The number of samples contained in the bytes
Returns
-------
signal : numpy array
The numpy array of digital samples
"""
if fmt == '212':
# Easier to process when dealing with whole blocks
if n_samp % 2:
n_samp += 1
added_samps = 1
sig_data = np.append(sig_data, np.zeros(1, dtype='uint8'))
else:
added_samps = 0
sig_data = sig_data.astype('int16')
sig = np.zeros(n_samp, dtype='int16')
# One sample pair is stored in one byte triplet.
# Even numbered samples
sig[0::2] = sig_data[0::3] + 256 * np.bitwise_and(sig_data[1::3], 0x0f)
# Odd numbered samples (len(sig) always > 1 due to processing of
# whole blocks)
sig[1::2] = sig_data[2::3] + 256*np.bitwise_and(sig_data[1::3] >> 4, 0x0f)
# Remove trailing sample read within the byte block if
# originally odd sampled
if added_samps:
sig = sig[:-added_samps]
# Loaded values as un_signed. Convert to 2's complement form:
# values > 2^11-1 are negative.
sig[sig > 2047] -= 4096
elif fmt == '310':
# Easier to process when dealing with whole blocks
if n_samp % 3:
n_samp = upround(n_samp,3)
added_samps = n_samp % 3
sig_data = np.append(sig_data, np.zeros(added_samps, dtype='uint8'))
else:
added_samps = 0
sig_data = sig_data.astype('int16')
sig = np.zeros(n_samp, dtype='int16')
# One sample triplet is stored in one byte quartet
# First sample is 7 msb of first byte and 3 lsb of second byte.
sig[0::3] = (sig_data[0::4] >> 1)[0:len(sig[0::3])] + 128 * np.bitwise_and(sig_data[1::4], 0x07)[0:len(sig[0::3])]
# Second signal is 7 msb of third byte and 3 lsb of forth byte
sig[1::3] = (sig_data[2::4] >> 1)[0:len(sig[1::3])] + 128 * np.bitwise_and(sig_data[3::4], 0x07)[0:len(sig[1::3])]
# Third signal is 5 msb of second byte and 5 msb of forth byte
sig[2::3] = np.bitwise_and((sig_data[1::4] >> 3), 0x1f)[0:len(sig[2::3])] + 32 * np.bitwise_and(sig_data[3::4] >> 3, 0x1f)[0:len(sig[2::3])]
# Remove trailing samples read within the byte block if
# originally not 3n sampled
if added_samps:
sig = sig[:-added_samps]
# Loaded values as un_signed. Convert to 2's complement form:
# values > 2^9-1 are negative.
sig[sig > 511] -= 1024
elif fmt == '311':
# Easier to process when dealing with whole blocks
if n_samp % 3:
n_samp = upround(n_samp,3)
added_samps = n_samp % 3
sig_data = np.append(sig_data, np.zeros(added_samps, dtype='uint8'))
else:
added_samps = 0
sig_data = sig_data.astype('int16')
sig = np.zeros(n_samp, dtype='int16')
# One sample triplet is stored in one byte quartet
# First sample is first byte and 2 lsb of second byte.
sig[0::3] = sig_data[0::4][0:len(sig[0::3])] + 256 * np.bitwise_and(sig_data[1::4], 0x03)[0:len(sig[0::3])]
# Second sample is 6 msb of second byte and 4 lsb of third byte
sig[1::3] = (sig_data[1::4] >> 2)[0:len(sig[1::3])] + 64 * np.bitwise_and(sig_data[2::4], 0x0f)[0:len(sig[1::3])]
# Third sample is 4 msb of third byte and 6 msb of forth byte
sig[2::3] = (sig_data[2::4] >> 4)[0:len(sig[2::3])] + 16 * np.bitwise_and(sig_data[3::4], 0x7f)[0:len(sig[2::3])]
# Remove trailing samples read within the byte block if
# originally not 3n sampled
if added_samps:
sig = sig[:-added_samps]
# Loaded values as un_signed. Convert to 2's complement form.
# Values > 2^9-1 are negative.
sig[sig > 511] -= 1024
return sig | [
"def",
"_blocks_to_samples",
"(",
"sig_data",
",",
"n_samp",
",",
"fmt",
")",
":",
"if",
"fmt",
"==",
"'212'",
":",
"# Easier to process when dealing with whole blocks",
"if",
"n_samp",
"%",
"2",
":",
"n_samp",
"+=",
"1",
"added_samps",
"=",
"1",
"sig_data",
"... | 38.480769 | 24.75 |
def activities(self, limit=1, event=None):
"""Return device activity information."""
activities = self._activities or []
# Filter our activity array if requested
if event:
activities = list(
filter(
lambda activity:
activity[CONST.EVENT] == event, activities))
# Return the requested number
return activities[:limit] | [
"def",
"activities",
"(",
"self",
",",
"limit",
"=",
"1",
",",
"event",
"=",
"None",
")",
":",
"activities",
"=",
"self",
".",
"_activities",
"or",
"[",
"]",
"# Filter our activity array if requested",
"if",
"event",
":",
"activities",
"=",
"list",
"(",
"f... | 32.461538 | 13.923077 |
def set_tab(self, widget, switch=False, title=None):
"""Add or modify a tab.
If widget is not a tab, it will be added. If switch is True, switch to
this tab. If title is given, set the tab's title.
"""
if widget not in self._widgets:
self._widgets.append(widget)
self._widget_title[widget] = ''
if switch:
self._tab_index = self._widgets.index(widget)
if title:
self._widget_title[widget] = title
self._update_tabs() | [
"def",
"set_tab",
"(",
"self",
",",
"widget",
",",
"switch",
"=",
"False",
",",
"title",
"=",
"None",
")",
":",
"if",
"widget",
"not",
"in",
"self",
".",
"_widgets",
":",
"self",
".",
"_widgets",
".",
"append",
"(",
"widget",
")",
"self",
".",
"_wi... | 36.857143 | 13.714286 |
def handle_message(self, client_conn, msg):
"""Handle messages of all types from clients.
Parameters
----------
client_conn : ClientConnection object
The client connection the message was from.
msg : Message object
The message to process.
"""
# log messages received so that no one else has to
self._logger.debug('received: {0!s}'.format(msg))
if msg.mtype == msg.REQUEST:
return self.handle_request(client_conn, msg)
elif msg.mtype == msg.INFORM:
return self.handle_inform(client_conn, msg)
elif msg.mtype == msg.REPLY:
return self.handle_reply(client_conn, msg)
else:
reason = "Unexpected message type received by server ['%s']." \
% (msg,)
client_conn.inform(self.create_log_inform("error", reason, "root")) | [
"def",
"handle_message",
"(",
"self",
",",
"client_conn",
",",
"msg",
")",
":",
"# log messages received so that no one else has to",
"self",
".",
"_logger",
".",
"debug",
"(",
"'received: {0!s}'",
".",
"format",
"(",
"msg",
")",
")",
"if",
"msg",
".",
"mtype",
... | 37.125 | 16.958333 |
def filter(self, *filt, **kwargs):
"""Filter this `TimeSeries` with an IIR or FIR filter
Parameters
----------
*filt : filter arguments
1, 2, 3, or 4 arguments defining the filter to be applied,
- an ``Nx1`` `~numpy.ndarray` of FIR coefficients
- an ``Nx6`` `~numpy.ndarray` of SOS coefficients
- ``(numerator, denominator)`` polynomials
- ``(zeros, poles, gain)``
- ``(A, B, C, D)`` 'state-space' representation
filtfilt : `bool`, optional
filter forward and backwards to preserve phase,
default: `False`
analog : `bool`, optional
if `True`, filter coefficients will be converted from Hz
to Z-domain digital representation, default: `False`
inplace : `bool`, optional
if `True`, this array will be overwritten with the filtered
version, default: `False`
**kwargs
other keyword arguments are passed to the filter method
Returns
-------
result : `TimeSeries`
the filtered version of the input `TimeSeries`
Notes
-----
IIR filters are converted either into cascading
second-order sections (if `scipy >= 0.16` is installed), or into the
``(numerator, denominator)`` representation before being applied
to this `TimeSeries`.
.. note::
When using `scipy < 0.16` some higher-order filters may be
unstable. With `scipy >= 0.16` higher-order filters are
decomposed into second-order-sections, and so are much more stable.
FIR filters are passed directly to :func:`scipy.signal.lfilter` or
:func:`scipy.signal.filtfilt` without any conversions.
See also
--------
scipy.signal.sosfilt
for details on filtering with second-order sections
(`scipy >= 0.16` only)
scipy.signal.sosfiltfilt
for details on forward-backward filtering with second-order
sections (`scipy >= 0.18` only)
scipy.signal.lfilter
for details on filtering (without SOS)
scipy.signal.filtfilt
for details on forward-backward filtering (without SOS)
Raises
------
ValueError
if ``filt`` arguments cannot be interpreted properly
Examples
--------
We can design an arbitrarily complicated filter using
:mod:`gwpy.signal.filter_design`
>>> from gwpy.signal import filter_design
>>> bp = filter_design.bandpass(50, 250, 4096.)
>>> notches = [filter_design.notch(f, 4096.) for f in (60, 120, 180)]
>>> zpk = filter_design.concatenate_zpks(bp, *notches)
And then can download some data from LOSC to apply it using
`TimeSeries.filter`:
>>> from gwpy.timeseries import TimeSeries
>>> data = TimeSeries.fetch_open_data('H1', 1126259446, 1126259478)
>>> filtered = data.filter(zpk, filtfilt=True)
We can plot the original signal, and the filtered version, cutting
off either end of the filtered data to remove filter-edge artefacts
>>> from gwpy.plot import Plot
>>> plot = Plot(data, filtered[128:-128], separate=True)
>>> plot.show()
"""
# parse keyword arguments
filtfilt = kwargs.pop('filtfilt', False)
# parse filter
form, filt = filter_design.parse_filter(
filt, analog=kwargs.pop('analog', False),
sample_rate=self.sample_rate.to('Hz').value,
)
if form == 'zpk':
try:
sos = signal.zpk2sos(*filt)
except AttributeError: # scipy < 0.16, no SOS filtering
sos = None
b, a = signal.zpk2tf(*filt)
else:
sos = None
b, a = filt
# perform filter
kwargs.setdefault('axis', 0)
if sos is not None and filtfilt:
out = signal.sosfiltfilt(sos, self, **kwargs)
elif sos is not None:
out = signal.sosfilt(sos, self, **kwargs)
elif filtfilt:
out = signal.filtfilt(b, a, self, **kwargs)
else:
out = signal.lfilter(b, a, self, **kwargs)
# format as type(self)
new = out.view(type(self))
new.__metadata_finalize__(self)
new._unit = self.unit
return new | [
"def",
"filter",
"(",
"self",
",",
"*",
"filt",
",",
"*",
"*",
"kwargs",
")",
":",
"# parse keyword arguments",
"filtfilt",
"=",
"kwargs",
".",
"pop",
"(",
"'filtfilt'",
",",
"False",
")",
"# parse filter",
"form",
",",
"filt",
"=",
"filter_design",
".",
... | 34.031008 | 22.170543 |
def _lazy_load_units_by_code():
"""Populate dict of units by code iff UNITS_BY_CODE is empty."""
if UNITS_BY_CODE:
# already populated
return
for unit in units.UNITS_BY_NAME.values():
UNITS_BY_CODE[unit.code] = unit | [
"def",
"_lazy_load_units_by_code",
"(",
")",
":",
"if",
"UNITS_BY_CODE",
":",
"# already populated",
"return",
"for",
"unit",
"in",
"units",
".",
"UNITS_BY_NAME",
".",
"values",
"(",
")",
":",
"UNITS_BY_CODE",
"[",
"unit",
".",
"code",
"]",
"=",
"unit"
] | 28.375 | 15.625 |
def assert_valid_path(path):
"""Checks if a path is a correct format that Marathon expects. Raises ValueError if not valid.
:param str path: The app id.
:rtype: str
"""
if path is None:
return
# As seen in:
# https://github.com/mesosphere/marathon/blob/0c11661ca2f259f8a903d114ef79023649a6f04b/src/main/scala/mesosphere/marathon/state/PathId.scala#L71
for id in filter(None, path.strip('/').split('/')):
if not ID_PATTERN.match(id):
raise ValueError(
'invalid path (allowed: lowercase letters, digits, hyphen, "/", ".", ".."): %r' % path)
return path | [
"def",
"assert_valid_path",
"(",
"path",
")",
":",
"if",
"path",
"is",
"None",
":",
"return",
"# As seen in:",
"# https://github.com/mesosphere/marathon/blob/0c11661ca2f259f8a903d114ef79023649a6f04b/src/main/scala/mesosphere/marathon/state/PathId.scala#L71",
"for",
"id",
"in",
"fil... | 38.5625 | 26.25 |
def compile(self):
"""
Compile SQL and return 3-tuple ``(sql, params, keys)``.
Example usage::
(sql, params, keys) = sc.compile()
for row in cursor.execute(sql, params):
record = dict(zip(keys, row))
"""
params = self.column_params + self.join_params + self.params
if self.limit and self.limit >= 0:
self.sql_limit = 'LIMIT ?'
params += [self.limit]
return (self.sql, params, self.keys) | [
"def",
"compile",
"(",
"self",
")",
":",
"params",
"=",
"self",
".",
"column_params",
"+",
"self",
".",
"join_params",
"+",
"self",
".",
"params",
"if",
"self",
".",
"limit",
"and",
"self",
".",
"limit",
">=",
"0",
":",
"self",
".",
"sql_limit",
"=",... | 30.875 | 15.375 |
def delete_user(self, user, group):
""" Deletes user from group """
if not self.__contains__(group):
raise GroupNotExists
if not self.is_user_in(user, group):
raise UserNotInAGroup
self.new_groups.popvalue(group, user) | [
"def",
"delete_user",
"(",
"self",
",",
"user",
",",
"group",
")",
":",
"if",
"not",
"self",
".",
"__contains__",
"(",
"group",
")",
":",
"raise",
"GroupNotExists",
"if",
"not",
"self",
".",
"is_user_in",
"(",
"user",
",",
"group",
")",
":",
"raise",
... | 38.285714 | 4.142857 |
def readVersion(self):
""" Read the document version.
::
<designspace format="3">
"""
ds = self.root.findall("[@format]")[0]
raw_format = ds.attrib['format']
try:
self.documentFormatVersion = int(raw_format)
except ValueError:
# as of fontTools >= 3.27 'format' is formatted as a float "4.0"
self.documentFormatVersion = float(raw_format) | [
"def",
"readVersion",
"(",
"self",
")",
":",
"ds",
"=",
"self",
".",
"root",
".",
"findall",
"(",
"\"[@format]\"",
")",
"[",
"0",
"]",
"raw_format",
"=",
"ds",
".",
"attrib",
"[",
"'format'",
"]",
"try",
":",
"self",
".",
"documentFormatVersion",
"=",
... | 35.916667 | 14.166667 |
def _stringifyKeys(d):
"""
Return a copy of C{d} with C{str} keys.
@type d: C{dict} with C{unicode} keys.
@rtype: C{dict} with C{str} keys.
"""
return dict((k.encode('ascii'), v) for (k, v) in d.iteritems()) | [
"def",
"_stringifyKeys",
"(",
"d",
")",
":",
"return",
"dict",
"(",
"(",
"k",
".",
"encode",
"(",
"'ascii'",
")",
",",
"v",
")",
"for",
"(",
"k",
",",
"v",
")",
"in",
"d",
".",
"iteritems",
"(",
")",
")"
] | 28.25 | 11.75 |
def struct_from_value( cls, name, volume,
channel_list=None, mute=False, device=None ):
'Same arguments as with class instance init.'
chan_map = c.PA_CHANNEL_MAP()
if not channel_list: c.pa.channel_map_init_mono(chan_map)
else:
if not is_str(channel_list):
channel_list = b','.join(map(c.force_bytes, channel_list))
c.pa.channel_map_parse(chan_map, channel_list)
if not isinstance(volume, PulseVolumeInfo):
volume = PulseVolumeInfo(volume, chan_map.channels)
struct = c.PA_EXT_STREAM_RESTORE_INFO(
name=c.force_bytes(name),
mute=int(bool(mute)), device=c.force_bytes(device),
channel_map=chan_map, volume=volume.to_struct() )
return struct | [
"def",
"struct_from_value",
"(",
"cls",
",",
"name",
",",
"volume",
",",
"channel_list",
"=",
"None",
",",
"mute",
"=",
"False",
",",
"device",
"=",
"None",
")",
":",
"chan_map",
"=",
"c",
".",
"PA_CHANNEL_MAP",
"(",
")",
"if",
"not",
"channel_list",
"... | 41.4375 | 12.4375 |
def delete(self, name):
"""
Handle deletion race condition present in Django prior to 1.4
https://code.djangoproject.com/ticket/16108
"""
try:
super(StaticCompilerFileStorage, self).delete(name)
except OSError, e:
if e.errno != errno.ENOENT:
raise | [
"def",
"delete",
"(",
"self",
",",
"name",
")",
":",
"try",
":",
"super",
"(",
"StaticCompilerFileStorage",
",",
"self",
")",
".",
"delete",
"(",
"name",
")",
"except",
"OSError",
",",
"e",
":",
"if",
"e",
".",
"errno",
"!=",
"errno",
".",
"ENOENT",
... | 32.6 | 14.2 |
def _process_organism_dbxref(self, limit):
"""
This is the mapping between the flybase organisms and
external identifier "FBsp". We will want to use the NCBITaxon as
the primary, if possible, but will default to a blank node/internal id
if that is all that is available
But we need to make the equivalences/sameAs.
:param limit:
:return:
"""
if self.test_mode:
graph = self.testgraph
else:
graph = self.graph
model = Model(graph)
line_counter = 0
raw = '/'.join((self.rawdir, 'organism_dbxref'))
LOG.info("processing organsim dbxref mappings")
with open(raw, 'r') as f:
f.readline() # read the header row; skip
filereader = csv.reader(f, delimiter='\t', quotechar='\"')
for line in filereader:
(organism_dbxref_id, organism_id, dbxref_id, is_current) = line
if self.test_mode \
and int(organism_id) not in self.test_keys['organism']:
continue
organism_key = organism_id
if organism_key not in self.idhash['organism']:
continue
organism_id = self.idhash['organism'][organism_key]
dbxref_key = dbxref_id
dbxrefs = self.dbxrefs.get(dbxref_key)
if dbxrefs is not None:
for d in dbxrefs:
did = dbxrefs.get(d)
# don't make something sameAs itself
if did == organism_id:
continue
dlabel = self.label_hash.get(did)
model.addXref(organism_id, did)
if re.match(r'NCBITaxon', did):
model.makeLeader(did)
else:
model.addIndividualToGraph(did, dlabel)
line_counter += 1
if not self.test_mode and limit is not None and line_counter > limit:
break
return | [
"def",
"_process_organism_dbxref",
"(",
"self",
",",
"limit",
")",
":",
"if",
"self",
".",
"test_mode",
":",
"graph",
"=",
"self",
".",
"testgraph",
"else",
":",
"graph",
"=",
"self",
".",
"graph",
"model",
"=",
"Model",
"(",
"graph",
")",
"line_counter"... | 37.732143 | 18.446429 |
def _longest_contig(self, contig_set, contig_lengths):
'''Returns the name of the longest contig, from the set of names contig_set. contig_lengths
is expected to be a dictionary of contig name => length.'''
longest_name = None
max_length = -1
for name in contig_set:
if contig_lengths[name] > max_length:
longest_name = name
max_length = contig_lengths[name]
assert max_length != -1
assert longest_name is not None
return longest_name | [
"def",
"_longest_contig",
"(",
"self",
",",
"contig_set",
",",
"contig_lengths",
")",
":",
"longest_name",
"=",
"None",
"max_length",
"=",
"-",
"1",
"for",
"name",
"in",
"contig_set",
":",
"if",
"contig_lengths",
"[",
"name",
"]",
">",
"max_length",
":",
"... | 44.5 | 15.666667 |
def get_tree_root(self):
""" Returns the absolute root node of current tree structure."""
root = self
while root.up is not None:
root = root.up
return root | [
"def",
"get_tree_root",
"(",
"self",
")",
":",
"root",
"=",
"self",
"while",
"root",
".",
"up",
"is",
"not",
"None",
":",
"root",
"=",
"root",
".",
"up",
"return",
"root"
] | 32.333333 | 13 |
def tic(self):
"""Start collecting stats for current batch.
Call before calling forward."""
if self.step % self.interval == 0:
for exe in self.exes:
for array in exe.arg_arrays:
array.wait_to_read()
for array in exe.aux_arrays:
array.wait_to_read()
self.queue = []
self.activated = True
self.step += 1 | [
"def",
"tic",
"(",
"self",
")",
":",
"if",
"self",
".",
"step",
"%",
"self",
".",
"interval",
"==",
"0",
":",
"for",
"exe",
"in",
"self",
".",
"exes",
":",
"for",
"array",
"in",
"exe",
".",
"arg_arrays",
":",
"array",
".",
"wait_to_read",
"(",
")... | 35.833333 | 6.75 |
def implicitly_declare_ro(instructions: List[AbstractInstruction]):
"""
Implicitly declare a register named ``ro`` for backwards compatibility with Quil 1.
There used to be one un-named hunk of classical memory. Now there are variables with
declarations. Instead of::
MEASURE 0 [0]
You must now measure into a named register, idiomatically::
MEASURE 0 ro[0]
The ``MEASURE`` instruction will emit this (with a deprecation warning) if you're still
using bare integers for classical addresses. However, you must also declare memory in the
new scheme::
DECLARE ro BIT[8]
MEASURE 0 ro[0]
This method will determine if you are in "backwards compatibility mode" and will declare
a read-out ``ro`` register for you. If you program contains any DECLARE commands or if it
does not have any MEASURE x ro[x], this will not do anything.
This behavior is included for backwards compatibility and will be removed in future releases
of PyQuil. Please DECLARE all memory including ``ro``.
"""
ro_addrs: List[int] = []
for instr in instructions:
if isinstance(instr, Declare):
# The user has declared their own memory
# so they are responsible for all declarations and memory references.
return instructions
if isinstance(instr, Measurement):
if instr.classical_reg is None:
continue
if instr.classical_reg.name == 'ro':
ro_addrs += [instr.classical_reg.offset]
else:
# The user has used a classical register named something other than "ro"
# so they are responsible for all declarations and memory references.
return instructions
if len(ro_addrs) == 0:
return instructions
warnings.warn("Please DECLARE all memory. I'm adding a declaration for the `ro` register, "
"but I won't do this for you in the future.")
new_instr = instructions.copy()
new_instr.insert(0, Declare(name='ro', memory_type='BIT', memory_size=max(ro_addrs) + 1))
return new_instr | [
"def",
"implicitly_declare_ro",
"(",
"instructions",
":",
"List",
"[",
"AbstractInstruction",
"]",
")",
":",
"ro_addrs",
":",
"List",
"[",
"int",
"]",
"=",
"[",
"]",
"for",
"instr",
"in",
"instructions",
":",
"if",
"isinstance",
"(",
"instr",
",",
"Declare... | 38.925926 | 27.962963 |
def depth_december_average_ground_temperature(self, value=None):
"""Corresponds to IDD Field `depth_december_average_ground_temperature`
Args:
value (float): value for IDD Field `depth_december_average_ground_temperature`
Unit: C
if `value` is None it will not be checked against the
specification and is assumed to be a missing value
Raises:
ValueError: if `value` is not a valid value
"""
if value is not None:
try:
value = float(value)
except ValueError:
raise ValueError(
'value {} need to be of type float '
'for field `depth_december_average_ground_temperature`'.format(value))
self._depth_december_average_ground_temperature = value | [
"def",
"depth_december_average_ground_temperature",
"(",
"self",
",",
"value",
"=",
"None",
")",
":",
"if",
"value",
"is",
"not",
"None",
":",
"try",
":",
"value",
"=",
"float",
"(",
"value",
")",
"except",
"ValueError",
":",
"raise",
"ValueError",
"(",
"'... | 38.090909 | 23.545455 |
def get_waveform_filter_precondition(approximant, length, delta_f):
"""Return the data preconditioning factor for this approximant.
"""
if approximant in _filter_preconditions:
return _filter_preconditions[approximant](length, delta_f)
else:
return None | [
"def",
"get_waveform_filter_precondition",
"(",
"approximant",
",",
"length",
",",
"delta_f",
")",
":",
"if",
"approximant",
"in",
"_filter_preconditions",
":",
"return",
"_filter_preconditions",
"[",
"approximant",
"]",
"(",
"length",
",",
"delta_f",
")",
"else",
... | 39.857143 | 15.571429 |
def send(sender_instance):
"""Send a transactional email using SendInBlue API.
Site: https://www.sendinblue.com
API: https://apidocs.sendinblue.com/
"""
m = Mailin(
"https://api.sendinblue.com/v2.0",
sender_instance._kwargs.get("api_key")
)
data = {
"to": email_list_to_email_dict(sender_instance._recipient_list),
"cc": email_list_to_email_dict(sender_instance._cc),
"bcc": email_list_to_email_dict(sender_instance._bcc),
"from": email_address_to_list(sender_instance._from_email),
"subject": sender_instance._subject,
}
if sender_instance._template.is_html:
data.update({
"html": sender_instance._message,
"headers": {"Content-Type": "text/html; charset=utf-8"}
})
else:
data.update({"text": sender_instance._message})
if "attachments" in sender_instance._kwargs:
data["attachment"] = {}
for attachment in sender_instance._kwargs["attachments"]:
data["attachment"][attachment[0]] = base64.b64encode(attachment[1])
result = m.send_email(data)
if result["code"] != "success":
raise SendInBlueError(result["message"]) | [
"def",
"send",
"(",
"sender_instance",
")",
":",
"m",
"=",
"Mailin",
"(",
"\"https://api.sendinblue.com/v2.0\"",
",",
"sender_instance",
".",
"_kwargs",
".",
"get",
"(",
"\"api_key\"",
")",
")",
"data",
"=",
"{",
"\"to\"",
":",
"email_list_to_email_dict",
"(",
... | 38.032258 | 16.935484 |
def setup_catalogs(
portal, catalogs_definition={},
force_reindex=False, catalogs_extension={}, force_no_reindex=False):
"""
Setup the given catalogs. Redefines the map between content types and
catalogs and then checks the indexes and metacolumns, if one index/column
doesn't exist in the catalog_definition any more it will be
removed, otherwise, if a new index/column is found, it will be created.
:param portal: The Plone's Portal object
:param catalogs_definition: a dictionary with the following structure
{
CATALOG_ID: {
'types': ['ContentType', ...],
'indexes': {
'UID': 'FieldIndex',
...
},
'columns': [
'Title',
...
]
}
}
:type catalogs_definition: dict
:param force_reindex: Force to reindex the catalogs even if there's no need
:type force_reindex: bool
:param force_no_reindex: Force reindexing NOT to happen.
:param catalog_extensions: An extension for the primary catalogs definition
Same dict structure as param catalogs_definition. Allows to add
columns and indexes required by Bika-specific add-ons.
:type catalog_extensions: dict
"""
# If not given catalogs_definition, use the LIMS one
if not catalogs_definition:
catalogs_definition = getCatalogDefinitions()
# Merge the catalogs definition of the extension with the primary
# catalog definition
definition = _merge_catalog_definitions(catalogs_definition,
catalogs_extension)
# Mapping content types in catalogs
# This variable will be used to clean reindex the catalog. Saves the
# catalogs ids
archetype_tool = getToolByName(portal, 'archetype_tool')
clean_and_rebuild = _map_content_types(archetype_tool, definition)
# Indexing
for cat_id in definition.keys():
reindex = False
reindex = _setup_catalog(
portal, cat_id, definition.get(cat_id, {}))
if (reindex or force_reindex) and (cat_id not in clean_and_rebuild):
# add the catalog if it has not been added before
clean_and_rebuild.append(cat_id)
# Reindex the catalogs which needs it
if not force_no_reindex:
_cleanAndRebuildIfNeeded(portal, clean_and_rebuild)
return clean_and_rebuild | [
"def",
"setup_catalogs",
"(",
"portal",
",",
"catalogs_definition",
"=",
"{",
"}",
",",
"force_reindex",
"=",
"False",
",",
"catalogs_extension",
"=",
"{",
"}",
",",
"force_no_reindex",
"=",
"False",
")",
":",
"# If not given catalogs_definition, use the LIMS one",
... | 40.466667 | 19.866667 |
def _bisect(value_and_gradients_function, initial_args, f_lim):
"""Actual implementation of bisect given initial_args in a _BracketResult."""
def _loop_cond(curr):
# TODO(b/112524024): Also take into account max_iterations.
return ~tf.reduce_all(input_tensor=curr.stopped)
def _loop_body(curr):
"""Narrow down interval to satisfy opposite slope conditions."""
mid = value_and_gradients_function((curr.left.x + curr.right.x) / 2)
# Fail if function values at mid point are no longer finite; or left/right
# points are so close to it that we can't distinguish them any more.
failed = (curr.failed | ~is_finite(mid) |
tf.equal(mid.x, curr.left.x) | tf.equal(mid.x, curr.right.x))
# If mid point has a negative slope and the function value at that point is
# small enough, we can use it as a new left end point to narrow down the
# interval. If mid point has a positive slope, then we have found a suitable
# right end point to bracket a minima within opposite slopes. Otherwise, the
# mid point has a negative slope but the function value at that point is too
# high to work as left end point, we are in the same situation in which we
# started the loop so we just update the right end point and continue.
to_update = ~(curr.stopped | failed)
update_left = (mid.df < 0) & (mid.f <= f_lim)
left = val_where(to_update & update_left, mid, curr.left)
right = val_where(to_update & ~update_left, mid, curr.right)
# We're done when the right end point has a positive slope.
stopped = curr.stopped | failed | (right.df >= 0)
return [_IntermediateResult(
iteration=curr.iteration,
stopped=stopped,
failed=failed,
num_evals=curr.num_evals + 1,
left=left,
right=right)]
# The interval needs updating if the right end point has a negative slope and
# the value of the function at that point is too high. It is not a valid left
# end point but along with the current left end point, it encloses another
# minima. The loop above tries to narrow the interval so that it satisfies the
# opposite slope conditions.
return tf.while_loop(
cond=_loop_cond, body=_loop_body, loop_vars=[initial_args])[0] | [
"def",
"_bisect",
"(",
"value_and_gradients_function",
",",
"initial_args",
",",
"f_lim",
")",
":",
"def",
"_loop_cond",
"(",
"curr",
")",
":",
"# TODO(b/112524024): Also take into account max_iterations.",
"return",
"~",
"tf",
".",
"reduce_all",
"(",
"input_tensor",
... | 49.155556 | 25.177778 |
def incident_path(cls, project, incident):
    """Return a fully-qualified incident string."""
    template = "projects/{project}/incidents/{incident}"
    return google.api_core.path_template.expand(
        template,
        project=project,
        incident=incident,
    )
"def",
"incident_path",
"(",
"cls",
",",
"project",
",",
"incident",
")",
":",
"return",
"google",
".",
"api_core",
".",
"path_template",
".",
"expand",
"(",
"\"projects/{project}/incidents/{incident}\"",
",",
"project",
"=",
"project",
",",
"incident",
"=",
"in... | 38.571429 | 11.571429 |
def id_pools_vmac_ranges(self):
    """
    Gets the IdPoolsRanges API Client for VMAC Ranges.

    Returns:
        IdPoolsRanges: the client, created lazily on first access.
    """
    # Lazily create and cache the client; subsequent calls reuse it.
    cached = self.__id_pools_vmac_ranges
    if not cached:
        cached = IdPoolsRanges('vmac', self.__connection)
        self.__id_pools_vmac_ranges = cached
    return cached
"def",
"id_pools_vmac_ranges",
"(",
"self",
")",
":",
"if",
"not",
"self",
".",
"__id_pools_vmac_ranges",
":",
"self",
".",
"__id_pools_vmac_ranges",
"=",
"IdPoolsRanges",
"(",
"'vmac'",
",",
"self",
".",
"__connection",
")",
"return",
"self",
".",
"__id_pools_v... | 32 | 15.2 |
def ArcTan2(x: vertex_constructor_param_types, y: vertex_constructor_param_types, label: Optional[str]=None) -> Vertex:
    """
    Calculates the signed angle, in radians, between the positive x-axis and a ray to the point (x, y) from the origin

    :param x: x coordinate
    :param y: y coordinate
    :param label: optional label for the resulting vertex
    """
    jvm_vertex_class = context.jvm_view().ArcTan2Vertex
    x_vertex = cast_to_double_vertex(x)
    y_vertex = cast_to_double_vertex(y)
    return Double(jvm_vertex_class, label, x_vertex, y_vertex)
"def",
"ArcTan2",
"(",
"x",
":",
"vertex_constructor_param_types",
",",
"y",
":",
"vertex_constructor_param_types",
",",
"label",
":",
"Optional",
"[",
"str",
"]",
"=",
"None",
")",
"->",
"Vertex",
":",
"return",
"Double",
"(",
"context",
".",
"jvm_view",
"(... | 52.125 | 36.375 |
def certify_list(
    value, certifier=None, min_len=None, max_len=None, required=True, schema=None,
    include_collections=False,
):
    """
    Certifier for a list.
    :param list value:
        The array to be certified.
    :param func certifier:
        A function to be called on each value in the iterable to check that it is valid.
    :param int min_len:
        The minimum acceptable length for the iterable. If None, the minimum length is not checked.
    :param int max_len:
        The maximum acceptable length for the iterable. If None, the maximum length is not checked.
    :param bool required:
        Whether the value can't be `None`. Defaults to True.
    :param tuple schema:
        The schema against which the value should be checked.
        For single-item tuple make sure to add comma at the end of schema tuple, that is,
        for example: schema=(certify_int(),)
    :param bool include_collections:
        Include types from collections.
    :return:
        The certified list.
    :rtype:
        list
    :raises CertifierTypeError:
        The type is invalid
    :raises CertifierValueError:
        The valid is invalid
    """
    certify_bool(include_collections, required=True)
    # Return the result so the documented ":return: The certified list"
    # contract actually holds; previously the result was silently discarded.
    return certify_iterable(
        value=value,
        types=(list, MutableSequence, Sequence) if include_collections else (list,),
        certifier=certifier,
        min_len=min_len,
        max_len=max_len,
        schema=schema,
        required=required,
    )
"def",
"certify_list",
"(",
"value",
",",
"certifier",
"=",
"None",
",",
"min_len",
"=",
"None",
",",
"max_len",
"=",
"None",
",",
"required",
"=",
"True",
",",
"schema",
"=",
"None",
",",
"include_collections",
"=",
"False",
",",
")",
":",
"certify_bool... | 34.833333 | 21.357143 |
def with_params(self, **kwargs):
    """Modify various execution parameters of a Pipeline before it runs.

    This method has no effect in test mode.

    Args:
      kwargs: Attributes to modify on this Pipeline instance before it has
        been executed. Only 'backoff_seconds', 'backoff_factor',
        'max_attempts' and 'target' are accepted.

    Returns:
      This Pipeline instance, for easy chaining.

    Raises:
      UnexpectedPipelineError: If the pipeline was already scheduled.
      TypeError: If an unsupported keyword argument is supplied.
    """
    if _TEST_MODE:
        logging.info(
            'Setting runtime parameters for %s#%s: %r',
            self, self.pipeline_id, kwargs)
        return self
    if self.pipeline_id is not None:
        raise UnexpectedPipelineError(
            'May only call with_params() on a Pipeline that has not yet '
            'been scheduled for execution.')
    ALLOWED = ('backoff_seconds', 'backoff_factor', 'max_attempts', 'target')
    # items() instead of the Python-2-only iteritems(); behaves the same on
    # Python 2 (builds a list) and also works on Python 3.
    for name, value in kwargs.items():
        if name not in ALLOWED:
            raise TypeError('Unexpected keyword: %s=%r' % (name, value))
        setattr(self, name, value)
    return self
"def",
"with_params",
"(",
"self",
",",
"*",
"*",
"kwargs",
")",
":",
"if",
"_TEST_MODE",
":",
"logging",
".",
"info",
"(",
"'Setting runtime parameters for %s#%s: %r'",
",",
"self",
",",
"self",
".",
"pipeline_id",
",",
"kwargs",
")",
"return",
"self",
"if"... | 31.551724 | 19.37931 |
def get_reversed_unification_program(angles, control_indices,
                                     target, controls, mode):
    """
    Build the Program for the reversed circuit of one unification step in
    the decomposition of uniformly controlled rotations.

    If :math:`n` is the number of controls, the indices within
    control_indices must range from 1 to :math:`n`, inclusive, and both
    control_indices and angles must have length :math:`2^n`.

    :param list angles: Rotation angles of the decomposition, left to right.
    :param list control_indices: Control positions for the CNOTs used when
                                 decomposing uniformly controlled rotations;
                                 see get_cnot_control_positions for the
                                 labelling convention.
    :param int target: Index of the target of all rotations.
    :param list controls: Control indices, ordered from bottom to top.
    :param str mode: Either 'phase' (controlled RZ rotations) or
                     'magnitude' (controlled RY rotations).
    :return: The reversed circuit of this unification step.
    :rtype: Program
    """
    gate_for_mode = {'phase': RZ, 'magnitude': RY}
    if mode not in gate_for_mode:
        raise ValueError("mode must be \'phase\' or \'magnitude\'")
    gate = gate_for_mode[mode]
    reversed_gates = []
    for position, angle in enumerate(angles):
        if angle == 0:
            continue
        # angle is negated in conjugated/reversed circuit
        reversed_gates.append(gate(-angle, target))
        if controls:
            control_qubit = controls[control_indices[position] - 1]
            reversed_gates.append(CNOT(control_qubit, target))
    return Program().inst(reversed_gates[::-1])
"def",
"get_reversed_unification_program",
"(",
"angles",
",",
"control_indices",
",",
"target",
",",
"controls",
",",
"mode",
")",
":",
"if",
"mode",
"==",
"'phase'",
":",
"gate",
"=",
"RZ",
"elif",
"mode",
"==",
"'magnitude'",
":",
"gate",
"=",
"RY",
"el... | 43.295455 | 22.068182 |
def _new_stream(self, idx):
    '''Randomly select and create a new stream.

    Parameters
    ----------
    idx : int, [0:n_streams - 1]
        The stream index to replace
    '''
    # A stream is only activated when its weight is truthy (not 0 / None).
    weight = self.stream_weights_[idx]
    self.streams_[idx] = self.streamers[idx].iterate() if weight else None
    # The replacement stream starts with a fresh sample count.
    self.stream_counts_[idx] = 0
"def",
"_new_stream",
"(",
"self",
",",
"idx",
")",
":",
"# Don't activate the stream if the weight is 0 or None",
"if",
"self",
".",
"stream_weights_",
"[",
"idx",
"]",
":",
"self",
".",
"streams_",
"[",
"idx",
"]",
"=",
"self",
".",
"streamers",
"[",
"idx",
... | 30.4375 | 16.4375 |
def get_unique_backends():
    """Gets the unique backends that are available.

    Returns:
        list: Unique available hardware (non-simulator) backends.

    Raises:
        QiskitError: No backends available.
    """
    seen_names = []
    hardware_backends = []
    for backend in IBMQ.backends():
        name = backend.name()
        # Skip duplicates and simulators; duplicate check first so the
        # configuration() call is avoided for already-seen backends.
        if name in seen_names or backend.configuration().simulator:
            continue
        seen_names.append(name)
        hardware_backends.append(backend)
    if not hardware_backends:
        raise QiskitError('No backends available.')
    return hardware_backends
"def",
"get_unique_backends",
"(",
")",
":",
"backends",
"=",
"IBMQ",
".",
"backends",
"(",
")",
"unique_hardware_backends",
"=",
"[",
"]",
"unique_names",
"=",
"[",
"]",
"for",
"back",
"in",
"backends",
":",
"if",
"back",
".",
"name",
"(",
")",
"not",
... | 31.368421 | 14.736842 |
def do_exit(*args):
    '''
    We have to override the exit because calling sys.exit will only actually exit the main thread,
    and as we're in a Xml-rpc server, that won't work.
    '''
    try:
        import java.lang.System
    except ImportError:
        # Not running under Jython: kill the whole process immediately.
        exit_code = args[0] if len(args) == 1 else 0
        os._exit(exit_code)
    else:
        # Jython: use the JVM-level exit.
        java.lang.System.exit(1)
"def",
"do_exit",
"(",
"*",
"args",
")",
":",
"try",
":",
"import",
"java",
".",
"lang",
".",
"System",
"java",
".",
"lang",
".",
"System",
".",
"exit",
"(",
"1",
")",
"except",
"ImportError",
":",
"if",
"len",
"(",
"args",
")",
"==",
"1",
":",
... | 25.2 | 25.466667 |
def set_features(self):
    """Merge all PSMs and peptides from the merge files into self.features."""
    psm_strings = readers.generate_psms_multiple_fractions_strings(
        self.mergefiles, self.ns)
    peptides = preparation.merge_peptides(self.mergefiles, self.ns)
    self.features = {'psm': psm_strings, 'peptide': peptides}
"def",
"set_features",
"(",
"self",
")",
":",
"allpsms_str",
"=",
"readers",
".",
"generate_psms_multiple_fractions_strings",
"(",
"self",
".",
"mergefiles",
",",
"self",
".",
"ns",
")",
"allpeps",
"=",
"preparation",
".",
"merge_peptides",
"(",
"self",
".",
"... | 51.166667 | 17.5 |
def VEXTRACTF128(cpu, dest, src, offset):
    """Extract Packed Floating-Point Values

    Extracts 128 bits of packed floating-point values from the source
    operand (second operand) at a 128-bit offset selected by ``offset``
    into the destination operand (first operand). The destination may be
    either an XMM register or a 128-bit memory location.
    """
    lane = offset.read()
    low_bit = lane * 128
    high_bit = (lane + 1) * 128
    dest.write(Operators.EXTRACT(src.read(), low_bit, high_bit))
"def",
"VEXTRACTF128",
"(",
"cpu",
",",
"dest",
",",
"src",
",",
"offset",
")",
":",
"offset",
"=",
"offset",
".",
"read",
"(",
")",
"dest",
".",
"write",
"(",
"Operators",
".",
"EXTRACT",
"(",
"src",
".",
"read",
"(",
")",
",",
"offset",
"*",
"1... | 48.8 | 21 |
def get_job_id_from_name(self, job_name):
    """Retrieve the first job ID matching the given name, or None."""
    summaries = self._client.list_jobs(
        jobQueue=self._queue, jobStatus='RUNNING')['jobSummaryList']
    for job in summaries:
        if job['jobName'] == job_name:
            return job['jobId']
"def",
"get_job_id_from_name",
"(",
"self",
",",
"job_name",
")",
":",
"jobs",
"=",
"self",
".",
"_client",
".",
"list_jobs",
"(",
"jobQueue",
"=",
"self",
".",
"_queue",
",",
"jobStatus",
"=",
"'RUNNING'",
")",
"[",
"'jobSummaryList'",
"]",
"matching_jobs",... | 57.666667 | 18.833333 |
def insert_contribution_entries(database, entries):
    """Insert a set of records of a contribution report in the provided database.

    Inserts new records into the contributions collection of the provided
    database without checking for conflicting entries.

    @param database: The MongoDB database to operate on. The contributions
        collection will be used from this database.
    @type database: pymongo.database.Database
    @param entries: The entries to insert into the database.
    @type entries: dict
    """
    cleaned_entries = [clean_entry(entry) for entry in entries]
    database.contributions.insert(cleaned_entries, continue_on_error=True)
"def",
"insert_contribution_entries",
"(",
"database",
",",
"entries",
")",
":",
"entries",
"=",
"map",
"(",
"clean_entry",
",",
"entries",
")",
"database",
".",
"contributions",
".",
"insert",
"(",
"entries",
",",
"continue_on_error",
"=",
"True",
")"
] | 42.428571 | 17.714286 |
def create_event_study_tear_sheet(factor_data,
                                  prices=None,
                                  avgretplot=(5, 15),
                                  rate_of_ret=True,
                                  n_bars=50):
    """
    Creates an event study tear sheet for analysis of a specific event.
    Parameters
    ----------
    factor_data : pd.DataFrame - MultiIndex
        A MultiIndex DataFrame indexed by date (level 0) and asset (level 1),
        containing the values for a single event, forward returns for each
        period, the factor quantile/bin that factor value belongs to, and
        (optionally) the group the asset belongs to.
    prices : pd.DataFrame, required only if 'avgretplot' is provided
        A DataFrame indexed by date with assets in the columns containing the
        pricing data.
        - See full explanation in utils.get_clean_factor_and_forward_returns
    avgretplot: tuple (int, int) - (before, after), optional
        If not None, plot event style average cumulative returns within a
        window (pre and post event).
    rate_of_ret : bool, optional
        Display rate of return instead of simple return in 'Mean Period Wise
        Return By Factor Quantile' and 'Period Wise Return By Factor Quantile'
        plots
    n_bars : int, optional
        Number of bars in event distribution plot
    """
    # Event studies analyse a single event signal, so returns are computed
    # without long/short demeaning throughout.
    long_short = False
    plotting.plot_quantile_statistics_table(factor_data)
    # Figure 1: how the events are distributed over time.
    gf = GridFigure(rows=1, cols=1)
    plotting.plot_events_distribution(events=factor_data['factor'],
                                      num_bars=n_bars,
                                      ax=gf.next_row())
    plt.show()
    gf.close()
    # Optional: average cumulative returns in a window around the event;
    # requires pricing data.
    if prices is not None and avgretplot is not None:
        create_event_returns_tear_sheet(factor_data=factor_data,
                                        prices=prices,
                                        avgretplot=avgretplot,
                                        long_short=long_short,
                                        group_neutral=False,
                                        std_bar=True,
                                        by_group=False)
    # Factor-weighted portfolio returns: equal weight, not demeaned.
    factor_returns = perf.factor_returns(factor_data,
                                         demeaned=False,
                                         equal_weight=True)
    # Mean returns per factor quantile; optionally converted to a
    # one-period rate of return so different periods are comparable.
    mean_quant_ret, std_quantile = \
        perf.mean_return_by_quantile(factor_data,
                                     by_group=False,
                                     demeaned=long_short)
    if rate_of_ret:
        mean_quant_ret = \
            mean_quant_ret.apply(utils.rate_of_return, axis=0,
                                 base_period=mean_quant_ret.columns[0])
    # Same per-quantile returns, broken down by date (for the violin plot).
    mean_quant_ret_bydate, std_quant_daily = \
        perf.mean_return_by_quantile(factor_data,
                                     by_date=True,
                                     by_group=False,
                                     demeaned=long_short)
    if rate_of_ret:
        mean_quant_ret_bydate = mean_quant_ret_bydate.apply(
            utils.rate_of_return, axis=0,
            base_period=mean_quant_ret_bydate.columns[0]
        )
    # Figure 2: one bar plot, one violin plot, plus one cumulative-return
    # plot per forward-return period.
    fr_cols = len(factor_returns.columns)
    vertical_sections = 2 + fr_cols * 1
    gf = GridFigure(rows=vertical_sections, cols=1)
    plotting.plot_quantile_returns_bar(mean_quant_ret,
                                       by_group=False,
                                       ylim_percentiles=None,
                                       ax=gf.next_row())
    plotting.plot_quantile_returns_violin(mean_quant_ret_bydate,
                                          ylim_percentiles=(1, 99),
                                          ax=gf.next_row())
    # Fall back to a business-day calendar when the index carries no freq.
    trading_calendar = factor_data.index.levels[0].freq
    if trading_calendar is None:
        trading_calendar = pd.tseries.offsets.BDay()
        warnings.warn(
            "'freq' not set in factor_data index: assuming business day",
            UserWarning
        )
    for p in factor_returns:
        plotting.plot_cumulative_returns(
            factor_returns[p],
            period=p,
            freq=trading_calendar,
            ax=gf.next_row()
        )
    plt.show()
    gf.close()
"def",
"create_event_study_tear_sheet",
"(",
"factor_data",
",",
"prices",
"=",
"None",
",",
"avgretplot",
"=",
"(",
"5",
",",
"15",
")",
",",
"rate_of_ret",
"=",
"True",
",",
"n_bars",
"=",
"50",
")",
":",
"long_short",
"=",
"False",
"plotting",
".",
"p... | 38.766355 | 20.82243 |
def update_display(cb, pool, params, plane, qwertz):
    """
    Draws everything.
    :param cb: Cursebox instance.
    :type cb: cursebox.Cursebox
    :param pool: Worker pool forwarded to draw_panel.
    :param params: Current application parameters.
    :type params: params.Params
    :param plane: Plane containing the current Mandelbrot values.
    :type plane: plane.Plane
    :param qwertz: presumably a flag for QWERTZ keyboard layouts, forwarded
        to draw_menu -- TODO confirm against draw_menu.
    :return: None
    """
    cb.clear()
    draw_panel(cb, pool, params, plane)
    update_position(params)  # Update Mandelbrot-space coordinates before drawing them
    draw_menu(cb, params, qwertz)
    cb.refresh()
"def",
"update_display",
"(",
"cb",
",",
"pool",
",",
"params",
",",
"plane",
",",
"qwertz",
")",
":",
"cb",
".",
"clear",
"(",
")",
"draw_panel",
"(",
"cb",
",",
"pool",
",",
"params",
",",
"plane",
")",
"update_position",
"(",
"params",
")",
"# Upd... | 30.882353 | 16.176471 |
def one(nodes, or_none=False):
    """
    Assert that there is exactly one node in the given list, and return it.
    With or_none=True, an empty list yields None instead of failing.
    """
    if or_none and not nodes:
        return None
    count = len(nodes)
    assert count == 1, 'Expected 1 result. Received %d results.' % count
    return nodes[0]
"def",
"one",
"(",
"nodes",
",",
"or_none",
"=",
"False",
")",
":",
"if",
"not",
"nodes",
"and",
"or_none",
":",
"return",
"None",
"assert",
"len",
"(",
"nodes",
")",
"==",
"1",
",",
"'Expected 1 result. Received %d results.'",
"%",
"(",
"len",
"(",
"nod... | 30.777778 | 17.666667 |
def invalidate(self, comparison: Comparison[Entity, Entity]) -> None:
    """
    Invalidate paths in a zone. See https://api.cloudflare.com
    /#zone-purge-individual-files-by-url-and-cache-tags
    :param comparison: The comparison whose changes to invalidate.
    :raises requests.exceptions.RequestException: On request failure.
    :raises RuntimeError: If the request succeeded but could not be carried
        out.
    """
    # Retry with exponential backoff on transport errors, but give up
    # immediately on 4xx client errors, since retrying those cannot succeed.
    @backoff.on_exception(backoff.expo,
                          requests.exceptions.RequestException,
                          max_tries=5,
                          giveup=lambda e:
                          400 <= e.response.status_code < 500)
    def _request(chunk: List[str]) -> requests.Response:
        """
        Send a purge cache request to Cloudflare. This method will
        automatically retry with a back-off in case of server-side error.
        :param chunk: The list of paths to purge. These should not have a
                      leading slash, and will be combined with the prefix
                      to form a URL.
        :return: Cloudflare's response to our successful request.
        :raises requests.exceptions.RequestException: If the request fails
            on the 5th attempt.
        """
        response = self._session.delete(
            f'{self._API_BASE}/client/v4/zones/{self._zone}/purge_cache',
            headers={
                'X-Auth-Email': self._email,
                'X-Auth-Key': self._key
            },
            json={
                'files': [self._prefix + path for path in chunk]
            })
        response.raise_for_status()
        return response
    # Both deleted and modified paths must be purged from the edge cache.
    paths = itertools.chain(comparison.deleted(), comparison.modified())
    for chunk_ in util.chunk(paths, self._MAX_INVALIDATIONS_PER_REQUEST):
        chunk_ = list(chunk_)
        if not chunk_:
            # nothing to do
            # NOTE(review): this returns rather than continues -- it assumes
            # util.chunk only yields an empty chunk once the path iterator
            # is exhausted; confirm against util.chunk.
            return
        logger.info('Invalidating %d paths (%s)', len(chunk_),
                    ', '.join(chunk_))
        response_ = _request(chunk_)
        logger.debug('Cloudflare invalidation response [%d]: %s',
                     response_.status_code,
                     response_.text)
        json_ = response_.json()
        if not json_['success']:
            # this would be strange - the API returned a success response
            # code, but success was not "true"
            # TODO more appropriate exception, with handling upstream
            raise RuntimeError('Cloudflare reported failure')
        logger.info('Created invalidation %s', json_['result']['id'])
"def",
"invalidate",
"(",
"self",
",",
"comparison",
":",
"Comparison",
"[",
"Entity",
",",
"Entity",
"]",
")",
"->",
"None",
":",
"@",
"backoff",
".",
"on_exception",
"(",
"backoff",
".",
"expo",
",",
"requests",
".",
"exceptions",
".",
"RequestException"... | 47.440678 | 19.881356 |
def as_numpy(dataset, graph=None):
  """Converts a `tf.data.Dataset` to an iterable of NumPy arrays.

  `as_numpy` converts a possibly nested structure of `tf.data.Dataset`s
  and `tf.Tensor`s to iterables of NumPy arrays and NumPy arrays, respectively.

  Args:
    dataset: a possibly nested structure of `tf.data.Dataset`s and/or
      `tf.Tensor`s.
    graph: `tf.Graph`, optional, explicitly set the graph to use.

  Returns:
    A structure matching `dataset` where `tf.data.Dataset`s are converted to
    generators of NumPy arrays and `tf.Tensor`s are converted to NumPy arrays.

  Raises:
    ValueError: if any leaf of `dataset` is neither a `tf.Tensor` nor a
      `tf.data.Dataset`.
  """
  nested_ds = dataset
  del dataset
  # Flatten
  flat_ds = tf.nest.flatten(nested_ds)
  flat_np = []
  # Type check for Tensors and Datasets
  for ds_el in flat_ds:
    if not (isinstance(ds_el, tf.Tensor) or tf_compat.is_dataset(ds_el)):
      # Build the type structure only on failure; previously it was rebuilt
      # for every element (O(n^2)) even when all elements were valid.
      types = [type(el) for el in flat_ds]
      types = tf.nest.pack_sequence_as(nested_ds, types)
      raise ValueError("Arguments to as_numpy must be tf.Tensors or "
                       "tf.data.Datasets. Got: %s" % types)
  if tf.executing_eagerly():
    # Eager mode
    for ds_el in flat_ds:
      if isinstance(ds_el, tf.Tensor):
        np_el = ds_el.numpy()
      elif tf_compat.is_dataset(ds_el):
        np_el = _eager_dataset_iterator(ds_el)
      else:
        assert False
      flat_np.append(np_el)
  else:
    # Graph mode
    # First create iterators for datasets
    with utils.maybe_with_graph(graph, create_if_none=False):
      ds_iters = [
          tf.compat.v1.data.make_one_shot_iterator(ds_el).get_next()
          for ds_el in flat_ds if tf_compat.is_dataset(ds_el)
      ]
    ds_iters = [_graph_dataset_iterator(ds_iter, graph) for ds_iter in ds_iters]
    # Then create numpy arrays for tensors
    with utils.nogpu_session(graph) as sess:  # Shared session for tf.Tensor
      # Calling sess.run once so that randomness is shared.
      np_arrays = sess.run([tensor for tensor in flat_ds
                            if not tf_compat.is_dataset(tensor)])
    # Merge the dataset iterators and np arrays
    iter_ds = iter(ds_iters)
    iter_array = iter(np_arrays)
    flat_np = [
        next(iter_ds) if tf_compat.is_dataset(ds_el) else next(iter_array)
        for ds_el in flat_ds
    ]
  # Nest
  return tf.nest.pack_sequence_as(nested_ds, flat_np)
"def",
"as_numpy",
"(",
"dataset",
",",
"graph",
"=",
"None",
")",
":",
"nested_ds",
"=",
"dataset",
"del",
"dataset",
"# Flatten",
"flat_ds",
"=",
"tf",
".",
"nest",
".",
"flatten",
"(",
"nested_ds",
")",
"flat_np",
"=",
"[",
"]",
"# Type check for Tensor... | 33.701493 | 22.925373 |
def get_single_child_from_xml(elem, tag):
    """
    Get a single child tag from an XML element.
    Similar to "elem.find(tag)", but warns if there are multiple child tags with the given name.
    """
    matches = elem.findall(tag)
    if len(matches) > 1:
        logging.warning('Tag "%s" has more than one child tags with name "%s" in input file, '
                        'ignoring all but the first.',
                        elem.tag, tag)
    return matches[0] if matches else None
"def",
"get_single_child_from_xml",
"(",
"elem",
",",
"tag",
")",
":",
"children",
"=",
"elem",
".",
"findall",
"(",
"tag",
")",
"if",
"not",
"children",
":",
"return",
"None",
"if",
"len",
"(",
"children",
")",
">",
"1",
":",
"logging",
".",
"warning"... | 38.615385 | 16.615385 |
def _make_summary_statistic(attr):
  """Factory for implementing summary statistics, eg, mean, stddev, mode."""
  def _fn(self, **kwargs):
    """Implements summary statistic, eg, mean, stddev, mode."""
    # Statistic of the wrapped distribution; presumably shaped
    # [batch..., event...] -- standard for distribution statistics.
    x = getattr(self.distribution, attr)(**kwargs)
    # Reshape to insert one singleton dim per sample_shape dimension:
    # [batch..., 1, ..., 1, event...].
    shape = prefer_static.concat([
        self.distribution.batch_shape_tensor(),
        prefer_static.ones(prefer_static.rank_from_shape(self.sample_shape),
                           dtype=self.sample_shape.dtype),
        self.distribution.event_shape_tensor(),
    ], axis=0)
    x = tf.reshape(x, shape=shape)
    # Broadcast the singleton dims up to the full sample_shape:
    # [batch..., sample_shape..., event...].
    shape = prefer_static.concat([
        self.distribution.batch_shape_tensor(),
        self.sample_shape,
        self.distribution.event_shape_tensor(),
    ], axis=0)
    return tf.broadcast_to(x, shape)
  return _fn
"def",
"_make_summary_statistic",
"(",
"attr",
")",
":",
"def",
"_fn",
"(",
"self",
",",
"*",
"*",
"kwargs",
")",
":",
"\"\"\"Implements summary statistic, eg, mean, stddev, mode.\"\"\"",
"x",
"=",
"getattr",
"(",
"self",
".",
"distribution",
",",
"attr",
")",
"... | 40.789474 | 12 |
def package_name(self, PACKAGES_TXT):
    """Returns list with all the names of packages repository
    """
    # Each "PACKAGE NAME:" line carries a package filename after column 14;
    # split_package(...)[0] extracts the bare package name.
    return [split_package(line[14:].strip())[0]
            for line in PACKAGES_TXT.splitlines()
            if line.startswith("PACKAGE NAME:")]
"def",
"package_name",
"(",
"self",
",",
"PACKAGES_TXT",
")",
":",
"packages",
"=",
"[",
"]",
"for",
"line",
"in",
"PACKAGES_TXT",
".",
"splitlines",
"(",
")",
":",
"if",
"line",
".",
"startswith",
"(",
"\"PACKAGE NAME:\"",
")",
":",
"packages",
".",
"ap... | 39.875 | 10.125 |
def get(self):
    """Get run list"""
    LOG.info('Returning all ansible runs')
    return [run_model.format_response(run)
            for run in self.backend_store.list_runs()]
"def",
"get",
"(",
"self",
")",
":",
"LOG",
".",
"info",
"(",
"'Returning all ansible runs'",
")",
"response",
"=",
"[",
"]",
"for",
"run",
"in",
"self",
".",
"backend_store",
".",
"list_runs",
"(",
")",
":",
"response",
".",
"append",
"(",
"run_model",
... | 34.142857 | 13.857143 |
def execute(self, string, max_tacts=None):
    """Execute algorithm (if max_times = None, there can be forever loop)."""
    self.init_tape(string)
    tacts_done = 0
    self.execute_once()
    while self.state != self.TERM_STATE:
        tacts_done += 1
        if max_tacts is not None and tacts_done >= max_tacts:
            raise TimeoutError("algorithm hasn't been stopped")
        self.execute_once()
    return self.get_tape()
"def",
"execute",
"(",
"self",
",",
"string",
",",
"max_tacts",
"=",
"None",
")",
":",
"self",
".",
"init_tape",
"(",
"string",
")",
"counter",
"=",
"0",
"while",
"True",
":",
"self",
".",
"execute_once",
"(",
")",
"if",
"self",
".",
"state",
"==",
... | 33.642857 | 17.285714 |
def __find_node_by_rule(self, point, search_rule, cur_node):
    """!
    @brief Search for a node that satisfies the given search rule.
    @details Walks the kd-tree from cur_node (or the root when cur_node is
             None). Returns the first node on the search path that both
             covers the point on its discriminator axis and satisfies
             search_rule, or None when no such node exists.

    @param[in] point (list): Coordinates of the point whose node should be found.
    @param[in] search_rule (lambda): Predicate called to check whether a node matches.
    @param[in] cur_node (node): Node from which search should be started.

    @return (node) Matching node, or None.
    """
    node = cur_node if cur_node is not None else self.__root
    while node:
        if node.data[node.disc] <= point[node.disc]:
            # Candidate: check the caller-supplied rule before descending.
            if search_rule(node):
                return node
            node = node.right
        else:
            node = node.left
    return None
"def",
"__find_node_by_rule",
"(",
"self",
",",
"point",
",",
"search_rule",
",",
"cur_node",
")",
":",
"req_node",
"=",
"None",
"if",
"cur_node",
"is",
"None",
":",
"cur_node",
"=",
"self",
".",
"__root",
"while",
"cur_node",
":",
"if",
"cur_node",
".",
... | 38.5 | 23.21875 |
def _filter_attribute(mcs, attribute_name, attribute_value):
    """
    decides whether the given attribute should be excluded from tracing or not
    """
    # __module__ is pure metadata; attributes may also opt out explicitly
    # by carrying a _trace_disable marker.
    if attribute_name == '__module__':
        return True
    return hasattr(attribute_value, '_trace_disable')
"def",
"_filter_attribute",
"(",
"mcs",
",",
"attribute_name",
",",
"attribute_value",
")",
":",
"if",
"attribute_name",
"==",
"'__module__'",
":",
"return",
"True",
"elif",
"hasattr",
"(",
"attribute_value",
",",
"'_trace_disable'",
")",
":",
"return",
"True",
... | 36.444444 | 14.888889 |
def send(self, data):
    """
    Sending data back to client
    :return:
    """
    # Normalize bare LF line endings to CRLF before writing back.
    text = data.decode().replace("\n", "\r\n")
    self.writer.write(text.encode())
"def",
"send",
"(",
"self",
",",
"data",
")",
":",
"data",
"=",
"data",
".",
"decode",
"(",
")",
".",
"replace",
"(",
"\"\\n\"",
",",
"\"\\r\\n\"",
")",
"self",
".",
"writer",
".",
"write",
"(",
"data",
".",
"encode",
"(",
")",
")"
] | 26.285714 | 8.285714 |
def calc_A(Ys):
    '''Return the matrix A from a list of Y vectors.'''
    # Accumulate the sum of outer products Y Y^T (each Y must have 3 entries).
    total = 0
    for Y in Ys:
        column = np.reshape(Y, (3, 1))
        row = np.reshape(Y, (1, 3))
        total = total + np.dot(column, row)
    return total
"def",
"calc_A",
"(",
"Ys",
")",
":",
"return",
"sum",
"(",
"np",
".",
"dot",
"(",
"np",
".",
"reshape",
"(",
"Y",
",",
"(",
"3",
",",
"1",
")",
")",
",",
"np",
".",
"reshape",
"(",
"Y",
",",
"(",
"1",
",",
"3",
")",
")",
")",
"for",
"Y... | 40 | 20.5 |
def poke(self, context):
    """
    Pokes for a mail attachment on the mail server.

    :param context: The context that is being provided when poking.
    :type context: dict
    :return: True if attachment with the given name is present and False if not.
    :rtype: bool
    """
    self.log.info('Poking for %s', self.attachment_name)
    with ImapHook(imap_conn_id=self.conn_id) as imap_hook:
        found = imap_hook.has_mail_attachment(
            name=self.attachment_name,
            mail_folder=self.mail_folder,
            check_regex=self.check_regex
        )
    return found
"def",
"poke",
"(",
"self",
",",
"context",
")",
":",
"self",
".",
"log",
".",
"info",
"(",
"'Poking for %s'",
",",
"self",
".",
"attachment_name",
")",
"with",
"ImapHook",
"(",
"imap_conn_id",
"=",
"self",
".",
"conn_id",
")",
"as",
"imap_hook",
":",
... | 36.352941 | 18.117647 |
def weighted_choice(self, probabilities, key):
    """Makes a weighted choice between several options.

    Probabilities is a list of 2-tuples, (probability, option). The weights
    don't need to add up to anything; they are automatically scaled."""
    total = sum(weight for weight, _ in probabilities)
    remaining = total * self._random(key)
    for weight, option in probabilities:
        remaining -= weight
        if remaining <= 0:
            return option
return option | [
"def",
"weighted_choice",
"(",
"self",
",",
"probabilities",
",",
"key",
")",
":",
"total",
"=",
"sum",
"(",
"x",
"[",
"0",
"]",
"for",
"x",
"in",
"probabilities",
")",
"choice",
"=",
"total",
"*",
"self",
".",
"_random",
"(",
"key",
")",
"for",
"p... | 35.642857 | 17.428571 |
def setDefaultIREncoding(encoding):
'''
setDefaultIREncoding - Sets the default encoding used by IndexedRedis.
This will be the default encoding used for field data. You can override this on a
per-field basis by using an IRField (such as IRUnicodeField or IRRawField)
@param encoding - An encoding (like utf-8)
'''
try:
b''.decode(encoding)
except:
raise ValueError('setDefaultIREncoding was provided an invalid codec. Got (encoding="%s")' %(str(encoding), ))
global defaultIREncoding
defaultIREncoding = encoding | [
"def",
"setDefaultIREncoding",
"(",
"encoding",
")",
":",
"try",
":",
"b''",
".",
"decode",
"(",
"encoding",
")",
"except",
":",
"raise",
"ValueError",
"(",
"'setDefaultIREncoding was provided an invalid codec. Got (encoding=\"%s\")'",
"%",
"(",
"str",
"(",
"encoding"... | 34.866667 | 30.6 |
def generate_session_token(refresh_token, verbose):
    """
    Generates new session token from the given refresh token.

    :param refresh_token: refresh token to generate from
    :param verbose: whether expiration time should be added to output
    """
    platform = _get_platform(authenticated=False)
    session_token, expires_in = platform.generate_session_token(refresh_token)
    if not verbose:
        click.echo(session_token)
        return
    expiry_note = _color('YELLOW', "Expires in %d seconds" % expires_in)
    click.echo("%s\n\n%s" % (session_token, expiry_note))
"def",
"generate_session_token",
"(",
"refresh_token",
",",
"verbose",
")",
":",
"platform",
"=",
"_get_platform",
"(",
"authenticated",
"=",
"False",
")",
"session_token",
",",
"expires_in",
"=",
"platform",
".",
"generate_session_token",
"(",
"refresh_token",
")",... | 38.5 | 23.642857 |
def is_fresh(self):  # type: () -> bool
    """
    Checks whether the lock file is still up to date with the current hash.

    Returns False when the lock carries no content hash at all.
    """
    lock = self._lock.read()
    metadata = lock.get("metadata", {})
    if "content-hash" in metadata:
        # Use the already-extracted metadata mapping instead of re-indexing
        # the raw lock data (the original did a redundant double lookup).
        return self._content_hash == metadata["content-hash"]
    return False
"def",
"is_fresh",
"(",
"self",
")",
":",
"# type: () -> bool",
"lock",
"=",
"self",
".",
"_lock",
".",
"read",
"(",
")",
"metadata",
"=",
"lock",
".",
"get",
"(",
"\"metadata\"",
",",
"{",
"}",
")",
"if",
"\"content-hash\"",
"in",
"metadata",
":",
"re... | 31.454545 | 16.909091 |
def julian_day(t: date) -> int:
    """Convert a Python date to a Julian day number."""
    # Days elapsed between the Julian base date and t, added to the
    # Julian day number of that base date.
    days_since_base = (t - julian_base_date).days
    return julian_base_number + days_since_base
"def",
"julian_day",
"(",
"t",
":",
"date",
")",
"->",
"int",
":",
"# Compute the number of days from January 1, 2000 to date t",
"dt",
"=",
"t",
"-",
"julian_base_date",
"# Add the julian base number to the number of days from the julian base date to date t",
"return",
"julian_b... | 50.5 | 15.666667 |
def deltas(self):
    """
    Dictionary of relative offsets.  The keys are pairs of keys from
    the offset vector, (refkey, key), and the values are the relative
    offsets, offset[key] - offset[refkey].  The result always includes
    a "dummy" entry (refkey, refkey) whose value is 0.

    Raises ValueError if the offsetvector is empty (WARNING: this
    behaviour might change in the future).

    Example:

    >>> x = offsetvector({"H1": 0, "L1": 10, "V1": 20})
    >>> x.deltas
    {('H1', 'L1'): 10, ('H1', 'V1'): 20, ('H1', 'H1'): 0}

    See also .fromdeltas().

    BUGS:  the ordering of the keys within each tuple may be reversed
    in the future.
    """
    # FIXME: should return an empty dictionary for an empty
    # offsetvector instead of raising; .fromdeltas() already accepts
    # empty dictionaries.
    # NOTE: this arithmetic *must* match
    # time_slide_component_vectors() so the two results remain
    # directly comparable without floating-point round-off surprises.
    ref = self.refkey
    base = self[ref]
    return {(ref, key): self[key] - base for key in self}
"def",
"deltas",
"(",
"self",
")",
":",
"# FIXME: instead of raising ValueError when the",
"# offsetvector is empty this should return an empty",
"# dictionary. the inverse, .fromdeltas() accepts",
"# empty dictionaries",
"# NOTE: the arithmetic used to construct the offsets",
"# *must* mat... | 35.948718 | 18.615385 |
def validate_state(self, model, context=None):
    """
    Run every registered state validator against *model* and collect
    any errors into a result object.

    :param model: object or dict being validated
    :param context: object, dict or None passed through to validators
    :return: shiftschema.result.Result holding accumulated state errors
    """
    outcome = Result()
    for validator in self.state:
        failure = validator.run(
            value=model,
            model=model,
            context=context
        )
        if failure:
            outcome.add_state_errors(failure)
    return outcome
"def",
"validate_state",
"(",
"self",
",",
"model",
",",
"context",
"=",
"None",
")",
":",
"result",
"=",
"Result",
"(",
")",
"for",
"state_validator",
"in",
"self",
".",
"state",
":",
"error",
"=",
"state_validator",
".",
"run",
"(",
"value",
"=",
"mo... | 30.105263 | 10.842105 |
def _unpack_header(self, data):
    """
    Decode the fixed-size header at the start of *data*.

    Returns the tuple produced by struct.unpack using this instance's
    header format string; bytes beyond the header are ignored.
    """
    header_bytes = data[:self._struct_header_size]
    return struct.unpack(self._struct_header, header_bytes)
"def",
"_unpack_header",
"(",
"self",
",",
"data",
")",
":",
"return",
"struct",
".",
"unpack",
"(",
"self",
".",
"_struct_header",
",",
"data",
"[",
":",
"self",
".",
"_struct_header_size",
"]",
")"
] | 35.166667 | 7.833333 |
def map_entity(self, entity: dal.AssetClass):
    """Map a persistence-layer AssetClass entity onto a model object.

    Copies id, parent id, name, allocation and sort order across; a
    record with no parent is treated as a root node and given depth 0.

    :param entity: dal.AssetClass row to convert
    :return: populated model.AssetClass instance
    """
    obj = model.AssetClass()
    obj.id = entity.id
    obj.parent_id = entity.parentid
    obj.name = entity.name
    obj.allocation = entity.allocation
    obj.sort_order = entity.sortorder
    # entity.stock_links and entity.diff_adjustment are intentionally
    # not mapped here.
    # Identity check instead of `== None` (PEP 8); plain attribute
    # values are expected here, not column expressions.
    if entity.parentid is None:
        obj.depth = 0
    return obj
"def",
"map_entity",
"(",
"self",
",",
"entity",
":",
"dal",
".",
"AssetClass",
")",
":",
"obj",
"=",
"model",
".",
"AssetClass",
"(",
")",
"obj",
".",
"id",
"=",
"entity",
".",
"id",
"obj",
".",
"parent_id",
"=",
"entity",
".",
"parentid",
"obj",
... | 29.2 | 12.333333 |
def get_gatk_version(self):
    """Retrieve GATK version, handling locally and config cached versions.

    Calling version can be expensive due to all the startup and shutdown
    of JVMs, so we prefer cached version information.
    """
    # Populate the cached version attributes once, on first use.
    if self._gatk_version is None:
        self._set_default_versions(self._config)
    # GATK4 is the default unless explicitly disabled in the tools-off list.
    if "gatk4" not in dd.get_tools_off({"config": self._config}):
        # In cases where we don't have manifest versions. Not possible to get
        # version from commandline with GATK4 alpha version
        if self._gatk4_version is None:
            self._gatk4_version = "4.0"
        return self._gatk4_version
    elif self._gatk_version is not None:
        return self._gatk_version
    else:
        # Fall back to probing the GATK3 jar (or the conda wrapper) and
        # cache the result for subsequent calls.
        if self._has_gatk_conda_wrapper():
            gatk_jar = None
        else:
            gatk_jar = self._get_jar("GenomeAnalysisTK", ["GenomeAnalysisTKLite"], allow_missing=True)
        self._gatk_version = get_gatk_version(gatk_jar, config=self._config)
        return self._gatk_version
"def",
"get_gatk_version",
"(",
"self",
")",
":",
"if",
"self",
".",
"_gatk_version",
"is",
"None",
":",
"self",
".",
"_set_default_versions",
"(",
"self",
".",
"_config",
")",
"if",
"\"gatk4\"",
"not",
"in",
"dd",
".",
"get_tools_off",
"(",
"{",
"\"config... | 47.304348 | 17.521739 |
def is_same_dict(d1, d2):
    """Recursively assert that two dictionaries hold equal values.

    Key order is ignored.  Raises AssertionError (or KeyError for a
    missing key) on any mismatch; returns None when the dicts agree.
    """
    # Check in both directions so extra keys on either side are caught.
    for left, right in ((d1, d2), (d2, d1)):
        for key, val in left.items():
            if isinstance(val, dict):
                is_same_dict(val, right[key])
            else:
                assert d1[key] == d2[key]
"def",
"is_same_dict",
"(",
"d1",
",",
"d2",
")",
":",
"for",
"k",
",",
"v",
"in",
"d1",
".",
"items",
"(",
")",
":",
"if",
"isinstance",
"(",
"v",
",",
"dict",
")",
":",
"is_same_dict",
"(",
"v",
",",
"d2",
"[",
"k",
"]",
")",
"else",
":",
... | 26.357143 | 12.785714 |
def _list_of_dicts_to_column_headers(list_of_dicts):
    """
    Detects if all entries in a list of ``dict``'s have identical keys.

    Parameters
    ----------
    list_of_dicts : list
        List of dictionaries to test for identical keys.

    Returns
    -------
    list or None
        List of column headers if all dictionaries possessed the same
        keys.  Returns ``None`` otherwise, as well as for fewer than two
        entries or any non-dict entry.
    """
    if len(list_of_dicts) < 2 or not all(isinstance(item, dict) for item in list_of_dicts):
        return None
    # Materialize as a list so the documented return type holds
    # (``dict.keys()`` is a view object, not a list, on Python 3).
    column_headers = list(list_of_dicts[0].keys())
    for entry in list_of_dicts[1:]:
        # Equal key count plus every header present => identical key sets.
        if len(entry) != len(column_headers) or not all(header in entry for header in column_headers):
            return None
    return column_headers
"def",
"_list_of_dicts_to_column_headers",
"(",
"list_of_dicts",
")",
":",
"if",
"len",
"(",
"list_of_dicts",
")",
"<",
"2",
"or",
"not",
"all",
"(",
"isinstance",
"(",
"item",
",",
"dict",
")",
"for",
"item",
"in",
"list_of_dicts",
")",
":",
"return",
"No... | 35.958333 | 26.041667 |
def GetEntries(self, parser_mediator, match=None, **unused_kwargs):
    """Extracts relevant user timestamp entries.

    Walks each password-policy XML blob attached to the user account and
    produces events for password-set, last-login and failed-login times.

    Args:
      parser_mediator (ParserMediator): mediates interactions between parsers
          and other components, such as storage and dfvfs.
      match (Optional[dict[str: object]]): keys extracted from PLIST_KEYS.
    """
    if 'name' not in match or 'uid' not in match:
        return
    account = match['name'][0]
    uid = match['uid'][0]
    for policy in match.get('passwordpolicyoptions', []):
        try:
            xml_policy = ElementTree.fromstring(policy)
        except (ElementTree.ParseError, LookupError) as exception:
            logger.error((
                'Unable to parse XML structure for an user policy, account: '
                '{0:s} and uid: {1!s}, with error: {2!s}').format(
                    account, uid, exception))
            continue
        for dict_elements in xml_policy.iterfind('dict'):
            key_values = [value.text for value in iter(dict_elements)]
            # Taking a list and converting it to a dict, using every other item
            # as the key and the other one as the value.
            policy_dict = dict(zip(key_values[0::2], key_values[1::2]))
        # NOTE(review): '2001-01-01T00:00:00Z' is skipped as a default/unset
        # sentinel -- presumably the Mac OS "absolute time" epoch; confirm.
        time_string = policy_dict.get('passwordLastSetTime', None)
        if time_string and time_string != '2001-01-01T00:00:00Z':
            try:
                date_time = dfdatetime_time_elements.TimeElements()
                date_time.CopyFromStringISO8601(time_string)
            except ValueError:
                date_time = None
                parser_mediator.ProduceExtractionWarning(
                    'unable to parse password last set time string: {0:s}'.format(
                        time_string))
            shadow_hash_data = match.get('ShadowHashData', None)
            if date_time and isinstance(shadow_hash_data, (list, tuple)):
                # Extract the hash password information.
                # It is store in the attribute ShadowHasData which is
                # a binary plist data; However biplist only extracts one
                # level of binary plist, then it returns this information
                # as a string.
                # TODO: change this into a DataRange instead. For this we
                # need the file offset and size of the ShadowHashData value data.
                shadow_hash_data = shadow_hash_data[0]
                resolver_context = context.Context()
                fake_file = fake_file_io.FakeFile(
                    resolver_context, shadow_hash_data)
                shadow_hash_data_path_spec = fake_path_spec.FakePathSpec(
                    location='ShadowHashData')
                fake_file.open(path_spec=shadow_hash_data_path_spec)
                try:
                    plist_file = biplist.readPlist(fake_file)
                except biplist.InvalidPlistException:
                    plist_file = {}
                salted_hash = plist_file.get('SALTED-SHA512-PBKDF2', None)
                if salted_hash:
                    # Re-encode binary salt/entropy as ASCII hex to build a
                    # crack-tool style '$ml$iterations$salt$entropy' string.
                    salt_hex_bytes = codecs.encode(salted_hash['salt'], 'hex')
                    salt_string = codecs.decode(salt_hex_bytes, 'ascii')
                    entropy_hex_bytes = codecs.encode(salted_hash['entropy'], 'hex')
                    entropy_string = codecs.decode(entropy_hex_bytes, 'ascii')
                    password_hash = '$ml${0:d}${1:s}${2:s}'.format(
                        salted_hash['iterations'], salt_string, entropy_string)
                else:
                    password_hash = 'N/A'
                event_data = plist_event.PlistTimeEventData()
                event_data.desc = (
                    'Last time {0:s} ({1!s}) changed the password: {2!s}').format(
                        account, uid, password_hash)
                event_data.key = 'passwordLastSetTime'
                event_data.root = self._ROOT
                event = time_events.DateTimeValuesEvent(
                    date_time, definitions.TIME_DESCRIPTION_WRITTEN)
                parser_mediator.ProduceEventWithEventData(event, event_data)
        time_string = policy_dict.get('lastLoginTimestamp', None)
        if time_string and time_string != '2001-01-01T00:00:00Z':
            try:
                date_time = dfdatetime_time_elements.TimeElements()
                date_time.CopyFromStringISO8601(time_string)
            except ValueError:
                date_time = None
                parser_mediator.ProduceExtractionWarning(
                    'unable to parse last login time string: {0:s}'.format(
                        time_string))
            if date_time:
                event_data = plist_event.PlistTimeEventData()
                event_data.desc = 'Last login from {0:s} ({1!s})'.format(
                    account, uid)
                event_data.key = 'lastLoginTimestamp'
                event_data.root = self._ROOT
                event = time_events.DateTimeValuesEvent(
                    date_time, definitions.TIME_DESCRIPTION_WRITTEN)
                parser_mediator.ProduceEventWithEventData(event, event_data)
        time_string = policy_dict.get('failedLoginTimestamp', None)
        if time_string and time_string != '2001-01-01T00:00:00Z':
            try:
                date_time = dfdatetime_time_elements.TimeElements()
                date_time.CopyFromStringISO8601(time_string)
            except ValueError:
                date_time = None
                parser_mediator.ProduceExtractionWarning(
                    'unable to parse failed login time string: {0:s}'.format(
                        time_string))
            if date_time:
                event_data = plist_event.PlistTimeEventData()
                event_data.desc = (
                    'Last failed login from {0:s} ({1!s}) ({2!s} times)').format(
                        account, uid, policy_dict.get('failedLoginCount', 0))
                event_data.key = 'failedLoginTimestamp'
                event_data.root = self._ROOT
                event = time_events.DateTimeValuesEvent(
                    date_time, definitions.TIME_DESCRIPTION_WRITTEN)
                parser_mediator.ProduceEventWithEventData(event, event_data)
"def",
"GetEntries",
"(",
"self",
",",
"parser_mediator",
",",
"match",
"=",
"None",
",",
"*",
"*",
"unused_kwargs",
")",
":",
"if",
"'name'",
"not",
"in",
"match",
"or",
"'uid'",
"not",
"in",
"match",
":",
"return",
"account",
"=",
"match",
"[",
"'nam... | 42.823077 | 21.661538 |
def to_type(self, dtype: type, *cols, **kwargs):
    """
    Convert column values to a given type in the main dataframe.

    :param dtype: a type to convert to: ex: ``str``
    :type dtype: type
    :param \*cols: names of the columns
    :type \*cols: str, at least one
    :param \*\*kwargs: keyword arguments for ``df.astype``
    :type \*\*kwargs: optional

    :example: ``ds.to_type(str, "mycol")``
    """
    try:
        known = self.df.columns.values
        for name in cols:
            # Bail out entirely on the first unknown column.
            if name not in known:
                self.err("Column " + name + " not found")
                return
            self.df[name] = self.df[name].astype(dtype, **kwargs)
    except Exception as e:
        self.err(e, "Can not convert to type")
"def",
"to_type",
"(",
"self",
",",
"dtype",
":",
"type",
",",
"*",
"cols",
",",
"*",
"*",
"kwargs",
")",
":",
"try",
":",
"allcols",
"=",
"self",
".",
"df",
".",
"columns",
".",
"values",
"for",
"col",
"in",
"cols",
":",
"if",
"col",
"not",
"i... | 34.913043 | 13.608696 |
def key_file_private(self):
    '''str: path to the private key used by Ansible to connect to virtual
    machines (by default looks for a file with name
    :attr:`key_name <tmdeploy.config.CloudSection.key_name>` in ``~/.ssh``
    directory)
    '''
    # Lazily compute the default on first access.  The assignment goes
    # through ``self.key_file_private`` rather than the backing attribute,
    # presumably to route the default through the property's setter
    # (validation/normalization) -- TODO confirm.
    if not hasattr(self, '_key_file_private'):
        self.key_file_private = '~/.ssh/{key}'.format(key=self.key_name)
    return self._key_file_private
"def",
"key_file_private",
"(",
"self",
")",
":",
"if",
"not",
"hasattr",
"(",
"self",
",",
"'_key_file_private'",
")",
":",
"self",
".",
"key_file_private",
"=",
"'~/.ssh/{key}'",
".",
"format",
"(",
"key",
"=",
"self",
".",
"key_name",
")",
"return",
"se... | 47.666667 | 22.555556 |
def read_memory_block8(self, addr, size):
    """
    Read a block of unaligned bytes from memory.

    Returns a list of byte values, with any software breakpoint
    instructions filtered back to the original memory contents by
    the breakpoint manager.
    """
    raw = self.ap.read_memory_block8(addr, size)
    return self.bp_manager.filter_memory_unaligned_8(addr, size, raw)
"def",
"read_memory_block8",
"(",
"self",
",",
"addr",
",",
"size",
")",
":",
"data",
"=",
"self",
".",
"ap",
".",
"read_memory_block8",
"(",
"addr",
",",
"size",
")",
"return",
"self",
".",
"bp_manager",
".",
"filter_memory_unaligned_8",
"(",
"addr",
",",... | 39.857143 | 10.714286 |
def encrypt(self, key):
    """Encrypt the sensitive keys and sign the state with *key*.

    This makes the state unreadable by the server, since it contains
    information that would allow faking proof of storage.  Calling this
    on an already-encrypted state is a no-op.

    :param key: the key to encrypt and sign with
    """
    if self.encrypted:
        return
    # Encrypt with a fresh random IV; CFB mode keeps lengths unchanged.
    self.iv = Random.new().read(AES.block_size)
    cipher = AES.new(key, AES.MODE_CFB, self.iv)
    self.f_key = cipher.encrypt(self.f_key)
    self.alpha_key = cipher.encrypt(self.alpha_key)
    self.encrypted = True
    # Sign last so the HMAC covers the encrypted form.
    self.hmac = self.get_hmac(key)
"def",
"encrypt",
"(",
"self",
",",
"key",
")",
":",
"if",
"(",
"self",
".",
"encrypted",
")",
":",
"return",
"# encrypt",
"self",
".",
"iv",
"=",
"Random",
".",
"new",
"(",
")",
".",
"read",
"(",
"AES",
".",
"block_size",
")",
"aes",
"=",
"AES",... | 35.294118 | 14.647059 |
def get_aspect(self, xspan, yspan):
    """
    Computes the aspect ratio of the plot.

    Numeric aspect values are returned unchanged; ``'equal'`` maps to
    the data-range ratio ``xspan/yspan``; ``'square'`` and anything
    else fall back to 1.
    """
    aspect = self.aspect
    if isinstance(aspect, (int, float)):
        return aspect
    if aspect == 'equal':
        return xspan/yspan
    return 1
"def",
"get_aspect",
"(",
"self",
",",
"xspan",
",",
"yspan",
")",
":",
"if",
"isinstance",
"(",
"self",
".",
"aspect",
",",
"(",
"int",
",",
"float",
")",
")",
":",
"return",
"self",
".",
"aspect",
"elif",
"self",
".",
"aspect",
"==",
"'square'",
... | 29.090909 | 8.181818 |
def mail_setup(path):
    """
    Set the module-level variables needed to send emails.

    Reads the ``[mail]`` section (``dest_mail``, ``smtp_server``,
    ``smtp_port``, ``src_server``) and stores the values in module
    globals.  ``dest_mail`` is split on commas into a list.

    :param path: open file-like object containing the config (despite
        the name, a file object is expected, not a filesystem path)
    """
    global dest_mails
    global smtp_server
    global smtp_port
    global src_server
    config = configparser.RawConfigParser()
    # read_file() replaces the readfp() alias, which was deprecated and
    # removed in Python 3.12.
    config.read_file(path)
    dest_mails = config.get('mail', 'dest_mail').split(',')
    smtp_server = config.get('mail', 'smtp_server')
    smtp_port = config.get('mail', 'smtp_port')
    src_server = config.get('mail', 'src_server')
"def",
"mail_setup",
"(",
"path",
")",
":",
"global",
"dest_mails",
"global",
"smtp_server",
"global",
"smtp_port",
"global",
"src_server",
"config",
"=",
"configparser",
".",
"RawConfigParser",
"(",
")",
"config",
".",
"readfp",
"(",
"path",
")",
"dest_mails",
... | 29.9375 | 13.0625 |
def histogram(self, stat, value, tags=None):
    """Report a histogram sample by forwarding it to the logger backend."""
    self._log('histogram', stat, value, tags)
"def",
"histogram",
"(",
"self",
",",
"stat",
",",
"value",
",",
"tags",
"=",
"None",
")",
":",
"self",
".",
"_log",
"(",
"'histogram'",
",",
"stat",
",",
"value",
",",
"tags",
")"
] | 42 | 4.333333 |
def empty_wav(wav_path: Union[Path, str]) -> bool:
    """Report whether the WAV file at *wav_path* holds zero audio frames."""
    with wave.open(str(wav_path), 'rb') as handle:
        return handle.getnframes() == 0
"def",
"empty_wav",
"(",
"wav_path",
":",
"Union",
"[",
"Path",
",",
"str",
"]",
")",
"->",
"bool",
":",
"with",
"wave",
".",
"open",
"(",
"str",
"(",
"wav_path",
")",
",",
"'rb'",
")",
"as",
"wav_f",
":",
"return",
"wav_f",
".",
"getnframes",
"(",... | 43.75 | 5.25 |
def jsonify(py_data, default=None, indent=4, sort_keys=True):
    """
    Converts the inputted Python data to JSON format.

    :param py_data | <variant>
    :param default | callable used to serialize otherwise unserializable
                     objects; falls back to py2json when not given.
                     (Bug fix: the original accepted this argument but
                     always passed py2json to json.dumps, ignoring it.)
    :param indent | <int>
    :param sort_keys | <bool>
    :return <str>
    """
    serializer = default if default is not None else py2json
    return json.dumps(py_data, default=serializer, indent=indent, sort_keys=sort_keys)
"def",
"jsonify",
"(",
"py_data",
",",
"default",
"=",
"None",
",",
"indent",
"=",
"4",
",",
"sort_keys",
"=",
"True",
")",
":",
"return",
"json",
".",
"dumps",
"(",
"py_data",
",",
"default",
"=",
"py2json",
",",
"indent",
"=",
"indent",
",",
"sort_... | 35.714286 | 16.857143 |
def or_(cls, obj, **kwargs):
    """Query an object with OR semantics.

    :param obj:
        object to test
    :param kwargs: query specified in kwargssql
    :return:
        `True` if at least one `kwargs` expression is `True`,
        `False` otherwise.
    :rtype: bool
    """
    # Delegate to the (name-mangled) class helper, combining the kwargs
    # expressions with operator.or_.
    return cls.__eval_seqexp(obj, operator.or_, **kwargs)
"def",
"or_",
"(",
"cls",
",",
"obj",
",",
"*",
"*",
"kwargs",
")",
":",
"return",
"cls",
".",
"__eval_seqexp",
"(",
"obj",
",",
"operator",
".",
"or_",
",",
"*",
"*",
"kwargs",
")"
] | 24.714286 | 19.928571 |
def call(cmd, input=None, assert_zero_exit_status=True, warn_on_non_zero_exist_status=False, **kwargs):
    """
    Run *cmd* in a subprocess and capture the result.

    :rtype: SubprocessResult

    Raises OSError if command was not found.
    Raises SubprocessError when ``assert_zero_exit_status`` is set and the
    process exits non-zero; otherwise the non-zero exit code is available
    as ``result.ret``.
    """
    # Guard against passing a whole command line as a single string
    # without shell=True (``basestring``: this is Python 2 code).
    if (not kwargs.get('shell')) and isinstance(cmd, basestring):
        raise ValueError('cmd should be list or tuple, not a string: %r' % cmd)
    result = SubprocessResult.call(cmd, input=input, **kwargs)
    if assert_zero_exit_status and result.ret != 0:
        raise SubprocessError(result)
    if warn_on_non_zero_exist_status and result.ret != 0:
        logger.warn('subprocess failed %r' % result)
    return result
"def",
"call",
"(",
"cmd",
",",
"input",
"=",
"None",
",",
"assert_zero_exit_status",
"=",
"True",
",",
"warn_on_non_zero_exist_status",
"=",
"False",
",",
"*",
"*",
"kwargs",
")",
":",
"if",
"(",
"not",
"kwargs",
".",
"get",
"(",
"'shell'",
")",
")",
... | 39 | 24.666667 |
def _check_function(self):
    ''' make some basic checks on the function to make sure it is valid:
    it must be callable and take exactly one argument'''
    # note, callable is valid for Python 2 and Python 3.2 onwards but
    # not inbetween
    if not callable(self._function):
        raise RuntimeError(
            "provided function '{0}' is not callable".
            format(str(self._function)))
    from inspect import getargspec
    arg_info = getargspec(self._function)
    if len(arg_info.args) != 1:
        # Debug aid left in place: dump the full argspec before raising
        # (Python 2 print statement).
        print str(arg_info)
        raise RuntimeError(
            "provided function should have one argument but found "
            "{0}".format(len(arg_info.args)))
"def",
"_check_function",
"(",
"self",
")",
":",
"# note, callable is valid for Python 2 and Python 3.2 onwards but",
"# not inbetween",
"if",
"not",
"callable",
"(",
"self",
".",
"_function",
")",
":",
"raise",
"RuntimeError",
"(",
"\"provided function '{0}' is not callable\... | 44.866667 | 13.533333 |
def start(self, *args):
    """
    Open a nested log section and deepen the indentation.

    Verbose logs have no nesting, so the call is a no-op there.
    Returns self so calls can be chained.
    """
    if self._is_verbose:
        return self
    self.writeln('start', *args)
    self._indent += 1
    return self
"def",
"start",
"(",
"self",
",",
"*",
"args",
")",
":",
"if",
"self",
".",
"_is_verbose",
":",
"# verbose log has no start method",
"return",
"self",
"self",
".",
"writeln",
"(",
"'start'",
",",
"*",
"args",
")",
"self",
".",
"_indent",
"+=",
"1",
"retu... | 20.666667 | 15.333333 |
def pairwise(iterable: Iterable[X]) -> Iterable[Tuple[X, X]]:
    """Iterate over consecutive pairs: s -> (s0,s1), (s1,s2), (s2, s3), ..."""
    left, right = itt.tee(iterable)
    next(right, None)
    return zip(left, right)
"def",
"pairwise",
"(",
"iterable",
":",
"Iterable",
"[",
"X",
"]",
")",
"->",
"Iterable",
"[",
"Tuple",
"[",
"X",
",",
"X",
"]",
"]",
":",
"a",
",",
"b",
"=",
"itt",
".",
"tee",
"(",
"iterable",
")",
"next",
"(",
"b",
",",
"None",
")",
"retu... | 39.8 | 15.2 |
def signed_session(self, session=None):
    # type: (Optional[requests.Session]) -> requests.Session
    """Create requests session with ApiKey.

    If a session object is provided, configure it directly. Otherwise,
    create a new session and return it.

    :param session: The session to configure for authentication
    :type session: requests.Session
    :rtype: requests.Session
    :raises ValueError: if ``session.params`` is not dict-like (e.g. bytes)
        and query-string keys need to be merged in.
    """
    session = super(ApiKeyCredentials, self).signed_session(session)
    # Header-based keys can always be merged in.
    session.headers.update(self.in_headers)
    try:
        # params is actually Union[bytes, MutableMapping[Text, Text]]
        session.params.update(self.in_query)  # type: ignore
    except AttributeError:  # requests.params can be bytes
        raise ValueError("session.params must be a dict to be used in ApiKeyCredentials")
    return session
"def",
"signed_session",
"(",
"self",
",",
"session",
"=",
"None",
")",
":",
"# type: (Optional[requests.Session]) -> requests.Session",
"session",
"=",
"super",
"(",
"ApiKeyCredentials",
",",
"self",
")",
".",
"signed_session",
"(",
"session",
")",
"session",
".",
... | 45.263158 | 20.736842 |
def _MakeConnection(self, database=""):
    """Repeat connection attempts to server until we get a valid connection.

    Retries every 0.5s on "Can't connect" errors until
    ``Mysql.max_connect_wait`` seconds have elapsed (0 means retry
    forever).  "Access denied" is treated as fatal immediately.
    """
    first_attempt_time = time.time()
    wait_time = config.CONFIG["Mysql.max_connect_wait"]
    while wait_time == 0 or time.time() - first_attempt_time < wait_time:
        try:
            connection_args = dict(
                user=config.CONFIG["Mysql.database_username"],
                db=database,
                charset="utf8",
                passwd=config.CONFIG["Mysql.database_password"],
                autocommit=True,
                cursorclass=cursors.DictCursor,
                host=config.CONFIG["Mysql.host"],
                port=config.CONFIG["Mysql.port"])
            # A configured client key switches the connection to SSL.
            key_path = config.CONFIG["Mysql.client_key_path"]
            if key_path:
                cert_path = config.CONFIG["Mysql.client_cert_path"]
                ca_cert_path = config.CONFIG["Mysql.ca_cert_path"]
                logging.debug("Client key file configured, trying to use SSL.")
                connection_args["ssl"] = {
                    "key": key_path,
                    "cert": cert_path,
                    "ca": ca_cert_path,
                }
            return MySQLdb.connect(**connection_args)
        except MySQLdb.OperationalError as e:
            # This is a fatal error, we just raise the top level exception here.
            if "Access denied" in str(e):
                raise Error(str(e))
            if "Can't connect" in str(e):
                logging.warning("Datastore connection retrying after failed with %s.",
                                str(e))
                time.sleep(.5)
                continue
            raise
    raise IOError("Unable to connect to Mysql database.")
"def",
"_MakeConnection",
"(",
"self",
",",
"database",
"=",
"\"\"",
")",
":",
"first_attempt_time",
"=",
"time",
".",
"time",
"(",
")",
"wait_time",
"=",
"config",
".",
"CONFIG",
"[",
"\"Mysql.max_connect_wait\"",
"]",
"while",
"wait_time",
"==",
"0",
"or",... | 36.325581 | 18.302326 |
def WriteFlowResponses(self, responses):
    """Writes FlowMessages and updates corresponding requests.

    Responses are written in batches; once a batch completes the
    matching requests are updated and any fully-answered client
    action requests are deleted.
    """
    if not responses:
        return
    for chunk in collection.Batch(responses, self._WRITE_ROWS_BATCH_SIZE):
        self._WriteFlowResponsesAndExpectedUpdates(chunk)
        finished = self._UpdateRequestsAndScheduleFPRs(chunk)
        if finished:
            self._DeleteClientActionRequest(finished)
"def",
"WriteFlowResponses",
"(",
"self",
",",
"responses",
")",
":",
"if",
"not",
"responses",
":",
"return",
"for",
"batch",
"in",
"collection",
".",
"Batch",
"(",
"responses",
",",
"self",
".",
"_WRITE_ROWS_BATCH_SIZE",
")",
":",
"self",
".",
"_WriteFlowR... | 30.214286 | 25.428571 |
def copy(self):
    """Return a copy of this constraint.

    Each attribute is itself immutable (except the function), so a new
    instance built from the same func, configurations, variables,
    vartype and name behaves identically to this one while being a
    distinct object.
    """
    cls = self.__class__
    return cls(self.func, self.configurations, self.variables, self.vartype, name=self.name)
"def",
"copy",
"(",
"self",
")",
":",
"# each object is itself immutable (except the function)",
"return",
"self",
".",
"__class__",
"(",
"self",
".",
"func",
",",
"self",
".",
"configurations",
",",
"self",
".",
"variables",
",",
"self",
".",
"vartype",
",",
... | 34.6 | 20.9 |
def __start_connection(self, context, node, ccallbacks=None):
    """Start a new connection, and manage it from a new greenlet.

    Spawns the connection's run loop, waits (bounded) for the initial
    negotiation to complete, and registers the (node, connection,
    greenlet) triple on success.  On timeout the greenlet is killed and
    EnvironmentError is raised so the next audit can retry.
    """
    _logger.debug("Creating connection object: CONTEXT=[%s] NODE=[%s]",
                  context, node)
    c = nsq.connection.Connection(
        context,
        node,
        self.__identify,
        self.__message_handler,
        self.__quit_ev,
        ccallbacks,
        ignore_quit=self.__connection_ignore_quit)
    g = gevent.spawn(c.run)
    # Now, wait for the thread to finish the connection.
    timeout_s = nsq.config.client.NEW_CONNECTION_NEGOTIATE_TIMEOUT_S
    if c.connected_ev.wait(timeout_s) is False:
        _logger.error("New connection to server [%s] timed-out. Cleaning-"
                      "up thread.", node)
        # Reap the greenlet so it doesn't linger after the timeout.
        g.kill()
        g.join()
        # We'll try again on the next audit.
        raise EnvironmentError("Connection to server [%s] failed." %
                               (node,))
    self.__connections.append((node, c, g))
"def",
"__start_connection",
"(",
"self",
",",
"context",
",",
"node",
",",
"ccallbacks",
"=",
"None",
")",
":",
"_logger",
".",
"debug",
"(",
"\"Creating connection object: CONTEXT=[%s] NODE=[%s]\"",
",",
"context",
",",
"node",
")",
"c",
"=",
"nsq",
".",
"co... | 33.484848 | 21.545455 |
def create_aeff(event_class, event_type, egy, cth):
    """Create an array of effective areas versus energy and incidence
    angle.  Binning in energy and incidence angle is controlled with
    the egy and cth input parameters.

    Parameters
    ----------
    event_class : str
        Event class string (e.g. P8R2_SOURCE_V6).

    event_type : list

    egy : array_like
        Evaluation points in energy (MeV).

    cth : array_like
        Evaluation points in cosine of the incidence angle.
    """
    irf = create_irf(event_class, event_type)
    irf.aeff().setPhiDependence(False)
    theta = np.degrees(np.arccos(cth))
    # Effective-area matrix: rows are true energy, columns incidence angle.
    exposure = np.zeros((len(egy), len(cth)))
    for i, energy in enumerate(egy):
        for j, angle in enumerate(theta):
            exposure[i, j] = irf.aeff().value(energy, angle, 0.0)
    return exposure
"def",
"create_aeff",
"(",
"event_class",
",",
"event_type",
",",
"egy",
",",
"cth",
")",
":",
"irf",
"=",
"create_irf",
"(",
"event_class",
",",
"event_type",
")",
"irf",
".",
"aeff",
"(",
")",
".",
"setPhiDependence",
"(",
"False",
")",
"theta",
"=",
... | 26.46875 | 18.96875 |
def get_filters(self, dataset):
    """Return the filters available for *dataset* as a two-column
    DataFrame (filter name and its description)."""
    available = self.filters(dataset)
    rows = [(name, info[0]) for name, info in available.items()]
    return pd.DataFrame(rows, columns=["Filter", "Description"])
"def",
"get_filters",
"(",
"self",
",",
"dataset",
")",
":",
"filters",
"=",
"self",
".",
"filters",
"(",
"dataset",
")",
"filt_",
"=",
"[",
"(",
"k",
",",
"v",
"[",
"0",
"]",
")",
"for",
"k",
",",
"v",
"in",
"filters",
".",
"items",
"(",
")",
... | 51.8 | 11 |
def emit(self, record):
    """Save a logging.LogRecord to our test record.

    Logs carry useful metadata such as the logger name and level information.
    We capture this in a structured format in the test record to enable
    filtering by client applications.

    Args:
      record: A logging.LogRecord to record.
    """
    try:
        message = self.format(record)
        # record.created is seconds since the epoch; store milliseconds.
        log_record = LogRecord(
            record.levelno, record.name, os.path.basename(record.pathname),
            record.lineno, int(record.created * 1000), message,
        )
        self._test_record.add_log_record(log_record)
        self._notify_update()
    except Exception:  # pylint: disable=broad-except
        # A logging handler must never raise into the logging call site.
        self.handleError(record)
"def",
"emit",
"(",
"self",
",",
"record",
")",
":",
"try",
":",
"message",
"=",
"self",
".",
"format",
"(",
"record",
")",
"log_record",
"=",
"LogRecord",
"(",
"record",
".",
"levelno",
",",
"record",
".",
"name",
",",
"os",
".",
"path",
".",
"bas... | 34.6 | 19.2 |
def prepend(self, _, child, name=None):
    """Insert *child* at the first position of this tag and return
    self so calls can be chained."""
    self._insert(child, prepend=True, name=name)
    return self
"def",
"prepend",
"(",
"self",
",",
"_",
",",
"child",
",",
"name",
"=",
"None",
")",
":",
"self",
".",
"_insert",
"(",
"child",
",",
"prepend",
"=",
"True",
",",
"name",
"=",
"name",
")",
"return",
"self"
] | 45.5 | 8.5 |
def _decorate(flush=True, attempts=1, only_authenticate=False):
    """
    Wrap a job function so that conn.login() or conn.authenticate() is
    executed first, retrying on login failure.

    Doing the real work for autologin and autoauthenticate to minimize
    code duplication.

    :type flush: bool
    :param flush: Whether to flush the last prompt from the buffer.
    :type attempts: int
    :param attempts: The number of login attempts if login fails.
    :type only_authenticate: bool
    :param only_authenticate: login or only authenticate (don't authorize)?
    :rtype: function
    :return: The wrapped function.
    """
    def decorator(function):
        def decorated(job, host, conn, *args, **kwargs):
            failures = 0
            # Retry until a login succeeds or the retry budget is spent.
            while True:
                try:
                    if only_authenticate:
                        conn.authenticate(flush=flush)
                    else:
                        conn.login(flush=flush)
                except LoginFailure:
                    failures += 1
                    if failures >= attempts:
                        raise
                    continue
                break
            return function(job, host, conn, *args, **kwargs)
        copy_labels(function, decorated)
        return decorated
    return decorator
"def",
"_decorate",
"(",
"flush",
"=",
"True",
",",
"attempts",
"=",
"1",
",",
"only_authenticate",
"=",
"False",
")",
":",
"def",
"decorator",
"(",
"function",
")",
":",
"def",
"decorated",
"(",
"job",
",",
"host",
",",
"conn",
",",
"*",
"args",
","... | 35.771429 | 16.285714 |
def periodogram_auto(self, oversampling=5, nyquist_factor=3,
                     return_periods=True):
    """Compute the periodogram on an automatically-determined grid.

    This function uses heuristic arguments to choose a suitable
    frequency grid for the data; the model may still be sensitive to
    periodicity at higher frequencies than this function returns,
    depending on the data window function.

    The final number of frequencies will be
    Nf = oversampling * nyquist_factor * len(t) / 2

    Parameters
    ----------
    oversampling : float
        the number of samples per approximate peak width
    nyquist_factor : float
        the highest frequency, in units of the nyquist frequency for
        points spread uniformly through the data range.

    Returns
    -------
    period : ndarray
        the grid of periods
    power : ndarray
        the power at each frequency
    """
    n_obs = len(self.t)
    baseline = np.max(self.t) - np.min(self.t)
    # Frequency step: oversample the ~1/T natural resolution.
    df = 1. / baseline / oversampling
    f0 = df
    n_freq = int(0.5 * oversampling * nyquist_factor * n_obs)
    freq = f0 + df * np.arange(n_freq)
    return 1. / freq, self._score_frequency_grid(f0, df, n_freq)
"def",
"periodogram_auto",
"(",
"self",
",",
"oversampling",
"=",
"5",
",",
"nyquist_factor",
"=",
"3",
",",
"return_periods",
"=",
"True",
")",
":",
"N",
"=",
"len",
"(",
"self",
".",
"t",
")",
"T",
"=",
"np",
".",
"max",
"(",
"self",
".",
"t",
... | 36.852941 | 19.117647 |
def _iq_request_coro_done_send_reply(self, request, task):
    """
    Called when an IQ request handler coroutine returns. `request` holds
    the IQ request which triggered the execution of the coroutine and
    `task` is the :class:`asyncio.Task` which tracks the running coroutine.

    Compose a response and send that response.
    """
    try:
        payload = task.result()
    except errors.XMPPError as err:
        # XMPP-level failures are relayed to the peer as-is.
        self._send_iq_reply(request, err)
    except Exception:
        # Any other failure maps to undefined-condition and is logged,
        # never leaked to the peer.
        response = self._compose_undefined_condition(request)
        self._enqueue(response)
        self._logger.exception("IQ request coroutine failed")
    else:
        self._send_iq_reply(request, payload)
"def",
"_iq_request_coro_done_send_reply",
"(",
"self",
",",
"request",
",",
"task",
")",
":",
"try",
":",
"payload",
"=",
"task",
".",
"result",
"(",
")",
"except",
"errors",
".",
"XMPPError",
"as",
"err",
":",
"self",
".",
"_send_iq_reply",
"(",
"request... | 41.222222 | 17.888889 |
def update(self, storagemodel:object, modeldefinition = None, hide = 0) -> StorageQueueModel:
    """ update the message in queue

    Requires the model to carry both ``id`` and ``pop_receipt`` from a
    previous get/peek; ``hide`` sets the new visibility timeout in
    seconds.  On success the refreshed pop_receipt is stored back on the
    model; on any failure the local reference is dropped and
    AzureStorageWrapException is raised.
    """
    if (storagemodel.id != '') and (storagemodel.pop_receipt != '') and (not storagemodel.id is None) and (not storagemodel.pop_receipt is None):
        try:
            content = storagemodel.getmessage()
            message = modeldefinition['queueservice'].update_message(storagemodel._queuename, storagemodel.id, storagemodel.pop_receipt, visibility_timeout = hide, content=content)
            storagemodel.content = content
            # The service hands back a fresh pop_receipt for further updates.
            storagemodel.pop_receipt = message.pop_receipt
        except Exception as e:
            msg = 'can not update queue message: queue {} with message.id {!s} because {!s}'.format(storagemodel._queuename, storagemodel.id, e)
            storagemodel = None
            raise AzureStorageWrapException(msg=msg)
    else:
        msg = 'cant update queuemessage in {!s} due to missing id {!s} and/or pop_receipt {!s}'.format(storagemodel._queuename, storagemodel.id, storagemodel.pop_receipt)
        storagemodel = None
        raise AzureStorageWrapException(msg=msg)
    return storagemodel
"def",
"update",
"(",
"self",
",",
"storagemodel",
":",
"object",
",",
"modeldefinition",
"=",
"None",
",",
"hide",
"=",
"0",
")",
"->",
"StorageQueueModel",
":",
"if",
"(",
"storagemodel",
".",
"id",
"!=",
"''",
")",
"and",
"(",
"storagemodel",
".",
"... | 60.75 | 41 |
def _validate_depedencies(batches):
    """Validate the transaction dependencies for the transactions contained
    within the sequence of batches.

    Given that all the batches are expected to be executed for the genesis
    block, any dependency must be the id of a transaction that appeared
    earlier in the sequence.

    Raises:
        CliException: if a transaction depends on an id not yet seen.
    """
    seen_transaction_ids = set()
    for batch in batches:
        for txn in batch.transactions:
            txn_header = TransactionHeader()
            txn_header.ParseFromString(txn.header)
            if txn_header.dependencies:
                # Loop variable renamed from ``id`` to avoid shadowing
                # the builtin.
                unsatisfied_deps = [
                    dep for dep in txn_header.dependencies
                    if dep not in seen_transaction_ids
                ]
                if unsatisfied_deps:
                    raise CliException(
                        'Unsatisfied dependency in given transactions:'
                        ' {}'.format(unsatisfied_deps))
            seen_transaction_ids.add(txn.header_signature)
"def",
"_validate_depedencies",
"(",
"batches",
")",
":",
"transaction_ids",
"=",
"set",
"(",
")",
"for",
"batch",
"in",
"batches",
":",
"for",
"txn",
"in",
"batch",
".",
"transactions",
":",
"txn_header",
"=",
"TransactionHeader",
"(",
")",
"txn_header",
".... | 41.73913 | 14.478261 |
def _pkg(jail=None, chroot=None, root=None):
    '''
    Returns the prefix for a pkg command: -j when a jail is specified,
    -c for a chroot, or -r for an alternate root (first match wins).
    '''
    prefix = ['pkg']
    if jail:
        prefix += ['-j', jail]
    elif chroot:
        prefix += ['-c', chroot]
    elif root:
        prefix += ['-r', root]
    return prefix
"def",
"_pkg",
"(",
"jail",
"=",
"None",
",",
"chroot",
"=",
"None",
",",
"root",
"=",
"None",
")",
":",
"ret",
"=",
"[",
"'pkg'",
"]",
"if",
"jail",
":",
"ret",
".",
"extend",
"(",
"[",
"'-j'",
",",
"jail",
"]",
")",
"elif",
"chroot",
":",
"... | 25.846154 | 20.461538 |
def is_number_type_geographical(num_type, country_code):
    """Tests whether a phone number has a geographical association,
    as represented by its type and the country it belongs to.

    This version of isNumberGeographical exists since calculating the
    phone number type is expensive; if we have already done this, we
    don't want to do it again.
    """
    if num_type == PhoneNumberType.FIXED_LINE:
        return True
    if num_type == PhoneNumberType.FIXED_LINE_OR_MOBILE:
        return True
    # Mobile numbers are geographical only in a small set of countries.
    return (num_type == PhoneNumberType.MOBILE and
            country_code in _GEO_MOBILE_COUNTRIES)
"def",
"is_number_type_geographical",
"(",
"num_type",
",",
"country_code",
")",
":",
"return",
"(",
"num_type",
"==",
"PhoneNumberType",
".",
"FIXED_LINE",
"or",
"num_type",
"==",
"PhoneNumberType",
".",
"FIXED_LINE_OR_MOBILE",
"or",
"(",
"(",
"country_code",
"in",... | 48.25 | 19.416667 |
def sheetDeleteEmpty(bookName=None):
    """Delete every sheet in a workbook which contains no data.

    A sheet counts as empty when its first column has neither a long
    name nor any data.  Defaults to the active book when *bookName* is
    not given.
    """
    if bookName is None:
        bookName = activeBook()
    existing = [x.lower() for x in bookNames()]
    if bookName.lower() not in existing:
        print("can't clean up a book that doesn't exist:", bookName)
        return
    poBook = PyOrigin.WorksheetPages(bookName)
    # Collect names first; deleting while iterating Layers() is unsafe.
    emptySheets = []
    for poSheet in poBook.Layers():
        firstCol = poSheet.Columns(0)
        if firstCol.GetLongName() == "" and firstCol.GetData() == []:
            emptySheets.append(poSheet.GetName())
    for sheetName in emptySheets:
        print("deleting empty sheet", sheetName)
        sheetDelete(bookName, sheetName)
"def",
"sheetDeleteEmpty",
"(",
"bookName",
"=",
"None",
")",
":",
"if",
"bookName",
"is",
"None",
":",
"bookName",
"=",
"activeBook",
"(",
")",
"if",
"not",
"bookName",
".",
"lower",
"(",
")",
"in",
"[",
"x",
".",
"lower",
"(",
")",
"for",
"x",
"i... | 43.375 | 13.8125 |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.