text stringlengths 89 104k | code_tokens list | avg_line_len float64 7.91 980 | score float64 0 630 |
|---|---|---|---|
def get_package_filename(filename, package_dir=None):
'''Return the filename of the data file.'''
if getattr(sys, 'frozen', False):
package_dir = os.path.join(
sys._MEIPASS,
os.path.basename(os.path.dirname(__file__))
)
elif not package_dir:
package_dir = os.path.dirname(__file__)
return os.path.join(package_dir, filename) | [
"def",
"get_package_filename",
"(",
"filename",
",",
"package_dir",
"=",
"None",
")",
":",
"if",
"getattr",
"(",
"sys",
",",
"'frozen'",
",",
"False",
")",
":",
"package_dir",
"=",
"os",
".",
"path",
".",
"join",
"(",
"sys",
".",
"_MEIPASS",
",",
"os",... | 34.454545 | 14.272727 |
async def rtm(self) -> AsyncIterator[Event]:
"""Connect to the realtime event API and start yielding events."""
response = cast(RTMStart, await self.api("rtm.start"))
self.me = Auto.generate(response.self_, "Me", recursive=False)
self.team = Auto.generate(response.team, "Team", recursive=False)
self.channels.fill(Channel.build(item) for item in response.channels)
self.users.fill(User.build(item) for item in response.users)
self.groups.fill(Group.build(item) for item in response.groups)
log.debug(
f"received {len(self.users)} users, {len(self.channels)} channels "
f"and {len(self.groups)} groups from rtm.start"
)
async with self.session.ws_connect(response["url"]) as ws:
async for msg in ws:
event: Event = Event.generate(msg.json(), recursive=False)
if event.type == "goodbye":
break
yield event | [
"async",
"def",
"rtm",
"(",
"self",
")",
"->",
"AsyncIterator",
"[",
"Event",
"]",
":",
"response",
"=",
"cast",
"(",
"RTMStart",
",",
"await",
"self",
".",
"api",
"(",
"\"rtm.start\"",
")",
")",
"self",
".",
"me",
"=",
"Auto",
".",
"generate",
"(",
... | 42.217391 | 25.869565 |
def get_section_by_url(url,
include_instructor_not_on_time_schedule=True):
"""
Returns a uw_sws.models.Section object
for the passed section url.
"""
if not course_url_pattern.match(url):
raise InvalidSectionURL(url)
return _json_to_section(
get_resource(url),
include_instructor_not_on_time_schedule=(
include_instructor_not_on_time_schedule)) | [
"def",
"get_section_by_url",
"(",
"url",
",",
"include_instructor_not_on_time_schedule",
"=",
"True",
")",
":",
"if",
"not",
"course_url_pattern",
".",
"match",
"(",
"url",
")",
":",
"raise",
"InvalidSectionURL",
"(",
"url",
")",
"return",
"_json_to_section",
"(",... | 32 | 11.230769 |
def getsystemhooks(self, page=1, per_page=20):
"""
Get all system hooks
:param page: Page number
:param per_page: Records per page
:return: list of hooks
"""
data = {'page': page, 'per_page': per_page}
request = requests.get(
self.hook_url, params=data, headers=self.headers,
verify=self.verify_ssl, auth=self.auth, timeout=self.timeout)
if request.status_code == 200:
return request.json()
else:
return False | [
"def",
"getsystemhooks",
"(",
"self",
",",
"page",
"=",
"1",
",",
"per_page",
"=",
"20",
")",
":",
"data",
"=",
"{",
"'page'",
":",
"page",
",",
"'per_page'",
":",
"per_page",
"}",
"request",
"=",
"requests",
".",
"get",
"(",
"self",
".",
"hook_url",... | 29.055556 | 15.722222 |
def _validate_iterable(self, is_iterable, key, value):
"""Validate fields with `iterable` key in schema set to True"""
if is_iterable:
try:
iter(value)
except TypeError:
self._error(key, "Must be iterable (e.g. a list or array)") | [
"def",
"_validate_iterable",
"(",
"self",
",",
"is_iterable",
",",
"key",
",",
"value",
")",
":",
"if",
"is_iterable",
":",
"try",
":",
"iter",
"(",
"value",
")",
"except",
"TypeError",
":",
"self",
".",
"_error",
"(",
"key",
",",
"\"Must be iterable (e.g.... | 42.142857 | 16.285714 |
def _RecurseOverObject(obj, factory, parent=None):
"""Recurses over a nested structure to look for changes in Suds objects.
Args:
obj: A parameter for a SOAP request field which is to be inspected and
will be packed for Suds if an xsi_type is specified, otherwise will be
left unaltered.
factory: The suds.client.Factory object which can create instances of the
classes generated from the WSDL.
parent: The parent object that contains the obj parameter to be inspected.
"""
if _IsSudsIterable(obj):
# Since in-place modification of the Suds object is taking place, the
# iterator should be done over a frozen copy of the unpacked fields.
copy_of_obj = tuple(obj)
for item in copy_of_obj:
if _IsSudsIterable(item):
if 'xsi_type' in item:
if isinstance(obj, tuple):
parent[obj[0]] = _PackForSuds(obj[1], factory)
else:
obj.remove(item)
obj.append(_PackForSuds(item, factory))
_RecurseOverObject(item, factory, obj) | [
"def",
"_RecurseOverObject",
"(",
"obj",
",",
"factory",
",",
"parent",
"=",
"None",
")",
":",
"if",
"_IsSudsIterable",
"(",
"obj",
")",
":",
"# Since in-place modification of the Suds object is taking place, the",
"# iterator should be done over a frozen copy of the unpacked f... | 42.833333 | 18.541667 |
def button_clicked(self, button):
"""Action when button was clicked.
Parameters
----------
button : instance of QPushButton
which button was pressed
"""
if button is self.idx_ok:
# File location
if not self.filename:
msg = 'Select location for data export file.'
error_dialog = QErrorMessage(self)
error_dialog.setWindowTitle('File path error')
error_dialog.showMessage(msg)
return
# Check for signal
self.update_nseg
if self.nseg <= 0:
msg = 'No valid signal found.'
error_dialog = QErrorMessage(self)
error_dialog.setWindowTitle('Error fetching data')
error_dialog.showMessage(msg)
return
# Which analyses?
freq = self.frequency
freq_full = freq['export_full'].get_value()
freq_band = freq['export_band'].get_value()
freq_plot = freq['plot_on'].get_value()
freq_fooof = freq['fooof_on'].get_value()
freq_prep = freq['prep'].get_value()
freq_on = freq_full or freq_band or freq_plot or freq_fooof
if Pac is not None:
pac_on = self.pac['pac_on'].get_value()
pac_prep = self.pac['prep'].get_value()
else:
pac_on = False
pac_prep = False
ev = self.event
glob = asarray(
[v.get_value() for v in ev['global'].values()]).any()
loc = asarray(
[v[0].get_value() for v in ev['local'].values()]).any()
avg_sl = ev['sw']['avg_slope'].get_value()
max_sl = ev['sw']['max_slope'].get_value()
loc_prep = asarray(
[v[1].get_value() for v in ev['local'].values()]).any()
slope_prep = ev['sw']['prep'].get_value()
if not (freq_on or pac_on or glob or loc or avg_sl or max_sl):
return
if freq['export_band'].get_value():
bands = freq_from_str(freq['band'].get_value())
if bands is None:
msg = ('Invalid input for Define bands. Click the '
"'i' button for instructions.")
error_dialog = QErrorMessage(self)
error_dialog.setWindowTitle('Error reading bands')
error_dialog.showMessage(msg)
return
if (freq['norm'].get_value() == 'by mean of event type(s)' and
not freq['norm_evt_type'].selectedItems()):
msg = 'Select event type(s) for normalization.'
error_dialog = QErrorMessage(self)
error_dialog.setWindowTitle('Error fetching data')
error_dialog.showMessage(msg)
return
if (freq['norm'].get_value() == 'by mean of stage(s)' and
not freq['norm_stage'].selectedItems()):
msg = 'Select stage(s) for normalization.'
error_dialog = QErrorMessage(self)
error_dialog.setWindowTitle('Error fetching data')
error_dialog.showMessage(msg)
return
# Fetch signal
eco = self.evt_chan_only
chan = [] if (eco.get_value() and eco.isEnabled()) else self.chan
concat_chan = self.cat['chan'].get_value()
self.data = self.get_segments()
if not self.data.segments:
msg = 'No valid signal found.'
error_dialog = QErrorMessage(self)
error_dialog.setWindowTitle('Error fetching data')
error_dialog.showMessage(msg)
return
ding = self.data.read_data(chan,
ref_chan=self.one_grp['ref_chan'],
grp_name=self.one_grp['name'],
concat_chan=concat_chan,
max_s_freq=self.parent.value('max_s_freq'),
parent=self)
if not ding:
self.parent.statusBar().showMessage('Process interrupted.')
return
# Transform signal
if freq_prep or pac_prep or loc_prep or slope_prep:
lg.info('Pre-processing data')
self.data = self.transform_data(self.data)
""" ------ FREQUENCY ------ """
if freq_on:
csd_on = freq['csd'].get_value()
gainphase_on = freq['gainphase'].get_value()
coh_on = freq['coh'].get_value()
# don't need autospectrum if all we want is CSD
if not (csd_on and not (gainphase_on or coh_on)):
asd = self.compute_freq() # autospectral density
if not asd:
return
if csd_on or gainphase_on or coh_on:
csd = self.compute_freq(csd=True) # cross-spectral density
chancombo = str(csd[0]['data'].axis['chan'][0][0])
freq_out = []
if csd_on:
freq_out.append((csd, 'csd',
('Cross-spectral density, '
+ chancombo + ', '),
None, 'semilogy'))
if gainphase_on:
xg, yg, ph = self.compute_freq_cross(csd, asd,
output='gainphase')
xchancombo = str(xg[0]['data'].axis['chan'][0][0])
ychancombo = str(yg[0]['data'].axis['chan'][0][0])
freq_out.append((xg, 'xgain',
('Gain, ' + xchancombo + ', '),
'Gain', 'linear'))
freq_out.append((yg, 'ygain',
('Gain, ' + ychancombo + ', '),
'Gain', 'linear'))
freq_out.append((ph, 'phase',
('Phase shift, ' + xchancombo + ', '),
'Phase shift (degrees)', 'linear'))
if coh_on:
coh, = self.compute_freq_cross(csd, asd,
output='coherence')
freq_out.append((coh, 'coh',
('Coherence, ' + chancombo + ', '),
'Coherence', 'linear'))
else:
freq_out = [(asd, 'freq', '', None, 'semilogy')]
for one_xf, suffix, prefix, ylabel, scale in freq_out:
if freq_band:
filename = (splitext(self.filename)[0] + '_' + suffix +
'_band.csv')
export_freq_band(one_xf, bands, filename)
if freq_full or freq_plot or freq_fooof:
n_freq_bins = [x['data']()[0].shape for x in one_xf]
if all(x == n_freq_bins[0] for x in n_freq_bins):
x = list(one_xf[0]['data'].axis['freq'][0])
if len(one_xf) == 1:
desc = None
y = abs(one_xf[0]['data'].data[0][0])
else:
as_matrix = asarray(
[y for x in one_xf for y in x['data']()[0]])
desc = get_descriptives(as_matrix)
y = desc['mean']
if freq_full:
filename = (splitext(self.filename)[0] + '_' +
suffix + '_full.csv')
export_freq(one_xf, filename, desc=desc)
if freq_plot:
self.plot_freq(x, y,
title=(prefix + self.title),
ylabel=ylabel, scale=scale)
if freq_fooof:
self.report_fooof(asarray(x), y, suffix)
""" ------ PAC ------ """
if pac_on:
pac_output = self.compute_pac()
if pac_output is not None:
xpac, fpha, famp = pac_output
else:
return
as_matrix = asarray(
[ravel(chan['data'][x,:,:]) for chan in xpac.values() \
for x in range(chan['data'].shape[0])])
desc = get_descriptives(as_matrix)
self.export_pac(xpac, fpha, famp, desc)
""" ------ EVENTS ------ """
evt_dat, count, density = self.compute_evt_params()
if (evt_dat or count or density):
fn = splitext(self.filename)[0] + '_params.csv'
export_event_params(fn, evt_dat, count=count, density=density)
self.parent.overview.mark_poi() # remove poi
self.accept()
if button is self.idx_cancel:
self.parent.overview.mark_poi() # remove poi
self.reject() | [
"def",
"button_clicked",
"(",
"self",
",",
"button",
")",
":",
"if",
"button",
"is",
"self",
".",
"idx_ok",
":",
"# File location",
"if",
"not",
"self",
".",
"filename",
":",
"msg",
"=",
"'Select location for data export file.'",
"error_dialog",
"=",
"QErrorMess... | 41.267544 | 21.491228 |
def encode(lng, lat, precision=10, bits_per_char=6):
"""Encode a lng/lat position as a geohash using a hilbert curve
This function encodes a lng/lat coordinate to a geohash of length `precision`
on a corresponding a hilbert curve. Each character encodes `bits_per_char` bits
per character (allowed are 2, 4 and 6 bits [default 6]). Hence, the geohash encodes
the lng/lat coordinate using `precision` * `bits_per_char` bits. The number of
bits devided by 2 give the level of the used hilbert curve, e.g. precision=10, bits_per_char=6
(default values) use 60 bit and a level 30 hilbert curve to map the globe.
Parameters:
lng: float Longitude; between -180.0 and 180.0; WGS 84
lat: float Latitude; between -90.0 and 90.0; WGS 84
precision: int The number of characters in a geohash
bits_per_char: int The number of bits per coding character
Returns:
str: geohash for lng/lat of length `precision`
"""
assert _LNG_INTERVAL[0] <= lng <= _LNG_INTERVAL[1]
assert _LAT_INTERVAL[0] <= lat <= _LAT_INTERVAL[1]
assert precision > 0
assert bits_per_char in (2, 4, 6)
bits = precision * bits_per_char
level = bits >> 1
dim = 1 << level
x, y = _coord2int(lng, lat, dim)
if CYTHON_AVAILABLE and bits <= MAX_BITS:
code = xy2hash_cython(x, y, dim)
else:
code = _xy2hash(x, y, dim)
return encode_int(code, bits_per_char).rjust(precision, '0') | [
"def",
"encode",
"(",
"lng",
",",
"lat",
",",
"precision",
"=",
"10",
",",
"bits_per_char",
"=",
"6",
")",
":",
"assert",
"_LNG_INTERVAL",
"[",
"0",
"]",
"<=",
"lng",
"<=",
"_LNG_INTERVAL",
"[",
"1",
"]",
"assert",
"_LAT_INTERVAL",
"[",
"0",
"]",
"<=... | 40.583333 | 24.972222 |
def transform(self, X=None, y=None):
"""
Transform an image using an Affine transform with the given
shear parameters. Return the transform if X=None.
Arguments
---------
X : ANTsImage
Image to transform
y : ANTsImage (optional)
Another image to transform
Returns
-------
ANTsImage if y is None, else a tuple of ANTsImage types
Examples
--------
>>> import ants
>>> img = ants.image_read(ants.get_data('r16'))
>>> tx = ants.contrib.Shear2D(shear=(10,0,0))
>>> img2_x = tx.transform(img)# x axis stays same
>>> tx = ants.contrib.Shear2D(shear=(-10,0,0)) # other direction
>>> img2_x = tx.transform(img)# x axis stays same
>>> tx = ants.contrib.Shear2D(shear=(0,10,0))
>>> img2_y = tx.transform(img) # y axis stays same
>>> tx = ants.contrib.Shear2D(shear=(0,0,10))
>>> img2_z = tx.transform(img) # z axis stays same
>>> tx = ants.contrib.Shear2D(shear=(10,10,10))
>>> img2 = tx.transform(img)
"""
# convert to radians and unpack
shear = [math.pi / 180 * s for s in self.shear]
shear_x, shear_y = shear
shear_matrix = np.array([[1, shear_x, 0],
[shear_y, 1, 0]])
self.tx.set_parameters(shear_matrix)
if self.lazy or X is None:
return self.tx
else:
return self.tx.apply_to_image(X, reference=self.reference) | [
"def",
"transform",
"(",
"self",
",",
"X",
"=",
"None",
",",
"y",
"=",
"None",
")",
":",
"# convert to radians and unpack",
"shear",
"=",
"[",
"math",
".",
"pi",
"/",
"180",
"*",
"s",
"for",
"s",
"in",
"self",
".",
"shear",
"]",
"shear_x",
",",
"sh... | 35.023256 | 17.953488 |
def parse_param(param, include_desc=False):
"""Parse a single typed parameter statement."""
param_def, _colon, desc = param.partition(':')
if not include_desc:
desc = None
else:
desc = desc.lstrip()
if _colon == "":
raise ValidationError("Invalid parameter declaration in docstring, missing colon", declaration=param)
param_name, _space, param_type = param_def.partition(' ')
if len(param_type) < 2 or param_type[0] != '(' or param_type[-1] != ')':
raise ValidationError("Invalid parameter type string not enclosed in ( ) characters", param_string=param_def, type_string=param_type)
param_type = param_type[1:-1]
return param_name, ParameterInfo(param_type, [], desc) | [
"def",
"parse_param",
"(",
"param",
",",
"include_desc",
"=",
"False",
")",
":",
"param_def",
",",
"_colon",
",",
"desc",
"=",
"param",
".",
"partition",
"(",
"':'",
")",
"if",
"not",
"include_desc",
":",
"desc",
"=",
"None",
"else",
":",
"desc",
"=",
... | 40.111111 | 29.166667 |
def get(self, key, default=None):
"""
Retreive a value from the cache. In the event the value
does not exist, return the ``default``.
"""
key = self.make_key(key)
if self.debug:
return default
try:
value = self.database[key]
except KeyError:
self.metrics['misses'] += 1
return default
else:
self.metrics['hits'] += 1
return pickle.loads(value) | [
"def",
"get",
"(",
"self",
",",
"key",
",",
"default",
"=",
"None",
")",
":",
"key",
"=",
"self",
".",
"make_key",
"(",
"key",
")",
"if",
"self",
".",
"debug",
":",
"return",
"default",
"try",
":",
"value",
"=",
"self",
".",
"database",
"[",
"key... | 26.222222 | 13.888889 |
def glob_all(folder: str, filt: str) -> List[str]:
"""Recursive glob"""
import os
import fnmatch
matches = []
for root, dirnames, filenames in os.walk(folder):
for filename in fnmatch.filter(filenames, filt):
matches.append(os.path.join(root, filename))
return matches | [
"def",
"glob_all",
"(",
"folder",
":",
"str",
",",
"filt",
":",
"str",
")",
"->",
"List",
"[",
"str",
"]",
":",
"import",
"os",
"import",
"fnmatch",
"matches",
"=",
"[",
"]",
"for",
"root",
",",
"dirnames",
",",
"filenames",
"in",
"os",
".",
"walk"... | 33.777778 | 16.666667 |
def route_filter_get(name, resource_group, **kwargs):
'''
.. versionadded:: 2019.2.0
Get details about a specific route filter.
:param name: The name of the route table to query.
:param resource_group: The resource group name assigned to the
route filter.
CLI Example:
.. code-block:: bash
salt-call azurearm_network.route_filter_get test-filter testgroup
'''
expand = kwargs.get('expand')
netconn = __utils__['azurearm.get_client']('network', **kwargs)
try:
route_filter = netconn.route_filters.get(
route_filter_name=name,
resource_group_name=resource_group,
expand=expand
)
result = route_filter.as_dict()
except CloudError as exc:
__utils__['azurearm.log_cloud_error']('network', str(exc), **kwargs)
result = {'error': str(exc)}
return result | [
"def",
"route_filter_get",
"(",
"name",
",",
"resource_group",
",",
"*",
"*",
"kwargs",
")",
":",
"expand",
"=",
"kwargs",
".",
"get",
"(",
"'expand'",
")",
"netconn",
"=",
"__utils__",
"[",
"'azurearm.get_client'",
"]",
"(",
"'network'",
",",
"*",
"*",
... | 25.5 | 24.558824 |
def set_axes(self, channels, ax):
"""
channels : iterable of string
each value corresponds to a channel names
names must be unique
"""
# To make sure displayed as hist
if len(set(channels)) == 1:
channels = channels[0],
self.current_channels = channels
# Remove existing gates
for gate in self.gates:
gate.remove_spawned_gates()
##
# Has a clear axis command inside!!
# which will "force kill" spawned gates
self.plot_data()
for gate in self.gates:
sgate = gate.spawn(channels, ax)
gate._refresh_activation() | [
"def",
"set_axes",
"(",
"self",
",",
"channels",
",",
"ax",
")",
":",
"# To make sure displayed as hist",
"if",
"len",
"(",
"set",
"(",
"channels",
")",
")",
"==",
"1",
":",
"channels",
"=",
"channels",
"[",
"0",
"]",
",",
"self",
".",
"current_channels"... | 30.272727 | 9.545455 |
def requires(self):
""" Index all pages. """
for url in NEWSPAPERS:
yield IndexPage(url=url, date=self.date) | [
"def",
"requires",
"(",
"self",
")",
":",
"for",
"url",
"in",
"NEWSPAPERS",
":",
"yield",
"IndexPage",
"(",
"url",
"=",
"url",
",",
"date",
"=",
"self",
".",
"date",
")"
] | 33.25 | 10.75 |
def mdct(x, L):
"""Modified Discrete Cosine Transform (MDCT)
Returns the Modified Discrete Cosine Transform with fixed
window size L of the signal x.
The window is based on a sine window.
Parameters
----------
x : ndarray, shape (N,)
The signal
L : int
The window length
Returns
-------
y : ndarray, shape (L/2, 2 * N / L)
The MDCT coefficients
See also
--------
imdct
"""
x = np.asarray(x, dtype=np.float)
N = x.size
# Number of frequency channels
K = L // 2
# Test length
if N % K != 0:
raise RuntimeError('Input length must be a multiple of the half of '
'the window size')
# Pad edges with zeros
xx = np.zeros(L // 4 + N + L // 4)
xx[L // 4:-L // 4] = x
x = xx
del xx
# Number of frames
P = N // K
if P < 2:
raise ValueError('Signal too short')
# Framing
x = _framing(x, L)
# Windowing
aL = np.arange(L, dtype=np.float)
w_long = np.sin((np.pi / L) * (aL + 0.5))
w_edge_L = w_long.copy()
w_edge_L[:L // 4] = 0.
w_edge_L[L // 4:L // 2] = 1.
w_edge_R = w_long.copy()
w_edge_R[L // 2:L // 2 + L // 4] = 1.
w_edge_R[L // 2 + L // 4:] = 0.
x[:, 0] *= w_edge_L
x[:, 1:-1] *= w_long[:, None]
x[:, -1] *= w_edge_R
# Pre-twiddle
x = x.astype(np.complex)
x *= np.exp((-1j * np.pi / L) * aL)[:, None]
# FFT
y = fft(x, axis=0)
# Post-twiddle
y = y[:L // 2, :]
y *= np.exp((-1j * np.pi * (L // 2 + 1.) / L)
* (0.5 + aL[:L // 2]))[:, None]
# Real part and scaling
y = math.sqrt(2. / K) * np.real(y)
return y | [
"def",
"mdct",
"(",
"x",
",",
"L",
")",
":",
"x",
"=",
"np",
".",
"asarray",
"(",
"x",
",",
"dtype",
"=",
"np",
".",
"float",
")",
"N",
"=",
"x",
".",
"size",
"# Number of frequency channels",
"K",
"=",
"L",
"//",
"2",
"# Test length",
"if",
"N",... | 20.948718 | 21.217949 |
def run(self, messages):
"""Returns some analytics about this autograder run."""
statistics = {}
statistics['time'] = str(datetime.now())
statistics['time-utc'] = str(datetime.utcnow())
statistics['unlock'] = self.args.unlock
if self.args.question:
statistics['question'] = [t.name for t in self.assignment.specified_tests]
statistics['requested-questions'] = self.args.question
if self.args.suite:
statistics['requested-suite'] = self.args.suite
if self.args.case:
statistics['requested-case'] = self.args.case
messages['analytics'] = statistics
self.log_run(messages) | [
"def",
"run",
"(",
"self",
",",
"messages",
")",
":",
"statistics",
"=",
"{",
"}",
"statistics",
"[",
"'time'",
"]",
"=",
"str",
"(",
"datetime",
".",
"now",
"(",
")",
")",
"statistics",
"[",
"'time-utc'",
"]",
"=",
"str",
"(",
"datetime",
".",
"ut... | 38.833333 | 18.888889 |
def _get_magnitude_scaling_term(self, C, mag):
"""
Returns the magnitude scaling term defined in equation 3
"""
if mag < 6.75:
return C["a1_lo"] + C["a2_lo"] * mag + C["a3"] *\
((8.5 - mag) ** 2.0)
else:
return C["a1_hi"] + C["a2_hi"] * mag + C["a3"] *\
((8.5 - mag) ** 2.0) | [
"def",
"_get_magnitude_scaling_term",
"(",
"self",
",",
"C",
",",
"mag",
")",
":",
"if",
"mag",
"<",
"6.75",
":",
"return",
"C",
"[",
"\"a1_lo\"",
"]",
"+",
"C",
"[",
"\"a2_lo\"",
"]",
"*",
"mag",
"+",
"C",
"[",
"\"a3\"",
"]",
"*",
"(",
"(",
"8.5... | 36.1 | 12.5 |
def set_state_from_exit_status(self, status, notif_period, hosts, services):
"""Set the state in UP, WARNING, CRITICAL, UNKNOWN or UNREACHABLE
according to the status of a check result.
:param status: integer between 0 and 4
:type status: int
:return: None
"""
now = time.time()
# we should put in last_state the good last state:
# if not just change the state by an problem/impact
# we can take current state. But if it's the case, the
# real old state is self.state_before_impact (it's the TRUE
# state in fact)
# but only if the global conf have enable the impact state change
cls = self.__class__
if cls.enable_problem_impacts_states_change \
and self.is_impact \
and not self.state_changed_since_impact:
self.last_state = self.state_before_impact
else: # standard case
self.last_state = self.state
# The last times are kept as integer values rather than float... no need for ms!
if status == 0:
self.state = u'OK'
self.state_id = 0
self.last_time_ok = int(self.last_state_update)
# self.last_time_ok = self.last_state_update
state_code = 'o'
elif status == 1:
self.state = u'WARNING'
self.state_id = 1
self.last_time_warning = int(self.last_state_update)
# self.last_time_warning = self.last_state_update
state_code = 'w'
elif status == 2:
self.state = u'CRITICAL'
self.state_id = 2
self.last_time_critical = int(self.last_state_update)
# self.last_time_critical = self.last_state_update
state_code = 'c'
elif status == 3:
self.state = u'UNKNOWN'
self.state_id = 3
self.last_time_unknown = int(self.last_state_update)
# self.last_time_unknown = self.last_state_update
state_code = 'u'
elif status == 4:
self.state = u'UNREACHABLE'
self.state_id = 4
self.last_time_unreachable = int(self.last_state_update)
# self.last_time_unreachable = self.last_state_update
state_code = 'x'
else:
self.state = u'CRITICAL' # exit code UNDETERMINED
self.state_id = 2
self.last_time_critical = int(self.last_state_update)
# self.last_time_critical = self.last_state_update
state_code = 'c'
if state_code in self.flap_detection_options:
self.add_flapping_change(self.state != self.last_state)
# Now we add a value, we update the is_flapping prop
self.update_flapping(notif_period, hosts, services)
if self.state != self.last_state:
self.last_state_change = self.last_state_update
self.duration_sec = now - self.last_state_change | [
"def",
"set_state_from_exit_status",
"(",
"self",
",",
"status",
",",
"notif_period",
",",
"hosts",
",",
"services",
")",
":",
"now",
"=",
"time",
".",
"time",
"(",
")",
"# we should put in last_state the good last state:",
"# if not just change the state by an problem/im... | 41.785714 | 17.671429 |
def _push_textbuffer(self):
"""Push the textbuffer onto the stack as a Text node and clear it."""
if self._textbuffer:
self._stack.append(tokens.Text(text="".join(self._textbuffer)))
self._textbuffer = [] | [
"def",
"_push_textbuffer",
"(",
"self",
")",
":",
"if",
"self",
".",
"_textbuffer",
":",
"self",
".",
"_stack",
".",
"append",
"(",
"tokens",
".",
"Text",
"(",
"text",
"=",
"\"\"",
".",
"join",
"(",
"self",
".",
"_textbuffer",
")",
")",
")",
"self",
... | 48 | 13.4 |
def _get_completions(self):
"""Return a list of possible completions for the string ending at the point.
Also set begidx and endidx in the process."""
completions = []
self.begidx = self.l_buffer.point
self.endidx = self.l_buffer.point
buf=self.l_buffer.line_buffer
if self.completer:
# get the string to complete
while self.begidx > 0:
self.begidx -= 1
if buf[self.begidx] in self.completer_delims:
self.begidx += 1
break
text = ensure_str(u''.join(buf[self.begidx:self.endidx]))
log(u'complete text="%s"' % ensure_unicode(text))
i = 0
while 1:
try:
r = ensure_unicode(self.completer(text, i))
except IndexError:
break
i += 1
if r is None:
break
elif r and r not in completions:
completions.append(r)
else:
pass
log(u'text completions=<%s>' % map(ensure_unicode, completions))
if (self.complete_filesystem == "on") and not completions:
# get the filename to complete
while self.begidx > 0:
self.begidx -= 1
if buf[self.begidx] in u' \t\n':
self.begidx += 1
break
text = ensure_str(u''.join(buf[self.begidx:self.endidx]))
log(u'file complete text="%s"' % ensure_unicode(text))
completions = map(ensure_unicode, glob.glob(os.path.expanduser(text) + '*'))
if self.mark_directories == u'on':
mc = []
for f in completions:
if os.path.isdir(f):
mc.append(f + os.sep)
else:
mc.append(f)
completions = mc
log(u'fnames=<%s>' % map(ensure_unicode, completions))
return completions | [
"def",
"_get_completions",
"(",
"self",
")",
":",
"completions",
"=",
"[",
"]",
"self",
".",
"begidx",
"=",
"self",
".",
"l_buffer",
".",
"point",
"self",
".",
"endidx",
"=",
"self",
".",
"l_buffer",
".",
"point",
"buf",
"=",
"self",
".",
"l_buffer",
... | 41.74 | 12.92 |
def delete(
self,
endpoint,
timeout=None,
allow_redirects=None,
validate=True,
headers=None,
):
"""*Sends a DELETE request to the endpoint.*
The endpoint is joined with the URL given on library init (if any).
If endpoint starts with ``http://`` or ``https://``, it is assumed
an URL outside the tested API (which may affect logging).
*Options*
``timeout``: A number of seconds to wait for the response before failing the keyword.
``allow_redirects``: If false, do not follow any redirects.
``validate``: If false, skips any request and response validations set
by expectation keywords and a spec given on library init.
``headers``: Headers as a JSON object to add or override for the request.
*Examples*
| `DELETE` | /users/6 |
| `DELETE` | http://localhost:8273/state | validate=false |
"""
endpoint = self._input_string(endpoint)
request = deepcopy(self.request)
request["method"] = "DELETE"
if allow_redirects is not None:
request["allowRedirects"] = self._input_boolean(allow_redirects)
if timeout is not None:
request["timeout"] = self._input_timeout(timeout)
validate = self._input_boolean(validate)
if headers:
request["headers"].update(self._input_object(headers))
return self._request(endpoint, request, validate)["response"] | [
"def",
"delete",
"(",
"self",
",",
"endpoint",
",",
"timeout",
"=",
"None",
",",
"allow_redirects",
"=",
"None",
",",
"validate",
"=",
"True",
",",
"headers",
"=",
"None",
",",
")",
":",
"endpoint",
"=",
"self",
".",
"_input_string",
"(",
"endpoint",
"... | 35.756098 | 24.902439 |
def write(self, data):
'''
Write method used by internal tarfile instance to output data.
This method blocks tarfile execution once internal buffer is full.
As this method is blocking, it is used inside the same thread of
:meth:`fill`.
:param data: bytes to write to internal buffer
:type data: bytes
:returns: number of bytes written
:rtype: int
'''
self._add.wait()
self._data += data
if len(self._data) > self._want:
self._add.clear()
self._result.set()
return len(data) | [
"def",
"write",
"(",
"self",
",",
"data",
")",
":",
"self",
".",
"_add",
".",
"wait",
"(",
")",
"self",
".",
"_data",
"+=",
"data",
"if",
"len",
"(",
"self",
".",
"_data",
")",
">",
"self",
".",
"_want",
":",
"self",
".",
"_add",
".",
"clear",
... | 31.210526 | 20.473684 |
def getItem(self, index, altItem=None):
""" Returns the TreeItem for the given index. Returns the altItem if the index is invalid.
"""
if index.isValid():
item = index.internalPointer()
if item:
return item
#return altItem if altItem is not None else self.invisibleRootItem # TODO: remove
return altItem | [
"def",
"getItem",
"(",
"self",
",",
"index",
",",
"altItem",
"=",
"None",
")",
":",
"if",
"index",
".",
"isValid",
"(",
")",
":",
"item",
"=",
"index",
".",
"internalPointer",
"(",
")",
"if",
"item",
":",
"return",
"item",
"#return altItem if altItem is ... | 37.5 | 15.6 |
def eval(self, now=None):
'''
Evaluate and execute the schedule
:param datetime now: Override current time with a datetime object instance``
'''
log.trace('==== evaluating schedule now %s =====', now)
loop_interval = self.opts['loop_interval']
if not isinstance(loop_interval, datetime.timedelta):
loop_interval = datetime.timedelta(seconds=loop_interval)
def _splay(splaytime):
'''
Calculate splaytime
'''
splay_ = None
if isinstance(splaytime, dict):
if splaytime['end'] >= splaytime['start']:
splay_ = random.randint(splaytime['start'],
splaytime['end'])
else:
log.error('schedule.handle_func: Invalid Splay, '
'end must be larger than start. Ignoring splay.')
else:
splay_ = random.randint(1, splaytime)
return splay_
def _handle_time_elements(data):
'''
Handle schedule item with time elements
seconds, minutes, hours, days
'''
if '_seconds' not in data:
interval = int(data.get('seconds', 0))
interval += int(data.get('minutes', 0)) * 60
interval += int(data.get('hours', 0)) * 3600
interval += int(data.get('days', 0)) * 86400
data['_seconds'] = interval
if not data['_next_fire_time']:
data['_next_fire_time'] = now + datetime.timedelta(seconds=data['_seconds'])
if interval < self.loop_interval:
self.loop_interval = interval
data['_next_scheduled_fire_time'] = now + datetime.timedelta(seconds=data['_seconds'])
def _handle_once(data, loop_interval):
'''
Handle schedule item with once
'''
if data['_next_fire_time']:
if data['_next_fire_time'] < now - loop_interval or \
data['_next_fire_time'] > now and \
not data['_splay']:
data['_continue'] = True
if not data['_next_fire_time'] and \
not data['_splay']:
once = data['once']
if not isinstance(once, datetime.datetime):
once_fmt = data.get('once_fmt', '%Y-%m-%dT%H:%M:%S')
try:
once = datetime.datetime.strptime(data['once'],
once_fmt)
except (TypeError, ValueError):
data['_error'] = ('Date string could not '
'be parsed: {0}, {1}. '
'Ignoring job {2}.'.format(
data['once'],
once_fmt,
data['name']))
log.error(data['_error'])
return
data['_next_fire_time'] = once
data['_next_scheduled_fire_time'] = once
# If _next_fire_time is less than now, continue
if once < now - loop_interval:
data['_continue'] = True
def _handle_when(data, loop_interval):
'''
Handle schedule item with when
'''
if not _WHEN_SUPPORTED:
data['_error'] = ('Missing python-dateutil. '
'Ignoring job {0}.'.format(data['name']))
log.error(data['_error'])
return
if not isinstance(data['when'], list):
_when_data = [data['when']]
else:
_when_data = data['when']
_when = []
for i in _when_data:
if ('pillar' in self.opts and 'whens' in self.opts['pillar'] and
i in self.opts['pillar']['whens']):
if not isinstance(self.opts['pillar']['whens'],
dict):
data['_error'] = ('Pillar item "whens" '
'must be a dict. '
'Ignoring job {0}.'.format(data['name']))
log.error(data['_error'])
return
when_ = self.opts['pillar']['whens'][i]
elif ('whens' in self.opts['grains'] and
i in self.opts['grains']['whens']):
if not isinstance(self.opts['grains']['whens'],
dict):
data['_error'] = ('Grain "whens" must be a dict. '
'Ignoring job {0}.'.format(data['name']))
log.error(data['_error'])
return
when_ = self.opts['grains']['whens'][i]
else:
when_ = i
if not isinstance(when_, datetime.datetime):
try:
when_ = dateutil_parser.parse(when_)
except ValueError:
data['_error'] = ('Invalid date string {0}. '
'Ignoring job {1}.'.format(i, data['name']))
log.error(data['_error'])
return
_when.append(when_)
if data['_splay']:
_when.append(data['_splay'])
# Sort the list of "whens" from earlier to later schedules
_when.sort()
# Copy the list so we can loop through it
for i in copy.deepcopy(_when):
if len(_when) > 1:
if i < now - loop_interval:
# Remove all missed schedules except the latest one.
# We need it to detect if it was triggered previously.
_when.remove(i)
if _when:
# Grab the first element, which is the next run time or
# last scheduled time in the past.
when = _when[0]
if when < now - loop_interval and \
not data.get('_run', False) and \
not data.get('run', False) and \
not data['_splay']:
data['_next_fire_time'] = None
data['_continue'] = True
return
if '_run' not in data:
# Prevent run of jobs from the past
data['_run'] = bool(when >= now - loop_interval)
if not data['_next_fire_time']:
data['_next_fire_time'] = when
data['_next_scheduled_fire_time'] = when
if data['_next_fire_time'] < when and \
not run and \
not data['_run']:
data['_next_fire_time'] = when
data['_run'] = True
elif not data.get('_run', False):
data['_next_fire_time'] = None
data['_continue'] = True
def _handle_cron(data, loop_interval):
'''
Handle schedule item with cron
'''
if not _CRON_SUPPORTED:
data['_error'] = ('Missing python-croniter. '
'Ignoring job {0}.'.format(data['name']))
log.error(data['_error'])
return
if data['_next_fire_time'] is None:
# Get next time frame for a "cron" job if it has been never
# executed before or already executed in the past.
try:
data['_next_fire_time'] = croniter.croniter(data['cron'], now).get_next(datetime.datetime)
data['_next_scheduled_fire_time'] = croniter.croniter(data['cron'], now).get_next(datetime.datetime)
except (ValueError, KeyError):
data['_error'] = ('Invalid cron string. '
'Ignoring job {0}.'.format(data['name']))
log.error(data['_error'])
return
# If next job run is scheduled more than 1 minute ahead and
# configured loop interval is longer than that, we should
# shorten it to get our job executed closer to the beginning
# of desired time.
interval = (now - data['_next_fire_time']).total_seconds()
if interval >= 60 and interval < self.loop_interval:
self.loop_interval = interval
def _handle_run_explicit(data, loop_interval):
'''
Handle schedule item with run_explicit
'''
_run_explicit = []
for _run_time in data['run_explicit']:
if isinstance(_run_time, datetime.datetime):
_run_explicit.append(_run_time)
else:
_run_explicit.append(datetime.datetime.strptime(_run_time['time'],
_run_time['time_fmt']))
data['run'] = False
# Copy the list so we can loop through it
for i in copy.deepcopy(_run_explicit):
if len(_run_explicit) > 1:
if i < now - loop_interval:
_run_explicit.remove(i)
if _run_explicit:
if _run_explicit[0] <= now < _run_explicit[0] + loop_interval:
data['run'] = True
data['_next_fire_time'] = _run_explicit[0]
def _handle_skip_explicit(data, loop_interval):
'''
Handle schedule item with skip_explicit
'''
data['run'] = False
_skip_explicit = []
for _skip_time in data['skip_explicit']:
if isinstance(_skip_time, datetime.datetime):
_skip_explicit.append(_skip_time)
else:
_skip_explicit.append(datetime.datetime.strptime(_skip_time['time'],
_skip_time['time_fmt']))
# Copy the list so we can loop through it
for i in copy.deepcopy(_skip_explicit):
if i < now - loop_interval:
_skip_explicit.remove(i)
if _skip_explicit:
if _skip_explicit[0] <= now <= (_skip_explicit[0] + loop_interval):
if self.skip_function:
data['run'] = True
data['func'] = self.skip_function
else:
data['_skip_reason'] = 'skip_explicit'
data['_skipped_time'] = now
data['_skipped'] = True
data['run'] = False
else:
data['run'] = True
def _handle_skip_during_range(data, loop_interval):
'''
Handle schedule item with skip_explicit
'''
if not _RANGE_SUPPORTED:
data['_error'] = ('Missing python-dateutil. '
'Ignoring job {0}.'.format(data['name']))
log.error(data['_error'])
return
if not isinstance(data['skip_during_range'], dict):
data['_error'] = ('schedule.handle_func: Invalid, range '
'must be specified as a dictionary. '
'Ignoring job {0}.'.format(data['name']))
log.error(data['_error'])
return
start = data['skip_during_range']['start']
end = data['skip_during_range']['end']
if not isinstance(start, datetime.datetime):
try:
start = dateutil_parser.parse(start)
except ValueError:
data['_error'] = ('Invalid date string for start in '
'skip_during_range. Ignoring '
'job {0}.'.format(data['name']))
log.error(data['_error'])
return
if not isinstance(end, datetime.datetime):
try:
end = dateutil_parser.parse(end)
except ValueError:
data['_error'] = ('Invalid date string for end in '
'skip_during_range. Ignoring '
'job {0}.'.format(data['name']))
log.error(data['_error'])
return
# Check to see if we should run the job immediately
# after the skip_during_range is over
if 'run_after_skip_range' in data and \
data['run_after_skip_range']:
if 'run_explicit' not in data:
data['run_explicit'] = []
# Add a run_explicit for immediately after the
# skip_during_range ends
_run_immediate = (end + loop_interval).strftime('%Y-%m-%dT%H:%M:%S')
if _run_immediate not in data['run_explicit']:
data['run_explicit'].append({'time': _run_immediate,
'time_fmt': '%Y-%m-%dT%H:%M:%S'})
if end > start:
if start <= now <= end:
if self.skip_function:
data['run'] = True
data['func'] = self.skip_function
else:
data['_skip_reason'] = 'in_skip_range'
data['_skipped_time'] = now
data['_skipped'] = True
data['run'] = False
else:
data['run'] = True
else:
data['_error'] = ('schedule.handle_func: Invalid '
'range, end must be larger than '
'start. Ignoring job {0}.'.format(data['name']))
log.error(data['_error'])
def _handle_range(data):
'''
Handle schedule item with skip_explicit
'''
if not _RANGE_SUPPORTED:
data['_error'] = ('Missing python-dateutil. '
'Ignoring job {0}'.format(data['name']))
log.error(data['_error'])
return
if not isinstance(data['range'], dict):
data['_error'] = ('schedule.handle_func: Invalid, range '
'must be specified as a dictionary.'
'Ignoring job {0}.'.format(data['name']))
log.error(data['_error'])
return
start = data['range']['start']
end = data['range']['end']
if not isinstance(start, datetime.datetime):
try:
start = dateutil_parser.parse(start)
except ValueError:
data['_error'] = ('Invalid date string for start. '
'Ignoring job {0}.'.format(data['name']))
log.error(data['_error'])
return
if not isinstance(end, datetime.datetime):
try:
end = dateutil_parser.parse(end)
except ValueError:
data['_error'] = ('Invalid date string for end.'
' Ignoring job {0}.'.format(data['name']))
log.error(data['_error'])
return
if end > start:
if 'invert' in data['range'] and data['range']['invert']:
if now <= start or now >= end:
data['run'] = True
else:
data['_skip_reason'] = 'in_skip_range'
data['run'] = False
else:
if start <= now <= end:
data['run'] = True
else:
if self.skip_function:
data['run'] = True
data['func'] = self.skip_function
else:
data['_skip_reason'] = 'not_in_range'
data['run'] = False
else:
data['_error'] = ('schedule.handle_func: Invalid '
'range, end must be larger '
'than start. Ignoring job {0}.'.format(data['name']))
log.error(data['_error'])
def _handle_after(data):
'''
Handle schedule item with after
'''
if not _WHEN_SUPPORTED:
data['_error'] = ('Missing python-dateutil. '
'Ignoring job {0}'.format(data['name']))
log.error(data['_error'])
return
after = data['after']
if not isinstance(after, datetime.datetime):
after = dateutil_parser.parse(after)
if after >= now:
log.debug(
'After time has not passed skipping job: %s.',
data['name']
)
data['_skip_reason'] = 'after_not_passed'
data['_skipped_time'] = now
data['_skipped'] = True
data['run'] = False
else:
data['run'] = True
def _handle_until(data):
'''
Handle schedule item with until
'''
if not _WHEN_SUPPORTED:
data['_error'] = ('Missing python-dateutil. '
'Ignoring job {0}'.format(data['name']))
log.error(data['_error'])
return
until = data['until']
if not isinstance(until, datetime.datetime):
until = dateutil_parser.parse(until)
if until <= now:
log.debug(
'Until time has passed skipping job: %s.',
data['name']
)
data['_skip_reason'] = 'until_passed'
data['_skipped_time'] = now
data['_skipped'] = True
data['run'] = False
else:
data['run'] = True
def _chop_ms(dt):
'''
Remove the microseconds from a datetime object
'''
return dt - datetime.timedelta(microseconds=dt.microsecond)
schedule = self._get_schedule()
if not isinstance(schedule, dict):
raise ValueError('Schedule must be of type dict.')
if 'skip_function' in schedule:
self.skip_function = schedule['skip_function']
if 'skip_during_range' in schedule:
self.skip_during_range = schedule['skip_during_range']
if 'enabled' in schedule:
self.enabled = schedule['enabled']
if 'splay' in schedule:
self.splay = schedule['splay']
_hidden = ['enabled',
'skip_function',
'skip_during_range',
'splay']
for job, data in six.iteritems(schedule):
# Skip anything that is a global setting
if job in _hidden:
continue
# Clear these out between runs
for item in ['_continue',
'_error',
'_enabled',
'_skipped',
'_skip_reason',
'_skipped_time']:
if item in data:
del data[item]
run = False
if 'name' in data:
job_name = data['name']
else:
job_name = data['name'] = job
if not isinstance(data, dict):
log.error(
'Scheduled job "%s" should have a dict value, not %s',
job_name, type(data)
)
continue
if 'function' in data:
func = data['function']
elif 'func' in data:
func = data['func']
elif 'fun' in data:
func = data['fun']
else:
func = None
if not isinstance(func, list):
func = [func]
for _func in func:
if _func not in self.functions:
log.info(
'Invalid function: %s in scheduled job %s.',
_func, job_name
)
if '_next_fire_time' not in data:
data['_next_fire_time'] = None
if '_splay' not in data:
data['_splay'] = None
if 'run_on_start' in data and \
data['run_on_start'] and \
'_run_on_start' not in data:
data['_run_on_start'] = True
if not now:
now = datetime.datetime.now()
# Used for quick lookups when detecting invalid option
# combinations.
schedule_keys = set(data.keys())
time_elements = ('seconds', 'minutes', 'hours', 'days')
scheduling_elements = ('when', 'cron', 'once')
invalid_sched_combos = [
set(i) for i in itertools.combinations(scheduling_elements, 2)
]
if any(i <= schedule_keys for i in invalid_sched_combos):
log.error(
'Unable to use "%s" options together. Ignoring.',
'", "'.join(scheduling_elements)
)
continue
invalid_time_combos = []
for item in scheduling_elements:
all_items = itertools.chain([item], time_elements)
invalid_time_combos.append(
set(itertools.combinations(all_items, 2)))
if any(set(x) <= schedule_keys for x in invalid_time_combos):
log.error(
'Unable to use "%s" with "%s" options. Ignoring',
'", "'.join(time_elements),
'", "'.join(scheduling_elements)
)
continue
if 'run_explicit' in data:
_handle_run_explicit(data, loop_interval)
run = data['run']
if True in [True for item in time_elements if item in data]:
_handle_time_elements(data)
elif 'once' in data:
_handle_once(data, loop_interval)
elif 'when' in data:
_handle_when(data, loop_interval)
elif 'cron' in data:
_handle_cron(data, loop_interval)
else:
continue
# Something told us to continue, so we continue
if '_continue' in data and data['_continue']:
continue
# An error occurred so we bail out
if '_error' in data and data['_error']:
continue
seconds = int((_chop_ms(data['_next_fire_time']) - _chop_ms(now)).total_seconds())
# If there is no job specific splay available,
# grab the global which defaults to None.
if 'splay' not in data:
data['splay'] = self.splay
if 'splay' in data and data['splay']:
# Got "splay" configured, make decision to run a job based on that
if not data['_splay']:
# Try to add "splay" time only if next job fire time is
# still in the future. We should trigger job run
# immediately otherwise.
splay = _splay(data['splay'])
if now < data['_next_fire_time'] + datetime.timedelta(seconds=splay):
log.debug('schedule.handle_func: Adding splay of '
'%s seconds to next run.', splay)
data['_splay'] = data['_next_fire_time'] + datetime.timedelta(seconds=splay)
if 'when' in data:
data['_run'] = True
else:
run = True
if data['_splay']:
# The "splay" configuration has been already processed, just use it
seconds = (data['_splay'] - now).total_seconds()
if 'when' in data:
data['_next_fire_time'] = data['_splay']
if '_seconds' in data:
if seconds <= 0:
run = True
elif 'when' in data and data['_run']:
if data['_next_fire_time'] <= now <= (data['_next_fire_time'] + loop_interval):
data['_run'] = False
run = True
elif 'cron' in data:
# Reset next scheduled time because it is in the past now,
# and we should trigger the job run, then wait for the next one.
if seconds <= 0:
data['_next_fire_time'] = None
run = True
elif 'once' in data:
if data['_next_fire_time'] <= now <= (data['_next_fire_time'] + loop_interval):
run = True
elif seconds == 0:
run = True
if '_run_on_start' in data and data['_run_on_start']:
run = True
data['_run_on_start'] = False
elif run:
if 'range' in data:
_handle_range(data)
# An error occurred so we bail out
if '_error' in data and data['_error']:
continue
run = data['run']
# Override the functiton if passed back
if 'func' in data:
func = data['func']
# If there is no job specific skip_during_range available,
# grab the global which defaults to None.
if 'skip_during_range' not in data and self.skip_during_range:
data['skip_during_range'] = self.skip_during_range
if 'skip_during_range' in data and data['skip_during_range']:
_handle_skip_during_range(data, loop_interval)
# An error occurred so we bail out
if '_error' in data and data['_error']:
continue
run = data['run']
# Override the functiton if passed back
if 'func' in data:
func = data['func']
if 'skip_explicit' in data:
_handle_skip_explicit(data, loop_interval)
# An error occurred so we bail out
if '_error' in data and data['_error']:
continue
run = data['run']
# Override the functiton if passed back
if 'func' in data:
func = data['func']
if 'until' in data:
_handle_until(data)
# An error occurred so we bail out
if '_error' in data and data['_error']:
continue
run = data['run']
if 'after' in data:
_handle_after(data)
# An error occurred so we bail out
if '_error' in data and data['_error']:
continue
run = data['run']
# If args is a list and less than the number of functions
# run is set to False.
if 'args' in data and isinstance(data['args'], list):
if len(data['args']) < len(func):
data['_error'] = ('Number of arguments is less than '
'the number of functions. Ignoring job.')
log.error(data['_error'])
run = False
# If the job item has continue, then we set run to False
# so the job does not run but we still get the important
# information calculated, eg. _next_fire_time
if '_continue' in data and data['_continue']:
run = False
# If there is no job specific enabled available,
# grab the global which defaults to True.
if 'enabled' not in data:
data['enabled'] = self.enabled
# If globally disabled, disable the job
if not self.enabled:
data['enabled'] = self.enabled
data['_skip_reason'] = 'disabled'
data['_skipped_time'] = now
data['_skipped'] = True
run = False
# Job is disabled, set run to False
if 'enabled' in data and not data['enabled']:
data['_enabled'] = False
data['_skip_reason'] = 'disabled'
data['_skipped_time'] = now
data['_skipped'] = True
run = False
miss_msg = ''
if seconds < 0:
miss_msg = ' (runtime missed ' \
'by {0} seconds)'.format(abs(seconds))
try:
if run:
# Job is disabled, continue
if 'enabled' in data and not data['enabled']:
log.debug('Job: %s is disabled', job_name)
data['_skip_reason'] = 'disabled'
data['_skipped_time'] = now
data['_skipped'] = True
continue
if 'jid_include' not in data or data['jid_include']:
data['jid_include'] = True
log.debug('schedule: Job %s was scheduled with jid_include, '
'adding to cache (jid_include defaults to True)',
job_name)
if 'maxrunning' in data:
log.debug('schedule: Job %s was scheduled with a max '
'number of %s', job_name, data['maxrunning'])
else:
log.info('schedule: maxrunning parameter was not specified for '
'job %s, defaulting to 1.', job_name)
data['maxrunning'] = 1
if not self.standalone:
data['run'] = run
data = self._check_max_running(func,
data,
self.opts,
now)
run = data['run']
# Check run again, just in case _check_max_running
# set run to False
if run:
log.info('Running scheduled job: %s%s', job_name, miss_msg)
self._run_job(func, data)
finally:
# Only set _last_run if the job ran
if run:
data['_last_run'] = now
data['_splay'] = None
if '_seconds' in data:
if self.standalone:
data['_next_fire_time'] = now + datetime.timedelta(seconds=data['_seconds'])
elif '_skipped' in data and data['_skipped']:
data['_next_fire_time'] = now + datetime.timedelta(seconds=data['_seconds'])
elif run:
data['_next_fire_time'] = now + datetime.timedelta(seconds=data['_seconds']) | [
"def",
"eval",
"(",
"self",
",",
"now",
"=",
"None",
")",
":",
"log",
".",
"trace",
"(",
"'==== evaluating schedule now %s ====='",
",",
"now",
")",
"loop_interval",
"=",
"self",
".",
"opts",
"[",
"'loop_interval'",
"]",
"if",
"not",
"isinstance",
"(",
"lo... | 40.53121 | 19.132484 |
def dpar(self, cl=1):
    """Return a dpar-style executable assignment for this parameter.

    Default is to write the CL version of the code; if the ``cl``
    parameter is false, Python executable code is written instead
    (an empty value is rendered as ``None`` so the assignment stays
    valid Python).
    """
    value_str = self.toString(self.value, quoted=1)
    # Python has no implicit empty value, so substitute None there.
    if not cl and value_str == "":
        value_str = "None"
    return "%s = %s" % (self.name, value_str)
"def",
"dpar",
"(",
"self",
",",
"cl",
"=",
"1",
")",
":",
"sval",
"=",
"self",
".",
"toString",
"(",
"self",
".",
"value",
",",
"quoted",
"=",
"1",
")",
"if",
"not",
"cl",
":",
"if",
"sval",
"==",
"\"\"",
":",
"sval",
"=",
"\"None\"",
"s",
"... | 34.545455 | 14.090909 |
def load_modules_from_python(self, route_list):
    """Load modules from the native python source."""
    for name, modpath in route_list:
        # A "path:attr" spec selects an attribute inside the module;
        # a bare path loads the module itself (attr stays None).
        path, sep, attr = modpath.partition(':')
        if not sep:
            attr = None
        self.commands[name] = ModuleLoader(path, attr=attr)
"def",
"load_modules_from_python",
"(",
"self",
",",
"route_list",
")",
":",
"for",
"name",
",",
"modpath",
"in",
"route_list",
":",
"if",
"':'",
"in",
"modpath",
":",
"path",
",",
"attr",
"=",
"modpath",
".",
"split",
"(",
"':'",
",",
"1",
")",
"else"... | 43.25 | 9.375 |
def get_parent_dir(name):
    """Get the parent directory of a filename.

    Returns the grandparent path of *name*; when that is empty (the
    name has fewer than two path components) the absolute path of the
    current directory is returned instead.
    """
    grandparent = os.path.dirname(os.path.dirname(name))
    return grandparent if grandparent else os.path.abspath('.')
"def",
"get_parent_dir",
"(",
"name",
")",
":",
"parent_dir",
"=",
"os",
".",
"path",
".",
"dirname",
"(",
"os",
".",
"path",
".",
"dirname",
"(",
"name",
")",
")",
"if",
"parent_dir",
":",
"return",
"parent_dir",
"return",
"os",
".",
"path",
".",
"a... | 33.833333 | 12.666667 |
def get_machine_group_applied_configs(self, project_name, group_name):
    """Get the logtail config names applied in a machine group.

    Unsuccessful operation will cause a LogException.

    :type project_name: string
    :param project_name: the Project name

    :type group_name: string
    :param group_name: the group name

    :return: GetMachineGroupAppliedConfigResponse

    :raise: LogException
    """
    resource = "/machinegroups/" + group_name + "/configs"
    # No query parameters or extra headers are needed for this call.
    body, resp_header = self._send("GET", project_name, None, resource, {}, {})
    return GetMachineGroupAppliedConfigResponse(body, resp_header)
"def",
"get_machine_group_applied_configs",
"(",
"self",
",",
"project_name",
",",
"group_name",
")",
":",
"headers",
"=",
"{",
"}",
"params",
"=",
"{",
"}",
"resource",
"=",
"\"/machinegroups/\"",
"+",
"group_name",
"+",
"\"/configs\"",
"(",
"resp",
",",
"hea... | 36.35 | 21.25 |
def get_hash(path, hash_alg="sha256"):
    """Get the hash of the file at ``path``.

    I'd love to make this async, but evidently file i/o is always ready

    Args:
        path (str): the path to the file to hash.
        hash_alg (str, optional): the algorithm to use. Defaults to 'sha256'.

    Returns:
        str: the hexdigest of the hash.
    """
    hasher = hashlib.new(hash_alg)
    with open(path, "rb") as fobj:
        # Read in fixed-size chunks so large files never fully load into memory.
        while True:
            chunk = fobj.read(4096)
            if not chunk:
                break
            hasher.update(chunk)
    return hasher.hexdigest()
"def",
"get_hash",
"(",
"path",
",",
"hash_alg",
"=",
"\"sha256\"",
")",
":",
"h",
"=",
"hashlib",
".",
"new",
"(",
"hash_alg",
")",
"with",
"open",
"(",
"path",
",",
"\"rb\"",
")",
"as",
"f",
":",
"for",
"chunk",
"in",
"iter",
"(",
"functools",
".... | 29 | 20.722222 |
def create(self, params=None, headers=None):
    """Create a creditor bank account.

    Creates a new creditor bank account object.

    Args:
      params (dict, optional): Request body.

    Returns:
      CreditorBankAccount
    """
    path = '/creditor_bank_accounts'
    if params is not None:
        params = {self._envelope_key(): params}

    try:
        response = self._perform_request('POST', path, params, headers,
                                         retry_failures=True)
    except errors.IdempotentCreationConflictError as err:
        # An identical creation already succeeded (idempotency key reuse);
        # fetch and return the previously created resource instead of failing.
        return self.get(identity=err.conflicting_resource_id,
                        params=params,
                        headers=headers)
    return self._resource_for(response)
"def",
"create",
"(",
"self",
",",
"params",
"=",
"None",
",",
"headers",
"=",
"None",
")",
":",
"path",
"=",
"'/creditor_bank_accounts'",
"if",
"params",
"is",
"not",
"None",
":",
"params",
"=",
"{",
"self",
".",
"_envelope_key",
"(",
")",
":",
"param... | 33.916667 | 18.458333 |
def save_xml(self, doc, element):
    '''Save this configuration set into an xml.dom.Element object.'''
    element.setAttributeNS(RTS_NS, RTS_NS_S + 'id', self.id)
    # Each configuration datum becomes its own ConfigurationData child.
    for config_datum in self._config_data:
        child = doc.createElementNS(RTS_NS,
                                    RTS_NS_S + 'ConfigurationData')
        config_datum.save_xml(doc, child)
        element.appendChild(child)
"def",
"save_xml",
"(",
"self",
",",
"doc",
",",
"element",
")",
":",
"element",
".",
"setAttributeNS",
"(",
"RTS_NS",
",",
"RTS_NS_S",
"+",
"'id'",
",",
"self",
".",
"id",
")",
"for",
"c",
"in",
"self",
".",
"_config_data",
":",
"new_element",
"=",
... | 52.375 | 15.375 |
def find_endurance_tier_iops_per_gb(volume):
    """Find the tier for the given endurance volume (IOPS per GB)

    :param volume: The volume for which the tier level is desired
    :return: Returns a float value indicating the IOPS per GB for the volume
    """
    # Map each known storage tier level to its IOPS-per-GB figure.
    tier_iops = {
        "LOW_INTENSITY_TIER": 0.25,
        "READHEAVY_TIER": 2,
        "WRITEHEAVY_TIER": 4,
        "10_IOPS_PER_GB": 10,
    }
    tier = volume['storageTierLevel']
    try:
        return tier_iops[tier]
    except KeyError:
        raise ValueError("Could not find tier IOPS per GB for this volume")
"def",
"find_endurance_tier_iops_per_gb",
"(",
"volume",
")",
":",
"tier",
"=",
"volume",
"[",
"'storageTierLevel'",
"]",
"iops_per_gb",
"=",
"0.25",
"if",
"tier",
"==",
"\"LOW_INTENSITY_TIER\"",
":",
"iops_per_gb",
"=",
"0.25",
"elif",
"tier",
"==",
"\"READHEAVY_... | 31.285714 | 17.857143 |
def heuristic_search(graph, start, goal, heuristic):
    """
    A* search algorithm.

    A set of heuristics is available under C{graph.algorithms.heuristics}. User-created heuristics
    are allowed too.

    @type graph: graph, digraph
    @param graph: Graph

    @type start: node
    @param start: Start node

    @type goal: node
    @param goal: Goal node

    @type heuristic: function
    @param heuristic: Heuristic function

    @rtype: list
    @return: Optimized path from start to goal node
    """
    # The queue stores priority, node, cost to reach, and parent.
    queue = [ (0, start, 0, None) ]

    # This dictionary maps queued nodes to distance of discovered paths
    # and the computed heuristics to goal. We avoid to compute the heuristics
    # more than once and to insert too many times the node in the queue.
    g = {}

    # This maps explored nodes to parent closest to the start
    explored = {}

    while queue:
        # Pop the entry with the smallest f = g + h estimate (first tuple field).
        _, current, dist, parent = heappop(queue)

        if current == goal:
            # Goal reached: rebuild the route by walking parent links back to
            # the start, then reverse it into start-to-goal order.
            path = [current] + [ n for n in _reconstruct_path( parent, explored ) ]
            path.reverse()
            return path

        if current in explored:
            # Lazy deletion: a cheaper entry for this node was already
            # expanded, so skip this stale queue entry.
            continue
        explored[current] = parent

        for neighbor in graph[current]:
            if neighbor in explored:
                continue
            ncost = dist + graph.edge_weight((current, neighbor))
            if neighbor in g:
                qcost, h = g[neighbor]
                if qcost <= ncost:
                    continue
                # if ncost < qcost, a longer path to neighbor remains
                # g. Removing it would need to filter the whole
                # queue, it's better just to leave it there and ignore
                # it when we visit the node a second time.
            else:
                h = heuristic(neighbor, goal)
            g[neighbor] = ncost, h
            heappush(queue, (ncost + h, neighbor, ncost, current))

    # The queue drained without ever reaching the goal node.
    raise NodeUnreachable( start, goal )
"def",
"heuristic_search",
"(",
"graph",
",",
"start",
",",
"goal",
",",
"heuristic",
")",
":",
"# The queue stores priority, node, cost to reach, and parent.",
"queue",
"=",
"[",
"(",
"0",
",",
"start",
",",
"0",
",",
"None",
")",
"]",
"# This dictionary maps que... | 29.852941 | 22 |
def merge_pdfs(pdf_filepaths, out_filepath):
    """ Merge all the PDF files in `pdf_filepaths` in a new PDF file `out_filepath`.

    Parameters
    ----------
    pdf_filepaths: list of str
        Paths to PDF files.

    out_filepath: str
        Path to the result PDF file.

    Returns
    -------
    path: str
        The output file path.
    """
    merger = PdfFileMerger()
    # Keep the source files open until the merged output is written
    # (readers may access them lazily), then close every handle —
    # the original version leaked one file descriptor per input PDF.
    open_files = []
    try:
        for pdf_path in pdf_filepaths:
            pdf_file = open(pdf_path, 'rb')
            open_files.append(pdf_file)
            merger.append(PdfFileReader(pdf_file))
        merger.write(out_filepath)
    finally:
        for pdf_file in open_files:
            pdf_file.close()
    return out_filepath
"def",
"merge_pdfs",
"(",
"pdf_filepaths",
",",
"out_filepath",
")",
":",
"merger",
"=",
"PdfFileMerger",
"(",
")",
"for",
"pdf",
"in",
"pdf_filepaths",
":",
"merger",
".",
"append",
"(",
"PdfFileReader",
"(",
"open",
"(",
"pdf",
",",
"'rb'",
")",
")",
"... | 21.913043 | 20.043478 |
def emit(signal, *args, **kwargs):
    """
    Emits a single signal to call callbacks registered to respond to that signal.

    Optionally accepts args and kwargs that are passed directly to callbacks.

    :param signal: Signal to send
    """
    # Snapshot the receiver set first so callbacks registered or removed
    # during emission ("ninja signals") do not affect this dispatch pass.
    registered = set(receivers[signal])
    for callback in registered:
        _call(callback, args=args, kwargs=kwargs)
"def",
"emit",
"(",
"signal",
",",
"*",
"args",
",",
"*",
"*",
"kwargs",
")",
":",
"for",
"callback",
"in",
"set",
"(",
"receivers",
"[",
"signal",
"]",
")",
":",
"# Make a copy in case of any ninja signals",
"_call",
"(",
"callback",
",",
"args",
"=",
"... | 41.666667 | 20.777778 |
def mcp_als(X, rank, mask, random_state=None, init='randn', **options):
    """Fits CP Decomposition with missing data using Alternating Least Squares (ALS).

    Parameters
    ----------
    X : (I_1, ..., I_N) array_like
        A tensor with ``X.ndim >= 3``.
    rank : integer
        The `rank` sets the number of components to be computed.
    mask : (I_1, ..., I_N) array_like
        A binary tensor with the same shape as ``X``. All entries equal to zero
        correspond to held out or missing data in ``X``. All entries equal to
        one correspond to observed entries in ``X`` and the decomposition is
        fit to these datapoints.
    random_state : integer, ``RandomState``, or ``None``, optional (default ``None``)
        If integer, sets the seed of the random number generator;
        If RandomState instance, random_state is the random number generator;
        If None, use the RandomState instance used by ``numpy.random``.
    init : str, or KTensor, optional (default ``'randn'``).
        Specifies initial guess for KTensor factor matrices.
        If ``'randn'``, Gaussian random numbers are used to initialize.
        If ``'rand'``, uniform random numbers are used to initialize.
        If KTensor instance, a copy is made to initialize the optimization.
    options : dict, specifying fitting options.
        tol : float, optional (default ``tol=1E-5``)
            Stopping tolerance for reconstruction error.
        max_iter : integer, optional (default ``max_iter = 500``)
            Maximum number of iterations to perform before exiting.
        min_iter : integer, optional (default ``min_iter = 1``)
            Minimum number of iterations to perform before exiting.
        max_time : integer, optional (default ``max_time = np.inf``)
            Maximum computational time before exiting.
        verbose : bool ``{'True', 'False'}``, optional (default ``verbose=True``)
            Display progress.

    Returns
    -------
    result : FitResult instance
        Object which holds the fitted results. It provides the factor matrices
        in form of a KTensor, ``result.factors``.

    Notes
    -----
    Fitting CP decompositions with missing data can be exploited to perform
    cross-validation.

    References
    ----------
    Williams, A. H.
    "Solving Least-Squares Regression with Missing Data."
    http://alexhwilliams.info/itsneuronalblog/2018/02/26/censored-lstsq/
    """

    # Check inputs.
    optim_utils._check_cpd_inputs(X, rank)

    # Initialize problem.
    U, _ = optim_utils._get_initial_ktensor(init, X, rank, random_state, scale_norm=False)
    result = FitResult(U, 'MCP_ALS', **options)

    # Norm of the observed entries only (mask zeroes out held-out data);
    # used below to normalize the reconstruction objective.
    normX = np.linalg.norm((X * mask))

    # Main optimization loop.
    while result.still_optimizing:

        # Iterate over each tensor mode.
        for n in range(X.ndim):

            # i) Normalize factors to prevent singularities.
            U.rebalance()

            # ii) Unfold data and mask along the nth mode.
            unf = unfold(X, n)    # i_n x N
            m = unfold(mask, n)   # i_n x N

            # iii) Form Khatri-Rao product of factors matrices.
            components = [U[j] for j in range(X.ndim) if j != n]
            krt = khatri_rao(components).T  # N x r

            # iv) Broadcasted solve of linear systems.
            # Left hand side of equations, R x R x X.shape[n]
            # Right hand side of equations, X.shape[n] x R x 1
            lhs_stack = np.matmul(m[:, None, :] * krt[None, :, :], krt.T[None, :, :])
            rhs_stack = np.dot(unf * m, krt.T)[:, :, None]

            # vi) Update factor. Each slice of the batched np.linalg.solve is
            # one row's masked least-squares problem.
            U[n] = np.linalg.solve(lhs_stack, rhs_stack).reshape(X.shape[n], rank)

        # ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
        # Update the optimization result, checks for convergence.
        # ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~

        # Compute objective function
        # grams *= U[-1].T.dot(U[-1])
        # obj = np.sqrt(np.sum(grams) - 2*sci.sum(p*U[-1]) + normX**2) / normX
        # NOTE(review): ``linalg`` is presumably scipy.linalg imported elsewhere
        # in this module — confirm.
        obj = linalg.norm(mask * (U.full() - X)) / normX

        # Update result
        result.update(obj)

    # Finalize and return the optimization result.
    return result.finalize()
"def",
"mcp_als",
"(",
"X",
",",
"rank",
",",
"mask",
",",
"random_state",
"=",
"None",
",",
"init",
"=",
"'randn'",
",",
"*",
"*",
"options",
")",
":",
"# Check inputs.",
"optim_utils",
".",
"_check_cpd_inputs",
"(",
"X",
",",
"rank",
")",
"# Initialize... | 37.348214 | 25.491071 |
def com_google_fonts_check_name_familyname_first_char(ttFont):
  """Make sure family name does not begin with a digit.

  Font family names which start with a numeral are often not
  discoverable in Windows applications.
  """
  from fontbakery.utils import get_name_entry_strings
  found_digit = False
  for familyname in get_name_entry_strings(ttFont, NameID.FONT_FAMILY_NAME):
    # Only ASCII digits 0-9 are rejected, matching the original check.
    if familyname[0] in "0123456789":
      found_digit = True
      yield FAIL, ("Font family name '{}'"
                   " begins with a digit!").format(familyname)
  if found_digit is False:
    yield PASS, "Font family name first character is not a digit."
"def",
"com_google_fonts_check_name_familyname_first_char",
"(",
"ttFont",
")",
":",
"from",
"fontbakery",
".",
"utils",
"import",
"get_name_entry_strings",
"failed",
"=",
"False",
"for",
"familyname",
"in",
"get_name_entry_strings",
"(",
"ttFont",
",",
"NameID",
".",
... | 40.5 | 16.5 |
def _candidate_tempdir_list():
"""Generate a list of candidate temporary directories which
_get_default_tempdir will try."""
dirlist = []
# First, try the environment.
for envname in 'TMPDIR', 'TEMP', 'TMP':
dirname = _os.getenv(envname)
if dirname: dirlist.append(dirname)
# Failing that, try OS-specific locations.
if _os.name == 'nt':
dirlist.extend([ r'c:\temp', r'c:\tmp', r'\temp', r'\tmp' ])
else:
dirlist.extend([ '/tmp', '/var/tmp', '/usr/tmp' ])
# As a last resort, the current directory.
try:
dirlist.append(_os.getcwd())
except (AttributeError, OSError):
dirlist.append(_os.curdir)
return dirlist | [
"def",
"_candidate_tempdir_list",
"(",
")",
":",
"dirlist",
"=",
"[",
"]",
"# First, try the environment.",
"for",
"envname",
"in",
"'TMPDIR'",
",",
"'TEMP'",
",",
"'TMP'",
":",
"dirname",
"=",
"_os",
".",
"getenv",
"(",
"envname",
")",
"if",
"dirname",
":",... | 28.583333 | 17.583333 |
def apply_transformation(self, structure, return_ranked_list=False):
"""
Args:
structure (Structure): Input structure to dope
Returns:
[{"structure": Structure, "energy": float}]
"""
comp = structure.composition
logger.info("Composition: %s" % comp)
for sp in comp:
try:
sp.oxi_state
except AttributeError:
analyzer = BVAnalyzer()
structure = analyzer.get_oxi_state_decorated_structure(
structure)
comp = structure.composition
break
ox = self.dopant.oxi_state
radius = self.dopant.ionic_radius
compatible_species = [
sp for sp in comp if sp.oxi_state == ox and
abs(sp.ionic_radius / radius - 1) < self.ionic_radius_tol]
if (not compatible_species) and self.alio_tol:
# We only consider aliovalent doping if there are no compatible
# isovalent species.
compatible_species = [
sp for sp in comp
if abs(sp.oxi_state - ox) <= self.alio_tol and
abs(sp.ionic_radius / radius - 1) < self.ionic_radius_tol and
sp.oxi_state * ox >= 0]
if self.allowed_doping_species is not None:
# Only keep allowed doping species.
compatible_species = [
sp for sp in compatible_species
if sp in [get_el_sp(s) for s in self.allowed_doping_species]]
logger.info("Compatible species: %s" % compatible_species)
lengths = structure.lattice.abc
scaling = [max(1, int(round(math.ceil(self.min_length / x))))
for x in lengths]
logger.info("Lengths are %s" % str(lengths))
logger.info("Scaling = %s" % str(scaling))
all_structures = []
t = EnumerateStructureTransformation(**self.kwargs)
for sp in compatible_species:
supercell = structure * scaling
nsp = supercell.composition[sp]
if sp.oxi_state == ox:
supercell.replace_species({sp: {sp: (nsp - 1) / nsp,
self.dopant: 1 / nsp}})
logger.info("Doping %s for %s at level %.3f" % (
sp, self.dopant, 1 / nsp))
elif self.codopant:
codopant = _find_codopant(sp, 2 * sp.oxi_state - ox)
supercell.replace_species({sp: {sp: (nsp - 2) / nsp,
self.dopant: 1 / nsp,
codopant: 1 / nsp}})
logger.info("Doping %s for %s + %s at level %.3f" % (
sp, self.dopant, codopant, 1 / nsp))
elif abs(sp.oxi_state) < abs(ox):
# Strategy: replace the target species with a
# combination of dopant and vacancy.
# We will choose the lowest oxidation state species as a
# vacancy compensation species as it is likely to be lower in
# energy
sp_to_remove = min([s for s in comp if s.oxi_state * ox > 0],
key=lambda ss: abs(ss.oxi_state))
if sp_to_remove == sp:
common_charge = lcm(int(abs(sp.oxi_state)), int(abs(ox)))
ndopant = common_charge / abs(ox)
nsp_to_remove = common_charge / abs(sp.oxi_state)
logger.info("Doping %d %s with %d %s." %
(nsp_to_remove, sp, ndopant, self.dopant))
supercell.replace_species(
{sp: {sp: (nsp - nsp_to_remove) / nsp,
self.dopant: ndopant / nsp}})
else:
ox_diff = int(abs(round(sp.oxi_state - ox)))
vac_ox = int(abs(sp_to_remove.oxi_state))
common_charge = lcm(vac_ox, ox_diff)
ndopant = common_charge / ox_diff
nx_to_remove = common_charge / vac_ox
nx = supercell.composition[sp_to_remove]
logger.info("Doping %d %s with %s and removing %d %s." %
(ndopant, sp, self.dopant,
nx_to_remove, sp_to_remove))
supercell.replace_species(
{sp: {sp: (nsp - ndopant) / nsp,
self.dopant: ndopant / nsp},
sp_to_remove: {
sp_to_remove: (nx - nx_to_remove) / nx}})
elif abs(sp.oxi_state) > abs(ox):
# Strategy: replace the target species with dopant and also
# remove some opposite charged species for charge neutrality
if ox > 0:
sp_to_remove = max(supercell.composition.keys(),
key=lambda el: el.X)
else:
sp_to_remove = min(supercell.composition.keys(),
key=lambda el: el.X)
# Confirm species are of opposite oxidation states.
assert sp_to_remove.oxi_state * sp.oxi_state < 0
ox_diff = int(abs(round(sp.oxi_state - ox)))
anion_ox = int(abs(sp_to_remove.oxi_state))
nx = supercell.composition[sp_to_remove]
common_charge = lcm(anion_ox, ox_diff)
ndopant = common_charge / ox_diff
nx_to_remove = common_charge / anion_ox
logger.info("Doping %d %s with %s and removing %d %s." %
(ndopant, sp, self.dopant,
nx_to_remove, sp_to_remove))
supercell.replace_species(
{sp: {sp: (nsp - ndopant) / nsp,
self.dopant: ndopant / nsp},
sp_to_remove: {sp_to_remove: (nx - nx_to_remove) / nx}})
ss = t.apply_transformation(
supercell, return_ranked_list=self.max_structures_per_enum)
logger.info("%s distinct structures" % len(ss))
all_structures.extend(ss)
logger.info("Total %s doped structures" % len(all_structures))
if return_ranked_list:
return all_structures[:return_ranked_list]
return all_structures[0]["structure"] | [
"def",
"apply_transformation",
"(",
"self",
",",
"structure",
",",
"return_ranked_list",
"=",
"False",
")",
":",
"comp",
"=",
"structure",
".",
"composition",
"logger",
".",
"info",
"(",
"\"Composition: %s\"",
"%",
"comp",
")",
"for",
"sp",
"in",
"comp",
":"... | 46.188406 | 19.753623 |
def tsallis(alphas, Ks, dim, required, clamp=True, to_self=False):
r'''
Estimate the Tsallis-alpha divergence between distributions, based on kNN
distances: (\int p^alpha q^(1-\alpha) - 1) / (\alpha - 1)
If clamp (the default), enforces the estimate is nonnegative.
Returns an array of shape (num_alphas, num_Ks).
'''
alphas = np.reshape(alphas, (-1, 1))
alpha_est = required
est = alpha_est - 1
est /= alphas - 1
if clamp:
np.maximum(est, 0, out=est)
return est | [
"def",
"tsallis",
"(",
"alphas",
",",
"Ks",
",",
"dim",
",",
"required",
",",
"clamp",
"=",
"True",
",",
"to_self",
"=",
"False",
")",
":",
"alphas",
"=",
"np",
".",
"reshape",
"(",
"alphas",
",",
"(",
"-",
"1",
",",
"1",
")",
")",
"alpha_est",
... | 29.764706 | 24.470588 |
def get_lr(lr, epoch, steps, factor):
"""Get learning rate based on schedule."""
for s in steps:
if epoch >= s:
lr *= factor
return lr | [
"def",
"get_lr",
"(",
"lr",
",",
"epoch",
",",
"steps",
",",
"factor",
")",
":",
"for",
"s",
"in",
"steps",
":",
"if",
"epoch",
">=",
"s",
":",
"lr",
"*=",
"factor",
"return",
"lr"
] | 26.833333 | 14.166667 |
def validate_ipv6(self, id_vlan):
"""Validates ACL - IPv6 of VLAN from its identifier.
Assigns 1 to 'acl_valida_v6'.
:param id_vlan: Identifier of the Vlan. Integer value and greater than zero.
:return: None
:raise InvalidParameterError: Vlan identifier is null and invalid.
:raise VlanNaoExisteError: Vlan not registered.
:raise DataBaseError: Networkapi failed to access the database.
:raise XMLError: Networkapi failed to generate the XML response.
"""
if not is_valid_int_param(id_vlan):
raise InvalidParameterError(
u'The identifier of Vlan is invalid or was not informed.')
url = 'vlan/' + str(id_vlan) + '/validate/' + IP_VERSION.IPv6[0] + '/'
code, xml = self.submit(None, 'PUT', url)
return self.response(code, xml) | [
"def",
"validate_ipv6",
"(",
"self",
",",
"id_vlan",
")",
":",
"if",
"not",
"is_valid_int_param",
"(",
"id_vlan",
")",
":",
"raise",
"InvalidParameterError",
"(",
"u'The identifier of Vlan is invalid or was not informed.'",
")",
"url",
"=",
"'vlan/'",
"+",
"str",
"(... | 35.041667 | 24.583333 |
def flatten(self, D):
'''flatten a nested dictionary D to a flat dictionary
nested keys are separated by '.'
'''
if not isinstance(D, dict):
return D
result = {}
for k,v in D.items():
if isinstance(v, dict):
for _k,_v in self.flatten(v).items():
result['.'.join([k,_k])] = _v
else:
result[k] = v
return result | [
"def",
"flatten",
"(",
"self",
",",
"D",
")",
":",
"if",
"not",
"isinstance",
"(",
"D",
",",
"dict",
")",
":",
"return",
"D",
"result",
"=",
"{",
"}",
"for",
"k",
",",
"v",
"in",
"D",
".",
"items",
"(",
")",
":",
"if",
"isinstance",
"(",
"v",... | 25.882353 | 19.176471 |
def flushOutBoxes(self) -> None:
"""
Clear the outBoxes and transmit batched messages to remotes.
"""
removedRemotes = []
for rid, msgs in self.outBoxes.items():
try:
dest = self.remotes[rid].name
except KeyError:
removedRemotes.append(rid)
continue
if msgs:
if self._should_batch(msgs):
logger.trace(
"{} batching {} msgs to {} into fewer transmissions".
format(self, len(msgs), dest))
logger.trace(" messages: {}".format(msgs))
batches = split_messages_on_batches(list(msgs),
self._make_batch,
self._test_batch_len,
)
msgs.clear()
if batches:
for batch, size in batches:
logger.trace("{} sending payload to {}: {}".format(
self, dest, batch))
self.metrics.add_event(MetricsName.TRANSPORT_BATCH_SIZE, size)
# Setting timeout to never expire
self.transmit(
batch,
rid,
timeout=self.messageTimeout,
serialized=True)
else:
logger.error("{} cannot create batch(es) for {}".format(self, dest))
else:
while msgs:
msg = msgs.popleft()
logger.trace(
"{} sending msg {} to {}".format(self, msg, dest))
self.metrics.add_event(MetricsName.TRANSPORT_BATCH_SIZE, 1)
# Setting timeout to never expire
self.transmit(msg, rid, timeout=self.messageTimeout,
serialized=True)
for rid in removedRemotes:
logger.warning("{}{} has removed rid {}"
.format(CONNECTION_PREFIX, self,
z85_to_friendly(rid)),
extra={"cli": False})
msgs = self.outBoxes[rid]
if msgs:
self.discard(msgs,
"{}rid {} no longer available"
.format(CONNECTION_PREFIX,
z85_to_friendly(rid)),
logMethod=logger.debug)
del self.outBoxes[rid] | [
"def",
"flushOutBoxes",
"(",
"self",
")",
"->",
"None",
":",
"removedRemotes",
"=",
"[",
"]",
"for",
"rid",
",",
"msgs",
"in",
"self",
".",
"outBoxes",
".",
"items",
"(",
")",
":",
"try",
":",
"dest",
"=",
"self",
".",
"remotes",
"[",
"rid",
"]",
... | 47.155172 | 16.775862 |
def get_asset_content_lookup_session_for_repository(self, repository_id=None):
"""Gets the ``OsidSession`` associated with the asset content lookup service for
the given repository.
arg: repository_id (osid.id.Id): the ``Id`` of the repository
return: (osid.repository.AssetLookupSession) - the new
``AssetLookupSession``
raise: NotFound - ``repository_id`` not found
raise: NullArgument - ``repository_id`` is ``null``
raise: OperationFailed - ``unable to complete request``
raise: Unimplemented - ``supports_asset_lookup()`` or
``supports_visible_federation()`` is ``false``
*compliance: optional -- This method must be implemented if
``supports_asset_lookup()`` and
``supports_visible_federation()`` are ``true``.*
"""
return AssetContentLookupSession(
self._provider_manager.get_asset_content_lookup_session_for_repository(repository_id),
self._config_map) | [
"def",
"get_asset_content_lookup_session_for_repository",
"(",
"self",
",",
"repository_id",
"=",
"None",
")",
":",
"return",
"AssetContentLookupSession",
"(",
"self",
".",
"_provider_manager",
".",
"get_asset_content_lookup_session_for_repository",
"(",
"repository_id",
")",... | 50.5 | 20.05 |
def bulk_copy(self, ids):
"""Bulk copy a set of configs.
:param ids: Int list of config IDs.
:return: :class:`configs.Config <configs.Config>` list
"""
schema = self.GET_SCHEMA
return self.service.bulk_copy(self.base, self.RESOURCE, ids, schema) | [
"def",
"bulk_copy",
"(",
"self",
",",
"ids",
")",
":",
"schema",
"=",
"self",
".",
"GET_SCHEMA",
"return",
"self",
".",
"service",
".",
"bulk_copy",
"(",
"self",
".",
"base",
",",
"self",
".",
"RESOURCE",
",",
"ids",
",",
"schema",
")"
] | 35.875 | 15.5 |
def p_file_project(self, project):
"""Helper function for parsing doap:project name and homepage.
and setting them using the file builder.
"""
for _, _, name in self.graph.triples((project, self.doap_namespace['name'], None)):
self.builder.set_file_atrificat_of_project(self.doc, 'name', six.text_type(name))
for _, _, homepage in self.graph.triples(
(project, self.doap_namespace['homepage'], None)):
self.builder.set_file_atrificat_of_project(self.doc, 'home', six.text_type(homepage)) | [
"def",
"p_file_project",
"(",
"self",
",",
"project",
")",
":",
"for",
"_",
",",
"_",
",",
"name",
"in",
"self",
".",
"graph",
".",
"triples",
"(",
"(",
"project",
",",
"self",
".",
"doap_namespace",
"[",
"'name'",
"]",
",",
"None",
")",
")",
":",
... | 61.666667 | 22.888889 |
def convert_linear_problem_to_dual(model, sloppy=False, infinity=None, maintain_standard_form=True, prefix="dual_", dual_model=None): # NOQA
"""
A mathematical optimization problem can be viewed as a primal and a dual problem. If the primal problem is
a minimization problem the dual is a maximization problem, and the optimal value of the dual is a lower bound of
the optimal value of the primal.
For linear problems, strong duality holds, which means that the optimal values of the primal and dual are equal
(duality gap = 0).
This functions takes an optlang Model representing a primal linear problem and returns a new Model representing
the dual optimization problem. The provided model must have a linear objective, linear constraints and only
continuous variables. Furthermore, the problem must be in standard form, i.e. all variables should be non-negative.
Both minimization and maximization problems are allowed. The objective direction of the dual will always be
opposite of the primal.
Attributes:
----------
model: optlang.interface.Model
The primal problem to be dualized
sloppy: Boolean (default False)
If True, linearity, variable types and standard form will not be checked. Only use if you know the primal is
valid
infinity: Numeric or None
If not None this value will be used as bounds instead of unbounded variables.
maintain_standard_form: Boolean (default True)
If False the returned dual problem will not be in standard form, but will have fewer variables and/or constraints
prefix: str
The string that will be prepended to all variable and constraint names in the returned dual problem.
dual_model: optlang.interface.Model or None (default)
If not None, the dual variables and constraints will be added to this model. Note the objective will also be
set to the dual objective. If None a new model will be created.
Returns:
----------
dual_problem: optlang.interface.Model (same solver as the primal)
"""
if dual_model is None:
dual_model = model.interface.Model()
maximization = model.objective.direction == "max"
if infinity is not None:
neg_infinity = -infinity
else:
neg_infinity = None
if maximization:
sign = 1
else:
sign = -1
coefficients = {}
dual_objective = {}
# Add dual variables from primal constraints:
for constraint in model.constraints:
if constraint.expression == 0:
continue # Skip empty constraint
if not (sloppy or constraint.is_Linear):
raise ValueError("Non-linear problems are not supported: " + str(constraint))
if constraint.lb is None and constraint.ub is None:
continue # Skip free constraint
if not maintain_standard_form and constraint.lb == constraint.ub:
const_var = model.interface.Variable(prefix + constraint.name + "_constraint", lb=neg_infinity, ub=infinity)
dual_model.add(const_var)
if constraint.lb != 0:
dual_objective[const_var] = sign * constraint.lb
for variable, coef in constraint.expression.as_coefficients_dict().items():
if variable == 1: # pragma: no cover # For symengine
continue
coefficients.setdefault(variable.name, {})[const_var] = sign * coef
else:
if constraint.lb is not None:
lb_var = model.interface.Variable(prefix + constraint.name + "_constraint_lb", lb=0, ub=infinity)
dual_model.add(lb_var)
if constraint.lb != 0:
dual_objective[lb_var] = -sign * constraint.lb
if constraint.ub is not None:
ub_var = model.interface.Variable(prefix + constraint.name + "_constraint_ub", lb=0, ub=infinity)
dual_model.add(ub_var)
if constraint.ub != 0:
dual_objective[ub_var] = sign * constraint.ub
assert constraint.expression.is_Add or constraint.expression.is_Mul, \
"Invalid expression type: " + str(type(constraint.expression))
if constraint.expression.is_Add:
coefficients_dict = constraint.expression.as_coefficients_dict()
else: # constraint.expression.is_Mul:
coefficients_dict = {constraint.expression.args[1]: constraint.expression.args[0]}
for variable, coef in coefficients_dict.items():
if variable == 1: # pragma: no cover # For symengine
continue
if constraint.lb is not None:
coefficients.setdefault(variable.name, {})[lb_var] = -sign * coef
if constraint.ub is not None:
coefficients.setdefault(variable.name, {})[ub_var] = sign * coef
# Add dual variables from primal bounds
for variable in model.variables:
if not (sloppy or variable.type == "continuous"):
raise ValueError("Integer variables are not supported: " + str(variable))
if not sloppy and (variable.lb is None or variable.lb < 0):
raise ValueError("Problem is not in standard form (" + variable.name + " can be negative)")
if variable.lb > 0:
bound_var = model.interface.Variable(prefix + variable.name + "_lb", lb=0, ub=infinity)
dual_model.add(bound_var)
coefficients.setdefault(variable.name, {})[bound_var] = -sign * 1
dual_objective[bound_var] = -sign * variable.lb
if variable.ub is not None:
bound_var = model.interface.Variable(prefix + variable.name + "_ub", lb=0, ub=infinity)
dual_model.add(bound_var)
coefficients.setdefault(variable.name, {})[bound_var] = sign * 1
if variable.ub != 0:
dual_objective[bound_var] = sign * variable.ub
# Add dual constraints from primal objective
primal_objective_dict = model.objective.expression.as_coefficients_dict()
for variable in model.variables:
expr = optlang.symbolics.add([(coef * dual_var) for dual_var, coef in coefficients[variable.name].items()])
obj_coef = primal_objective_dict[variable]
if maximization:
const = model.interface.Constraint(expr, lb=obj_coef, name=prefix + variable.name)
else:
const = model.interface.Constraint(expr, ub=obj_coef, name=prefix + variable.name)
dual_model.add(const)
# Make dual objective
expr = optlang.symbolics.add([(coef * dual_var) for dual_var, coef in dual_objective.items() if coef != 0])
if maximization:
objective = model.interface.Objective(expr, direction="min")
else:
objective = model.interface.Objective(expr, direction="max")
dual_model.objective = objective
return dual_model | [
"def",
"convert_linear_problem_to_dual",
"(",
"model",
",",
"sloppy",
"=",
"False",
",",
"infinity",
"=",
"None",
",",
"maintain_standard_form",
"=",
"True",
",",
"prefix",
"=",
"\"dual_\"",
",",
"dual_model",
"=",
"None",
")",
":",
"# NOQA",
"if",
"dual_model... | 50.422222 | 29.266667 |
def text(cls, text, *, resize=None, single_use=None, selective=None):
"""
Creates a new button with the given text.
Args:
resize (`bool`):
If present, the entire keyboard will be reconfigured to
be resized and be smaller if there are not many buttons.
single_use (`bool`):
If present, the entire keyboard will be reconfigured to
be usable only once before it hides itself.
selective (`bool`):
If present, the entire keyboard will be reconfigured to
be "selective". The keyboard will be shown only to specific
users. It will target users that are @mentioned in the text
of the message or to the sender of the message you reply to.
"""
return cls(types.KeyboardButton(text),
resize=resize, single_use=single_use, selective=selective) | [
"def",
"text",
"(",
"cls",
",",
"text",
",",
"*",
",",
"resize",
"=",
"None",
",",
"single_use",
"=",
"None",
",",
"selective",
"=",
"None",
")",
":",
"return",
"cls",
"(",
"types",
".",
"KeyboardButton",
"(",
"text",
")",
",",
"resize",
"=",
"resi... | 44.619048 | 24.142857 |
def _json_safe(cls, value):
"""Return a JSON safe value"""
# Date
if type(value) == date:
return str(value)
# Datetime
elif type(value) == datetime:
return value.strftime('%Y-%m-%d %H:%M:%S')
# Object Id
elif isinstance(value, ObjectId):
return str(value)
# Frame
elif isinstance(value, _BaseFrame):
return value.to_json_type()
# Lists
elif isinstance(value, (list, tuple)):
return [cls._json_safe(v) for v in value]
# Dictionaries
elif isinstance(value, dict):
return {k:cls._json_safe(v) for k, v in value.items()}
return value | [
"def",
"_json_safe",
"(",
"cls",
",",
"value",
")",
":",
"# Date",
"if",
"type",
"(",
"value",
")",
"==",
"date",
":",
"return",
"str",
"(",
"value",
")",
"# Datetime",
"elif",
"type",
"(",
"value",
")",
"==",
"datetime",
":",
"return",
"value",
".",... | 25.666667 | 18.925926 |
def ycoord(self):
"""The y coordinate :class:`xarray.Variable`"""
v = next(self.raw_data.psy.iter_base_variables)
return self.decoder.get_y(v, coords=self.data.coords) | [
"def",
"ycoord",
"(",
"self",
")",
":",
"v",
"=",
"next",
"(",
"self",
".",
"raw_data",
".",
"psy",
".",
"iter_base_variables",
")",
"return",
"self",
".",
"decoder",
".",
"get_y",
"(",
"v",
",",
"coords",
"=",
"self",
".",
"data",
".",
"coords",
"... | 47 | 14.75 |
def rank_targets(sample_frame, ref_targets, ref_sample):
"""Uses the geNorm algorithm to determine the most stably expressed
genes from amongst ref_targets in your sample.
See Vandesompele et al.'s 2002 Genome Biology paper for information about
the algorithm: http://dx.doi.org/10.1186/gb-2002-3-7-research0034
:param DataFrame sample_frame: A sample data frame.
:param iterable ref_targets: A sequence of targets from the Target column
of sample_frame to consider for ranking.
:param string ref_sample: The name of a sample from the Sample
column of sample_frame. It doesn't really matter what it is but it
should exist for every target.
:return: a sorted DataFrame with two columns, 'Target' and 'M' (the
relative stability; lower means more stable).
:rtype: DataFrame
"""
table = collect_expression(sample_frame, ref_targets, ref_sample)
all_samples = sample_frame['Sample'].unique()
t = table.groupby(['Sample', 'Target']).mean()
logt = log2(t)
ref_targets = set(ref_targets)
worst = []
worst_m = []
while len(ref_targets) - len(worst) > 1:
M = []
for test_target in ref_targets:
if test_target in worst: continue
Vs = []
for ref_target in ref_targets:
if ref_target == test_target or ref_target in worst: continue
A = logt.ix[zip(all_samples, repeat(test_target)), ref_target]
Vs.append(A.std())
M.append( (sum(Vs)/(len(ref_targets)-len(worst)-1), test_target) )
worst.append(max(M)[1])
worst_m.append(max(M)[0])
best = ref_targets - set(worst)
worst.reverse()
worst_m.reverse()
worst_m = [worst_m[0]] + worst_m
return pd.DataFrame({'Target': list(best) + worst, 'M': worst_m}, columns=['Target', 'M']) | [
"def",
"rank_targets",
"(",
"sample_frame",
",",
"ref_targets",
",",
"ref_sample",
")",
":",
"table",
"=",
"collect_expression",
"(",
"sample_frame",
",",
"ref_targets",
",",
"ref_sample",
")",
"all_samples",
"=",
"sample_frame",
"[",
"'Sample'",
"]",
".",
"uniq... | 43.380952 | 19.52381 |
def _convert_from_pandas(self, pdf, schema, timezone):
"""
Convert a pandas.DataFrame to list of records that can be used to make a DataFrame
:return list of records
"""
if timezone is not None:
from pyspark.sql.types import _check_series_convert_timestamps_tz_local
copied = False
if isinstance(schema, StructType):
for field in schema:
# TODO: handle nested timestamps, such as ArrayType(TimestampType())?
if isinstance(field.dataType, TimestampType):
s = _check_series_convert_timestamps_tz_local(pdf[field.name], timezone)
if s is not pdf[field.name]:
if not copied:
# Copy once if the series is modified to prevent the original
# Pandas DataFrame from being updated
pdf = pdf.copy()
copied = True
pdf[field.name] = s
else:
for column, series in pdf.iteritems():
s = _check_series_convert_timestamps_tz_local(series, timezone)
if s is not series:
if not copied:
# Copy once if the series is modified to prevent the original
# Pandas DataFrame from being updated
pdf = pdf.copy()
copied = True
pdf[column] = s
# Convert pandas.DataFrame to list of numpy records
np_records = pdf.to_records(index=False)
# Check if any columns need to be fixed for Spark to infer properly
if len(np_records) > 0:
record_dtype = self._get_numpy_record_dtype(np_records[0])
if record_dtype is not None:
return [r.astype(record_dtype).tolist() for r in np_records]
# Convert list of numpy records to python lists
return [r.tolist() for r in np_records] | [
"def",
"_convert_from_pandas",
"(",
"self",
",",
"pdf",
",",
"schema",
",",
"timezone",
")",
":",
"if",
"timezone",
"is",
"not",
"None",
":",
"from",
"pyspark",
".",
"sql",
".",
"types",
"import",
"_check_series_convert_timestamps_tz_local",
"copied",
"=",
"Fa... | 49.47619 | 19.904762 |
def __quarters(self, from_date=None):
"""Get a set of quarters with available items from a given index date.
:param from_date:
:return: list of `pandas.Period` corresponding to quarters
"""
s = Search(using=self._es_conn, index=self._es_index)
if from_date:
# Work around to solve conversion problem of '__' to '.' in field name
q = Q('range')
q.__setattr__(self._sort_on_field, {'gte': from_date})
s = s.filter(q)
# from:to parameters (=> from: 0, size: 0)
s = s[0:0]
s.aggs.bucket(self.TIMEFRAME, 'date_histogram', field=self._timeframe_field,
interval='quarter', min_doc_count=1)
response = s.execute()
quarters = []
for quarter in response.aggregations[self.TIMEFRAME].buckets:
period = pandas.Period(quarter.key_as_string, 'Q')
quarters.append(period)
return quarters | [
"def",
"__quarters",
"(",
"self",
",",
"from_date",
"=",
"None",
")",
":",
"s",
"=",
"Search",
"(",
"using",
"=",
"self",
".",
"_es_conn",
",",
"index",
"=",
"self",
".",
"_es_index",
")",
"if",
"from_date",
":",
"# Work around to solve conversion problem of... | 36.538462 | 22.115385 |
def Length(min=None, max=None, min_message="Must have a length of at least {min}", max_message="Must have a length of at most {max}"):
"""
Creates a validator that checks if the given value's length is in the
specified range, inclusive. (Returns the original value.)
See :func:`.Range`.
"""
validator = Range(min, max, min_message, max_message)
@wraps(Length)
def built(value):
if not hasattr(value, '__len__'):
raise Error("Does not have a length")
validator(len(value))
return value
return built | [
"def",
"Length",
"(",
"min",
"=",
"None",
",",
"max",
"=",
"None",
",",
"min_message",
"=",
"\"Must have a length of at least {min}\"",
",",
"max_message",
"=",
"\"Must have a length of at most {max}\"",
")",
":",
"validator",
"=",
"Range",
"(",
"min",
",",
"max",... | 37.066667 | 21.866667 |
def initializer(func):
"""
Automatically assigns the parameters.
http://stackoverflow.com/questions/1389180/python-automatically-initialize-instance-variables
>>> class process:
... @initializer
... def __init__(self, cmd, reachable=False, user='root'):
... pass
>>> p = process('halt', True)
>>> p.cmd, p.reachable, p.user
('halt', True, 'root')
"""
names, varargs, keywords, defaults = inspect.getargspec(func)
from functools import wraps
@wraps(func)
def wrapper(self, *args, **kargs):
#print("names", names, "defaults", defaults)
for name, arg in list(zip(names[1:], args)) + list(kargs.items()):
setattr(self, name, arg)
# Avoid TypeError: argument to reversed() must be a sequence
if defaults is not None:
for name, default in zip(reversed(names), reversed(defaults)):
if not hasattr(self, name):
setattr(self, name, default)
return func(self, *args, **kargs)
return wrapper | [
"def",
"initializer",
"(",
"func",
")",
":",
"names",
",",
"varargs",
",",
"keywords",
",",
"defaults",
"=",
"inspect",
".",
"getargspec",
"(",
"func",
")",
"from",
"functools",
"import",
"wraps",
"@",
"wraps",
"(",
"func",
")",
"def",
"wrapper",
"(",
... | 33.387097 | 19.258065 |
def _generic_callable(group_idx, a, size, fill_value, dtype=None,
func=lambda g: g, **kwargs):
"""groups a by inds, and then applies foo to each group in turn, placing
the results in an array."""
groups = _array(group_idx, a, size, ())
ret = np.full(size, fill_value, dtype=dtype or np.float64)
for i, grp in enumerate(groups):
if np.ndim(grp) == 1 and len(grp) > 0:
ret[i] = func(grp)
return ret | [
"def",
"_generic_callable",
"(",
"group_idx",
",",
"a",
",",
"size",
",",
"fill_value",
",",
"dtype",
"=",
"None",
",",
"func",
"=",
"lambda",
"g",
":",
"g",
",",
"*",
"*",
"kwargs",
")",
":",
"groups",
"=",
"_array",
"(",
"group_idx",
",",
"a",
",... | 41.181818 | 13.272727 |
def unpack(fmt, data, endian=None, target=None):
"""
Unpack the string (presumably packed by pack(fmt, ...)) according to the
given format. The actual unpacking is performed by ``struct.unpack``
but the byte order will be set according to the given `endian`, `target`
or byte order of the global target.
Args:
fmt(str): The format string.
data(bytes): The data to unpack.
endian(:class:`~pwnypack.target.Target.Endian`): Override the default
byte order. If ``None``, it will look at the byte order of
the ``target`` argument.
target(:class:`~pwnypack.target.Target`): Override the default byte
order. If ``None``, it will look at the byte order of
the global :data:`~pwnypack.target.target`.
Returns:
list: The unpacked values according to the format.
"""
endian = endian if endian is not None else target.endian if target is not None else pwnypack.target.target.endian
if fmt and fmt[0] not in '@=<>!':
if endian is pwnypack.target.Target.Endian.little:
fmt = '<' + fmt
elif endian is pwnypack.target.Target.Endian.big:
fmt = '>' + fmt
else:
raise NotImplementedError('Unsupported endianness: %s' % endian)
return struct.unpack(fmt, data) | [
"def",
"unpack",
"(",
"fmt",
",",
"data",
",",
"endian",
"=",
"None",
",",
"target",
"=",
"None",
")",
":",
"endian",
"=",
"endian",
"if",
"endian",
"is",
"not",
"None",
"else",
"target",
".",
"endian",
"if",
"target",
"is",
"not",
"None",
"else",
... | 43.5 | 22.3 |
def kwargs_from_keyword(from_kwargs,to_kwargs,keyword,clean_origin=True):
"""
Looks for keys of the format keyword_value.
And return a dictionary with {keyword:value} format
Parameters:
-----------
from_kwargs : dict
Original dictionary
to_kwargs : dict
Dictionary where the items will be appended
keyword : string
Keyword to look for in the orginal dictionary
clean_origin : bool
If True then the k,v pairs from the original
dictionary are deleted
"""
for k in list(from_kwargs.keys()):
if '{0}_'.format(keyword) in k:
to_kwargs[k.replace('{0}_'.format(keyword),'')]=from_kwargs[k]
if clean_origin:
del from_kwargs[k]
return to_kwargs | [
"def",
"kwargs_from_keyword",
"(",
"from_kwargs",
",",
"to_kwargs",
",",
"keyword",
",",
"clean_origin",
"=",
"True",
")",
":",
"for",
"k",
"in",
"list",
"(",
"from_kwargs",
".",
"keys",
"(",
")",
")",
":",
"if",
"'{0}_'",
".",
"format",
"(",
"keyword",
... | 28.652174 | 16.652174 |
def _tokenize(self, text):
"""Tokenize the text into a list of sentences with a list of words.
:param text: raw text
:return: tokenized text
:rtype : list
"""
sentences = []
tokens = []
for word in self._clean_accents(text).split(' '):
tokens.append(word)
if '.' in word:
sentences.append(tokens)
tokens = []
return sentences | [
"def",
"_tokenize",
"(",
"self",
",",
"text",
")",
":",
"sentences",
"=",
"[",
"]",
"tokens",
"=",
"[",
"]",
"for",
"word",
"in",
"self",
".",
"_clean_accents",
"(",
"text",
")",
".",
"split",
"(",
"' '",
")",
":",
"tokens",
".",
"append",
"(",
"... | 29.333333 | 13.333333 |
    def _get_efron_values_single(self, X, T, E, weights, beta):
        """
        Calculates the first and second order vector differentials, with respect to beta.
        Note that X, T, E are assumed to be sorted on T!
        A good explanation for Efron. Consider three of five subjects who fail at the time.
        As it is not known a priori that who is the first to fail, so one-third of
        (φ1 + φ2 + φ3) is adjusted from sum_j^{5} φj after one fails. Similarly two-third
        of (φ1 + φ2 + φ3) is adjusted after first two individuals fail, etc.
        From https://cran.r-project.org/web/packages/survival/survival.pdf:
        "Setting all weights to 2 for instance will give the same coefficient estimate but halve the variance. When
        the Efron approximation for ties (default) is employed replication of the data will not give exactly the same coefficients as the
        weights option, and in this case the weighted fit is arguably the correct one."
        Parameters
        ----------
        X: array
            (n,d) numpy array of observations.
        T: array
            (n) numpy array representing observed durations.
        E: array
            (n) numpy array representing death events.
        weights: array
            (n) an array representing weights per observation.
        beta: array
            (1, d) numpy array of coefficients.
        Returns
        -------
        hessian:
            (d, d) numpy array,
        gradient:
            (1, d) numpy array
        log_likelihood: float
        """
        n, d = X.shape
        hessian = np.zeros((d, d))
        gradient = np.zeros((d,))
        log_lik = 0
        # Init risk and tie sums to zero
        x_death_sum = np.zeros((d,))
        risk_phi, tie_phi = 0, 0
        risk_phi_x, tie_phi_x = np.zeros((d,)), np.zeros((d,))
        risk_phi_x_x, tie_phi_x_x = np.zeros((d, d)), np.zeros((d, d))
        # Init number of ties and weights
        weight_count = 0.0
        tied_death_counts = 0
        # per-subject weighted partial hazards w_i * exp(X_i . beta)
        scores = weights * np.exp(np.dot(X, beta))
        # Iterate backwards to utilize recursive relationship
        # (the risk set at time t is the suffix of the T-sorted data, so
        # walking from latest to earliest lets us accumulate sums instead
        # of recomputing them per event time).
        for i in range(n - 1, -1, -1):
            # Doing it like this to preserve shape
            ti = T[i]
            ei = E[i]
            xi = X[i]
            score = scores[i]
            w = weights[i]
            # Calculate phi values
            phi_i = score
            phi_x_i = phi_i * xi
            phi_x_x_i = np.outer(xi, phi_x_i)
            # Calculate sums of Risk set
            risk_phi = risk_phi + phi_i
            risk_phi_x = risk_phi_x + phi_x_i
            risk_phi_x_x = risk_phi_x_x + phi_x_x_i
            # Calculate sums of Ties, if this is an event
            if ei:
                x_death_sum = x_death_sum + w * xi
                tie_phi = tie_phi + phi_i
                tie_phi_x = tie_phi_x + phi_x_i
                tie_phi_x_x = tie_phi_x_x + phi_x_x_i
                # Keep track of count
                tied_death_counts += 1
                weight_count += w
            if i > 0 and T[i - 1] == ti:
                # There are more ties/members of the risk set
                continue
            elif tied_death_counts == 0:
                # Only censored with current time, move on
                continue
            # There was atleast one event and no more ties remain. Time to sum.
            #
            # This code is near identical to the _batch algorithm below. In fact, see _batch for comments.
            #
            weighted_average = weight_count / tied_death_counts
            if tied_death_counts > 1:
                # Efron tie correction: subtract an increasing fraction of
                # the tied subjects' phi sums from the risk-set denominator.
                increasing_proportion = np.arange(tied_death_counts) / tied_death_counts
                denom = 1.0 / (risk_phi - increasing_proportion * tie_phi)
                numer = risk_phi_x - np.outer(increasing_proportion, tie_phi_x)
                a1 = np.einsum("ab,i->ab", risk_phi_x_x, denom) - np.einsum(
                    "ab,i->ab", tie_phi_x_x, increasing_proportion * denom
                )
            else:
                denom = 1.0 / np.array([risk_phi])
                numer = risk_phi_x
                a1 = risk_phi_x_x * denom
            summand = numer * denom[:, None]
            a2 = summand.T.dot(summand)
            gradient = gradient + x_death_sum - weighted_average * summand.sum(0)
            log_lik = log_lik + np.dot(x_death_sum, beta) + weighted_average * np.log(denom).sum()
            hessian = hessian + weighted_average * (a2 - a1)
            # reset tie values
            tied_death_counts = 0
            weight_count = 0.0
            x_death_sum = np.zeros((d,))
            tie_phi = 0
            tie_phi_x = np.zeros((d,))
            tie_phi_x_x = np.zeros((d, d))
        return hessian, gradient, log_lik
"def",
"_get_efron_values_single",
"(",
"self",
",",
"X",
",",
"T",
",",
"E",
",",
"weights",
",",
"beta",
")",
":",
"n",
",",
"d",
"=",
"X",
".",
"shape",
"hessian",
"=",
"np",
".",
"zeros",
"(",
"(",
"d",
",",
"d",
")",
")",
"gradient",
"=",
... | 37.126984 | 22.142857 |
def GetRootFileEntry(self):
"""Retrieves the root file entry.
Returns:
CPIOFileEntry: a file entry or None if not available.
"""
path_spec = cpio_path_spec.CPIOPathSpec(
location=self.LOCATION_ROOT, parent=self._path_spec.parent)
return self.GetFileEntryByPathSpec(path_spec) | [
"def",
"GetRootFileEntry",
"(",
"self",
")",
":",
"path_spec",
"=",
"cpio_path_spec",
".",
"CPIOPathSpec",
"(",
"location",
"=",
"self",
".",
"LOCATION_ROOT",
",",
"parent",
"=",
"self",
".",
"_path_spec",
".",
"parent",
")",
"return",
"self",
".",
"GetFileE... | 33.555556 | 15.555556 |
def finder(target, matchlist, foldermode=0, regex=False, recursive=True):
    """
    function for finding files/folders in folders and their subdirectories

    Parameters
    ----------
    target: str or list of str
        a directory, zip- or tar-archive or a list of them to be searched
    matchlist: list
        a list of search patterns
    foldermode: int
        * 0: only files
        * 1: files and folders
        * 2: only folders
    regex: bool
        are the search patterns in matchlist regular expressions or unix shell standard (default)?
    recursive: bool
        search target recursively into all subdirectories or only in the top level?
        This is currently only implemented for parameter `target` being a directory.

    Returns
    -------
    list of str
        the absolute names of files/folders matching the patterns
    """
    if foldermode not in [0, 1, 2]:
        raise ValueError("'foldermode' must be either 0, 1 or 2")
    # match patterns
    if isinstance(target, str):
        # combine all patterns into a single alternation; translate shell
        # wildcards to regular expressions unless they already are regexes
        pattern = r'|'.join(matchlist if regex else [fnmatch.translate(x) for x in matchlist])
        if os.path.isdir(target):
            if recursive:
                out = dissolve([[os.path.join(root, x)
                                 for x in dirs + files
                                 if re.search(pattern, x)]
                                for root, dirs, files in os.walk(target)])
            else:
                out = [os.path.join(target, x)
                       for x in os.listdir(target)
                       if re.search(pattern, x)]
            if foldermode == 0:
                out = [x for x in out if not os.path.isdir(x)]
            if foldermode == 2:
                out = [x for x in out if os.path.isdir(x)]
            return sorted(out)
        elif os.path.isfile(target):
            if zf.is_zipfile(target):
                # context manager guarantees the archive is closed; 'archive'
                # avoids shadowing the builtin 'zip' (the original did both)
                with zf.ZipFile(target, 'r') as archive:
                    out = [os.path.join(target, name)
                           for name in archive.namelist()
                           if re.search(pattern, os.path.basename(name.strip('/')))]
                if foldermode == 0:
                    out = [x for x in out if not x.endswith('/')]
                elif foldermode == 1:
                    out = [x.strip('/') for x in out]
                elif foldermode == 2:
                    out = [x.strip('/') for x in out if x.endswith('/')]
                return sorted(out)
            elif tf.is_tarfile(target):
                # BUG FIX: the tarfile was previously closed with an explicit
                # tar.close() and leaked if re.search or getmember raised;
                # the context manager closes it on any exit path
                with tf.open(target) as tar:
                    out = [name for name in tar.getnames()
                           if re.search(pattern, os.path.basename(name.strip('/')))]
                    if foldermode == 0:
                        out = [x for x in out if not tar.getmember(x).isdir()]
                    elif foldermode == 2:
                        out = [x for x in out if tar.getmember(x).isdir()]
                out = [os.path.join(target, x) for x in out]
                return sorted(out)
            else:
                raise TypeError("if parameter 'target' is a file, "
                                "it must be a zip or tar archive:\n    {}"
                                .format(target))
        else:
            raise TypeError("if parameter 'target' is of type str, "
                            "it must be a directory or a file:\n    {}"
                            .format(target))
    elif isinstance(target, list):
        # recurse per list entry and flatten the per-target result lists
        groups = [finder(x, matchlist, foldermode, regex, recursive) for x in target]
        return list(itertools.chain(*groups))
    else:
        raise TypeError("parameter 'target' must be of type str or list")
"def",
"finder",
"(",
"target",
",",
"matchlist",
",",
"foldermode",
"=",
"0",
",",
"regex",
"=",
"False",
",",
"recursive",
"=",
"True",
")",
":",
"if",
"foldermode",
"not",
"in",
"[",
"0",
",",
"1",
",",
"2",
"]",
":",
"raise",
"ValueError",
"(",... | 38.581633 | 21.112245 |
def build_command_groups(self, block):
"""
Creates block modification commands, grouped by start index,
with the text to apply them on.
"""
text = block['text']
commands = sorted(self.build_commands(block))
grouped = groupby(commands, Command.key)
listed = list(groupby(commands, Command.key))
sliced = []
i = 0
for start_index, commands in grouped:
if i < len(listed) - 1:
stop_index = listed[i + 1][0]
sliced.append((text[start_index:stop_index], list(commands)))
else:
sliced.append((text[start_index:start_index], list(commands)))
i += 1
return sliced | [
"def",
"build_command_groups",
"(",
"self",
",",
"block",
")",
":",
"text",
"=",
"block",
"[",
"'text'",
"]",
"commands",
"=",
"sorted",
"(",
"self",
".",
"build_commands",
"(",
"block",
")",
")",
"grouped",
"=",
"groupby",
"(",
"commands",
",",
"Command... | 32.590909 | 18.136364 |
def mail2blogger(entry, **kwargs):
    """Signal handler that cross-posts published ``Entry``'s to Blogger.

    Posting happens by emailing the entry to a mail2blogger address. For
    this to work, the following settings must be non-False; e.g.:

        BLARGG = {
            'mail2blogger': True,
            'mail2blogger_email': 'user@example.com',
        }
    """
    enabled = blargg_settings.get('mail2blogger', False)
    recipient = blargg_settings.get('mail2blogger_email', None)
    if not (enabled and recipient):
        return
    # Send HTML (and text-only) email
    message = EmailMultiAlternatives(
        entry.title,  # Subject
        striptags(entry.crossposted_content),  # Text-only body
        settings.DEFAULT_FROM_EMAIL,  # From
        [recipient]  # List of Recipients
    )
    message.attach_alternative(entry.crossposted_content, "text/html")
    message.send(fail_silently=True)
"def",
"mail2blogger",
"(",
"entry",
",",
"*",
"*",
"kwargs",
")",
":",
"enabled",
"=",
"blargg_settings",
".",
"get",
"(",
"'mail2blogger'",
",",
"False",
")",
"recipient",
"=",
"blargg_settings",
".",
"get",
"(",
"'mail2blogger_email'",
",",
"None",
")",
... | 35.913043 | 16.652174 |
def download(self, id, attid): # pylint: disable=invalid-name,redefined-builtin
"""Download a device's attachment.
:param id: Device ID as an int.
:param attid: Attachment ID as an int.
:rtype: tuple `(io.BytesIO, 'filename')`
"""
resp = self.service.get_id(self._base(id), attid, params={'format': 'download'}, stream=True)
b = io.BytesIO()
stream.stream_response_to_file(resp, path=b)
resp.close()
b.seek(0)
return (b, self.service.filename(resp)) | [
"def",
"download",
"(",
"self",
",",
"id",
",",
"attid",
")",
":",
"# pylint: disable=invalid-name,redefined-builtin",
"resp",
"=",
"self",
".",
"service",
".",
"get_id",
"(",
"self",
".",
"_base",
"(",
"id",
")",
",",
"attid",
",",
"params",
"=",
"{",
"... | 40.461538 | 17.923077 |
def getMouse(self):
"""
Waits for a mouse click.
"""
# FIXME: this isn't working during an executing cell
self.mouse_x.value = -1
self.mouse_y.value = -1
while self.mouse_x.value == -1 and self.mouse_y.value == -1:
time.sleep(.1)
return (self.mouse_x.value, self.mouse_y.value) | [
"def",
"getMouse",
"(",
"self",
")",
":",
"# FIXME: this isn't working during an executing cell",
"self",
".",
"mouse_x",
".",
"value",
"=",
"-",
"1",
"self",
".",
"mouse_y",
".",
"value",
"=",
"-",
"1",
"while",
"self",
".",
"mouse_x",
".",
"value",
"==",
... | 34.4 | 12.4 |
def _log_players(self, players):
"""
:param players: list of catan.game.Player objects
"""
self._logln('players: {0}'.format(len(players)))
for p in self._players:
self._logln('name: {0}, color: {1}, seat: {2}'.format(p.name, p.color, p.seat)) | [
"def",
"_log_players",
"(",
"self",
",",
"players",
")",
":",
"self",
".",
"_logln",
"(",
"'players: {0}'",
".",
"format",
"(",
"len",
"(",
"players",
")",
")",
")",
"for",
"p",
"in",
"self",
".",
"_players",
":",
"self",
".",
"_logln",
"(",
"'name: ... | 41.285714 | 14.428571 |
def AddStorageMediaImageOptions(self, argument_group):
"""Adds the storage media image options to the argument group.
Args:
argument_group (argparse._ArgumentGroup): argparse argument group.
"""
argument_group.add_argument(
'--partitions', '--partition', dest='partitions', action='store',
type=str, default=None, help=(
'Define partitions to be processed. A range of '
'partitions can be defined as: "3..5". Multiple partitions can '
'be defined as: "1,3,5" (a list of comma separated values). '
'Ranges and lists can also be combined as: "1,3..5". The first '
'partition is 1. All partitions can be specified with: "all".'))
argument_group.add_argument(
'--volumes', '--volume', dest='volumes', action='store', type=str,
default=None, help=(
'Define volumes to be processed. A range of volumes can be defined '
'as: "3..5". Multiple volumes can be defined as: "1,3,5" (a list '
'of comma separated values). Ranges and lists can also be combined '
'as: "1,3..5". The first volume is 1. All volumes can be specified '
'with: "all".')) | [
"def",
"AddStorageMediaImageOptions",
"(",
"self",
",",
"argument_group",
")",
":",
"argument_group",
".",
"add_argument",
"(",
"'--partitions'",
",",
"'--partition'",
",",
"dest",
"=",
"'partitions'",
",",
"action",
"=",
"'store'",
",",
"type",
"=",
"str",
",",... | 51.826087 | 25.434783 |
def git_versions_from_keywords(keywords, tag_prefix, verbose):
    """Get version information from git keywords.

    ``keywords`` holds the expanded git-archive substitutions
    (refnames/full/date); the smallest tag starting with ``tag_prefix``
    wins. Raises NotThisMethod when the keywords are absent or unexpanded.
    """
    if not keywords:
        raise NotThisMethod("no keywords at all, weird")
    date = keywords.get("date")
    if date is not None:
        # git-2.2.0 added "%cI", which expands to an ISO-8601 -compliant
        # datestamp. However we prefer "%ci" (which expands to an "ISO-8601
        # -like" string, which we must then edit to make compliant), because
        # it's been around since git-1.5.3, and it's too difficult to
        # discover which version we're using, or to work around using an
        # older one.
        date = date.strip().replace(" ", "T", 1).replace(" ", "", 1)
    refnames = keywords["refnames"].strip()
    if refnames.startswith("$Format"):
        if verbose:
            print("keywords are unexpanded, not using")
        raise NotThisMethod("unexpanded keywords, not a git-archive tarball")
    refs = {name.strip() for name in refnames.strip("()").split(",")}
    # starting in git-1.8.3, tags are listed as "tag: foo-1.0" instead of
    # just "foo-1.0". If we see a "tag: " prefix, prefer those.
    TAG = "tag: "
    tags = {name[len(TAG):] for name in refs if name.startswith(TAG)}
    if not tags:
        # Either we're using git < 1.8.3, or there really are no tags. We use
        # a heuristic: assume all version tags have a digit. By ignoring
        # refnames without digits, we filter out many common branch names
        # like "release" and "stabilization", as well as "HEAD" and "master".
        tags = {name for name in refs if re.search(r'\d', name)}
        if verbose:
            print("discarding '%s', no digits" % ",".join(refs - tags))
    if verbose:
        print("likely tags: %s" % ",".join(sorted(tags)))
    for candidate in sorted(tags):
        # sorting will prefer e.g. "2.0" over "2.0rc1"
        if candidate.startswith(tag_prefix):
            version = candidate[len(tag_prefix):]
            if verbose:
                print("picking %s" % version)
            return {"version": version,
                    "full-revisionid": keywords["full"].strip(),
                    "dirty": False, "error": None,
                    "date": date, "branch": None}
    # no suitable tags, so version is "0+unknown", but full hex is still there
    if verbose:
        print("no suitable tags, using unknown + full revision id")
    return {"version": "0+unknown",
            "full-revisionid": keywords["full"].strip(),
            "dirty": False, "error": "no suitable tags", "date": None,
            "branch": None}
"def",
"git_versions_from_keywords",
"(",
"keywords",
",",
"tag_prefix",
",",
"verbose",
")",
":",
"if",
"not",
"keywords",
":",
"raise",
"NotThisMethod",
"(",
"\"no keywords at all, weird\"",
")",
"date",
"=",
"keywords",
".",
"get",
"(",
"\"date\"",
")",
"if",... | 51.264151 | 21.075472 |
def send_message(self, peer: Peer, text: str, reply: int=None, link_preview: bool=None,
on_success: callable=None, reply_markup: botapi.ReplyMarkup=None):
"""
Send message to peer.
:param peer: Peer to send message to.
:param text: Text to send.
:param reply: Message object or message_id to reply to.
:param link_preview: Whether or not to show the link preview for this message
:param on_success: Callback to call when call is complete.
:type reply: int or Message
"""
if isinstance(reply, Message):
reply = reply.id
botapi.send_message(chat_id=peer.id, text=text, disable_web_page_preview=not link_preview,
reply_to_message_id=reply, on_success=on_success, reply_markup=reply_markup,
**self.request_args).run() | [
"def",
"send_message",
"(",
"self",
",",
"peer",
":",
"Peer",
",",
"text",
":",
"str",
",",
"reply",
":",
"int",
"=",
"None",
",",
"link_preview",
":",
"bool",
"=",
"None",
",",
"on_success",
":",
"callable",
"=",
"None",
",",
"reply_markup",
":",
"b... | 48.611111 | 24.722222 |
def register_type_name(t, name):
    """ Register a human-friendly name for the given type. This will be used in Invalid errors

    :param t: The type to register
    :type t: type
    :param name: Name for the type
    :type name: unicode
    """
    # key must be a type and the label a unicode string
    assert isinstance(t, type) and isinstance(name, unicode)
    __type_names[t] = name
"def",
"register_type_name",
"(",
"t",
",",
"name",
")",
":",
"assert",
"isinstance",
"(",
"t",
",",
"type",
")",
"assert",
"isinstance",
"(",
"name",
",",
"unicode",
")",
"__type_names",
"[",
"t",
"]",
"=",
"name"
] | 30.272727 | 11.636364 |
def roll_estimate(RAW_IMU,GPS_RAW_INT=None,ATTITUDE=None,SENSOR_OFFSETS=None, ofs=None, mul=None,smooth=0.7):
    '''estimate roll from accelerometer'''
    # scale raw accelerometer counts to m/s^2
    # (assumes xacc/yacc/zacc are in milli-g — TODO confirm)
    ax = RAW_IMU.xacc * 9.81 / 1000.0
    ay = RAW_IMU.yacc * 9.81 / 1000.0
    az = RAW_IMU.zacc * 9.81 / 1000.0
    if ATTITUDE is not None and GPS_RAW_INT is not None:
        # compensate lateral/vertical axes using body rates and ground speed
        ay -= ATTITUDE.yawspeed * GPS_RAW_INT.vel * 0.01
        az += ATTITUDE.pitchspeed * GPS_RAW_INT.vel * 0.01
    if SENSOR_OFFSETS is not None and ofs is not None:
        # re-apply stored calibration, then subtract the supplied offsets
        ax += SENSOR_OFFSETS.accel_cal_x
        ay += SENSOR_OFFSETS.accel_cal_y
        az += SENSOR_OFFSETS.accel_cal_z
        ax, ay, az = ax - ofs[0], ay - ofs[1], az - ofs[2]
    if mul is not None:
        ax, ay, az = ax * mul[0], ay * mul[1], az * mul[2]
    # roll from the gravity vector direction, smoothed with a low-pass filter
    return lowpass(degrees(-asin(ay/sqrt(ax**2+ay**2+az**2))),'_roll',smooth)
"def",
"roll_estimate",
"(",
"RAW_IMU",
",",
"GPS_RAW_INT",
"=",
"None",
",",
"ATTITUDE",
"=",
"None",
",",
"SENSOR_OFFSETS",
"=",
"None",
",",
"ofs",
"=",
"None",
",",
"mul",
"=",
"None",
",",
"smooth",
"=",
"0.7",
")",
":",
"rx",
"=",
"RAW_IMU",
".... | 41.9 | 14.9 |
    def reset(self):
        """Attempts to reset the dongle to a known state.
        When called, this method will reset the internal state of the object, and
        disconnect any active connections.
        """
        logger.debug('resetting dongle state')
        # drop all cached object state first
        self._clear()
        if self.api is not None:
            self._set_state(Dongle._STATE_RESET)
            # make the dongle neither discoverable nor connectable while resetting
            self.api.ble_cmd_gap_set_mode(gap_discoverable_mode['gap_non_discoverable'], gap_connectable_mode['gap_non_connectable'])
            self._wait_for_state(self._STATE_RESET)
            # tear down every connection slot the dongle supports, one at a time
            for i in range(self.supported_connections):
                self._set_conn_state(i, self._STATE_DISCONNECTING)
                self.api.ble_cmd_connection_disconnect(i)
                # NOTE(review): waits on the DISCONNECTING state for this slot —
                # presumably cleared by the disconnect event handler; confirm.
                self._wait_for_conn_state(i, self._STATE_DISCONNECTING)
        logger.debug('reset completed')
"def",
"reset",
"(",
"self",
")",
":",
"logger",
".",
"debug",
"(",
"'resetting dongle state'",
")",
"self",
".",
"_clear",
"(",
")",
"if",
"self",
".",
"api",
"is",
"not",
"None",
":",
"self",
".",
"_set_state",
"(",
"Dongle",
".",
"_STATE_RESET",
")"... | 39.428571 | 24 |
def enriched(self, thresh=0.05, idx=True):
"""
Enriched features.
{threshdoc}
"""
return self.upregulated(thresh=thresh, idx=idx) | [
"def",
"enriched",
"(",
"self",
",",
"thresh",
"=",
"0.05",
",",
"idx",
"=",
"True",
")",
":",
"return",
"self",
".",
"upregulated",
"(",
"thresh",
"=",
"thresh",
",",
"idx",
"=",
"idx",
")"
] | 23.428571 | 13.142857 |
def remove(path, **kwargs):
    r'''
    Remove the directory from the SYSTEM path
    Returns:
        boolean True if successful, False if unsuccessful
    rehash : True
        If the registry was updated, and this value is set to ``True``, sends a
        WM_SETTINGCHANGE broadcast to refresh the environment variables. Set
        this to ``False`` to skip this broadcast.
    CLI Example:
    .. code-block:: bash
        # Will remove C:\Python27 from the path
        salt '*' win_path.remove 'c:\\python27'
    '''
    # reject any kwargs other than 'rehash'
    kwargs = salt.utils.args.clean_kwargs(**kwargs)
    rehash_ = kwargs.pop('rehash', True)
    if kwargs:
        salt.utils.args.invalid_kwargs(kwargs)
    path = _normalize_dir(path)
    path_str = salt.utils.stringutils.to_str(path)
    system_path = get_path()
    # The current path should not have any unicode in it, but don't take any
    # chances.
    local_path = [
        salt.utils.stringutils.to_str(x)
        for x in os.environ['PATH'].split(PATHSEP)
    ]
    def _check_path(dirs, path):
        '''
        Check the dir list for the specified path, and make changes to the list
        if needed. Return True if changes were made to the list, otherwise
        return False.
        '''
        # case-insensitive comparison: Windows paths are not case-sensitive
        dirs_lc = [x.lower() for x in dirs]
        path_lc = path.lower()
        new_dirs = []
        for index, dirname in enumerate(dirs_lc):
            if path_lc != dirname:
                new_dirs.append(dirs[index])
        if len(new_dirs) != len(dirs):
            # mutate the caller's list in place so it reflects the removal
            dirs[:] = new_dirs[:]
            return True
        else:
            return False
    # update this process's own PATH first, if it contained the directory
    if _check_path(local_path, path_str):
        _update_local_path(local_path)
    if not _check_path(system_path, path):
        # No changes necessary
        return True
    # persist the pruned system path to the registry
    result = __utils__['reg.set_value'](
        HIVE,
        KEY,
        VNAME,
        ';'.join(salt.utils.data.decode(system_path)),
        VTYPE
    )
    if result and rehash_:
        # Broadcast WM_SETTINGCHANGE to Windows if registry was updated
        return rehash()
    else:
        return result
"def",
"remove",
"(",
"path",
",",
"*",
"*",
"kwargs",
")",
":",
"kwargs",
"=",
"salt",
".",
"utils",
".",
"args",
".",
"clean_kwargs",
"(",
"*",
"*",
"kwargs",
")",
"rehash_",
"=",
"kwargs",
".",
"pop",
"(",
"'rehash'",
",",
"True",
")",
"if",
"... | 27.364865 | 21.554054 |
def infer_dtype_from_array(arr, pandas_dtype=False):
    """
    infer the dtype from a scalar or array

    Parameters
    ----------
    arr : scalar or array
    pandas_dtype : bool, default False
        whether to infer dtype including pandas extension types.
        If False, array belongs to pandas extension types
        is inferred as object

    Returns
    -------
    tuple (numpy-compat/pandas-compat dtype, array)

    Notes
    -----
    if pandas_dtype=False. these infer to numpy dtypes
    exactly with the exception that mixed / object dtypes
    are not coerced by stringifying or conversion
    if pandas_dtype=True. datetime64tz-aware/categorical
    types will retain there character.

    Examples
    --------
    >>> np.asarray([1, '1'])
    array(['1', '1'], dtype='<U21')
    >>> infer_dtype_from_array([1, '1'])
    (numpy.object_, [1, '1'])
    """
    if isinstance(arr, np.ndarray):
        return arr.dtype, arr
    if not is_list_like(arr):
        arr = [arr]
    if pandas_dtype and is_extension_type(arr):
        return arr.dtype, arr
    if isinstance(arr, ABCSeries):
        return arr.dtype, np.asarray(arr)
    # don't force numpy coerce with nan's
    inferred = lib.infer_dtype(arr, skipna=False)
    if inferred in ('string', 'bytes', 'unicode', 'mixed', 'mixed-integer'):
        return (np.object_, arr)
    arr = np.asarray(arr)
    return arr.dtype, arr
"def",
"infer_dtype_from_array",
"(",
"arr",
",",
"pandas_dtype",
"=",
"False",
")",
":",
"if",
"isinstance",
"(",
"arr",
",",
"np",
".",
"ndarray",
")",
":",
"return",
"arr",
".",
"dtype",
",",
"arr",
"if",
"not",
"is_list_like",
"(",
"arr",
")",
":",... | 25.127273 | 19.309091 |
def replace_suffixes_1(self, word):
"""
Find the longest suffix among the ones specified
and perform the required action.
"""
length = len(word)
if word.endswith("sses"):
return word[:-2]
elif word.endswith("ied") or word.endswith("ies"):
word = word[:-3]
if len(word) == 1:
word += 'ie'
else:
word += 'i'
return word
# This ensures that words like conspicous stem properly
elif word.endswith('us') or word.endswith('ss'):
return word
# From spec: 'delete if the preceding word part contains a vowel
# not immediately before the s (so gas and this retain the s,
# gaps and kiwis lose it)
elif word[length - 1] == 's':
for letter in word[:-2]:
if letter in self.vowels:
return word[:-1]
return word | [
"def",
"replace_suffixes_1",
"(",
"self",
",",
"word",
")",
":",
"length",
"=",
"len",
"(",
"word",
")",
"if",
"word",
".",
"endswith",
"(",
"\"sses\"",
")",
":",
"return",
"word",
"[",
":",
"-",
"2",
"]",
"elif",
"word",
".",
"endswith",
"(",
"\"i... | 30.193548 | 16.645161 |
def extract_pathvars(callback):
    '''Extract the path variables from an Resource operation.
    Return {'mandatory': [<list-of-pnames>], 'optional': [<list-of-pnames>]}
    '''
    mandatory = []
    optional = []
    # We loop on the signature because the order of the parameters is
    # important, and signature is an OrderedDict, while annotations is a
    # regular dictionary
    for pname in callback.signature.parameters.keys():
        try:
            anno = callback.__annotations__[pname]
        except KeyError:  # unannotated params, like "cls" or "request"
            continue
        if anno[0] != Ptypes.path:
            continue
        # At this point we are only considering path variables, but
        # we have to generate different (present/absent) if these
        # parameters have a default.
        # Use the public sentinel inspect.Parameter.empty (not the private
        # inspect._empty) and identity comparison: the sentinel is a
        # singleton, and '==' could be hijacked by a default's __eq__.
        if callback.signature.parameters[pname].default is inspect.Parameter.empty:
            mandatory.append(pname)
        else:
            optional.append(pname)
    return {'mandatory': mandatory, 'optional': optional}
"def",
"extract_pathvars",
"(",
"callback",
")",
":",
"mandatory",
"=",
"[",
"]",
"optional",
"=",
"[",
"]",
"# We loop on the signature because the order of the parameters is",
"# important, and signature is an OrderedDict, while annotations is a",
"# regular dictionary",
"for",
... | 40.68 | 21.4 |
def set_tile(self, row, col, value):
"""
Set the tile at position row, col to have the given value.
"""
#print('set_tile: y=', row, 'x=', col)
if col < 0:
print("ERROR - x less than zero", col)
col = 0
#return
if col > self.grid_width - 1 :
print("ERROR - x larger than grid", col)
col = self.grid_width - 1
#return
if row < 0:
print("ERROR - y less than zero", row)
row = 0
#return
if row > self.grid_height - 1:
print("ERROR - y larger than grid", row)
row = self.grid_height - 1
self.grid[row][col] = value | [
"def",
"set_tile",
"(",
"self",
",",
"row",
",",
"col",
",",
"value",
")",
":",
"#print('set_tile: y=', row, 'x=', col)",
"if",
"col",
"<",
"0",
":",
"print",
"(",
"\"ERROR - x less than zero\"",
",",
"col",
")",
"col",
"=",
"0",
"#return",
"if",
"col",
">... | 29.28 | 14.6 |
    def handle_provider(self, provider_factory, note):
        """Get value from provider as requested by note."""
        # Implementation in separate method to support accurate book-keeping.
        basenote, name = self.parse_note(note)
        # _handle_provider could be even shorter if
        # Injector.apply() worked with classes, issue #9.
        # Lazily instantiate the provider once per basenote and cache it.
        if basenote not in self.instances:
            if (isinstance(provider_factory, type) and
                    self.has_annotations(provider_factory.__init__)):
                args, kwargs = self.prepare_callable(provider_factory.__init__)
                self.instances[basenote] = provider_factory(*args, **kwargs)
            else:
                self.instances[basenote] = self.apply_regardless(
                    provider_factory)
        provider = self.instances[basenote]
        # NOTE(review): this runs on EVERY call, not just on first
        # instantiation, so a provider's close() may be registered in
        # self.finalizers multiple times — confirm that is intended.
        if hasattr(provider, 'close'):
            self.finalizers.append(self.instances[basenote].close)
        # NOTE(review): redundant re-fetch; 'provider' is unchanged since
        # the assignment above.
        provider = self.instances[basenote]
        get = self.partial_regardless(provider.get)
        try:
            if name is not None:
                return get(name=name)
            self.values[basenote] = get()
            return self.values[basenote]
        except UnsetError:
            # Use sys.exc_info to support both Python 2 and Python 3.
            exc_type, exc_value, tb = sys.exc_info()
            exc_msg = str(exc_value)
            if exc_msg:
                msg = '{}: {!r}'.format(exc_msg, note)
            else:
                msg = repr(note)
            six.reraise(exc_type, exc_type(msg, note=note), tb)
"def",
"handle_provider",
"(",
"self",
",",
"provider_factory",
",",
"note",
")",
":",
"# Implementation in separate method to support accurate book-keeping.",
"basenote",
",",
"name",
"=",
"self",
".",
"parse_note",
"(",
"note",
")",
"# _handle_provider could be even short... | 40.25641 | 18.641026 |
def get_bounding_box(self, lon, lat, trt=None, mag=None):
"""
Build a bounding box around the given lon, lat by computing the
maximum_distance at the given tectonic region type and magnitude.
:param lon: longitude
:param lat: latitude
:param trt: tectonic region type, possibly None
:param mag: magnitude, possibly None
:returns: min_lon, min_lat, max_lon, max_lat
"""
if trt is None: # take the greatest integration distance
maxdist = max(self(trt, mag) for trt in self.dic)
else: # get the integration distance for the given TRT
maxdist = self(trt, mag)
a1 = min(maxdist * KM_TO_DEGREES, 90)
a2 = min(angular_distance(maxdist, lat), 180)
return lon - a2, lat - a1, lon + a2, lat + a1 | [
"def",
"get_bounding_box",
"(",
"self",
",",
"lon",
",",
"lat",
",",
"trt",
"=",
"None",
",",
"mag",
"=",
"None",
")",
":",
"if",
"trt",
"is",
"None",
":",
"# take the greatest integration distance",
"maxdist",
"=",
"max",
"(",
"self",
"(",
"trt",
",",
... | 44.833333 | 15.5 |
def containerIsRunning(name_or_id):
    '''Check if container with the given name or ID (str) is running. No side
    effects. Idempotent. Returns True if running, False if not.'''
    require_str("name_or_id", name_or_id)
    try:
        container = getContainer(name_or_id)
        # Refer to the latest status list here: https://docs.docker.com/engine/
        # api/v1.33/#operation/ContainerList
        # Of all documented statuses (created, restarting, running, removing,
        # paused, exited, dead) only these two count as "running"; the long
        # if/elif chain of boolean returns collapses to a membership test.
        if container:
            return container.status in ('restarting', 'running')
    except NotFound:
        return False
    return False
"def",
"containerIsRunning",
"(",
"name_or_id",
")",
":",
"require_str",
"(",
"\"name_or_id\"",
",",
"name_or_id",
")",
"try",
":",
"container",
"=",
"getContainer",
"(",
"name_or_id",
")",
"# Refer to the latest status list here: https://docs.docker.com/engine/",
"# api/... | 34.3 | 16.3 |
def kindpath(self, kind):
"""Returns a path to the resources for a given input kind.
:param `kind`: The kind of input:
- "ad": Active Directory
- "monitor": Files and directories
- "registry": Windows Registry
- "script": Scripts
- "splunktcp": TCP, processed
- "tcp": TCP, unprocessed
- "udp": UDP
- "win-event-log-collections": Windows event log
- "win-perfmon": Performance monitoring
- "win-wmi-collections": WMI
:type kind: ``string``
:return: The relative endpoint path.
:rtype: ``string``
"""
if kind == 'tcp':
return UrlEncoded('tcp/raw', skip_encode=True)
elif kind == 'splunktcp':
return UrlEncoded('tcp/cooked', skip_encode=True)
else:
return UrlEncoded(kind, skip_encode=True) | [
"def",
"kindpath",
"(",
"self",
",",
"kind",
")",
":",
"if",
"kind",
"==",
"'tcp'",
":",
"return",
"UrlEncoded",
"(",
"'tcp/raw'",
",",
"skip_encode",
"=",
"True",
")",
"elif",
"kind",
"==",
"'splunktcp'",
":",
"return",
"UrlEncoded",
"(",
"'tcp/cooked'",
... | 24.833333 | 20.472222 |
def get(cls, exp, files=None):
"""
:param str|unicode exp: Haskell expression to evaluate.
:param dict[str|unicode, str|unicode] files: Dictionary of file names->contents
:rtype: TryHaskell.Result
"""
return cls.parse(cls.raw(exp, files=files)) | [
"def",
"get",
"(",
"cls",
",",
"exp",
",",
"files",
"=",
"None",
")",
":",
"return",
"cls",
".",
"parse",
"(",
"cls",
".",
"raw",
"(",
"exp",
",",
"files",
"=",
"files",
")",
")"
] | 40.857143 | 14 |
def Array(dtype, size=None, ref=False):
  """Factory function that creates typed Array or ArrayRef objects

  dtype - the data type of the array (as string).
  Supported types are: Byte, Int16, UInt16, Int32, UInt32, Int64, UInt64, Real32, Real64
  size - the size of the array. Must be positive integer.
  ref - if True create an ArrayRef instead; an ArrayRef cannot be
  allocated, so size must be None in that case.
  """

  def getArrayType(self):
    """A little function to replace the getType() method of arrays

    It returns a string representation of the array element type instead of the
    integer value (NTA_BasicType enum) returned by the original array
    """
    return self._dtype

  # ArrayRef can't be allocated
  if ref:
    assert size is None

  # BUG FIX: list.index() raises ValueError for a missing item and never
  # returns -1, so the former 'if index == -1' check was dead code. Check
  # membership explicitly so the intended exception is raised.
  if dtype not in basicTypes:
    raise Exception('Invalid data type: ' + dtype)
  index = basicTypes.index(dtype)

  if size and size <= 0:
    raise Exception('Array size must be positive')

  suffix = 'ArrayRef' if ref else 'Array'
  arrayFactory = getattr(engine_internal, dtype + suffix)
  arrayFactory.getType = getArrayType

  if size:
    a = arrayFactory(size)
  else:
    a = arrayFactory()

  a._dtype = basicTypes[index]
  return a
"def",
"Array",
"(",
"dtype",
",",
"size",
"=",
"None",
",",
"ref",
"=",
"False",
")",
":",
"def",
"getArrayType",
"(",
"self",
")",
":",
"\"\"\"A little function to replace the getType() method of arrays\n\n It returns a string representation of the array element type ins... | 27.657895 | 21.5 |
def SetPassword(self, password):
    """Request change of password.

    The API request requires supplying the current password, which is fetched
    via ``Credentials()`` -- so note there will be an activity log entry for
    retrieving the credentials alongside any SetPassword entry.

    >>> s.SetPassword("newpassword")
    """
    # 0: {op: "set", member: "password", value: {current: " r`5Mun/vT:qZ]2?z", password: "Savvis123!"}}
    if self.data['status'] != "active": raise(clc.CLCException("Server must be powered on to change password"))
    patch_body = [{
        "op": "set",
        "member": "password",
        "value": {
            "current": self.Credentials()['password'],
            "password": password,
        },
    }]
    api_call = clc.v2.API.Call(
        'PATCH',
        'servers/%s/%s' % (self.alias, self.id),
        json.dumps(patch_body),
        session=self.session)
    return(clc.v2.Requests(api_call, alias=self.alias, session=self.session))
"def",
"SetPassword",
"(",
"self",
",",
"password",
")",
":",
"# 0: {op: \"set\", member: \"password\", value: {current: \" r`5Mun/vT:qZ]2?z\", password: \"Savvis123!\"}}",
"if",
"self",
".",
"data",
"[",
"'status'",
"]",
"!=",
"\"active\"",
":",
"raise",
"(",
"clc",
".",... | 46.526316 | 34.052632 |
def revoke_cert(
        ca_name,
        CN,
        cacert_path=None,
        ca_filename=None,
        cert_path=None,
        cert_filename=None,
        crl_file=None,
        digest='sha256',
        ):
    '''
    Revoke a certificate.

    .. versionadded:: 2015.8.0

    ca_name
        Name of the CA.
    CN
        Common name matching the certificate signing request.
    cacert_path
        Absolute path to ca certificates root directory.
    ca_filename
        Alternative filename for the CA.
    cert_path
        Path to the cert file.
    cert_filename
        Alternative filename for the certificate, useful when using special
        characters in the CN.
    crl_file
        Full path to the CRL file.
    digest
        The message digest algorithm. Must be a string describing a digest
        algorithm supported by OpenSSL (by EVP_get_digestbyname, specifically).
        For example, "md5" or "sha1". Default: 'sha256'

    CLI Example:

    .. code-block:: bash

        salt '*' tls.revoke_cert ca_name='koji' \
                ca_filename='ca' \
                crl_file='/etc/openvpn/team1/crl.pem'
    '''
    # Resolve defaults for the CA directory layout and filenames.
    set_ca_path(cacert_path)
    ca_dir = '{0}/{1}'.format(cert_base_path(), ca_name)
    if ca_filename is None:
        ca_filename = '{0}_ca_cert'.format(ca_name)
    if cert_path is None:
        # NOTE(review): this uses _cert_base_path() while ca_dir above uses
        # cert_base_path() -- confirm both resolve to the same root path.
        cert_path = '{0}/{1}/certs'.format(_cert_base_path(), ca_name)
    if cert_filename is None:
        cert_filename = '{0}'.format(CN)
    # Load the CA certificate and private key; both are required to sign the
    # regenerated CRL. A missing file means the CA does not exist.
    try:
        with salt.utils.files.fopen('{0}/{1}/{2}.crt'.format(
                cert_base_path(),
                ca_name,
                ca_filename)) as fp_:
            ca_cert = OpenSSL.crypto.load_certificate(
                OpenSSL.crypto.FILETYPE_PEM,
                fp_.read()
            )
        with salt.utils.files.fopen('{0}/{1}/{2}.key'.format(
                cert_base_path(),
                ca_name,
                ca_filename)) as fp_:
            ca_key = OpenSSL.crypto.load_privatekey(
                OpenSSL.crypto.FILETYPE_PEM,
                fp_.read()
            )
    except IOError:
        return 'There is no CA named "{0}"'.format(ca_name)
    # The client certificate to be revoked must already exist on disk.
    client_cert = _read_cert('{0}/{1}.crt'.format(cert_path, cert_filename))
    if client_cert is None:
        return 'There is no client certificate named "{0}"'.format(CN)
    index_file, expire_date, serial_number, subject = _get_basic_info(
        ca_name,
        client_cert,
        ca_dir)
    # Build the OpenSSL index.txt line fragments: 'V' marks a valid entry,
    # 'R' a revoked one (with a 12-digit + 'Z' revocation timestamp field).
    index_serial_subject = '{0}\tunknown\t{1}'.format(
        serial_number,
        subject)
    index_v_data = 'V\t{0}\t\t{1}'.format(
        expire_date,
        index_serial_subject)
    index_r_data_pattern = re.compile(
        r"R\t" +
        expire_date +
        r"\t\d{12}Z\t" +
        re.escape(index_serial_subject))
    index_r_data = 'R\t{0}\t{1}\t{2}'.format(
        expire_date,
        _four_digit_year_to_two_digit(datetime.utcnow()),
        index_serial_subject)
    ret = {}
    # Scan the index: if the cert is already marked revoked, bail out early;
    # otherwise flip its 'V' line to the new 'R' line in place.
    with salt.utils.files.fopen(index_file) as fp_:
        for line in fp_:
            line = salt.utils.stringutils.to_unicode(line)
            if index_r_data_pattern.match(line):
                revoke_date = line.split('\t')[2]
                try:
                    # strptime is used purely to validate the recorded
                    # revocation timestamp format.
                    datetime.strptime(revoke_date, two_digit_year_fmt)
                    return ('"{0}/{1}.crt" was already revoked, '
                            'serial number: {2}').format(
                        cert_path,
                        cert_filename,
                        serial_number
                    )
                except ValueError:
                    ret['retcode'] = 1
                    # NOTE(review): the implicit string concatenation below
                    # yields "...does not matchformat..." -- a space appears
                    # to be missing between the two literals.
                    ret['comment'] = ("Revocation date '{0}' does not match"
                                      "format '{1}'".format(
                                          revoke_date,
                                          two_digit_year_fmt))
                    return ret
            elif index_serial_subject in line:
                __salt__['file.replace'](
                    index_file,
                    index_v_data,
                    index_r_data,
                    backup=False)
                break
    # Rebuild the CRL from every 'R' entry in the (now updated) index file.
    crl = OpenSSL.crypto.CRL()
    with salt.utils.files.fopen(index_file) as fp_:
        for line in fp_:
            line = salt.utils.stringutils.to_unicode(line)
            if line.startswith('R'):
                fields = line.split('\t')
                revoked = OpenSSL.crypto.Revoked()
                revoked.set_serial(salt.utils.stringutils.to_bytes(fields[3]))
                # Index stores a 2-digit-year timestamp; Revoked.set_rev_date
                # expects the 4-digit-year (ASN.1 GeneralizedTime) form.
                revoke_date_2_digit = datetime.strptime(fields[2],
                                                        two_digit_year_fmt)
                revoked.set_rev_date(salt.utils.stringutils.to_bytes(
                    revoke_date_2_digit.strftime(four_digit_year_fmt)
                ))
                crl.add_revoked(revoked)
    # Sign the CRL with the CA key using the requested digest.
    crl_text = crl.export(ca_cert,
                          ca_key,
                          digest=salt.utils.stringutils.to_bytes(digest))
    if crl_file is None:
        crl_file = '{0}/{1}/crl.pem'.format(
            _cert_base_path(),
            ca_name
        )
    if os.path.isdir(crl_file):
        ret['retcode'] = 1
        ret['comment'] = 'crl_file "{0}" is an existing directory'.format(
            crl_file)
        return ret
    with salt.utils.files.fopen(crl_file, 'w') as fp_:
        fp_.write(salt.utils.stringutils.to_str(crl_text))
    return ('Revoked Certificate: "{0}/{1}.crt", '
            'serial number: {2}').format(
        cert_path,
        cert_filename,
        serial_number
    )
"def",
"revoke_cert",
"(",
"ca_name",
",",
"CN",
",",
"cacert_path",
"=",
"None",
",",
"ca_filename",
"=",
"None",
",",
"cert_path",
"=",
"None",
",",
"cert_filename",
"=",
"None",
",",
"crl_file",
"=",
"None",
",",
"digest",
"=",
"'sha256'",
",",
")",
... | 30.424581 | 20.703911 |
def retrieve_descriptor(descriptor):
    """Resolve *descriptor* -- None, a path/URL string, a file-like object,
    or a dict -- into a plain dict, raising DataPackageException on failure.
    """
    resolved = {} if descriptor is None else descriptor
    if isinstance(resolved, six.string_types):
        try:
            if os.path.isfile(resolved):
                with open(resolved, 'r') as f:
                    resolved = json.load(f)
            else:
                req = requests.get(resolved)
                req.raise_for_status()
                # Force UTF8 encoding for 'text/plain' sources
                req.encoding = 'utf8'
                resolved = req.json()
        except (IOError, requests.exceptions.RequestException) as error:
            message = 'Unable to load JSON at "%s"' % descriptor
            six.raise_from(exceptions.DataPackageException(message), error)
        except ValueError as error:
            # Python2 doesn't have json.JSONDecodeError (use ValueErorr)
            message = 'Unable to parse JSON at "%s". %s' % (descriptor, error)
            six.raise_from(exceptions.DataPackageException(message), error)
    if hasattr(resolved, 'read'):
        try:
            resolved = json.load(resolved)
        except ValueError as e:
            six.raise_from(exceptions.DataPackageException(str(e)), e)
    if not isinstance(resolved, dict):
        msg = 'Data must be a \'dict\', but was a \'{0}\''
        raise exceptions.DataPackageException(msg.format(type(resolved).__name__))
    return resolved
"def",
"retrieve_descriptor",
"(",
"descriptor",
")",
":",
"the_descriptor",
"=",
"descriptor",
"if",
"the_descriptor",
"is",
"None",
":",
"the_descriptor",
"=",
"{",
"}",
"if",
"isinstance",
"(",
"the_descriptor",
",",
"six",
".",
"string_types",
")",
":",
"t... | 39.684211 | 19.315789 |
def findkey(d, *keys):
    """Walk a nested structure along a path of keys/indexes and return the value.

    Parameters
    ----------
    d: dict
        A Python dictionary
    keys: list
        A list of key names, or list indexes

    Returns
    -------
    dict
        The composite dictionary object at the path specified by the keys

    Example
    -------
    >>> d = {"layers": [{"classes": [{"name": "Class1"}]}]}
    >>> findkey(d, "layers", 0, "classes", 0)["name"]
    'Class1'
    """
    # Iterative equivalent of the recursive descent: each key indexes one
    # level deeper; with no keys the structure itself is returned.
    node = d
    for key in keys:
        node = node[key]
    return node
"def",
"findkey",
"(",
"d",
",",
"*",
"keys",
")",
":",
"if",
"keys",
":",
"keys",
"=",
"list",
"(",
"keys",
")",
"key",
"=",
"keys",
".",
"pop",
"(",
"0",
")",
"return",
"findkey",
"(",
"d",
"[",
"key",
"]",
",",
"*",
"keys",
")",
"else",
... | 19.9375 | 23.395833 |
def markVisibilityOfSignals(ctx, ctxName, signals, interfaceSignals):
    """
    * check if all signals are driven by something
    * mark signals with hidden = False if they are connecting statements
      or if they are external interface

    :param ctx: context object used for error reporting (getDebugScopeName)
    :param ctxName: human-readable context name used in NoDriverErr messages
    :param signals: iterable of signal objects to inspect
    :param interfaceSignals: container of signals that form the external
        interface (membership tested with ``in``)
    :raises MultipleDriversErr: if a signal has more than one combinational
        driver
    :raises NoDriverErr: if an undriven signal has no fully-valid default
        value
    """
    for sig in signals:
        driver_cnt = len(sig.drivers)
        has_comb_driver = False
        if driver_cnt > 1:
            # Multiple drivers: the signal must be visible (it connects
            # statements), and at most one driver may be combinational.
            sig.hidden = False
            for d in sig.drivers:
                if not isinstance(d, Operator):
                    sig.hidden = False
                # Decide whether this driver is combinational: ports count
                # directly; other drivers count if any of their assignments
                # to sig is unconditional on a clock event.
                is_comb_driver = False
                if isinstance(d, PortItem):
                    is_comb_driver = True
                elif not d._now_is_event_dependent:
                    for a in walk_assignments(d, sig):
                        if not a.indexes\
                                and not a._is_completly_event_dependent:
                            is_comb_driver = True
                            break
                # A second combinational driver is a conflict.
                if has_comb_driver and is_comb_driver:
                    raise MultipleDriversErr(
                        "%s: Signal %r has multiple combinational drivers" %
                        (ctx.getDebugScopeName(), sig))
                has_comb_driver |= is_comb_driver
        elif driver_cnt == 1:
            # Single non-operator driver means the signal is a real
            # connection, not an expression intermediate -- keep it visible.
            if not isinstance(sig.drivers[0], Operator):
                sig.hidden = False
        else:
            # No drivers at all: visible, and unless it is an interface
            # signal it must carry a fully-valid default value, in which
            # case it becomes a constant.
            sig.hidden = False
            if sig not in interfaceSignals:
                if not sig.defVal._isFullVld():
                    raise NoDriverErr(
                        sig, "Signal without any driver or valid value in ", ctxName)
                sig._const = True
"def",
"markVisibilityOfSignals",
"(",
"ctx",
",",
"ctxName",
",",
"signals",
",",
"interfaceSignals",
")",
":",
"for",
"sig",
"in",
"signals",
":",
"driver_cnt",
"=",
"len",
"(",
"sig",
".",
"drivers",
")",
"has_comb_driver",
"=",
"False",
"if",
"driver_cnt... | 38.52381 | 14.142857 |
def _iterate_rules(rules, topology, max_iter):
"""Iteratively run all the rules until the white- and backlists converge.
Parameters
----------
rules : dict
A dictionary mapping rule names (typically atomtype names) to
SMARTSGraphs that evaluate those rules.
topology : simtk.openmm.app.Topology
The topology that we are trying to atomtype.
max_iter : int
The maximum number of iterations.
"""
atoms = list(topology.atoms())
for _ in range(max_iter):
max_iter -= 1
found_something = False
for rule in rules.values():
for match_index in rule.find_matches(topology):
atom = atoms[match_index]
if rule.name not in atom.whitelist:
atom.whitelist.add(rule.name)
atom.blacklist |= rule.overrides
found_something = True
if not found_something:
break
else:
warn("Reached maximum iterations. Something probably went wrong.") | [
"def",
"_iterate_rules",
"(",
"rules",
",",
"topology",
",",
"max_iter",
")",
":",
"atoms",
"=",
"list",
"(",
"topology",
".",
"atoms",
"(",
")",
")",
"for",
"_",
"in",
"range",
"(",
"max_iter",
")",
":",
"max_iter",
"-=",
"1",
"found_something",
"=",
... | 35.034483 | 14.965517 |
def executable_path(conn, executable):
    """
    Remote validator that accepts a connection object to ensure that a certain
    executable is available returning its full path if so.

    Otherwise an exception with thorough details will be raised, informing the
    user that the executable was not found.
    """
    # Renamed local (the original shadowed this function's own name).
    found = conn.remote_module.which(executable)
    if found:
        return found
    raise ExecutableNotFound(executable, conn.hostname)
"def",
"executable_path",
"(",
"conn",
",",
"executable",
")",
":",
"executable_path",
"=",
"conn",
".",
"remote_module",
".",
"which",
"(",
"executable",
")",
"if",
"not",
"executable_path",
":",
"raise",
"ExecutableNotFound",
"(",
"executable",
",",
"conn",
... | 39.916667 | 16.916667 |
def chunk_count(self):
    """Return a count of the chunks in this world folder."""
    # Total the per-region chunk counts across every region.
    return sum(region.chunk_count() for region in self.iter_regions())
"def",
"chunk_count",
"(",
"self",
")",
":",
"c",
"=",
"0",
"for",
"r",
"in",
"self",
".",
"iter_regions",
"(",
")",
":",
"c",
"+=",
"r",
".",
"chunk_count",
"(",
")",
"return",
"c"
] | 30.666667 | 13.333333 |
def strip_output(nb):
    """
    Strip the outputs from a notebook object.

    Clears 'outputs' and 'prompt_number' in every cell of the first
    worksheet (cells lacking those keys are untouched) and returns the
    same, mutated notebook object.
    """
    for cell in nb.worksheets[0].cells:
        for key in ('outputs', 'prompt_number'):
            if key in cell:
                # A fresh [] per cell for outputs; None for prompt numbers.
                cell[key] = [] if key == 'outputs' else None
    return nb
"def",
"strip_output",
"(",
"nb",
")",
":",
"for",
"cell",
"in",
"nb",
".",
"worksheets",
"[",
"0",
"]",
".",
"cells",
":",
"if",
"'outputs'",
"in",
"cell",
":",
"cell",
"[",
"'outputs'",
"]",
"=",
"[",
"]",
"if",
"'prompt_number'",
"in",
"cell",
"... | 26.7 | 7.5 |
def get_friend_info(self):
    """Return information about this friend, including personal notes.

    The personal note can be added or overwritten with :meth:friend, but
    only if the user has reddit Gold.

    :returns: The json response from the server.
    """
    session = self.reddit_session
    endpoint = session.config['friend_v1'].format(user=self.name)
    return session.request_json(endpoint, data={'id': self.name}, method='GET')
"def",
"get_friend_info",
"(",
"self",
")",
":",
"url",
"=",
"self",
".",
"reddit_session",
".",
"config",
"[",
"'friend_v1'",
"]",
".",
"format",
"(",
"user",
"=",
"self",
".",
"name",
")",
"data",
"=",
"{",
"'id'",
":",
"self",
".",
"name",
"}",
... | 39.083333 | 22.333333 |
def Page_setDocumentContent(self, frameId, html):
    """
    Function path: Page.setDocumentContent
        Domain: Page
        Method name: setDocumentContent

        WARNING: This function is marked 'Experimental'!

        Parameters:
            Required arguments:
                'frameId' (type: FrameId) -> Frame id to set HTML for.
                'html' (type: string) -> HTML content to set.
        No return value.

        Description: Sets given markup as the document's HTML.
    """
    assert isinstance(html, (str,)), \
        "Argument 'html' must be of type '['str']'. Received type: '%s'" % type(html)
    # Dispatch the command synchronously and hand back the resulting
    # sub-domain function table.
    return self.synchronous_command(
        'Page.setDocumentContent', frameId=frameId, html=html)
"def",
"Page_setDocumentContent",
"(",
"self",
",",
"frameId",
",",
"html",
")",
":",
"assert",
"isinstance",
"(",
"html",
",",
"(",
"str",
",",
")",
")",
",",
"\"Argument 'html' must be of type '['str']'. Received type: '%s'\"",
"%",
"type",
"(",
"html",
")",
"... | 30.590909 | 18.5 |
def get_modelnames() -> List[str]:
    """Return a sorted |list| containing all application model names.

    >>> from hydpy.auxs.xmltools import XSDWriter
    >>> print(XSDWriter.get_modelnames())    # doctest: +ELLIPSIS
    [...'dam_v001', 'dam_v002', 'dam_v003', 'dam_v004', 'dam_v005',...]
    """
    # Collect every Python module in the models package except the
    # package initialiser itself.
    names = []
    for filename in os.listdir(models.__path__[0]):
        if filename.endswith('.py') and filename != '__init__.py':
            names.append(str(filename.split('.')[0]))
    return sorted(names)
"def",
"get_modelnames",
"(",
")",
"->",
"List",
"[",
"str",
"]",
":",
"return",
"sorted",
"(",
"str",
"(",
"fn",
".",
"split",
"(",
"'.'",
")",
"[",
"0",
"]",
")",
"for",
"fn",
"in",
"os",
".",
"listdir",
"(",
"models",
".",
"__path__",
"[",
"... | 49.2 | 18 |
def _finish(self):
"""
Closes and waits for subprocess to exit.
"""
if self._process.returncode is None:
self._process.stdin.flush()
self._process.stdin.close()
self._process.wait()
self.closed = True | [
"def",
"_finish",
"(",
"self",
")",
":",
"if",
"self",
".",
"_process",
".",
"returncode",
"is",
"None",
":",
"self",
".",
"_process",
".",
"stdin",
".",
"flush",
"(",
")",
"self",
".",
"_process",
".",
"stdin",
".",
"close",
"(",
")",
"self",
".",... | 30.222222 | 6 |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.