text stringlengths 89 104k | code_tokens list | avg_line_len float64 7.91 980 | score float64 0 630 |
|---|---|---|---|
def _shuffle_tfrecord(path, random_gen):
"""Shuffle a single record file in memory."""
# Read all records
record_iter = tf.compat.v1.io.tf_record_iterator(path)
all_records = [
r for r in utils.tqdm(
record_iter, desc="Reading...", unit=" examples", leave=False)
]
# Shuffling in memory
random_gen.shuffle(all_records)
# Write all record back
with tf.io.TFRecordWriter(path) as writer:
for record in utils.tqdm(
all_records, desc="Writing...", unit=" examples", leave=False):
writer.write(record) | [
"def",
"_shuffle_tfrecord",
"(",
"path",
",",
"random_gen",
")",
":",
"# Read all records",
"record_iter",
"=",
"tf",
".",
"compat",
".",
"v1",
".",
"io",
".",
"tf_record_iterator",
"(",
"path",
")",
"all_records",
"=",
"[",
"r",
"for",
"r",
"in",
"utils",... | 35.6 | 15.933333 |
def _setup_source_and_destination(self):
"""use the base class to setup the source and destinations but add to
that setup the instantiation of the "new_crash_source" """
super(FetchTransformSaveWithSeparateNewCrashSourceApp, self) \
._setup_source_and_destination()
if self.config.new_crash_source.new_crash_source_class:
self.new_crash_source = \
self.config.new_crash_source.new_crash_source_class(
self.config.new_crash_source,
name=self.app_instance_name,
quit_check_callback=self.quit_check
)
else:
# the configuration failed to provide a "new_crash_source", fall
# back to tying the "new_crash_source" to the "source".
self.new_crash_source = self.source | [
"def",
"_setup_source_and_destination",
"(",
"self",
")",
":",
"super",
"(",
"FetchTransformSaveWithSeparateNewCrashSourceApp",
",",
"self",
")",
".",
"_setup_source_and_destination",
"(",
")",
"if",
"self",
".",
"config",
".",
"new_crash_source",
".",
"new_crash_source... | 52.3125 | 15 |
def xover_gen(self, range=None):
"""Generator for the XOVER command.
The XOVER command returns information from the overview database for
the article(s) specified.
<http://tools.ietf.org/html/rfc2980#section-2.8>
Args:
range: An article number as an integer, or a tuple of specifying a
range of article numbers in the form (first, [last]). If last is
omitted then all articles after first are included. A range of
None (the default) uses the current article.
Returns:
A list of fields as given by the overview database for each
available article in the specified range. The fields that are
returned can be determined using the LIST OVERVIEW.FMT command if
the server supports it.
Raises:
NNTPReplyError: If no such article exists or the currently selected
newsgroup is invalid.
"""
args = None
if range is not None:
args = utils.unparse_range(range)
code, message = self.command("XOVER", args)
if code != 224:
raise NNTPReplyError(code, message)
for line in self.info_gen(code, message):
yield line.rstrip().split("\t") | [
"def",
"xover_gen",
"(",
"self",
",",
"range",
"=",
"None",
")",
":",
"args",
"=",
"None",
"if",
"range",
"is",
"not",
"None",
":",
"args",
"=",
"utils",
".",
"unparse_range",
"(",
"range",
")",
"code",
",",
"message",
"=",
"self",
".",
"command",
... | 37.323529 | 23.264706 |
def get_var_primal(self, name):
"""Get the primal value of a variable. Returns None if the problem has not bee optimized."""
if self._var_primals is None:
return None
else:
index = self._get_var_index(name)
return self._var_primals[index] | [
"def",
"get_var_primal",
"(",
"self",
",",
"name",
")",
":",
"if",
"self",
".",
"_var_primals",
"is",
"None",
":",
"return",
"None",
"else",
":",
"index",
"=",
"self",
".",
"_get_var_index",
"(",
"name",
")",
"return",
"self",
".",
"_var_primals",
"[",
... | 41.714286 | 9.142857 |
async def _observe(self, api_command):
"""Observe an endpoint."""
duration = api_command.observe_duration
url = api_command.url(self._host)
err_callback = api_command.err_callback
msg = Message(code=Code.GET, uri=url, observe=duration)
# Note that this is necessary to start observing
pr, r = await self._get_response(msg)
api_command.result = _process_output(r)
def success_callback(res):
api_command.result = _process_output(res)
def error_callback(ex):
err_callback(ex)
ob = pr.observation
ob.register_callback(success_callback)
ob.register_errback(error_callback)
self._observations_err_callbacks.append(ob.error) | [
"async",
"def",
"_observe",
"(",
"self",
",",
"api_command",
")",
":",
"duration",
"=",
"api_command",
".",
"observe_duration",
"url",
"=",
"api_command",
".",
"url",
"(",
"self",
".",
"_host",
")",
"err_callback",
"=",
"api_command",
".",
"err_callback",
"m... | 32.043478 | 16.826087 |
def _pwm_to_str(self, precision=4):
"""Return string representation of pwm.
Parameters
----------
precision : int, optional, default 4
Floating-point precision.
Returns
-------
pwm_string : str
"""
if not self.pwm:
return ""
fmt = "{{:.{:d}f}}".format(precision)
return "\n".join(
["\t".join([fmt.format(p) for p in row])
for row in self.pwm]
) | [
"def",
"_pwm_to_str",
"(",
"self",
",",
"precision",
"=",
"4",
")",
":",
"if",
"not",
"self",
".",
"pwm",
":",
"return",
"\"\"",
"fmt",
"=",
"\"{{:.{:d}f}}\"",
".",
"format",
"(",
"precision",
")",
"return",
"\"\\n\"",
".",
"join",
"(",
"[",
"\"\\t\"",... | 24.8 | 16.6 |
def choices(cls, order='natural'):
"""
Generate the choices as required by Django models.
Parameters
----------
order : str
in which the elements should be returned. Possible values are:
* 'sorted', the elements will be sorted by `value`
* 'reverse', the elements will be sorted by `value` as if each comparison were
reversed
* 'natural' (default), the elements are ordered as when instantiated in the enumeration
Returns
-------
iterable of tuple
"""
INC, DEC, NAT = 'sorted', 'reverse', 'natural'
options = [INC, DEC, NAT]
assert order in options, "Sorting order not recognized: {}. Available options are: {}".format(order, options)
if order in [INC, DEC]:
reverse = order == DEC
if reverse:
attr = '__choices_reverse_'
else:
attr = '__choices_sorted_'
if not hasattr(cls, attr):
setattr(cls, attr, tuple([(c.value, c.label) for c in sorted(list(cls), reverse=reverse)]))
return getattr(cls, attr)
else:
return super(EOrderedChoice, cls).choices() | [
"def",
"choices",
"(",
"cls",
",",
"order",
"=",
"'natural'",
")",
":",
"INC",
",",
"DEC",
",",
"NAT",
"=",
"'sorted'",
",",
"'reverse'",
",",
"'natural'",
"options",
"=",
"[",
"INC",
",",
"DEC",
",",
"NAT",
"]",
"assert",
"order",
"in",
"options",
... | 38.5625 | 22.25 |
async def run_script(self, script):
"""Execute the script and save results."""
# Create a Bash command to add all the tools to PATH.
tools_paths = ':'.join([map_["dest"] for map_ in self.tools_volumes])
add_tools_path = 'export PATH=$PATH:{}'.format(tools_paths)
# Spawn another child bash, to avoid running anything as PID 1, which has special
# signal handling (e.g., cannot be SIGKILL-ed from inside).
# A login Bash shell is needed to source /etc/profile.
bash_line = '/bin/bash --login; exit $?' + os.linesep
script = os.linesep.join(['set -x', 'set +B', add_tools_path, script]) + os.linesep
self.proc.stdin.write(bash_line.encode('utf-8'))
await self.proc.stdin.drain()
self.proc.stdin.write(script.encode('utf-8'))
await self.proc.stdin.drain()
self.proc.stdin.close() | [
"async",
"def",
"run_script",
"(",
"self",
",",
"script",
")",
":",
"# Create a Bash command to add all the tools to PATH.",
"tools_paths",
"=",
"':'",
".",
"join",
"(",
"[",
"map_",
"[",
"\"dest\"",
"]",
"for",
"map_",
"in",
"self",
".",
"tools_volumes",
"]",
... | 58.266667 | 20.266667 |
def get_interface_name():
"""
Returns the interface name of the first not link_local and not loopback interface.
"""
interface_name = ''
interfaces = psutil.net_if_addrs()
for name, details in interfaces.items():
for detail in details:
if detail.family == socket.AF_INET:
ip_address = ipaddress.ip_address(detail.address)
if not (ip_address.is_link_local or ip_address.is_loopback):
interface_name = name
break
return interface_name | [
"def",
"get_interface_name",
"(",
")",
":",
"interface_name",
"=",
"''",
"interfaces",
"=",
"psutil",
".",
"net_if_addrs",
"(",
")",
"for",
"name",
",",
"details",
"in",
"interfaces",
".",
"items",
"(",
")",
":",
"for",
"detail",
"in",
"details",
":",
"i... | 38.785714 | 14.071429 |
def draw_text(self, content):
"""Draws text cell content to context"""
wx2pango_alignment = {
"left": pango.ALIGN_LEFT,
"center": pango.ALIGN_CENTER,
"right": pango.ALIGN_RIGHT,
}
cell_attributes = self.code_array.cell_attributes[self.key]
angle = cell_attributes["angle"]
if angle in [-90, 90]:
rect = self.rect[1], self.rect[0], self.rect[3], self.rect[2]
else:
rect = self.rect
# Text color attributes
self.context.set_source_rgb(*self._get_text_color())
ptx = pangocairo.CairoContext(self.context)
pango_layout = ptx.create_layout()
self.set_font(pango_layout)
pango_layout.set_wrap(pango.WRAP_WORD_CHAR)
pango_layout.set_width(int(round((rect[2] - 4.0) * pango.SCALE)))
try:
markup = cell_attributes["markup"]
except KeyError:
# Old file
markup = False
if markup:
with warnings.catch_warnings(record=True) as warning_lines:
warnings.resetwarnings()
warnings.simplefilter("always")
pango_layout.set_markup(unicode(content))
if warning_lines:
w2unicode = lambda m: unicode(m.message)
msg = u"\n".join(map(w2unicode, warning_lines))
pango_layout.set_text(msg)
else:
pango_layout.set_text(unicode(content))
alignment = cell_attributes["justification"]
pango_layout.set_alignment(wx2pango_alignment[alignment])
# Shift text for vertical alignment
extents = pango_layout.get_pixel_extents()
downshift = 0
if cell_attributes["vertical_align"] == "bottom":
downshift = rect[3] - extents[1][3] - 4
elif cell_attributes["vertical_align"] == "middle":
downshift = int((rect[3] - extents[1][3]) / 2) - 2
self.context.save()
self._rotate_cell(angle, rect)
self.context.translate(0, downshift)
# Spell check underline drawing
if SpellChecker is not None and self.spell_check:
text = unicode(pango_layout.get_text())
lang = config["spell_lang"]
for start, stop in self._check_spelling(text, lang=lang):
self._draw_error_underline(ptx, pango_layout, start, stop-1)
ptx.update_layout(pango_layout)
ptx.show_layout(pango_layout)
self.context.restore() | [
"def",
"draw_text",
"(",
"self",
",",
"content",
")",
":",
"wx2pango_alignment",
"=",
"{",
"\"left\"",
":",
"pango",
".",
"ALIGN_LEFT",
",",
"\"center\"",
":",
"pango",
".",
"ALIGN_CENTER",
",",
"\"right\"",
":",
"pango",
".",
"ALIGN_RIGHT",
",",
"}",
"cel... | 31.615385 | 20.769231 |
def itermonthdates(cls, year, month):
"""
Returns an iterator for the month in a year
This iterator will return all days (as NepDate objects) for the month
and all days before the start of the month or after the end of the month
that are required to get a complete week.
"""
curday = NepDate.from_bs_date(year, month, 1)
start_weekday = curday.weekday()
# Start_weekday represents the number of days we have to pad
for i in range(start_weekday, 0, -1):
yield (curday - timedelta(days=i))
for i in range(0, values.NEPALI_MONTH_DAY_DATA[year][month - 1]):
if i > 0:
curday.day += 1
curday.en_date = curday.en_date + timedelta(days=1)
# Create a new object and return it
n_date = NepDate(curday.year, curday.month, curday.day)
n_date.en_date = curday.en_date
yield n_date
# Now, curday points to the last day of the month. Check it's weekday
# and return days from next month to complete the week
last_weekday = curday.weekday()
remain = 6 - last_weekday
for i in range(1, remain + 1):
yield (curday + timedelta(days=i)) | [
"def",
"itermonthdates",
"(",
"cls",
",",
"year",
",",
"month",
")",
":",
"curday",
"=",
"NepDate",
".",
"from_bs_date",
"(",
"year",
",",
"month",
",",
"1",
")",
"start_weekday",
"=",
"curday",
".",
"weekday",
"(",
")",
"# Start_weekday represents the numbe... | 44.142857 | 16 |
def get_rna(self) -> Rna:
"""Get the corresponding RNA or raise an exception if it's not the reference node.
:raises: InferCentralDogmaException
"""
if self.variants:
raise InferCentralDogmaException('can not get rna for variant')
return Rna(
namespace=self.namespace,
name=self.name,
identifier=self.identifier
) | [
"def",
"get_rna",
"(",
"self",
")",
"->",
"Rna",
":",
"if",
"self",
".",
"variants",
":",
"raise",
"InferCentralDogmaException",
"(",
"'can not get rna for variant'",
")",
"return",
"Rna",
"(",
"namespace",
"=",
"self",
".",
"namespace",
",",
"name",
"=",
"s... | 30.692308 | 16.769231 |
def no_intersections(nodes1, degree1, nodes2, degree2):
r"""Determine if one surface is in the other.
Helper for :func:`combine_intersections` that handles the case
of no points of intersection. In this case, either the surfaces
are disjoint or one is fully contained in the other.
To check containment, it's enough to check if one of the corners
is contained in the other surface.
Args:
nodes1 (numpy.ndarray): The nodes defining the first surface in
the intersection (assumed in :math:\mathbf{R}^2`).
degree1 (int): The degree of the surface given by ``nodes1``.
nodes2 (numpy.ndarray): The nodes defining the second surface in
the intersection (assumed in :math:\mathbf{R}^2`).
degree2 (int): The degree of the surface given by ``nodes2``.
Returns:
Tuple[Optional[list], Optional[bool]]: Pair (2-tuple) of
* Edges info list; will be empty or :data:`None`
* "Contained" boolean. If not :data:`None`, indicates
that one of the surfaces is contained in the other.
"""
# NOTE: This is a circular import.
from bezier import _surface_intersection
located = _surface_intersection.locate_point(
nodes2, degree2, nodes1[0, 0], nodes1[1, 0]
)
if located is not None:
return None, True
located = _surface_intersection.locate_point(
nodes1, degree1, nodes2[0, 0], nodes2[1, 0]
)
if located is not None:
return None, False
return [], None | [
"def",
"no_intersections",
"(",
"nodes1",
",",
"degree1",
",",
"nodes2",
",",
"degree2",
")",
":",
"# NOTE: This is a circular import.",
"from",
"bezier",
"import",
"_surface_intersection",
"located",
"=",
"_surface_intersection",
".",
"locate_point",
"(",
"nodes2",
"... | 36.341463 | 22.731707 |
def save(self, filepath = None, password = None, keyfile = None):
"""This method saves the database.
It's possible to parse a data path to an alternative file.
"""
if (password is None and keyfile is not None and keyfile != "" and
type(keyfile) is str):
self.keyfile = keyfile
elif (keyfile is None and password is not None and password != "" and
type(password is str)):
self.password = password
elif (keyfile is not None and password is not None and
keyfile != "" and password != "" and type(keyfile) is str and
type(password) is str):
self.keyfile = keyfile
self.password = password
if self.read_only:
raise KPError("The database has been opened read-only.")
elif ((self.password is None and self.keyfile is None) or
(filepath is None and self.filepath is None) or
(keyfile == "" and password == "")):
raise KPError("Need a password/keyfile and a filepath to save the "
"file.")
elif ((type(self.filepath) is not str and self.filepath is not None) or
(type(self.password) is not str and self.password is not None) or
(type(self.keyfile) is not str and self.keyfile is not None)):
raise KPError("filepath, password and keyfile must be strings.")
elif self._num_groups == 0:
raise KPError("Need at least one group!")
content = bytearray()
# First, read out all groups
for i in self.groups:
# Get the packed bytes
# j stands for a possible field type
for j in range(1, 10):
ret_save = self._save_group_field(j, i)
# The field type and the size is always in front of the data
if ret_save is not False:
content += struct.pack('<H', j)
content += struct.pack('<I', ret_save[0])
content += ret_save[1]
# End of field
content += struct.pack('<H', 0xFFFF)
content += struct.pack('<I', 0)
# Same with entries
for i in self.entries:
for j in range(1, 15):
ret_save = self._save_entry_field(j, i)
if ret_save is not False:
content += struct.pack('<H', j)
content += struct.pack('<I', ret_save[0])
content += ret_save[1]
content += struct.pack('<H', 0xFFFF)
content += struct.pack('<I', 0)
# Generate new seed and new vector; calculate the new hash
Random.atfork()
self._final_randomseed = Random.get_random_bytes(16)
self._enc_iv = Random.get_random_bytes(16)
sha_obj = SHA256.new()
sha_obj.update(bytes(content))
self._contents_hash = sha_obj.digest()
del sha_obj
# Pack the header
header = bytearray()
header += struct.pack('<I', 0x9AA2D903)
header += struct.pack('<I', 0xB54BFB65)
header += struct.pack('<I', self._enc_flag)
header += struct.pack('<I', self._version)
header += struct.pack('<16s', self._final_randomseed)
header += struct.pack('<16s', self._enc_iv)
header += struct.pack('<I', self._num_groups)
header += struct.pack('<I', self._num_entries)
header += struct.pack('<32s', self._contents_hash)
header += struct.pack('<32s', self._transf_randomseed)
if self._key_transf_rounds < 150000:
self._key_transf_rounds = 150000
header += struct.pack('<I', self._key_transf_rounds)
# Finally encrypt everything...
if self.password is None:
masterkey = self._get_filekey()
elif self.password is not None and self.keyfile is not None:
passwordkey = self._get_passwordkey()
filekey = self._get_filekey()
sha = SHA256.new()
sha.update(passwordkey+filekey)
masterkey = sha.digest()
else:
masterkey = self._get_passwordkey()
final_key = self._transform_key(masterkey)
encrypted_content = self._cbc_encrypt(content, final_key)
del content
del masterkey
del final_key
# ...and write it out
if filepath is not None:
try:
handler = open(filepath, "wb")
except IOError:
raise KPError("Can't open {0}".format(filepath))
if self.filepath is None:
self.filepath = filepath
elif filepath is None and self.filepath is not None:
try:
handler = open(self.filepath, "wb")
except IOError:
raise KPError("Can't open {0}".format(self.filepath))
else:
raise KPError("Need a filepath.")
try:
handler.write(header+encrypted_content)
except IOError:
raise KPError("Can't write to file.")
finally:
handler.close()
if not path.isfile(self.filepath+".lock"):
try:
lock = open(self.filepath+".lock", "w")
lock.write('')
except IOError:
raise KPError("Can't create lock-file {0}".format(self.filepath
+".lock"))
else:
lock.close()
return True | [
"def",
"save",
"(",
"self",
",",
"filepath",
"=",
"None",
",",
"password",
"=",
"None",
",",
"keyfile",
"=",
"None",
")",
":",
"if",
"(",
"password",
"is",
"None",
"and",
"keyfile",
"is",
"not",
"None",
"and",
"keyfile",
"!=",
"\"\"",
"and",
"type",
... | 40.102941 | 16.757353 |
def display_graphic(self, flag_curves, ui):
"""
This function plots results of a file into the canvas.
Inputs : flag_curves : A boolean to know with we have to plot all curves or not.
ui : The main_Window.
"""
ui.graphic_widget.canvas.picture.clear()
x = scipy.linspace(self.x_data[0], self.x_data[-1], len(self.x_data)) #X-axis
curve_wanted = 0 #Iterator on lines of y_data
for curve in self.y_data:
if flag_curves:
if curve_wanted == self.num_plot: #If the iterator is equal of the slider's value, this curve is different
ui.graphic_widget.canvas.picture.plot(x, curve, '-r',
label='Case : {0}/{1}'.format(str(curve_wanted + 1),
str(len(self.y_data))),
linewidth=4)
else:
ui.graphic_widget.canvas.picture.plot(x, curve, '0.75')
else:
if curve_wanted == self.num_plot:
ui.graphic_widget.canvas.picture.plot(x, curve, '-r',
label='Case : {0}/{1}'.format(str(curve_wanted + 1),
str(len(self.y_data))))
curve_wanted += 1
ui.graphic_widget.canvas.picture.set_title('Rrs.csv')
ui.graphic_widget.canvas.picture.set_xlabel('Wavelength (${nm}$)')
ui.graphic_widget.canvas.picture.set_ylabel('Reflectance ($Sr^{-1}$)')
self.legend = ui.graphic_widget.canvas.picture.legend() #Display in a legend curves's labels.
ui.graphic_widget.canvas.picture.legend(bbox_to_anchor=(1.1, 1.05))
ui.graphic_widget.canvas.draw() | [
"def",
"display_graphic",
"(",
"self",
",",
"flag_curves",
",",
"ui",
")",
":",
"ui",
".",
"graphic_widget",
".",
"canvas",
".",
"picture",
".",
"clear",
"(",
")",
"x",
"=",
"scipy",
".",
"linspace",
"(",
"self",
".",
"x_data",
"[",
"0",
"]",
",",
... | 55.941176 | 31.529412 |
def flatten(list_of_lists):
"""Flatten a list of lists but maintain strings and ints as entries."""
flat_list = []
for sublist in list_of_lists:
if isinstance(sublist, string_types) or isinstance(sublist, int):
flat_list.append(sublist)
elif sublist is None:
continue
elif not isinstance(sublist, string_types) and len(sublist) == 1:
flat_list.append(sublist[0])
else:
flat_list.append(tuple(sublist))
return flat_list | [
"def",
"flatten",
"(",
"list_of_lists",
")",
":",
"flat_list",
"=",
"[",
"]",
"for",
"sublist",
"in",
"list_of_lists",
":",
"if",
"isinstance",
"(",
"sublist",
",",
"string_types",
")",
"or",
"isinstance",
"(",
"sublist",
",",
"int",
")",
":",
"flat_list",... | 38.615385 | 14.846154 |
def FDMT(data, f_min, f_max, maxDT, dataType):
"""
This function implements the FDMT algorithm.
Input: Input visibility array (nints, nbl, nchan, npol)
f_min,f_max are the base-band begin and end frequencies.
The frequencies should be entered in MHz
maxDT - the maximal delay (in time bins) of the maximal dispersion.
Appears in the paper as N_{\Delta}
A typical input is maxDT = N_f
dataType - a valid numpy dtype.
reccomended: either int32, or int64.
Output: The dispersion measure transform of the Input matrix.
The output dimensions are [Input.shape[1],maxDT]
For details, see algorithm 1 in Zackay & Ofek (2014)
"""
nint, nbl, nchan, npol = data.shape
niters = int(np.log2(nchan))
assert nchan in 2**np.arange(30) and nint in 2**np.arange(30), "Input dimensions must be a power of 2"
logger.info('Input data dimensions: {0}'.format(data.shape))
data = FDMT_initialization(data, f_min, f_max, maxDT, dataType)
logger.info('Iterating {0} times to calculate to maxDT of {1}'.format(niters, maxDT))
for i_t in range(1, niters+1):
data = FDMT_iteration(data, maxDT, nchan, f_min, f_max, i_t, dataType)
[nint, dT, nbl, nchan, npol] = data.shape
assert nchan == 1, 'Channel axis should have length 1 after all FDMT iterations.'
# put dT axis first and remove chan axis
return np.rollaxis(data[:,:,:,0,:], 1) | [
"def",
"FDMT",
"(",
"data",
",",
"f_min",
",",
"f_max",
",",
"maxDT",
",",
"dataType",
")",
":",
"nint",
",",
"nbl",
",",
"nchan",
",",
"npol",
"=",
"data",
".",
"shape",
"niters",
"=",
"int",
"(",
"np",
".",
"log2",
"(",
"nchan",
")",
")",
"as... | 43.676471 | 22.735294 |
def get_revision_history(brain_or_object):
"""Get the revision history for the given brain or context.
:param brain_or_object: A single catalog brain or content object
:type brain_or_object: ATContentType/DexterityContentType/CatalogBrain
:returns: Workflow history
:rtype: obj
"""
obj = get_object(brain_or_object)
chv = ContentHistoryView(obj, safe_getattr(obj, "REQUEST", None))
return chv.fullHistory() | [
"def",
"get_revision_history",
"(",
"brain_or_object",
")",
":",
"obj",
"=",
"get_object",
"(",
"brain_or_object",
")",
"chv",
"=",
"ContentHistoryView",
"(",
"obj",
",",
"safe_getattr",
"(",
"obj",
",",
"\"REQUEST\"",
",",
"None",
")",
")",
"return",
"chv",
... | 39.363636 | 16.636364 |
def fit_transform(self, col):
"""Prepare the transformer and return processed data.
Args:
col(pandas.DataFrame): Data to transform.
Returns:
pandas.DataFrame
"""
if self.anonymize:
col = self.anonymize_column(col)
self._fit(col)
return self.transform(col) | [
"def",
"fit_transform",
"(",
"self",
",",
"col",
")",
":",
"if",
"self",
".",
"anonymize",
":",
"col",
"=",
"self",
".",
"anonymize_column",
"(",
"col",
")",
"self",
".",
"_fit",
"(",
"col",
")",
"return",
"self",
".",
"transform",
"(",
"col",
")"
] | 22.466667 | 19.266667 |
def get(self, *args, **kwargs):
"""Get the next waiting message from the queue.
:returns: A :class:`Message` instance, or ``None`` if there is
no messages waiting.
"""
if not mqueue.qsize():
return None
message_data, content_type, content_encoding = mqueue.get()
return self.Message(backend=self, body=message_data,
content_type=content_type,
content_encoding=content_encoding) | [
"def",
"get",
"(",
"self",
",",
"*",
"args",
",",
"*",
"*",
"kwargs",
")",
":",
"if",
"not",
"mqueue",
".",
"qsize",
"(",
")",
":",
"return",
"None",
"message_data",
",",
"content_type",
",",
"content_encoding",
"=",
"mqueue",
".",
"get",
"(",
")",
... | 37.307692 | 17.461538 |
def to_import_properties(properties):
# type: (dict) -> dict
"""
Returns a dictionary where export properties have been replaced by import
ones
:param properties: A dictionary of service properties (with export keys)
:return: A dictionary with import properties
"""
# Copy the given dictionary
props = properties.copy()
# Add the "imported" property
props[pelix.remote.PROP_IMPORTED] = True
# Remote service ID
try:
props[pelix.remote.PROP_ENDPOINT_SERVICE_ID] = props.pop(
pelix.constants.SERVICE_ID
)
except KeyError:
# No service ID
pass
# Replace the "export configs"
configs = props.pop(pelix.remote.PROP_EXPORTED_CONFIGS, None)
if configs:
props[pelix.remote.PROP_IMPORTED_CONFIGS] = configs
# Clear other export properties
for key in (
pelix.remote.PROP_EXPORTED_INTENTS,
pelix.remote.PROP_EXPORTED_INTENTS_EXTRA,
pelix.remote.PROP_EXPORTED_INTERFACES,
):
try:
del props[key]
except KeyError:
# Key wasn't there
pass
return props | [
"def",
"to_import_properties",
"(",
"properties",
")",
":",
"# type: (dict) -> dict",
"# Copy the given dictionary",
"props",
"=",
"properties",
".",
"copy",
"(",
")",
"# Add the \"imported\" property",
"props",
"[",
"pelix",
".",
"remote",
".",
"PROP_IMPORTED",
"]",
... | 26.5 | 20.119048 |
def safe_open(filename, *args, **kwargs):
"""Open a file safely, ensuring that its directory exists.
:API: public
"""
safe_mkdir_for(filename)
return open(filename, *args, **kwargs) | [
"def",
"safe_open",
"(",
"filename",
",",
"*",
"args",
",",
"*",
"*",
"kwargs",
")",
":",
"safe_mkdir_for",
"(",
"filename",
")",
"return",
"open",
"(",
"filename",
",",
"*",
"args",
",",
"*",
"*",
"kwargs",
")"
] | 26.571429 | 11.571429 |
def import_name(self, import_loc, names):
"""import_name: 'import' dotted_as_names"""
return ast.Import(names=names,
keyword_loc=import_loc, loc=import_loc.join(names[-1].loc)) | [
"def",
"import_name",
"(",
"self",
",",
"import_loc",
",",
"names",
")",
":",
"return",
"ast",
".",
"Import",
"(",
"names",
"=",
"names",
",",
"keyword_loc",
"=",
"import_loc",
",",
"loc",
"=",
"import_loc",
".",
"join",
"(",
"names",
"[",
"-",
"1",
... | 53.75 | 12 |
def _nac(self, q_direction):
"""nac_term = (A1 (x) A2) / B * coef.
"""
num_atom = self._pcell.get_number_of_atoms()
nac_q = np.zeros((num_atom, num_atom, 3, 3), dtype='double')
if (np.abs(q_direction) < 1e-5).all():
return nac_q
rec_lat = np.linalg.inv(self._pcell.get_cell())
nac_factor = self._dynmat.get_nac_factor()
Z = self._dynmat.get_born_effective_charges()
e = self._dynmat.get_dielectric_constant()
q = np.dot(rec_lat, q_direction)
B = self._B(e, q)
for i in range(num_atom):
A_i = self._A(q, Z, i)
for j in range(num_atom):
A_j = self._A(q, Z, j)
nac_q[i, j] = np.outer(A_i, A_j) / B
num_satom = self._scell.get_number_of_atoms()
N = num_satom // num_atom
return nac_q * nac_factor / N | [
"def",
"_nac",
"(",
"self",
",",
"q_direction",
")",
":",
"num_atom",
"=",
"self",
".",
"_pcell",
".",
"get_number_of_atoms",
"(",
")",
"nac_q",
"=",
"np",
".",
"zeros",
"(",
"(",
"num_atom",
",",
"num_atom",
",",
"3",
",",
"3",
")",
",",
"dtype",
... | 34.56 | 14 |
def make_scale(ae, series, *args, **kwargs):
"""
Return a proper scale object for the series
The scale is for the aesthetic ae, and args & kwargs
are passed on to the scale creating class
"""
stype = scale_type(series)
# filter parameters by scale type
if stype == 'discrete':
with suppress(KeyError):
del kwargs['trans']
scale_name = 'scale_{}_{}'.format(ae, stype)
scale_klass = Registry[scale_name]
return scale_klass(*args, **kwargs) | [
"def",
"make_scale",
"(",
"ae",
",",
"series",
",",
"*",
"args",
",",
"*",
"*",
"kwargs",
")",
":",
"stype",
"=",
"scale_type",
"(",
"series",
")",
"# filter parameters by scale type",
"if",
"stype",
"==",
"'discrete'",
":",
"with",
"suppress",
"(",
"KeyEr... | 28.705882 | 12.117647 |
def data(self):
"""returns the reference to the data functions as a class"""
if self._resources is None:
self.__init()
if "data" in self._resources:
url = self._url + "/data"
return _data.Data(url=url,
securityHandler=self._securityHandler,
initialize=True,
proxy_url=self._proxy_url,
proxy_port=self._proxy_port)
else:
return None | [
"def",
"data",
"(",
"self",
")",
":",
"if",
"self",
".",
"_resources",
"is",
"None",
":",
"self",
".",
"__init",
"(",
")",
"if",
"\"data\"",
"in",
"self",
".",
"_resources",
":",
"url",
"=",
"self",
".",
"_url",
"+",
"\"/data\"",
"return",
"_data",
... | 39.923077 | 12.692308 |
def iterate_with_name(cls):
"""Iterate over fields, but also give `structure_name`.
Format is `(attribute_name, structue_name, field_instance)`.
Structure name is name under which value is seen in structure and
schema (in primitives) and only there.
"""
for attr_name, field in cls.iterate_over_fields():
structure_name = field.structue_name(attr_name)
yield attr_name, structure_name, field | [
"def",
"iterate_with_name",
"(",
"cls",
")",
":",
"for",
"attr_name",
",",
"field",
"in",
"cls",
".",
"iterate_over_fields",
"(",
")",
":",
"structure_name",
"=",
"field",
".",
"structue_name",
"(",
"attr_name",
")",
"yield",
"attr_name",
",",
"structure_name"... | 45.5 | 16.7 |
def walk(self, dag, walk_func):
""" Walks each node of the graph, in parallel if it can.
The walk_func is only called when the nodes dependencies have been
satisfied
"""
# First, we'll topologically sort all of the nodes, with nodes that
# have no dependencies first. We do this to ensure that we don't call
# .join on a thread that hasn't yet been started.
#
# TODO(ejholmes): An alternative would be to ensure that Thread.join
# blocks if the thread has not yet been started.
nodes = dag.topological_sort()
nodes.reverse()
# This maps a node name to a thread of execution.
threads = {}
# Blocks until all of the given nodes have completed execution (whether
# successfully, or errored). Returns True if all nodes returned True.
def wait_for(nodes):
for node in nodes:
thread = threads[node]
while thread.is_alive():
threads[node].join(0.5)
# For each node in the graph, we're going to allocate a thread to
# execute. The thread will block executing walk_func, until all of the
# nodes dependencies have executed.
for node in nodes:
def fn(n, deps):
if deps:
logger.debug(
"%s waiting for %s to complete",
n,
", ".join(deps))
# Wait for all dependencies to complete.
wait_for(deps)
logger.debug("%s starting", n)
self.semaphore.acquire()
try:
return walk_func(n)
finally:
self.semaphore.release()
deps = dag.all_downstreams(node)
threads[node] = Thread(target=fn, args=(node, deps), name=node)
# Start up all of the threads.
for node in nodes:
threads[node].start()
# Wait for all threads to complete executing.
wait_for(nodes) | [
"def",
"walk",
"(",
"self",
",",
"dag",
",",
"walk_func",
")",
":",
"# First, we'll topologically sort all of the nodes, with nodes that",
"# have no dependencies first. We do this to ensure that we don't call",
"# .join on a thread that hasn't yet been started.",
"#",
"# TODO(ejholmes):... | 35.754386 | 19.561404 |
def first_invoke(func1, func2):
"""
Return a function that when invoked will invoke func1 without
any parameters (for its side-effect) and then invoke func2
with whatever parameters were passed, returning its result.
"""
def wrapper(*args, **kwargs):
func1()
return func2(*args, **kwargs)
return wrapper | [
"def",
"first_invoke",
"(",
"func1",
",",
"func2",
")",
":",
"def",
"wrapper",
"(",
"*",
"args",
",",
"*",
"*",
"kwargs",
")",
":",
"func1",
"(",
")",
"return",
"func2",
"(",
"*",
"args",
",",
"*",
"*",
"kwargs",
")",
"return",
"wrapper"
] | 30.5 | 14.5 |
def from_file_obj(cls, fp):
"""
Init a new object from a file-like object.
Not for Outlook msg.
Args:
fp (file-like object): file-like object of raw email
Returns:
Instance of MailParser
"""
log.debug("Parsing email from file object")
try:
fp.seek(0)
except IOError:
# When stdout is a TTY it's a character device
# and it's not seekable, you cannot seek in a TTY.
pass
finally:
s = fp.read()
return cls.from_string(s) | [
"def",
"from_file_obj",
"(",
"cls",
",",
"fp",
")",
":",
"log",
".",
"debug",
"(",
"\"Parsing email from file object\"",
")",
"try",
":",
"fp",
".",
"seek",
"(",
"0",
")",
"except",
"IOError",
":",
"# When stdout is a TTY it's a character device",
"# and it's not ... | 26 | 19.090909 |
def timestamp(self, timestamp):
"""
Allows for custom timestamps to be saved with the record.
"""
clone = copy.deepcopy(self)
clone._timestamp = timestamp
return clone | [
"def",
"timestamp",
"(",
"self",
",",
"timestamp",
")",
":",
"clone",
"=",
"copy",
".",
"deepcopy",
"(",
"self",
")",
"clone",
".",
"_timestamp",
"=",
"timestamp",
"return",
"clone"
] | 29.857143 | 9 |
def is_not_empty(self, value, strict=False):
"""if value is not empty"""
value = stringify(value)
if value is not None:
return
self.shout('Value %r is empty', strict, value) | [
"def",
"is_not_empty",
"(",
"self",
",",
"value",
",",
"strict",
"=",
"False",
")",
":",
"value",
"=",
"stringify",
"(",
"value",
")",
"if",
"value",
"is",
"not",
"None",
":",
"return",
"self",
".",
"shout",
"(",
"'Value %r is empty'",
",",
"strict",
"... | 35.333333 | 9.833333 |
def mapTrace(trace, net, delta, verbose=False):
"""
matching a list of 2D positions to consecutive edges in a network
"""
result = []
paths = {}
if verbose:
print("mapping trace with %s points" % len(trace))
for pos in trace:
newPaths = {}
candidates = net.getNeighboringEdges(pos[0], pos[1], delta)
if len(candidates) == 0 and verbose:
print("Found no candidate edges for %s,%s" % pos)
for edge, d in candidates:
if paths:
minDist = 1e400
minPath = None
for path, dist in paths.iteritems():
if dist < minDist:
if edge == path[-1]:
minPath = path
minDist = dist
elif edge in path[-1].getOutgoing():
minPath = path + (edge,)
minDist = dist
else:
minPath = path + (edge,)
minDist = dist + euclidean(
path[-1].getToNode().getCoord(),
edge.getFromNode().getCoord())
if minPath:
newPaths[minPath] = minDist + d * d
else:
newPaths[(edge,)] = d * d
if not newPaths:
if paths:
result += [e.getID() for e in _getMinPath(paths)]
paths = newPaths
if paths:
return result + [e.getID() for e in _getMinPath(paths)]
return result | [
"def",
"mapTrace",
"(",
"trace",
",",
"net",
",",
"delta",
",",
"verbose",
"=",
"False",
")",
":",
"result",
"=",
"[",
"]",
"paths",
"=",
"{",
"}",
"if",
"verbose",
":",
"print",
"(",
"\"mapping trace with %s points\"",
"%",
"len",
"(",
"trace",
")",
... | 38.121951 | 14.756098 |
def log_exception(func, handler, args, kwargs):
"""
Wrap the handler ``log_exception`` method to finish the Span for the
given request, if available. This method is called when an Exception
is not handled in the user code.
"""
# safe-guard: expected arguments -> log_exception(self, typ, value, tb)
value = args[1] if len(args) == 3 else None
if value is None:
return func(*args, **kwargs)
tracing = handler.settings.get('opentracing_tracing')
if not isinstance(value, HTTPError) or 500 <= value.status_code <= 599:
tracing._finish_tracing(handler, error=value)
return func(*args, **kwargs) | [
"def",
"log_exception",
"(",
"func",
",",
"handler",
",",
"args",
",",
"kwargs",
")",
":",
"# safe-guard: expected arguments -> log_exception(self, typ, value, tb)",
"value",
"=",
"args",
"[",
"1",
"]",
"if",
"len",
"(",
"args",
")",
"==",
"3",
"else",
"None",
... | 39.8125 | 18.3125 |
def install_mesos_single_box_mode(distribution):
""" install mesos (all of it) on a single node"""
if 'ubuntu' in distribution:
log_green('adding mesosphere apt-key')
apt_add_key(keyid='E56151BF')
os = lsb_release()
apt_string = 'deb http://repos.mesosphere.io/%s %s main' % (
os['DISTRIB_ID'], os['DISTRIB_CODENAME'])
log_green('adding mesosphere apt repository')
apt_add_repository_from_apt_string(apt_string, 'mesosphere.list')
log_green('installing ubuntu development tools')
install_ubuntu_development_tools()
install_oracle_java(distribution, '8')
log_green('installing mesos and marathon')
apt_install(packages=['mesos', 'marathon'])
if not file_contains('/etc/default/mesos-master',
'MESOS_QUORUM=1', use_sudo=True):
file_append('/etc/default/mesos-master',
'MESOS_QUORUM=1', use_sudo=True)
log_green('restarting services...')
for svc in ['zookeeper', 'mesos-master', 'mesos-slave', 'marathon']:
restart_service(svc)
if not file_contains('/etc/mesos-slave/work_dir',
'/data/mesos', use_sudo=True):
file_append('/etc/mesos-slave/work_dir',
'/data/mesos', use_sudo=True)
log_green('restarting services...')
for svc in ['mesos-slave']:
restart_service(svc)
log_green('enabling nginx autoindex on /...')
with quiet():
cmd = 'cat /etc/nginx/sites-available/default'
contents = sudo(cmd).replace('\n', ' ').replace('\r', '')
if not bool(re.search('.*#*location \/ {.*autoindex on;.*', contents)):
insert_line_in_file_after_regex(
path='/etc/nginx/sites-available/default',
line=' autoindex on;',
after_regex='^[^#]*location \/ {',
use_sudo=True)
log_green('restarting nginx')
restart_service('nginx') | [
"def",
"install_mesos_single_box_mode",
"(",
"distribution",
")",
":",
"if",
"'ubuntu'",
"in",
"distribution",
":",
"log_green",
"(",
"'adding mesosphere apt-key'",
")",
"apt_add_key",
"(",
"keyid",
"=",
"'E56151BF'",
")",
"os",
"=",
"lsb_release",
"(",
")",
"apt_... | 38.12963 | 19.62963 |
def stats(self):
""" Returns a data frame with Sample data and state. """
nameordered = self.samples.keys()
nameordered.sort()
## Set pandas to display all samples instead of truncating
pd.options.display.max_rows = len(self.samples)
statdat = pd.DataFrame([self.samples[i].stats for i in nameordered],
index=nameordered).dropna(axis=1, how='all')
# ensure non h,e columns print as ints
for column in statdat:
if column not in ["hetero_est", "error_est"]:
statdat[column] = np.nan_to_num(statdat[column]).astype(int)
return statdat | [
"def",
"stats",
"(",
"self",
")",
":",
"nameordered",
"=",
"self",
".",
"samples",
".",
"keys",
"(",
")",
"nameordered",
".",
"sort",
"(",
")",
"## Set pandas to display all samples instead of truncating",
"pd",
".",
"options",
".",
"display",
".",
"max_rows",
... | 45.785714 | 19.214286 |
def apply(self, **kwexpr):
"""
Specify one or more projection expressions to add to each result
### Parameters
- **kwexpr**: One or more key-value pairs for a projection. The key is
the alias for the projection, and the value is the projection
expression itself, for example `apply(square_root="sqrt(@foo)")`
"""
for alias, expr in kwexpr.items():
self._projections.append([alias, expr])
return self | [
"def",
"apply",
"(",
"self",
",",
"*",
"*",
"kwexpr",
")",
":",
"for",
"alias",
",",
"expr",
"in",
"kwexpr",
".",
"items",
"(",
")",
":",
"self",
".",
"_projections",
".",
"append",
"(",
"[",
"alias",
",",
"expr",
"]",
")",
"return",
"self"
] | 34.357143 | 23.214286 |
def pivot(self, pivot_col, values=None):
"""
Pivots a column of the current :class:`DataFrame` and perform the specified aggregation.
There are two versions of pivot function: one that requires the caller to specify the list
of distinct values to pivot on, and one that does not. The latter is more concise but less
efficient, because Spark needs to first compute the list of distinct values internally.
:param pivot_col: Name of the column to pivot.
:param values: List of values that will be translated to columns in the output DataFrame.
# Compute the sum of earnings for each year by course with each course as a separate column
>>> df4.groupBy("year").pivot("course", ["dotNET", "Java"]).sum("earnings").collect()
[Row(year=2012, dotNET=15000, Java=20000), Row(year=2013, dotNET=48000, Java=30000)]
# Or without specifying column values (less efficient)
>>> df4.groupBy("year").pivot("course").sum("earnings").collect()
[Row(year=2012, Java=20000, dotNET=15000), Row(year=2013, Java=30000, dotNET=48000)]
>>> df5.groupBy("sales.year").pivot("sales.course").sum("sales.earnings").collect()
[Row(year=2012, Java=20000, dotNET=15000), Row(year=2013, Java=30000, dotNET=48000)]
"""
if values is None:
jgd = self._jgd.pivot(pivot_col)
else:
jgd = self._jgd.pivot(pivot_col, values)
return GroupedData(jgd, self._df) | [
"def",
"pivot",
"(",
"self",
",",
"pivot_col",
",",
"values",
"=",
"None",
")",
":",
"if",
"values",
"is",
"None",
":",
"jgd",
"=",
"self",
".",
"_jgd",
".",
"pivot",
"(",
"pivot_col",
")",
"else",
":",
"jgd",
"=",
"self",
".",
"_jgd",
".",
"pivo... | 54.444444 | 34.444444 |
def request_acquisition(self, acquisition_request):
"""RequestAcquisition.
[Preview API]
:param :class:`<ExtensionAcquisitionRequest> <azure.devops.v5_1.gallery.models.ExtensionAcquisitionRequest>` acquisition_request:
:rtype: :class:`<ExtensionAcquisitionRequest> <azure.devops.v5_1.gallery.models.ExtensionAcquisitionRequest>`
"""
content = self._serialize.body(acquisition_request, 'ExtensionAcquisitionRequest')
response = self._send(http_method='POST',
location_id='3adb1f2d-e328-446e-be73-9f6d98071c45',
version='5.1-preview.1',
content=content)
return self._deserialize('ExtensionAcquisitionRequest', response) | [
"def",
"request_acquisition",
"(",
"self",
",",
"acquisition_request",
")",
":",
"content",
"=",
"self",
".",
"_serialize",
".",
"body",
"(",
"acquisition_request",
",",
"'ExtensionAcquisitionRequest'",
")",
"response",
"=",
"self",
".",
"_send",
"(",
"http_method... | 63.333333 | 29.75 |
def verify_signature_unicode(self, address, signature, message):
"""Verify <signature> of <unicode> by <address>."""
hexdata = binascii.hexlify(message.encode("utf-8"))
return self.verify_signature(address, signature, hexdata) | [
"def",
"verify_signature_unicode",
"(",
"self",
",",
"address",
",",
"signature",
",",
"message",
")",
":",
"hexdata",
"=",
"binascii",
".",
"hexlify",
"(",
"message",
".",
"encode",
"(",
"\"utf-8\"",
")",
")",
"return",
"self",
".",
"verify_signature",
"(",... | 61.75 | 17 |
def create(context, job_id, name, type, url, data):
"""create(context, job_id, name, type, url, data)
Create an analytic.
>>> dcictl analytic-create [OPTIONS]
:param string job-id: The job on which to attach the analytic
:param string name: Name of the analytic [required]
:param string type: Type of the analytic [required]
:param string url: Url of the bug [optional]
:param string data: JSON data of the analytic
"""
result = analytic.create(context, job_id=job_id, name=name, type=type,
url=url, data=data)
utils.format_output(result, context.format) | [
"def",
"create",
"(",
"context",
",",
"job_id",
",",
"name",
",",
"type",
",",
"url",
",",
"data",
")",
":",
"result",
"=",
"analytic",
".",
"create",
"(",
"context",
",",
"job_id",
"=",
"job_id",
",",
"name",
"=",
"name",
",",
"type",
"=",
"type",... | 36.176471 | 18.176471 |
def _request(self, text, properties, retries=0):
"""Send a request to the CoreNLP server.
:param (str | unicode) text: raw text for the CoreNLPServer to parse
:param (dict) properties: properties that the server expects
:return: request result
"""
text = to_unicode(text) # ensures unicode
try:
r = requests.post(self.server, params={'properties': str(properties)}, data=text.encode('utf-8'))
r.raise_for_status()
return r
except requests.ConnectionError as e:
if retries > 5:
logging.critical('Max retries exceeded!')
raise e
else:
logging.critical(repr(e))
logging.critical("It seems like we've temporarily ran out of ports. Taking a 30s break...")
time.sleep(30)
logging.critical("Retrying...")
return self._request(text, properties, retries=retries+1)
except requests.HTTPError:
if r.text == "CoreNLP request timed out. Your document may be too long.":
raise TimeoutException(r.text)
else:
raise AnnotationException(r.text) | [
"def",
"_request",
"(",
"self",
",",
"text",
",",
"properties",
",",
"retries",
"=",
"0",
")",
":",
"text",
"=",
"to_unicode",
"(",
"text",
")",
"# ensures unicode",
"try",
":",
"r",
"=",
"requests",
".",
"post",
"(",
"self",
".",
"server",
",",
"par... | 44.555556 | 19.925926 |
def call(name, function, *args, **kwargs):
'''
Executes a Salt function inside a chroot environment.
The chroot does not need to have Salt installed, but Python is
required.
name
Path to the chroot environment
function
Salt execution module function
CLI Example:
.. code-block:: bash
salt myminion chroot.call /chroot test.ping
'''
if not function:
raise CommandExecutionError('Missing function parameter')
if not exist(name):
raise CommandExecutionError('Chroot environment not found')
# Create a temporary directory inside the chroot where we can
# untar salt-thin
thin_dest_path = tempfile.mkdtemp(dir=name)
thin_path = __utils__['thin.gen_thin'](
__opts__['cachedir'],
extra_mods=__salt__['config.option']('thin_extra_mods', ''),
so_mods=__salt__['config.option']('thin_so_mods', '')
)
stdout = __salt__['archive.tar']('xzf', thin_path, dest=thin_dest_path)
if stdout:
__utils__['files.rm_rf'](thin_dest_path)
return {'result': False, 'comment': stdout}
chroot_path = os.path.join(os.path.sep,
os.path.relpath(thin_dest_path, name))
try:
safe_kwargs = clean_kwargs(**kwargs)
salt_argv = [
'python{}'.format(sys.version_info[0]),
os.path.join(chroot_path, 'salt-call'),
'--metadata',
'--local',
'--log-file', os.path.join(chroot_path, 'log'),
'--cachedir', os.path.join(chroot_path, 'cache'),
'--out', 'json',
'-l', 'quiet',
'--',
function
] + list(args) + ['{}={}'.format(k, v) for (k, v) in safe_kwargs]
ret = __salt__['cmd.run_chroot'](name, [str(x) for x in salt_argv])
if ret['retcode'] != EX_OK:
raise CommandExecutionError(ret['stderr'])
# Process "real" result in stdout
try:
data = __utils__['json.find_json'](ret['stdout'])
local = data.get('local', data)
if isinstance(local, dict) and 'retcode' in local:
__context__['retcode'] = local['retcode']
return local.get('return', data)
except ValueError:
return {
'result': False,
'comment': "Can't parse container command output"
}
finally:
__utils__['files.rm_rf'](thin_dest_path) | [
"def",
"call",
"(",
"name",
",",
"function",
",",
"*",
"args",
",",
"*",
"*",
"kwargs",
")",
":",
"if",
"not",
"function",
":",
"raise",
"CommandExecutionError",
"(",
"'Missing function parameter'",
")",
"if",
"not",
"exist",
"(",
"name",
")",
":",
"rais... | 32.418919 | 21.824324 |
def create_checkered_image(width, height, c1=(154, 154, 154, 255),
c2=(100, 100, 100, 255), s=6):
"""
Return a checkered image of size width x height.
Arguments:
* width: image width
* height: image height
* c1: first color (RGBA)
* c2: second color (RGBA)
* s: size of the squares
"""
im = Image.new("RGBA", (width, height), c1)
draw = ImageDraw.Draw(im, "RGBA")
for i in range(s, width, 2 * s):
for j in range(0, height, 2 * s):
draw.rectangle(((i, j), ((i + s - 1, j + s - 1))), fill=c2)
for i in range(0, width, 2 * s):
for j in range(s, height, 2 * s):
draw.rectangle(((i, j), ((i + s - 1, j + s - 1))), fill=c2)
return im | [
"def",
"create_checkered_image",
"(",
"width",
",",
"height",
",",
"c1",
"=",
"(",
"154",
",",
"154",
",",
"154",
",",
"255",
")",
",",
"c2",
"=",
"(",
"100",
",",
"100",
",",
"100",
",",
"255",
")",
",",
"s",
"=",
"6",
")",
":",
"im",
"=",
... | 35.761905 | 13.095238 |
def project_workspace_addsitedir(sitedir):
"""
Similar to site.addsitedir() but prefers new sitedir over existing ones.
Therefore, prefers local packages over installed packages.
.. note::
This allows to support *.pth files and zip-/egg-imports
similar to an installed site-packages directory.
"""
assert os.path.isdir(sitedir)
try:
from site import addsitedir
except ImportError:
# -- USE: Python2.7 site.py package
from pysite import addsitedir
next_package_pos = len(sys.path)
addsitedir(sitedir)
# -- POST-PROCESS: Move new packages from end to begin of sys.path list.
pos = 0
new_packages = sys.path[next_package_pos:]
del sys.path[next_package_pos:]
sys.path[pos:pos] = new_packages | [
"def",
"project_workspace_addsitedir",
"(",
"sitedir",
")",
":",
"assert",
"os",
".",
"path",
".",
"isdir",
"(",
"sitedir",
")",
"try",
":",
"from",
"site",
"import",
"addsitedir",
"except",
"ImportError",
":",
"# -- USE: Python2.7 site.py package",
"from",
"pysit... | 33.391304 | 16.26087 |
def translate_src(src, cortex):
"""
Convert source nodes to new surface (without medial wall).
"""
src_new = np.array(np.where(np.in1d(cortex, src))[0], dtype=np.int32)
return src_new | [
"def",
"translate_src",
"(",
"src",
",",
"cortex",
")",
":",
"src_new",
"=",
"np",
".",
"array",
"(",
"np",
".",
"where",
"(",
"np",
".",
"in1d",
"(",
"cortex",
",",
"src",
")",
")",
"[",
"0",
"]",
",",
"dtype",
"=",
"np",
".",
"int32",
")",
... | 28.285714 | 18 |
def decrypt(source, dest=None, passphrase=None):
"""Attempts to decrypt a file"""
if not os.path.exists(source):
raise CryptoritoError("Encrypted file %s not found" % source)
cmd = [gnupg_bin(), gnupg_verbose(), "--decrypt", gnupg_home(),
passphrase_file(passphrase)]
if dest:
cmd.append(["--output", dest])
cmd.append([source])
stderr_output(flatten(cmd))
return True | [
"def",
"decrypt",
"(",
"source",
",",
"dest",
"=",
"None",
",",
"passphrase",
"=",
"None",
")",
":",
"if",
"not",
"os",
".",
"path",
".",
"exists",
"(",
"source",
")",
":",
"raise",
"CryptoritoError",
"(",
"\"Encrypted file %s not found\"",
"%",
"source",
... | 29.5 | 19.357143 |
def flatten(list_to_flatten):
"""Flatten out a list."""
def genflatten(lst):
for elem in lst:
if isinstance(elem, (list, tuple)):
for x in flatten(elem):
yield x
else:
yield elem
return list(genflatten(list_to_flatten)) | [
"def",
"flatten",
"(",
"list_to_flatten",
")",
":",
"def",
"genflatten",
"(",
"lst",
")",
":",
"for",
"elem",
"in",
"lst",
":",
"if",
"isinstance",
"(",
"elem",
",",
"(",
"list",
",",
"tuple",
")",
")",
":",
"for",
"x",
"in",
"flatten",
"(",
"elem"... | 25.5 | 15.416667 |
def Zabransky_cubic_integral(T, a1, a2, a3, a4):
r'''Calculates the integral of liquid heat capacity using the model
developed in [1]_.
Parameters
----------
T : float
Temperature [K]
a1-a4 : float
Coefficients
Returns
-------
H : float
Difference in enthalpy from 0 K, [J/mol]
Notes
-----
The analytical integral was derived with Sympy; it is a simple polynomial.
Examples
--------
>>> Zabransky_cubic_integral(298.15, 20.9634, -10.1344, 2.8253, -0.256738)
31051.679845520586
References
----------
.. [1] Zabransky, M., V. Ruzicka Jr, V. Majer, and Eugene S. Domalski.
Heat Capacity of Liquids: Critical Review and Recommended Values.
2 Volume Set. Washington, D.C.: Amer Inst of Physics, 1996.
'''
T = T/100.
return 100*R*T*(T*(T*(T*a4*0.25 + a3/3.) + a2*0.5) + a1) | [
"def",
"Zabransky_cubic_integral",
"(",
"T",
",",
"a1",
",",
"a2",
",",
"a3",
",",
"a4",
")",
":",
"T",
"=",
"T",
"/",
"100.",
"return",
"100",
"*",
"R",
"*",
"T",
"*",
"(",
"T",
"*",
"(",
"T",
"*",
"(",
"T",
"*",
"a4",
"*",
"0.25",
"+",
... | 26.272727 | 28.030303 |
def fromkeys(cls, seq, value=None, **kwargs):
"""
Create a new collection with keys from *seq* and values set to
*value*. The keyword arguments are passed to the persistent ``Dict``.
"""
other = cls(**kwargs)
other.update(((key, value) for key in seq))
return other | [
"def",
"fromkeys",
"(",
"cls",
",",
"seq",
",",
"value",
"=",
"None",
",",
"*",
"*",
"kwargs",
")",
":",
"other",
"=",
"cls",
"(",
"*",
"*",
"kwargs",
")",
"other",
".",
"update",
"(",
"(",
"(",
"key",
",",
"value",
")",
"for",
"key",
"in",
"... | 34.888889 | 17.111111 |
def _suppressed(self, filename, line, code):
"""Return true if linter error code is suppressed inline.
The suppression format is suppress(CODE1,CODE2,CODE3) etc.
"""
if code in self.suppress_codes:
return True
lines = self._file_lines(filename)
# File is zero length, cannot be suppressed
if not lines:
return False
# Handle errors which appear after the end of the document.
while line > len(lines):
line = line - 1
relevant_line = lines[line - 1]
try:
suppressions_function = relevant_line.split("#")[1].strip()
if suppressions_function.startswith("suppress("):
return code in _parse_suppressions(suppressions_function)
except IndexError:
above_line = lines[max(0, line - 2)]
suppressions_function = above_line.strip()[1:].strip()
if suppressions_function.startswith("suppress("):
return code in _parse_suppressions(suppressions_function)
finally:
pass | [
"def",
"_suppressed",
"(",
"self",
",",
"filename",
",",
"line",
",",
"code",
")",
":",
"if",
"code",
"in",
"self",
".",
"suppress_codes",
":",
"return",
"True",
"lines",
"=",
"self",
".",
"_file_lines",
"(",
"filename",
")",
"# File is zero length, cannot b... | 34.645161 | 20.903226 |
def absorption_coefficient( dielectric ):
"""
Calculate the optical absorption coefficient from an input set of
pymatgen vasprun dielectric constant data.
Args:
dielectric (list): A list containing the dielectric response function
in the pymatgen vasprun format.
| element 0: list of energies
| element 1: real dielectric tensors, in ``[xx, yy, zz, xy, xz, yz]`` format.
| element 2: imaginary dielectric tensors, in ``[xx, yy, zz, xy, xz, yz]`` format.
Returns:
(np.array): absorption coefficient using eV as frequency units (cm^-1).
Notes:
The absorption coefficient is calculated as
.. math:: \\alpha = \\frac{2\sqrt{2} \pi}{\lambda} \sqrt{-\epsilon_1+\sqrt{\epsilon_1^2+\epsilon_2^2}}
"""
energies_in_eV = np.array( dielectric[0] )
real_dielectric = parse_dielectric_data( dielectric[1] )
imag_dielectric = parse_dielectric_data( dielectric[2] )
epsilon_1 = np.mean( real_dielectric, axis=1 )
epsilon_2 = np.mean( imag_dielectric, axis=1 )
return ( 2.0 * np.sqrt(2.0)*pi*eV_to_recip_cm*energies_in_eV
* np.sqrt( -epsilon_1 + np.sqrt( epsilon_1**2 + epsilon_2**2 ) ) ) | [
"def",
"absorption_coefficient",
"(",
"dielectric",
")",
":",
"energies_in_eV",
"=",
"np",
".",
"array",
"(",
"dielectric",
"[",
"0",
"]",
")",
"real_dielectric",
"=",
"parse_dielectric_data",
"(",
"dielectric",
"[",
"1",
"]",
")",
"imag_dielectric",
"=",
"par... | 43.517241 | 28.206897 |
def _get_key_from_raw_synset(raw_synset):
"""Derives synset key in the form of `lemma.pos.sense_no` from the provided eurown.py Synset class,
Notes
-----
Internal function. Do not call directly.
Parameters
----------
raw_synset : eurown.Synset
Synset representation from which lemma, part-of-speech and sense is derived.
Returns
-------
string
Key of the synset in the form of `lemma.pos.sense_no`.
"""
pos = raw_synset.pos
literal = raw_synset.variants[0].literal
sense = "%02d"%raw_synset.variants[0].sense
return '.'.join([literal,pos,sense]) | [
"def",
"_get_key_from_raw_synset",
"(",
"raw_synset",
")",
":",
"pos",
"=",
"raw_synset",
".",
"pos",
"literal",
"=",
"raw_synset",
".",
"variants",
"[",
"0",
"]",
".",
"literal",
"sense",
"=",
"\"%02d\"",
"%",
"raw_synset",
".",
"variants",
"[",
"0",
"]",... | 26.347826 | 21.73913 |
def extern_store_utf8(self, context_handle, utf8_ptr, utf8_len):
"""Given a context and UTF8 bytes, return a new Handle to represent the content."""
c = self._ffi.from_handle(context_handle)
return c.to_value(self._ffi.string(utf8_ptr, utf8_len).decode('utf-8')) | [
"def",
"extern_store_utf8",
"(",
"self",
",",
"context_handle",
",",
"utf8_ptr",
",",
"utf8_len",
")",
":",
"c",
"=",
"self",
".",
"_ffi",
".",
"from_handle",
"(",
"context_handle",
")",
"return",
"c",
".",
"to_value",
"(",
"self",
".",
"_ffi",
".",
"str... | 67.75 | 16 |
def asset(class_obj: type) -> type:
"""
Decorator to annotate the Asset class. Registers the decorated class
as the Asset known type.
"""
assert isinstance(class_obj, type), "class_obj is not a Class"
global _asset_resource_type
_asset_resource_type = class_obj
return class_obj | [
"def",
"asset",
"(",
"class_obj",
":",
"type",
")",
"->",
"type",
":",
"assert",
"isinstance",
"(",
"class_obj",
",",
"type",
")",
",",
"\"class_obj is not a Class\"",
"global",
"_asset_resource_type",
"_asset_resource_type",
"=",
"class_obj",
"return",
"class_obj"
... | 33.555556 | 12 |
def group(values, min_len=0, max_len=np.inf):
"""
Return the indices of values that are identical
Parameters
----------
values: 1D array
min_len: int, the shortest group allowed
All groups will have len >= min_length
max_len: int, the longest group allowed
All groups will have len <= max_length
Returns
----------
groups: sequence of indices to form groups
IE [0,1,0,1] returns [[0,2], [1,3]]
"""
original = np.asanyarray(values)
# save the sorted order and then apply it
order = original.argsort()
values = original[order]
# find the indexes which are duplicates
if values.dtype.kind == 'f':
# for floats in a sorted array, neighbors are not duplicates
# if the difference between them is greater than approximate zero
nondupe = np.greater(np.abs(np.diff(values)), tol.zero)
else:
# for ints and strings we can check exact non- equality
# for all other types this will only work if they defined
# an __eq__
nondupe = values[1:] != values[:-1]
dupe_idx = np.append(0, np.nonzero(nondupe)[0] + 1)
dupe_len = np.diff(np.concatenate((dupe_idx, [len(values)])))
dupe_ok = np.logical_and(np.greater_equal(dupe_len, min_len),
np.less_equal(dupe_len, max_len))
groups = [order[i:(i + j)]
for i, j in zip(dupe_idx[dupe_ok],
dupe_len[dupe_ok])]
groups = np.array(groups)
return groups | [
"def",
"group",
"(",
"values",
",",
"min_len",
"=",
"0",
",",
"max_len",
"=",
"np",
".",
"inf",
")",
":",
"original",
"=",
"np",
".",
"asanyarray",
"(",
"values",
")",
"# save the sorted order and then apply it",
"order",
"=",
"original",
".",
"argsort",
"... | 34.5 | 18.409091 |
def _save_customization(self, widgets):
"""
Save the complete customization to the activity.
:param widgets: The complete set of widgets to be customized
"""
if len(widgets) > 0:
# Get the current customization and only replace the 'ext' part of it
customization = self.activity._json_data.get('customization', dict())
if customization:
customization['ext'] = dict(widgets=widgets)
else:
customization = dict(ext=dict(widgets=widgets))
# Empty the customization if if the widgets list is empty
else:
customization = None
# perform validation
if customization:
validate(customization, widgetconfig_json_schema)
# Save to the activity and store the saved activity to self
response = self._client._request("PUT",
self._client._build_url("activity", activity_id=str(self.activity.id)),
json=dict(customization=customization))
if response.status_code != requests.codes.ok: # pragma: no cover
raise APIError("Could not save customization ({})".format(response))
else:
# refresh the activity json
self.activity = self._client.activity(pk=self.activity.id) | [
"def",
"_save_customization",
"(",
"self",
",",
"widgets",
")",
":",
"if",
"len",
"(",
"widgets",
")",
">",
"0",
":",
"# Get the current customization and only replace the 'ext' part of it",
"customization",
"=",
"self",
".",
"activity",
".",
"_json_data",
".",
"get... | 43.516129 | 24.548387 |
def flatten_if(cond: Callable[[Union[T, ActualIterable[T]]], bool]):
"""
>>> from Redy.Collections import Traversal, Flow
>>> lst: Iterable[int] = [[1, 2, 3]]
>>> x = Flow(lst)[Traversal.flatten_if(lambda _: isinstance(_, list))]
>>> assert isinstance(x.unbox, Generator) and list(x.unbox) == [1, 2, 3]
"""
def inner(nested: ActualIterable[Union[T, ActualIterable[T]]]) -> ActualIterable[T]:
for each in nested:
if cond(each):
yield from inner(each)
else:
yield each
return inner | [
"def",
"flatten_if",
"(",
"cond",
":",
"Callable",
"[",
"[",
"Union",
"[",
"T",
",",
"ActualIterable",
"[",
"T",
"]",
"]",
"]",
",",
"bool",
"]",
")",
":",
"def",
"inner",
"(",
"nested",
":",
"ActualIterable",
"[",
"Union",
"[",
"T",
",",
"ActualIt... | 35.125 | 20.5 |
def get_cookie_browse_sorting(path, default):
'''
Get sorting-cookie data for path of current request.
:returns: sorting property
:rtype: string
'''
if request:
for cpath, cprop in iter_cookie_browse_sorting(request.cookies):
if path == cpath:
return cprop
return default | [
"def",
"get_cookie_browse_sorting",
"(",
"path",
",",
"default",
")",
":",
"if",
"request",
":",
"for",
"cpath",
",",
"cprop",
"in",
"iter_cookie_browse_sorting",
"(",
"request",
".",
"cookies",
")",
":",
"if",
"path",
"==",
"cpath",
":",
"return",
"cprop",
... | 27.083333 | 21.75 |
def populateFromRow(self, quantificationSetRecord):
"""
Populates the instance variables of this RnaQuantificationSet from the
specified DB row.
"""
self._dbFilePath = quantificationSetRecord.dataurl
self.setAttributesJson(quantificationSetRecord.attributes)
self._db = SqliteRnaBackend(self._dbFilePath)
self.addRnaQuants() | [
"def",
"populateFromRow",
"(",
"self",
",",
"quantificationSetRecord",
")",
":",
"self",
".",
"_dbFilePath",
"=",
"quantificationSetRecord",
".",
"dataurl",
"self",
".",
"setAttributesJson",
"(",
"quantificationSetRecord",
".",
"attributes",
")",
"self",
".",
"_db",... | 42.222222 | 14.888889 |
def count(self):
"""
Return the number of hits matching the query and filters. Note that
only the actual number is returned.
"""
if hasattr(self, '_response'):
return self._response.hits.total
es = connections.get_connection(self._using)
d = self.to_dict(count=True)
# TODO: failed shards detection
return es.count(
index=self._index,
body=d,
**self._params
)['count'] | [
"def",
"count",
"(",
"self",
")",
":",
"if",
"hasattr",
"(",
"self",
",",
"'_response'",
")",
":",
"return",
"self",
".",
"_response",
".",
"hits",
".",
"total",
"es",
"=",
"connections",
".",
"get_connection",
"(",
"self",
".",
"_using",
")",
"d",
"... | 28.352941 | 14.588235 |
def emit_code_from_ir(compound_match_query, compiler_metadata):
"""Return a MATCH query string from a CompoundMatchQuery."""
# If the compound match query contains only one match query,
# just call `emit_code_from_single_match_query`
# If there are multiple match queries, construct the query string for each
# individual query and combine them as follows.
#
# SELECT EXPAND($result)
# LET
# $optional__0 = (
# <query_string_0>
# ),
# $optional__1 = (
# <query_string_1>
# ),
# $optional__2 = (
# <query_string_2>
# ),
#
# . . .
#
# $result = UNIONALL($optional__0, $optional__1, . . . )
match_queries = compound_match_query.match_queries
if len(match_queries) == 1:
query_string = emit_code_from_single_match_query(match_queries[0])
elif len(match_queries) > 1:
query_string = emit_code_from_multiple_match_queries(match_queries)
else:
raise AssertionError(u'Received CompoundMatchQuery with an empty list of MatchQueries: '
u'{}'.format(match_queries))
return query_string | [
"def",
"emit_code_from_ir",
"(",
"compound_match_query",
",",
"compiler_metadata",
")",
":",
"# If the compound match query contains only one match query,",
"# just call `emit_code_from_single_match_query`",
"# If there are multiple match queries, construct the query string for each",
"# indiv... | 34.727273 | 23.333333 |
def container(self, cls, **kwargs):
"""Container context manager."""
self.start_container(cls, **kwargs)
yield
self.end_container() | [
"def",
"container",
"(",
"self",
",",
"cls",
",",
"*",
"*",
"kwargs",
")",
":",
"self",
".",
"start_container",
"(",
"cls",
",",
"*",
"*",
"kwargs",
")",
"yield",
"self",
".",
"end_container",
"(",
")"
] | 31.8 | 9.4 |
def show_portindex_interface_info_input_all(self, **kwargs):
"""Auto Generated Code
"""
config = ET.Element("config")
show_portindex_interface_info = ET.Element("show_portindex_interface_info")
config = show_portindex_interface_info
input = ET.SubElement(show_portindex_interface_info, "input")
all = ET.SubElement(input, "all")
callback = kwargs.pop('callback', self._callback)
return callback(config) | [
"def",
"show_portindex_interface_info_input_all",
"(",
"self",
",",
"*",
"*",
"kwargs",
")",
":",
"config",
"=",
"ET",
".",
"Element",
"(",
"\"config\"",
")",
"show_portindex_interface_info",
"=",
"ET",
".",
"Element",
"(",
"\"show_portindex_interface_info\"",
")",
... | 42.272727 | 15.272727 |
def minkowski_distance(point1, point2, degree=2):
    """!
    @brief Calculate Minkowski distance between two vectors.
    \f[
    dist(a, b) = \sqrt[p]{ \sum_{i=0}^{N}\left|a_{i} - b_{i}\right|^{p} };
    \f]
    @param[in] point1 (array_like): The first vector.
    @param[in] point2 (array_like): The second vector.
    @param[in] degree (numeric): Degree of that is used for Minkowski distance.
    @return (double) Minkowski distance between two vectors.
    @see euclidean_distance
    """
    distance = 0.0
    # Fix: use the absolute difference — without it an odd degree lets
    # negative terms cancel (or produces a negative radicand, which yields
    # a complex number on Python 3), so the result is not a metric.
    for coord1, coord2 in zip(point1, point2):
        distance += abs(coord1 - coord2) ** degree
    return distance ** (1.0 / degree)
"def",
"minkowski_distance",
"(",
"point1",
",",
"point2",
",",
"degree",
"=",
"2",
")",
":",
"distance",
"=",
"0.0",
"for",
"i",
"in",
"range",
"(",
"len",
"(",
"point1",
")",
")",
":",
"distance",
"+=",
"(",
"point1",
"[",
"i",
"]",
"-",
"point2"... | 29.363636 | 23.227273 |
def unpack(self, buff, offset=0):
    """Unpack a binary message into this object's attributes.

    Reads four bytes from *buff* starting at *offset* and stores them in
    ``self._value`` as a dotted-quad string (e.g. ``"10.0.0.1"``).

    Args:
        buff (bytes): Binary data package to be unpacked.
        offset (int): Where to begin unpacking.

    Raises:
        Exception: If there is a struct unpacking error.
    """
    window = buff[offset:offset + 4]
    try:
        octets = struct.unpack('!4B', window)
        self._value = '.'.join(str(octet) for octet in octets)
    except struct.error as exception:
        raise exceptions.UnpackException('%s; %s: %s' % (exception,
                                                         offset, buff))
"def",
"unpack",
"(",
"self",
",",
"buff",
",",
"offset",
"=",
"0",
")",
":",
"try",
":",
"unpacked_data",
"=",
"struct",
".",
"unpack",
"(",
"'!4B'",
",",
"buff",
"[",
"offset",
":",
"offset",
"+",
"4",
"]",
")",
"self",
".",
"_value",
"=",
"'.'... | 37.35 | 23.95 |
def validate_headers(self):
    """
    Check if CSV metadata files have the right format.

    Runs the base-class validation first, then checks the header row of
    the channel and content CSV files; the exercise and exercise-question
    CSVs are only checked when exercises are present.
    Propagates whatever ``validate_header`` raises on a bad header.
    """
    # Base-class validation first; the header checks below presumably
    # assume the files exist -- TODO confirm what super().validate() covers.
    super().validate()
    self.validate_header(self.channeldir, self.channelinfo, CHANNEL_INFO_HEADER)
    self.validate_header(self.channeldir, self.contentinfo, CONTENT_INFO_HEADER)
    # Exercise CSVs are optional; validate them only when present.
    if self.has_exercises():
        self.validate_header(self.channeldir, self.exercisesinfo, EXERCISE_INFO_HEADER)
        self.validate_header(self.channeldir, self.questionsinfo, EXERCISE_QUESTIONS_INFO_HEADER)
"def",
"validate_headers",
"(",
"self",
")",
":",
"super",
"(",
")",
".",
"validate",
"(",
")",
"self",
".",
"validate_header",
"(",
"self",
".",
"channeldir",
",",
"self",
".",
"channelinfo",
",",
"CHANNEL_INFO_HEADER",
")",
"self",
".",
"validate_header",
... | 52.5 | 25.3 |
def maps_get_rules_output_rules_value(self, **kwargs):
    """Build the ``maps-get-rules`` RPC output skeleton.

    Creates the XML tree ``maps_get_rules/output/rules/value`` with the
    text taken from the mandatory ``value`` keyword, then dispatches it
    through the ``callback`` keyword (default ``self._callback``).
    """
    config = ET.Element("maps_get_rules")
    output = ET.SubElement(config, "output")
    rules = ET.SubElement(output, "rules")
    value_node = ET.SubElement(rules, "value")
    value_node.text = kwargs.pop('value')
    callback = kwargs.pop('callback', self._callback)
    return callback(config)
"def",
"maps_get_rules_output_rules_value",
"(",
"self",
",",
"*",
"*",
"kwargs",
")",
":",
"config",
"=",
"ET",
".",
"Element",
"(",
"\"config\"",
")",
"maps_get_rules",
"=",
"ET",
".",
"Element",
"(",
"\"maps_get_rules\"",
")",
"config",
"=",
"maps_get_rules... | 37.769231 | 10.153846 |
def fromargskw(argskw, argspecs, slf_or_clsm=False):
    """Turn a linearized sequence of arguments back into ``(args, kwargs)``
    form, guided by an argspec (as produced by the ``inspect`` module).
    """
    positional = argskw
    # Older argspecs expose the **kwargs name as `keywords`, newer
    # (getfullargspec-style) ones as `varkw`.
    try:
        kw_name = argspecs.keywords
    except AttributeError:
        kw_name = argspecs.varkw
    if kw_name is not None:
        # The trailing element of the linearized form carries the **kwargs dict.
        keyword_dict = positional[-1]
        positional = positional[:-1]
    else:
        keyword_dict = None
    if argspecs.varargs is not None:
        # Position of the *args tuple; `self`/`cls` is not part of the
        # linearized form, hence the -1 for bound methods.
        star_pos = (len(argspecs.args) - 1) if slf_or_clsm else len(argspecs.args)
        if star_pos > 0:
            merged = list(argskw[:star_pos])
            merged.extend(argskw[star_pos])
            positional = tuple(merged)
        else:
            positional = argskw[0]
    try:
        if len(argspecs.kwonlyargs) > 0:
            keyword_dict = {} if keyword_dict is None else dict(keyword_dict)
            pos = -len(argspecs.kwonlyargs) - (0 if kw_name is None else 1)
            for name in argspecs.kwonlyargs:
                keyword_dict[name] = argskw[pos]
                pos += 1
    except AttributeError:
        # Legacy argspecs without kwonlyargs.
        pass
    if keyword_dict is None:
        keyword_dict = {}
    return positional, keyword_dict
"def",
"fromargskw",
"(",
"argskw",
",",
"argspecs",
",",
"slf_or_clsm",
"=",
"False",
")",
":",
"res_args",
"=",
"argskw",
"try",
":",
"kwds",
"=",
"argspecs",
".",
"keywords",
"except",
"AttributeError",
":",
"kwds",
"=",
"argspecs",
".",
"varkw",
"if",
... | 32.742857 | 14.171429 |
def encrypt_email(email):
    """
    Encrypt an email address for storage in the database.

    Uses AES with the key configured under ``AES_KEY`` in the current
    Flask application's configuration.

    :param email:
        The email address.
    """
    cipher = SimpleAES(flask.current_app.config["AES_KEY"])
    return cipher.encrypt(email)
"def",
"encrypt_email",
"(",
"email",
")",
":",
"aes",
"=",
"SimpleAES",
"(",
"flask",
".",
"current_app",
".",
"config",
"[",
"\"AES_KEY\"",
"]",
")",
"return",
"aes",
".",
"encrypt",
"(",
"email",
")"
] | 29.181818 | 21.181818 |
def set_info(self, key, value, append=True):
    """
    Attach arbitrary metadata under *key*.

    With *append* true (the default) values accumulate in a list, so
    repeated calls never overwrite earlier entries; otherwise the stored
    value is replaced outright.
    """
    if append:
        self.info.setdefault(key, []).append(value)
    else:
        self.info[key] = value
"def",
"set_info",
"(",
"self",
",",
"key",
",",
"value",
",",
"append",
"=",
"True",
")",
":",
"if",
"append",
":",
"if",
"key",
"not",
"in",
"self",
".",
"info",
":",
"self",
".",
"info",
"[",
"key",
"]",
"=",
"[",
"]",
"self",
".",
"info",
... | 34.25 | 14.083333 |
def check_infos(self, expected_info_messages=(), allowed_info_messages=()):
    """
    This method should be called whenever you need to check if there is some
    info. Normally you need only ``check_expected_infos`` called after each
    test (which you specify only once), but it will check infos only at the
    end of test. When you have big use case and you need to check messages
    on every step, use this.
    To parameters you should pass same values like to decorators
    :py:func:`.expected_info_messages` and :py:func:`.allowed_info_messages`.
    """
    # Close unexpected alerts (it's blocking).
    self.close_alert(ignore_exception=True)
    # Fix: tuple defaults instead of the shared mutable ``=[]`` defaults;
    # both are normalized to sets immediately, so callers are unaffected.
    expected_info_messages = set(expected_info_messages)
    allowed_info_messages = set(allowed_info_messages)
    info_messages = set(self.get_info_messages())
    # Fail when an expected message is missing, or when expectations exist
    # and a message that is neither expected nor allowed showed up.
    missing_expected = expected_info_messages - info_messages
    unexpected = info_messages - (expected_info_messages | allowed_info_messages)
    if missing_expected or (expected_info_messages and unexpected):
        raise InfoMessagesException(self.current_url, info_messages,
                                    expected_info_messages, allowed_info_messages)
"def",
"check_infos",
"(",
"self",
",",
"expected_info_messages",
"=",
"[",
"]",
",",
"allowed_info_messages",
"=",
"[",
"]",
")",
":",
"# Close unexpected alerts (it's blocking).",
"self",
".",
"close_alert",
"(",
"ignore_exception",
"=",
"True",
")",
"expected_inf... | 52.608696 | 29.73913 |
def compute_discounts(self, precision=None):
    '''
    Returns the total amount of discounts for this line with a specific
    number of decimals, capped at the line's gross amount so the discount
    can never exceed the amount being discounted.
    @param precision:int number of decimal places
    @return: Decimal
    '''
    gross_amount = self.compute_gross(precision)
    total_discount = sum(discount.compute(gross_amount, precision)
                         for discount in self.__discounts)
    return min(gross_amount, total_discount)
"def",
"compute_discounts",
"(",
"self",
",",
"precision",
"=",
"None",
")",
":",
"gross",
"=",
"self",
".",
"compute_gross",
"(",
"precision",
")",
"return",
"min",
"(",
"gross",
",",
"sum",
"(",
"[",
"d",
".",
"compute",
"(",
"gross",
",",
"precision... | 39.5 | 19.9 |
def _compare_lists(list1, list2, custom_cmp):
"""Compare twolists using given comparing function.
:param list1: first list to compare
:param list2: second list to compare
:param custom_cmp: a function taking two arguments (element of
list 1, element of list 2) and
:return: True or False depending if the values are the same
"""
if len(list1) != len(list2):
return False
for element1, element2 in zip(list1, list2):
if not custom_cmp(element1, element2):
return False
return True | [
"def",
"_compare_lists",
"(",
"list1",
",",
"list2",
",",
"custom_cmp",
")",
":",
"if",
"len",
"(",
"list1",
")",
"!=",
"len",
"(",
"list2",
")",
":",
"return",
"False",
"for",
"element1",
",",
"element2",
"in",
"zip",
"(",
"list1",
",",
"list2",
")"... | 35.866667 | 12 |
def appendRandomLenPadding(str, blocksize=AES_blocksize):
    '''ISO 10126 Padding (withdrawn, 2007): Pad with random bytes + last byte equal to the number of padding bytes'''
    pad_len = paddingLength(len(str), blocksize) - 1
    from os import urandom
    random_fill = urandom(pad_len)
    if isinstance(str, bytes):
        # Fix: keep everything in the bytes domain -- the original
        # concatenated bytes (urandom) with a text character (chr), which
        # raises TypeError on Python 3.
        return str + random_fill + bytes((pad_len,))
    # Text input (legacy Python-2-style API): map the random bytes through
    # latin-1 so every byte value round-trips to exactly one character.
    return str + random_fill.decode('latin-1') + chr(pad_len)
"def",
"appendRandomLenPadding",
"(",
"str",
",",
"blocksize",
"=",
"AES_blocksize",
")",
":",
"pad_len",
"=",
"paddingLength",
"(",
"len",
"(",
"str",
")",
",",
"blocksize",
")",
"-",
"1",
"from",
"os",
"import",
"urandom",
"padding",
"=",
"urandom",
"(",... | 34.555556 | 27.444444 |
def _set_get_last_config_update_time(self, v, load=False):
    """
    Setter method for get_last_config_update_time, mapped from YANG variable /brocade_vcs_rpc/get_last_config_update_time (rpc)
    If this variable is read-only (config: false) in the
    source YANG file, then _set_get_last_config_update_time is considered as a private
    method. Backends looking to populate this variable should
    do so via calling thisObj._set_get_last_config_update_time() directly.
    YANG Description: This rpc function provides time-stamp of the last
    configutation change done on the managed device.
    """
    # Auto-generated pyangbind setter: coerce ``v`` into the generated RPC
    # container type and store it; the YANGDynClass call mirrors the YANG
    # model and should not be hand-edited.
    # Some wrapped values carry their underlying type in ``_utype``; unwrap
    # first so the coercion below starts from the raw value.
    if hasattr(v, "_utype"):
        v = v._utype(v)
    try:
        t = YANGDynClass(v,base=get_last_config_update_time.get_last_config_update_time, is_leaf=True, yang_name="get-last-config-update-time", rest_name="get-last-config-update-time", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=False, extensions={u'tailf-common': {u'hidden': u'rpccmd', u'actionpoint': u'last-config-update-time-action-point'}}, namespace='urn:brocade.com:mgmt:brocade-vcs', defining_module='brocade-vcs', yang_type='rpc', is_config=True)
    except (TypeError, ValueError):
        # Re-raise with a YANG-level description of the expected type.
        raise ValueError({
            'error-string': """get_last_config_update_time must be of a type compatible with rpc""",
            'defined-type': "rpc",
            'generated-type': """YANGDynClass(base=get_last_config_update_time.get_last_config_update_time, is_leaf=True, yang_name="get-last-config-update-time", rest_name="get-last-config-update-time", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=False, extensions={u'tailf-common': {u'hidden': u'rpccmd', u'actionpoint': u'last-config-update-time-action-point'}}, namespace='urn:brocade.com:mgmt:brocade-vcs', defining_module='brocade-vcs', yang_type='rpc', is_config=True)""",
        })
    self.__get_last_config_update_time = t
    # Notify the containing object (when supported) that a child changed.
    if hasattr(self, '_set'):
        self._set()
"def",
"_set_get_last_config_update_time",
"(",
"self",
",",
"v",
",",
"load",
"=",
"False",
")",
":",
"if",
"hasattr",
"(",
"v",
",",
"\"_utype\"",
")",
":",
"v",
"=",
"v",
".",
"_utype",
"(",
"v",
")",
"try",
":",
"t",
"=",
"YANGDynClass",
"(",
"... | 78.36 | 38.32 |
def load_mib(filenames):
    """Load the conf.mib dict from a list of filenames.

    Parses OBJECT IDENTIFIER declarations out of the given MIB files
    (shell-style globs are accepted), merges them with the identifiers
    already registered in ``conf.mib``, and installs the resulting
    ``MIBDict`` back into ``conf.mib``.

    :param filenames: a single path/glob or a list of paths/globs
    """
    the_mib = {'iso': ['1']}
    unresolved = {}
    # Seed the resolver with everything already known.
    for k in six.iterkeys(conf.mib):
        _mib_register(conf.mib[k], k.split("."), the_mib, unresolved)
    if isinstance(filenames, (str, bytes)):
        filenames = [filenames]
    for fnames in filenames:
        for fname in glob(fnames):
            # Fix: use a context manager -- the original never closed the
            # file handle it opened.
            with open(fname) as f:
                text = f.read()
            # Strip comments and string literals before scanning for
            # OID declarations.
            cleantext = " ".join(_mib_re_strings.split(" ".join(_mib_re_comments.split(text))))  # noqa: E501
            for m in _mib_re_oiddecl.finditer(cleantext):
                gr = m.groups()
                ident, oid = gr[0], gr[-1]
                ident = fixname(ident)
                oid = oid.split()
                # Each OID component may be "name(number)"; keep the number.
                for i, elt in enumerate(oid):
                    m = _mib_re_both.match(elt)
                    if m:
                        oid[i] = m.groups()[1]
                _mib_register(ident, oid, the_mib, unresolved)
    newmib = MIBDict(_name="MIB")
    for oid, key in six.iteritems(the_mib):
        newmib[".".join(key)] = oid
    for oid, key in six.iteritems(unresolved):
        newmib[".".join(key)] = oid
    conf.mib = newmib
"def",
"load_mib",
"(",
"filenames",
")",
":",
"the_mib",
"=",
"{",
"'iso'",
":",
"[",
"'1'",
"]",
"}",
"unresolved",
"=",
"{",
"}",
"for",
"k",
"in",
"six",
".",
"iterkeys",
"(",
"conf",
".",
"mib",
")",
":",
"_mib_register",
"(",
"conf",
".",
"... | 36.59375 | 14.5 |
def set_web_index_page(self, container, page):
    """
    Set the ``X-Container-Meta-Web-Index`` header on *container*, naming
    the page that is served as the index of a static website.

    Note: only has an effect on CDN-enabled containers.
    """
    uri = "/%s" % utils.get_name(container)
    index_header = {"X-Container-Meta-Web-Index": "%s" % page}
    self.api.cdn_request(uri, method="POST", headers=index_header)
"def",
"set_web_index_page",
"(",
"self",
",",
"container",
",",
"page",
")",
":",
"headers",
"=",
"{",
"\"X-Container-Meta-Web-Index\"",
":",
"\"%s\"",
"%",
"page",
"}",
"self",
".",
"api",
".",
"cdn_request",
"(",
"\"/%s\"",
"%",
"utils",
".",
"get_name",
... | 38.636364 | 16.636364 |
def true_neg_rate(self):
    """Calculate the true negative rate (specificity).

    :return: true negatives divided by (true negatives + false positives)
    """
    negatives_row = self.matrix[1]
    false_pos, true_neg = negatives_row[0], negatives_row[1]
    return divide(1.0 * true_neg, true_neg + false_pos)
"def",
"true_neg_rate",
"(",
"self",
")",
":",
"false_pos",
"=",
"self",
".",
"matrix",
"[",
"1",
"]",
"[",
"0",
"]",
"true_neg",
"=",
"self",
".",
"matrix",
"[",
"1",
"]",
"[",
"1",
"]",
"return",
"divide",
"(",
"1.0",
"*",
"true_neg",
",",
"tru... | 30.25 | 10.875 |
def cli(ctx, env):
    """Print shell help text."""
    env.out("Welcome to the SoftLayer shell.")
    env.out("")
    formatter = formatting.HelpFormatter()
    shell_commands = []
    plain_commands = []
    # Commands that have a shell route are listed separately.
    route_names = dict(routes.ALL_ROUTES)
    for name in cli_core.cli.list_commands(ctx):
        command = cli_core.cli.get_command(ctx, name)
        if command.short_help is None:
            command.short_help = command.help
        entry = (name, command.short_help)
        bucket = shell_commands if name in route_names else plain_commands
        bucket.append(entry)
    with formatter.section('Shell Commands'):
        formatter.write_dl(shell_commands)
    with formatter.section('Commands'):
        formatter.write_dl(plain_commands)
    for line in formatter.buffer:
        env.out(line, newline=False)
"def",
"cli",
"(",
"ctx",
",",
"env",
")",
":",
"env",
".",
"out",
"(",
"\"Welcome to the SoftLayer shell.\"",
")",
"env",
".",
"out",
"(",
"\"\"",
")",
"formatter",
"=",
"formatting",
".",
"HelpFormatter",
"(",
")",
"commands",
"=",
"[",
"]",
"shell_com... | 30.230769 | 13.307692 |
def filter_bandpass_fourier(t, data, method='stft', detrend='linear',
                            df=None, harm=True,
                            df_out=None, harm_out=True):
    """ Return bandpass FFT-filtered signal (and the rest)
    Optionnally include all higher harmonics
    Can also exclude a frequency interval and its higher harmonics
    Parameters
    ----------
    t : np.ndarray
        1D array, monotonously increasing time vector with regular spacing
    data : np.ndarray
        2D array, with shape[0]=t.size, the data to be filtered
    method: str
        Flag indicating which method to use:
            - 'rfft': scipy.fftpack.rfft
            - 'stft': scipy.signal.stft
    df : None / list
        List or tuple of len()=2, containing the bandpass lower / upper bounds
    harm : bool
        If True all the higher harmonics of df will also be included
    df_out : None / list
        List or tuple of len()=2, containing the bandpass lower / upper bounds
        to be excluded from filtering (if it overlaps with high harmonics of df)
    harm_out : bool
        If True, the higher harmonics of the interval df_out are also excluded
    Returns
    -------
    data_in : np.ndarray
        Array with shape=data.shape, filtered signal retrieved from inverse FFT
    data_out : np.ndarray
        Array with shape=data.shape, excluded signal from filtering
    """
    # Check / format inputs
    assert data.ndim == 2 and t.ndim == 1
    # Fix: the original compared the int data.shape[0] with the tuple
    # (t.size,), which is always False; compare the lengths directly.
    assert data.shape[0] == t.size
    assert np.allclose(t, np.unique(t)) and np.std(np.diff(t)) <= 1.e-12
    assert method in ['rfft', 'stft']
    lC = [df is None, df_out is None]
    assert np.sum(lC) <= 1, "At least one of df or df_out must be provided !"
    assert type(harm) is bool and type(harm_out) is bool
    if df is not None:
        df = np.unique(df)
        assert df.shape == (2,)
    if df_out is not None:
        df_out = np.unique(df_out)
        assert df_out.shape == (2,)
    nt, nch = data.shape
    dt = np.mean(np.diff(t))
    fs = 1. / dt
    # NOTE(review): the `detrend` parameter is currently unused -- confirm
    # whether it should be forwarded to the helpers.
    if method == 'rfft':
        data_in, data_out = _filter_bandpass_rfft(data, t, dt, fs, nt, nch,
                                                  df=df, harm=harm,
                                                  df_out=df_out,
                                                  harm_out=harm_out)
    elif method == 'stft':   # fix: was misspelled `methd` (NameError)
        data_in, data_out = _filter_bandpass_stft(data, t, dt, fs, nt, nch,
                                                  df=df, harm=harm,
                                                  df_out=df_out,
                                                  harm_out=harm_out)
    # Fix: the results were computed but never returned.
    return data_in, data_out
"def",
"filter_bandpass_fourier",
"(",
"t",
",",
"data",
",",
"method",
"=",
"'stft'",
",",
"detrend",
"=",
"'linear'",
",",
"df",
"=",
"None",
",",
"harm",
"=",
"True",
",",
"df_out",
"=",
"None",
",",
"harm_out",
"=",
"True",
")",
":",
"# Check / for... | 40.19403 | 21.313433 |
def _prep_config(items, paired, work_dir):
    """Run initial configuration, generating a run directory for Manta.

    Builds and runs the ``configManta.py`` command line for a tumor/normal
    pair, a tumor-only sample, or a batch of samples, and returns the path
    to the generated ``runWorkflow.py`` script inside *work_dir*.
    """
    assert utils.which("configManta.py"), "Could not find installed configManta.py"
    out_file = os.path.join(work_dir, "runWorkflow.py")
    # Only (re)configure when the workflow script is missing or stale.
    if not utils.file_exists(out_file) or _out_of_date(out_file):
        config_script = os.path.realpath(utils.which("configManta.py"))
        cmd = [utils.get_program_python("configManta.py"), config_script]
        # Input BAMs: paired tumor/normal, tumor-only, or one --bam per item.
        if paired:
            if paired.normal_bam:
                cmd += ["--normalBam=%s" % paired.normal_bam, "--tumorBam=%s" % paired.tumor_bam]
            else:
                cmd += ["--tumorBam=%s" % paired.tumor_bam]
        else:
            cmd += ["--bam=%s" % dd.get_align_bam(data) for data in items]
        # Representative sample used for reference/coverage/region options.
        data = paired.tumor_data if paired else items[0]
        cmd += ["--referenceFasta=%s" % dd.get_ref_file(data), "--runDir=%s" % work_dir]
        # Anything that is not whole-genome runs in exome mode.
        if dd.get_coverage_interval(data) not in ["genome"]:
            cmd += ["--exome"]
        for region in _maybe_limit_chromosomes(data):
            cmd += ["--region", region]
        # User-configured extra options for manta, appended verbatim.
        resources = config_utils.get_resources("manta", data["config"])
        if resources.get("options"):
            cmd += [str(x) for x in resources["options"]]
        # If we are removing polyX, avoid calling on small indels which require
        # excessively long runtimes on noisy WGS runs
        if "polyx" in dd.get_exclude_regions(data):
            cmd += ["--config", _prep_streamlined_config(config_script, work_dir)]
        do.run(cmd, "Configure manta SV analysis")
    return out_file
"def",
"_prep_config",
"(",
"items",
",",
"paired",
",",
"work_dir",
")",
":",
"assert",
"utils",
".",
"which",
"(",
"\"configManta.py\"",
")",
",",
"\"Could not find installed configManta.py\"",
"out_file",
"=",
"os",
".",
"path",
".",
"join",
"(",
"work_dir",
... | 53.4 | 21.133333 |
def fingerprint(dirnames, prefix=None, previous=[]):
    #pylint:disable=dangerous-default-value
    """
    Returns a list of paths available from *dirname*. When previous
    is specified, returns a list of additional files only.
    Example:
    [{ "Key": "abc.txt",
    "LastModified": "Mon, 05 Jan 2015 12:00:00 UTC"},
    { "Key": "def.txt",
    "LastModified": "Mon, 05 Jan 2015 12:00:001 UTC"},
    ]
    """
    results = []
    # Fix: O(1) set membership instead of the former linear scan of
    # *previous* for every file encountered (O(n*m) overall).
    known_keys = {entry['Key'] for entry in previous}
    for dirname in dirnames:
        for filename in os.listdir(dirname):
            fullpath = os.path.join(dirname, filename)
            if os.path.isdir(fullpath):
                results += fingerprint(
                    [fullpath], prefix=filename, previous=previous)
            else:
                fullname = fullpath
                if prefix and fullname.startswith(prefix):
                    fullname = fullname[len(prefix):]
                if fullname not in known_keys:
                    mtime = datetime.datetime.fromtimestamp(
                        os.path.getmtime(fullpath), tz=utc)
                    results += [{"Key": fullname,
                                 "LastModified": mtime.strftime(
                                     '%a, %d %b %Y %H:%M:%S %Z')}]
    return results
"def",
"fingerprint",
"(",
"dirnames",
",",
"prefix",
"=",
"None",
",",
"previous",
"=",
"[",
"]",
")",
":",
"#pylint:disable=dangerous-default-value",
"results",
"=",
"[",
"]",
"for",
"dirname",
"in",
"dirnames",
":",
"for",
"filename",
"in",
"os",
".",
"... | 38.694444 | 14.972222 |
def get_usrgos(self, fin_goids, prt):
    """Return source GO IDs .

    Resolves the user-supplied GO IDs via ``get_goids``; when none were
    given, falls back to every leaf term of a sufficiently small GO DAG
    (alternate IDs excluded), otherwise raises. The result is
    de-duplicated through ``get_go2obj_unique``.

    :raises RuntimeError: no GO IDs were given and the DAG is too large
        (or ``max_gos`` is unset) to enumerate its leaves.
    """
    ret = self.get_goids(None, fin_goids, prt)
    # If there have been no GO IDs explicitly specified by the user
    if not ret:
        # If the GO-DAG is sufficiently small, print all GO IDs
        if self.max_gos is not None and len(self.go2obj) < self.max_gos:
            # go != o.id presumably marks alternate IDs pointing at a main
            # record -- TODO confirm go2obj alt-ID convention.
            main_gos = set(o.id for go, o in self.go2obj.items() if go != o.id)
            go_leafs = set(go for go, o in self.go2obj.items() if not o.children)
            ret = go_leafs.difference(main_gos)
        else:
            raise RuntimeError("GO IDs NEEDED")
    go2obj = self.get_go2obj(ret)
    return get_go2obj_unique(go2obj)
"def",
"get_usrgos",
"(",
"self",
",",
"fin_goids",
",",
"prt",
")",
":",
"ret",
"=",
"self",
".",
"get_goids",
"(",
"None",
",",
"fin_goids",
",",
"prt",
")",
"# If there have been no GO IDs explicitly specified by the user",
"if",
"not",
"ret",
":",
"# If the ... | 51.428571 | 18.857143 |
def generate_report(out_dir, latex_summaries, nb_markers, nb_samples, options):
    """Generate the merged LaTeX report for the run.

    :param out_dir: the output directory.
    :param latex_summaries: the list of LaTeX summaries.
    :param nb_markers: the final number of markers.
    :param nb_samples: the final number of samples.
    :param options: the list of options.
    :type out_dir: str
    :type latex_summaries: list
    :type nb_markers: str
    :type nb_samples: str
    :type options: argparse.Namespace
    """
    # The graphic-paths file is optional; pass None when absent.
    graphic_candidate = os.path.join(out_dir, "graphic_paths.txt")
    graphic_paths_fn = graphic_candidate if os.path.isfile(graphic_candidate) else None
    # Build the automatic report from the collected summaries.
    report_name = os.path.join(out_dir, "merged_report.tex")
    auto_report.create_report(
        out_dir,
        report_name,
        project_name=options.report_number,
        steps_filename=os.path.join(out_dir, "steps_summary.tex"),
        summaries=latex_summaries,
        background=options.report_background,
        summary_fn=os.path.join(out_dir, "results_summary.txt"),
        report_title=options.report_title,
        report_author=options.report_author,
        initial_files=os.path.join(out_dir, "initial_files.txt"),
        final_files=os.path.join(out_dir, "final_files.txt"),
        final_nb_markers=nb_markers,
        final_nb_samples=nb_samples,
        plink_version=get_plink_version(),
        graphic_paths_fn=graphic_paths_fn,
    )
"def",
"generate_report",
"(",
"out_dir",
",",
"latex_summaries",
",",
"nb_markers",
",",
"nb_samples",
",",
"options",
")",
":",
"# Getting the graphic paths file",
"graphic_paths_fn",
"=",
"None",
"if",
"os",
".",
"path",
".",
"isfile",
"(",
"os",
".",
"path",... | 36.95 | 15.275 |
def cache(self, con):
    """Put a connection back into the pool cache.

    Resets or rolls back the connection as configured, queues it back into
    the cache (closing it instead when the cache is full), and finally
    releases the connection-count semaphore when one is in use.
    """
    try:
        # _reset == 2 appears to mean "always perform a full reset";
        # otherwise roll back only when requested or a transaction is open.
        # TODO confirm the _reset level semantics.
        if self._reset == 2:
            con.reset() # reset the connection completely
        else:
            if self._reset or con._transaction:
                try:
                    con.rollback() # rollback a possible transaction
                except Exception:
                    # Best-effort rollback: a failed rollback must not keep
                    # the connection out of the pool.
                    pass
        self._cache.put(con, 0) # and then put it back into the cache
    except Full:
        # Cache already holds enough idle connections: discard this one.
        con.close()
    if self._connections:
        self._connections.release()
"def",
"cache",
"(",
"self",
",",
"con",
")",
":",
"try",
":",
"if",
"self",
".",
"_reset",
"==",
"2",
":",
"con",
".",
"reset",
"(",
")",
"# reset the connection completely",
"else",
":",
"if",
"self",
".",
"_reset",
"or",
"con",
".",
"_transaction",
... | 37.375 | 16.125 |
def accounts():
    """Load the accounts YAML file and return a dict.

    Tries each candidate path in ``account_files`` in order (creating the
    containing directory when needed) and returns the ``accounts`` mapping
    from the first readable file. Returns an empty dict when none can be
    read.
    """
    import yaml
    for path in account_files:
        try:
            c_dir = os.path.dirname(path)
            if not os.path.exists(c_dir):
                os.makedirs(c_dir)
            with open(path, 'rb') as f:
                # Fix: safe_load -- plain yaml.load without a Loader is
                # deprecated and can construct arbitrary Python objects.
                return yaml.safe_load(f)['accounts']
        except (OSError, IOError):
            # Unreadable/uncreatable path: fall through to the next candidate.
            pass
    return {}
"def",
"accounts",
"(",
")",
":",
"import",
"yaml",
"for",
"path",
"in",
"account_files",
":",
"try",
":",
"c_dir",
"=",
"os",
".",
"path",
".",
"dirname",
"(",
"path",
")",
"if",
"not",
"os",
".",
"path",
".",
"exists",
"(",
"c_dir",
")",
":",
"... | 20.947368 | 20.842105 |
def index(self, row, column, parent):
    """Return the QModelIndex for (row, column) under *parent*.

    row, column == int, parent == QModelIndex. Invalid coordinates or a
    missing child yield an invalid QModelIndex.
    """
    if not self.hasIndex(row, column, parent):
        return QtCore.QModelIndex()
    # An invalid parent index means the item hangs off the hidden root;
    # otherwise recover the actual parent object from the index.
    parent_item = self.root if not parent.isValid() else parent.internalPointer()
    # The only place where a child item is queried.
    child_item = parent_item.getChild(row)
    if not child_item:
        return QtCore.QModelIndex()
    # createIndex must receive the item as its third argument: PySide2
    # 5.12+ only accepts the (int, int, quintptr/void/object) overloads.
    return self.createIndex(row, column, child_item)
"def",
"index",
"(",
"self",
",",
"row",
",",
"column",
",",
"parent",
")",
":",
"if",
"not",
"self",
".",
"hasIndex",
"(",
"row",
",",
"column",
",",
"parent",
")",
":",
"return",
"QtCore",
".",
"QModelIndex",
"(",
")",
"if",
"not",
"parent",
".",... | 42.827586 | 18.344828 |
def execute_interactive_code(elem, doc):
    """Executes code blocks for a python shell.

    Parses the code in `elem.text` into blocks and
    executes them.

    Args:
        elem The AST element.
        doc The document.

    Return:
        The code with inline results, or '' when pexpect is unavailable.
    """
    # Strip the 4-space code-block indent from every line.
    code_lines = [l[4:] for l in elem.text.split('\n')]
    # Group lines into statements: a continuation (indented or blank) line
    # belongs to the previous block, anything else starts a new one.
    code_blocks = [[code_lines[0]]]
    for line in code_lines[1:]:
        if line.startswith(' ') or line == '':
            code_blocks[-1].append(line)
        else:
            code_blocks.append([line])
    final_code = []
    try:
        # replwrap comes from pexpect; NameError means it was never imported.
        child = replwrap.REPLWrapper("python", ">>> ", None)
    except NameError:
        pf.debug('Can not run interactive session. No output produced ' +
                 '(Code was:\n{!s}\n)'
                 .format(elem))
        pf.debug('Please pip install pexpect.')
        return ''
    for code_block in code_blocks:
        # Run one statement at a time and capture its output.
        result = child.run_command('\n'.join(code_block) + '\n').rstrip('\r\n')
        # Re-render the input with interpreter prompts (>>> then ...).
        final_code += [('>>> ' if i == 0 else '... ') + l for i, l in
                       enumerate(code_block)]
        if result:
            # Keep only genuine output lines (drop echoed input).
            final_code += [r for r in result.split('\n')
                           if r.strip() not in code_block]
    return '\n'.join(final_code)
"def",
"execute_interactive_code",
"(",
"elem",
",",
"doc",
")",
":",
"code_lines",
"=",
"[",
"l",
"[",
"4",
":",
"]",
"for",
"l",
"in",
"elem",
".",
"text",
".",
"split",
"(",
"'\\n'",
")",
"]",
"code_blocks",
"=",
"[",
"[",
"code_lines",
"[",
"0"... | 31.717949 | 17.769231 |
def list_reference_bases(self, id_, start=0, end=None):
    """
    Fetch the bases of reference *id_* in [start, end) from the server,
    page by page, and return them joined into one string. This command
    does not conform to the patterns of the other search and get
    requests, and is implemented differently.
    """
    request = protocol.ListReferenceBasesRequest()
    request.start = pb.int(start)
    request.end = pb.int(end)
    request.reference_id = id_
    # TODO We should probably use a StringIO here to make string buffering
    # a bit more efficient.
    chunks = []
    while True:
        response = self._run_list_reference_bases_page_request(request)
        chunks.append(response.sequence)
        # An empty page token signals the final page.
        if not response.next_page_token:
            break
        request.page_token = response.next_page_token
    return "".join(chunks)
"def",
"list_reference_bases",
"(",
"self",
",",
"id_",
",",
"start",
"=",
"0",
",",
"end",
"=",
"None",
")",
":",
"request",
"=",
"protocol",
".",
"ListReferenceBasesRequest",
"(",
")",
"request",
".",
"start",
"=",
"pb",
".",
"int",
"(",
"start",
")"... | 43.380952 | 15.857143 |
def render_template(templates_path, template_filename, context):
    """Render a Jinja2 template used for the NApp structure.

    Loads *template_filename* from *templates_path* (autoescaping off,
    trailing blocks kept) and renders it with *context*.
    """
    env = Environment(
        autoescape=False, trim_blocks=False,
        loader=FileSystemLoader(str(templates_path)))
    template = env.get_template(str(template_filename))
    return template.render(context)
"def",
"render_template",
"(",
"templates_path",
",",
"template_filename",
",",
"context",
")",
":",
"template_env",
"=",
"Environment",
"(",
"autoescape",
"=",
"False",
",",
"trim_blocks",
"=",
"False",
",",
"loader",
"=",
"FileSystemLoader",
"(",
"str",
"(",
... | 50.857143 | 13.142857 |
def flatten_to_documents(model, include_pointers=False):
    """Flatten *model* down to its ``Document`` objects.

    Walks a ``Binder``'ish model and keeps only documents; when
    ``include_pointers`` is True, ``DocumentPointer`` instances are kept
    as well.
    """
    wanted = (Document, DocumentPointer) if include_pointers else (Document,)
    def _is_wanted(item):
        return isinstance(item, wanted)
    return flatten_to(model, _is_wanted)
"def",
"flatten_to_documents",
"(",
"model",
",",
"include_pointers",
"=",
"False",
")",
":",
"types",
"=",
"[",
"Document",
"]",
"if",
"include_pointers",
":",
"types",
".",
"append",
"(",
"DocumentPointer",
")",
"types",
"=",
"tuple",
"(",
"types",
")",
... | 32.8125 | 17.9375 |
def pack_dir_cmd():
    """List the contents of a subdirectory of a zipfile.

    Entries are printed sorted, with a ``d`` marker prefix on directories.
    """
    parser = argparse.ArgumentParser(description=inspect.getdoc(part_edit_cmd))
    parser.add_argument(
        'path',
        help=(
            'Path to list (including path to zip file, '
            'i.e. ./file.zipx or ./file.zipx/subdir)'
        ),
    )
    args = parser.parse_args()
    for item, is_file in sorted(list_contents(args.path)):
        print(('  ' if is_file else 'd ') + item)
"def",
"pack_dir_cmd",
"(",
")",
":",
"parser",
"=",
"argparse",
".",
"ArgumentParser",
"(",
"description",
"=",
"inspect",
".",
"getdoc",
"(",
"part_edit_cmd",
")",
")",
"parser",
".",
"add_argument",
"(",
"'path'",
",",
"help",
"=",
"(",
"'Path to list (in... | 29.066667 | 20.666667 |
def primaryKeys(self, table, catalog=None, schema=None):  # nopep8
    """Build a result set of the column names that make up *table*'s
    primary key, via the ODBC ``SQLPrimaryKeys`` function.

    Returns the future produced by the underlying operation runner.
    """
    return self._run_operation(self._impl.primaryKeys, table,
                               catalog=catalog, schema=schema)
"def",
"primaryKeys",
"(",
"self",
",",
"table",
",",
"catalog",
"=",
"None",
",",
"schema",
"=",
"None",
")",
":",
"# nopep8",
"fut",
"=",
"self",
".",
"_run_operation",
"(",
"self",
".",
"_impl",
".",
"primaryKeys",
",",
"table",
",",
"catalog",
"=",... | 58.833333 | 16.166667 |
def call_command_handler(command, pymux, arguments):
    """
    Look up and execute a command handler.

    :param arguments: List of options.
    """
    assert isinstance(arguments, list)
    # Resolve aliases.
    command = ALIASES.get(command, command)
    if command not in COMMANDS_TO_HANDLERS:
        pymux.show_message('Invalid command: %s' % (command,))
        return
    handler = COMMANDS_TO_HANDLERS[command]
    try:
        handler(pymux, arguments)
    except CommandException as e:
        # Command-level failures are surfaced to the user, not raised.
        pymux.show_message(e.message)
"def",
"call_command_handler",
"(",
"command",
",",
"pymux",
",",
"arguments",
")",
":",
"assert",
"isinstance",
"(",
"arguments",
",",
"list",
")",
"# Resolve aliases.",
"command",
"=",
"ALIASES",
".",
"get",
"(",
"command",
",",
"command",
")",
"try",
":",... | 25 | 16.2 |
def add(self, *nonterminals):
    # type: (Iterable[Type[Nonterminal]]) -> None
    """
    Insert the given nonterminals into the set, skipping duplicates.

    :param nonterminals: Nonterminals to insert.
    :raise NotNonterminalException: If the object doesn't inherit from Nonterminal class.
    """
    for candidate in nonterminals:
        if candidate not in self:
            # Validate before mutating, so a bad element leaves the set intact.
            _NonterminalSet._control_nonterminal(candidate)
            super().add(candidate)
            self._assign_map[candidate] = set()
"def",
"add",
"(",
"self",
",",
"*",
"nonterminals",
")",
":",
"# type: (Iterable[Type[Nonterminal]]) -> None",
"for",
"nonterm",
"in",
"nonterminals",
":",
"if",
"nonterm",
"in",
"self",
":",
"continue",
"_NonterminalSet",
".",
"_control_nonterminal",
"(",
"nonterm... | 39.384615 | 11.538462 |
def enable_passive_host_checks(self, host):
    """Enable passive checks for a host

    Format of the line that triggers function call::

    ENABLE_PASSIVE_HOST_CHECKS;<host_name>

    :param host: host to edit
    :type host: alignak.objects.host.Host
    :return: None
    """
    if host.passive_checks_enabled:
        # Already enabled: nothing to record, no brok to send.
        return
    host.modified_attributes |= DICT_MODATTR["MODATTR_PASSIVE_CHECKS_ENABLED"].value
    host.passive_checks_enabled = True
    self.send_an_element(host.get_update_status_brok())
"def",
"enable_passive_host_checks",
"(",
"self",
",",
"host",
")",
":",
"if",
"not",
"host",
".",
"passive_checks_enabled",
":",
"host",
".",
"modified_attributes",
"|=",
"DICT_MODATTR",
"[",
"\"MODATTR_PASSIVE_CHECKS_ENABLED\"",
"]",
".",
"value",
"host",
".",
"... | 37.266667 | 13.133333 |
def prepare(
        self, engine=None, mode=None, model=None, index=None, key=None,
        filter=None, projection=None, consistent=None, forward=None, parallel=None):
    """Validates the search parameters and builds the base request dict for each Query/Scan call."""
    # Each prepare_* step validates one facet of the search and stores the
    # normalized value on self; prepare_request() then assembles the final
    # request from that state, so it must run last.
    self.prepare_iterator_cls(engine, mode)
    self.prepare_model(model, index, consistent)
    self.prepare_key(key)
    self.prepare_projection(projection)
    self.prepare_filter(filter)
    self.prepare_constraints(forward, parallel)
    self.prepare_request()
"def",
"prepare",
"(",
"self",
",",
"engine",
"=",
"None",
",",
"mode",
"=",
"None",
",",
"model",
"=",
"None",
",",
"index",
"=",
"None",
",",
"key",
"=",
"None",
",",
"filter",
"=",
"None",
",",
"projection",
"=",
"None",
",",
"consistent",
"=",
... | 43.538462 | 19.230769 |
def notify(self, state, notifications):
    """Schedule sending partner notifications.

    Validates that every element of *notifications* is a
    PendingNotification, then appends each one to the descriptor's
    per-recipient pending list via an atomic descriptor update.
    """
    def append_pending(desc, pending):
        for item in pending:
            if not isinstance(item, PendingNotification):
                raise ValueError("Expected notify() params to be a list "
                                 "of PendingNotification instance, got %r."
                                 % item)
            # Group notifications by recipient key, creating the bucket
            # on first use.
            bucket = desc.pending_notifications.setdefault(
                str(item.recipient.key), list())
            bucket.append(item)
    return state.agent.update_descriptor(append_pending, notifications)
"def",
"notify",
"(",
"self",
",",
"state",
",",
"notifications",
")",
":",
"def",
"do_append",
"(",
"desc",
",",
"notifications",
")",
":",
"for",
"notification",
"in",
"notifications",
":",
"if",
"not",
"isinstance",
"(",
"notification",
",",
"PendingNotif... | 46.705882 | 23.058824 |
def minimize(self,
             session=None,
             feed_dict=None,
             fetches=None,
             step_callback=None,
             loss_callback=None,
             **run_kwargs):
  """Minimize a scalar `Tensor`.
  Variables subject to optimization are updated in-place at the end of
  optimization.
  Note that this method does *not* just return a minimization `Op`, unlike
  `Optimizer.minimize()`; instead it actually performs minimization by
  executing commands to control a `Session`.
  Args:
    session: A `Session` instance.  Defaults to the current default session.
    feed_dict: A feed dict to be passed to calls to `session.run`.
    fetches: A list of `Tensor`s to fetch and supply to `loss_callback`
      as positional arguments.
    step_callback: A function to be called at each optimization step;
      arguments are the current values of all optimization variables
      flattened into a single vector.
    loss_callback: A function to be called every time the loss and gradients
      are computed, with evaluated fetches supplied as positional arguments.
    **run_kwargs: kwargs to pass to `session.run`.
  """
  session = session or ops.get_default_session()
  feed_dict = feed_dict or {}
  fetches = fetches or []
  # Substitute no-op callbacks so the code below can invoke them
  # unconditionally.
  loss_callback = loss_callback or (lambda *fetches: None)
  step_callback = step_callback or (lambda xk: None)
  # NOTE(review): presumably re-syncs cached packing/shape info in case any
  # optimization variable was resized since construction — confirm against
  # _initialize_updated_shapes.
  self._initialize_updated_shapes(session)
  # Construct loss function and associated gradient.  Each eval func wraps a
  # session.run over the given tensors with the shared feed_dict/fetches.
  loss_grad_func = self._make_eval_func([self._loss,
                                         self._packed_loss_grad], session,
                                        feed_dict, fetches, loss_callback)
  # Construct equality constraint functions and associated gradients.
  equality_funcs = self._make_eval_funcs(self._equalities, session, feed_dict,
                                         fetches)
  equality_grad_funcs = self._make_eval_funcs(self._packed_equality_grads,
                                              session, feed_dict, fetches)
  # Construct inequality constraint functions and associated gradients.
  inequality_funcs = self._make_eval_funcs(self._inequalities, session,
                                           feed_dict, fetches)
  inequality_grad_funcs = self._make_eval_funcs(self._packed_inequality_grads,
                                                session, feed_dict, fetches)
  # Get initial value from TF session.  _packed_var is the flattened
  # concatenation of all optimization variables.
  initial_packed_var_val = session.run(self._packed_var)
  # Perform minimization.  _minimize drives the external (non-TF) optimizer
  # using the eval funcs above and returns the optimized flat vector.
  packed_var_val = self._minimize(
      initial_val=initial_packed_var_val,
      loss_grad_func=loss_grad_func,
      equality_funcs=equality_funcs,
      equality_grad_funcs=equality_grad_funcs,
      inequality_funcs=inequality_funcs,
      inequality_grad_funcs=inequality_grad_funcs,
      packed_bounds=self._packed_bounds,
      step_callback=step_callback,
      optimizer_kwargs=self.optimizer_kwargs)
  # Split the optimized flat vector back into one slice per variable.
  var_vals = [
      packed_var_val[packing_slice] for packing_slice in self._packing_slices
  ]
  # Set optimization variables to their new values.
  session.run(
      self._var_updates,
      feed_dict=dict(zip(self._update_placeholders, var_vals)),
      **run_kwargs)
"def",
"minimize",
"(",
"self",
",",
"session",
"=",
"None",
",",
"feed_dict",
"=",
"None",
",",
"fetches",
"=",
"None",
",",
"step_callback",
"=",
"None",
",",
"loss_callback",
"=",
"None",
",",
"*",
"*",
"run_kwargs",
")",
":",
"session",
"=",
"sessi... | 41.649351 | 21.61039 |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.