text stringlengths 89 104k | code_tokens list | avg_line_len float64 7.91 980 | score float64 0 630 |
|---|---|---|---|
def com_adobe_fonts_check_cff2_call_depth(ttFont):
"""Is the CFF2 subr/gsubr call depth > 10?"""
any_failures = False
cff = ttFont['CFF2'].cff
for top_dict in cff.topDictIndex:
for fd_index, font_dict in enumerate(top_dict.FDArray):
if hasattr(font_dict, 'Private'):
private_dict = font_dict.Private
else:
private_dict = None
failed = yield from \
_check_call_depth(top_dict, private_dict, fd_index)
any_failures = any_failures or failed
if not any_failures:
yield PASS, 'Maximum call depth not exceeded.' | [
"def",
"com_adobe_fonts_check_cff2_call_depth",
"(",
"ttFont",
")",
":",
"any_failures",
"=",
"False",
"cff",
"=",
"ttFont",
"[",
"'CFF2'",
"]",
".",
"cff",
"for",
"top_dict",
"in",
"cff",
".",
"topDictIndex",
":",
"for",
"fd_index",
",",
"font_dict",
"in",
... | 36.647059 | 15.176471 |
def __view_to_intervals(self, data_and_metadata: DataAndMetadata.DataAndMetadata, intervals: typing.List[typing.Tuple[float, float]]) -> None:
"""Change the view to encompass the channels and data represented by the given intervals."""
left = None
right = None
for interval in intervals:
left = min(left, interval[0]) if left is not None else interval[0]
right = max(right, interval[1]) if right is not None else interval[1]
left = left if left is not None else 0.0
right = right if right is not None else 1.0
left_channel = int(max(0.0, left) * data_and_metadata.data_shape[-1])
right_channel = int(min(1.0, right) * data_and_metadata.data_shape[-1])
data_min = numpy.amin(data_and_metadata.data[..., left_channel:right_channel])
data_max = numpy.amax(data_and_metadata.data[..., left_channel:right_channel])
if data_min > 0 and data_max > 0:
y_min = 0.0
y_max = data_max * 1.2
elif data_min < 0 and data_max < 0:
y_min = data_min * 1.2
y_max = 0.0
else:
y_min = data_min * 1.2
y_max = data_max * 1.2
extra = (right - left) * 0.5
display_left_channel = int(max(0.0, left - extra) * data_and_metadata.data_shape[-1])
display_right_channel = int(min(1.0, right + extra) * data_and_metadata.data_shape[-1])
# command = self.delegate.create_change_display_command()
self.delegate.update_display_properties({"left_channel": display_left_channel, "right_channel": display_right_channel, "y_min": y_min, "y_max": y_max}) | [
"def",
"__view_to_intervals",
"(",
"self",
",",
"data_and_metadata",
":",
"DataAndMetadata",
".",
"DataAndMetadata",
",",
"intervals",
":",
"typing",
".",
"List",
"[",
"typing",
".",
"Tuple",
"[",
"float",
",",
"float",
"]",
"]",
")",
"->",
"None",
":",
"l... | 60.296296 | 28.148148 |
def opt_width(self, width):
""" Set width of output ('auto' will auto-detect terminal width) """
if width != "auto":
width = int(width)
self.conf["width"] = width | [
"def",
"opt_width",
"(",
"self",
",",
"width",
")",
":",
"if",
"width",
"!=",
"\"auto\"",
":",
"width",
"=",
"int",
"(",
"width",
")",
"self",
".",
"conf",
"[",
"\"width\"",
"]",
"=",
"width"
] | 38.8 | 8.4 |
def get_i_text(node):
"""
Get the text for an Indicator node.
:param node: Indicator node.
:return:
"""
if node.tag != 'Indicator':
raise IOCParseError('Invalid tag: {}'.format(node.tag))
s = node.get('operator').upper()
return s | [
"def",
"get_i_text",
"(",
"node",
")",
":",
"if",
"node",
".",
"tag",
"!=",
"'Indicator'",
":",
"raise",
"IOCParseError",
"(",
"'Invalid tag: {}'",
".",
"format",
"(",
"node",
".",
"tag",
")",
")",
"s",
"=",
"node",
".",
"get",
"(",
"'operator'",
")",
... | 26.909091 | 13.272727 |
def get_program(self, program_path, controller=None):
"""
Find the program within this manifest. If key is found, and it contains
a list, iterate over the list and return the program that matches
the controller tag. NOTICE: program_path must have a leading slash.
"""
if not program_path or program_path[0] != '/':
raise ValueError("program_path must be a full path with leading slash")
items = program_path[1:].split('/')
result = self
for item in items:
result = result[item]
if hasattr(result, "lower"):
# string redirect
return self.get_program(result)
elif type(result) is Manifest:
return result.get_program('/')
elif hasattr(result, 'append'):
matching_blank = []
for program in result:
if controller in program.controllers:
return program
if not program.controllers or not controller:
# no exact matching controllers for this program.
# Use the first controller with no
matching_blank.append(program)
if matching_blank:
return matching_blank[0]
else:
raise ProgramNotFound("No matcning program for %s controller" % controller)
return result | [
"def",
"get_program",
"(",
"self",
",",
"program_path",
",",
"controller",
"=",
"None",
")",
":",
"if",
"not",
"program_path",
"or",
"program_path",
"[",
"0",
"]",
"!=",
"'/'",
":",
"raise",
"ValueError",
"(",
"\"program_path must be a full path with leading slash... | 37.459459 | 18.432432 |
def upload(import_path, verbose=False, skip_subfolders=False, number_threads=None, max_attempts=None, video_import_path=None, dry_run=False,api_version=1.0):
'''
Upload local images to Mapillary
Args:
import_path: Directory path to where the images are stored.
verbose: Print extra warnings and errors.
skip_subfolders: Skip images stored in subdirectories.
Returns:
Images are uploaded to Mapillary and flagged locally as uploaded.
'''
# sanity check if video file is passed
if video_import_path and (not os.path.isdir(video_import_path) and not os.path.isfile(video_import_path)):
print("Error, video path " + video_import_path +
" does not exist, exiting...")
sys.exit(1)
# in case of video processing, adjust the import path
if video_import_path:
# set sampling path
video_sampling_path = "mapillary_sampled_video_frames"
video_dirname = video_import_path if os.path.isdir(
video_import_path) else os.path.dirname(video_import_path)
import_path = os.path.join(os.path.abspath(import_path), video_sampling_path) if import_path else os.path.join(
os.path.abspath(video_dirname), video_sampling_path)
# basic check for all
if not import_path or not os.path.isdir(import_path):
print("Error, import directory " + import_path +
" does not exist, exiting...")
sys.exit(1)
# get list of file to process
total_file_list = uploader.get_total_file_list(
import_path, skip_subfolders)
upload_file_list = uploader.get_upload_file_list(
import_path, skip_subfolders)
failed_file_list = uploader.get_failed_upload_file_list(
import_path, skip_subfolders)
success_file_list = uploader.get_success_upload_file_list(
import_path, skip_subfolders)
to_finalize_file_list = uploader.get_finalize_file_list(
import_path, skip_subfolders)
if len(success_file_list) == len(total_file_list):
print("All images have already been uploaded")
else:
if len(failed_file_list):
upload_failed = raw_input(
"Retry uploading previously failed image uploads? [y/n]: ") if not ipc.is_enabled() else 'y'
# if yes, add images to the upload list
if upload_failed in ["y", "Y", "yes", "Yes"]:
upload_file_list.extend(failed_file_list)
# verify the images in the upload list, they need to have the image
# description and certain MAP properties
upload_file_list = [
f for f in upload_file_list if verify_mapillary_tag(f)]
if not len(upload_file_list) and not len(to_finalize_file_list):
print("No images to upload.")
print('Please check if all images contain the required Mapillary metadata. If not, you can use "mapillary_tools process" to add them')
sys.exit(1)
if len(upload_file_list):
# get upload params for the manual upload images, group them per sequence
# and separate direct upload images
params = {}
list_per_sequence_mapping = {}
direct_upload_file_list = []
for image in upload_file_list:
log_root = uploader.log_rootpath(image)
upload_params_path = os.path.join(
log_root, "upload_params_process.json")
if os.path.isfile(upload_params_path):
with open(upload_params_path, "rb") as jf:
params[image] = json.load(
jf, object_hook=uploader.ascii_encode_dict)
sequence = params[image]["key"]
if sequence in list_per_sequence_mapping:
list_per_sequence_mapping[sequence].append(image)
else:
list_per_sequence_mapping[sequence] = [image]
else:
direct_upload_file_list.append(image)
# inform how many images are to be uploaded and how many are being skipped
# from upload
print("Uploading {} images with valid mapillary tags (Skipping {})".format(
len(upload_file_list), len(total_file_list) - len(upload_file_list)))
if api_version==2.0:
uploder.uploadfile_list
if len(direct_upload_file_list):
uploader.upload_file_list_direct(
direct_upload_file_list, number_threads, max_attempts)
for idx, sequence in enumerate(list_per_sequence_mapping):
uploader.upload_file_list_manual(
list_per_sequence_mapping[sequence], params, idx, number_threads, max_attempts)
if len(to_finalize_file_list):
params = {}
sequences = []
for image in to_finalize_file_list:
log_root = uploader.log_rootpath(image)
upload_params_path = os.path.join(
log_root, "upload_params_process.json")
if os.path.isfile(upload_params_path):
with open(upload_params_path, "rb") as jf:
image_params = json.load(
jf, object_hook=uploader.ascii_encode_dict)
sequence = image_params["key"]
if sequence not in sequences:
params[image] = image_params
sequences.append(sequence)
for image in params:
uploader.upload_done_file(**params[image])
uploader.flag_finalization(to_finalize_file_list)
uploader.print_summary(upload_file_list) | [
"def",
"upload",
"(",
"import_path",
",",
"verbose",
"=",
"False",
",",
"skip_subfolders",
"=",
"False",
",",
"number_threads",
"=",
"None",
",",
"max_attempts",
"=",
"None",
",",
"video_import_path",
"=",
"None",
",",
"dry_run",
"=",
"False",
",",
"api_vers... | 47.680672 | 22.201681 |
def _set_network(self, v, load=False):
"""
Setter method for network, mapped from YANG variable /rbridge_id/router/router_bgp/address_family/ipv4/ipv4_unicast/default_vrf/network (list)
If this variable is read-only (config: false) in the
source YANG file, then _set_network is considered as a private
method. Backends looking to populate this variable should
do so via calling thisObj._set_network() directly.
"""
if hasattr(v, "_utype"):
v = v._utype(v)
try:
t = YANGDynClass(v,base=YANGListType("network_ipv4_address",network.network, yang_name="network", rest_name="network", parent=self, is_container='list', user_ordered=False, path_helper=self._path_helper, yang_keys='network-ipv4-address', extensions={u'tailf-common': {u'info': u'Specify a network to announce via BGP', u'cli-no-key-completion': None, u'cli-suppress-mode': None, u'cli-suppress-list-no': None, u'cli-suppress-key-abbreviation': None, u'callpoint': u'AfIpv4Network'}}), is_container='list', yang_name="network", rest_name="network", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, extensions={u'tailf-common': {u'info': u'Specify a network to announce via BGP', u'cli-no-key-completion': None, u'cli-suppress-mode': None, u'cli-suppress-list-no': None, u'cli-suppress-key-abbreviation': None, u'callpoint': u'AfIpv4Network'}}, namespace='urn:brocade.com:mgmt:brocade-bgp', defining_module='brocade-bgp', yang_type='list', is_config=True)
except (TypeError, ValueError):
raise ValueError({
'error-string': """network must be of a type compatible with list""",
'defined-type': "list",
'generated-type': """YANGDynClass(base=YANGListType("network_ipv4_address",network.network, yang_name="network", rest_name="network", parent=self, is_container='list', user_ordered=False, path_helper=self._path_helper, yang_keys='network-ipv4-address', extensions={u'tailf-common': {u'info': u'Specify a network to announce via BGP', u'cli-no-key-completion': None, u'cli-suppress-mode': None, u'cli-suppress-list-no': None, u'cli-suppress-key-abbreviation': None, u'callpoint': u'AfIpv4Network'}}), is_container='list', yang_name="network", rest_name="network", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, extensions={u'tailf-common': {u'info': u'Specify a network to announce via BGP', u'cli-no-key-completion': None, u'cli-suppress-mode': None, u'cli-suppress-list-no': None, u'cli-suppress-key-abbreviation': None, u'callpoint': u'AfIpv4Network'}}, namespace='urn:brocade.com:mgmt:brocade-bgp', defining_module='brocade-bgp', yang_type='list', is_config=True)""",
})
self.__network = t
if hasattr(self, '_set'):
self._set() | [
"def",
"_set_network",
"(",
"self",
",",
"v",
",",
"load",
"=",
"False",
")",
":",
"if",
"hasattr",
"(",
"v",
",",
"\"_utype\"",
")",
":",
"v",
"=",
"v",
".",
"_utype",
"(",
"v",
")",
"try",
":",
"t",
"=",
"YANGDynClass",
"(",
"v",
",",
"base",... | 126 | 61.909091 |
def __deserialize_file(self, response):
"""Deserializes body to file
Saves response body into a file in a temporary folder,
using the filename from the `Content-Disposition` header if provided.
:param response: RESTResponse.
:return: file path.
"""
fd, path = tempfile.mkstemp(dir=self.configuration.temp_folder_path)
os.close(fd)
os.remove(path)
content_disposition = response.getheader("Content-Disposition")
if content_disposition:
filename = re.search(r'filename=[\'"]?([^\'"\s]+)[\'"]?',
content_disposition).group(1)
path = os.path.join(os.path.dirname(path), filename)
with open(path, "wb") as f:
f.write(response.data)
return path | [
"def",
"__deserialize_file",
"(",
"self",
",",
"response",
")",
":",
"fd",
",",
"path",
"=",
"tempfile",
".",
"mkstemp",
"(",
"dir",
"=",
"self",
".",
"configuration",
".",
"temp_folder_path",
")",
"os",
".",
"close",
"(",
"fd",
")",
"os",
".",
"remove... | 34.565217 | 21.478261 |
def point_data_to_cell_data(dataset, pass_point_data=False):
"""Transforms point data (i.e., data specified per node) into cell data
(i.e., data specified within cells).
Optionally, the input point data can be passed through to the output.
See aslo: :func:`vtki.DataSetFilters.cell_data_to_point_data`
Parameters
----------
pass_point_data : bool
If enabled, pass the input point data through to the output
"""
alg = vtk.vtkPointDataToCellData()
alg.SetInputDataObject(dataset)
alg.SetPassPointData(pass_point_data)
alg.Update()
return _get_output(alg, active_scalar=dataset.active_scalar_name) | [
"def",
"point_data_to_cell_data",
"(",
"dataset",
",",
"pass_point_data",
"=",
"False",
")",
":",
"alg",
"=",
"vtk",
".",
"vtkPointDataToCellData",
"(",
")",
"alg",
".",
"SetInputDataObject",
"(",
"dataset",
")",
"alg",
".",
"SetPassPointData",
"(",
"pass_point_... | 40.941176 | 18.588235 |
def assert_200(response, max_len=500):
""" Check that a HTTP response returned 200. """
if response.status_code == 200:
return
raise ValueError(
"Response was {}, not 200:\n{}\n{}".format(
response.status_code,
json.dumps(dict(response.headers), indent=2),
response.content[:max_len])) | [
"def",
"assert_200",
"(",
"response",
",",
"max_len",
"=",
"500",
")",
":",
"if",
"response",
".",
"status_code",
"==",
"200",
":",
"return",
"raise",
"ValueError",
"(",
"\"Response was {}, not 200:\\n{}\\n{}\"",
".",
"format",
"(",
"response",
".",
"status_code... | 32.3 | 13.5 |
def sim(self, src, tar):
"""Return the Ratcliff-Obershelp similarity of two strings.
Parameters
----------
src : str
Source string for comparison
tar : str
Target string for comparison
Returns
-------
float
Ratcliff-Obershelp similarity
Examples
--------
>>> cmp = RatcliffObershelp()
>>> round(cmp.sim('cat', 'hat'), 12)
0.666666666667
>>> round(cmp.sim('Niall', 'Neil'), 12)
0.666666666667
>>> round(cmp.sim('aluminum', 'Catalan'), 12)
0.4
>>> cmp.sim('ATCG', 'TAGC')
0.5
"""
def _lcsstr_stl(src, tar):
"""Return start positions & length for Ratcliff-Obershelp.
Parameters
----------
src : str
Source string for comparison
tar : str
Target string for comparison
Returns
-------
tuple
The start position in the source string, start position in the
target string, and length of the longest common substring of
strings src and tar.
"""
lengths = np_zeros((len(src) + 1, len(tar) + 1), dtype=np_int)
longest, src_longest, tar_longest = 0, 0, 0
for i in range(1, len(src) + 1):
for j in range(1, len(tar) + 1):
if src[i - 1] == tar[j - 1]:
lengths[i, j] = lengths[i - 1, j - 1] + 1
if lengths[i, j] > longest:
longest = lengths[i, j]
src_longest = i
tar_longest = j
else:
lengths[i, j] = 0
return src_longest - longest, tar_longest - longest, longest
def _sstr_matches(src, tar):
"""Return the sum of substring match lengths.
This follows the Ratcliff-Obershelp algorithm
:cite:`Ratcliff:1988`:
1. Find the length of the longest common substring in src &
tar.
2. Recurse on the strings to the left & right of each this
substring in src & tar.
3. Base case is a 0 length common substring, in which case,
return 0.
4. Return the sum.
Parameters
----------
src : str
Source string for comparison
tar : str
Target string for comparison
Returns
-------
int
Sum of substring match lengths
"""
src_start, tar_start, length = _lcsstr_stl(src, tar)
if length == 0:
return 0
return (
_sstr_matches(src[:src_start], tar[:tar_start])
+ length
+ _sstr_matches(
src[src_start + length :], tar[tar_start + length :]
)
)
if src == tar:
return 1.0
elif not src or not tar:
return 0.0
return 2 * _sstr_matches(src, tar) / (len(src) + len(tar)) | [
"def",
"sim",
"(",
"self",
",",
"src",
",",
"tar",
")",
":",
"def",
"_lcsstr_stl",
"(",
"src",
",",
"tar",
")",
":",
"\"\"\"Return start positions & length for Ratcliff-Obershelp.\n\n Parameters\n ----------\n src : str\n Source str... | 31.038835 | 18.718447 |
def process_columns(self, columns):
"""
Handle provided columns and if necessary, convert columns to a list for
internal strage.
:columns: A sequence of columns for the table. Can be list, comma
-delimited string, or IntEnum.
"""
if type(columns) == list:
self.columns = columns
elif type(columns) == str:
self.columns = [c.strip() for c in columns.split()]
elif type(columns) == IntEnum:
self.columns = [str(c) for c in columns]
else:
raise RawlException("Unknown format for columns") | [
"def",
"process_columns",
"(",
"self",
",",
"columns",
")",
":",
"if",
"type",
"(",
"columns",
")",
"==",
"list",
":",
"self",
".",
"columns",
"=",
"columns",
"elif",
"type",
"(",
"columns",
")",
"==",
"str",
":",
"self",
".",
"columns",
"=",
"[",
... | 37.8125 | 15 |
def _process_key(evt):
"""Helper to convert from wx keycode to vispy keycode"""
key = evt.GetKeyCode()
if key in KEYMAP:
return KEYMAP[key], ''
if 97 <= key <= 122:
key -= 32
if key >= 32 and key <= 127:
return keys.Key(chr(key)), chr(key)
else:
return None, None | [
"def",
"_process_key",
"(",
"evt",
")",
":",
"key",
"=",
"evt",
".",
"GetKeyCode",
"(",
")",
"if",
"key",
"in",
"KEYMAP",
":",
"return",
"KEYMAP",
"[",
"key",
"]",
",",
"''",
"if",
"97",
"<=",
"key",
"<=",
"122",
":",
"key",
"-=",
"32",
"if",
"... | 28.090909 | 14.272727 |
def is_ratio_different(min_ratio, study_go, study_n, pop_go, pop_n):
"""
check if the ratio go /n is different between the study group and
the population
"""
if min_ratio is None:
return True
stu_ratio = float(study_go) / study_n
pop_ratio = float(pop_go) / pop_n
if stu_ratio == 0.0:
stu_ratio = 0.0000001
if pop_ratio == 0.0:
pop_ratio = 0.0000001
if stu_ratio > pop_ratio:
return stu_ratio / pop_ratio > min_ratio
return pop_ratio / stu_ratio > min_ratio | [
"def",
"is_ratio_different",
"(",
"min_ratio",
",",
"study_go",
",",
"study_n",
",",
"pop_go",
",",
"pop_n",
")",
":",
"if",
"min_ratio",
"is",
"None",
":",
"return",
"True",
"stu_ratio",
"=",
"float",
"(",
"study_go",
")",
"/",
"study_n",
"pop_ratio",
"="... | 32.375 | 12.25 |
def comments(self, issue):
"""Return all comments for this issue/pull request
"""
commit = self.as_id(issue)
return self.get_list(url='%s/%s/comments' % (self, commit)) | [
"def",
"comments",
"(",
"self",
",",
"issue",
")",
":",
"commit",
"=",
"self",
".",
"as_id",
"(",
"issue",
")",
"return",
"self",
".",
"get_list",
"(",
"url",
"=",
"'%s/%s/comments'",
"%",
"(",
"self",
",",
"commit",
")",
")"
] | 39.2 | 9.4 |
def _parse_splits(patch, splits):
"""
Parse splits string to get list of all associated subset strings.
Parameters
----------
patch : obj
Patch object containing data to subset
splits : str
Specifies how a column of a dataset should be split. See Notes.
Returns
-------
list
List of subset strings derived from splits string
Notes
-----
{0}
"""
split_list = splits.replace(' ','').split(';')
subset_list = [] # List of all subset strings
for split in split_list:
col, val = split.split(':')
if val == 'split':
uniques = []
for level in patch.table[col]:
if level not in uniques:
uniques.append(level)
level_list = [col + '==' + str(x) + '; ' for x in uniques]
else:
starts, ends = _col_starts_ends(patch, col, val)
level_list = [col + '>=' + str(x) + '; ' + col + '<' + str(y)+'; '
for x, y in zip(starts, ends)]
subset_list.append(level_list)
# Get product of all string levels as list, conv to string, drop final ;
return [''.join(x)[:-2] for x in _product(*subset_list)] | [
"def",
"_parse_splits",
"(",
"patch",
",",
"splits",
")",
":",
"split_list",
"=",
"splits",
".",
"replace",
"(",
"' '",
",",
"''",
")",
".",
"split",
"(",
"';'",
")",
"subset_list",
"=",
"[",
"]",
"# List of all subset strings",
"for",
"split",
"in",
"sp... | 27.697674 | 23.139535 |
def main(args):
"""Command-line tool to transform html style to inline css
Usage::
$ echo '<style>h1 { color:red; }</style><h1>Title</h1>' | \
python -m premailer
<h1 style="color:red"></h1>
$ cat newsletter.html | python -m premailer
"""
parser = argparse.ArgumentParser(usage="python -m premailer [options]")
parser.add_argument(
"-f",
"--file",
nargs="?",
type=argparse.FileType("r"),
help="Specifies the input file. The default is stdin.",
default=sys.stdin,
dest="infile",
)
parser.add_argument(
"-o",
"--output",
nargs="?",
type=argparse.FileType("w"),
help="Specifies the output file. The default is stdout.",
default=sys.stdout,
dest="outfile",
)
parser.add_argument("--base-url", default=None, type=str, dest="base_url")
parser.add_argument(
"--remove-internal-links",
default=True,
help="Remove links that start with a '#' like anchors.",
dest="preserve_internal_links",
)
parser.add_argument(
"--exclude-pseudoclasses",
default=False,
help="Pseudo classes like p:last-child', p:first-child, etc",
action="store_true",
dest="exclude_pseudoclasses",
)
parser.add_argument(
"--preserve-style-tags",
default=False,
help="Do not delete <style></style> tags from the html document.",
action="store_true",
dest="keep_style_tags",
)
parser.add_argument(
"--remove-star-selectors",
default=True,
help="All wildcard selectors like '* {color: black}' will be removed.",
action="store_false",
dest="include_star_selectors",
)
parser.add_argument(
"--remove-classes",
default=False,
help="Remove all class attributes from all elements",
action="store_true",
dest="remove_classes",
)
parser.add_argument(
"--capitalize-float-margin",
default=False,
help="Capitalize float and margin properties for outlook.com compat.",
action="store_true",
dest="capitalize_float_margin",
)
parser.add_argument(
"--strip-important",
default=False,
help="Remove '!important' for all css declarations.",
action="store_true",
dest="strip_important",
)
parser.add_argument(
"--method",
default="html",
dest="method",
help="The type of html to output. 'html' for HTML, 'xml' for XHTML.",
)
parser.add_argument(
"--base-path",
default=None,
dest="base_path",
help="The base path for all external stylsheets.",
)
parser.add_argument(
"--external-style",
action="append",
dest="external_styles",
help="The path to an external stylesheet to be loaded.",
)
parser.add_argument(
"--css-text",
action="append",
dest="css_text",
help="CSS text to be applied to the html.",
)
parser.add_argument(
"--disable-basic-attributes",
dest="disable_basic_attributes",
help="Disable provided basic attributes (comma separated)",
default=[],
)
parser.add_argument(
"--disable-validation",
default=False,
action="store_true",
dest="disable_validation",
help="Disable CSSParser validation of attributes and values",
)
parser.add_argument(
"--pretty",
default=False,
action="store_true",
help="Pretty-print the outputted HTML.",
)
parser.add_argument(
"--encoding", default="utf-8", help="Output encoding. The default is utf-8"
)
options = parser.parse_args(args)
if options.disable_basic_attributes:
options.disable_basic_attributes = options.disable_basic_attributes.split()
html = options.infile.read()
if hasattr(html, "decode"): # Forgive me: Python 2 compatability
html = html.decode("utf-8")
p = Premailer(
html=html,
base_url=options.base_url,
preserve_internal_links=options.preserve_internal_links,
exclude_pseudoclasses=options.exclude_pseudoclasses,
keep_style_tags=options.keep_style_tags,
include_star_selectors=options.include_star_selectors,
remove_classes=options.remove_classes,
strip_important=options.strip_important,
external_styles=options.external_styles,
css_text=options.css_text,
method=options.method,
base_path=options.base_path,
disable_basic_attributes=options.disable_basic_attributes,
disable_validation=options.disable_validation,
)
options.outfile.write(
p.transform(encoding=options.encoding, pretty_print=options.pretty)
)
return 0 | [
"def",
"main",
"(",
"args",
")",
":",
"parser",
"=",
"argparse",
".",
"ArgumentParser",
"(",
"usage",
"=",
"\"python -m premailer [options]\"",
")",
"parser",
".",
"add_argument",
"(",
"\"-f\"",
",",
"\"--file\"",
",",
"nargs",
"=",
"\"?\"",
",",
"type",
"="... | 27.520231 | 21.745665 |
def transmissions(self, status="all"):
"""Get transmissions sent along this Vector.
Status can be "all" (the default), "pending", or "received".
"""
if status not in ["all", "pending", "received"]:
raise(ValueError("You cannot get {} transmissions."
.format(status) +
"Status can only be pending, received or all"))
if status == "all":
return Transmission\
.query\
.filter_by(vector_id=self.id,
failed=False)\
.all()
else:
return Transmission\
.query\
.filter_by(vector_id=self.id,
status=status,
failed=False)\
.all() | [
"def",
"transmissions",
"(",
"self",
",",
"status",
"=",
"\"all\"",
")",
":",
"if",
"status",
"not",
"in",
"[",
"\"all\"",
",",
"\"pending\"",
",",
"\"received\"",
"]",
":",
"raise",
"(",
"ValueError",
"(",
"\"You cannot get {} transmissions.\"",
".",
"format"... | 35.521739 | 14.347826 |
def load(self, file=CONFIG_FILE):
"""
load a configuration file. loads default config if file is not found
"""
if not os.path.exists(file):
print("Config file was not found under %s. Default file has been created" % CONFIG_FILE)
self._settings = yaml.load(DEFAULT_CONFIG, yaml.RoundTripLoader)
self.save(file)
sys.exit()
with open(file, 'r') as f:
self._settings = yaml.load(f, yaml.RoundTripLoader) | [
"def",
"load",
"(",
"self",
",",
"file",
"=",
"CONFIG_FILE",
")",
":",
"if",
"not",
"os",
".",
"path",
".",
"exists",
"(",
"file",
")",
":",
"print",
"(",
"\"Config file was not found under %s. Default file has been created\"",
"%",
"CONFIG_FILE",
")",
"self",
... | 44.454545 | 18.454545 |
def _sort_lambda(sortedby='cpu_percent',
sortedby_secondary='memory_percent'):
"""Return a sort lambda function for the sortedbykey"""
ret = None
if sortedby == 'io_counters':
ret = _sort_io_counters
elif sortedby == 'cpu_times':
ret = _sort_cpu_times
return ret | [
"def",
"_sort_lambda",
"(",
"sortedby",
"=",
"'cpu_percent'",
",",
"sortedby_secondary",
"=",
"'memory_percent'",
")",
":",
"ret",
"=",
"None",
"if",
"sortedby",
"==",
"'io_counters'",
":",
"ret",
"=",
"_sort_io_counters",
"elif",
"sortedby",
"==",
"'cpu_times'",
... | 34.111111 | 11.111111 |
def validate_training_data_stats(training_data_stats):
"""
Method to validate the structure of training data stats
"""
stat_keys = list(training_data_stats.keys())
valid_stat_keys = ["means", "mins", "maxs", "stds", "feature_values", "feature_frequencies"]
missing_keys = list(set(valid_stat_keys) - set(stat_keys))
if len(missing_keys) > 0:
raise Exception("Missing keys in training_data_stats. Details:" % (missing_keys)) | [
"def",
"validate_training_data_stats",
"(",
"training_data_stats",
")",
":",
"stat_keys",
"=",
"list",
"(",
"training_data_stats",
".",
"keys",
"(",
")",
")",
"valid_stat_keys",
"=",
"[",
"\"means\"",
",",
"\"mins\"",
",",
"\"maxs\"",
",",
"\"stds\"",
",",
"\"fe... | 54.111111 | 22.111111 |
def package(input_dir, output_dir, meta_path=None, create_meta=False, force=False):
"""
Generate Python package for model data, including meta and required
installation files. A new directory will be created in the specified
output directory, and model data will be copied over. If --create-meta is
set and a meta.json already exists in the output directory, the existing
values will be used as the defaults in the command-line prompt.
"""
msg = Printer()
input_path = util.ensure_path(input_dir)
output_path = util.ensure_path(output_dir)
meta_path = util.ensure_path(meta_path)
if not input_path or not input_path.exists():
msg.fail("Can't locate model data", input_path, exits=1)
if not output_path or not output_path.exists():
msg.fail("Output directory not found", output_path, exits=1)
if meta_path and not meta_path.exists():
msg.fail("Can't find model meta.json", meta_path, exits=1)
meta_path = meta_path or input_path / "meta.json"
if meta_path.is_file():
meta = srsly.read_json(meta_path)
if not create_meta: # only print if user doesn't want to overwrite
msg.good("Loaded meta.json from file", meta_path)
else:
meta = generate_meta(input_dir, meta, msg)
for key in ("lang", "name", "version"):
if key not in meta or meta[key] == "":
msg.fail(
"No '{}' setting found in meta.json".format(key),
"This setting is required to build your package.",
exits=1,
)
model_name = meta["lang"] + "_" + meta["name"]
model_name_v = model_name + "-" + meta["version"]
main_path = output_path / model_name_v
package_path = main_path / model_name
if package_path.exists():
if force:
shutil.rmtree(path2str(package_path))
else:
msg.fail(
"Package directory already exists",
"Please delete the directory and try again, or use the "
"`--force` flag to overwrite existing "
"directories.".format(path=path2str(package_path)),
exits=1,
)
Path.mkdir(package_path, parents=True)
shutil.copytree(path2str(input_path), path2str(package_path / model_name_v))
create_file(main_path / "meta.json", srsly.json_dumps(meta, indent=2))
create_file(main_path / "setup.py", TEMPLATE_SETUP)
create_file(main_path / "MANIFEST.in", TEMPLATE_MANIFEST)
create_file(package_path / "__init__.py", TEMPLATE_INIT)
msg.good("Successfully created package '{}'".format(model_name_v), main_path)
msg.text("To build the package, run `python setup.py sdist` in this directory.") | [
"def",
"package",
"(",
"input_dir",
",",
"output_dir",
",",
"meta_path",
"=",
"None",
",",
"create_meta",
"=",
"False",
",",
"force",
"=",
"False",
")",
":",
"msg",
"=",
"Printer",
"(",
")",
"input_path",
"=",
"util",
".",
"ensure_path",
"(",
"input_dir"... | 47.192982 | 19.789474 |
def to_datetime(timestamp):
"""Return datetime object from timestamp."""
return dt.fromtimestamp(time.mktime(
time.localtime(int(str(timestamp)[:10])))) | [
"def",
"to_datetime",
"(",
"timestamp",
")",
":",
"return",
"dt",
".",
"fromtimestamp",
"(",
"time",
".",
"mktime",
"(",
"time",
".",
"localtime",
"(",
"int",
"(",
"str",
"(",
"timestamp",
")",
"[",
":",
"10",
"]",
")",
")",
")",
")"
] | 41.25 | 5.75 |
def freeze(self):
"""
Freeze (disable) all settings
"""
for fields in zip(self.xsll, self.xsul, self.xslr, self.xsur,
self.ys, self.nx, self.ny):
for field in fields:
field.disable()
self.nquad.disable()
self.xbin.disable()
self.ybin.disable()
self.sbutt.disable()
self.frozen = True | [
"def",
"freeze",
"(",
"self",
")",
":",
"for",
"fields",
"in",
"zip",
"(",
"self",
".",
"xsll",
",",
"self",
".",
"xsul",
",",
"self",
".",
"xslr",
",",
"self",
".",
"xsur",
",",
"self",
".",
"ys",
",",
"self",
".",
"nx",
",",
"self",
".",
"n... | 30.538462 | 11.461538 |
def _init_jupyter(run):
    """Asks for user input to configure the machine if it isn't already and creates a new run.
    Log pushing and system stats don't start until `wandb.monitor()` is called.
    """
    from wandb import jupyter
    # TODO: Should we log to jupyter?
    # global logging had to be disabled because it set the level to debug
    # I also disabled run logging because we're rairly using it.
    # try_to_set_up_global_logging()
    # run.enable_logging()
    api = InternalApi()
    # Interactively prompt for an API key when none is configured yet.
    if not api.api_key:
        termerror(
            "Not authenticated. Copy a key from https://app.wandb.ai/profile?message=true")
        key = getpass.getpass("API Key: ").strip()
        # Keys are expected to be exactly 40 characters; reject anything else.
        if len(key) == 40:
            os.environ[env.API_KEY] = key
            util.write_netrc(api.api_url, "user", key)
        else:
            raise ValueError("API Key must be 40 characters long")
        # Ensure our api client picks up the new key
        api = InternalApi()
    # Mark this process as running inside Jupyter for downstream code.
    os.environ["WANDB_JUPYTER"] = "true"
    run.resume = "allow"
    api.set_current_run_id(run.id)
    print("W&B Run: %s" % run.get_url(api))
    print("Call `%%wandb` in the cell containing your training loop to display live results.")
    try:
        run.save(api=api)
    except (CommError, ValueError) as e:
        # Best-effort save: surface the error to the user but keep going.
        termerror(str(e))
    run.set_environment()
    run._init_jupyter_agent()
    ipython = get_ipython()
    ipython.register_magics(jupyter.WandBMagics)
    def reset_start():
        """Reset START_TIME to when the cell starts"""
        global START_TIME
        START_TIME = time.time()
    # Re-anchor timing before every cell; stop the agent after each cell runs.
    ipython.events.register("pre_run_cell", reset_start)
    ipython.events.register('post_run_cell', run._stop_jupyter_agent)
ipython.events.register('post_run_cell', run._stop_jupyter_agent) | [
"def",
"_init_jupyter",
"(",
"run",
")",
":",
"from",
"wandb",
"import",
"jupyter",
"# TODO: Should we log to jupyter?",
"# global logging had to be disabled because it set the level to debug",
"# I also disabled run logging because we're rairly using it.",
"# try_to_set_up_global_logging(... | 38.906977 | 16.837209 |
def _set_show_raslog(self, v, load=False):
    """
    Setter method for show_raslog, mapped from YANG variable /brocade_ras_ext_rpc/show_raslog (rpc)
    If this variable is read-only (config: false) in the
    source YANG file, then _set_show_raslog is considered as a private
    method. Backends looking to populate this variable should
    do so via calling thisObj._set_show_raslog() directly.
    YANG Description: Shows the entries of RASLOG
    """
    # Generated pyangbind-style setter: normalise the incoming value first.
    if hasattr(v, "_utype"):
      v = v._utype(v)
    # Wrap the value in YANGDynClass so YANG typing/metadata are enforced.
    try:
      t = YANGDynClass(v,base=show_raslog.show_raslog, is_leaf=True, yang_name="show-raslog", rest_name="show-raslog", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=False, extensions={u'tailf-common': {u'hidden': u'rpccmd', u'actionpoint': u'showRaslog'}}, namespace='urn:brocade.com:mgmt:brocade-ras-ext', defining_module='brocade-ras-ext', yang_type='rpc', is_config=True)
    except (TypeError, ValueError):
      # Re-raise with the generated error payload describing the expected type.
      raise ValueError({
        'error-string': """show_raslog must be of a type compatible with rpc""",
        'defined-type': "rpc",
        'generated-type': """YANGDynClass(base=show_raslog.show_raslog, is_leaf=True, yang_name="show-raslog", rest_name="show-raslog", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=False, extensions={u'tailf-common': {u'hidden': u'rpccmd', u'actionpoint': u'showRaslog'}}, namespace='urn:brocade.com:mgmt:brocade-ras-ext', defining_module='brocade-ras-ext', yang_type='rpc', is_config=True)""",
      })
    self.__show_raslog = t
    # Notify the parent object of the change, when supported.
    if hasattr(self, '_set'):
      self._set()
"def",
"_set_show_raslog",
"(",
"self",
",",
"v",
",",
"load",
"=",
"False",
")",
":",
"if",
"hasattr",
"(",
"v",
",",
"\"_utype\"",
")",
":",
"v",
"=",
"v",
".",
"_utype",
"(",
"v",
")",
"try",
":",
"t",
"=",
"YANGDynClass",
"(",
"v",
",",
"ba... | 67.333333 | 32.541667 |
def singledispatch(*, nargs=None, nouts=None, ndefs=None):
    """
    Decorator factory wrapping a function in a SingleDispatchFunction
    while preserving its metadata (via functools.wraps).
    """
    def decorate(func):
        dispatcher = SingleDispatchFunction(func, nargs=nargs, nouts=nouts,
                                            ndefs=ndefs)
        return wraps(func)(dispatcher)
    return decorate
"def",
"singledispatch",
"(",
"*",
",",
"nargs",
"=",
"None",
",",
"nouts",
"=",
"None",
",",
"ndefs",
"=",
"None",
")",
":",
"def",
"wrapper",
"(",
"f",
")",
":",
"return",
"wraps",
"(",
"f",
")",
"(",
"SingleDispatchFunction",
"(",
"f",
",",
"nar... | 30.111111 | 24.777778 |
def monkey_patch_override_instance_method(instance):
    """
    Decorator factory that replaces an instance method with a new
    implementation of the same name.  The original implementation stays
    reachable on the instance as ``_original_<METHOD_NAME>``.
    """
    def perform_override(override_fn):
        name = override_fn.__name__
        backup_name = '_original_' + name
        # Only stash the original once, so repeated overrides never
        # clobber the true original implementation.
        if not hasattr(instance, backup_name):
            setattr(instance, backup_name, getattr(instance, name))
        # Bind the override to the instance and install it.
        setattr(instance, name, override_fn.__get__(instance))
    return perform_override
"def",
"monkey_patch_override_instance_method",
"(",
"instance",
")",
":",
"def",
"perform_override",
"(",
"override_fn",
")",
":",
"fn_name",
"=",
"override_fn",
".",
"__name__",
"original_fn_name",
"=",
"'_original_'",
"+",
"fn_name",
"# Override instance method, if it ... | 46.8125 | 13.9375 |
def to_sky(self, wcs, mode='all'):
    """
    Convert this aperture to a `SkyCircularAperture` defined in
    celestial coordinates.

    Parameters
    ----------
    wcs : `~astropy.wcs.WCS`
        World coordinate system transformation to apply.
    mode : {'all', 'wcs'}, optional
        'all' (default) includes distortions; 'wcs' applies only the
        core WCS transformation.

    Returns
    -------
    aperture : `SkyCircularAperture` object
        The equivalent sky aperture.
    """
    sky_kwargs = self._to_sky_params(wcs, mode=mode)
    return SkyCircularAperture(**sky_kwargs)
"def",
"to_sky",
"(",
"self",
",",
"wcs",
",",
"mode",
"=",
"'all'",
")",
":",
"sky_params",
"=",
"self",
".",
"_to_sky_params",
"(",
"wcs",
",",
"mode",
"=",
"mode",
")",
"return",
"SkyCircularAperture",
"(",
"*",
"*",
"sky_params",
")"
] | 31.782609 | 18.391304 |
def _extract_email(gh):
    """Return the user's primary verified email from GitHub, or None."""
    for entry in gh.emails():
        if entry.verified and entry.primary:
            return entry.email
    return None
"def",
"_extract_email",
"(",
"gh",
")",
":",
"return",
"next",
"(",
"(",
"x",
".",
"email",
"for",
"x",
"in",
"gh",
".",
"emails",
"(",
")",
"if",
"x",
".",
"verified",
"and",
"x",
".",
"primary",
")",
",",
"None",
")"
] | 37.25 | 18.5 |
def add_section(self, section):
    """You can add section inside a Element, the section must be a
    subclass of SubSection. You can use this class to represent a tree.

    :param section: the SubSection instance to attach as a child.
    :raises TypeError: if `section` is not a SubSection subclass instance.
    :return: the section that was added (handy for chaining).
    """
    if not issubclass(section.__class__, SubSection):
        raise TypeError("Argument should be a subclass of SubSection, \
            not :" + str(section.__class__))
    # Children are stored by name: a later section with the same name
    # replaces the earlier one.
    self.sections[section.name] = section
    return section
"def",
"add_section",
"(",
"self",
",",
"section",
")",
":",
"if",
"not",
"issubclass",
"(",
"section",
".",
"__class__",
",",
"SubSection",
")",
":",
"raise",
"TypeError",
"(",
"\"Argument should be a subclass of SubSection, \\\n not :\"",
"... | 44.7 | 18 |
def create_function_from_request_pdu(pdu):
    """ Return function instance, based on request PDU.

    :param pdu: Array of bytes.
    :return: Instance of a function.
    :raises IllegalFunctionError: when the PDU's function code is unknown.
    """
    function_code = get_function_code_from_request_pdu(pdu)
    try:
        function_class = function_code_to_function_map[function_code]
    except KeyError:
        raise IllegalFunctionError(function_code)
    return function_class.create_from_request_pdu(pdu)
"def",
"create_function_from_request_pdu",
"(",
"pdu",
")",
":",
"function_code",
"=",
"get_function_code_from_request_pdu",
"(",
"pdu",
")",
"try",
":",
"function_class",
"=",
"function_code_to_function_map",
"[",
"function_code",
"]",
"except",
"KeyError",
":",
"raise... | 33.076923 | 16.769231 |
def stats():
    '''Read a stream of floats and give summary statistics

    Python 2 code (print statement).  Prints count, mean and the
    population standard deviation of every number found on stdin.
    '''
    import re
    import sys
    import math
    values = []
    for line in sys.stdin:
        # NOTE(review): this pattern requires at least two digit characters,
        # so single-digit numbers like "5" are never matched -- confirm.
        values.extend(map(float, re.findall(r'\d+\.?\d+', line)))
    # NOTE(review): raises ZeroDivisionError when no numbers were read.
    mean = sum(values) / len(values)
    # Population variance (divides by N, not N-1).
    variance = sum((val - mean) ** 2 for val in values) / len(values)
    print '%3i items; mean: %10.5f; std-dev: %10.5f' % (
        len(values), mean, math.sqrt(variance))
"def",
"stats",
"(",
")",
":",
"import",
"re",
"import",
"sys",
"import",
"math",
"values",
"=",
"[",
"]",
"for",
"line",
"in",
"sys",
".",
"stdin",
":",
"values",
".",
"extend",
"(",
"map",
"(",
"float",
",",
"re",
".",
"findall",
"(",
"r'\\d+\\.?... | 33 | 22.076923 |
def get_timeseries_values_for_indicators(
        self, resolution: str = "month", months: Iterable[int] = range(6, 9)
):
    """ Attach timeseries to indicators, for performing Bayesian inference.

    :param resolution: temporal resolution; only "month" is supported.
    :param months: month numbers to sample; defaults to range(6, 9).
    :raises NotImplementedError: for any other resolution.
    """
    if resolution == "month":
        # One lookup function per requested month, each pinned to that month.
        funcs = [
            partial(get_indicator_value, month=month) for month in months
        ]
    else:
        raise NotImplementedError(
            "Currently, only the 'month' resolution is supported."
        )
    for n in self.nodes(data=True):
        for indicator in n[1]["indicators"].values():
            # NOTE(review): the year is hard-coded to "2017" -- confirm.
            indicator.timeseries = [
                func(indicator, year="2017")[0] for func in funcs
            ]
            # If every sampled value is identical, discard the series.
            if len(set(indicator.timeseries)) == 1:
                indicator.timeseries = None
"def",
"get_timeseries_values_for_indicators",
"(",
"self",
",",
"resolution",
":",
"str",
"=",
"\"month\"",
",",
"months",
":",
"Iterable",
"[",
"int",
"]",
"=",
"range",
"(",
"6",
",",
"9",
")",
")",
":",
"if",
"resolution",
"==",
"\"month\"",
":",
"fu... | 38.857143 | 18.047619 |
def _get_once(self, page_text):
    """Extract the 'once' token (required for login) from the page HTML."""
    markup = BeautifulSoup(page_text, 'html.parser')
    once_input = markup.find('input', attrs={'name': 'once'})
    return once_input['value']
"def",
"_get_once",
"(",
"self",
",",
"page_text",
")",
":",
"soup",
"=",
"BeautifulSoup",
"(",
"page_text",
",",
"'html.parser'",
")",
"once",
"=",
"soup",
".",
"find",
"(",
"'input'",
",",
"attrs",
"=",
"{",
"'name'",
":",
"'once'",
"}",
")",
"[",
... | 45.4 | 14 |
def lognorm(x, mu, sigma=1.0):
    """ Log-normal probability density, evaluated via scipy.stats. """
    distribution = stats.lognorm(sigma, scale=mu)
    return distribution.pdf(x)
"def",
"lognorm",
"(",
"x",
",",
"mu",
",",
"sigma",
"=",
"1.0",
")",
":",
"return",
"stats",
".",
"lognorm",
"(",
"sigma",
",",
"scale",
"=",
"mu",
")",
".",
"pdf",
"(",
"x",
")"
] | 40 | 6 |
def firmware_manifest_list(self, **kwargs):  # noqa: E501
    """List firmware manifests.  # noqa: E501

    By default this performs a synchronous HTTP request and returns the
    response body.  Pass ``asynchronous=True`` to receive the request
    thread instead:

        >>> thread = api.firmware_manifest_list(asynchronous=True)
        >>> result = thread.get()

    :param asynchronous: bool -- return the request thread instead of data
    :param int limit: How many firmware manifests to retrieve
    :param str order: ASC or DESC
    :param str after: The ID of the item after which to retrieve the next page
    :param str include: A comma-separated list of data fields to return
    :param str filter: URL-encoded query string parameter to filter returned data
    :return: FirmwareManifestPage (or the request thread when asynchronous)
    """
    # Both modes delegate to the *_with_http_info helper, which honours
    # the 'asynchronous' flag itself; either way its result is returned
    # unchanged, so a single call covers both branches.
    kwargs['_return_http_data_only'] = True
    return self.firmware_manifest_list_with_http_info(**kwargs)  # noqa: E501
"def",
"firmware_manifest_list",
"(",
"self",
",",
"*",
"*",
"kwargs",
")",
":",
"# noqa: E501",
"kwargs",
"[",
"'_return_http_data_only'",
"]",
"=",
"True",
"if",
"kwargs",
".",
"get",
"(",
"'asynchronous'",
")",
":",
"return",
"self",
".",
"firmware_manifest... | 164.96 | 137.92 |
def diff(**kwargs):
    '''
    Returns the difference between the candidate and the current configuration

    id : 0
        The rollback ID value (0-49)

    CLI Example:

    .. code-block:: bash

        salt 'device_name' junos.diff 3
    '''
    kwargs = salt.utils.args.clean_kwargs(**kwargs)
    rollback_id = kwargs.pop('id', 0)
    # Anything left over is an unsupported keyword argument.
    if kwargs:
        salt.utils.args.invalid_kwargs(kwargs)
    conn = __proxy__['junos.conn']()
    ret = {'out': True}
    try:
        ret['message'] = conn.cu.diff(rb_id=rollback_id)
    except Exception as exception:
        ret['message'] = 'Could not get diff with error "{0}"'.format(
            exception)
        ret['out'] = False
    return ret
"def",
"diff",
"(",
"*",
"*",
"kwargs",
")",
":",
"kwargs",
"=",
"salt",
".",
"utils",
".",
"args",
".",
"clean_kwargs",
"(",
"*",
"*",
"kwargs",
")",
"id_",
"=",
"kwargs",
".",
"pop",
"(",
"'id'",
",",
"0",
")",
"if",
"kwargs",
":",
"salt",
".... | 23 | 23.413793 |
def decode_texts(self, encoded_texts, unknown_token="<UNK>", inplace=True):
    """Decodes token-id sequences back into tokens using the internal
    vocabulary.  The nesting of the input list structure is preserved.

    Args:
        encoded_texts: The list (or numpy array) of texts to decode.
        unknown_token: Placeholder for ids missing from the vocabulary.
            (Default value: "<UNK>")
        inplace: True to make changes inplace. (Default value: True)

    Returns:
        The decoded texts.
    """
    if len(self._token2idx) == 0:
        raise ValueError(
            "You need to build vocabulary using `build_vocab` before using `decode_texts`")
    if not isinstance(encoded_texts, list):
        # assume it's a numpy array
        encoded_texts = encoded_texts.tolist()
    if not inplace:
        encoded_texts = deepcopy(encoded_texts)
    idx2token = self._idx2token
    utils._recursive_apply(
        encoded_texts,
        lambda token_id: idx2token.get(token_id) or unknown_token)
    return encoded_texts
"def",
"decode_texts",
"(",
"self",
",",
"encoded_texts",
",",
"unknown_token",
"=",
"\"<UNK>\"",
",",
"inplace",
"=",
"True",
")",
":",
"if",
"len",
"(",
"self",
".",
"_token2idx",
")",
"==",
"0",
":",
"raise",
"ValueError",
"(",
"\"You need to build vocabu... | 41.333333 | 22.583333 |
def _addr_to_function(self, addr, blockaddr_to_function, known_functions):
    """
    Convert an address to a Function object, and store the mapping in a dict. If the block is known to be part of a
    function, just return that function.

    :param int addr: Address to convert
    :param dict blockaddr_to_function: A mapping between block addresses to Function instances.
    :param angr.knowledge_plugins.FunctionManager known_functions: Recovered functions.
    :return: a Function object
    :rtype: angr.knowledge.Function
    """
    if addr in blockaddr_to_function:
        f = blockaddr_to_function[addr]
    else:
        # No function registered at this address yet -- create one.
        is_syscall = self.project.simos.is_syscall_addr(addr)
        n = self.model.get_any_node(addr, is_syscall=is_syscall)
        if n is None: node = addr
        else: node = self._to_snippet(n)
        # Soot (Java) addresses are keyed by method rather than raw address.
        if isinstance(addr, SootAddressDescriptor):
            addr = addr.method
        self.kb.functions._add_node(addr, node, syscall=is_syscall)
        f = self.kb.functions.function(addr=addr)
        blockaddr_to_function[addr] = f
    # Propagate the 'returning' flag from already-recovered functions.
    function_is_returning = False
    if addr in known_functions:
        if known_functions.function(addr).returning:
            f.returning = True
            function_is_returning = True
    if not function_is_returning:
        # We will rerun function feature analysis on this function later. Add it to
        # self._updated_nonreturning_functions so it can be picked up by function feature analysis later.
        if self._updated_nonreturning_functions is not None:
            self._updated_nonreturning_functions.add(addr)
    return f
"def",
"_addr_to_function",
"(",
"self",
",",
"addr",
",",
"blockaddr_to_function",
",",
"known_functions",
")",
":",
"if",
"addr",
"in",
"blockaddr_to_function",
":",
"f",
"=",
"blockaddr_to_function",
"[",
"addr",
"]",
"else",
":",
"is_syscall",
"=",
"self",
... | 41.738095 | 23.595238 |
def admin_link(obj):
    """
    Return an HTML link to the admin URL of an object.

    No permissions checking is involved, so use with caution to avoid
    exposing the link to unauthorised users.

    Example::
        {{ foo_obj|admin_link }}
    renders as::
        <a href='/admin/foo/123'>Foo</a>

    :param obj: A Django model instance.
    :return: A safe string with an HTML link to the admin page for the
        object.
    """
    # Objects may provide their own admin link; otherwise fall back to
    # the generic helper.
    if hasattr(obj, 'get_admin_link'):
        link_markup = obj.get_admin_link()
    else:
        link_markup = admin_link_fn(obj)
    return mark_safe(link_markup)
"def",
"admin_link",
"(",
"obj",
")",
":",
"if",
"hasattr",
"(",
"obj",
",",
"'get_admin_link'",
")",
":",
"return",
"mark_safe",
"(",
"obj",
".",
"get_admin_link",
"(",
")",
")",
"return",
"mark_safe",
"(",
"admin_link_fn",
"(",
"obj",
")",
")"
] | 24.863636 | 20.136364 |
def featuretypes(self):
    """
    Yield each distinct feature type present in the database.

    Returns
    -------
    A generator object that yields featuretypes (as strings)
    """
    cursor = self.conn.cursor()
    cursor.execute(
        '''
        SELECT DISTINCT featuretype from features
        ''')
    for (featuretype,) in cursor:
        yield featuretype
"def",
"featuretypes",
"(",
"self",
")",
":",
"c",
"=",
"self",
".",
"conn",
".",
"cursor",
"(",
")",
"c",
".",
"execute",
"(",
"'''\n SELECT DISTINCT featuretype from features\n '''",
")",
"for",
"i",
",",
"in",
"c",
":",
"yield",
"i"
] | 24.466667 | 18.866667 |
def stop(self):
    """Stop listening for keyboard input events."""
    self.state = False
    with display_manager(self.display) as d:
        d.record_disable_context(self.ctx)
        d.ungrab_keyboard(X.CurrentTime)
    # NOTE(review): this block opens self.display2 but never binds it
    # ('as d' is missing), so `d` below still refers to the FIRST display
    # and the same ctx is disabled twice -- confirm whether the second
    # display was meant to be used here.
    with display_manager(self.display2):
        d.record_disable_context(self.ctx)
        d.ungrab_keyboard(X.CurrentTime)
"def",
"stop",
"(",
"self",
")",
":",
"self",
".",
"state",
"=",
"False",
"with",
"display_manager",
"(",
"self",
".",
"display",
")",
"as",
"d",
":",
"d",
".",
"record_disable_context",
"(",
"self",
".",
"ctx",
")",
"d",
".",
"ungrab_keyboard",
"(",
... | 40.888889 | 7.888889 |
def execute_deploy_from_linked_clone(self, si, logger, vcenter_data_model, reservation_id, deployment_params, cancellation_context, folder_manager):
    """
    Deploy a VM from a linked clone (snapshot).

    :param si: service instance connection to vCenter
    :param logger: logger
    :param vcenter_data_model: vCenter resource model
    :param str reservation_id: reservation the deployment belongs to
    :type deployment_params: DeployFromLinkedClone
    :param cancellation_context: allows aborting the operation
    :param folder_manager: manages the deployed-apps folder
    :return: the deployer's deployment result
    """
    # Make sure the target folder for deployed apps exists first.
    self._prepare_deployed_apps_folder(
        deployment_params, si, logger, folder_manager, vcenter_data_model)
    # Delegate the actual clone operation to the deployer.
    return self.deployer.deploy_from_linked_clone(
        si, logger, deployment_params, vcenter_data_model,
        reservation_id, cancellation_context)
"def",
"execute_deploy_from_linked_clone",
"(",
"self",
",",
"si",
",",
"logger",
",",
"vcenter_data_model",
",",
"reservation_id",
",",
"deployment_params",
",",
"cancellation_context",
",",
"folder_manager",
")",
":",
"self",
".",
"_prepare_deployed_apps_folder",
"(",... | 49.1875 | 29.3125 |
def _check_contraint(self, edge1, edge2):
    """Check whether two edges satisfy the vine constraint.

    Args:
        :param edge1: edge object representing edge1
        :param edge2: edge object representing edge2
        :type edge1: Edge object
        :type edge2: Edge object

    Returns:
        Boolean True if the two edges satisfy vine constraints
    """
    # Union of both edges' endpoints and conditioning sets must contain
    # exactly level + 1 distinct nodes.
    nodes = {edge1.L, edge1.R, edge2.L, edge2.R}
    nodes.update(edge1.D)
    nodes.update(edge2.D)
    return len(nodes) == (self.level + 1)
"def",
"_check_contraint",
"(",
"self",
",",
"edge1",
",",
"edge2",
")",
":",
"full_node",
"=",
"set",
"(",
"[",
"edge1",
".",
"L",
",",
"edge1",
".",
"R",
",",
"edge2",
".",
"L",
",",
"edge2",
".",
"R",
"]",
")",
"full_node",
".",
"update",
"(",... | 35.0625 | 15.125 |
def update_links_and_ffts(self):
    """FFT (856) Dealing with files.

    Rewrites MARC 856 (ind1='4') URL fields: fields without a URL are
    deleted; INSPIRE-hosted PDF links are replaced by FFT fields; all
    remaining 856 fields lose their $w subfield.
    """
    for field in record_get_field_instances(self.record,
                                            tag='856',
                                            ind1='4'):
        subs = field_get_subfields(field)
        newsubs = []
        url = subs.get("u", [])
        # No URL at all: the field is useless, remove it outright.
        if not url:
            record_delete_field(self.record, '856', ind1='4',
                                field_position_global=field[4])
            continue
        url = url[0]
        if "inspirehep.net/record" in url and url.endswith("pdf"):
            # We have an FFT from INSPIRE
            newsubs.append(('a', url))
            description = subs.get("y", [])
            if description:
                newsubs.append(('d', description[0]))
            # Replace the 856 link with an FFT field carrying the same data.
            if newsubs:
                record_add_field(self.record, 'FFT', subfields=newsubs)
                record_delete_field(self.record, '856', ind1='4',
                                    field_position_global=field[4])
        else:
            # Remove $w
            # NOTE(review): deleting from field[0] while enumerating it can
            # skip the next subfield; fine if at most one $w is expected --
            # confirm.
            for idx, (key, value) in enumerate(field[0]):
                if key == 'w':
                    del field[0][idx]
"def",
"update_links_and_ffts",
"(",
"self",
")",
":",
"for",
"field",
"in",
"record_get_field_instances",
"(",
"self",
".",
"record",
",",
"tag",
"=",
"'856'",
",",
"ind1",
"=",
"'4'",
")",
":",
"subs",
"=",
"field_get_subfields",
"(",
"field",
")",
"news... | 43.862069 | 16.310345 |
def recv(self):
    """Receive a message from the backend, blocking until one arrives.

    :return: the decoded JSON payload.
    :raises SelenolWebSocketClosedException: when the underlying
        websocket connection has been closed.
    """
    try:
        raw_message = self.ws.recv()
    except websocket._exceptions.WebSocketConnectionClosedException as ex:
        raise SelenolWebSocketClosedException() from ex
    return json.loads(raw_message)
"def",
"recv",
"(",
"self",
")",
":",
"try",
":",
"message",
"=",
"self",
".",
"ws",
".",
"recv",
"(",
")",
"return",
"json",
".",
"loads",
"(",
"message",
")",
"except",
"websocket",
".",
"_exceptions",
".",
"WebSocketConnectionClosedException",
"as",
"... | 44.571429 | 16.571429 |
def create_event(self, event):
    """Create and persist a single calendar event.

    Parameters
    ----------
    event : iCalendar file as a string
        (calendar containing one event to be added)
    """
    new_event = api.Event.create(self.journal.collection, event)
    new_event.save()
ev.save() | [
"def",
"create_event",
"(",
"self",
",",
"event",
")",
":",
"ev",
"=",
"api",
".",
"Event",
".",
"create",
"(",
"self",
".",
"journal",
".",
"collection",
",",
"event",
")",
"ev",
".",
"save",
"(",
")"
] | 27.1 | 15.1 |
def load_plug_in(self, name):
    """Loads a DBGF plug-in.

    in name of type str
        The plug-in name or DLL. Special name 'all' loads all installed plug-ins.

    return plug_in_name of type str
        The name of the loaded plug-in.
    """
    # NOTE: `basestring` only exists on Python 2 -- this module targets
    # py2 or defines a compatibility alias elsewhere in the file.
    if not isinstance(name, basestring):
        raise TypeError("name can only be an instance of type basestring")
    # Delegate to the generic RPC dispatcher.
    plug_in_name = self._call("loadPlugIn",
                  in_p=[name])
    return plug_in_name
"def",
"load_plug_in",
"(",
"self",
",",
"name",
")",
":",
"if",
"not",
"isinstance",
"(",
"name",
",",
"basestring",
")",
":",
"raise",
"TypeError",
"(",
"\"name can only be an instance of type basestring\"",
")",
"plug_in_name",
"=",
"self",
".",
"_call",
"(",... | 33 | 17.466667 |
def filter(self, u):
    """Filter the valid identities for this matcher.

    :param u: unique identity which stores the identities to filter
    :returns: a list of identities valid to work with this matcher.
    :raises ValueError: when the unique identity is not an instance
        of UniqueIdentity class
    """
    if not isinstance(u, UniqueIdentity):
        raise ValueError("<u> is not an instance of UniqueIdentity")
    filtered = []
    for identity in u.identities:
        source = identity.source.lower()
        # Respect the matcher's source whitelist, when one is set.
        if self.sources and source not in self.sources:
            continue
        # Skip blacklisted identities.
        if self._check_blacklist(identity):
            continue
        # Only GitHub-style sources are usable by this matcher.
        if source.startswith('github'):
            filtered.append(GitHubUsernameIdentity(
                identity.id, identity.uuid,
                identity.username, identity.source))
    return filtered
"def",
"filter",
"(",
"self",
",",
"u",
")",
":",
"if",
"not",
"isinstance",
"(",
"u",
",",
"UniqueIdentity",
")",
":",
"raise",
"ValueError",
"(",
"\"<u> is not an instance of UniqueIdentity\"",
")",
"filtered",
"=",
"[",
"]",
"for",
"id_",
"in",
"u",
"."... | 30.290323 | 23.354839 |
def main(args, stop=False):
    """
    Build the AMQP daemon and run it, either in the foreground or as a
    background daemon depending on the CLI arguments.
    """
    daemon = AMQPDaemon(
        con_param=getConParams(
            settings.RABBITMQ_CALIBRE_VIRTUALHOST
        ),
        queue=settings.RABBITMQ_CALIBRE_INPUT_QUEUE,
        out_exch=settings.RABBITMQ_CALIBRE_EXCHANGE,
        out_key=settings.RABBITMQ_CALIBRE_OUTPUT_KEY,
        react_fn=reactToAMQPMessage,
        glob=globals()  # used in deserializer
    )
    run_in_foreground = (not stop) and args.foreground
    if run_in_foreground:
        daemon.run()
    else:
        daemon.run_daemon()
"def",
"main",
"(",
"args",
",",
"stop",
"=",
"False",
")",
":",
"daemon",
"=",
"AMQPDaemon",
"(",
"con_param",
"=",
"getConParams",
"(",
"settings",
".",
"RABBITMQ_CALIBRE_VIRTUALHOST",
")",
",",
"queue",
"=",
"settings",
".",
"RABBITMQ_CALIBRE_INPUT_QUEUE",
... | 29.157895 | 16.105263 |
def _cmp_date(self):
    """Return the Calendar date used for comparison.

    Picks the earliest CalendarDate found among this instance's values,
    or a freshly-constructed CalendarDate (a date far in the future)
    when none are present (e.g. when Date is a phrase).
    """
    calendar_dates = sorted(value for value in self.kw.values()
                            if isinstance(value, CalendarDate))
    if not calendar_dates:
        # return date very far in the future
        return CalendarDate()
    return calendar_dates[0]
"def",
"_cmp_date",
"(",
"self",
")",
":",
"dates",
"=",
"sorted",
"(",
"val",
"for",
"val",
"in",
"self",
".",
"kw",
".",
"values",
"(",
")",
"if",
"isinstance",
"(",
"val",
",",
"CalendarDate",
")",
")",
"if",
"dates",
":",
"return",
"dates",
"["... | 37.230769 | 16.307692 |
def wrap_subscribe(transport_layer, channel, callback, *args, **kwargs):
    """Listen to a queue on the transport layer, intercepting all incoming
    messages and parsing them for recipe information.

    See common_transport.subscribe for possible additional keyword
    arguments.

    :param transport_layer: Reference to underlying transport object.
    :param channel: Queue name to subscribe to.
    :param callback: Function called for each received message with three
        arguments: a RecipeWrapper object, the header as a dictionary
        structure, and the message.
    :return: A unique subscription ID
    """
    subscribe_method = transport_layer.subscribe
    return _wrap_subscription(
        transport_layer, subscribe_method, channel, callback, *args, **kwargs
    )
"def",
"wrap_subscribe",
"(",
"transport_layer",
",",
"channel",
",",
"callback",
",",
"*",
"args",
",",
"*",
"*",
"kwargs",
")",
":",
"return",
"_wrap_subscription",
"(",
"transport_layer",
",",
"transport_layer",
".",
"subscribe",
",",
"channel",
",",
"callb... | 52.588235 | 23.764706 |
def find_melody(file='440_480_clean.wav', chunksize=512):
    """Cut the sample into chunks, analyze each chunk and run-length
    encode the sequence of dominant notes.

    Returns a list [(Note, chunks)] where chunks counts how many
    consecutive chunks yielded that same dominant Note.

    This is an experimental function.
    """
    (data, freq, bits) = data_from_file(file)
    melody = []
    for note in analyze_chunks(data, freq, bits, chunksize):
        if melody and melody[-1][0] == note:
            # Same note as the previous chunk: extend the current run.
            previous_count = melody[-1][1]
            melody[-1] = (note, previous_count + 1)
        else:
            melody.append((note, 1))
    # Return a fresh list of (note, run-length) pairs.
    return [(note, count) for (note, count) in melody]
"def",
"find_melody",
"(",
"file",
"=",
"'440_480_clean.wav'",
",",
"chunksize",
"=",
"512",
")",
":",
"(",
"data",
",",
"freq",
",",
"bits",
")",
"=",
"data_from_file",
"(",
"file",
")",
"res",
"=",
"[",
"]",
"for",
"d",
"in",
"analyze_chunks",
"(",
... | 31.913043 | 16.434783 |
def save_veto_definer(cp, out_dir, tags=None):
    """ Retrieve the veto definer file, save it locally and point the
    configuration at the local copy.

    Parameters
    -----------
    cp : ConfigParser instance
    out_dir : path
    tags : list of strings
        Used to retrieve subsections of the ini file for
        configuration options.

    Returns
    -------
    The absolute path of the local veto definer file.
    """
    if tags is None:
        tags = []
    make_analysis_dir(out_dir)
    veto_def_url = cp.get_opt_tags("workflow-segments",
                                   "segments-veto-definer-url", tags)
    base_name = os.path.basename(veto_def_url)
    local_path = os.path.abspath(os.path.join(out_dir, base_name))
    # Fetch the file into out_dir.
    resolve_url(veto_def_url, out_dir)
    # Point the workflow configuration at the local copy.
    cp.set("workflow-segments", "segments-veto-definer-file", local_path)
    return local_path
"def",
"save_veto_definer",
"(",
"cp",
",",
"out_dir",
",",
"tags",
"=",
"None",
")",
":",
"if",
"tags",
"is",
"None",
":",
"tags",
"=",
"[",
"]",
"make_analysis_dir",
"(",
"out_dir",
")",
"veto_def_url",
"=",
"cp",
".",
"get_opt_tags",
"(",
"\"workflow-... | 35.12 | 17.32 |
def read_csv_from_file(filename):
    """
    Opens the target CSV file and creates a dictionary with one list for each CSV column.

    :param str filename:
    :return list of lists: column values
    """
    logger_csvs.info("enter read_csv_from_file")
    d = {}
    l = []
    try:
        logger_csvs.info("open file: {}".format(filename))
        with open(filename, 'r') as f:
            r = csv.reader(f, delimiter=',')
            # Create a dict with X lists corresponding to X columns
            # NOTE(review): the first row is used to size the columns AND its
            # cells are cast into the columns -- confirm the file has no
            # header row (or that this is intended).
            for idx, col in enumerate(next(r)):
                d[idx] = []
                d = cast_values_csvs(d, idx, col)
            # Start iter through CSV data
            for row in r:
                for idx, col in enumerate(row):
                    # Append the cell to the correct column list
                    d = cast_values_csvs(d, idx, col)
            # Make a list of lists out of the dictionary instead
            for idx, col in d.items():
                l.append(col)
    except FileNotFoundError as e:
        # Missing file is non-fatal: log it and return the empty result.
        print('CSV FileNotFound: ' + filename)
        logger_csvs.warn("read_csv_to_columns: FileNotFound: {}, {}".format(filename, e))
    logger_csvs.info("exit read_csv_from_file")
    return l
"def",
"read_csv_from_file",
"(",
"filename",
")",
":",
"logger_csvs",
".",
"info",
"(",
"\"enter read_csv_from_file\"",
")",
"d",
"=",
"{",
"}",
"l",
"=",
"[",
"]",
"try",
":",
"logger_csvs",
".",
"info",
"(",
"\"open file: {}\"",
".",
"format",
"(",
"fil... | 34.794118 | 17.911765 |
def parse(self, filename=None, file=None, debuglevel=0):
    """ Parse file.

    kwargs:
        filename (str): File to parse
        file (file-like, optional): Open stream to parse instead of a path
        debuglevel (int): Parser debuglevel
    """
    self.scope.push()
    if not file:
        # We use a path.
        file = filename
    else:
        # We use a stream and try to extract the name from the stream.
        if hasattr(file, 'name'):
            if filename is not None:
                raise AssertionError(
                    'names of file and filename are in conflict')
            filename = file.name
        else:
            filename = '(stream)'
    self.target = filename
    if self.verbose and not self.fail_with_exc:
        print('Compiling target: %s' % filename, file=sys.stderr)
    # Run the actual parse, then post-processing and cleanup.
    self.result = self.parser.parse(file, lexer=self.lex, debug=debuglevel)
    self.post_parse()
    self.register.close()
"def",
"parse",
"(",
"self",
",",
"filename",
"=",
"None",
",",
"file",
"=",
"None",
",",
"debuglevel",
"=",
"0",
")",
":",
"self",
".",
"scope",
".",
"push",
"(",
")",
"if",
"not",
"file",
":",
"# We use a path.",
"file",
"=",
"filename",
"else",
... | 33.571429 | 16.714286 |
def c_drop(self, frequency):
    '''
    Capacitance of an electrode covered in liquid, normalized per unit
    area (i.e., units are F/mm^2).

    Parameters
    ----------
    frequency : float or array-like
        Frequency (or frequencies) at which to interpolate the
        capacitance calibration.

    Returns
    -------
    Interpolated capacitance when ``self._c_drop`` is a calibration
    table (a mapping with 'frequency' and 'capacitance' entries);
    otherwise the stored ``self._c_drop`` value itself.
    '''
    # A calibration table supports the key lookups below; a bare scalar
    # raises TypeError/KeyError/IndexError instead, in which case we
    # fall back to returning the stored value unchanged.  The original
    # bare `except:` also swallowed KeyboardInterrupt/SystemExit, so we
    # narrow it to the lookup failures we actually expect.
    try:
        return np.interp(frequency,
                         self._c_drop['frequency'],
                         self._c_drop['capacitance']
                         )
    except (TypeError, KeyError, IndexError):
        pass
    return self._c_drop
"def",
"c_drop",
"(",
"self",
",",
"frequency",
")",
":",
"try",
":",
"return",
"np",
".",
"interp",
"(",
"frequency",
",",
"self",
".",
"_c_drop",
"[",
"'frequency'",
"]",
",",
"self",
".",
"_c_drop",
"[",
"'capacitance'",
"]",
")",
"except",
":",
"... | 30.384615 | 19.615385 |
def get_response(self):
"""
Get a response from the chatbot and display it.
"""
user_input = self.usr_input.get()
self.usr_input.delete(0, tk.END)
response = self.chatbot.get_response(user_input)
self.conversation['state'] = 'normal'
self.conversation.insert(
tk.END, "Human: " + user_input + "\n" + "ChatBot: " + str(response.text) + "\n"
)
self.conversation['state'] = 'disabled'
time.sleep(0.5) | [
"def",
"get_response",
"(",
"self",
")",
":",
"user_input",
"=",
"self",
".",
"usr_input",
".",
"get",
"(",
")",
"self",
".",
"usr_input",
".",
"delete",
"(",
"0",
",",
"tk",
".",
"END",
")",
"response",
"=",
"self",
".",
"chatbot",
".",
"get_respons... | 30.3125 | 17.9375 |
def print_http_nfc_lease_info(info):
""" Prints information about the lease,
such as the entity covered by the lease,
and HTTP URLs for up/downloading file backings.
:param info:
:type info: vim.HttpNfcLease.Info
:return:
"""
print 'Lease timeout: {0.leaseTimeout}\n' \
'Disk Capacity KB: {0.totalDiskCapacityInKB}'.format(info)
device_number = 1
if info.deviceUrl:
for device_url in info.deviceUrl:
print 'HttpNfcLeaseDeviceUrl: {1}\n' \
'Device URL Import Key: {0.importKey}\n' \
'Device URL Key: {0.key}\n' \
'Device URL: {0.url}\n' \
'Device URL Size: {0.fileSize}\n' \
'SSL Thumbprint: {0.sslThumbprint}\n'.format(device_url,
device_number)
if not device_url.targetId:
print "No targetId found for this device"
print "Device is not eligible for export. This could be a mounted iso or img of some sort"
print "It will NOT be downloaded\n"
device_number += 1
else:
print 'No devices were found.' | [
"def",
"print_http_nfc_lease_info",
"(",
"info",
")",
":",
"print",
"'Lease timeout: {0.leaseTimeout}\\n'",
"'Disk Capacity KB: {0.totalDiskCapacityInKB}'",
".",
"format",
"(",
"info",
")",
"device_number",
"=",
"1",
"if",
"info",
".",
"deviceUrl",
":",
"for",
"device_u... | 42.107143 | 16.035714 |
def delete_jobs(self,
user_ids,
job_ids,
task_ids,
labels,
create_time_min=None,
create_time_max=None):
"""Kills the operations associated with the specified job or job.task.
Args:
user_ids: List of user ids who "own" the job(s) to cancel.
job_ids: List of job_ids to cancel.
task_ids: List of task-ids to cancel.
labels: List of LabelParam, each must match the job(s) to be canceled.
create_time_min: a timezone-aware datetime value for the earliest create
time of a task, inclusive.
create_time_max: a timezone-aware datetime value for the most recent
create time of a task, inclusive.
Returns:
A list of tasks canceled and a list of error messages.
"""
# Look up the job(s)
tasks = list(
self.lookup_job_tasks(
{'RUNNING'},
user_ids=user_ids,
job_ids=job_ids,
task_ids=task_ids,
labels=labels,
create_time_min=create_time_min,
create_time_max=create_time_max))
print('Found %d tasks to delete.' % len(tasks))
return google_base.cancel(self._service.new_batch_http_request,
self._service.operations().cancel, tasks) | [
"def",
"delete_jobs",
"(",
"self",
",",
"user_ids",
",",
"job_ids",
",",
"task_ids",
",",
"labels",
",",
"create_time_min",
"=",
"None",
",",
"create_time_max",
"=",
"None",
")",
":",
"# Look up the job(s)",
"tasks",
"=",
"list",
"(",
"self",
".",
"lookup_jo... | 36.081081 | 17.945946 |
def _decode_caveat_v1(key, caveat):
'''Decode a base64 encoded JSON id.
@param key the nacl private key to decode.
@param caveat a base64 encoded JSON string.
'''
data = base64.b64decode(caveat).decode('utf-8')
wrapper = json.loads(data)
tp_public_key = nacl.public.PublicKey(
base64.b64decode(wrapper['ThirdPartyPublicKey']))
if key.public_key.key != tp_public_key:
raise Exception('public key mismatch') # TODO
if wrapper.get('FirstPartyPublicKey', None) is None:
raise Exception('target service public key not specified')
# The encrypted string is base64 encoded in the JSON representation.
secret = base64.b64decode(wrapper.get('Id'))
nonce = base64.b64decode(wrapper.get('Nonce'))
fp_public_key = nacl.public.PublicKey(base64.b64decode(
wrapper.get('FirstPartyPublicKey')))
box = nacl.public.Box(key.key, fp_public_key)
c = box.decrypt(secret, nonce)
record = json.loads(c.decode('utf-8'))
fp_key = nacl.public.PublicKey(
base64.b64decode(wrapper.get('FirstPartyPublicKey')))
return ThirdPartyCaveatInfo(
condition=record.get('Condition'),
first_party_public_key=PublicKey(fp_key),
third_party_key_pair=key,
root_key=base64.b64decode(record.get('RootKey')),
caveat=caveat,
id=None,
version=VERSION_1,
namespace=legacy_namespace()
) | [
"def",
"_decode_caveat_v1",
"(",
"key",
",",
"caveat",
")",
":",
"data",
"=",
"base64",
".",
"b64decode",
"(",
"caveat",
")",
".",
"decode",
"(",
"'utf-8'",
")",
"wrapper",
"=",
"json",
".",
"loads",
"(",
"data",
")",
"tp_public_key",
"=",
"nacl",
".",... | 35.512821 | 16.538462 |
def execute(helper, config, args):
"""
The init command
"""
# check to see if the application exists
if not helper.application_exists():
helper.create_application(get(config, 'app.description'))
else:
out("Application "+get(config, 'app.app_name')+" exists")
# create environments
environment_names = []
environments_to_wait_for_green = []
for env_name, env_config in list(get(config, 'app.environments').items()):
environment_names.append(env_name)
env_config = parse_env_config(config, env_name)
if not helper.environment_exists(env_name):
option_settings = parse_option_settings(env_config.get('option_settings', {}))
helper.create_environment(env_name,
solution_stack_name=env_config.get('solution_stack_name'),
cname_prefix=env_config.get('cname_prefix', None),
description=env_config.get('description', None),
option_settings=option_settings,
tier_name=env_config.get('tier_name'),
tier_type=env_config.get('tier_type'),
tier_version=env_config.get('tier_version'),
version_label=args.version_label)
environments_to_wait_for_green.append(env_name)
else:
out("Environment "+env_name)
# get the environments
environments_to_wait_for_term = []
if args.delete:
environments = helper.get_environments()
for env in environments:
if env['EnvironmentName'] not in environment_names:
if env['Status'] != 'Ready':
out("Unable to delete "+env['EnvironmentName']+" because it's not in status Ready ("+env['Status']+")")
else:
out("Deleting environment: "+env['EnvironmentName'])
helper.delete_environment(env['EnvironmentName'])
environments_to_wait_for_term.append(env['EnvironmentName'])
# wait
if not args.dont_wait and len(environments_to_wait_for_green)>0:
helper.wait_for_environments(environments_to_wait_for_green, status='Ready', include_deleted=False)
if not args.dont_wait and len(environments_to_wait_for_term)>0:
helper.wait_for_environments(environments_to_wait_for_term, status='Terminated', include_deleted=False)
out("Application initialized")
return 0 | [
"def",
"execute",
"(",
"helper",
",",
"config",
",",
"args",
")",
":",
"# check to see if the application exists",
"if",
"not",
"helper",
".",
"application_exists",
"(",
")",
":",
"helper",
".",
"create_application",
"(",
"get",
"(",
"config",
",",
"'app.descrip... | 44.679245 | 22.641509 |
def swd_read16(self, offset):
"""Gets a unit of ``16`` bits from the input buffer.
Args:
self (JLink): the ``JLink`` instance
offset (int): the offset (in bits) from which to start reading
Returns:
The integer read from the input buffer.
"""
value = self._dll.JLINK_SWD_GetU16(offset)
return ctypes.c_uint16(value).value | [
"def",
"swd_read16",
"(",
"self",
",",
"offset",
")",
":",
"value",
"=",
"self",
".",
"_dll",
".",
"JLINK_SWD_GetU16",
"(",
"offset",
")",
"return",
"ctypes",
".",
"c_uint16",
"(",
"value",
")",
".",
"value"
] | 32.416667 | 16.833333 |
def scroll(self, x, y):
"""Scroll the contents of the console in the direction of x,y.
Uncovered areas will be cleared to the default background color.
Does not move the virutal cursor.
Args:
x (int): Distance to scroll along the x-axis.
y (int): Distance to scroll along the y-axis.
Returns:
Iterator[Tuple[int, int]]: An iterator over the (x, y) coordinates
of any tile uncovered after scrolling.
.. seealso:: :any:`set_colors`
"""
assert isinstance(x, _INTTYPES), "x must be an integer, got %s" % repr(x)
assert isinstance(y, _INTTYPES), "y must be an integer, got %s" % repr(x)
def getSlide(x, length):
"""get the parameters needed to scroll the console in the given
direction with x
returns (x, length, srcx)
"""
if x > 0:
srcx = 0
length -= x
elif x < 0:
srcx = abs(x)
x = 0
length -= srcx
else:
srcx = 0
return x, length, srcx
def getCover(x, length):
"""return the (x, width) ranges of what is covered and uncovered"""
cover = (0, length) # everything covered
uncover = None # nothing uncovered
if x > 0: # left side uncovered
cover = (x, length - x)
uncover = (0, x)
elif x < 0: # right side uncovered
x = abs(x)
cover = (0, length - x)
uncover = (length - x, x)
return cover, uncover
width, height = self.get_size()
if abs(x) >= width or abs(y) >= height:
return self.clear() # just clear the console normally
# get the ranges of the areas that will be uncovered
coverX, uncoverX = getCover(x, width)
coverY, uncoverY = getCover(y, height)
# so at this point we know that coverX and coverY makes a rect that
# encases the area that we end up blitting to. uncoverX/Y makes a
# rect in the corner of the uncovered area. So we need to combine
# the uncoverX/Y with coverY/X to make what's left of the uncovered
# area. Explaining it makes it mush easier to do now.
# But first we need to blit.
x, width, srcx = getSlide(x, width)
y, height, srcy = getSlide(y, height)
self.blit(self, x, y, width, height, srcx, srcy)
if uncoverX: # clear sides (0x20 is space)
self.draw_rect(uncoverX[0], coverY[0], uncoverX[1], coverY[1],
0x20, self._fg, self._bg)
if uncoverY: # clear top/bottom
self.draw_rect(coverX[0], uncoverY[0], coverX[1], uncoverY[1],
0x20, self._fg, self._bg)
if uncoverX and uncoverY: # clear corner
self.draw_rect(uncoverX[0], uncoverY[0], uncoverX[1], uncoverY[1],
0x20, self._fg, self._bg) | [
"def",
"scroll",
"(",
"self",
",",
"x",
",",
"y",
")",
":",
"assert",
"isinstance",
"(",
"x",
",",
"_INTTYPES",
")",
",",
"\"x must be an integer, got %s\"",
"%",
"repr",
"(",
"x",
")",
"assert",
"isinstance",
"(",
"y",
",",
"_INTTYPES",
")",
",",
"\"y... | 41.597222 | 16.680556 |
def download_attachments(self, dataset_identifier, content_type="json",
download_dir="~/sodapy_downloads"):
'''
Download all of the attachments associated with a dataset. Return the paths of downloaded
files.
'''
metadata = self.get_metadata(dataset_identifier, content_type=content_type)
files = []
attachments = metadata['metadata'].get("attachments")
if not attachments:
logging.info("No attachments were found or downloaded.")
return files
download_dir = os.path.join(os.path.expanduser(download_dir), dataset_identifier)
if not os.path.exists(download_dir):
os.makedirs(download_dir)
for attachment in attachments:
file_path = os.path.join(download_dir, attachment["filename"])
has_assetid = attachment.get("assetId", False)
if has_assetid:
base = _format_old_api_request(dataid=dataset_identifier)
assetid = attachment["assetId"]
resource = "{0}/files/{1}?download=true&filename={2}"\
.format(base, assetid, attachment["filename"])
else:
base = "/api/assets"
assetid = attachment["blobId"]
resource = "{0}/{1}?download=true".format(base, assetid)
uri = "{0}{1}{2}".format(self.uri_prefix, self.domain, resource)
_download_file(uri, file_path)
files.append(file_path)
logging.info("The following files were downloaded:\n\t{0}".format("\n\t".join(files)))
return files | [
"def",
"download_attachments",
"(",
"self",
",",
"dataset_identifier",
",",
"content_type",
"=",
"\"json\"",
",",
"download_dir",
"=",
"\"~/sodapy_downloads\"",
")",
":",
"metadata",
"=",
"self",
".",
"get_metadata",
"(",
"dataset_identifier",
",",
"content_type",
"... | 44.916667 | 25.194444 |
def space_before(self):
"""
The EMU equivalent of the centipoints value in
`./a:spcBef/a:spcPts/@val`.
"""
spcBef = self.spcBef
if spcBef is None:
return None
spcPts = spcBef.spcPts
if spcPts is None:
return None
return spcPts.val | [
"def",
"space_before",
"(",
"self",
")",
":",
"spcBef",
"=",
"self",
".",
"spcBef",
"if",
"spcBef",
"is",
"None",
":",
"return",
"None",
"spcPts",
"=",
"spcBef",
".",
"spcPts",
"if",
"spcPts",
"is",
"None",
":",
"return",
"None",
"return",
"spcPts",
".... | 26.25 | 11.25 |
def verify_uri(endpoint_context, request, uri_type, client_id=None):
"""
A redirect URI
MUST NOT contain a fragment
MAY contain query component
:param endpoint_context:
:param request:
:param uri_type: redirect_uri/post_logout_redirect_uri
:return: An error response if the redirect URI is faulty otherwise
None
"""
try:
_cid = request["client_id"]
except KeyError:
_cid = client_id
if not _cid:
logger.error('No client id found')
raise UnknownClient('No client_id provided')
_redirect_uri = unquote(request[uri_type])
part = urlparse(_redirect_uri)
if part.fragment:
raise URIError("Contains fragment")
(_base, _query) = splitquery(_redirect_uri)
if _query:
_query = parse_qs(_query)
match = False
try:
values = endpoint_context.cdb[_cid]['{}s'.format(uri_type)]
except KeyError:
raise ValueError('No registered {}'.format(uri_type))
else:
for regbase, rquery in values:
# The URI MUST exactly match one of the Redirection URI
if _base == regbase:
# every registered query component must exist in the uri
if rquery:
if not _query:
raise ValueError('Missing query part')
for key, vals in rquery.items():
if key not in _query:
raise ValueError('"{}" not in query part'.format(key))
for val in vals:
if val not in _query[key]:
raise ValueError('{}={} value not in query part'.format(key, val))
# and vice versa, every query component in the uri
# must be registered
if _query:
if not rquery:
raise ValueError('No registered query part')
for key, vals in _query.items():
if key not in rquery:
raise ValueError('"{}" extra in query part'.format(key))
for val in vals:
if val not in rquery[key]:
raise ValueError('Extra {}={} value in query part'.format(key, val))
match = True
break
if not match:
raise RedirectURIError("Doesn't match any registered uris") | [
"def",
"verify_uri",
"(",
"endpoint_context",
",",
"request",
",",
"uri_type",
",",
"client_id",
"=",
"None",
")",
":",
"try",
":",
"_cid",
"=",
"request",
"[",
"\"client_id\"",
"]",
"except",
"KeyError",
":",
"_cid",
"=",
"client_id",
"if",
"not",
"_cid",... | 34.985507 | 21.043478 |
def parse_keqv_list(l):
"""Parse list of key=value strings where keys are not duplicated."""
parsed = {}
for elt in l:
k, v = elt.split('=', 1)
if v[0] == '"' and v[-1] == '"':
v = v[1:-1]
parsed[k] = v
return parsed | [
"def",
"parse_keqv_list",
"(",
"l",
")",
":",
"parsed",
"=",
"{",
"}",
"for",
"elt",
"in",
"l",
":",
"k",
",",
"v",
"=",
"elt",
".",
"split",
"(",
"'='",
",",
"1",
")",
"if",
"v",
"[",
"0",
"]",
"==",
"'\"'",
"and",
"v",
"[",
"-",
"1",
"]... | 28.888889 | 14.666667 |
def traverse_commits(self) -> Generator[Commit, None, None]:
"""
Analyze all the specified commits (all of them by default), returning
a generator of commits.
"""
if isinstance(self._path_to_repo, str):
self._path_to_repo = [self._path_to_repo]
for path_repo in self._path_to_repo:
# if it is a remote repo, clone it first in a temporary folder!
if self._isremote(path_repo):
tmp_folder = tempfile.TemporaryDirectory()
path_repo = self._clone_remote_repos(tmp_folder.name,
path_repo)
git_repo = GitRepository(path_repo)
self._sanity_check_filters(git_repo)
self._check_timezones()
logger.info('Analyzing git repository in %s', git_repo.path)
if self._filepath is not None:
self._filepath_commits = git_repo.get_commits_modified_file(
self._filepath)
for commit in git_repo.get_list_commits(self._only_in_branch,
not self._reversed_order):
logger.info('Commit #%s in %s from %s', commit.hash,
commit.committer_date,
commit.author.name)
if self._is_commit_filtered(commit):
logger.info('Commit #%s filtered', commit.hash)
continue
yield commit | [
"def",
"traverse_commits",
"(",
"self",
")",
"->",
"Generator",
"[",
"Commit",
",",
"None",
",",
"None",
"]",
":",
"if",
"isinstance",
"(",
"self",
".",
"_path_to_repo",
",",
"str",
")",
":",
"self",
".",
"_path_to_repo",
"=",
"[",
"self",
".",
"_path_... | 39.105263 | 21.842105 |
def run_parse(self):
"""Parse one or more log files"""
# Data set already has source file names from load_inputs
parsedset = {}
parsedset['data_set'] = []
for log in self.input_files:
parsemodule = self.parse_modules[self.args.parser]
try:
if self.args.tzone:
parsemodule.tzone = self.args.tzone
except NameError: pass
parsedset['data_set'].append(parsemodule.parse_file(log))
self.data_set = parsedset
del(parsedset) | [
"def",
"run_parse",
"(",
"self",
")",
":",
"# Data set already has source file names from load_inputs",
"parsedset",
"=",
"{",
"}",
"parsedset",
"[",
"'data_set'",
"]",
"=",
"[",
"]",
"for",
"log",
"in",
"self",
".",
"input_files",
":",
"parsemodule",
"=",
"self... | 38.857143 | 14.214286 |
def read_storage_class(self, name, **kwargs):
"""
read the specified StorageClass
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.read_storage_class(name, async_req=True)
>>> result = thread.get()
:param async_req bool
:param str name: name of the StorageClass (required)
:param str pretty: If 'true', then the output is pretty printed.
:param bool exact: Should the export be exact. Exact export maintains cluster-specific fields like 'Namespace'. Deprecated. Planned for removal in 1.18.
:param bool export: Should this value be exported. Export strips fields that a user can not specify. Deprecated. Planned for removal in 1.18.
:return: V1StorageClass
If the method is called asynchronously,
returns the request thread.
"""
kwargs['_return_http_data_only'] = True
if kwargs.get('async_req'):
return self.read_storage_class_with_http_info(name, **kwargs)
else:
(data) = self.read_storage_class_with_http_info(name, **kwargs)
return data | [
"def",
"read_storage_class",
"(",
"self",
",",
"name",
",",
"*",
"*",
"kwargs",
")",
":",
"kwargs",
"[",
"'_return_http_data_only'",
"]",
"=",
"True",
"if",
"kwargs",
".",
"get",
"(",
"'async_req'",
")",
":",
"return",
"self",
".",
"read_storage_class_with_h... | 52.565217 | 25.26087 |
def status(Name,
region=None, key=None, keyid=None, profile=None):
'''
Given a trail name describe its properties.
Returns a dictionary of interesting properties.
CLI Example:
.. code-block:: bash
salt myminion boto_cloudtrail.describe mytrail
'''
try:
conn = _get_conn(region=region, key=key, keyid=keyid, profile=profile)
trail = conn.get_trail_status(Name=Name)
if trail:
keys = ('IsLogging', 'LatestDeliveryError', 'LatestNotificationError',
'LatestDeliveryTime', 'LatestNotificationTime',
'StartLoggingTime', 'StopLoggingTime',
'LatestCloudWatchLogsDeliveryError',
'LatestCloudWatchLogsDeliveryTime',
'LatestDigestDeliveryTime', 'LatestDigestDeliveryError',
'LatestDeliveryAttemptTime',
'LatestNotificationAttemptTime',
'LatestNotificationAttemptSucceeded',
'LatestDeliveryAttemptSucceeded',
'TimeLoggingStarted',
'TimeLoggingStopped')
return {'trail': dict([(k, trail.get(k)) for k in keys])}
else:
return {'trail': None}
except ClientError as e:
err = __utils__['boto3.get_error'](e)
if e.response.get('Error', {}).get('Code') == 'TrailNotFoundException':
return {'trail': None}
return {'error': __utils__['boto3.get_error'](e)} | [
"def",
"status",
"(",
"Name",
",",
"region",
"=",
"None",
",",
"key",
"=",
"None",
",",
"keyid",
"=",
"None",
",",
"profile",
"=",
"None",
")",
":",
"try",
":",
"conn",
"=",
"_get_conn",
"(",
"region",
"=",
"region",
",",
"key",
"=",
"key",
",",
... | 37.948718 | 22.358974 |
def get_data(session=None, day=None, year=None):
"""
Get data for day (1-25) and year (>= 2015)
User's session cookie is needed (puzzle inputs differ by user)
"""
if session is None:
user = default_user()
else:
user = User(token=session)
if day is None:
day = current_day()
log.info("current day=%s", day)
if year is None:
year = most_recent_year()
log.info("most recent year=%s", year)
puzzle = Puzzle(year=year, day=day, user=user)
return puzzle.input_data | [
"def",
"get_data",
"(",
"session",
"=",
"None",
",",
"day",
"=",
"None",
",",
"year",
"=",
"None",
")",
":",
"if",
"session",
"is",
"None",
":",
"user",
"=",
"default_user",
"(",
")",
"else",
":",
"user",
"=",
"User",
"(",
"token",
"=",
"session",
... | 31.176471 | 11.411765 |
def get_abbr_impl():
# type: () -> str
"""Return abbreviated implementation name."""
if hasattr(sys, 'pypy_version_info'):
pyimpl = 'pp'
elif sys.platform.startswith('java'):
pyimpl = 'jy'
elif sys.platform == 'cli':
pyimpl = 'ip'
else:
pyimpl = 'cp'
return pyimpl | [
"def",
"get_abbr_impl",
"(",
")",
":",
"# type: () -> str",
"if",
"hasattr",
"(",
"sys",
",",
"'pypy_version_info'",
")",
":",
"pyimpl",
"=",
"'pp'",
"elif",
"sys",
".",
"platform",
".",
"startswith",
"(",
"'java'",
")",
":",
"pyimpl",
"=",
"'jy'",
"elif",... | 26.083333 | 15 |
def set_parallel_multiple(self, value):
"""
Setter for 'parallel_multiple' field.
:param value - a new value of 'parallel_multiple' field. Must be a boolean type. Does not accept None value.
"""
if value is None or not isinstance(value, bool):
raise TypeError("ParallelMultiple must be set to a bool")
else:
self.__parallel_multiple = value | [
"def",
"set_parallel_multiple",
"(",
"self",
",",
"value",
")",
":",
"if",
"value",
"is",
"None",
"or",
"not",
"isinstance",
"(",
"value",
",",
"bool",
")",
":",
"raise",
"TypeError",
"(",
"\"ParallelMultiple must be set to a bool\"",
")",
"else",
":",
"self",... | 44.888889 | 17.555556 |
def handle(self):
"""Handles kick off request."""
# Get and verify mr state.
mr_id = self.request.get("mapreduce_id")
# Log the mr_id since this is started in an unnamed task
logging.info("Processing kickoff for job %s", mr_id)
state = model.MapreduceState.get_by_job_id(mr_id)
if not self._check_mr_state(state, mr_id):
return
# Create input readers.
readers, serialized_readers_entity = self._get_input_readers(state)
if readers is None:
# We don't have any data. Finish map.
logging.warning("Found no mapper input data to process.")
state.active = False
state.result_status = model.MapreduceState.RESULT_SUCCESS
ControllerCallbackHandler._finalize_job(
state.mapreduce_spec, state)
return False
# Create output writers.
self._setup_output_writer(state)
# Save states and make sure we use the saved input readers for
# subsequent operations.
result = self._save_states(state, serialized_readers_entity)
if result is None:
readers, _ = self._get_input_readers(state)
elif not result:
return
queue_name = self.request.headers.get("X-AppEngine-QueueName")
KickOffJobHandler._schedule_shards(state.mapreduce_spec, readers,
queue_name,
state.mapreduce_spec.params["base_path"],
state)
ControllerCallbackHandler.reschedule(
state, state.mapreduce_spec, serial_id=0, queue_name=queue_name) | [
"def",
"handle",
"(",
"self",
")",
":",
"# Get and verify mr state.",
"mr_id",
"=",
"self",
".",
"request",
".",
"get",
"(",
"\"mapreduce_id\"",
")",
"# Log the mr_id since this is started in an unnamed task",
"logging",
".",
"info",
"(",
"\"Processing kickoff for job %s\... | 37.975 | 19.25 |
async def create_connection(self, protocol_factory, host, port, *,
resolve=False, ssl=None,
family=0, proto=0, flags=0):
'''Set up a connection to (host, port) through the proxy.
If resolve is True then host is resolved locally with
getaddrinfo using family, proto and flags, otherwise the proxy
is asked to resolve host.
The function signature is similar to loop.create_connection()
with the same result. The attribute _address is set on the
protocol to the address of the successful remote connection.
Additionally raises SOCKSError if something goes wrong with
the proxy handshake.
'''
loop = asyncio.get_event_loop()
if resolve:
remote_addresses = [NetAddress(info[4][0], info[4][1]) for info in
await loop.getaddrinfo(host, port, family=family, proto=proto,
type=socket.SOCK_STREAM, flags=flags)]
else:
remote_addresses = [NetAddress(host, port)]
sock, remote_address = await self._connect(remote_addresses)
def set_address():
protocol = protocol_factory()
protocol._proxy = self
protocol._remote_address = remote_address
return protocol
return await loop.create_connection(set_address, sock=sock, ssl=ssl,
server_hostname=host if ssl else None) | [
"async",
"def",
"create_connection",
"(",
"self",
",",
"protocol_factory",
",",
"host",
",",
"port",
",",
"*",
",",
"resolve",
"=",
"False",
",",
"ssl",
"=",
"None",
",",
"family",
"=",
"0",
",",
"proto",
"=",
"0",
",",
"flags",
"=",
"0",
")",
":",... | 46.030303 | 26.030303 |
def getCallSet(self, id_):
"""
Returns a CallSet with the specified id, or raises a
CallSetNotFoundException if it does not exist.
"""
if id_ not in self._callSetIdMap:
raise exceptions.CallSetNotFoundException(id_)
return self._callSetIdMap[id_] | [
"def",
"getCallSet",
"(",
"self",
",",
"id_",
")",
":",
"if",
"id_",
"not",
"in",
"self",
".",
"_callSetIdMap",
":",
"raise",
"exceptions",
".",
"CallSetNotFoundException",
"(",
"id_",
")",
"return",
"self",
".",
"_callSetIdMap",
"[",
"id_",
"]"
] | 37.375 | 8.625 |
def import_deleted_fields(self, data):
"""
Set data fields to deleted
"""
if self.get_read_only() and self.is_locked():
return
if isinstance(data, str):
data = [data]
for key in data:
if hasattr(self, key):
delattr(self, key)
continue
keys = key.split('.', 1)
if len(keys) != 2:
continue
child = getattr(self, keys[0])
child.import_deleted_fields(keys[1]) | [
"def",
"import_deleted_fields",
"(",
"self",
",",
"data",
")",
":",
"if",
"self",
".",
"get_read_only",
"(",
")",
"and",
"self",
".",
"is_locked",
"(",
")",
":",
"return",
"if",
"isinstance",
"(",
"data",
",",
"str",
")",
":",
"data",
"=",
"[",
"data... | 22.565217 | 16.913043 |
def allowed_values(self):
"""A tuple containing the allowed values for this Slot.
The Python equivalent of the CLIPS slot-allowed-values function.
"""
data = clips.data.DataObject(self._env)
lib.EnvDeftemplateSlotAllowedValues(
self._env, self._tpl, self._name, data.byref)
return tuple(data.value) if isinstance(data.value, list) else () | [
"def",
"allowed_values",
"(",
"self",
")",
":",
"data",
"=",
"clips",
".",
"data",
".",
"DataObject",
"(",
"self",
".",
"_env",
")",
"lib",
".",
"EnvDeftemplateSlotAllowedValues",
"(",
"self",
".",
"_env",
",",
"self",
".",
"_tpl",
",",
"self",
".",
"_... | 32.583333 | 22.25 |
def _end_channel(self, channel):
"""
Soft end of ssh channel. End the writing thread as soon as the message queue is empty.
"""
self.stop_on_empty_queue[channel] = True
# by joining the we wait until its loop finishes.
# it won't loop forever since we've set self.stop_on_empty_queue=True
write_thread = self.thread_write_instances[channel]
thread_join_non_blocking(write_thread) | [
"def",
"_end_channel",
"(",
"self",
",",
"channel",
")",
":",
"self",
".",
"stop_on_empty_queue",
"[",
"channel",
"]",
"=",
"True",
"# by joining the we wait until its loop finishes.",
"# it won't loop forever since we've set self.stop_on_empty_queue=True",
"write_thread",
"=",... | 39.545455 | 20.818182 |
def available_backends(self, hub=None, group=None, project=None, access_token=None, user_id=None):
"""
Get the backends available to use in the QX Platform
"""
if access_token:
self.req.credential.set_token(access_token)
if user_id:
self.req.credential.set_user_id(user_id)
if not self.check_credentials():
raise CredentialsError('credentials invalid')
else:
url = get_backend_url(self.config, hub, group, project)
ret = self.req.get(url)
if (ret is not None) and (isinstance(ret, dict)):
return []
return [backend for backend in ret
if backend.get('status') == 'on'] | [
"def",
"available_backends",
"(",
"self",
",",
"hub",
"=",
"None",
",",
"group",
"=",
"None",
",",
"project",
"=",
"None",
",",
"access_token",
"=",
"None",
",",
"user_id",
"=",
"None",
")",
":",
"if",
"access_token",
":",
"self",
".",
"req",
".",
"c... | 38.263158 | 18.578947 |
def Connect(self, Username, WaitConnected=False):
"""Connects application to user.
:Parameters:
Username : str
Name of the user to connect to.
WaitConnected : bool
If True, causes the method to wait until the connection is established.
:return: If ``WaitConnected`` is True, returns the stream which can be used to send the
data. Otherwise returns None.
:rtype: `ApplicationStream` or None
"""
if WaitConnected:
self._Connect_Event = threading.Event()
self._Connect_Stream = [None]
self._Connect_Username = Username
self._Connect_ApplicationStreams(self, self.Streams)
self._Owner.RegisterEventHandler('ApplicationStreams', self._Connect_ApplicationStreams)
self._Alter('CONNECT', Username)
self._Connect_Event.wait()
self._Owner.UnregisterEventHandler('ApplicationStreams', self._Connect_ApplicationStreams)
try:
return self._Connect_Stream[0]
finally:
del self._Connect_Stream, self._Connect_Event, self._Connect_Username
else:
self._Alter('CONNECT', Username) | [
"def",
"Connect",
"(",
"self",
",",
"Username",
",",
"WaitConnected",
"=",
"False",
")",
":",
"if",
"WaitConnected",
":",
"self",
".",
"_Connect_Event",
"=",
"threading",
".",
"Event",
"(",
")",
"self",
".",
"_Connect_Stream",
"=",
"[",
"None",
"]",
"sel... | 43.5 | 19.821429 |
def find_optimal_allocation(self, tokens):
"""
Finds longest, non-overlapping word-ranges of phrases in tokens stored in TokenTrie
:param tokens: tokens tokenize
:type tokens: list of str
:return: Optimal allocation of tokens to phrases
:rtype: list of TokenTrie.Token
"""
token_ranges = self.find_tracked_words(tokens)
token_ranges.sort()
for offset in range(1, len(token_ranges)):
to_be_removed = []
for candidate in token_ranges[offset:]:
for i in range(offset):
if token_ranges[i].overlaps_with(candidate):
to_be_removed.append(candidate)
break
token_ranges = [token for token in token_ranges if token not in to_be_removed]
token_ranges.sort(key=lambda token: token.get_start_index())
return token_ranges | [
"def",
"find_optimal_allocation",
"(",
"self",
",",
"tokens",
")",
":",
"token_ranges",
"=",
"self",
".",
"find_tracked_words",
"(",
"tokens",
")",
"token_ranges",
".",
"sort",
"(",
")",
"for",
"offset",
"in",
"range",
"(",
"1",
",",
"len",
"(",
"token_ran... | 37.708333 | 18.291667 |
def from_str(string):
"""Generate a `SetReadingEvent` object from a string
"""
match = re.match(r'^START READING (\w+) FROM \w+ (\d+)$', string)
if match:
return SetReadingEvent(match.group(1), int(match.group(2)))
else:
raise EventParseError | [
"def",
"from_str",
"(",
"string",
")",
":",
"match",
"=",
"re",
".",
"match",
"(",
"r'^START READING (\\w+) FROM \\w+ (\\d+)$'",
",",
"string",
")",
"if",
"match",
":",
"return",
"SetReadingEvent",
"(",
"match",
".",
"group",
"(",
"1",
")",
",",
"int",
"("... | 37.375 | 17.5 |
def NonUniformImage(x, y, z, ax=None, fig=None, cmap=None, alpha=None, scalex=True, scaley=True, add_cbar=True, **kwargs):
"""
Used to plot a set of coordinates.
Parameters
----------
x, y : :class:`numpy.ndarray`
1-D ndarrays of lengths N and M, respectively, specifying pixel centers
z : :class:`numpy.ndarray`
An (M, N) ndarray or masked array of values to be colormapped, or a (M, N, 3) RGB array, or a (M, N, 4) RGBA array.
ax : :class:`matplotlib.axes.Axes`, optional
The axis to plot to.
fig : :class:`matplotlib.figure.Figure`, optional
The figure to plot to.
cmap : :class:`matplotlib.colors.Colormap`, optional
The colormap to use.
alpha : float, optional
The transparency to use.
scalex : bool, optional
To set the x limits to available data
scaley : bool, optional
To set the y limits to available data
add_cbar : bool, optional
Whether ot add a colorbar or not.
Returns
-------
img : :class:`matplotlib.image.NonUniformImage`
Object representing the :class:`matplotlib.image.NonUniformImage`.
"""
if ax is None and fig is None:
fig, ax = _setup_axes()
elif ax is None:
ax = fig.gca()
elif fig is None:
fig = ax.get_figure()
norm = kwargs.get('norm', None)
im = _mplim.NonUniformImage(ax, **kwargs)
vmin = kwargs.pop('vmin', _np.min(z))
vmax = kwargs.pop('vmax', _np.max(z))
# im.set_clim(vmin=vmin, vmax=vmax)
if cmap is not None:
im.set_cmap(cmap)
m = _cm.ScalarMappable(cmap=im.get_cmap(), norm=norm)
m.set_array(z)
if add_cbar:
cax, cb = _cb(ax=ax, im=m, fig=fig)
if alpha is not None:
im.set_alpha(alpha)
im.set_data(x, y, z)
ax.images.append(im)
if scalex:
xmin = min(x)
xmax = max(x)
ax.set_xlim(xmin, xmax)
if scaley:
ymin = min(y)
ymax = max(y)
ax.set_ylim(ymin, ymax)
return _SI(im=im, cb=cb, cax=cax) | [
"def",
"NonUniformImage",
"(",
"x",
",",
"y",
",",
"z",
",",
"ax",
"=",
"None",
",",
"fig",
"=",
"None",
",",
"cmap",
"=",
"None",
",",
"alpha",
"=",
"None",
",",
"scalex",
"=",
"True",
",",
"scaley",
"=",
"True",
",",
"add_cbar",
"=",
"True",
... | 27.527778 | 20.555556 |
def reload_(jboss_config, host=None):
'''
Reload running jboss instance
jboss_config
Configuration dictionary with properties specified above.
host
The name of the host. JBoss domain mode only - and required if running in domain mode.
The host name is the "name" attribute of the "host" element in host.xml
CLI Example:
.. code-block:: bash
salt '*' jboss7.reload '{"cli_path": "integration.modules.sysmod.SysModuleTest.test_valid_docs", "controller": "10.11.12.13:9999", "cli_user": "jbossadm", "cli_password": "jbossadm"}'
'''
log.debug("======================== MODULE FUNCTION: jboss7.reload")
if host is None:
operation = ':reload'
else:
operation = '/host="{host}"/:reload'.format(host=host)
reload_result = __salt__['jboss7_cli.run_operation'](jboss_config, operation, fail_on_error=False)
# JBoss seems to occasionaly close the channel immediately when :reload is sent
if reload_result['success'] or (not reload_result['success'] and
('Operation failed: Channel closed' in reload_result['stdout'] or
'Communication error: java.util.concurrent.ExecutionException: Operation failed' in reload_result['stdout'])):
return reload_result
else:
raise Exception('''Cannot handle error, return code={retcode}, stdout='{stdout}', stderr='{stderr}' '''.format(**reload_result)) | [
"def",
"reload_",
"(",
"jboss_config",
",",
"host",
"=",
"None",
")",
":",
"log",
".",
"debug",
"(",
"\"======================== MODULE FUNCTION: jboss7.reload\"",
")",
"if",
"host",
"is",
"None",
":",
"operation",
"=",
"':reload'",
"else",
":",
"operation",
"="... | 48.533333 | 40.133333 |
def _ReturnConnection(self):
"""
Returns a connection back to the pool
@author: Nick Verbeck
@since: 9/7/2008
"""
if self.conn is not None:
if self.connInfo.commitOnEnd is True or self.commitOnEnd is True:
self.conn.Commit()
Pool().returnConnection(self.conn)
self.conn = None | [
"def",
"_ReturnConnection",
"(",
"self",
")",
":",
"if",
"self",
".",
"conn",
"is",
"not",
"None",
":",
"if",
"self",
".",
"connInfo",
".",
"commitOnEnd",
"is",
"True",
"or",
"self",
".",
"commitOnEnd",
"is",
"True",
":",
"self",
".",
"conn",
".",
"C... | 22.923077 | 16 |
def get_assessment_taken_form(self, *args, **kwargs):
"""Pass through to provider AssessmentTakenAdminSession.get_assessment_taken_form_for_update"""
# Implemented from kitosid template for -
# osid.resource.ResourceAdminSession.get_resource_form_for_update
# This method might be a bit sketchy. Time will tell.
if isinstance(args[-1], list) or 'assessment_taken_record_types' in kwargs:
return self.get_assessment_taken_form_for_create(*args, **kwargs)
else:
return self.get_assessment_taken_form_for_update(*args, **kwargs) | [
"def",
"get_assessment_taken_form",
"(",
"self",
",",
"*",
"args",
",",
"*",
"*",
"kwargs",
")",
":",
"# Implemented from kitosid template for -",
"# osid.resource.ResourceAdminSession.get_resource_form_for_update",
"# This method might be a bit sketchy. Time will tell.",
"if",
"is... | 65.444444 | 24.444444 |
def _get_struct_rect(self):
"""Get the RECT structure."""
bc = BitConsumer(self._src)
nbits = bc.u_get(5)
if self._read_twips:
return tuple(bc.s_get(nbits) for _ in range(4))
else:
return tuple(bc.s_get(nbits) / 20.0 for _ in range(4)) | [
"def",
"_get_struct_rect",
"(",
"self",
")",
":",
"bc",
"=",
"BitConsumer",
"(",
"self",
".",
"_src",
")",
"nbits",
"=",
"bc",
".",
"u_get",
"(",
"5",
")",
"if",
"self",
".",
"_read_twips",
":",
"return",
"tuple",
"(",
"bc",
".",
"s_get",
"(",
"nbi... | 36.5 | 14.375 |
def read(self, input_buffer, kmip_version=enums.KMIPVersion.KMIP_1_0):
"""
Read the data encoding the Locate response payload and decode it
into its constituent parts.
Args:
input_buffer (stream): A data buffer containing encoded object
data, supporting a read method.
kmip_version (KMIPVersion): An enumeration defining the KMIP
version with which the object will be decoded. Optional,
defaults to KMIP 1.0.
"""
super(LocateResponsePayload, self).read(
input_buffer,
kmip_version=kmip_version
)
local_buffer = utils.BytearrayStream(input_buffer.read(self.length))
if self.is_tag_next(enums.Tags.LOCATED_ITEMS, local_buffer):
self._located_items = primitives.Integer(
tag=enums.Tags.LOCATED_ITEMS
)
self._located_items.read(
local_buffer,
kmip_version=kmip_version
)
self._unique_identifiers = []
while self.is_tag_next(enums.Tags.UNIQUE_IDENTIFIER, local_buffer):
unique_identifier = primitives.TextString(
tag=enums.Tags.UNIQUE_IDENTIFIER
)
unique_identifier.read(local_buffer, kmip_version=kmip_version)
self._unique_identifiers.append(unique_identifier)
self.is_oversized(local_buffer) | [
"def",
"read",
"(",
"self",
",",
"input_buffer",
",",
"kmip_version",
"=",
"enums",
".",
"KMIPVersion",
".",
"KMIP_1_0",
")",
":",
"super",
"(",
"LocateResponsePayload",
",",
"self",
")",
".",
"read",
"(",
"input_buffer",
",",
"kmip_version",
"=",
"kmip_vers... | 39.166667 | 19.833333 |
def page_format(self, topmargin, bottommargin):
'''Specify settings for top and bottom margins. Physically printable area depends on media.
Args:
topmargin: the top margin, in dots. The top margin must be less than the bottom margin.
bottommargin: the bottom margin, in dots. The bottom margin must be less than the top margin.
Returns:
None
Raises:
RuntimeError: Top margin must be less than the bottom margin.
'''
tL = topmargin%256
tH = topmargin/256
BL = bottommargin%256
BH = topmargin/256
if (tL+tH*256) < (BL + BH*256):
self.send(chr(27)+'('+'c'+chr(4)+chr(0)+chr(tL)+chr(tH)+chr(BL)+chr(BH))
else:
raise RuntimeError('The top margin must be less than the bottom margin') | [
"def",
"page_format",
"(",
"self",
",",
"topmargin",
",",
"bottommargin",
")",
":",
"tL",
"=",
"topmargin",
"%",
"256",
"tH",
"=",
"topmargin",
"/",
"256",
"BL",
"=",
"bottommargin",
"%",
"256",
"BH",
"=",
"topmargin",
"/",
"256",
"if",
"(",
"tL",
"+... | 43.631579 | 29.105263 |
def configure(self, options, conf):
"""Configure plugin.
"""
if not self.available():
self.enabled = False
return
Plugin.configure(self, options, conf)
self.conf = conf
if options.profile_stats_file:
self.pfile = options.profile_stats_file
self.clean_stats_file = False
else:
self.pfile = None
self.clean_stats_file = True
self.fileno = None
self.sort = options.profile_sort
self.restrict = tolist(options.profile_restrict) | [
"def",
"configure",
"(",
"self",
",",
"options",
",",
"conf",
")",
":",
"if",
"not",
"self",
".",
"available",
"(",
")",
":",
"self",
".",
"enabled",
"=",
"False",
"return",
"Plugin",
".",
"configure",
"(",
"self",
",",
"options",
",",
"conf",
")",
... | 32.882353 | 8.588235 |
def cli():
"""Entry point for the application script"""
parser = get_argparser()
args = parser.parse_args()
check_args(args)
if args.v:
print('ERAlchemy version {}.'.format(__version__))
exit(0)
render_er(
args.i,
args.o,
include_tables=args.include_tables,
include_columns=args.include_columns,
exclude_tables=args.exclude_tables,
exclude_columns=args.exclude_columns,
schema=args.s
) | [
"def",
"cli",
"(",
")",
":",
"parser",
"=",
"get_argparser",
"(",
")",
"args",
"=",
"parser",
".",
"parse_args",
"(",
")",
"check_args",
"(",
"args",
")",
"if",
"args",
".",
"v",
":",
"print",
"(",
"'ERAlchemy version {}.'",
".",
"format",
"(",
"__vers... | 26.055556 | 18.166667 |
def validate_path_parameters(target_path, api_path, path_parameters, context):
"""
Helper function for validating a request path
"""
base_path = context.get('basePath', '')
full_api_path = re.sub(NORMALIZE_SLASH_REGEX, '/', base_path + api_path)
parameter_values = get_path_parameter_values(
target_path, full_api_path, path_parameters, context,
)
validate_parameters(parameter_values, path_parameters, context=context) | [
"def",
"validate_path_parameters",
"(",
"target_path",
",",
"api_path",
",",
"path_parameters",
",",
"context",
")",
":",
"base_path",
"=",
"context",
".",
"get",
"(",
"'basePath'",
",",
"''",
")",
"full_api_path",
"=",
"re",
".",
"sub",
"(",
"NORMALIZE_SLASH_... | 45 | 18.6 |
def connections_to_object(self, to_obj):
"""
Returns a ``Connection`` query set matching all connections with
the given object as a destination.
"""
self._validate_ctypes(None, to_obj)
return self.connections.filter(to_pk=to_obj.pk) | [
"def",
"connections_to_object",
"(",
"self",
",",
"to_obj",
")",
":",
"self",
".",
"_validate_ctypes",
"(",
"None",
",",
"to_obj",
")",
"return",
"self",
".",
"connections",
".",
"filter",
"(",
"to_pk",
"=",
"to_obj",
".",
"pk",
")"
] | 39.142857 | 7.428571 |
def _processCommandLineArgs():
"""
Get the command line arguments
Parameters: NONE
Returns:
files list of file specifications to be converted
outputFileNames list of output file specifications
(one per input file)
Default: a list of None values (one per input file)
conversionFormat string indicating the conversion format requested
Default: "mulitextension"
verbose flag indicating if verbose output is desired
Default: False
Exceptions: NONE
"""
import getopt
try:
opts, args = getopt.getopt(sys.argv[1:], "hvmo:",
["help",
"verbose",
"multiExtensionConversion",
"outputFileName"])
except getopt.GetoptError as e:
print(str(e))
_usage()
sys.exit(1)
conversionFormat = ""
outputFileNames = []
verbose = False
for o, a in opts:
if o in ("-h", "--help"):
_usage()
print(" Convert the waivered FITS Files (FILEs) to various formats.")
print(" The default conversion format is multi-extension FITS.")
print(" Options:")
print(" -h, --help display this help message and exit")
print(" -v, --verbose provide verbose output")
print(" -m, --multiExtensionConversion convert to multiExtension FITS format")
print(" -o, --outputFileName comma separated list of output file")
print(" specifications (one per input FILE)")
sys.exit()
if o in ("-v", "--verbose"):
verbose = True
if o in ("-m", "--multiExtensionConversion"):
if conversionFormat != "":
print("convertwaiveredfits.py: only one conversion format allowed")
_usage()
sys.exit(1)
conversionFormat = "multiExtension"
if o in ("-o", "--outputFileName"):
outputFileNames = a.split(',')
if conversionFormat == "":
#
# Set the default conversion format if none was provided
#
conversionFormat = "multiExtension"
if not args:
print("convertwaiveredfits.py: nothing to convert")
_usage()
sys.exit(1)
else:
files = args
if outputFileNames:
if len(files) != len(outputFileNames):
print("convertwaiveredfits.py: number of output file names does not match")
print(" the number of FILEs to convert")
_usage()
sys.exit(1)
else:
for i in range(0,len(files)):
outputFileNames.append(None)
return files,outputFileNames,conversionFormat,verbose | [
"def",
"_processCommandLineArgs",
"(",
")",
":",
"import",
"getopt",
"try",
":",
"opts",
",",
"args",
"=",
"getopt",
".",
"getopt",
"(",
"sys",
".",
"argv",
"[",
"1",
":",
"]",
",",
"\"hvmo:\"",
",",
"[",
"\"help\"",
",",
"\"verbose\"",
",",
"\"multiEx... | 33.527473 | 24.626374 |
def list_sensors(parent_class, sensor_items, filter, strategy, status,
use_python_identifiers, tuple, refresh):
"""Helper for implementing :meth:`katcp.resource.KATCPResource.list_sensors`
Parameters
----------
sensor_items : tuple of sensor-item tuples
As would be returned the items() method of a dict containing KATCPSensor objects
keyed by Python-identifiers.
parent_class: KATCPClientResource or KATCPClientResourceContainer
Is used for prefix calculation
Rest of parameters as for :meth:`katcp.resource.KATCPResource.list_sensors`
"""
filter_re = re.compile(filter)
found_sensors = []
none_strat = resource.normalize_strategy_parameters('none')
sensor_dict = dict(sensor_items)
for sensor_identifier in sorted(sensor_dict.keys()):
sensor_obj = sensor_dict[sensor_identifier]
search_name = (sensor_identifier if use_python_identifiers
else sensor_obj.name)
name_match = filter_re.search(search_name)
# Only include sensors with strategies
strat_match = not strategy or sensor_obj.sampling_strategy != none_strat
if name_match and strat_match:
if refresh:
# First refresh the sensor reading
yield sensor_obj.get_value()
# Determine the sensorname prefix:
# parent_name. except for aggs when in KATCPClientResourceContinaer
prefix = ""
if isinstance(parent_class, KATCPClientResourceContainer):
if sensor_obj.name.startswith("agg_"):
prefix = ""
else:
prefix = sensor_obj.parent_name + "."
if not status or (sensor_obj.reading.status in status):
# Only include sensors of the given status
if tuple:
# (sensor.name, sensor.value, sensor.value_seconds, sensor.type, sensor.units, sensor.update_seconds, sensor.status, strategy_and_params)
found_sensors.append((
prefix+sensor_obj.name,
sensor_obj.reading.value,
sensor_obj.reading.timestamp,
sensor_obj.type,
sensor_obj.units,
sensor_obj.reading.received_timestamp,
sensor_obj.reading.status,
sensor_obj.sampling_strategy
))
else:
found_sensors.append(resource.SensorResultTuple(
object=sensor_obj,
name=prefix+sensor_obj.name,
python_identifier=sensor_identifier,
description=sensor_obj.description,
units=sensor_obj.units,
type=sensor_obj.type,
reading=sensor_obj.reading))
raise tornado.gen.Return(found_sensors) | [
"def",
"list_sensors",
"(",
"parent_class",
",",
"sensor_items",
",",
"filter",
",",
"strategy",
",",
"status",
",",
"use_python_identifiers",
",",
"tuple",
",",
"refresh",
")",
":",
"filter_re",
"=",
"re",
".",
"compile",
"(",
"filter",
")",
"found_sensors",
... | 48.196721 | 17.196721 |
def indices_to_points(indices, pitch, origin):
"""
Convert indices of an (n,m,p) matrix into a set of voxel center points.
Parameters
----------
indices: (q, 3) int, index of voxel matrix (n,m,p)
pitch: float, what pitch was the voxel matrix computed with
origin: (3,) float, what is the origin of the voxel matrix
Returns
----------
points: (q, 3) float, list of points
"""
indices = np.asanyarray(indices, dtype=np.float64)
origin = np.asanyarray(origin, dtype=np.float64)
pitch = float(pitch)
if indices.shape != (indices.shape[0], 3):
from IPython import embed
embed()
raise ValueError('shape of indices must be (q, 3)')
if origin.shape != (3,):
raise ValueError('shape of origin must be (3,)')
points = indices * pitch + origin
return points | [
"def",
"indices_to_points",
"(",
"indices",
",",
"pitch",
",",
"origin",
")",
":",
"indices",
"=",
"np",
".",
"asanyarray",
"(",
"indices",
",",
"dtype",
"=",
"np",
".",
"float64",
")",
"origin",
"=",
"np",
".",
"asanyarray",
"(",
"origin",
",",
"dtype... | 29.571429 | 20 |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.