text stringlengths 89 104k | code_tokens list | avg_line_len float64 7.91 980 | score float64 0 630 |
|---|---|---|---|
def _make_graphite_api_points_list(influxdb_data):
"""Make graphite-api data points dictionary from Influxdb ResultSet data"""
_data = {}
for key in influxdb_data.keys():
_data[key[0]] = [(datetime.datetime.fromtimestamp(float(d['time'])),
d['value']) for d in influxdb_data.get_points(key[0])]
return _data | [
"def",
"_make_graphite_api_points_list",
"(",
"influxdb_data",
")",
":",
"_data",
"=",
"{",
"}",
"for",
"key",
"in",
"influxdb_data",
".",
"keys",
"(",
")",
":",
"_data",
"[",
"key",
"[",
"0",
"]",
"]",
"=",
"[",
"(",
"datetime",
".",
"datetime",
".",
... | 50.142857 | 20 |
def on_success(self, fn, *args, **kwargs):
"""
Call the given callback if or when the connected deferred succeeds.
"""
self._callbacks.append((fn, args, kwargs))
result = self._resulted_in
if result is not _NOTHING_YET:
self._succeed(result=result) | [
"def",
"on_success",
"(",
"self",
",",
"fn",
",",
"*",
"args",
",",
"*",
"*",
"kwargs",
")",
":",
"self",
".",
"_callbacks",
".",
"append",
"(",
"(",
"fn",
",",
"args",
",",
"kwargs",
")",
")",
"result",
"=",
"self",
".",
"_resulted_in",
"if",
"r... | 27.363636 | 15.909091 |
def _variant_sv(checkpoints):
"""Structural variant workflow.
"""
if not checkpoints.get("sv"):
return [], []
sv = [s("detect_sv", "batch-single",
[["sv_batch_rec"]],
[cwlout("sv_rec", "record",
fields=[cwlout(["sv", "variantcaller"], ["string", "null"]),
cwlout(["sv", "vrn_file"], ["File", "null"], [".tbi"]),
cwlout(["sv", "supplemental"], {"type": "array", "items": ["File"]}),
cwlout(["svvalidate", "summary"], ["File", "null"]),
cwlout("inherit", exclude=[["align_bam"], ["work_bam_plus"],
["reference", "snpeff"]])])],
"bcbio-vc", ["bedtools", "cnvkit", "delly", "duphold", "extract-sv-reads", "gsort",
"lumpy-sv;env=python2", "manta;env=python2", "break-point-inspector", "mosdepth", "samtools",
"smoove;env=python2", "pysam>=0.13.0",
"seq2c", "simple_sv_annotation;env=python2", "survivor", "svtools;env=python2",
"svtyper;env=python2",
"r=3.5.1", "r-base", "xorg-libxt", "vawk;env=python2"],
disk={"files": 2.0})]
sv_batch_inputs = [["analysis"], ["genome_build"],
["work_bam_plus", "disc"], ["work_bam_plus", "sr"],
["config", "algorithm", "background", "cnv_reference"],
["config", "algorithm", "tools_on"],
["config", "algorithm", "tools_off"],
["config", "algorithm", "svprioritize"],
["config", "algorithm", "svvalidate"], ["regions", "sample_callable"],
["genome_resources", "variation", "gc_profile"],
["genome_resources", "variation", "germline_het_pon"],
["genome_resources", "aliases", "snpeff"], ["reference", "snpeff", "genome_build"],
["sv_coverage_rec"]]
if checkpoints.get("vc"):
sv_batch_inputs.append(["variants", "samples"])
steps = [s("calculate_sv_bins", "multi-combined",
[["align_bam"], ["reference", "fasta", "base"],
["metadata", "batch"], ["metadata", "phenotype"],
["config", "algorithm", "background", "cnv_reference"],
["config", "algorithm", "callable_regions"],
["config", "algorithm", "coverage_interval"],
["config", "algorithm", "exclude_regions"],
["config", "algorithm", "sv_regions"],
["config", "algorithm", "variant_regions"],
["config", "algorithm", "variant_regions_merged"],
["config", "algorithm", "seq2c_bed_ready"],
["config", "algorithm", "svcaller"],
["depth", "variant_regions", "regions"],
["genome_resources", "variation", "lcr"], ["genome_resources", "variation", "polyx"],
["genome_resources", "variation", "encode_blacklist"],
["genome_resources", "rnaseq", "gene_bed"]],
[cwlout("sv_bin_rec", "record",
fields=[cwlout(["regions", "bins", "target"], ["File", "null"]),
cwlout(["regions", "bins", "antitarget"], ["File", "null"]),
cwlout(["regions", "bins", "gcannotated"], ["File", "null"]),
cwlout(["regions", "bins", "group"], ["string", "null"]),
cwlout("inherit")])],
"bcbio-vc", ["bedtools", "cnvkit"],
disk={"files": 1.5}, cores=1),
s("calculate_sv_coverage", "multi-parallel",
[["sv_bin_rec"]],
[cwlout("sv_rawcoverage_rec", "record",
fields=[cwlout(["depth", "bins", "target"], ["File", "null"]),
cwlout(["depth", "bins", "antitarget"], ["File", "null"]),
cwlout(["depth", "bins", "seq2c"], ["File", "null"]),
cwlout("inherit")])],
"bcbio-vc", ["mosdepth", "cnvkit", "seq2c"],
disk={"files": 1.5}),
s("normalize_sv_coverage", "multi-combined",
[["sv_rawcoverage_rec"]],
[cwlout("sv_coverage_rec", "record",
fields=[cwlout(["depth", "bins", "normalized"], ["File", "null"]),
cwlout(["depth", "bins", "background"], ["File", "null"]),
cwlout("inherit")])],
"bcbio-vc", ["cnvkit"],
disk={"files": 1.5}),
s("batch_for_sv", "multi-batch", sv_batch_inputs,
[cwlout("sv_batch_rec", "record")],
"bcbio-vc",
unlist=[["config", "algorithm", "svcaller"]]),
w("svcall", "multi-parallel", sv, []),
s("summarize_sv", "multi-combined",
[["sv_rec"]],
[cwlout(["sv", "calls"], {"type": "array", "items": ["File", "null"]}),
cwlout(["sv", "supplemental"], {"type": "array", "items": ["File"]}),
cwlout(["sv", "prioritize", "tsv"], {"type": "array", "items": ["File", "null"]}),
cwlout(["sv", "prioritize", "raw"], {"type": "array", "items": ["File", "null"]}),
cwlout(["svvalidate", "grading_summary"], ["File", "null"]),
cwlout(["svvalidate", "grading_plots"], {"type": "array", "items": ["File", "null"]})],
"bcbio-vc", ["bcbio-prioritize"], disk={"files": 1.0}, cores=1)]
final_outputs = [["sv", "calls"], ["svvalidate", "grading_summary"], ["sv", "prioritize", "tsv"],
["sv", "prioritize", "raw"], ["sv", "supplemental"]]
return steps, final_outputs | [
"def",
"_variant_sv",
"(",
"checkpoints",
")",
":",
"if",
"not",
"checkpoints",
".",
"get",
"(",
"\"sv\"",
")",
":",
"return",
"[",
"]",
",",
"[",
"]",
"sv",
"=",
"[",
"s",
"(",
"\"detect_sv\"",
",",
"\"batch-single\"",
",",
"[",
"[",
"\"sv_batch_rec\"... | 63.73913 | 26.717391 |
def rename(self, rename_pair_list, **kwargs):
"""重命名
:param rename_pair_list: 需要重命名的文件(夹)pair (路径,新名称)列表,如[('/aa.txt','bb.txt')]
:type rename_pair_list: list
"""
foo = []
for path, newname in rename_pair_list:
foo.append({'path': path,
'newname': newname
})
data = {'filelist': json.dumps(foo)}
params = {
'opera': 'rename'
}
url = 'http://{0}/api/filemanager'.format(BAIDUPAN_SERVER)
logging.debug('rename ' + str(data) + 'URL:' + url)
return self._request('filemanager', 'rename', url=url, data=data, extra_params=params, **kwargs) | [
"def",
"rename",
"(",
"self",
",",
"rename_pair_list",
",",
"*",
"*",
"kwargs",
")",
":",
"foo",
"=",
"[",
"]",
"for",
"path",
",",
"newname",
"in",
"rename_pair_list",
":",
"foo",
".",
"append",
"(",
"{",
"'path'",
":",
"path",
",",
"'newname'",
":"... | 32.619048 | 20.857143 |
def get_nulldata(self, rawtx):
"""Returns nulldata from <rawtx> as hexdata."""
tx = deserialize.tx(rawtx)
index, data = control.get_nulldata(tx)
return serialize.data(data) | [
"def",
"get_nulldata",
"(",
"self",
",",
"rawtx",
")",
":",
"tx",
"=",
"deserialize",
".",
"tx",
"(",
"rawtx",
")",
"index",
",",
"data",
"=",
"control",
".",
"get_nulldata",
"(",
"tx",
")",
"return",
"serialize",
".",
"data",
"(",
"data",
")"
] | 40 | 5.4 |
def asset_element_present(self, locator, msg=None):
"""
Hard assert for whether and element is present in the current window/frame
:params locator: the locator of the element to search for
:params msg: (Optional) msg explaining the difference
"""
e = driver.find_elements_by_locator(locator)
if len(e) == 0:
raise AssertionError("Element at %s was not found" % locator) | [
"def",
"asset_element_present",
"(",
"self",
",",
"locator",
",",
"msg",
"=",
"None",
")",
":",
"e",
"=",
"driver",
".",
"find_elements_by_locator",
"(",
"locator",
")",
"if",
"len",
"(",
"e",
")",
"==",
"0",
":",
"raise",
"AssertionError",
"(",
"\"Eleme... | 42.9 | 20.1 |
def execute_show(args, root_dir):
"""Print stderr and stdout of the current running process.
Args:
args['watch'] (bool): If True, we open a curses session and tail
the output live in the console.
root_dir (string): The path to the root directory the daemon is running in.
"""
key = None
if args.get('key'):
key = args['key']
status = command_factory('status')({}, root_dir=root_dir)
if key not in status['data'] or status['data'][key]['status'] != 'running':
print('No running process with this key, use `log` to show finished processes.')
return
# In case no key provided, we take the oldest running process
else:
status = command_factory('status')({}, root_dir=root_dir)
if isinstance(status['data'], str):
print(status['data'])
return
for k in sorted(status['data'].keys()):
if status['data'][k]['status'] == 'running':
key = k
break
if key is None:
print('No running process, use `log` to show finished processes.')
return
config_dir = os.path.join(root_dir, '.config/pueue')
# Get current pueueSTDout file from tmp
stdoutFile = os.path.join(config_dir, 'pueue_process_{}.stdout'.format(key))
stderrFile = os.path.join(config_dir, 'pueue_process_{}.stderr'.format(key))
stdoutDescriptor = open(stdoutFile, 'r')
stderrDescriptor = open(stderrFile, 'r')
running = True
# Continually print output with curses or just print once
if args['watch']:
# Initialize curses
stdscr = curses.initscr()
curses.noecho()
curses.cbreak()
curses.curs_set(2)
stdscr.keypad(True)
stdscr.refresh()
try:
# Update output every two seconds
while running:
stdscr.clear()
stdoutDescriptor.seek(0)
message = stdoutDescriptor.read()
stdscr.addstr(0, 0, message)
stdscr.refresh()
time.sleep(2)
except Exception:
# Curses cleanup
curses.nocbreak()
stdscr.keypad(False)
curses.echo()
curses.endwin()
else:
print('Stdout output:\n')
stdoutDescriptor.seek(0)
print(get_descriptor_output(stdoutDescriptor, key))
print('\n\nStderr output:\n')
stderrDescriptor.seek(0)
print(get_descriptor_output(stderrDescriptor, key)) | [
"def",
"execute_show",
"(",
"args",
",",
"root_dir",
")",
":",
"key",
"=",
"None",
"if",
"args",
".",
"get",
"(",
"'key'",
")",
":",
"key",
"=",
"args",
"[",
"'key'",
"]",
"status",
"=",
"command_factory",
"(",
"'status'",
")",
"(",
"{",
"}",
",",
... | 35.857143 | 18.671429 |
def add_user(self, user, group):
""" Adds user to a group """
if self.is_user_in(user, group):
raise UserAlreadyInAGroup
self.new_groups.add(group, user) | [
"def",
"add_user",
"(",
"self",
",",
"user",
",",
"group",
")",
":",
"if",
"self",
".",
"is_user_in",
"(",
"user",
",",
"group",
")",
":",
"raise",
"UserAlreadyInAGroup",
"self",
".",
"new_groups",
".",
"add",
"(",
"group",
",",
"user",
")"
] | 37 | 2.2 |
def get_version_string(version):
"""Translate a version tuple into a string.
Specify the __version__ as a tuple for more precise comparisons, and
translate it to __version_string__ for when that's needed.
This function exists primarily for easier unit testing.
Args:
version (Tuple[int, int, int, str]): three ints and an optional string.
Returns:
version_string (str): the tuple translated into a string per semver.org
"""
version_len = len(version)
if version_len == 3:
version_string = '%d.%d.%d' % version
elif version_len == 4:
version_string = '%d.%d.%d-%s' % version
else:
raise Exception(
'Version tuple is non-semver-compliant {} length!'.format(version_len)
)
return version_string | [
"def",
"get_version_string",
"(",
"version",
")",
":",
"version_len",
"=",
"len",
"(",
"version",
")",
"if",
"version_len",
"==",
"3",
":",
"version_string",
"=",
"'%d.%d.%d'",
"%",
"version",
"elif",
"version_len",
"==",
"4",
":",
"version_string",
"=",
"'%... | 31.04 | 24.08 |
def plot_ecg_grids(ecg_grids, fs, units, time_units, axes):
"Add ecg grids to the axes"
if ecg_grids == 'all':
ecg_grids = range(0, len(axes))
for ch in ecg_grids:
# Get the initial plot limits
auto_xlims = axes[ch].get_xlim()
auto_ylims= axes[ch].get_ylim()
(major_ticks_x, minor_ticks_x, major_ticks_y,
minor_ticks_y) = calc_ecg_grids(auto_ylims[0], auto_ylims[1],
units[ch], fs, auto_xlims[1],
time_units)
min_x, max_x = np.min(minor_ticks_x), np.max(minor_ticks_x)
min_y, max_y = np.min(minor_ticks_y), np.max(minor_ticks_y)
for tick in minor_ticks_x:
axes[ch].plot([tick, tick], [min_y, max_y], c='#ededed',
marker='|', zorder=1)
for tick in major_ticks_x:
axes[ch].plot([tick, tick], [min_y, max_y], c='#bababa',
marker='|', zorder=2)
for tick in minor_ticks_y:
axes[ch].plot([min_x, max_x], [tick, tick], c='#ededed',
marker='_', zorder=1)
for tick in major_ticks_y:
axes[ch].plot([min_x, max_x], [tick, tick], c='#bababa',
marker='_', zorder=2)
# Plotting the lines changes the graph. Set the limits back
axes[ch].set_xlim(auto_xlims)
axes[ch].set_ylim(auto_ylims) | [
"def",
"plot_ecg_grids",
"(",
"ecg_grids",
",",
"fs",
",",
"units",
",",
"time_units",
",",
"axes",
")",
":",
"if",
"ecg_grids",
"==",
"'all'",
":",
"ecg_grids",
"=",
"range",
"(",
"0",
",",
"len",
"(",
"axes",
")",
")",
"for",
"ch",
"in",
"ecg_grids... | 40.6 | 18.542857 |
def degree_reduction(degree, ctrlpts, **kwargs):
""" Computes the control points of the rational/non-rational spline after degree reduction.
Implementation of Eqs. 5.41 and 5.42 of The NURBS Book by Piegl & Tiller, 2nd Edition, p.220
Please note that degree reduction algorithm can only operate on Bezier shapes, i.e. curves, surfaces, volumes and
this implementation does NOT compute the maximum error tolerance as described via Eqs. 5.45 and 5.46 of The NURBS
Book by Piegl & Tiller, 2nd Edition, p.221 to determine whether the shape is degree reducible or not.
:param degree: degree
:type degree: int
:param ctrlpts: control points
:type ctrlpts: list, tuple
:return: control points of the degree-reduced shape
:rtype: list
"""
# Get keyword arguments
check_op = kwargs.get('check_num', True) # enable/disable input validation checks
if check_op:
if degree + 1 != len(ctrlpts):
raise GeomdlException("Degree reduction can only work with Bezier-type geometries")
if degree < 2:
raise GeomdlException("Input spline geometry must have degree > 1")
# Initialize variables
pts_red = [[0.0 for _ in range(len(ctrlpts[0]))] for _ in range(degree)]
# Fix start and end control points
pts_red[0] = ctrlpts[0]
pts_red[-1] = ctrlpts[-1]
# Find if the degree is an even or an odd number
p_is_odd = True if degree % 2 != 0 else False
# Compute control points of degree-reduced 1-dimensional shape
r = int((degree - 1) / 2)
# Handle a special case when degree = 2
if degree == 2:
r1 = r - 2
else:
# Determine r1 w.r.t. degree evenness
r1 = r - 1 if p_is_odd else r
for i in range(1, r1 + 1):
alpha = float(i) / float(degree)
pts_red[i] = [(c1 - (alpha * c2)) / (1 - alpha) for c1, c2 in zip(ctrlpts[i], pts_red[i - 1])]
for i in range(degree - 2, r1 + 2):
alpha = float(i + 1) / float(degree)
pts_red[i] = [(c1 - ((1 - alpha) * c2)) / alpha for c1, c2 in zip(ctrlpts[i + 1], pts_red[i + 1])]
if p_is_odd:
alpha = float(r) / float(degree)
left = [(c1 - (alpha * c2)) / (1 - alpha) for c1, c2 in zip(ctrlpts[r], pts_red[r - 1])]
alpha = float(r + 1) / float(degree)
right = [(c1 - ((1 - alpha) * c2)) / alpha for c1, c2 in zip(ctrlpts[r + 1], pts_red[r + 1])]
pts_red[r] = [0.5 * (pl + pr) for pl, pr in zip(left, right)]
# Return computed control points after degree reduction
return pts_red | [
"def",
"degree_reduction",
"(",
"degree",
",",
"ctrlpts",
",",
"*",
"*",
"kwargs",
")",
":",
"# Get keyword arguments",
"check_op",
"=",
"kwargs",
".",
"get",
"(",
"'check_num'",
",",
"True",
")",
"# enable/disable input validation checks",
"if",
"check_op",
":",
... | 42.305085 | 26.20339 |
def _negotiate_value(response):
"""Extracts the gssapi authentication token from the appropriate header"""
if hasattr(_negotiate_value, 'regex'):
regex = _negotiate_value.regex
else:
# There's no need to re-compile this EVERY time it is called. Compile
# it once and you won't have the performance hit of the compilation.
regex = re.compile('(?:.*,)*\s*Negotiate\s*([^,]*),?', re.I)
_negotiate_value.regex = regex
authreq = response.headers.get('www-authenticate', None)
if authreq:
match_obj = regex.search(authreq)
if match_obj:
return match_obj.group(1)
return None | [
"def",
"_negotiate_value",
"(",
"response",
")",
":",
"if",
"hasattr",
"(",
"_negotiate_value",
",",
"'regex'",
")",
":",
"regex",
"=",
"_negotiate_value",
".",
"regex",
"else",
":",
"# There's no need to re-compile this EVERY time it is called. Compile",
"# it once and y... | 35.888889 | 20 |
def get_batch(self, batch_id):
"""
Check to see if the requested batch_id is in the current chain. If so,
find the batch with the batch_id and return it. This is done by
finding the block and searching for the batch.
:param batch_id (string): The id of the batch requested.
:return:
The batch with the batch_id.
"""
payload = self._get_data_by_id(batch_id, 'commit_store_get_batch')
batch = Batch()
batch.ParseFromString(payload)
return batch | [
"def",
"get_batch",
"(",
"self",
",",
"batch_id",
")",
":",
"payload",
"=",
"self",
".",
"_get_data_by_id",
"(",
"batch_id",
",",
"'commit_store_get_batch'",
")",
"batch",
"=",
"Batch",
"(",
")",
"batch",
".",
"ParseFromString",
"(",
"payload",
")",
"return"... | 30.941176 | 22.235294 |
def updateFgiAnnotationFromFi(fgiContainer, fiContainer, largerBetter):
""" #TODO: docstring
:param fgiContainer:
:param fiContainer:
:param largerBetter:
"""
for fgi in listvalues(fgiContainer.container):
annotations = list()
for specfile, fiId in zip(fgi.specfiles, fgi.featureIds):
fi = fiContainer.getItem(specfile, fiId)
if not fi.isAnnotated:
continue
annotations.append([fi.score, fi.peptide, fi.sequence])
annotations.sort(reverse=largerBetter)
if len(annotations) > 0:
fgi.isAnnotated = True
fgi.score = annotations[0][0]
fgi.peptide = annotations[0][1]
fgi.sequence = annotations[0][2]
else:
fgi.isAnnotated = False | [
"def",
"updateFgiAnnotationFromFi",
"(",
"fgiContainer",
",",
"fiContainer",
",",
"largerBetter",
")",
":",
"for",
"fgi",
"in",
"listvalues",
"(",
"fgiContainer",
".",
"container",
")",
":",
"annotations",
"=",
"list",
"(",
")",
"for",
"specfile",
",",
"fiId",... | 35.5 | 13.090909 |
def upload(self, data, callback=None, content_type=None,
size=None):
'''
Upload a multi-part file for content to ingest. Returns a
temporary upload id that can be used as a datstream location.
:param data: content string, file-like object, or iterable with
content to be uploaded
:param callback: optional callback method to monitor the upload;
see :mod:`requests-toolbelt` documentation for more
details: https://toolbelt.readthedocs.org/en/latest/user.html#uploading-data
:param content_type: optional content type of the data
:param size: optional size of the data; required when using an
iterable for the data
:returns: upload id on success
'''
url = 'upload'
# fedora only expects content uploaded as multipart file;
# make string content into a file-like object so requests.post
# sends it the way Fedora expects.
# NOTE: checking for both python 2.x next method and
# python 3.x __next__ to test if data is iteraable
if not hasattr(data, 'read') and \
not (hasattr(data, '__next__') or hasattr(data, 'next')):
data = six.BytesIO(force_bytes(data))
# if data is an iterable, wrap in a readable iterator that
# requests-toolbelt can read data from
elif not hasattr(data, 'read') and \
(hasattr(data, '__next__') or hasattr(data, 'next')):
if size is None:
raise Exception('Cannot upload iterable with unknown size')
data = ReadableIterator(data, size)
# use requests-toolbelt multipart encoder to avoid reading
# the full content of large files into memory
menc = MultipartEncoder(fields={'file': ('file', data, content_type)})
if callback is not None:
menc = MultipartEncoderMonitor(menc, callback)
headers = {'Content-Type': menc.content_type}
if size:
# latest version of requests requires str or bytes, not int
if not isinstance(size, six.string_types):
size = str(size)
headers['Content-Length'] = size
try:
response = self.post(url, data=menc, headers=headers)
except OverflowError:
# Python __len__ uses integer so it is limited to system maxint,
# and requests and requests-toolbelt use len() throughout.
# This results in an overflow error when trying to upload a file
# larger than system maxint (2GB on 32-bit OSes).
# See http://bugs.python.org/issue12159
msg = 'upload content larger than system maxint (32-bit OS limitation)'
logger.error('OverflowError: %s', msg)
raise OverflowError(msg)
if response.status_code == requests.codes.accepted:
return response.text.strip() | [
"def",
"upload",
"(",
"self",
",",
"data",
",",
"callback",
"=",
"None",
",",
"content_type",
"=",
"None",
",",
"size",
"=",
"None",
")",
":",
"url",
"=",
"'upload'",
"# fedora only expects content uploaded as multipart file;",
"# make string content into a file-like ... | 44.984375 | 22.484375 |
def ndb_put(self, entity):
"""Like put(), but for NDB entities."""
assert ndb is not None and isinstance(entity, ndb.Model)
self.ndb_puts.append(entity) | [
"def",
"ndb_put",
"(",
"self",
",",
"entity",
")",
":",
"assert",
"ndb",
"is",
"not",
"None",
"and",
"isinstance",
"(",
"entity",
",",
"ndb",
".",
"Model",
")",
"self",
".",
"ndb_puts",
".",
"append",
"(",
"entity",
")"
] | 40.25 | 10.5 |
def samples_nb(message, wpm, framerate=FRAMERATE, word_spaced=False):
"""
Calculate the number of samples for a given word at a given framerate (samples / seconds)
>>> samples_nb('SOS', 15)
23814
"""
return int(duration(message, wpm, output='float', word_spaced=word_spaced) / 1000.0 * framerate) | [
"def",
"samples_nb",
"(",
"message",
",",
"wpm",
",",
"framerate",
"=",
"FRAMERATE",
",",
"word_spaced",
"=",
"False",
")",
":",
"return",
"int",
"(",
"duration",
"(",
"message",
",",
"wpm",
",",
"output",
"=",
"'float'",
",",
"word_spaced",
"=",
"word_s... | 39.25 | 28 |
def get_rowspanned_columns(self):
"""Return a dictionary mapping column indices to the number of columns
spanned."""
spanned_columns = {}
current_row_index = self._index
current_row_cols = sum(cell.colspan for cell in self)
prev_rows = iter(reversed(self.section[:current_row_index]))
while current_row_cols < self.section.num_columns:
row = next(prev_rows)
min_rowspan = current_row_index - int(row._index)
if row.maximum_rowspan > min_rowspan:
for cell in (c for c in row if c.rowspan > min_rowspan):
col_index = int(cell.column_index)
spanned_columns[col_index] = cell.colspan
current_row_cols += cell.colspan
return spanned_columns | [
"def",
"get_rowspanned_columns",
"(",
"self",
")",
":",
"spanned_columns",
"=",
"{",
"}",
"current_row_index",
"=",
"self",
".",
"_index",
"current_row_cols",
"=",
"sum",
"(",
"cell",
".",
"colspan",
"for",
"cell",
"in",
"self",
")",
"prev_rows",
"=",
"iter"... | 49.75 | 13.3125 |
def _accumulate(self, old_accum, next_val):
"""
Implement exponential moving average
"""
return old_accum * (1 - self.alpha) + next_val * self.alpha | [
"def",
"_accumulate",
"(",
"self",
",",
"old_accum",
",",
"next_val",
")",
":",
"return",
"old_accum",
"*",
"(",
"1",
"-",
"self",
".",
"alpha",
")",
"+",
"next_val",
"*",
"self",
".",
"alpha"
] | 35.2 | 6.8 |
def get_timestamp(d):
"""
Returns a UTC timestamp for a C{datetime.datetime} object.
@type d: C{datetime.datetime}
@return: UTC timestamp.
@rtype: C{float}
@see: Inspiration taken from the U{Intertwingly blog
<http://intertwingly.net/blog/2007/09/02/Dealing-With-Dates>}.
"""
if isinstance(d, datetime.date) and not isinstance(d, datetime.datetime):
d = datetime.datetime.combine(d, datetime.time(0, 0, 0, 0))
msec = str(d.microsecond).rjust(6).replace(' ', '0')
return float('%s.%s' % (calendar.timegm(d.utctimetuple()), msec)) | [
"def",
"get_timestamp",
"(",
"d",
")",
":",
"if",
"isinstance",
"(",
"d",
",",
"datetime",
".",
"date",
")",
"and",
"not",
"isinstance",
"(",
"d",
",",
"datetime",
".",
"datetime",
")",
":",
"d",
"=",
"datetime",
".",
"datetime",
".",
"combine",
"(",... | 35.75 | 22.25 |
def element_to_unicode(element):
"""Serialize an XML element into a unicode string.
This should work the same on Python2 and Python3 and with all
:etree:`ElementTree` implementations.
:Parameters:
- `element`: the XML element to serialize
:Types:
- `element`: :etree:`ElementTree.Element`
"""
if hasattr(ElementTree, 'tounicode'):
# pylint: disable=E1103
return ElementTree.tounicode("element")
elif sys.version_info.major < 3:
return unicode(ElementTree.tostring(element))
else:
return ElementTree.tostring(element, encoding = "unicode") | [
"def",
"element_to_unicode",
"(",
"element",
")",
":",
"if",
"hasattr",
"(",
"ElementTree",
",",
"'tounicode'",
")",
":",
"# pylint: disable=E1103",
"return",
"ElementTree",
".",
"tounicode",
"(",
"\"element\"",
")",
"elif",
"sys",
".",
"version_info",
".",
"maj... | 33.722222 | 15.333333 |
def scale_streaming_endpoint(access_token, streaming_endpoint_id, scale_units):
'''Scale Media Service Streaming Endpoint.
Args:
access_token (str): A valid Azure authentication token.
streaming_endpoint_id (str): A Media Service Streaming Endpoint ID.
scale_units (str): A Media Service Scale Units Number.
Returns:
HTTP response. JSON body.
'''
path = '/StreamingEndpoints'
full_path = ''.join([path, "('", streaming_endpoint_id, "')", "/Scale"])
full_path_encoded = urllib.parse.quote(full_path, safe='')
endpoint = ''.join([ams_rest_endpoint, full_path_encoded])
body = '{"scaleUnits": "' + str(scale_units) + '"}'
return do_ams_post(endpoint, full_path_encoded, body, access_token) | [
"def",
"scale_streaming_endpoint",
"(",
"access_token",
",",
"streaming_endpoint_id",
",",
"scale_units",
")",
":",
"path",
"=",
"'/StreamingEndpoints'",
"full_path",
"=",
"''",
".",
"join",
"(",
"[",
"path",
",",
"\"('\"",
",",
"streaming_endpoint_id",
",",
"\"')... | 43.764706 | 25.764706 |
def strip_mentions_links(self, text):
""" Strips Mentions and Links
:param text: Text to be stripped from.
"""
#print 'Before:', text
new_text = [word for word in text.split() if not self.is_mention_line(word)]
#print 'After:', u' '.join(new_text)
return u' '.join(new_text) | [
"def",
"strip_mentions_links",
"(",
"self",
",",
"text",
")",
":",
"#print 'Before:', text",
"new_text",
"=",
"[",
"word",
"for",
"word",
"in",
"text",
".",
"split",
"(",
")",
"if",
"not",
"self",
".",
"is_mention_line",
"(",
"word",
")",
"]",
"#print 'Aft... | 36.333333 | 13 |
def _send_request(self, job_type, params={}):
"""
Construct and submit a structured/authenticated request.
## Arguments
* `job_type` (str): The job type identifier to use.
## Keyword Arguments
* `params` (dict): Any additional entries to include in the POST
request.
## Returns
* `r` (requests.Response): The response from the server.
"""
params["wsid"] = params.get("wsid", self.userid)
params["pw"] = params.get("pw", self.password)
path = os.path.join(self.base_url, job_type)
if self.request_type == 'GET':
r = requests.get(path, params=params)
elif self.request_type == 'POST':
r = requests.post(path, data=params)
else:
raise ValueError('`resest_type` is invalid!')
code = r.status_code
if code != 200:
raise Exception("%s failed with status: %d"%(job_type, code))
return r | [
"def",
"_send_request",
"(",
"self",
",",
"job_type",
",",
"params",
"=",
"{",
"}",
")",
":",
"params",
"[",
"\"wsid\"",
"]",
"=",
"params",
".",
"get",
"(",
"\"wsid\"",
",",
"self",
".",
"userid",
")",
"params",
"[",
"\"pw\"",
"]",
"=",
"params",
... | 28.235294 | 22.764706 |
async def destroy(self):
"""Destroy the whole gui and music player"""
self.logger.debug("destroy command")
self.state = 'destroyed'
await self.set_topic("")
self.nowplayinglog.debug("---")
self.nowplayingauthorlog.debug("---")
self.nowplayingsourcelog.debug("---")
self.timelog.debug(_timebar.make_timebar())
self.prev_time = "---"
self.statuslog.debug("Destroying")
self.mready = False
self.vready = False
self.pause_time = None
self.loop_type = 'off'
if self.vclient:
try:
await self.vclient.disconnect()
except Exception as e:
logger.error(e)
pass
if self.streamer:
try:
self.streamer.stop()
except:
pass
self.vclient = None
self.vchannel = None
self.streamer = None
self.current_duration = 0
self.current_download_elapsed = 0
self.is_live = False
self.queue = []
self.prev_queue = []
if self.embed:
await self.embed.delete()
self.embed = None
self.clear_cache() | [
"async",
"def",
"destroy",
"(",
"self",
")",
":",
"self",
".",
"logger",
".",
"debug",
"(",
"\"destroy command\"",
")",
"self",
".",
"state",
"=",
"'destroyed'",
"await",
"self",
".",
"set_topic",
"(",
"\"\"",
")",
"self",
".",
"nowplayinglog",
".",
"deb... | 25.826087 | 15.956522 |
def get_first_sounds(self):
"""
The first first sound of each word of the ShortLine.
:return:
"""
self.first_sounds = []
for viisuord in self.phonological_features_text:
self.first_sounds.append(viisuord[0]) | [
"def",
"get_first_sounds",
"(",
"self",
")",
":",
"self",
".",
"first_sounds",
"=",
"[",
"]",
"for",
"viisuord",
"in",
"self",
".",
"phonological_features_text",
":",
"self",
".",
"first_sounds",
".",
"append",
"(",
"viisuord",
"[",
"0",
"]",
")"
] | 32.5 | 11.5 |
def clean_indicators(indicators):
"""Remove any extra details from indicators."""
output = list()
for indicator in indicators:
strip = ['http://', 'https://']
for item in strip:
indicator = indicator.replace(item, '')
indicator = indicator.strip('.').strip()
parts = indicator.split('/')
if len(parts) > 0:
indicator = parts.pop(0)
output.append(indicator)
output = list(set(output))
return output | [
"def",
"clean_indicators",
"(",
"indicators",
")",
":",
"output",
"=",
"list",
"(",
")",
"for",
"indicator",
"in",
"indicators",
":",
"strip",
"=",
"[",
"'http://'",
",",
"'https://'",
"]",
"for",
"item",
"in",
"strip",
":",
"indicator",
"=",
"indicator",
... | 34 | 9.5 |
def summarise_pdfs(pdfs):
"""
Collate the first page from each of the PDFs provided into a single PDF.
:param pdfs:
The contents of several PDF files.
:type pdfs:
list of str
:returns:
The contents of single PDF, which can be written directly to disk.
"""
# Ignore None.
print('Summarising {0} articles ({1} had errors)'.format(
len(pdfs), pdfs.count(None)))
pdfs = [_ for _ in pdfs if _ is not None]
summary = PdfFileWriter()
for pdf in pdfs:
summary.addPage(PdfFileReader(StringIO(pdf)).getPage(0))
return summary | [
"def",
"summarise_pdfs",
"(",
"pdfs",
")",
":",
"# Ignore None.",
"print",
"(",
"'Summarising {0} articles ({1} had errors)'",
".",
"format",
"(",
"len",
"(",
"pdfs",
")",
",",
"pdfs",
".",
"count",
"(",
"None",
")",
")",
")",
"pdfs",
"=",
"[",
"_",
"for",... | 25.478261 | 22.26087 |
def overpass_request(data, pause_duration=None, timeout=180, error_pause_duration=None):
    """
    Send a request to the Overpass API via HTTP POST and return the JSON
    response.

    Parameters
    ----------
    data : dict or OrderedDict
        key-value pairs of parameters to post to the API
    pause_duration : int
        how long to pause in seconds before requests, if None, will query API
        status endpoint to find when next slot is available
    timeout : int
        the timeout interval for the requests library
    error_pause_duration : int
        how long to pause in seconds before re-trying requests if error

    Returns
    -------
    dict
    """
    # define the Overpass API URL, then construct a GET-style URL as a string to
    # hash to look up/save to cache
    url = 'http://overpass-api.de/api/interpreter'
    prepared_url = requests.Request('GET', url, params=data).prepare().url
    cached_response_json = get_from_cache(prepared_url)

    if cached_response_json is not None:
        # found this request in the cache, just return it instead of making a
        # new HTTP call
        return cached_response_json
    else:
        # if this URL is not already in the cache, pause, then request it.
        # Bug fix: this_pause_duration was previously unbound (NameError) when
        # an explicit pause_duration was passed by the caller.
        if pause_duration is None:
            this_pause_duration = get_pause_duration()
        else:
            this_pause_duration = pause_duration
        log('Pausing {:,.2f} seconds before making API POST request'.format(this_pause_duration))
        time.sleep(this_pause_duration)
        start_time = time.time()
        log('Posting to {} with timeout={}, "{}"'.format(url, timeout, data))
        response = requests.post(url, data=data, timeout=timeout, headers=get_http_headers())

        # get the response size and the domain, log result
        size_kb = len(response.content) / 1000.
        # inline flags must lead the pattern in modern Python regex
        domain = re.findall(r'(?s)//(.*?)/', url)[0]
        log('Downloaded {:,.1f}KB from {} in {:,.2f} seconds'.format(size_kb, domain, time.time()-start_time))

        try:
            response_json = response.json()
            if 'remark' in response_json:
                # Bug fix: level=lg.WARNING was previously passed to
                # str.format() (where it was silently ignored) instead of log().
                log('Server remark: "{}"'.format(response_json['remark']), level=lg.WARNING)
            save_to_cache(prepared_url, response_json)
        except Exception:
            # 429 is 'too many requests' and 504 is 'gateway timeout' from server
            # overload - handle these errors by recursively calling
            # overpass_request until we get a valid response
            if response.status_code in [429, 504]:
                # pause for error_pause_duration seconds before re-trying request
                if error_pause_duration is None:
                    error_pause_duration = get_pause_duration()
                log('Server at {} returned status code {} and no JSON data. Re-trying request in {:.2f} seconds.'.format(domain,
                                                                                                                        response.status_code,
                                                                                                                        error_pause_duration),
                    level=lg.WARNING)
                time.sleep(error_pause_duration)
                response_json = overpass_request(data=data, pause_duration=pause_duration, timeout=timeout)
            # else, this was an unhandled status_code, throw an exception
            else:
                log('Server at {} returned status code {} and no JSON data'.format(domain, response.status_code), level=lg.ERROR)
                raise Exception('Server returned no JSON data.\n{} {}\n{}'.format(response, response.reason, response.text))
        return response_json
"def",
"overpass_request",
"(",
"data",
",",
"pause_duration",
"=",
"None",
",",
"timeout",
"=",
"180",
",",
"error_pause_duration",
"=",
"None",
")",
":",
"# define the Overpass API URL, then construct a GET-style URL as a string to",
"# hash to look up/save to cache",
"url"... | 49.986486 | 30.851351 |
def parse_rules(self):
        """
        Build the list of rules defined for the app, applying filter
        substitution when command-line filters are present.
        """
        # Load patterns: an app with no defined patterns is an error.
        try:
            rule_options = self.config.items('rules')
        except configparser.NoSectionError:
            raise LogRaptorConfigError("the app %r has no defined rules!" % self.name)

        rules = []
        for option, value in rule_options:
            # Join multi-line pattern declarations into a single line.
            pattern = value.replace('\n', '')
            if not self.args.filters:
                # No filters: fill every filter field with its base pattern.
                full_pattern = string.Template(pattern).safe_substitute(self.fields)
                rules.append(AppRule(option, full_pattern, self.args))
                continue
            for filter_group in self.args.filters:
                substituted, filter_keys = exact_sub(pattern, filter_group)
                substituted = string.Template(substituted).safe_substitute(self.fields)
                if len(filter_keys) >= len(filter_group):
                    rules.append(AppRule(option, substituted, self.args, filter_keys))
                elif self._thread:
                    rules.append(AppRule(option, substituted, self.args))
        return rules
"def",
"parse_rules",
"(",
"self",
")",
":",
"# Load patterns: an app is removed when has no defined patterns.\r",
"try",
":",
"rule_options",
"=",
"self",
".",
"config",
".",
"items",
"(",
"'rules'",
")",
"except",
"configparser",
".",
"NoSectionError",
":",
"raise",... | 49.481481 | 25.259259 |
def prepare_data(problem, hparams, params, config):
  """Construct the EVAL input pipeline and return one reshaped batch.

  Returns:
    (inputs, labels, features) with inputs/labels reshaped to the static
    batch size from hparams.
  """
  input_fn = problem.make_estimator_input_fn(
      tf.estimator.ModeKeys.EVAL, hparams, force_repeat=True)
  dataset = input_fn(params, config)
  features, _ = dataset.make_one_shot_iterator().get_next()
  # NOTE: the "targets" feature is used as the model input here, and
  # "inputs" holds the labels (per this problem's feature layout).
  inputs = tf.to_float(features["targets"])
  labels = features["inputs"]
  static_shape = inputs.shape.as_list()
  inputs = tf.reshape(inputs, [hparams.batch_size] + static_shape[1:])
  labels = tf.reshape(labels, [hparams.batch_size])
  return inputs, labels, features
"def",
"prepare_data",
"(",
"problem",
",",
"hparams",
",",
"params",
",",
"config",
")",
":",
"input_fn",
"=",
"problem",
".",
"make_estimator_input_fn",
"(",
"tf",
".",
"estimator",
".",
"ModeKeys",
".",
"EVAL",
",",
"hparams",
",",
"force_repeat",
"=",
... | 47 | 11.416667 |
def _weight_drop(module, weights, dropout):
"""
Helper for `WeightDrop`.
"""
for name_w in weights:
w = getattr(module, name_w)
del module._parameters[name_w]
module.register_parameter(name_w + '_raw', Parameter(w))
original_module_forward = module.forward
def forward(*args, **kwargs):
for name_w in weights:
raw_w = getattr(module, name_w + '_raw')
w = torch.nn.functional.dropout(raw_w, p=dropout, training=module.training)
setattr(module, name_w, w)
return original_module_forward(*args)
setattr(module, 'forward', forward) | [
"def",
"_weight_drop",
"(",
"module",
",",
"weights",
",",
"dropout",
")",
":",
"for",
"name_w",
"in",
"weights",
":",
"w",
"=",
"getattr",
"(",
"module",
",",
"name_w",
")",
"del",
"module",
".",
"_parameters",
"[",
"name_w",
"]",
"module",
".",
"regi... | 29.333333 | 16.571429 |
def get_many(self, type: Type[T], query: Mapping[str, Any], context: PipelineContext = None) -> Iterable[T]:
        """Gets a query from the data source, which contains a request for multiple objects.

        Abstract: subclasses are expected to override this; the base
        implementation does nothing and returns None.

        Args:
            type: The type of the objects being requested.
            query: The query being requested (contains a request for multiple objects).
            context: The context for the extraction (mutable).

        Returns:
            The requested objects.
        """
        pass
"def",
"get_many",
"(",
"self",
",",
"type",
":",
"Type",
"[",
"T",
"]",
",",
"query",
":",
"Mapping",
"[",
"str",
",",
"Any",
"]",
",",
"context",
":",
"PipelineContext",
"=",
"None",
")",
"->",
"Iterable",
"[",
"T",
"]",
":",
"pass"
] | 39.545455 | 27.454545 |
def get_function(self, name: str) -> AbiFunction or None:
        """
        Build an AbiFunction for the entry in this ABI matching the given name.

        :param name: the function name in abi file
        :return: an AbiFunction constructed from the matching entry, or None
                 when no entry with that name exists
        """
        matched = next(
            (entry for entry in self.functions if entry['name'] == name),
            None,
        )
        if matched is None:
            return None
        return AbiFunction(matched['name'], matched['parameters'], matched.get('returntype', ''))
"def",
"get_function",
"(",
"self",
",",
"name",
":",
"str",
")",
"->",
"AbiFunction",
"or",
"None",
":",
"for",
"func",
"in",
"self",
".",
"functions",
":",
"if",
"func",
"[",
"'name'",
"]",
"==",
"name",
":",
"return",
"AbiFunction",
"(",
"func",
"... | 46.090909 | 24.090909 |
def get_bookmark(self, bookmark_id):
        """
        Fetch one bookmark, identified by `bookmark_id`.

        The requested bookmark must belong to the current user.

        :param bookmark_id: ID of the bookmark to retrieve.
        """
        endpoint = self._generate_url('bookmarks/{0}'.format(bookmark_id))
        return self.get(endpoint)
"def",
"get_bookmark",
"(",
"self",
",",
"bookmark_id",
")",
":",
"url",
"=",
"self",
".",
"_generate_url",
"(",
"'bookmarks/{0}'",
".",
"format",
"(",
"bookmark_id",
")",
")",
"return",
"self",
".",
"get",
"(",
"url",
")"
] | 33.6 | 18.6 |
def getUsers(context, roles, allow_empty=True):
    """ Present a DisplayList containing users in the specified
    list of roles, sorted by display name.

    :param context: context used to acquire the portal_membership tool
    :param roles: roles to search members for
    :param allow_empty: prepend an empty ('', '') entry when True
    """
    mtool = getToolByName(context, 'portal_membership')
    pairs = [['', '']] if allow_empty else []
    for user in mtool.searchForMembers(roles=roles):
        uid = user.getId()
        # Fall back to the user id when no full name is set.
        fullname = user.getProperty('fullname') or uid
        pairs.append((uid, fullname))
    # Key-based sort replaces the Python-2-only cmp-style sort
    # (pairs.sort(lambda x, y: cmp(x[1], y[1]))); ordering is identical.
    pairs.sort(key=lambda pair: pair[1])
    return DisplayList(pairs)
"def",
"getUsers",
"(",
"context",
",",
"roles",
",",
"allow_empty",
"=",
"True",
")",
":",
"mtool",
"=",
"getToolByName",
"(",
"context",
",",
"'portal_membership'",
")",
"pairs",
"=",
"allow_empty",
"and",
"[",
"[",
"''",
",",
"''",
"]",
"]",
"or",
"... | 35.933333 | 9.266667 |
def _walk_through(job_dir, display_progress=False):
    '''
    Walk through the job dir and yield (jid, job, t_path, final) for each job
    '''
    serial = salt.payload.Serial(__opts__)

    for top in os.listdir(job_dir):
        t_path = os.path.join(job_dir, top)

        for final in os.listdir(t_path):
            load_path = os.path.join(t_path, final, '.load.p')

            # Bug fix: the original opened and deserialized the load file
            # *before* checking that it exists (raising on missing files and
            # making the isfile guard useless), and then loaded it a second
            # time. Check first, load once.
            if not os.path.isfile(load_path):
                continue

            with salt.utils.files.fopen(load_path, 'rb') as rfh:
                job = serial.load(rfh)

            jid = job['jid']
            if display_progress:
                __jid_event__.fire_event(
                    {'message': 'Found JID {0}'.format(jid)},
                    'progress'
                )
            yield jid, job, t_path, final
"def",
"_walk_through",
"(",
"job_dir",
",",
"display_progress",
"=",
"False",
")",
":",
"serial",
"=",
"salt",
".",
"payload",
".",
"Serial",
"(",
"__opts__",
")",
"for",
"top",
"in",
"os",
".",
"listdir",
"(",
"job_dir",
")",
":",
"t_path",
"=",
"os"... | 32.846154 | 16.230769 |
def get_app(opts):
    '''
    Returns a WSGI app and a configuration dictionary
    '''
    # rest_cherrypy options live under this module's parent package name
    apiopts = opts.get(__name__.rsplit('.', 2)[-2], {})

    # Expose Salt and salt-api config on the global CherryPy config dict
    cherrypy.config['saltopts'] = opts
    cherrypy.config['apiopts'] = apiopts

    root = API()                 # cherrypy app
    cpyopts = root.get_conf()    # cherrypy app opts

    return root, apiopts, cpyopts
"def",
"get_app",
"(",
"opts",
")",
":",
"apiopts",
"=",
"opts",
".",
"get",
"(",
"__name__",
".",
"rsplit",
"(",
"'.'",
",",
"2",
")",
"[",
"-",
"2",
"]",
",",
"{",
"}",
")",
"# rest_cherrypy opts",
"# Add Salt and salt-api config options to the main Cherry... | 30.714286 | 22.857143 |
def create(self, validated_data):
        """ This is a standard method called indirectly by calling
        'save' on the serializer.

        This method expects the 'parent_field' and 'parent_instance' to
        be included in the Serializer context.
        """
        parent_field = self.context.get('parent_field')
        parent_instance = self.context.get('parent_instance')
        if parent_field and parent_instance:
            # Attach the parent relation before constructing the model.
            validated_data.update({parent_field: parent_instance})

        instance = self.Meta.model(**validated_data)
        instance.full_clean()
        instance.save()
        return instance
"def",
"create",
"(",
"self",
",",
"validated_data",
")",
":",
"if",
"self",
".",
"context",
".",
"get",
"(",
"'parent_field'",
")",
"and",
"self",
".",
"context",
".",
"get",
"(",
"'parent_instance'",
")",
":",
"validated_data",
".",
"update",
"(",
"{",... | 38.8125 | 12 |
def starter(comm_q, *args, **kwargs):
    """Start the interchange process

    The executor is expected to call this function. The args, kwargs match that of the Interchange.__init__
    """
    # logger = multiprocessing.get_logger()
    interchange = Interchange(*args, **kwargs)
    # Report the bound worker port back to the parent before serving.
    comm_q.put(interchange.worker_port)
    interchange.start()
    logger.debug("Port information sent back to client")
"def",
"starter",
"(",
"comm_q",
",",
"*",
"args",
",",
"*",
"*",
"kwargs",
")",
":",
"# logger = multiprocessing.get_logger()",
"ic",
"=",
"Interchange",
"(",
"*",
"args",
",",
"*",
"*",
"kwargs",
")",
"comm_q",
".",
"put",
"(",
"ic",
".",
"worker_port"... | 36.7 | 16.8 |
def fileinfo(path):
    '''
    Return information on a file located on the Moose

    CLI Example:

    .. code-block:: bash

        salt '*' moosefs.fileinfo /path/to/dir/
    '''
    cmd = 'mfsfileinfo ' + path
    # Result maps each "chunk N" header to its metadata plus copy entries.
    ret = {}
    chunknum = ''
    out = __salt__['cmd.run_all'](cmd, python_shell=False)

    output = out['stdout'].splitlines()
    for line in output:
        if not line:
            continue
        # Lines containing '/' are chunk headers; the left of '/' holds
        # "chunk N: <loc>" and the right holds "(id:X ver:Y)".
        if '/' in line:
            comps = line.split('/')
            chunknum = comps[0].strip().split(':')
            meta = comps[1].strip().split(' ')
            chunk = chunknum[0].replace('chunk ', '')
            loc = chunknum[1].strip()
            id_ = meta[0].replace('(id:', '')
            ver = meta[1].replace(')', '').replace('ver:', '')
            ret[chunknum[0]] = {
                'chunk': chunk,
                'loc': loc,
                'id': id_,
                'ver': ver,
            }
        # "copy N: <ip>:<port>" lines are attached to the most recently
        # seen chunk header (chunknum carries over between iterations).
        if 'copy' in line:
            copyinfo = line.strip().split(':')
            ret[chunknum[0]][copyinfo[0]] = {
                'copy': copyinfo[0].replace('copy ', ''),
                'ip': copyinfo[1].strip(),
                'port': copyinfo[2],
            }
    return ret
"def",
"fileinfo",
"(",
"path",
")",
":",
"cmd",
"=",
"'mfsfileinfo '",
"+",
"path",
"ret",
"=",
"{",
"}",
"chunknum",
"=",
"''",
"out",
"=",
"__salt__",
"[",
"'cmd.run_all'",
"]",
"(",
"cmd",
",",
"python_shell",
"=",
"False",
")",
"output",
"=",
"o... | 26.795455 | 18.840909 |
def __dumptable(self, table):
        """ Dumps the symbol table to stdout, one entry per line,
        for debugging purposes.
        """
        for x in table.table.keys():
            sys.stdout.write("{0}\t<--- {1} {2}".format(x, table[x], type(table[x])))
            if isinstance(table[x], ID):
                # Bug fix: sys.stdout was being *called* as a function
                # (sys.stdout(...)), which raises TypeError; write() is meant.
                sys.stdout.write(" {0}".format(table[x].value))
            sys.stdout.write("\n")
"def",
"__dumptable",
"(",
"self",
",",
"table",
")",
":",
"for",
"x",
"in",
"table",
".",
"table",
".",
"keys",
"(",
")",
":",
"sys",
".",
"stdout",
".",
"write",
"(",
"\"{0}\\t<--- {1} {2}\"",
".",
"format",
"(",
"x",
",",
"table",
"[",
"x",
"]",... | 39.555556 | 10.444444 |
def submit(self, func, *args, **kwargs):
        """Submit a function to the pool, `self.submit(function,arg1,arg2,arg3=3)`

        Creates a NewFuture for the call, enqueues a work item for the worker
        threads, and returns the future. Raises RuntimeError if the pool has
        already been shut down.
        """
        # The lock guards the shutdown check and enqueue so no work item can
        # slip in after shutdown has started.
        with self._shutdown_lock:
            if self._shutdown:
                raise RuntimeError("cannot schedule new futures after shutdown")
            # "callback" is reserved: it is popped from kwargs and attached to
            # the future rather than being passed through to func.
            callback = kwargs.pop("callback", self.default_callback)
            future = NewFuture(
                self._timeout,
                args,
                kwargs,
                callback=callback,
                catch_exception=self.catch_exception,
            )
            w = _WorkItem(future, func, args, kwargs)
            self._work_queue.put(w)
            self._adjust_thread_count()
            self._all_futures.add(future)
            return future
"def",
"submit",
"(",
"self",
",",
"func",
",",
"*",
"args",
",",
"*",
"*",
"kwargs",
")",
":",
"with",
"self",
".",
"_shutdown_lock",
":",
"if",
"self",
".",
"_shutdown",
":",
"raise",
"RuntimeError",
"(",
"\"cannot schedule new futures after shutdown\"",
"... | 38.631579 | 13.736842 |
async def destroy_tournament(self, t: Tournament):
        """ Permanently remove a tournament from Challonge

        |methcoro|

        Note:
            |from_api| Deletes a tournament along with all its associated records. There is no undo, so use with care!

        Raises:
            APIException

        """
        endpoint = 'tournaments/{}'.format(t.id)
        await self.connection('DELETE', endpoint)
        # Drop the local reference if this tournament was being tracked.
        if t in self.tournaments:
            self.tournaments.remove(t)
"async",
"def",
"destroy_tournament",
"(",
"self",
",",
"t",
":",
"Tournament",
")",
":",
"await",
"self",
".",
"connection",
"(",
"'DELETE'",
",",
"'tournaments/{}'",
".",
"format",
"(",
"t",
".",
"id",
")",
")",
"if",
"t",
"in",
"self",
".",
"tournam... | 29.866667 | 25.133333 |
def prt_report_grp0(self, prt=sys.stdout):
        """Print full GO/gene report without grouping.

        Writes the summary line, the grouped GO IDs, and the gene ASCII-art
        sections to the given stream; returns (name, section marks).
        """
        summaryline = self.str_summaryline()
        kws_grp = {'use_sections':False,
                   'hdrgo_prt':False,
                   'sortby':lambda nt: [-1*nt.dcnt, nt.depth]}
        # Print grouped GO IDs
        prt.write("{SUMMARY}\n".format(SUMMARY=summaryline))
        # Bug fix: honor the 'prt' argument; this call previously wrote the
        # grouped GO IDs to sys.stdout regardless of the stream passed in.
        self.prt_gos_grouped(prt, **kws_grp)
        # genes
        genes = sorted(self.gene2gos.keys())
        prt.write("\n\n{SUMMARY}\n\n".format(SUMMARY=summaryline))
        self.prt_gene_aart(genes, prt)
        # Sort genes
        prt.write("\n\n{SUMMARY}\n\n".format(SUMMARY=summaryline))
        self.prt_gene_aart_details(genes, prt)
        return (self.name, self.get_section_marks())
"def",
"prt_report_grp0",
"(",
"self",
",",
"prt",
"=",
"sys",
".",
"stdout",
")",
":",
"summaryline",
"=",
"self",
".",
"str_summaryline",
"(",
")",
"kws_grp",
"=",
"{",
"'use_sections'",
":",
"False",
",",
"'hdrgo_prt'",
":",
"False",
",",
"'sortby'",
... | 45.294118 | 11.352941 |
def _forceRefreshMinMax(self):
        """ Refreshes the min/max config tree items from the axis' current
            target range, choosing a display precision that makes pan/zoom
            changes visible.
        """
        #logger.debug("_forceRefreshMinMax", stack_info=True)
        # Set the precision by looking at how many decimals are needed to show the difference
        # between the minimum and maximum, given the maximum. E.g. if min = 0.04 and max = 0.07,
        # we would only need zero decimals behind the point as we can write the range as
        # [4e-2, 7e-2]. However if min = 1.04 and max = 1.07, we need 2 decimals behind the point.
        # So, while the range is the same size we need more decimals because we are not zoomed in
        # around zero.
        rangeMin, rangeMax = self.getTargetRange()  # (min, max) of this axis' target range
        maxOrder = np.log10(np.abs(max(rangeMax, rangeMin)))
        diffOrder = np.log10(np.abs(rangeMax - rangeMin))

        # Add some extra digits so each pan/zoom action shows a new value;
        # clip keeps the precision in a sane [extraDigits+1, 25] window.
        extraDigits = 2
        precisionF = np.clip(abs(maxOrder - diffOrder) + extraDigits, extraDigits + 1, 25)
        # Guard against NaN/inf (e.g. zero-size or degenerate ranges).
        precision = int(precisionF) if np.isfinite(precisionF) else extraDigits + 1
        #logger.debug("maxOrder: {}, diffOrder: {}, precision: {}"
        #             .format(maxOrder, diffOrder, precision))

        self.rangeMinCti.precision = precision
        self.rangeMaxCti.precision = precision
        self.rangeMinCti.data, self.rangeMaxCti.data = rangeMin, rangeMax

        # Notify the config tree model so the new values are displayed.
        self.model.emitDataChanged(self.rangeMinCti)
        self.model.emitDataChanged(self.rangeMaxCti)
"def",
"_forceRefreshMinMax",
"(",
"self",
")",
":",
"#logger.debug(\"_forceRefreshMinMax\", stack_info=True)",
"# Set the precision from by looking how many decimals are needed to show the difference",
"# between the minimum and maximum, given the maximum. E.g. if min = 0.04 and max = 0.07,",
"# ... | 55.857143 | 29.642857 |
def parse_callers(variant, category='snv'):
    """Parse how the different variant callers have performed

    Args:
        variant (cyvcf2.Variant): A variant object

    Returns:
        callers (dict): A dictionary on the format
        {'gatk': <filter>,'freebayes': <filter>,'samtools': <filter>}
    """
    callers = {entry['id']: None for entry in CALLERS[category]}

    # GATK CombineVariants style: the 'set' INFO field lists caller outcomes.
    raw_info = variant.INFO.get('set')
    if raw_info:
        for token in raw_info.split('-'):
            if token == 'FilteredInAll':
                for name in callers:
                    callers[name] = 'Filtered'
            elif token == 'Intersection':
                for name in callers:
                    callers[name] = 'Pass'
            elif 'filterIn' in token:
                # e.g. "filterIngatk" marks the named caller as filtered.
                for name in callers:
                    if name in token:
                        callers[name] = 'Filtered'
            elif token in callers:
                callers[token] = 'Pass'

    # The following is parsing of a custom made merge
    other_info = variant.INFO.get('FOUND_IN')
    if other_info:
        for entry in other_info.split(','):
            called_by = entry.split('|')[0]
            callers[called_by] = 'Pass'
    return callers
"def",
"parse_callers",
"(",
"variant",
",",
"category",
"=",
"'snv'",
")",
":",
"relevant_callers",
"=",
"CALLERS",
"[",
"category",
"]",
"callers",
"=",
"{",
"caller",
"[",
"'id'",
"]",
":",
"None",
"for",
"caller",
"in",
"relevant_callers",
"}",
"raw_in... | 34.864865 | 12.378378 |
def from_file(filepath):
    """
    Returns the crs object from a file, with the format determined from the filename extension.

    Arguments:

    - *filepath*: filepath to be loaded, including extension.
    """
    if filepath.endswith(".prj"):
        # ESRI .prj files contain a single WKT string.
        # Bug fix: close the file handle (it was previously leaked).
        with open(filepath, "r") as f:
            string = f.read()
        return parse.from_unknown_wkt(string)
    elif filepath.endswith((".geojson", ".json")):
        with open(filepath) as f:
            geoj = json.load(f)
        if "crs" in geoj:
            crsinfo = geoj["crs"]
            if crsinfo["type"] == "name":
                string = crsinfo["properties"]["name"]
                return parse.from_unknown_text(string)
            elif crsinfo["type"] == "link":
                url = crsinfo["properties"]["name"]
                # renamed from 'type' to avoid shadowing the builtin
                link_type = crsinfo["properties"].get("type")
                return from_url(url, format=link_type)
            else:
                raise FormatError("Invalid GeoJSON crs type: must be either 'name' or 'link'")
        else:
            # assume default wgs84 as per the spec
            return parse.from_epsg_code("4326")
"def",
"from_file",
"(",
"filepath",
")",
":",
"if",
"filepath",
".",
"endswith",
"(",
"\".prj\"",
")",
":",
"string",
"=",
"open",
"(",
"filepath",
",",
"\"r\"",
")",
".",
"read",
"(",
")",
"return",
"parse",
".",
"from_unknown_wkt",
"(",
"string",
")... | 34.5 | 18.125 |
def process_way(e):
    """
    Process a way element entry into a list of dicts suitable for going into
    a Pandas DataFrame.

    Parameters
    ----------
    e : dict
        individual way element in downloaded OSM json

    Returns
    -------
    way : dict
    waynodes : list of dict
    """
    way = {'id': e['id']}

    # Keep only the configured subset of OSM tags (skip missing/NaN tags).
    if 'tags' in e and e['tags'] is not np.nan:
        for tag, value in list(e['tags'].items()):
            if tag in config.settings.keep_osm_tags:
                way[tag] = value

    # nodes that make up the way, in order
    waynodes = [{'way_id': e['id'], 'node_id': node} for node in e['nodes']]

    return way, waynodes
"def",
"process_way",
"(",
"e",
")",
":",
"way",
"=",
"{",
"'id'",
":",
"e",
"[",
"'id'",
"]",
"}",
"if",
"'tags'",
"in",
"e",
":",
"if",
"e",
"[",
"'tags'",
"]",
"is",
"not",
"np",
".",
"nan",
":",
"for",
"t",
",",
"v",
"in",
"list",
"(",
... | 21.032258 | 22.580645 |
def hide(
    input_image_file,
    img_enc,
    secret_message=None,
    secret_file=None,
    img_format=None,
):
    """Hide a message (string) in an image.

    The message is base64-encoded, zlib-compressed and stored in the EXIF
    ImageDescription tag of the saved image.

    :param input_image_file: path of the image to hide the message in
    :param img_enc: path where the resulting image is written
    :param secret_message: the message to hide (ignored if secret_file is set)
    :param secret_file: path of a file whose contents are hidden instead
    :param img_format: output format; defaults to the input image's format
    :return: the (closed) image object that was saved
    """
    from zlib import compress
    from base64 import b64encode

    if secret_file is not None:  # was 'secret_file != None'
        with open(secret_file, "r") as f:
            secret_message = f.read()

    try:
        text = compress(b64encode(bytes(secret_message, "utf-8")))
    except TypeError:
        # Python 2: bytes() takes no encoding argument; narrow the previously
        # bare except so real errors are not swallowed.
        text = compress(b64encode(secret_message))

    img = tools.open_image(input_image_file)

    if img_format is None:
        img_format = img.format

    if "exif" in img.info:
        exif_dict = piexif.load(img.info["exif"])
    else:
        exif_dict = {}
        # Bug fix: this reset was previously unconditional, discarding the
        # "0th" IFD that piexif.load had just read from existing EXIF data.
        exif_dict["0th"] = {}
    exif_dict["0th"][piexif.ImageIFD.ImageDescription] = text
    exif_bytes = piexif.dump(exif_dict)
    img.save(img_enc, format=img_format, exif=exif_bytes)
    img.close()
    return img
"def",
"hide",
"(",
"input_image_file",
",",
"img_enc",
",",
"secret_message",
"=",
"None",
",",
"secret_file",
"=",
"None",
",",
"img_format",
"=",
"None",
",",
")",
":",
"from",
"zlib",
"import",
"compress",
"from",
"base64",
"import",
"b64encode",
"if",
... | 24.5 | 19.555556 |
def secp256k1():
    """
    create the secp256k1 curve
    """
    # Field prime P = 2**256 - 2**32 - 977
    # (FFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFEFFFFFC2F)
    field = FiniteField(2 ** 256 - 2 ** 32 - 977)
    curve = EllipticCurve(field, 0, 7)
    generator = curve.point(
        0x79BE667EF9DCBBAC55A06295CE870B07029BFCDB2DCE28D959F2815B16F81798,
        0x483ADA7726A3C4655DA4FBFC0E1108A8FD17B448A68554199C47D08FFB10D4B8,
    )
    # Group order n of the generator point.
    order = 2 ** 256 - 432420386565659656852420866394968145599
    return ECDSA(curve, generator, order)
"def",
"secp256k1",
"(",
")",
":",
"GFp",
"=",
"FiniteField",
"(",
"2",
"**",
"256",
"-",
"2",
"**",
"32",
"-",
"977",
")",
"# This is P from below... aka FFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFEFFFFFC2F",
"ec",
"=",
"EllipticCurve",
"(",
"GFp",
",",... | 68.285714 | 46 |
def soft_fail(msg=''):
    """Record the error message instead of raising when inside a soft
    assertions context; otherwise force an immediate test failure with
    the given message."""
    global _soft_err
    if _soft_ctx:
        _soft_err.append('Fail: %s!' % msg if msg else 'Fail!')
    else:
        fail(msg)
"def",
"soft_fail",
"(",
"msg",
"=",
"''",
")",
":",
"global",
"_soft_ctx",
"if",
"_soft_ctx",
":",
"global",
"_soft_err",
"_soft_err",
".",
"append",
"(",
"'Fail: %s!'",
"%",
"msg",
"if",
"msg",
"else",
"'Fail!'",
")",
"return",
"fail",
"(",
"msg",
")"
... | 35.222222 | 17 |
def blur_input_each_step():
  """Minimizing this objective is equivelant to blurring input each step.

  Optimizing (-k)*blur_input_each_step() is equivelant to:

  input <- (1-k)*input + k*blur(input)

  An operation that was used in early feature visualization work.
  See Nguyen, et al., 2015.
  """
  def inner(T):
    current = T("input")
    # stop_gradient: the blurred target is a constant w.r.t. the input.
    blurred = tf.stop_gradient(_tf_blur(current))
    return 0.5 * tf.reduce_sum((current - blurred) ** 2)
  return inner
"def",
"blur_input_each_step",
"(",
")",
":",
"def",
"inner",
"(",
"T",
")",
":",
"t_input",
"=",
"T",
"(",
"\"input\"",
")",
"t_input_blurred",
"=",
"tf",
".",
"stop_gradient",
"(",
"_tf_blur",
"(",
"t_input",
")",
")",
"return",
"0.5",
"*",
"tf",
"."... | 31 | 19.533333 |
def __create_matcher(self, match_class, **keywds):
        """implementation details

        Builds a declaration matcher from the keyword arguments, stripping the
        query-control keys ('function', 'recursive', 'allow_empty') that the
        matcher class does not accept. If a user function was supplied, the
        returned callable requires both the matcher and the function to accept.
        """
        matcher_args = keywds.copy()
        del matcher_args['function']
        del matcher_args['recursive']
        if 'allow_empty' in matcher_args:
            del matcher_args['allow_empty']

        # Fixed accidental duplicated assignment
        # ("decl_matcher = decl_matcher = match_class(...)").
        decl_matcher = match_class(**matcher_args)
        if keywds['function']:
            self._logger.debug(
                'running query: %s and <user defined function>',
                str(decl_matcher))
            return lambda decl: decl_matcher(decl) and keywds['function'](decl)
        self._logger.debug('running query: %s', str(decl_matcher))
        return decl_matcher
"def",
"__create_matcher",
"(",
"self",
",",
"match_class",
",",
"*",
"*",
"keywds",
")",
":",
"matcher_args",
"=",
"keywds",
".",
"copy",
"(",
")",
"del",
"matcher_args",
"[",
"'function'",
"]",
"del",
"matcher_args",
"[",
"'recursive'",
"]",
"if",
"'allo... | 39.705882 | 15.117647 |
def abi_get_element_by_name(abi, name):
    """ Return the entry of abi["abi"] whose "name" field equals *name*
    (None when abi is falsy, has no "abi" key, or no entry matches) """
    if not abi or "abi" not in abi:
        return None
    for entry in abi["abi"]:
        if "name" in entry and entry["name"] == name:
            return entry
    return None
"def",
"abi_get_element_by_name",
"(",
"abi",
",",
"name",
")",
":",
"if",
"(",
"abi",
"and",
"\"abi\"",
"in",
"abi",
")",
":",
"for",
"a",
"in",
"abi",
"[",
"\"abi\"",
"]",
":",
"if",
"(",
"\"name\"",
"in",
"a",
"and",
"a",
"[",
"\"name\"",
"]",
... | 35.857143 | 10.714286 |
def fork_processes(num_processes: Optional[int], max_restarts: Optional[int] = None) -> int:
    """Starts multiple worker processes.

    If ``num_processes`` is None or <= 0, we detect the number of cores
    available on this machine and fork that number of child
    processes. If ``num_processes`` is given and > 0, we fork that
    specific number of sub-processes.

    Since we use processes and not threads, there is no shared memory
    between any server code.

    Note that multiple processes are not compatible with the autoreload
    module (or the ``autoreload=True`` option to `tornado.web.Application`
    which defaults to True when ``debug=True``).

    When using multiple processes, no IOLoops can be created or
    referenced until after the call to ``fork_processes``.

    In each child process, ``fork_processes`` returns its *task id*, a
    number between 0 and ``num_processes``. Processes that exit
    abnormally (due to a signal or non-zero exit status) are restarted
    with the same id (up to ``max_restarts`` times). In the parent
    process, ``fork_processes`` returns None if all child processes
    have exited normally, but will otherwise only exit by throwing an
    exception.

    max_restarts defaults to 100.

    Availability: Unix
    """
    if max_restarts is None:
        max_restarts = 100

    global _task_id
    # fork_processes must only be called once per process.
    assert _task_id is None
    if num_processes is None or num_processes <= 0:
        num_processes = cpu_count()
    gen_log.info("Starting %d processes", num_processes)
    children = {}  # maps child pid -> task id

    def start_child(i: int) -> Optional[int]:
        # Fork one child; returns the task id in the child, None in the parent.
        pid = os.fork()
        if pid == 0:
            # child process
            _reseed_random()
            global _task_id
            _task_id = i
            return i
        else:
            children[pid] = i
            return None

    for i in range(num_processes):
        id = start_child(i)
        if id is not None:
            # We are in a child: return immediately with our task id.
            return id
    num_restarts = 0
    # Parent: reap children, restarting abnormal exits up to max_restarts.
    while children:
        try:
            pid, status = os.wait()
        except OSError as e:
            # Retry when wait() is interrupted by a signal.
            if errno_from_exception(e) == errno.EINTR:
                continue
            raise
        if pid not in children:
            continue
        id = children.pop(pid)
        if os.WIFSIGNALED(status):
            gen_log.warning(
                "child %d (pid %d) killed by signal %d, restarting",
                id,
                pid,
                os.WTERMSIG(status),
            )
        elif os.WEXITSTATUS(status) != 0:
            gen_log.warning(
                "child %d (pid %d) exited with status %d, restarting",
                id,
                pid,
                os.WEXITSTATUS(status),
            )
        else:
            gen_log.info("child %d (pid %d) exited normally", id, pid)
            continue
        num_restarts += 1
        if num_restarts > max_restarts:
            raise RuntimeError("Too many child restarts, giving up")
        # The restarted child returns from here with the reused task id.
        new_id = start_child(id)
        if new_id is not None:
            return new_id
    # All child processes exited cleanly, so exit the master process
    # instead of just returning to right after the call to
    # fork_processes (which will probably just start up another IOLoop
    # unless the caller checks the return value).
    sys.exit(0)
"def",
"fork_processes",
"(",
"num_processes",
":",
"Optional",
"[",
"int",
"]",
",",
"max_restarts",
":",
"int",
"=",
"None",
")",
"->",
"int",
":",
"if",
"max_restarts",
"is",
"None",
":",
"max_restarts",
"=",
"100",
"global",
"_task_id",
"assert",
"_tas... | 34.202128 | 19.829787 |
def result(self, psd_state):
        """Return freqs and averaged PSD for given center frequency"""
        freq_array = numpy.fft.fftshift(psd_state['freq_array'])
        pwr_array = numpy.fft.fftshift(psd_state['pwr_array'])

        # Optionally crop the configured fraction of bins off both edges.
        if self._crop_factor:
            edge_bins = round((self._crop_factor * self._bins) / 2)
            freq_array = freq_array[edge_bins:-edge_bins]
            pwr_array = pwr_array[edge_bins:-edge_bins]

        # Average the accumulated power over the number of repeats.
        if psd_state['repeats'] > 1:
            pwr_array = pwr_array / psd_state['repeats']

        if self._log_scale:
            pwr_array = 10 * numpy.log10(pwr_array)

        return (freq_array, pwr_array)
"def",
"result",
"(",
"self",
",",
"psd_state",
")",
":",
"freq_array",
"=",
"numpy",
".",
"fft",
".",
"fftshift",
"(",
"psd_state",
"[",
"'freq_array'",
"]",
")",
"pwr_array",
"=",
"numpy",
".",
"fft",
".",
"fftshift",
"(",
"psd_state",
"[",
"'pwr_array... | 39.117647 | 21.117647 |
def live_scores(self, live_scores):
        """Store output of live scores to a CSV file"""
        headers = ['League', 'Home Team Name', 'Home Team Goals',
                   'Away Team Goals', 'Away Team Name']
        rows = [headers]
        for game in live_scores['games']:
            rows.append([game['league'], game['homeTeamName'],
                        game['goalsHomeTeam'], game['goalsAwayTeam'],
                        game['awayTeamName']])
        self.generate_output(rows)
"def",
"live_scores",
"(",
"self",
",",
"live_scores",
")",
":",
"headers",
"=",
"[",
"'League'",
",",
"'Home Team Name'",
",",
"'Home Team Goals'",
",",
"'Away Team Goals'",
",",
"'Away Team Name'",
"]",
"result",
"=",
"[",
"headers",
"]",
"result",
".",
"ext... | 53.111111 | 16.555556 |
def validate(self, value):
        """
        This prevents setting any value more precise than 0.00001
        """
        try:
            # Blank fields pass through untouched.
            if value:
                number = float(value)
                out_of_range = (number != 0 and number < self.fmin) or number > self.fmax
                if out_of_range:
                    return None
                # Reject anything with more than 5 decimal places.
                scaled = 100000 * number
                if abs(round(scaled) - scaled) > 1.e-12:
                    return None
            return value
        except ValueError:
            return None
"def",
"validate",
"(",
"self",
",",
"value",
")",
":",
"try",
":",
"# trap blank fields here",
"if",
"value",
":",
"v",
"=",
"float",
"(",
"value",
")",
"if",
"(",
"v",
"!=",
"0",
"and",
"v",
"<",
"self",
".",
"fmin",
")",
"or",
"v",
">",
"self"... | 31.333333 | 13.6 |
def temporary_tables(**kwargs):
    """
    Temporarily set DataFrames as registered tables.

    Tables will be returned to their original state when the context
    manager exits, even if the managed block raises. Caching is not
    enabled for tables registered via this function.
    """
    global _TABLES
    original = _TABLES.copy()

    for k, v in kwargs.items():
        if not isinstance(v, pd.DataFrame):
            raise ValueError('tables only accepts DataFrames')
        add_table(k, v)

    try:
        yield
    finally:
        # Bug fix: restore the registry even when the with-block raises;
        # previously an exception skipped restoration and leaked the
        # temporary tables into the global state.
        _TABLES = original
"def",
"temporary_tables",
"(",
"*",
"*",
"kwargs",
")",
":",
"global",
"_TABLES",
"original",
"=",
"_TABLES",
".",
"copy",
"(",
")",
"for",
"k",
",",
"v",
"in",
"kwargs",
".",
"items",
"(",
")",
":",
"if",
"not",
"isinstance",
"(",
"v",
",",
"pd",... | 23.190476 | 22.428571 |
def sign_cert(self, ca, csr, expires=None, algorithm=None, subject=None, cn_in_san=True,
csr_format=Encoding.PEM, subject_alternative_name=None, key_usage=None,
extended_key_usage=None, tls_feature=None, ocsp_no_check=False, extra_extensions=None,
password=None):
"""Create a signed certificate from a CSR.
**PLEASE NOTE:** This function creates the raw certificate and is usually not invoked directly. It is
called by :py:func:`Certificate.objects.init() <django_ca.managers.CertificateManager.init>`, which
passes along all parameters unchanged and saves the raw certificate to the database.
Parameters
----------
ca : :py:class:`~django_ca.models.CertificateAuthority`
The certificate authority to sign the certificate with.
csr : str
A valid CSR. The format is given by the ``csr_format`` parameter.
expires : datetime, optional
Datetime for when this certificate will expire, defaults to the ``CA_DEFAULT_EXPIRES`` setting.
algorithm : str or :py:class:`~cg:cryptography.hazmat.primitives.hashes.HashAlgorithm`, optional
Hash algorithm used when signing the certificate, passed to
:py:func:`~django_ca.utils.parse_hash_algorithm`. The default is the value of the
:ref:`CA_DIGEST_ALGORITHM <settings-ca-digest-algorithm>` setting.
subject : dict or str or :py:class:`~django_ca.subject.Subject`
Subject string, e.g. ``"/CN=example.com"`` or ``Subject("/CN=example.com")``.
The value is actually passed to :py:class:`~django_ca.subject.Subject` if it is not already an
instance of that class. If this value is not passed or if the value does not contain a CommonName,
the first value of the ``subject_alternative_name`` parameter is used as CommonName.
cn_in_san : bool, optional
Wether the CommonName should also be included as subjectAlternativeName. The default is
``True``, but the parameter is ignored if no CommonName is given. This is typically set
to ``False`` when creating a client certificate, where the subjects CommonName has no
meaningful value as subjectAlternativeName.
csr_format : :py:class:`~cg:cryptography.hazmat.primitives.serialization.Encoding`, optional
The format of the CSR. The default is ``PEM``.
subject_alternative_name : list of str or :py:class:`~django_ca.extensions.SubjectAlternativeName`,
optional A list of alternative names for the certificate. The value is passed to
:py:class:`~django_ca.extensions.SubjectAlternativeName` if not already an instance of that class.
key_usage : str or dict or :py:class:`~django_ca.extensions.KeyUsage`, optional
Value for the ``keyUsage`` X509 extension. The value is passed to
:py:class:`~django_ca.extensions.KeyUsage` if not already an instance of that class.
extended_key_usage : str or dict or :py:class:`~django_ca.extensions.ExtendedKeyUsage`, optional
Value for the ``extendedKeyUsage`` X509 extension. The value is passed to
:py:class:`~django_ca.extensions.ExtendedKeyUsage` if not already an instance of that class.
tls_feature : str or dict or :py:class:`~django_ca.extensions.TLSFeature`, optional
Value for the ``TLSFeature`` X509 extension. The value is passed to
:py:class:`~django_ca.extensions.TLSFeature` if not already an instance of that class.
ocsp_no_check : bool, optional
Add the OCSPNoCheck flag, indicating that an OCSP client should trust this certificate for it's
lifetime. This value only makes sense if you intend to use the certificate for an OCSP responder,
the default is ``False``. See `RFC 6990, section 4.2.2.2.1
<https://tools.ietf.org/html/rfc6960#section-4.2.2.2>`_ for more information.
extra_extensions : list of :py:class:`cg:cryptography.x509.Extension` or \
:py:class:`django_ca.extensions.Extension`, optional
An optional list of additional extensions to add to the certificate.
password : bytes, optional
Password used to load the private key of the certificate authority. If not passed, the private key
is assumed to be unencrypted.
Returns
-------
cryptography.x509.Certificate
The signed certificate.
"""
########################
# Normalize parameters #
########################
if subject is None:
subject = Subject() # we need a subject instance so we can possibly add the CN
elif not isinstance(subject, Subject):
subject = Subject(subject)
if 'CN' not in subject and not subject_alternative_name:
raise ValueError("Must name at least a CN or a subjectAlternativeName.")
algorithm = parse_hash_algorithm(algorithm)
# Normalize extensions to django_ca.extensions.Extension subclasses
if key_usage and not isinstance(key_usage, KeyUsage):
key_usage = KeyUsage(key_usage)
if extended_key_usage and not isinstance(extended_key_usage, ExtendedKeyUsage):
extended_key_usage = ExtendedKeyUsage(extended_key_usage)
if tls_feature and not isinstance(tls_feature, TLSFeature):
tls_feature = TLSFeature(tls_feature)
if not subject_alternative_name:
subject_alternative_name = SubjectAlternativeName([])
elif not isinstance(subject_alternative_name, SubjectAlternativeName):
subject_alternative_name = SubjectAlternativeName(subject_alternative_name)
# use first SAN as CN if CN is not set
if 'CN' not in subject:
subject['CN'] = subject_alternative_name.value[0].value
elif cn_in_san and 'CN' in subject: # add CN to SAN if cn_in_san is True (default)
try:
cn_name = parse_general_name(subject['CN'])
except idna.IDNAError:
raise ValueError('%s: Could not parse CommonName as subjectAlternativeName.' % subject['CN'])
else:
if cn_name not in subject_alternative_name:
subject_alternative_name.insert(0, cn_name)
################
# Read the CSR #
################
if csr_format == Encoding.PEM:
req = x509.load_pem_x509_csr(force_bytes(csr), default_backend())
elif csr_format == Encoding.DER:
req = x509.load_der_x509_csr(force_bytes(csr), default_backend())
else:
raise ValueError('Unknown CSR format passed: %s' % csr_format)
#########################
# Send pre-issue signal #
#########################
pre_issue_cert.send(sender=self.model, ca=ca, csr=csr, expires=expires, algorithm=algorithm,
subject=subject, cn_in_san=cn_in_san, csr_format=csr_format,
subject_alternative_name=subject_alternative_name, key_usage=key_usage,
extended_key_usage=extended_key_usage, tls_featur=tls_feature,
extra_extensions=extra_extensions, password=password)
#######################
# Generate public key #
#######################
public_key = req.public_key()
builder = get_cert_builder(expires)
builder = builder.public_key(public_key)
builder = builder.issuer_name(ca.x509.subject)
builder = builder.subject_name(subject.name)
# Add extensions
builder = builder.add_extension(x509.BasicConstraints(ca=False, path_length=None), critical=True)
builder = builder.add_extension(
x509.SubjectKeyIdentifier.from_public_key(public_key), critical=False)
# Get authorityKeyIdentifier from subjectKeyIdentifier from signing CA
builder = builder.add_extension(ca.get_authority_key_identifier(), critical=False)
for critical, ext in self.get_common_extensions(ca.issuer_url, ca.crl_url, ca.ocsp_url):
builder = builder.add_extension(ext, critical=critical)
if subject_alternative_name:
builder = builder.add_extension(**subject_alternative_name.for_builder())
if key_usage:
builder = builder.add_extension(**key_usage.for_builder())
if extended_key_usage:
builder = builder.add_extension(**extended_key_usage.for_builder())
if tls_feature:
builder = builder.add_extension(**tls_feature.for_builder())
if ca.issuer_alt_name:
issuer_alt_name = IssuerAlternativeName(ca.issuer_alt_name)
builder = builder.add_extension(**issuer_alt_name.for_builder())
if ocsp_no_check:
builder = builder.add_extension(**OCSPNoCheck().for_builder())
if extra_extensions:
builder = self._extra_extensions(builder, extra_extensions)
###################
# Sign public key #
###################
cert = builder.sign(private_key=ca.key(password), algorithm=algorithm, backend=default_backend())
return cert, req | [
"def",
"sign_cert",
"(",
"self",
",",
"ca",
",",
"csr",
",",
"expires",
"=",
"None",
",",
"algorithm",
"=",
"None",
",",
"subject",
"=",
"None",
",",
"cn_in_san",
"=",
"True",
",",
"csr_format",
"=",
"Encoding",
".",
"PEM",
",",
"subject_alternative_name... | 53.649123 | 32.204678 |
def nasnetalarge(num_classes=1001, pretrained='imagenet'):
r"""NASNetALarge model architecture from the
`"NASNet" <https://arxiv.org/abs/1707.07012>`_ paper.
"""
if pretrained:
settings = pretrained_settings['nasnetalarge'][pretrained]
assert num_classes == settings['num_classes'], \
"num_classes should be {}, but is {}".format(settings['num_classes'], num_classes)
# both 'imagenet'&'imagenet+background' are loaded from same parameters
model = NASNetALarge(num_classes=1001)
model.load_state_dict(model_zoo.load_url(settings['url']))
if pretrained == 'imagenet':
new_last_linear = nn.Linear(model.last_linear.in_features, 1000)
new_last_linear.weight.data = model.last_linear.weight.data[1:]
new_last_linear.bias.data = model.last_linear.bias.data[1:]
model.last_linear = new_last_linear
model.input_space = settings['input_space']
model.input_size = settings['input_size']
model.input_range = settings['input_range']
model.mean = settings['mean']
model.std = settings['std']
else:
model = NASNetALarge(num_classes=num_classes)
return model | [
"def",
"nasnetalarge",
"(",
"num_classes",
"=",
"1001",
",",
"pretrained",
"=",
"'imagenet'",
")",
":",
"if",
"pretrained",
":",
"settings",
"=",
"pretrained_settings",
"[",
"'nasnetalarge'",
"]",
"[",
"pretrained",
"]",
"assert",
"num_classes",
"==",
"settings"... | 42.892857 | 21.571429 |
def templates(self):
"""
Property for accessing :class:`TemplateManager` instance, which is used to manage templates.
:rtype: yagocd.resources.template.TemplateManager
"""
if self._template_manager is None:
self._template_manager = TemplateManager(session=self._session)
return self._template_manager | [
"def",
"templates",
"(",
"self",
")",
":",
"if",
"self",
".",
"_template_manager",
"is",
"None",
":",
"self",
".",
"_template_manager",
"=",
"TemplateManager",
"(",
"session",
"=",
"self",
".",
"_session",
")",
"return",
"self",
".",
"_template_manager"
] | 39.222222 | 19.666667 |
def get_ffmpeg_lib_versions(ffmpeg_path=None):
""" Get FFmpeg library versions as 32 bit integers, with same format as sys.hexversion.
Example: 0x3040100 for FFmpeg 3.4.1
"""
r = collections.OrderedDict()
cmd = (ffmpeg_path or "ffmpeg", "-version")
output = subprocess.run(cmd,
check=True,
stdout=subprocess.PIPE,
universal_newlines=True).stdout
output = output.splitlines()
lib_version_regex = re.compile("^\s*(lib[a-z]+)\s+([0-9]+).\s*([0-9]+).\s*([0-9]+)\s+")
for line in output:
match = lib_version_regex.search(line)
if match:
lib_name, *lib_version = match.group(1, 2, 3, 4)
int_lib_version = 0
for i, d in enumerate(map(int, reversed(lib_version)), 1):
int_lib_version |= d << (8 * i)
r[lib_name] = int_lib_version
return r | [
"def",
"get_ffmpeg_lib_versions",
"(",
"ffmpeg_path",
"=",
"None",
")",
":",
"r",
"=",
"collections",
".",
"OrderedDict",
"(",
")",
"cmd",
"=",
"(",
"ffmpeg_path",
"or",
"\"ffmpeg\"",
",",
"\"-version\"",
")",
"output",
"=",
"subprocess",
".",
"run",
"(",
... | 38.545455 | 13.545455 |
def classifier(self):
""" Returns classifier from classifier.pkl """
clf = pickle.load(open(os.path.join(self.repopath, 'classifier.pkl')))
return clf | [
"def",
"classifier",
"(",
"self",
")",
":",
"clf",
"=",
"pickle",
".",
"load",
"(",
"open",
"(",
"os",
".",
"path",
".",
"join",
"(",
"self",
".",
"repopath",
",",
"'classifier.pkl'",
")",
")",
")",
"return",
"clf"
] | 35.8 | 23.8 |
def on(self, state):
""" Turn on or off.
:param state: True (on) or False (off).
"""
self._on = state
cmd = self.command_set.off()
if state:
cmd = self.command_set.on()
self.send(cmd) | [
"def",
"on",
"(",
"self",
",",
"state",
")",
":",
"self",
".",
"_on",
"=",
"state",
"cmd",
"=",
"self",
".",
"command_set",
".",
"off",
"(",
")",
"if",
"state",
":",
"cmd",
"=",
"self",
".",
"command_set",
".",
"on",
"(",
")",
"self",
".",
"sen... | 24.3 | 12.9 |
def find_descriptor(self, uuid):
"""Return the first child descriptor found that has the specified
UUID. Will return None if no descriptor that matches is found.
"""
for desc in self.list_descriptors():
if desc.uuid == uuid:
return desc
return None | [
"def",
"find_descriptor",
"(",
"self",
",",
"uuid",
")",
":",
"for",
"desc",
"in",
"self",
".",
"list_descriptors",
"(",
")",
":",
"if",
"desc",
".",
"uuid",
"==",
"uuid",
":",
"return",
"desc",
"return",
"None"
] | 38.75 | 10.5 |
def get_arg_or_attr(self, name, default=None):
"""Returns flow argument, as provided with sitegate decorators
or attribute set as a flow class attribute or default."""
if name in self.flow_args:
return self.flow_args[name]
try:
return getattr(self, name)
except AttributeError:
return default | [
"def",
"get_arg_or_attr",
"(",
"self",
",",
"name",
",",
"default",
"=",
"None",
")",
":",
"if",
"name",
"in",
"self",
".",
"flow_args",
":",
"return",
"self",
".",
"flow_args",
"[",
"name",
"]",
"try",
":",
"return",
"getattr",
"(",
"self",
",",
"na... | 40.333333 | 7.444444 |
def hide(self, event):
"""Toggles the visiblity of the content widget"""
if self.content.isHidden():
self.content.show()
self.hideBtn.setIcon(self.hideIcon)
self.setMaximumHeight(16777215)
else:
self.content.hide()
self.hideBtn.setIcon(self.showIcon)
self.setFixedHeight(30) | [
"def",
"hide",
"(",
"self",
",",
"event",
")",
":",
"if",
"self",
".",
"content",
".",
"isHidden",
"(",
")",
":",
"self",
".",
"content",
".",
"show",
"(",
")",
"self",
".",
"hideBtn",
".",
"setIcon",
"(",
"self",
".",
"hideIcon",
")",
"self",
".... | 36.1 | 9 |
def get_transition(self, # suppress(too-many-arguments)
line,
line_index,
column,
is_escaped,
comment_system_transitions,
eof=False):
"""Get transition from InCommentParser."""
del comment_system_transitions
if (_token_at_col_in_line(line,
column,
"```",
3) and
not _is_escaped(line, column, is_escaped)):
# Hit a disable token, so resume the last parser
return (DisabledParser((line_index, column + 3),
self.__class__,
self._waiting_until), 3, self._started_at)
elif self._waiting_until != ParserState.EOL:
wait_until_len = len(self._waiting_until)
if (_token_at_col_in_line(line,
column,
self._waiting_until,
wait_until_len) and
not _is_escaped(line, column, is_escaped)):
# Skip ahead to end of this token
return (InTextParser(),
len(self._waiting_until),
self._started_at)
elif self._waiting_until == ParserState.EOL and column == 0:
# We hit a new line and the state ends here. Return
# corresponding state
return (InTextParser(), 0, self._started_at)
elif eof:
# We hit the end of the file and were still in a comment
# state. Grab everything up to here.
return (InTextParser(), 0, self._started_at)
# Move ahead by one character otherwise
return (self, 1, None) | [
"def",
"get_transition",
"(",
"self",
",",
"# suppress(too-many-arguments)",
"line",
",",
"line_index",
",",
"column",
",",
"is_escaped",
",",
"comment_system_transitions",
",",
"eof",
"=",
"False",
")",
":",
"del",
"comment_system_transitions",
"if",
"(",
"_token_a... | 44.166667 | 13.642857 |
def urlencode2(query, doseq=0, safe="", querydelimiter="&"):
"""Encode a sequence of two-element tuples or dictionary into a URL query string.
If any values in the query arg are sequences and doseq is true, each
sequence element is converted to a separate parameter.
If the query arg is a sequence of two-element tuples, the order of the
parameters in the output will match the order of parameters in the
input.
"""
if hasattr(query,"items"):
# mapping objects
query = query.items()
else:
# it's a bother at times that strings and string-like objects are
# sequences...
try:
# non-sequence items should not work with len()
# non-empty strings will fail this
if len(query) and not isinstance(query[0], tuple):
raise TypeError
# zero-length sequences of all types will get here and succeed,
# but that's a minor nit - since the original implementation
# allowed empty dicts that type of behavior probably should be
# preserved for consistency
except TypeError:
ty,va,tb = sys.exc_info()
raise TypeError("not a valid non-string sequence or mapping object " + tb)
l = []
if not doseq:
# preserve old behavior
for k, v in query:
k = quote_plus(str(k), safe=safe)
v = quote_plus(str(v), safe=safe)
l.append(k + '=' + v)
else:
for k, v in query:
k = quote_plus(str(k), safe=safe)
if isinstance(v, str):
v = quote_plus(v, safe=safe)
l.append(k + '=' + v)
elif _is_unicode(v):
# is there a reasonable way to convert to ASCII?
# encode generates a string, but "replace" or "ignore"
# lose information and "strict" can raise UnicodeError
v = quote_plus(v.encode("ASCII","replace"))
l.append(k + '=' + v)
else:
try:
# is this a sufficient test for sequence-ness?
len(v)
except TypeError:
# not a sequence
v = quote_plus(str(v), safe=safe)
l.append(k + '=' + v)
else:
# loop over the sequence
for elt in v:
l.append(k + '=' + quote_plus(str(elt)))
return querydelimiter.join(l) | [
"def",
"urlencode2",
"(",
"query",
",",
"doseq",
"=",
"0",
",",
"safe",
"=",
"\"\"",
",",
"querydelimiter",
"=",
"\"&\"",
")",
":",
"if",
"hasattr",
"(",
"query",
",",
"\"items\"",
")",
":",
"# mapping objects",
"query",
"=",
"query",
".",
"items",
"("... | 39.725806 | 18.241935 |
def get_dict(self):
'''
Convert Paginator instance to dict
:return: Paging data
:rtype: dict
'''
return dict(
current_page=self.current_page,
total_page_count=self.total_page_count,
items=self.items,
total_item_count=self.total_item_count,
page_size=self.page_size
) | [
"def",
"get_dict",
"(",
"self",
")",
":",
"return",
"dict",
"(",
"current_page",
"=",
"self",
".",
"current_page",
",",
"total_page_count",
"=",
"self",
".",
"total_page_count",
",",
"items",
"=",
"self",
".",
"items",
",",
"total_item_count",
"=",
"self",
... | 24.666667 | 18.933333 |
def textForSaving(self):
"""Get text with correct EOL symbols. Use this method for saving a file to storage
"""
lines = self.text.splitlines()
if self.text.endswith('\n'): # splitlines ignores last \n
lines.append('')
return self.eol.join(lines) + self.eol | [
"def",
"textForSaving",
"(",
"self",
")",
":",
"lines",
"=",
"self",
".",
"text",
".",
"splitlines",
"(",
")",
"if",
"self",
".",
"text",
".",
"endswith",
"(",
"'\\n'",
")",
":",
"# splitlines ignores last \\n",
"lines",
".",
"append",
"(",
"''",
")",
... | 43.285714 | 8.857143 |
def mouse_down(self, evt):
"Get the selected object and store start position"
if DEBUG: print "down!"
if (not evt.ControlDown() and not evt.ShiftDown()) or evt.AltDown():
for obj in self.selection:
# clear marker
if obj.sel_marker:
obj.sel_marker.show(False)
obj.sel_marker.destroy()
obj.sel_marker = None
self.selection = [] # clear previous selection
wx_obj = evt.GetEventObject()
if wx_obj.Parent is None or evt.AltDown():
if not evt.AltDown():
evt.Skip()
# start the rubberband effect (multiple selection using the mouse)
self.current = wx_obj
self.overlay = wx.Overlay()
self.pos = evt.GetPosition()
self.parent.wx_obj.CaptureMouse()
#if self.inspector and hasattr(wx_obj, "obj"):
# self.inspector.inspect(wx_obj.obj) # inspect top level window
#self.dclick = False
else:
# create the selection marker and assign it to the control
obj = wx_obj.obj
self.overlay = None
if DEBUG: print wx_obj
sx, sy = wx_obj.ScreenToClient(wx_obj.GetPositionTuple())
dx, dy = wx_obj.ScreenToClient(wx.GetMousePosition())
self.pos = wx_obj.ScreenToClient(wx.GetMousePosition())
self.start = (sx - dx, sy - dy)
self.current = wx_obj
if DEBUG: print "capture..."
# do not capture on TextCtrl, it will fail (blocking) at least in gtk
# do not capture on wx.Notebook to allow selecting the tabs
if not isinstance(wx_obj, wx.Notebook):
self.parent.wx_obj.CaptureMouse()
self.select(obj, keep_selection=True) | [
"def",
"mouse_down",
"(",
"self",
",",
"evt",
")",
":",
"if",
"DEBUG",
":",
"print",
"\"down!\"",
"if",
"(",
"not",
"evt",
".",
"ControlDown",
"(",
")",
"and",
"not",
"evt",
".",
"ShiftDown",
"(",
")",
")",
"or",
"evt",
".",
"AltDown",
"(",
")",
... | 44.634146 | 15.414634 |
def calc_time(lower_bound, upper_bound, latitude, longitude, attribute, value,
altitude=0, pressure=101325, temperature=12, horizon='+0:00',
xtol=1.0e-12):
"""
Calculate the time between lower_bound and upper_bound
where the attribute is equal to value. Uses PyEphem for
solar position calculations.
Parameters
----------
lower_bound : datetime.datetime
upper_bound : datetime.datetime
latitude : float
longitude : float
attribute : str
The attribute of a pyephem.Sun object that
you want to solve for. Likely options are 'alt'
and 'az' (which must be given in radians).
value : int or float
The value of the attribute to solve for
altitude : float, default 0
Distance above sea level.
pressure : int or float, optional, default 101325
Air pressure in Pascals. Set to 0 for no
atmospheric correction.
temperature : int or float, optional, default 12
Air temperature in degrees C.
horizon : string, optional, default '+0:00'
arc degrees:arc minutes from geometrical horizon for sunrise and
sunset, e.g., horizon='+0:00' to use sun center crossing the
geometrical horizon to define sunrise and sunset,
horizon='-0:34' for when the sun's upper edge crosses the
geometrical horizon
xtol : float, optional, default 1.0e-12
The allowed error in the result from value
Returns
-------
datetime.datetime
Raises
------
ValueError
If the value is not contained between the bounds.
AttributeError
If the given attribute is not an attribute of a
PyEphem.Sun object.
"""
try:
import scipy.optimize as so
except ImportError:
raise ImportError('The calc_time function requires scipy')
obs, sun = _ephem_setup(latitude, longitude, altitude,
pressure, temperature, horizon)
def compute_attr(thetime, target, attr):
obs.date = thetime
sun.compute(obs)
return getattr(sun, attr) - target
lb = datetime_to_djd(lower_bound)
ub = datetime_to_djd(upper_bound)
djd_root = so.brentq(compute_attr, lb, ub,
(value, attribute), xtol=xtol)
return djd_to_datetime(djd_root) | [
"def",
"calc_time",
"(",
"lower_bound",
",",
"upper_bound",
",",
"latitude",
",",
"longitude",
",",
"attribute",
",",
"value",
",",
"altitude",
"=",
"0",
",",
"pressure",
"=",
"101325",
",",
"temperature",
"=",
"12",
",",
"horizon",
"=",
"'+0:00'",
",",
... | 32.927536 | 18.550725 |
def get_sigla(self, work):
"""Returns a list of all of the sigla for `work`.
:param work: name of work
:type work: `str`
:rtype: `list` of `str`
"""
return [os.path.splitext(os.path.basename(path))[0]
for path in glob.glob(os.path.join(self._path, work, '*.txt'))] | [
"def",
"get_sigla",
"(",
"self",
",",
"work",
")",
":",
"return",
"[",
"os",
".",
"path",
".",
"splitext",
"(",
"os",
".",
"path",
".",
"basename",
"(",
"path",
")",
")",
"[",
"0",
"]",
"for",
"path",
"in",
"glob",
".",
"glob",
"(",
"os",
".",
... | 32.1 | 18.3 |
def has_file_extension(filepath, ext_required):
'''Assert that a filepath has the required file extension
:param filepath: string filepath presumably containing a file extension
:param ext_required: the expected file extension
examples: ".pdf", ".html", ".tex"
'''
ext = os.path.splitext(filepath)[-1]
if ext != ext_required:
msg_tmpl = "The extension for {}, which is {}, does not equal {}"
msg_format = msg_tmpl.format(filepath, ext, ext_required)
raise ValueError(msg_format)
return True | [
"def",
"has_file_extension",
"(",
"filepath",
",",
"ext_required",
")",
":",
"ext",
"=",
"os",
".",
"path",
".",
"splitext",
"(",
"filepath",
")",
"[",
"-",
"1",
"]",
"if",
"ext",
"!=",
"ext_required",
":",
"msg_tmpl",
"=",
"\"The extension for {}, which is ... | 41.461538 | 19.153846 |
def _trim(image):
"""Trim a PIL image and remove white space."""
background = PIL.Image.new(image.mode, image.size, image.getpixel((0, 0)))
diff = PIL.ImageChops.difference(image, background)
diff = PIL.ImageChops.add(diff, diff, 2.0, -100)
bbox = diff.getbbox()
if bbox:
image = image.crop(bbox)
return image | [
"def",
"_trim",
"(",
"image",
")",
":",
"background",
"=",
"PIL",
".",
"Image",
".",
"new",
"(",
"image",
".",
"mode",
",",
"image",
".",
"size",
",",
"image",
".",
"getpixel",
"(",
"(",
"0",
",",
"0",
")",
")",
")",
"diff",
"=",
"PIL",
".",
... | 37.444444 | 18.111111 |
def set_binary_path(name):
'''
Sets the path, where the syslog-ng binary can be found. This function is
intended to be used from states.
If syslog-ng is installed via a package manager, users don't need to use
this function.
CLI Example:
.. code-block:: bash
salt '*' syslog_ng.set_binary_path name=/usr/sbin
'''
global __SYSLOG_NG_BINARY_PATH
old = __SYSLOG_NG_BINARY_PATH
__SYSLOG_NG_BINARY_PATH = name
changes = _format_changes(old, name)
return _format_state_result(name, result=True, changes=changes) | [
"def",
"set_binary_path",
"(",
"name",
")",
":",
"global",
"__SYSLOG_NG_BINARY_PATH",
"old",
"=",
"__SYSLOG_NG_BINARY_PATH",
"__SYSLOG_NG_BINARY_PATH",
"=",
"name",
"changes",
"=",
"_format_changes",
"(",
"old",
",",
"name",
")",
"return",
"_format_state_result",
"(",... | 27.55 | 24.05 |
def subscribed_tracks(self):
"""
Access the subscribed_tracks
:returns: twilio.rest.video.v1.room.room_participant.room_participant_subscribed_track.SubscribedTrackList
:rtype: twilio.rest.video.v1.room.room_participant.room_participant_subscribed_track.SubscribedTrackList
"""
if self._subscribed_tracks is None:
self._subscribed_tracks = SubscribedTrackList(
self._version,
room_sid=self._solution['room_sid'],
subscriber_sid=self._solution['sid'],
)
return self._subscribed_tracks | [
"def",
"subscribed_tracks",
"(",
"self",
")",
":",
"if",
"self",
".",
"_subscribed_tracks",
"is",
"None",
":",
"self",
".",
"_subscribed_tracks",
"=",
"SubscribedTrackList",
"(",
"self",
".",
"_version",
",",
"room_sid",
"=",
"self",
".",
"_solution",
"[",
"... | 42.785714 | 20.5 |
def add_require(self, require):
""" Add a require object if it does not already exist """
for p in self.requires:
if p.value == require.value:
return
self.requires.append(require) | [
"def",
"add_require",
"(",
"self",
",",
"require",
")",
":",
"for",
"p",
"in",
"self",
".",
"requires",
":",
"if",
"p",
".",
"value",
"==",
"require",
".",
"value",
":",
"return",
"self",
".",
"requires",
".",
"append",
"(",
"require",
")"
] | 37.666667 | 6.5 |
def read_excel(
filename: PathLike,
sheet: Union[str, int],
dtype: str='float32',
) -> AnnData:
"""Read ``.xlsx`` (Excel) file.
Assumes that the first columns stores the row names and the first row the
column names.
Parameters
----------
filename
File name to read from.
sheet
Name of sheet in Excel file.
"""
# rely on pandas for reading an excel file
from pandas import read_excel
df = read_excel(fspath(filename), sheet)
X = df.values[:, 1:]
row = {'row_names': df.iloc[:, 0].values.astype(str)}
col = {'col_names': np.array(df.columns[1:], dtype=str)}
return AnnData(X, row, col, dtype=dtype) | [
"def",
"read_excel",
"(",
"filename",
":",
"PathLike",
",",
"sheet",
":",
"Union",
"[",
"str",
",",
"int",
"]",
",",
"dtype",
":",
"str",
"=",
"'float32'",
",",
")",
"->",
"AnnData",
":",
"# rely on pandas for reading an excel file",
"from",
"pandas",
"impor... | 27.625 | 18.125 |
def create_discount_promotion(cls, discount_promotion, **kwargs):
"""Create DiscountPromotion
Create a new DiscountPromotion
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async=True
>>> thread = api.create_discount_promotion(discount_promotion, async=True)
>>> result = thread.get()
:param async bool
:param DiscountPromotion discount_promotion: Attributes of discountPromotion to create (required)
:return: DiscountPromotion
If the method is called asynchronously,
returns the request thread.
"""
kwargs['_return_http_data_only'] = True
if kwargs.get('async'):
return cls._create_discount_promotion_with_http_info(discount_promotion, **kwargs)
else:
(data) = cls._create_discount_promotion_with_http_info(discount_promotion, **kwargs)
return data | [
"def",
"create_discount_promotion",
"(",
"cls",
",",
"discount_promotion",
",",
"*",
"*",
"kwargs",
")",
":",
"kwargs",
"[",
"'_return_http_data_only'",
"]",
"=",
"True",
"if",
"kwargs",
".",
"get",
"(",
"'async'",
")",
":",
"return",
"cls",
".",
"_create_di... | 45.904762 | 23.047619 |
def _countdown(self, waitTime=0, printString="Waiting %*d seconds...", verbose=True):
"""Makes a pretty countdown.
Args:
gitquery (str): The query or endpoint itself.
Examples:
query: 'query { viewer { login } }'
endpoint: '/user'
printString (Optional[str]): A counter message to display.
Defaults to 'Waiting %*d seconds...'
verbose (Optional[bool]): If False, all extra printouts will be
suppressed. Defaults to True.
"""
if waitTime <= 0:
waitTime = self.__retryDelay
for remaining in range(waitTime, 0, -1):
_vPrint(verbose, "\r" + printString % (len(str(waitTime)), remaining), end="", flush=True)
time.sleep(1)
if verbose:
_vPrint(verbose, "\r" + printString % (len(str(waitTime)), 0)) | [
"def",
"_countdown",
"(",
"self",
",",
"waitTime",
"=",
"0",
",",
"printString",
"=",
"\"Waiting %*d seconds...\"",
",",
"verbose",
"=",
"True",
")",
":",
"if",
"waitTime",
"<=",
"0",
":",
"waitTime",
"=",
"self",
".",
"__retryDelay",
"for",
"remaining",
"... | 42.714286 | 21.047619 |
def split_on(s, sep=" "):
"""Split s by sep, unless it's inside a quote."""
pattern = '''((?:[^%s"']|"[^"]*"|'[^']*')+)''' % sep
return [_strip_speechmarks(t) for t in re.split(pattern, s)[1::2]] | [
"def",
"split_on",
"(",
"s",
",",
"sep",
"=",
"\" \"",
")",
":",
"pattern",
"=",
"'''((?:[^%s\"']|\"[^\"]*\"|'[^']*')+)'''",
"%",
"sep",
"return",
"[",
"_strip_speechmarks",
"(",
"t",
")",
"for",
"t",
"in",
"re",
".",
"split",
"(",
"pattern",
",",
"s",
"... | 40.8 | 20.2 |
def color_str(self):
"Return an escape-coded string to write to the terminal."
s = self.s
for k, v in sorted(self.atts.items()):
# (self.atts sorted for the sake of always acting the same.)
if k not in xforms:
# Unsupported SGR code
continue
elif v is False:
continue
elif v is True:
s = xforms[k](s)
else:
s = xforms[k](s, v)
return s | [
"def",
"color_str",
"(",
"self",
")",
":",
"s",
"=",
"self",
".",
"s",
"for",
"k",
",",
"v",
"in",
"sorted",
"(",
"self",
".",
"atts",
".",
"items",
"(",
")",
")",
":",
"# (self.atts sorted for the sake of always acting the same.)",
"if",
"k",
"not",
"in... | 32.866667 | 15.533333 |
def gen_totals(report, file_type):
"""
Print the gen totals.
"""
label = clr.stringc(file_type + " files ", "bright purple")
ok = field_value(report["ok_role"], "ok", c.LOG_COLOR["ok"], 0)
skipped = field_value(report["skipped_role"], "skipped",
c.LOG_COLOR["skipped"], 16)
changed = field_value(report["changed_role"], "changed",
c.LOG_COLOR["changed"], 16)
# missing_meta = field_value(report["missing_meta_role"],
# "missing meta(s)",
# c.LOG_COLOR["missing_meta"], 16)
# print "\n{0} {1} {2} {3}".format(ok, skipped, changed, missing_meta)
print "\n{0} {1} {2} {3}".format(label, ok, skipped, changed) | [
"def",
"gen_totals",
"(",
"report",
",",
"file_type",
")",
":",
"label",
"=",
"clr",
".",
"stringc",
"(",
"file_type",
"+",
"\" files \"",
",",
"\"bright purple\"",
")",
"ok",
"=",
"field_value",
"(",
"report",
"[",
"\"ok_role\"",
"]",
",",
"\"ok\"",
","... | 39.315789 | 21.842105 |
def add_tag(self, new_tags):
"""Add a tag to existing device tags. This method will not add a duplicate, if already in the list.
:param new_tags: the tag(s) to be added. new_tags can be a comma-separated string or list
"""
tags = self.get_tags()
orig_tag_cnt = len(tags)
# print("self.get_tags() {}".format(tags))
if isinstance(new_tags, six.string_types):
new_tags = new_tags.split(',')
# print("spliting tags :: {}".format(new_tags))
for tag in new_tags:
if not tag in tags:
tags.append(tag.strip())
if len(tags) > orig_tag_cnt:
xml_tags = escape(",".join(tags))
post_data = TAGS_TEMPLATE.format(connectware_id=self.get_connectware_id(),
tags=xml_tags)
self._conn.put('/ws/DeviceCore', post_data)
# Invalidate cache
self._device_json = None | [
"def",
"add_tag",
"(",
"self",
",",
"new_tags",
")",
":",
"tags",
"=",
"self",
".",
"get_tags",
"(",
")",
"orig_tag_cnt",
"=",
"len",
"(",
"tags",
")",
"# print(\"self.get_tags() {}\".format(tags))",
"if",
"isinstance",
"(",
"new_tags",
",",
"six",
".",
"str... | 36.615385 | 18.923077 |
def program_select(self, chan, sfid, bank, preset):
    """Select *preset* from *bank* of SoundFont *sfid* on MIDI channel *chan*."""
    result = fluid_synth_program_select(self.synth, chan, sfid, bank, preset)
    return result
"def",
"program_select",
"(",
"self",
",",
"chan",
",",
"sfid",
",",
"bank",
",",
"preset",
")",
":",
"return",
"fluid_synth_program_select",
"(",
"self",
".",
"synth",
",",
"chan",
",",
"sfid",
",",
"bank",
",",
"preset",
")"
] | 53.666667 | 16.666667 |
def adapt_item(item, package, filename=None):
    """Adapt an ``.epub.Item`` to a ``DocumentItem``.

    XHTML items become ``DocumentPointerItem`` or ``DocumentItem`` depending
    on their metadata; everything else is wrapped as a plain ``Resource``.
    """
    if item.media_type != 'application/xhtml+xml':
        # Non-XHTML content needs no metadata inspection.
        return Resource(item.name, item.data, item.media_type,
                        filename or item.name)
    try:
        html = etree.parse(item.data)
    except Exception:
        logger.error("failed parsing {}".format(item.name))
        raise
    metadata = DocumentPointerMetadataParser(html, raise_value_error=False)()
    # Rewind the stream so later consumers can re-read the item's data.
    item.data.seek(0)
    if metadata.get('is_document_pointer'):
        return DocumentPointerItem(item, package)
    return DocumentItem(item, package)
"def",
"adapt_item",
"(",
"item",
",",
"package",
",",
"filename",
"=",
"None",
")",
":",
"if",
"item",
".",
"media_type",
"==",
"'application/xhtml+xml'",
":",
"try",
":",
"html",
"=",
"etree",
".",
"parse",
"(",
"item",
".",
"data",
")",
"except",
"E... | 34.857143 | 14.571429 |
def alterar(self, id_model, id_brand, name):
    """Change the Model with the given identifier.

    :param id_model: Identifier of the Model. Integer value and greater than zero.
    :param id_brand: Identifier of the Brand. Integer value and greater than zero.
    :param name: Model name. String with a minimum 3 and maximum of 100 characters.

    :return: None

    :raise InvalidParameterError: The identifier of Model, Brand or name is null and invalid.
    :raise MarcaNaoExisteError: Brand not registered.
    :raise ModeloEquipamentoNaoExisteError: Model not registered.
    :raise NomeMarcaModeloDuplicadoError: There is already a registered Model with the value of name and brand.
    :raise DataBaseError: Networkapi failed to access the database.
    :raise XMLError: Networkapi failed to generate the XML response.
    """
    if not is_valid_int_param(id_model):
        raise InvalidParameterError(
            u'The identifier of Model is invalid or was not informed.')
    model_map = {'name': name, 'id_brand': id_brand}
    url = 'model/%s/' % id_model
    code, xml = self.submit({'model': model_map}, 'PUT', url)
    return self.response(code, xml)
"def",
"alterar",
"(",
"self",
",",
"id_model",
",",
"id_brand",
",",
"name",
")",
":",
"if",
"not",
"is_valid_int_param",
"(",
"id_model",
")",
":",
"raise",
"InvalidParameterError",
"(",
"u'The identifier of Model is invalid or was not informed.'",
")",
"model_map",... | 42.2 | 27.066667 |
def get_services(profile='pagerduty', subdomain=None, api_key=None):
    '''
    List the services belonging to this account.

    CLI Example:

        salt myminion pagerduty.get_services
    '''
    credentials = {
        'profile': profile,
        'subdomain': subdomain,
        'api_key': api_key,
    }
    return _list_items('services', 'id', **credentials)
"def",
"get_services",
"(",
"profile",
"=",
"'pagerduty'",
",",
"subdomain",
"=",
"None",
",",
"api_key",
"=",
"None",
")",
":",
"return",
"_list_items",
"(",
"'services'",
",",
"'id'",
",",
"profile",
"=",
"profile",
",",
"subdomain",
"=",
"subdomain",
",... | 20.0625 | 24.3125 |
def image2surface(img):
    """
    Convert a PIL image into a Cairo surface.

    The conversion round-trips through an in-memory PNG because, under
    Python 3, cairo.ImageSurface.create_for_data() raises
    NotImplementedError, so a direct pixel transfer is not usable:

        img.putalpha(256)
        (width, height) = img.size
        imgd = img.tobytes('raw', 'BGRA')
        imga = array.array('B', imgd)
        stride = width * 4
        return cairo.ImageSurface.create_for_data(
            imga, cairo.FORMAT_ARGB32, width, height, stride)
    """
    if not CAIRO_AVAILABLE:
        raise Exception("Cairo not available(). image2surface() cannot work.")
    global g_lock
    with g_lock:
        png_buffer = io.BytesIO()
        img.save(png_buffer, format="PNG")
        png_buffer.seek(0)
        return cairo.ImageSurface.create_from_png(png_buffer)
"def",
"image2surface",
"(",
"img",
")",
":",
"if",
"not",
"CAIRO_AVAILABLE",
":",
"raise",
"Exception",
"(",
"\"Cairo not available(). image2surface() cannot work.\"",
")",
"# TODO(Jflesch): Python 3 problem",
"# cairo.ImageSurface.create_for_data() raises NotImplementedYet ...",
... | 30.88 | 16 |
def get_urls(self):
    """Extend the standard CompetitionEntryAdmin URLs with a CSV-export view.

    The export URL is prepended so it takes precedence over the default
    admin patterns.
    """
    base_urls = super(CompetitionEntryAdmin, self).get_urls()
    export_url = url(
        r'^exportcsv/$',
        self.admin_site.admin_view(self.csv_export),
        name='competition-csv-export',
    )
    return patterns('', export_url) + base_urls
"def",
"get_urls",
"(",
"self",
")",
":",
"urls",
"=",
"super",
"(",
"CompetitionEntryAdmin",
",",
"self",
")",
".",
"get_urls",
"(",
")",
"csv_urls",
"=",
"patterns",
"(",
"''",
",",
"url",
"(",
"r'^exportcsv/$'",
",",
"self",
".",
"admin_site",
".",
... | 38.166667 | 14.583333 |
def cell_source(cell):
    """Return the source of *cell* as a list of lines.

    An empty source yields ``['']``; a trailing newline is represented by
    a final empty line appended after ``splitlines()``.
    """
    text = cell.source
    if text == '':
        return ['']
    lines = text.splitlines()
    if text.endswith('\n'):
        lines.append('')
    return lines
"def",
"cell_source",
"(",
"cell",
")",
":",
"source",
"=",
"cell",
".",
"source",
"if",
"source",
"==",
"''",
":",
"return",
"[",
"''",
"]",
"if",
"source",
".",
"endswith",
"(",
"'\\n'",
")",
":",
"return",
"source",
".",
"splitlines",
"(",
")",
... | 31.75 | 12.125 |
def validate_password_reset(cls, code, new_password):
    """
    Look up the password-reset entry for *code* and, when its token is
    still valid, replace the matching user's password with *new_password*.

    Returns the updated user on success, otherwise ``None``.  The reset
    entry is deleted whether it was consumed or found expired/invalid.
    """
    reset_entry = PasswordResetModel.where_code(code)
    if reset_entry is None:
        return None
    jwt = JWT()
    if jwt.verify_token(reset_entry.token):
        user = cls.where_id(jwt.data['data']['user_id'])
        if user is not None:
            user.set_password(new_password)
            PasswordResetModel.delete_where_user_id(user.id)
            return user
    reset_entry.delete()  # discard the expired/invalid token
    return None
"def",
"validate_password_reset",
"(",
"cls",
",",
"code",
",",
"new_password",
")",
":",
"password_reset_model",
"=",
"PasswordResetModel",
".",
"where_code",
"(",
"code",
")",
"if",
"password_reset_model",
"is",
"None",
":",
"return",
"None",
"jwt",
"=",
"JWT"... | 41.842105 | 13.526316 |
def pop_frame(self):
    """
    Remove the innermost contextual frame from the stack, subtract its
    offset from the running total, and move the cursor back to the
    frame's recorded return position.

    Raises IndexError when the frame stack is empty.
    """
    if not self._frames:
        raise IndexError('no frames to pop')
    offset, return_pos = self._frames.pop()
    self._total_offset -= offset
    self.seek(return_pos)
"def",
"pop_frame",
"(",
"self",
")",
":",
"try",
":",
"offset",
",",
"return_pos",
"=",
"self",
".",
"_frames",
".",
"pop",
"(",
")",
"except",
"IndexError",
":",
"raise",
"IndexError",
"(",
"'no frames to pop'",
")",
"self",
".",
"_total_offset",
"-=",
... | 30.333333 | 14.666667 |
def _CheckIfHuntTaskWasAssigned(self, client_id, hunt_id):
    """Will return True if hunt's task was assigned to this client before."""
    if not data_store.RelationalDBEnabled():
        # Legacy AFF4 path: the hunt leaves a flow symlink under the client
        # namespace; any Stat() hit means the task was assigned.
        client_urn = rdfvalue.RDFURN(client_id)
        flow_urn = client_urn.Add(
            "flows/%s:hunt" % rdfvalue.RDFURN(hunt_id).Basename())
        for _ in aff4.FACTORY.Stat([flow_urn]):
            return True
        return False
    flow_id = hunt_id
    if hunt.IsLegacyHunt(hunt_id):
        flow_id = flow_id[2:]  # drop the "H:" prefix
    try:
        data_store.REL_DB.ReadFlowObject(client_id, flow_id)
    except db.UnknownFlowError:
        return False
    return True
"def",
"_CheckIfHuntTaskWasAssigned",
"(",
"self",
",",
"client_id",
",",
"hunt_id",
")",
":",
"if",
"data_store",
".",
"RelationalDBEnabled",
"(",
")",
":",
"flow_id",
"=",
"hunt_id",
"if",
"hunt",
".",
"IsLegacyHunt",
"(",
"hunt_id",
")",
":",
"# Strip \"H:\... | 30.285714 | 19.285714 |
def getContext(self, context_name = 'default'):
    """Return the context registered under *context_name*.

    The default context is created lazily on first request.

    Params:
        context_name (string):
            Context name

    Raises:
        KeyError:
            If the context name does not exist

    Returns:
        bubbler.Bubbler:
            Named context
    """
    default_missing = (context_name == 'default'
                       and 'default' not in self.contexts)
    if default_missing:
        # Calling the instance registers a fresh default context.
        self('default')
    return self.contexts[context_name]
"def",
"getContext",
"(",
"self",
",",
"context_name",
"=",
"'default'",
")",
":",
"if",
"context_name",
"==",
"'default'",
"and",
"'default'",
"not",
"in",
"self",
".",
"contexts",
":",
"self",
"(",
"'default'",
")",
"return",
"self",
".",
"contexts",
"["... | 22.105263 | 21.684211 |
def lattice(self, lattice):
    """
    Set the Lattice associated with this PeriodicSite and refresh the
    cached Cartesian coordinates from the stored fractional coordinates.
    """
    self._lattice = lattice
    # Keep the Cartesian representation in sync with the new lattice.
    self._coords = lattice.get_cartesian_coords(self._frac_coords)
"def",
"lattice",
"(",
"self",
",",
"lattice",
")",
":",
"self",
".",
"_lattice",
"=",
"lattice",
"self",
".",
"_coords",
"=",
"self",
".",
"_lattice",
".",
"get_cartesian_coords",
"(",
"self",
".",
"_frac_coords",
")"
] | 34.166667 | 11.166667 |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.