id_within_dataset int64 1 55.5k | snippet stringlengths 19 14.2k | tokens listlengths 6 1.63k | nl stringlengths 6 352 | split_within_dataset stringclasses 1
value | is_duplicated bool 2
classes |
|---|---|---|---|---|---|
43,760 | def osx_clang_fix():
if (sys.platform != 'darwin'):
return
if (sys.version_info[0] >= 3):
from subprocess import getoutput
else:
from commands import getoutput
cc = getoutput('cc -v')
if (('gcc' in cc) or ('clang' not in cc)):
return
for flag in ['CFLAGS', 'CPPFLAGS']:
if (flag not in os.environ):
os.environ[flag] = '-Qunused-arguments'
elif ('-Qunused-arguments' not in os.environ[flag]):
os.environ[flag] += ' -Qunused-arguments'
| [
"def",
"osx_clang_fix",
"(",
")",
":",
"if",
"(",
"sys",
".",
"platform",
"!=",
"'darwin'",
")",
":",
"return",
"if",
"(",
"sys",
".",
"version_info",
"[",
"0",
"]",
">=",
"3",
")",
":",
"from",
"subprocess",
"import",
"getoutput",
"else",
":",
"from... | add clang switch to ignore unused arguments to avoid os x compile error . | train | false |
43,761 | def assertRaisesRegexp(_exception, _regexp, _callable=None, *args, **kwargs):
manager = _AssertRaisesContextmanager(exception=_exception, regexp=_regexp)
if (_callable is not None):
with manager:
_callable(*args, **kwargs)
else:
return manager
| [
"def",
"assertRaisesRegexp",
"(",
"_exception",
",",
"_regexp",
",",
"_callable",
"=",
"None",
",",
"*",
"args",
",",
"**",
"kwargs",
")",
":",
"manager",
"=",
"_AssertRaisesContextmanager",
"(",
"exception",
"=",
"_exception",
",",
"regexp",
"=",
"_regexp",
... | port of assertraisesregexp from unittest in python 2 . | train | false |
43,763 | def iter_and_close(file_like, block_size):
while 1:
try:
block = file_like.read(block_size)
if block:
(yield block)
else:
raise StopIteration
except StopIteration as si:
file_like.close()
return
| [
"def",
"iter_and_close",
"(",
"file_like",
",",
"block_size",
")",
":",
"while",
"1",
":",
"try",
":",
"block",
"=",
"file_like",
".",
"read",
"(",
"block_size",
")",
"if",
"block",
":",
"(",
"yield",
"block",
")",
"else",
":",
"raise",
"StopIteration",
... | yield file contents by block then close the file . | train | true |
43,764 | def critical(title, message=None, details=None):
if (message is None):
message = title
mbox = ResizeableMessageBox(active_window())
mbox.setWindowTitle(title)
mbox.setTextFormat(Qt.PlainText)
mbox.setText(message)
mbox.setIcon(QtWidgets.QMessageBox.Critical)
mbox.setStandardButtons(QtWidgets.QMessageBox.Close)
mbox.setDefaultButton(QtWidgets.QMessageBox.Close)
if details:
mbox.setDetailedText(details)
mbox.exec_()
| [
"def",
"critical",
"(",
"title",
",",
"message",
"=",
"None",
",",
"details",
"=",
"None",
")",
":",
"if",
"(",
"message",
"is",
"None",
")",
":",
"message",
"=",
"title",
"mbox",
"=",
"ResizeableMessageBox",
"(",
"active_window",
"(",
")",
")",
"mbox"... | log a message with severity critical on the root logger . | train | false |
43,765 | def compose_diffs(iterable_of_diffs):
return Diff(changes=reduce((lambda x, y: x.extend(y.changes)), iterable_of_diffs, pvector().evolver()).persistent())
| [
"def",
"compose_diffs",
"(",
"iterable_of_diffs",
")",
":",
"return",
"Diff",
"(",
"changes",
"=",
"reduce",
"(",
"(",
"lambda",
"x",
",",
"y",
":",
"x",
".",
"extend",
"(",
"y",
".",
"changes",
")",
")",
",",
"iterable_of_diffs",
",",
"pvector",
"(",
... | compose multiple diff objects into a single diff . | train | false |
43,766 | @sync_performer
def perform_delete_s3_keys(dispatcher, intent):
s3 = boto.connect_s3()
bucket = s3.get_bucket(intent.bucket)
bucket.delete_keys([(intent.prefix + key) for key in intent.keys])
| [
"@",
"sync_performer",
"def",
"perform_delete_s3_keys",
"(",
"dispatcher",
",",
"intent",
")",
":",
"s3",
"=",
"boto",
".",
"connect_s3",
"(",
")",
"bucket",
"=",
"s3",
".",
"get_bucket",
"(",
"intent",
".",
"bucket",
")",
"bucket",
".",
"delete_keys",
"("... | see :class:deletes3keys . | train | false |
43,767 | def _rewrite_inversion(fac, po, g, x):
(_, s) = _get_coeff_exp(po, x)
(a, b) = _get_coeff_exp(g.argument, x)
def tr(l):
return [(t + (s / b)) for t in l]
return (powdenest((fac / (a ** (s / b))), polar=True), meijerg(tr(g.an), tr(g.aother), tr(g.bm), tr(g.bother), g.argument))
| [
"def",
"_rewrite_inversion",
"(",
"fac",
",",
"po",
",",
"g",
",",
"x",
")",
":",
"(",
"_",
",",
"s",
")",
"=",
"_get_coeff_exp",
"(",
"po",
",",
"x",
")",
"(",
"a",
",",
"b",
")",
"=",
"_get_coeff_exp",
"(",
"g",
".",
"argument",
",",
"x",
"... | absorb po == x**s into g . | train | false |
43,768 | def py3_path(path):
if isinstance(path, six.text_type):
return path
assert isinstance(path, bytes)
if six.PY2:
return path
return os.fsdecode(path)
| [
"def",
"py3_path",
"(",
"path",
")",
":",
"if",
"isinstance",
"(",
"path",
",",
"six",
".",
"text_type",
")",
":",
"return",
"path",
"assert",
"isinstance",
"(",
"path",
",",
"bytes",
")",
"if",
"six",
".",
"PY2",
":",
"return",
"path",
"return",
"os... | convert a bytestring path to unicode on python 3 only . | train | false |
43,769 | def create_csv_response(filename, header, datarows):
response = HttpResponse(content_type='text/csv')
response['Content-Disposition'] = 'attachment; filename={0}'.format(filename)
csvwriter = csv.writer(response, dialect='excel', quotechar='"', quoting=csv.QUOTE_ALL)
encoded_header = [unicode(s).encode('utf-8') for s in header]
csvwriter.writerow(encoded_header)
for datarow in datarows:
encoded_row = [unicode(s).encode('utf-8') for s in datarow]
csvwriter.writerow(encoded_row)
return response
| [
"def",
"create_csv_response",
"(",
"filename",
",",
"header",
",",
"datarows",
")",
":",
"response",
"=",
"HttpResponse",
"(",
"content_type",
"=",
"'text/csv'",
")",
"response",
"[",
"'Content-Disposition'",
"]",
"=",
"'attachment; filename={0}'",
".",
"format",
... | create an httpresponse with an attached . | train | false |
43,770 | def GenerateTab(depth):
tab_list = []
if (depth > 0):
tab_list.append(' ')
tab_list.append(('| ' * depth))
tab_list.append('+--')
return ''.join(tab_list)
| [
"def",
"GenerateTab",
"(",
"depth",
")",
":",
"tab_list",
"=",
"[",
"]",
"if",
"(",
"depth",
">",
"0",
")",
":",
"tab_list",
".",
"append",
"(",
"' '",
")",
"tab_list",
".",
"append",
"(",
"(",
"'| '",
"*",
"depth",
")",
")",
"tab_list",
".",
"... | generate tabs to represent branching to children . | train | false |
43,771 | @click.command(u'reinstall')
@click.option(u'--admin-password', help=u'Administrator Password for reinstalled site')
@click.option(u'--yes', is_flag=True, default=False, help=u'Pass --yes to skip confirmation')
@pass_context
def reinstall(context, admin_password=None, yes=False):
if (not yes):
click.confirm(u'This will wipe your database. Are you sure you want to reinstall?', abort=True)
site = get_site(context)
try:
frappe.init(site=site)
frappe.connect()
frappe.clear_cache()
installed = frappe.get_installed_apps()
frappe.clear_cache()
except Exception:
installed = []
finally:
if frappe.db:
frappe.db.close()
frappe.destroy()
frappe.init(site=site)
_new_site(frappe.conf.db_name, site, verbose=context.verbose, force=True, reinstall=True, install_apps=installed, admin_password=admin_password)
| [
"@",
"click",
".",
"command",
"(",
"u'reinstall'",
")",
"@",
"click",
".",
"option",
"(",
"u'--admin-password'",
",",
"help",
"=",
"u'Administrator Password for reinstalled site'",
")",
"@",
"click",
".",
"option",
"(",
"u'--yes'",
",",
"is_flag",
"=",
"True",
... | reinstall site ie . | train | false |
43,772 | def safe_encode(text, incoming=None, encoding='utf-8', errors='strict'):
if (not isinstance(text, basestring)):
raise TypeError(("%s can't be encoded" % type(text)))
if (not incoming):
incoming = (sys.stdin.encoding or sys.getdefaultencoding())
if isinstance(text, unicode):
return text.encode(encoding, errors)
elif (text and (encoding != incoming)):
text = safe_decode(text, incoming, errors)
return text.encode(encoding, errors)
return text
| [
"def",
"safe_encode",
"(",
"text",
",",
"incoming",
"=",
"None",
",",
"encoding",
"=",
"'utf-8'",
",",
"errors",
"=",
"'strict'",
")",
":",
"if",
"(",
"not",
"isinstance",
"(",
"text",
",",
"basestring",
")",
")",
":",
"raise",
"TypeError",
"(",
"(",
... | safely decodes a binary string to unicode . | train | false |
43,773 | def count_from_0(index, collection):
return index
| [
"def",
"count_from_0",
"(",
"index",
",",
"collection",
")",
":",
"return",
"index"
] | numbering function: consecutive integers starting at 0 . | train | false |
43,777 | def get_is_plural_num(match):
plural_str = match.group(2).strip()
if (plural_str not in _IS_PLURAL_NUM):
holds_num = _check_plural_arg_is_num(plural_str)
if (holds_num is None):
holds_num = prompt_user(('Ambiguous: Does %s handle a number? (y/[n]) ' % plural_str), default='n')
holds_num = ('y' in holds_num)
_IS_PLURAL_NUM[plural_str] = holds_num
return _IS_PLURAL_NUM[plural_str]
| [
"def",
"get_is_plural_num",
"(",
"match",
")",
":",
"plural_str",
"=",
"match",
".",
"group",
"(",
"2",
")",
".",
"strip",
"(",
")",
"if",
"(",
"plural_str",
"not",
"in",
"_IS_PLURAL_NUM",
")",
":",
"holds_num",
"=",
"_check_plural_arg_is_num",
"(",
"plura... | prompt to user for help in determining if the argument to plural() is a number . | train | false |
43,778 | @pytest.mark.parametrize('text, deleted, rest', [('test delete|foobar', 'delete', 'test |foobar'), ('test delete |foobar', 'delete ', 'test |foobar'), ('open -t github.com/foo/bar |', 'github.com/foo/bar ', 'open -t |'), ('open -t |github.com/foo/bar', '-t ', 'open |github.com/foo/bar'), fixme(('test del<ete>foobar', 'delete', 'test |foobar')), ('test del<ete >foobar', 'del', 'test |ete foobar')])
def test_rl_unix_word_rubout(lineedit, bridge, text, deleted, rest):
_validate_deletion(lineedit, bridge, bridge.rl_unix_word_rubout, text, deleted, rest)
| [
"@",
"pytest",
".",
"mark",
".",
"parametrize",
"(",
"'text, deleted, rest'",
",",
"[",
"(",
"'test delete|foobar'",
",",
"'delete'",
",",
"'test |foobar'",
")",
",",
"(",
"'test delete |foobar'",
",",
"'delete '",
",",
"'test |foobar'",
")",
",",
"(",
"'open -t... | delete to word beginning and see if it comes back with yank . | train | false |
43,780 | def unspew():
sys.settrace(None)
| [
"def",
"unspew",
"(",
")",
":",
"sys",
".",
"settrace",
"(",
"None",
")"
] | remove the trace hook installed by spew . | train | false |
43,781 | @testing.requires_testing_data
def test_triangle_neighbors():
this = read_source_spaces(fname)[0]
this['neighbor_tri'] = [list() for _ in range(this['np'])]
for p in range(this['ntri']):
verts = this['tris'][p]
this['neighbor_tri'][verts[0]].append(p)
this['neighbor_tri'][verts[1]].append(p)
this['neighbor_tri'][verts[2]].append(p)
this['neighbor_tri'] = [np.array(nb, int) for nb in this['neighbor_tri']]
neighbor_tri = _triangle_neighbors(this['tris'], this['np'])
assert_true((np.array_equal(nt1, nt2) for (nt1, nt2) in zip(neighbor_tri, this['neighbor_tri'])))
| [
"@",
"testing",
".",
"requires_testing_data",
"def",
"test_triangle_neighbors",
"(",
")",
":",
"this",
"=",
"read_source_spaces",
"(",
"fname",
")",
"[",
"0",
"]",
"this",
"[",
"'neighbor_tri'",
"]",
"=",
"[",
"list",
"(",
")",
"for",
"_",
"in",
"range",
... | test efficient vertex neighboring triangles for surfaces . | train | false |
43,782 | def in6_isuladdr(str):
return in6_isincluded(str, 'fc00::', 7)
| [
"def",
"in6_isuladdr",
"(",
"str",
")",
":",
"return",
"in6_isincluded",
"(",
"str",
",",
"'fc00::'",
",",
"7",
")"
] | returns true if provided address in printable format belongs to unique local address space . | train | false |
43,783 | def unicode_to_ascii_url(url, prepend_scheme):
groups = url_split_regex.match(url).groups()
if (not groups[3]):
scheme_to_prepend = (prepend_scheme or 'http')
groups = url_split_regex.match(((to_unicode(scheme_to_prepend) + u'://') + url)).groups()
if (not groups[3]):
raise Exception(('No authority component found, ' + 'could not decode unicode to US-ASCII'))
scheme = groups[1]
authority = groups[3]
path = (groups[4] or '')
query = (groups[5] or '')
fragment = (groups[7] or '')
if prepend_scheme:
scheme = (str(scheme) + '://')
else:
scheme = ''
return ((((scheme + unicode_to_ascii_authority(authority)) + escape_unicode(path)) + escape_unicode(query)) + str(fragment))
| [
"def",
"unicode_to_ascii_url",
"(",
"url",
",",
"prepend_scheme",
")",
":",
"groups",
"=",
"url_split_regex",
".",
"match",
"(",
"url",
")",
".",
"groups",
"(",
")",
"if",
"(",
"not",
"groups",
"[",
"3",
"]",
")",
":",
"scheme_to_prepend",
"=",
"(",
"p... | converts the inputed unicode url into a us-ascii equivalent . | train | false |
43,785 | @profiler.trace
def image_update_properties(request, image_id, remove_props=None, **kwargs):
return glanceclient(request, '2').images.update(image_id, remove_props, **kwargs)
| [
"@",
"profiler",
".",
"trace",
"def",
"image_update_properties",
"(",
"request",
",",
"image_id",
",",
"remove_props",
"=",
"None",
",",
"**",
"kwargs",
")",
":",
"return",
"glanceclient",
"(",
"request",
",",
"'2'",
")",
".",
"images",
".",
"update",
"(",... | add or update a custom property of an image . | train | true |
43,786 | def extract_snapshot(disk_path, source_fmt, snapshot_name, out_path, dest_fmt):
if (dest_fmt == 'iso'):
dest_fmt = 'raw'
qemu_img_cmd = ('qemu-img', 'convert', '-f', source_fmt, '-O', dest_fmt)
if (CONF.libvirt_snapshot_compression and (dest_fmt == 'qcow2')):
qemu_img_cmd += ('-c',)
if (snapshot_name is not None):
qemu_img_cmd += ('-s', snapshot_name)
qemu_img_cmd += (disk_path, out_path)
execute(*qemu_img_cmd)
| [
"def",
"extract_snapshot",
"(",
"disk_path",
",",
"source_fmt",
",",
"snapshot_name",
",",
"out_path",
",",
"dest_fmt",
")",
":",
"if",
"(",
"dest_fmt",
"==",
"'iso'",
")",
":",
"dest_fmt",
"=",
"'raw'",
"qemu_img_cmd",
"=",
"(",
"'qemu-img'",
",",
"'convert... | extract a named snapshot from a disk image . | train | false |
43,788 | def init_viewer(dataset, rows, cols):
m = (rows * cols)
vis_batch = dataset.get_batch_topo(m)
(_, patch_rows, patch_cols, channels) = vis_batch.shape
assert (_ == m)
mapback = hasattr(dataset, 'mapback_for_viewer')
pv = PatchViewer((rows, (cols * (1 + mapback))), (patch_rows, patch_cols), is_color=(channels == 3))
return pv
| [
"def",
"init_viewer",
"(",
"dataset",
",",
"rows",
",",
"cols",
")",
":",
"m",
"=",
"(",
"rows",
"*",
"cols",
")",
"vis_batch",
"=",
"dataset",
".",
"get_batch_topo",
"(",
"m",
")",
"(",
"_",
",",
"patch_rows",
",",
"patch_cols",
",",
"channels",
")"... | initialisation of the patchviewer with given rows and columns . | train | false |
43,789 | def calculate_virtual_free_capacity(total_capacity, free_capacity, provisioned_capacity, thin_provisioning_support, max_over_subscription_ratio, reserved_percentage, thin):
total = float(total_capacity)
reserved = (float(reserved_percentage) / 100)
if (thin and thin_provisioning_support):
free = (((total * max_over_subscription_ratio) - provisioned_capacity) - math.floor((total * reserved)))
else:
free = (free_capacity - math.floor((total * reserved)))
return free
| [
"def",
"calculate_virtual_free_capacity",
"(",
"total_capacity",
",",
"free_capacity",
",",
"provisioned_capacity",
",",
"thin_provisioning_support",
",",
"max_over_subscription_ratio",
",",
"reserved_percentage",
",",
"thin",
")",
":",
"total",
"=",
"float",
"(",
"total_... | calculate the virtual free capacity based on thin provisioning support . | train | false |
43,790 | def test_pick_events():
events = np.array([[1, 0, 1], [2, 1, 0], [3, 0, 4], [4, 4, 2], [5, 2, 0]])
assert_array_equal(pick_events(events, include=[1, 4], exclude=4), [[1, 0, 1], [3, 0, 4]])
assert_array_equal(pick_events(events, exclude=[0, 2]), [[1, 0, 1], [3, 0, 4]])
assert_array_equal(pick_events(events, include=[1, 2], step=True), [[1, 0, 1], [2, 1, 0], [4, 4, 2], [5, 2, 0]])
| [
"def",
"test_pick_events",
"(",
")",
":",
"events",
"=",
"np",
".",
"array",
"(",
"[",
"[",
"1",
",",
"0",
",",
"1",
"]",
",",
"[",
"2",
",",
"1",
",",
"0",
"]",
",",
"[",
"3",
",",
"0",
",",
"4",
"]",
",",
"[",
"4",
",",
"4",
",",
"2... | test pick events in a events ndarray . | train | false |
43,792 | def repo_run_command(repo, cmd, ignore_status=False, cd=True):
os_dep.command('ssh')
repo = repo.strip()
run_cmd = None
cd_str = ''
if repo.startswith('ssh://'):
username = None
(hostline, remote_path) = parse_ssh_path(repo)
if cd:
cd_str = ('cd %s && ' % remote_path)
if ('@' in hostline):
(username, host) = hostline.split('@')
run_cmd = ('ssh %s@%s "%s%s"' % (username, host, cd_str, cmd))
else:
run_cmd = ('ssh %s "%s%s"' % (hostline, cd_str, cmd))
else:
if cd:
cd_str = ('cd %s && ' % repo)
run_cmd = ('%s%s' % (cd_str, cmd))
if run_cmd:
return utils.run(run_cmd, ignore_status=ignore_status)
| [
"def",
"repo_run_command",
"(",
"repo",
",",
"cmd",
",",
"ignore_status",
"=",
"False",
",",
"cd",
"=",
"True",
")",
":",
"os_dep",
".",
"command",
"(",
"'ssh'",
")",
"repo",
"=",
"repo",
".",
"strip",
"(",
")",
"run_cmd",
"=",
"None",
"cd_str",
"=",... | run a command relative to the repo path this is basically a utils . | train | false |
43,793 | @profiler.trace
def remove_tenant_from_flavor(request, flavor, tenant):
return novaclient(request).flavor_access.remove_tenant_access(flavor=flavor, tenant=tenant)
| [
"@",
"profiler",
".",
"trace",
"def",
"remove_tenant_from_flavor",
"(",
"request",
",",
"flavor",
",",
"tenant",
")",
":",
"return",
"novaclient",
"(",
"request",
")",
".",
"flavor_access",
".",
"remove_tenant_access",
"(",
"flavor",
"=",
"flavor",
",",
"tenan... | remove a tenant from the given flavor access list . | train | false |
43,795 | def postChunked(host, selector, fields, files):
params = urllib.urlencode(fields)
url = ('http://%s%s?%s' % (host, selector, params))
u = urllib2.urlopen(url, files)
result = u.read()
[fp.close() for (key, fp) in files]
return result
| [
"def",
"postChunked",
"(",
"host",
",",
"selector",
",",
"fields",
",",
"files",
")",
":",
"params",
"=",
"urllib",
".",
"urlencode",
"(",
"fields",
")",
"url",
"=",
"(",
"'http://%s%s?%s'",
"%",
"(",
"host",
",",
"selector",
",",
"params",
")",
")",
... | attempt to replace postmultipart() with nearly-identical interface . | train | true |
43,796 | def is_pid_cmdline_correct(pid, match):
try:
with open(('/proc/%d/cmdline' % pid)) as f:
cmdline = f.read()
return (match in cmdline)
except EnvironmentError:
return False
| [
"def",
"is_pid_cmdline_correct",
"(",
"pid",
",",
"match",
")",
":",
"try",
":",
"with",
"open",
"(",
"(",
"'/proc/%d/cmdline'",
"%",
"pid",
")",
")",
"as",
"f",
":",
"cmdline",
"=",
"f",
".",
"read",
"(",
")",
"return",
"(",
"match",
"in",
"cmdline"... | ensure that the cmdline for a pid seems sane because pids are recycled . | train | false |
43,797 | def uslugify(text, sep):
if (text is None):
return u''
tag_id = RE_TAGS.sub(u'', unicodedata.normalize(u'NFKD', text)).strip().lower()
return RE_WORD.sub(u'', tag_id).replace(u' ', sep)
| [
"def",
"uslugify",
"(",
"text",
",",
"sep",
")",
":",
"if",
"(",
"text",
"is",
"None",
")",
":",
"return",
"u''",
"tag_id",
"=",
"RE_TAGS",
".",
"sub",
"(",
"u''",
",",
"unicodedata",
".",
"normalize",
"(",
"u'NFKD'",
",",
"text",
")",
")",
".",
... | unicode slugify . | train | false |
43,798 | def min_maximal_matching(G):
return nx.maximal_matching(G)
| [
"def",
"min_maximal_matching",
"(",
"G",
")",
":",
"return",
"nx",
".",
"maximal_matching",
"(",
"G",
")"
] | returns the minimum maximal matching of g . | train | false |
43,799 | def unary_concept(label, subj, records):
c = Concept(label, arity=1, extension=set())
for record in records:
c.augment(record[subj])
return c
| [
"def",
"unary_concept",
"(",
"label",
",",
"subj",
",",
"records",
")",
":",
"c",
"=",
"Concept",
"(",
"label",
",",
"arity",
"=",
"1",
",",
"extension",
"=",
"set",
"(",
")",
")",
"for",
"record",
"in",
"records",
":",
"c",
".",
"augment",
"(",
... | make a unary concept out of the primary key in a record . | train | false |
43,800 | def update_target_unit_index(writer, unit):
writer.update_document(pk=unit.pk, target=force_text(unit.target), comment=force_text(unit.comment))
| [
"def",
"update_target_unit_index",
"(",
"writer",
",",
"unit",
")",
":",
"writer",
".",
"update_document",
"(",
"pk",
"=",
"unit",
".",
"pk",
",",
"target",
"=",
"force_text",
"(",
"unit",
".",
"target",
")",
",",
"comment",
"=",
"force_text",
"(",
"unit... | updates target index for given unit . | train | false |
43,804 | def ExecuteOrImportScript(config, handler_path, cgi_path, import_hook):
(module_fullname, script_module, module_code) = LoadTargetModule(handler_path, cgi_path, import_hook)
script_module.__name__ = '__main__'
sys.modules['__main__'] = script_module
try:
import pdb
MonkeyPatchPdb(pdb)
if module_code:
exec module_code in script_module.__dict__
else:
script_module.main()
sys.stdout.flush()
sys.stdout.seek(0)
try:
headers = mimetools.Message(sys.stdout)
finally:
sys.stdout.seek(0, 2)
status_header = headers.get('status')
error_response = False
if status_header:
try:
status_code = int(status_header.split(' ', 1)[0])
error_response = (status_code >= 400)
except ValueError:
error_response = True
if (not error_response):
try:
parent_package = import_hook.GetParentPackage(module_fullname)
except Exception:
parent_package = None
if (parent_package is not None):
submodule = GetSubmoduleName(module_fullname)
setattr(parent_package, submodule, script_module)
return error_response
finally:
script_module.__name__ = module_fullname
| [
"def",
"ExecuteOrImportScript",
"(",
"config",
",",
"handler_path",
",",
"cgi_path",
",",
"import_hook",
")",
":",
"(",
"module_fullname",
",",
"script_module",
",",
"module_code",
")",
"=",
"LoadTargetModule",
"(",
"handler_path",
",",
"cgi_path",
",",
"import_ho... | executes a cgi script by importing it as a new module . | train | false |
43,805 | def rewrite_asserts(mod, module_path=None, config=None):
AssertionRewriter(module_path, config).run(mod)
| [
"def",
"rewrite_asserts",
"(",
"mod",
",",
"module_path",
"=",
"None",
",",
"config",
"=",
"None",
")",
":",
"AssertionRewriter",
"(",
"module_path",
",",
"config",
")",
".",
"run",
"(",
"mod",
")"
] | rewrite the assert statements in mod . | train | false |
43,806 | def warn_or_raise(warning_class, exception_class=None, args=(), config=None, pos=None, stacklevel=1):
if (config is None):
config = {}
if config.get(u'pedantic'):
if (exception_class is None):
exception_class = warning_class
vo_raise(exception_class, args, config, pos)
else:
vo_warn(warning_class, args, config, pos, stacklevel=(stacklevel + 1))
| [
"def",
"warn_or_raise",
"(",
"warning_class",
",",
"exception_class",
"=",
"None",
",",
"args",
"=",
"(",
")",
",",
"config",
"=",
"None",
",",
"pos",
"=",
"None",
",",
"stacklevel",
"=",
"1",
")",
":",
"if",
"(",
"config",
"is",
"None",
")",
":",
... | warn or raise an exception . | train | false |
43,807 | def download_avatar_image(user, size):
url = avatar_for_email(user.email, size)
request = Request(url)
request.timeout = 0.5
request.add_header(u'User-Agent', USER_AGENT)
handle = urlopen(request)
return handle.read()
| [
"def",
"download_avatar_image",
"(",
"user",
",",
"size",
")",
":",
"url",
"=",
"avatar_for_email",
"(",
"user",
".",
"email",
",",
"size",
")",
"request",
"=",
"Request",
"(",
"url",
")",
"request",
".",
"timeout",
"=",
"0.5",
"request",
".",
"add_heade... | downloads avatar image from remote server . | train | false |
43,811 | def test_guess_fail():
with pytest.raises(ascii.InconsistentTableError) as err:
ascii.read('asfdasdf\n1 2 3', format='basic')
assert ('** To figure out why the table did not read, use guess=False and' in str(err.value))
with pytest.raises(ValueError) as err:
ascii.read('asfdasdf\n1 2 3', format='ipac')
assert ('At least one header line beginning and ending with delimiter required' in str(err.value))
with pytest.raises(ValueError) as err:
ascii.read('asfdasdf\n1 2 3', format='basic', quotechar='"', delimiter=' ', fast_reader=False)
assert ('Number of header columns (1) inconsistent with data columns (3)' in str(err.value))
| [
"def",
"test_guess_fail",
"(",
")",
":",
"with",
"pytest",
".",
"raises",
"(",
"ascii",
".",
"InconsistentTableError",
")",
"as",
"err",
":",
"ascii",
".",
"read",
"(",
"'asfdasdf\\n1 2 3'",
",",
"format",
"=",
"'basic'",
")",
"assert",
"(",
"'** To figure o... | check the error message when guess fails . | train | false |
43,812 | def ddt_named(parent, child):
args = RenamedTuple([parent, child])
args.__name__ = 'parent_{}_child_{}'.format(parent, child)
return args
| [
"def",
"ddt_named",
"(",
"parent",
",",
"child",
")",
":",
"args",
"=",
"RenamedTuple",
"(",
"[",
"parent",
",",
"child",
"]",
")",
"args",
".",
"__name__",
"=",
"'parent_{}_child_{}'",
".",
"format",
"(",
"parent",
",",
"child",
")",
"return",
"args"
] | helper to get more readable dynamically-generated test names from ddt . | train | false |
43,813 | def parse_date_time(d, t, network):
if (not network_dict):
load_network_dict()
parsed_time = time_regex.search(t)
network_tz = get_network_timezone(network)
hr = 0
m = 0
if parsed_time:
hr = try_int(parsed_time.group('hour'))
m = try_int(parsed_time.group('minute'))
ap = parsed_time.group('meridiem')
ap = (ap[0].lower() if ap else '')
if ((ap == 'a') and (hr == 12)):
hr -= 12
elif ((ap == 'p') and (hr != 12)):
hr += 12
hr = (hr if (0 <= hr <= 23) else 0)
m = (m if (0 <= m <= 59) else 0)
result = datetime.datetime.fromordinal(max(try_int(d), 1))
return result.replace(hour=hr, minute=m, tzinfo=network_tz)
| [
"def",
"parse_date_time",
"(",
"d",
",",
"t",
",",
"network",
")",
":",
"if",
"(",
"not",
"network_dict",
")",
":",
"load_network_dict",
"(",
")",
"parsed_time",
"=",
"time_regex",
".",
"search",
"(",
"t",
")",
"network_tz",
"=",
"get_network_timezone",
"(... | parse date and time string into local time . | train | false |
43,815 | def is_bipartite(G):
try:
color(G)
return True
except nx.NetworkXError:
return False
| [
"def",
"is_bipartite",
"(",
"G",
")",
":",
"try",
":",
"color",
"(",
"G",
")",
"return",
"True",
"except",
"nx",
".",
"NetworkXError",
":",
"return",
"False"
] | returns true if graph g is bipartite . | train | false |
43,816 | def get_classes(module, superclass=None):
objects = [getattr(module, name) for name in dir(module) if (not name.startswith('_'))]
classes = [obj for obj in objects if (isinstance(obj, type) and (obj.__module__ == module.__name__))]
if (superclass is not None):
classes = [cls for cls in classes if issubclass(cls, superclass)]
return classes
| [
"def",
"get_classes",
"(",
"module",
",",
"superclass",
"=",
"None",
")",
":",
"objects",
"=",
"[",
"getattr",
"(",
"module",
",",
"name",
")",
"for",
"name",
"in",
"dir",
"(",
"module",
")",
"if",
"(",
"not",
"name",
".",
"startswith",
"(",
"'_'",
... | return a list of new-style classes defined in *module* . | train | false |
43,818 | def atan(x):
np = import_module('numpy')
if isinstance(x, (int, float)):
return interval(np.arctan(x))
elif isinstance(x, interval):
start = np.arctan(x.start)
end = np.arctan(x.end)
return interval(start, end, is_valid=x.is_valid)
else:
raise NotImplementedError
| [
"def",
"atan",
"(",
"x",
")",
":",
"np",
"=",
"import_module",
"(",
"'numpy'",
")",
"if",
"isinstance",
"(",
"x",
",",
"(",
"int",
",",
"float",
")",
")",
":",
"return",
"interval",
"(",
"np",
".",
"arctan",
"(",
"x",
")",
")",
"elif",
"isinstanc... | evaluates the tan inverse of an interval . | train | false |
43,820 | def concat_tuples(*tuples):
return tuple(chain(*tuples))
| [
"def",
"concat_tuples",
"(",
"*",
"tuples",
")",
":",
"return",
"tuple",
"(",
"chain",
"(",
"*",
"tuples",
")",
")"
] | concatenate a sequence of tuples into one tuple . | train | false |
43,822 | def _format_local(local_path, local_is_path):
if local_is_path:
return local_path
else:
return getattr(local_path, 'name', '<file obj>')
| [
"def",
"_format_local",
"(",
"local_path",
",",
"local_is_path",
")",
":",
"if",
"local_is_path",
":",
"return",
"local_path",
"else",
":",
"return",
"getattr",
"(",
"local_path",
",",
"'name'",
",",
"'<file obj>'",
")"
] | format a path for log output . | train | false |
43,823 | def formatUnits(units):
pass
| [
"def",
"formatUnits",
"(",
"units",
")",
":",
"pass"
] | format a unit specification into a string . | train | false |
43,824 | @pytest.mark.network
def test_multiple_search(script):
output = script.pip('search', 'pip', 'INITools')
assert ('The PyPA recommended tool for installing Python packages.' in output.stdout)
assert ('Tools for parsing and using INI-style files' in output.stdout)
| [
"@",
"pytest",
".",
"mark",
".",
"network",
"def",
"test_multiple_search",
"(",
"script",
")",
":",
"output",
"=",
"script",
".",
"pip",
"(",
"'search'",
",",
"'pip'",
",",
"'INITools'",
")",
"assert",
"(",
"'The PyPA recommended tool for installing Python package... | test searching for multiple packages at once . | train | false |
43,825 | def temp_ampersand_fixer(s):
return s.replace('&', '&')
| [
"def",
"temp_ampersand_fixer",
"(",
"s",
")",
":",
"return",
"s",
".",
"replace",
"(",
"'&'",
",",
"'&'",
")"
] | as a workaround for ampersands stored as escape sequences in database . | train | false |
43,826 | def _item_to_entity(iterator, entity_pb):
return helpers.entity_from_protobuf(entity_pb)
| [
"def",
"_item_to_entity",
"(",
"iterator",
",",
"entity_pb",
")",
":",
"return",
"helpers",
".",
"entity_from_protobuf",
"(",
"entity_pb",
")"
] | convert a raw protobuf entity to the native object . | train | false |
43,827 | def _prepare_trellis(n_cells, max_col):
import matplotlib.pyplot as plt
if (n_cells == 1):
nrow = ncol = 1
elif (n_cells <= max_col):
(nrow, ncol) = (1, n_cells)
else:
(nrow, ncol) = (int(math.ceil((n_cells / float(max_col)))), max_col)
(fig, axes) = plt.subplots(nrow, ncol, figsize=(7.4, ((1.5 * nrow) + 1)))
axes = ([axes] if (ncol == nrow == 1) else axes.flatten())
for ax in axes[n_cells:]:
from .topomap import _hide_frame
_hide_frame(ax)
return (fig, axes)
| [
"def",
"_prepare_trellis",
"(",
"n_cells",
",",
"max_col",
")",
":",
"import",
"matplotlib",
".",
"pyplot",
"as",
"plt",
"if",
"(",
"n_cells",
"==",
"1",
")",
":",
"nrow",
"=",
"ncol",
"=",
"1",
"elif",
"(",
"n_cells",
"<=",
"max_col",
")",
":",
"(",... | aux function . | train | false |
43,828 | def CreateTag(region, resource_id, tag_name, tag_value):
ec2 = _Connect(region)
ec2.create_tags([resource_id], {tag_name: tag_value})
| [
"def",
"CreateTag",
"(",
"region",
",",
"resource_id",
",",
"tag_name",
",",
"tag_value",
")",
":",
"ec2",
"=",
"_Connect",
"(",
"region",
")",
"ec2",
".",
"create_tags",
"(",
"[",
"resource_id",
"]",
",",
"{",
"tag_name",
":",
"tag_value",
"}",
")"
] | create a tag for resource_id with specified name and value . | train | false |
43,830 | def NamesOfDeclaredKeyFlags():
return (NamesOfDefinedFlags() + DECLARED_KEY_FLAGS)
| [
"def",
"NamesOfDeclaredKeyFlags",
"(",
")",
":",
"return",
"(",
"NamesOfDefinedFlags",
"(",
")",
"+",
"DECLARED_KEY_FLAGS",
")"
] | returns: list of names of key flags for this module . | train | false |
43,831 | def initInstanceLimit(limitNamespace, instancemax):
global limitedNamespaces
if (not (limitNamespace in limitedNamespaces)):
limitedNamespaces[limitNamespace] = BoundedSemaphore(instancemax)
| [
"def",
"initInstanceLimit",
"(",
"limitNamespace",
",",
"instancemax",
")",
":",
"global",
"limitedNamespaces",
"if",
"(",
"not",
"(",
"limitNamespace",
"in",
"limitedNamespaces",
")",
")",
":",
"limitedNamespaces",
"[",
"limitNamespace",
"]",
"=",
"BoundedSemaphore... | initialize the instance-limited thread implementation . | train | false |
43,832 | def dict_raise_on_duplicates(ordered_pairs):
my_dict = dict()
for (key, values) in ordered_pairs:
if (key in my_dict):
raise ValueError('Duplicate key: {}'.format(key))
else:
my_dict[key] = values
return my_dict
| [
"def",
"dict_raise_on_duplicates",
"(",
"ordered_pairs",
")",
":",
"my_dict",
"=",
"dict",
"(",
")",
"for",
"(",
"key",
",",
"values",
")",
"in",
"ordered_pairs",
":",
"if",
"(",
"key",
"in",
"my_dict",
")",
":",
"raise",
"ValueError",
"(",
"'Duplicate key... | reject duplicate keys . | train | false |
43,833 | @hug.exception(FakeException)
def handle_exception(exception):
return True
| [
"@",
"hug",
".",
"exception",
"(",
"FakeException",
")",
"def",
"handle_exception",
"(",
"exception",
")",
":",
"return",
"True"
] | handles the provided exception for testing . | train | false |
43,834 | def is_dwm_compositing_enabled():
import ctypes
enabled = ctypes.c_bool()
try:
DwmIsCompositionEnabled = ctypes.windll.dwmapi.DwmIsCompositionEnabled
except (AttributeError, WindowsError):
return False
rval = DwmIsCompositionEnabled(ctypes.byref(enabled))
return ((rval == 0) and enabled.value)
| [
"def",
"is_dwm_compositing_enabled",
"(",
")",
":",
"import",
"ctypes",
"enabled",
"=",
"ctypes",
".",
"c_bool",
"(",
")",
"try",
":",
"DwmIsCompositionEnabled",
"=",
"ctypes",
".",
"windll",
".",
"dwmapi",
".",
"DwmIsCompositionEnabled",
"except",
"(",
"Attribu... | is desktop window manager compositing enabled . | train | false |
43,835 | def _get_file_mode(filename, default='readonly'):
mode = default
closed = fileobj_closed(filename)
fmode = fileobj_mode(filename)
if (fmode is not None):
mode = FILE_MODES.get(fmode)
if (mode is None):
raise IOError('File mode of the input file object ({!r}) cannot be used to read/write FITS files.'.format(fmode))
return (mode, closed)
| [
"def",
"_get_file_mode",
"(",
"filename",
",",
"default",
"=",
"'readonly'",
")",
":",
"mode",
"=",
"default",
"closed",
"=",
"fileobj_closed",
"(",
"filename",
")",
"fmode",
"=",
"fileobj_mode",
"(",
"filename",
")",
"if",
"(",
"fmode",
"is",
"not",
"None... | allow file object to already be opened in any of the valid modes and and leave the file in the same state as when the function was called . | train | false |
43,837 | def survey_getAllAnswersForQuestionInSeries(question_id, series_id):
s3db = current.s3db
ctable = s3db.survey_complete
atable = s3db.survey_answer
query = (((atable.question_id == question_id) & (atable.complete_id == ctable.id)) & (ctable.series_id == series_id))
rows = current.db(query).select(atable.id, atable.value, atable.complete_id)
answers = []
for row in rows:
answer = {}
answer['answer_id'] = row.id
answer['value'] = row.value
answer['complete_id'] = row.complete_id
answers.append(answer)
return answers
| [
"def",
"survey_getAllAnswersForQuestionInSeries",
"(",
"question_id",
",",
"series_id",
")",
":",
"s3db",
"=",
"current",
".",
"s3db",
"ctable",
"=",
"s3db",
".",
"survey_complete",
"atable",
"=",
"s3db",
".",
"survey_answer",
"query",
"=",
"(",
"(",
"(",
"ata... | function to return all the answers for a given question from with a specified series . | train | false |
43,839 | def deprecatedWorkerClassMethod(scope, method, compat_name=None):
method_name = method.__name__
compat_name = _compat_name(method_name, compat_name=compat_name)
assert (compat_name not in scope)
def old_method(self, *args, **kwargs):
reportDeprecatedWorkerNameUsage("'{old}' method is deprecated, use '{new}' instead.".format(new=method_name, old=compat_name))
return getattr(self, method_name)(*args, **kwargs)
functools.update_wrapper(old_method, method)
scope[compat_name] = old_method
| [
"def",
"deprecatedWorkerClassMethod",
"(",
"scope",
",",
"method",
",",
"compat_name",
"=",
"None",
")",
":",
"method_name",
"=",
"method",
".",
"__name__",
"compat_name",
"=",
"_compat_name",
"(",
"method_name",
",",
"compat_name",
"=",
"compat_name",
")",
"ass... | define old-named method inside class . | train | false |
43,840 | def generate_version(addon, app=None):
min_app_version = '4.0'
max_app_version = '50.0'
version = ('%.1f' % random.uniform(0, 2))
v = Version.objects.create(addon=addon, version=version)
v.created = v.last_updated = datetime.now()
v.save()
if (app is not None):
(av_min, _) = AppVersion.objects.get_or_create(application=app.id, version=min_app_version)
(av_max, _) = AppVersion.objects.get_or_create(application=app.id, version=max_app_version)
ApplicationsVersions.objects.get_or_create(application=app.id, version=v, min=av_min, max=av_max)
File.objects.create(filename=('%s-%s' % (v.addon_id, v.id)), version=v, platform=amo.PLATFORM_ALL.id, status=amo.STATUS_PUBLIC)
return v
| [
"def",
"generate_version",
"(",
"addon",
",",
"app",
"=",
"None",
")",
":",
"min_app_version",
"=",
"'4.0'",
"max_app_version",
"=",
"'50.0'",
"version",
"=",
"(",
"'%.1f'",
"%",
"random",
".",
"uniform",
"(",
"0",
",",
"2",
")",
")",
"v",
"=",
"Versio... | generate a version for the given addon and the optional app . | train | false |
43,841 | def fixpath(path):
norm = (osp.normcase if (os.name == 'nt') else osp.normpath)
return norm(osp.abspath(osp.realpath(path)))
| [
"def",
"fixpath",
"(",
"path",
")",
":",
"norm",
"=",
"(",
"osp",
".",
"normcase",
"if",
"(",
"os",
".",
"name",
"==",
"'nt'",
")",
"else",
"osp",
".",
"normpath",
")",
"return",
"norm",
"(",
"osp",
".",
"abspath",
"(",
"osp",
".",
"realpath",
"(... | normalize path fixing case . | train | true |
43,842 | def test_no_data_with_lists_of_nones(Chart):
chart = Chart()
chart.add('Serie1', [None, None, None, None])
chart.add('Serie2', [None, None, None])
q = chart.render_pyquery()
assert (q('.text-overlay text').text() == 'No data')
| [
"def",
"test_no_data_with_lists_of_nones",
"(",
"Chart",
")",
":",
"chart",
"=",
"Chart",
"(",
")",
"chart",
".",
"add",
"(",
"'Serie1'",
",",
"[",
"None",
",",
"None",
",",
"None",
",",
"None",
"]",
")",
"chart",
".",
"add",
"(",
"'Serie2'",
",",
"[... | test no data for several none containing series . | train | false |
43,843 | def ValidateMSVSSettings(settings, stderr=sys.stderr):
_ValidateSettings(_msvs_validators, settings, stderr)
| [
"def",
"ValidateMSVSSettings",
"(",
"settings",
",",
"stderr",
"=",
"sys",
".",
"stderr",
")",
":",
"_ValidateSettings",
"(",
"_msvs_validators",
",",
"settings",
",",
"stderr",
")"
] | validates that the names of the settings are valid for msvs . | train | false |
43,844 | def mute_string(text):
start = 1
end = (len(text) - 1)
if text.endswith('"'):
start += text.index('"')
elif text.endswith("'"):
start += text.index("'")
if (text.endswith('"""') or text.endswith("'''")):
start += 2
end -= 2
return ((text[:start] + ('x' * (end - start))) + text[end:])
| [
"def",
"mute_string",
"(",
"text",
")",
":",
"start",
"=",
"1",
"end",
"=",
"(",
"len",
"(",
"text",
")",
"-",
"1",
")",
"if",
"text",
".",
"endswith",
"(",
"'\"'",
")",
":",
"start",
"+=",
"text",
".",
"index",
"(",
"'\"'",
")",
"elif",
"text"... | replace contents with xxx to prevent syntax matching . | train | true |
43,845 | @utils.arg('monitor', metavar='<monitor>', help='ID of the monitor to update metadata on.')
@utils.arg('action', metavar='<action>', choices=['set', 'unset'], help="Actions: 'set' or 'unset'")
@utils.arg('metadata', metavar='<key=value>', nargs='+', default=[], help='Metadata to set/unset (only key is necessary on unset)')
@utils.service_type('monitor')
def do_metadata(cs, args):
monitor = _find_monitor(cs, args.monitor)
metadata = _extract_metadata(args)
if (args.action == 'set'):
cs.monitors.set_metadata(monitor, metadata)
elif (args.action == 'unset'):
cs.monitors.delete_metadata(monitor, metadata.keys())
| [
"@",
"utils",
".",
"arg",
"(",
"'monitor'",
",",
"metavar",
"=",
"'<monitor>'",
",",
"help",
"=",
"'ID of the monitor to update metadata on.'",
")",
"@",
"utils",
".",
"arg",
"(",
"'action'",
",",
"metavar",
"=",
"'<action>'",
",",
"choices",
"=",
"[",
"'set... | set or delete metadata on a monitor . | train | false |
43,847 | def _connection_checker(func):
@functools.wraps(func)
def inner_connection_checker(self, *args, **kwargs):
LOG.debug('in _connection_checker')
for attempts in range(2):
try:
return func(self, *args, **kwargs)
except exception.SynoAuthError as e:
if (attempts < 1):
LOG.debug('Session might have expired. Trying to relogin')
self.new_session()
continue
else:
LOG.error(_LE('Try to renew session: [%s]'), e)
raise
return inner_connection_checker
| [
"def",
"_connection_checker",
"(",
"func",
")",
":",
"@",
"functools",
".",
"wraps",
"(",
"func",
")",
"def",
"inner_connection_checker",
"(",
"self",
",",
"*",
"args",
",",
"**",
"kwargs",
")",
":",
"LOG",
".",
"debug",
"(",
"'in _connection_checker'",
")... | decorator to check session has expired or not . | train | false |
43,848 | def condition2checker(condition):
if (type(condition) in [str, unicode]):
def smatcher(info):
return fnmatch.fnmatch(info.filename, condition)
return smatcher
elif ((type(condition) in [list, tuple]) and (type(condition[0]) in [int, long])):
def imatcher(info):
return (info.index in condition)
return imatcher
elif callable(condition):
return condition
else:
raise TypeError
| [
"def",
"condition2checker",
"(",
"condition",
")",
":",
"if",
"(",
"type",
"(",
"condition",
")",
"in",
"[",
"str",
",",
"unicode",
"]",
")",
":",
"def",
"smatcher",
"(",
"info",
")",
":",
"return",
"fnmatch",
".",
"fnmatch",
"(",
"info",
".",
"filen... | converts different condition types to callback . | train | false |
43,851 | def msk_from_problem_urlname(course_id, urlname, block_type='problem'):
if (not isinstance(course_id, CourseKey)):
raise ValueError
if urlname.endswith('.xml'):
urlname = urlname[:(-4)]
return course_id.make_usage_key(block_type, urlname)
| [
"def",
"msk_from_problem_urlname",
"(",
"course_id",
",",
"urlname",
",",
"block_type",
"=",
"'problem'",
")",
":",
"if",
"(",
"not",
"isinstance",
"(",
"course_id",
",",
"CourseKey",
")",
")",
":",
"raise",
"ValueError",
"if",
"urlname",
".",
"endswith",
"(... | convert a problem urlname to a module state key . | train | false |
43,852 | def mercator(xy):
(_x, _y) = xy
(x, y) = (((pi * _x) / 180), ((pi * _y) / 180))
y = log(tan(((0.25 * pi) + (0.5 * y))))
return ((6378137 * x), (6378137 * y))
| [
"def",
"mercator",
"(",
"xy",
")",
":",
"(",
"_x",
",",
"_y",
")",
"=",
"xy",
"(",
"x",
",",
"y",
")",
"=",
"(",
"(",
"(",
"pi",
"*",
"_x",
")",
"/",
"180",
")",
",",
"(",
"(",
"pi",
"*",
"_y",
")",
"/",
"180",
")",
")",
"y",
"=",
"... | project an tuple to spherical mercator . | train | false |
43,853 | def dispose_engine():
if ('sqlite' not in IMPL.get_engine().name):
return IMPL.dispose_engine()
else:
return
| [
"def",
"dispose_engine",
"(",
")",
":",
"if",
"(",
"'sqlite'",
"not",
"in",
"IMPL",
".",
"get_engine",
"(",
")",
".",
"name",
")",
":",
"return",
"IMPL",
".",
"dispose_engine",
"(",
")",
"else",
":",
"return"
] | force the engine to establish new connections . | train | false |
43,857 | def decov_loss(xs):
x = tf.reshape(xs, [int(xs.get_shape()[0]), (-1)])
m = tf.reduce_mean(x, 0, True)
z = tf.expand_dims((x - m), 2)
corr = tf.reduce_mean(tf.batch_matmul(z, tf.transpose(z, perm=[0, 2, 1])), 0)
corr_frob_sqr = tf.reduce_sum(tf.square(corr))
corr_diag_sqr = tf.reduce_sum(tf.square(tf.diag_part(corr)))
loss = (0.5 * (corr_frob_sqr - corr_diag_sqr))
return loss
| [
"def",
"decov_loss",
"(",
"xs",
")",
":",
"x",
"=",
"tf",
".",
"reshape",
"(",
"xs",
",",
"[",
"int",
"(",
"xs",
".",
"get_shape",
"(",
")",
"[",
"0",
"]",
")",
",",
"(",
"-",
"1",
")",
"]",
")",
"m",
"=",
"tf",
".",
"reduce_mean",
"(",
"... | decov loss as described in URL reducing overfitting in deep networks by decorrelating representation . | train | false |
43,859 | def cosine(w, A=1, phi=0, offset=0):
from math import cos
def f(i):
return ((A * cos(((w * i) + phi))) + offset)
return partial(_force, sequence=_advance(f))
| [
"def",
"cosine",
"(",
"w",
",",
"A",
"=",
"1",
",",
"phi",
"=",
"0",
",",
"offset",
"=",
"0",
")",
":",
"from",
"math",
"import",
"cos",
"def",
"f",
"(",
"i",
")",
":",
"return",
"(",
"(",
"A",
"*",
"cos",
"(",
"(",
"(",
"w",
"*",
"i",
... | return a driver function that can advance a sequence of cosine values . | train | true |
43,860 | def check_gradient(fcn, Dfcn, x0, args=(), col_deriv=0):
x = atleast_1d(x0)
n = len(x)
x = x.reshape((n,))
fvec = atleast_1d(fcn(x, *args))
m = len(fvec)
fvec = fvec.reshape((m,))
ldfjac = m
fjac = atleast_1d(Dfcn(x, *args))
fjac = fjac.reshape((m, n))
if (col_deriv == 0):
fjac = transpose(fjac)
xp = zeros((n,), float)
err = zeros((m,), float)
fvecp = None
_minpack._chkder(m, n, x, fvec, fjac, ldfjac, xp, fvecp, 1, err)
fvecp = atleast_1d(fcn(xp, *args))
fvecp = fvecp.reshape((m,))
_minpack._chkder(m, n, x, fvec, fjac, ldfjac, xp, fvecp, 2, err)
good = product(greater(err, 0.5), axis=0)
return (good, err)
| [
"def",
"check_gradient",
"(",
"fcn",
",",
"Dfcn",
",",
"x0",
",",
"args",
"=",
"(",
")",
",",
"col_deriv",
"=",
"0",
")",
":",
"x",
"=",
"atleast_1d",
"(",
"x0",
")",
"n",
"=",
"len",
"(",
"x",
")",
"x",
"=",
"x",
".",
"reshape",
"(",
"(",
... | perform a simple check on the gradient for correctness . | train | false |
43,861 | def distribute_not(domain):
result = []
stack = [False]
for token in domain:
negate = stack.pop()
if is_leaf(token):
if negate:
(left, operator, right) = token
if (operator in TERM_OPERATORS_NEGATION):
result.append((left, TERM_OPERATORS_NEGATION[operator], right))
else:
result.append(NOT_OPERATOR)
result.append(token)
else:
result.append(token)
elif (token == NOT_OPERATOR):
stack.append((not negate))
elif (token in DOMAIN_OPERATORS_NEGATION):
result.append((DOMAIN_OPERATORS_NEGATION[token] if negate else token))
stack.append(negate)
stack.append(negate)
else:
result.append(token)
return result
| [
"def",
"distribute_not",
"(",
"domain",
")",
":",
"result",
"=",
"[",
"]",
"stack",
"=",
"[",
"False",
"]",
"for",
"token",
"in",
"domain",
":",
"negate",
"=",
"stack",
".",
"pop",
"(",
")",
"if",
"is_leaf",
"(",
"token",
")",
":",
"if",
"negate",
... | distribute any ! domain operators found inside a normalized domain . | train | false |
43,862 | def EI_gaussian_empirical(mean, var, thresh, rng, N):
return EI_empirical(((rng.randn(N) * np.sqrt(var)) + mean), thresh)
| [
"def",
"EI_gaussian_empirical",
"(",
"mean",
",",
"var",
",",
"thresh",
",",
"rng",
",",
"N",
")",
":",
"return",
"EI_empirical",
"(",
"(",
"(",
"rng",
".",
"randn",
"(",
"N",
")",
"*",
"np",
".",
"sqrt",
"(",
"var",
")",
")",
"+",
"mean",
")",
... | expected improvement of gaussian over threshold . | train | false |
43,863 | def show_attrs(directory):
for f in os.listdir(directory):
if _is_audio(f):
path = os.path.join(directory, f)
_show_one(path)
| [
"def",
"show_attrs",
"(",
"directory",
")",
":",
"for",
"f",
"in",
"os",
".",
"listdir",
"(",
"directory",
")",
":",
"if",
"_is_audio",
"(",
"f",
")",
":",
"path",
"=",
"os",
".",
"path",
".",
"join",
"(",
"directory",
",",
"f",
")",
"_show_one",
... | print out the tempo for each audio file in the given directory . | train | true |
43,864 | def followers_of(username, number=(-1), etag=None):
return (gh.followers_of(username, number, etag) if username else [])
| [
"def",
"followers_of",
"(",
"username",
",",
"number",
"=",
"(",
"-",
"1",
")",
",",
"etag",
"=",
"None",
")",
":",
"return",
"(",
"gh",
".",
"followers_of",
"(",
"username",
",",
"number",
",",
"etag",
")",
"if",
"username",
"else",
"[",
"]",
")"
... | list the followers of username . | train | false |
43,865 | def _analytical_solution(a, y0, t):
(lam, v) = np.linalg.eig(a)
c = np.linalg.solve(v, y0)
e = (c * np.exp((lam * t.reshape((-1), 1))))
sol = e.dot(v.T)
return sol
| [
"def",
"_analytical_solution",
"(",
"a",
",",
"y0",
",",
"t",
")",
":",
"(",
"lam",
",",
"v",
")",
"=",
"np",
".",
"linalg",
".",
"eig",
"(",
"a",
")",
"c",
"=",
"np",
".",
"linalg",
".",
"solve",
"(",
"v",
",",
"y0",
")",
"e",
"=",
"(",
... | analytical solution to the linear differential equations dy/dt = a*y . | train | false |
43,866 | def update_parent_field(f, new):
if (f[u'fieldtype'] == u'Table'):
frappe.db.begin()
frappe.db.sql((u'update `tab%s` set parentfield=%s where parentfield=%s' % (f[u'options'], u'%s', u'%s')), (new, f[u'fieldname']))
frappe.db.commit()
| [
"def",
"update_parent_field",
"(",
"f",
",",
"new",
")",
":",
"if",
"(",
"f",
"[",
"u'fieldtype'",
"]",
"==",
"u'Table'",
")",
":",
"frappe",
".",
"db",
".",
"begin",
"(",
")",
"frappe",
".",
"db",
".",
"sql",
"(",
"(",
"u'update `tab%s` set parentfiel... | update parentfield in tables . | train | false |
43,867 | def list_launch_configurations(region=None, key=None, keyid=None, profile=None):
ret = get_all_launch_configurations(region, key, keyid, profile)
return [r.name for r in ret]
| [
"def",
"list_launch_configurations",
"(",
"region",
"=",
"None",
",",
"key",
"=",
"None",
",",
"keyid",
"=",
"None",
",",
"profile",
"=",
"None",
")",
":",
"ret",
"=",
"get_all_launch_configurations",
"(",
"region",
",",
"key",
",",
"keyid",
",",
"profile"... | list all launch configurations . | train | true |
43,868 | def test_get_det_debug_values_ignore():
prev_value = config.compute_test_value
try:
config.compute_test_value = 'ignore'
x = T.vector()
for x_val in op.get_debug_values(x):
assert False
finally:
config.compute_test_value = prev_value
| [
"def",
"test_get_det_debug_values_ignore",
"(",
")",
":",
"prev_value",
"=",
"config",
".",
"compute_test_value",
"try",
":",
"config",
".",
"compute_test_value",
"=",
"'ignore'",
"x",
"=",
"T",
".",
"vector",
"(",
")",
"for",
"x_val",
"in",
"op",
".",
"get_... | get_debug_values should return [] when debugger is ignore and some values are missing . | train | false |
43,869 | def hostname_valid(hostname):
if ((not hostname) or hostname.startswith('localhost') or hostname.endswith('localdomain') or hostname.endswith('novalocal') or (len(hostname.split('.')) < 2)):
return False
return True
| [
"def",
"hostname_valid",
"(",
"hostname",
")",
":",
"if",
"(",
"(",
"not",
"hostname",
")",
"or",
"hostname",
".",
"startswith",
"(",
"'localhost'",
")",
"or",
"hostname",
".",
"endswith",
"(",
"'localdomain'",
")",
"or",
"hostname",
".",
"endswith",
"(",
... | test if specified hostname should be considered valid args: hostname : hostname to test returns: bool: true if valid . | train | false |
43,870 | def _find_bad_optimizations0(order, reasons, r_vals):
for (i, node) in enumerate(order):
for new_r in node.outputs:
for (reason, r, old_graph_str, new_graph_str) in reasons[new_r]:
new_r_val = r_vals[new_r]
r_val = r_vals[r]
assert (r.type == new_r.type)
if hasattr(new_r.tag, 'values_eq_approx'):
check = new_r.tag.values_eq_approx(r_val, new_r_val)
elif hasattr(new_r, 'values_eq_approx'):
check = new_r.values_eq_approx(r_val, new_r_val)
else:
check = r.type.values_eq_approx(r_val, new_r_val)
if (not check):
raise BadOptimization(old_r=r, new_r=new_r, old_r_val=r_val, new_r_val=new_r_val, reason=reason, old_graph=old_graph_str, new_graph=new_graph_str)
| [
"def",
"_find_bad_optimizations0",
"(",
"order",
",",
"reasons",
",",
"r_vals",
")",
":",
"for",
"(",
"i",
",",
"node",
")",
"in",
"enumerate",
"(",
"order",
")",
":",
"for",
"new_r",
"in",
"node",
".",
"outputs",
":",
"for",
"(",
"reason",
",",
"r",... | use a simple algorithm to find broken optimizations . | train | false |
43,871 | def fastlog(x):
if ((not x) or (x == fzero)):
return MINUS_INF
return (x[2] + x[3])
| [
"def",
"fastlog",
"(",
"x",
")",
":",
"if",
"(",
"(",
"not",
"x",
")",
"or",
"(",
"x",
"==",
"fzero",
")",
")",
":",
"return",
"MINUS_INF",
"return",
"(",
"x",
"[",
"2",
"]",
"+",
"x",
"[",
"3",
"]",
")"
] | fast approximation of log2(x) for an mpf value tuple x . | train | false |
43,873 | def horizontal_flip(image_data, output_encoding=PNG, quality=None, correct_orientation=UNCHANGED_ORIENTATION, rpc=None, transparent_substitution_rgb=None):
rpc = horizontal_flip_async(image_data, output_encoding=output_encoding, quality=quality, correct_orientation=correct_orientation, rpc=rpc, transparent_substitution_rgb=transparent_substitution_rgb)
return rpc.get_result()
| [
"def",
"horizontal_flip",
"(",
"image_data",
",",
"output_encoding",
"=",
"PNG",
",",
"quality",
"=",
"None",
",",
"correct_orientation",
"=",
"UNCHANGED_ORIENTATION",
",",
"rpc",
"=",
"None",
",",
"transparent_substitution_rgb",
"=",
"None",
")",
":",
"rpc",
"=... | flip the image horizontally . | train | false |
43,875 | def pset_field(item_type, optional=False, initial=()):
return _sequence_field(CheckedPSet, 'PSet', item_type, optional, initial)
| [
"def",
"pset_field",
"(",
"item_type",
",",
"optional",
"=",
"False",
",",
"initial",
"=",
"(",
")",
")",
":",
"return",
"_sequence_field",
"(",
"CheckedPSet",
",",
"'PSet'",
",",
"item_type",
",",
"optional",
",",
"initial",
")"
] | create checked pset field . | train | true |
43,876 | def aggregationDivide(dividend, divisor):
dividendMonthSec = aggregationToMonthsSeconds(dividend)
divisorMonthSec = aggregationToMonthsSeconds(divisor)
if (((dividendMonthSec['months'] != 0) and (divisorMonthSec['seconds'] != 0)) or ((dividendMonthSec['seconds'] != 0) and (divisorMonthSec['months'] != 0))):
raise RuntimeError('Aggregation dicts with months/years can only be inter-operated with other aggregation dicts that contain months/years')
if (dividendMonthSec['months'] > 0):
return (float(dividendMonthSec['months']) / divisor['months'])
else:
return (float(dividendMonthSec['seconds']) / divisorMonthSec['seconds'])
| [
"def",
"aggregationDivide",
"(",
"dividend",
",",
"divisor",
")",
":",
"dividendMonthSec",
"=",
"aggregationToMonthsSeconds",
"(",
"dividend",
")",
"divisorMonthSec",
"=",
"aggregationToMonthsSeconds",
"(",
"divisor",
")",
"if",
"(",
"(",
"(",
"dividendMonthSec",
"[... | return the result from dividing two dicts that represent date and time . | train | true |
43,878 | def global_injector_decorator(inject_globals):
def inner_decorator(f):
@functools.wraps(f)
def wrapper(*args, **kwargs):
with salt.utils.context.func_globals_inject(f, **inject_globals):
return f(*args, **kwargs)
return wrapper
return inner_decorator
| [
"def",
"global_injector_decorator",
"(",
"inject_globals",
")",
":",
"def",
"inner_decorator",
"(",
"f",
")",
":",
"@",
"functools",
".",
"wraps",
"(",
"f",
")",
"def",
"wrapper",
"(",
"*",
"args",
",",
"**",
"kwargs",
")",
":",
"with",
"salt",
".",
"u... | decorator used by the lazyloader to inject globals into a function at execute time . | train | true |
43,879 | def test_cc_bad_ratio():
ratio = (-1.0)
cc = ClusterCentroids(ratio=ratio, random_state=RND_SEED)
assert_raises(ValueError, cc.fit, X, Y)
ratio = 100.0
cc = ClusterCentroids(ratio=ratio, random_state=RND_SEED)
assert_raises(ValueError, cc.fit, X, Y)
ratio = 'rnd'
cc = ClusterCentroids(ratio=ratio, random_state=RND_SEED)
assert_raises(ValueError, cc.fit, X, Y)
ratio = [0.5, 0.5]
cc = ClusterCentroids(ratio=ratio, random_state=RND_SEED)
assert_raises(ValueError, cc.fit, X, Y)
| [
"def",
"test_cc_bad_ratio",
"(",
")",
":",
"ratio",
"=",
"(",
"-",
"1.0",
")",
"cc",
"=",
"ClusterCentroids",
"(",
"ratio",
"=",
"ratio",
",",
"random_state",
"=",
"RND_SEED",
")",
"assert_raises",
"(",
"ValueError",
",",
"cc",
".",
"fit",
",",
"X",
",... | test either if an error is raised with a wrong decimal value for the ratio . | train | false |
43,881 | def sanitize(string, ignore_characters=None):
if (string is None):
return
ignore_characters = (ignore_characters or set())
characters = ({'-', ':', '(', ')', '.'} - ignore_characters)
if characters:
string = re.sub(('[%s]' % re.escape(''.join(characters))), ' ', string)
characters = ({"'"} - ignore_characters)
if characters:
string = re.sub(('[%s]' % re.escape(''.join(characters))), '', string)
string = re.sub('\\s+', ' ', string)
return string.strip().lower()
| [
"def",
"sanitize",
"(",
"string",
",",
"ignore_characters",
"=",
"None",
")",
":",
"if",
"(",
"string",
"is",
"None",
")",
":",
"return",
"ignore_characters",
"=",
"(",
"ignore_characters",
"or",
"set",
"(",
")",
")",
"characters",
"=",
"(",
"{",
"'-'",
... | sanitize a string to strip special characters . | train | true |
43,882 | def educate_dashes_oldschool(s):
return s.replace('---', '—').replace('--', '–')
| [
"def",
"educate_dashes_oldschool",
"(",
"s",
")",
":",
"return",
"s",
".",
"replace",
"(",
"'---'",
",",
"'—'",
")",
".",
"replace",
"(",
"'--'",
",",
"'–'",
")"
] | parameter: string . | train | false |
43,884 | def regex(value='', pattern='', ignorecase=False, multiline=False, match_type='search'):
flags = 0
if ignorecase:
flags |= re.I
if multiline:
flags |= re.M
_re = re.compile(pattern, flags=flags)
_bool = __builtins__.get('bool')
return _bool(getattr(_re, match_type, 'search')(value))
| [
"def",
"regex",
"(",
"value",
"=",
"''",
",",
"pattern",
"=",
"''",
",",
"ignorecase",
"=",
"False",
",",
"multiline",
"=",
"False",
",",
"match_type",
"=",
"'search'",
")",
":",
"flags",
"=",
"0",
"if",
"ignorecase",
":",
"flags",
"|=",
"re",
".",
... | expose re as a boolean filter using the search method by default . | train | false |
43,885 | def get_measure(argument, units):
match = re.match(('^([0-9.]+) *(%s)$' % '|'.join(units)), argument)
try:
float(match.group(1))
except (AttributeError, ValueError):
raise ValueError(('not a positive measure of one of the following units:\n%s' % ' '.join([('"%s"' % i) for i in units])))
return (match.group(1) + match.group(2))
| [
"def",
"get_measure",
"(",
"argument",
",",
"units",
")",
":",
"match",
"=",
"re",
".",
"match",
"(",
"(",
"'^([0-9.]+) *(%s)$'",
"%",
"'|'",
".",
"join",
"(",
"units",
")",
")",
",",
"argument",
")",
"try",
":",
"float",
"(",
"match",
".",
"group",
... | check for a positive argument of one of the units and return a normalized string of the form "<value><unit>" . | train | false |
43,886 | def _Hash(content):
h = hashlib.sha1(content).hexdigest()
return _FormatHash(h)
| [
"def",
"_Hash",
"(",
"content",
")",
":",
"h",
"=",
"hashlib",
".",
"sha1",
"(",
"content",
")",
".",
"hexdigest",
"(",
")",
"return",
"_FormatHash",
"(",
"h",
")"
] | compute the sha1 hash of the content . | train | false |
43,887 | @pytest.mark.django_db
def test_contributors_sort_contributions(member, anon_submission_unit):
contribs = Contributors()
assert (contribs.sort_by == 'username')
contribs = Contributors(sort_by='contributions')
assert (contribs.sort_by == 'contributions')
assert (contribs.contributors == _contributors_list(contribs))
| [
"@",
"pytest",
".",
"mark",
".",
"django_db",
"def",
"test_contributors_sort_contributions",
"(",
"member",
",",
"anon_submission_unit",
")",
":",
"contribs",
"=",
"Contributors",
"(",
")",
"assert",
"(",
"contribs",
".",
"sort_by",
"==",
"'username'",
")",
"con... | contributors across the site . | train | false |
43,888 | def local_html_escape(data, quote=False):
if PY2:
import cgi
data = cgi.escape(data, quote)
return (data.replace("'", ''') if quote else data)
else:
import html
if isinstance(data, str):
return html.escape(data, quote=quote)
data = data.replace('&', '&')
data = data.replace('<', '<')
data = data.replace('>', '>')
if quote:
data = data.replace('"', '"')
data = data.replace("'", ''')
return data
| [
"def",
"local_html_escape",
"(",
"data",
",",
"quote",
"=",
"False",
")",
":",
"if",
"PY2",
":",
"import",
"cgi",
"data",
"=",
"cgi",
".",
"escape",
"(",
"data",
",",
"quote",
")",
"return",
"(",
"data",
".",
"replace",
"(",
"\"'\"",
",",
"'''",... | works with bytes . | train | false |
43,889 | def init(mpstate):
return SerialModule(mpstate)
| [
"def",
"init",
"(",
"mpstate",
")",
":",
"return",
"SerialModule",
"(",
"mpstate",
")"
] | initialise module . | train | false |
43,890 | def merge_sequences(target, other, function=operator.add):
assert (len(target) == len(other)), 'sequence lengths must match'
return type(target)([function(x, y) for (x, y) in zip(target, other)])
| [
"def",
"merge_sequences",
"(",
"target",
",",
"other",
",",
"function",
"=",
"operator",
".",
"add",
")",
":",
"assert",
"(",
"len",
"(",
"target",
")",
"==",
"len",
"(",
"other",
")",
")",
",",
"'sequence lengths must match'",
"return",
"type",
"(",
"ta... | merge two sequences into a single sequence . | train | false |
43,891 | def is_song(d):
return is_gm_id(d[u'id'])
| [
"def",
"is_song",
"(",
"d",
")",
":",
"return",
"is_gm_id",
"(",
"d",
"[",
"u'id'",
"]",
")"
] | returns true is the given dict is a gm song dict . | train | false |
43,892 | def get_file_str(path, num_files, labelled=False, valid_split=None, split_count_thre=None, subset_pct=100):
dir_name = (path.split('/')[(-1)] if (len(path.split('/')[(-1)]) > 0) else path.split('/')[(-2)])
label_str = ('labelled' if labelled else '')
split_thre_str = ('thre_{}'.format(split_count_thre) if split_count_thre else '')
dir_str = 'doc_{}_{}_{}_{}'.format(label_str, dir_name, num_files, split_thre_str)
if valid_split:
split_str = '_split_{}'.format((valid_split * 100))
else:
split_str = ''
if (subset_pct != 100):
subset_str = '_subset_{}'.format(subset_pct)
else:
subset_str = ''
file_str = ((dir_str + split_str) + subset_str)
return file_str
| [
"def",
"get_file_str",
"(",
"path",
",",
"num_files",
",",
"labelled",
"=",
"False",
",",
"valid_split",
"=",
"None",
",",
"split_count_thre",
"=",
"None",
",",
"subset_pct",
"=",
"100",
")",
":",
"dir_name",
"=",
"(",
"path",
".",
"split",
"(",
"'/'",
... | create unique file name for processed data from the number of files . | train | false |
43,893 | def write_cov(fname, cov):
cov.save(fname)
| [
"def",
"write_cov",
"(",
"fname",
",",
"cov",
")",
":",
"cov",
".",
"save",
"(",
"fname",
")"
] | write a noise covariance matrix . | train | false |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.