id_within_dataset int64 1 55.5k | snippet stringlengths 19 14.2k | tokens listlengths 6 1.63k | nl stringlengths 6 352 | split_within_dataset stringclasses 1
value | is_duplicated bool 2
classes |
|---|---|---|---|---|---|
def volume_detach(name, profile=None, timeout=300):
    """Detach the named block volume using the configured cloud connection."""
    connection = _auth(profile)
    return connection.volume_detach(name, timeout)
| [
"def",
"volume_detach",
"(",
"name",
",",
"profile",
"=",
"None",
",",
"timeout",
"=",
"300",
")",
":",
"conn",
"=",
"_auth",
"(",
"profile",
")",
"return",
"conn",
".",
"volume_detach",
"(",
"name",
",",
"timeout",
")"
] | detach block volume . | train | true |
def setup_user_email(request, user, addresses):
    """Create proper EmailAddress records for a user that was just signed up.

    Combines any stashed verified email and the user's current email with
    *addresses*, de-duplicates them, saves them, and keeps ``user.email``
    in sync with the chosen primary.  Returns the primary EmailAddress.
    """
    from .models import EmailAddress
    # A freshly signed-up user must not already have addresses on record.
    assert (not EmailAddress.objects.filter(user=user).exists())
    priority_addresses = []
    adapter = get_adapter(request)
    # An email verified earlier in the signup flow takes precedence.
    stashed_email = adapter.unstash_verified_email(request)
    if stashed_email:
        priority_addresses.append(EmailAddress(user=user, email=stashed_email, primary=True, verified=True))
    email = user_email(user)
    if email:
        priority_addresses.append(EmailAddress(user=user, email=email, primary=True, verified=False))
    (addresses, primary) = cleanup_email_addresses(request, (priority_addresses + addresses))
    for a in addresses:
        a.user = user
        a.save()
    EmailAddress.objects.fill_cache_for_user(user, addresses)
    # Sync user.email when the selected primary differs (case-insensitive).
    if (primary and email and (email.lower() != primary.email.lower())):
        user_email(user, primary.email)
        user.save()
    return primary
| [
"def",
"setup_user_email",
"(",
"request",
",",
"user",
",",
"addresses",
")",
":",
"from",
".",
"models",
"import",
"EmailAddress",
"assert",
"(",
"not",
"EmailAddress",
".",
"objects",
".",
"filter",
"(",
"user",
"=",
"user",
")",
".",
"exists",
"(",
"... | creates proper emailaddress for the user that was just signed up . | train | true |
def structured_dot(x, y):
    """Structured dot product; at least one of *x*, *y* must be sparse.

    Raw scipy-sparse inputs are wrapped as sparse variables first.  When only
    *y* is sparse, the computation is expressed via transposes so the sparse
    operand comes first.
    """
    if hasattr(x, 'getnnz'):
        x = as_sparse_variable(x)
        assert x.format in ['csr', 'csc']
    if hasattr(y, 'getnnz'):
        y = as_sparse_variable(y)
        assert y.format in ['csr', 'csc']
    x_sparse = _is_sparse_variable(x)
    y_sparse = _is_sparse_variable(y)
    if not (x_sparse or y_sparse):
        raise TypeError('structured_dot requires at least one sparse argument')
    if x_sparse:
        return _structured_dot(x, y)
    assert y_sparse
    return _structured_dot(y.T, x.T).T
| [
"def",
"structured_dot",
"(",
"x",
",",
"y",
")",
":",
"if",
"hasattr",
"(",
"x",
",",
"'getnnz'",
")",
":",
"x",
"=",
"as_sparse_variable",
"(",
"x",
")",
"assert",
"(",
"x",
".",
"format",
"in",
"[",
"'csr'",
",",
"'csc'",
"]",
")",
"if",
"hasa... | structured dot is like dot . | train | false |
def guess_mimetype(bin_data, default='application/octet-stream'):
    """Guess the mimetype of an uploaded file from its leading signature bytes.

    Scans each known mapping's magic-number signatures; on a match, any
    sub-checkers (discriminants) may refine the guess.  Falls back to the
    matching entry's generic mimetype, or *default* when nothing matches.
    """
    for entry in _mime_mappings:
        for signature in entry.signatures:
            if bin_data.startswith(signature):
                for discriminant in entry.discriminants:
                    try:
                        guess = discriminant(bin_data)
                        if guess:
                            return guess
                    except Exception:
                        # FIX: Logger.warn is a deprecated alias; use warning().
                        _logger.getChild('guess_mimetype').warning("Sub-checker '%s' of type '%s' failed", discriminant.__name__, entry.mimetype, exc_info=True)
                # No discriminant refined the guess: use the entry's mimetype.
                return entry.mimetype
    return default
| [
"def",
"guess_mimetype",
"(",
"bin_data",
",",
"default",
"=",
"'application/octet-stream'",
")",
":",
"for",
"entry",
"in",
"_mime_mappings",
":",
"for",
"signature",
"in",
"entry",
".",
"signatures",
":",
"if",
"bin_data",
".",
"startswith",
"(",
"signature",
... | guess the mimetype of an uploaded file . | train | false |
def stderr(a, axis=0, ddof=1):
    """Return the estimated population standard error of the values in *a*.

    *ddof* is the delta degrees of freedom forwarded to the standard-deviation
    computation (default 1, the sample estimate).
    """
    (a, axis) = _chk_asarray(a, axis)
    # BUG FIX: ddof was previously hard-coded to 1, silently ignoring the
    # caller-supplied parameter.
    return (std(a, axis, ddof=ddof) / float(sqrt(a.shape[axis])))
| [
"def",
"stderr",
"(",
"a",
",",
"axis",
"=",
"0",
",",
"ddof",
"=",
"1",
")",
":",
"(",
"a",
",",
"axis",
")",
"=",
"_chk_asarray",
"(",
"a",
",",
"axis",
")",
"return",
"(",
"std",
"(",
"a",
",",
"axis",
",",
"ddof",
"=",
"1",
")",
"/",
... | returns the estimated population standard error of the values in the passed array . | train | false |
def eval_number(parse_result):
    """Create a float out of its string parts."""
    joined = ''.join(parse_result)
    return super_float(joined)
| [
"def",
"eval_number",
"(",
"parse_result",
")",
":",
"return",
"super_float",
"(",
"''",
".",
"join",
"(",
"parse_result",
")",
")"
] | create a float out of its string parts . | train | false |
def compare_record(old, new, truncate=None):
    """Strictly compare two SeqRecord-like objects for exact agreement.

    Checks id, description, sequence length/content, and per-letter quality
    annotations, converting between phred and solexa scales where needed.
    *truncate*, when given, caps quality values before comparison.  Raises
    ValueError on any mismatch; returns True when the records agree.
    """
    if (old.id != new.id):
        raise ValueError(("'%s' vs '%s' " % (old.id, new.id)))
    # Descriptions may legitimately differ when the new one is empty/unknown
    # or equals the old "id description" combination.
    if ((old.description != new.description) and (((old.id + ' ') + old.description).strip() != new.description) and (new.description != '<unknown description>') and (new.description != '')):
        raise ValueError(("'%s' vs '%s' " % (old.description, new.description)))
    if (len(old.seq) != len(new.seq)):
        raise ValueError(('%i vs %i' % (len(old.seq), len(new.seq))))
    if (isinstance(old.seq, UnknownSeq) or isinstance(new.seq, UnknownSeq)):
        # Unknown sequences only need matching lengths (checked above).
        pass
    elif (str(old.seq) != str(new.seq)):
        if (len(old.seq) < 200):
            raise ValueError(("'%s' vs '%s'" % (old.seq, new.seq)))
        else:
            raise ValueError(("'%s...' vs '%s...'" % (old.seq[:100], new.seq[:100])))
    if (('phred_quality' in old.letter_annotations) and ('phred_quality' in new.letter_annotations) and (old.letter_annotations['phred_quality'] != new.letter_annotations['phred_quality'])):
        if (truncate and ([min(q, truncate) for q in old.letter_annotations['phred_quality']] == [min(q, truncate) for q in new.letter_annotations['phred_quality']])):
            pass
        else:
            # BUG FIX: previously "raise ValuerError" -- an undefined name,
            # which raised NameError instead of the intended ValueError.
            raise ValueError('Mismatch in phred_quality')
    if (('solexa_quality' in old.letter_annotations) and ('solexa_quality' in new.letter_annotations) and (old.letter_annotations['solexa_quality'] != new.letter_annotations['solexa_quality'])):
        if (truncate and ([min(q, truncate) for q in old.letter_annotations['solexa_quality']] == [min(q, truncate) for q in new.letter_annotations['solexa_quality']])):
            pass
        else:
            # BUG FIX: message wrongly said 'phred_quality' for a solexa mismatch.
            raise ValueError('Mismatch in solexa_quality')
    if (('phred_quality' in old.letter_annotations) and ('solexa_quality' in new.letter_annotations)):
        converted = [round(QualityIO.solexa_quality_from_phred(q)) for q in old.letter_annotations['phred_quality']]
        if truncate:
            converted = [min(q, truncate) for q in converted]
        if (converted != new.letter_annotations['solexa_quality']):
            print('')
            print(old.letter_annotations['phred_quality'])
            print(converted)
            print(new.letter_annotations['solexa_quality'])
            raise ValueError('Mismatch in phred_quality vs solexa_quality')
    if (('solexa_quality' in old.letter_annotations) and ('phred_quality' in new.letter_annotations)):
        converted = [round(QualityIO.phred_quality_from_solexa(q)) for q in old.letter_annotations['solexa_quality']]
        if truncate:
            converted = [min(q, truncate) for q in converted]
        if (converted != new.letter_annotations['phred_quality']):
            print(old.letter_annotations['solexa_quality'])
            print(converted)
            print(new.letter_annotations['phred_quality'])
            raise ValueError('Mismatch in solexa_quality vs phred_quality')
    return True
| [
"def",
"compare_record",
"(",
"old",
",",
"new",
",",
"truncate",
"=",
"None",
")",
":",
"if",
"(",
"old",
".",
"id",
"!=",
"new",
".",
"id",
")",
":",
"raise",
"ValueError",
"(",
"(",
"\"'%s' vs '%s' \"",
"%",
"(",
"old",
".",
"id",
",",
"new",
... | this is meant to be a strict comparison for exact agreement . | train | false |
@pytest.mark.parametrize('qurl', [QUrl('http://abc123.com/this/awesome/url.html'), QUrl('https://supersecret.gov/nsa/files.txt'), None])
def test_set_url(url_widget, qurl):
    """Test the text displayed by the URL widget for valid QUrls and for None."""
    url_widget.set_url(qurl)
    if (qurl is not None):
        assert (url_widget.text() == qurl.toDisplayString())
    else:
        # Clearing the URL (None) must blank the widget text.
        assert (url_widget.text() == '')
| [
"@",
"pytest",
".",
"mark",
".",
"parametrize",
"(",
"'qurl'",
",",
"[",
"QUrl",
"(",
"'http://abc123.com/this/awesome/url.html'",
")",
",",
"QUrl",
"(",
"'https://supersecret.gov/nsa/files.txt'",
")",
",",
"None",
"]",
")",
"def",
"test_set_url",
"(",
"url_widget... | test text displayed by the widget . | train | false |
def is_module(name):
    """Return *name* without its extension if it ends with a recognized
    CODE_FILES extension; otherwise return None."""
    for extension in CODE_FILES:
        if name.endswith(extension):
            return name[:-len(extension)]
    return None
| [
"def",
"is_module",
"(",
"name",
")",
":",
"for",
"ext",
"in",
"CODE_FILES",
":",
"if",
"name",
".",
"endswith",
"(",
"ext",
")",
":",
"return",
"name",
"[",
":",
"(",
"-",
"len",
"(",
"ext",
")",
")",
"]"
] | is this a recognized module type? does this name end in one of the recognized code_files extensions? the file is assumed to exist . | train | false |
def validate_lms_config(settings):
    """Validate LMS configuration; raises ValueError when invalid."""
    for check in (validate_common_config, validate_marketing_site_config):
        check(settings)
| [
"def",
"validate_lms_config",
"(",
"settings",
")",
":",
"validate_common_config",
"(",
"settings",
")",
"validate_marketing_site_config",
"(",
"settings",
")"
] | validates configurations for lms and raise valueerror if not valid . | train | false |
@docfiller
def generic_gradient_magnitude(input, derivative, output=None, mode='reflect', cval=0.0, extra_arguments=(), extra_keywords=None):
    """Gradient magnitude using a provided gradient (derivative) function.

    *derivative* is invoked once per axis as
    ``derivative(input, axis, output, mode, cval, *extra_arguments,
    **extra_keywords)``; the per-axis results are squared, summed, and
    square-rooted in place into the output array.
    """
    if (extra_keywords is None):
        extra_keywords = {}
    input = numpy.asarray(input)
    (output, return_value) = _ni_support._get_output(output, input)
    axes = list(range(input.ndim))
    if (len(axes) > 0):
        modes = _ni_support._normalize_sequence(mode, len(axes))
        # First axis writes directly into the output buffer...
        derivative(input, axes[0], output, modes[0], cval, *extra_arguments, **extra_keywords)
        numpy.multiply(output, output, output)
        for ii in range(1, len(axes)):
            # ...remaining axes accumulate their squared derivatives into it.
            tmp = derivative(input, axes[ii], output.dtype, modes[ii], cval, *extra_arguments, **extra_keywords)
            numpy.multiply(tmp, tmp, tmp)
            output += tmp
        # 'unsafe' casting: sqrt result is written back into output's dtype.
        numpy.sqrt(output, output, casting='unsafe')
    else:
        # Zero-dimensional input: magnitude is just the input itself.
        output[...] = input[...]
    return return_value
| [
"@",
"docfiller",
"def",
"generic_gradient_magnitude",
"(",
"input",
",",
"derivative",
",",
"output",
"=",
"None",
",",
"mode",
"=",
"'reflect'",
",",
"cval",
"=",
"0.0",
",",
"extra_arguments",
"=",
"(",
")",
",",
"extra_keywords",
"=",
"None",
")",
":",... | gradient magnitude using a provided gradient function . | train | false |
def do_sync(reg=None):
    """Sync registered apps with the virtualenv .pth file.

    Returns True on success, False when writing the .pth file fails.
    """
    registry_obj = reg or registry.AppRegistry()
    apps = registry_obj.get_all_apps()
    try:
        pth_file = pth.PthFile()
        pth_file.sync(apps)
        pth_file.save()
        build.make_syncdb()
    except (OSError, SystemError) as ex:
        LOG.error(("Failed to update the .pth file. Please fix any problem and run `%s --sync'\n%s" % (PROG_NAME, ex)))
        return False
    return True
| [
"def",
"do_sync",
"(",
"reg",
"=",
"None",
")",
":",
"if",
"(",
"not",
"reg",
")",
":",
"reg",
"=",
"registry",
".",
"AppRegistry",
"(",
")",
"apps",
"=",
"reg",
".",
"get_all_apps",
"(",
")",
"try",
":",
"pthfile",
"=",
"pth",
".",
"PthFile",
"(... | sync apps with virtualenv . | train | false |
def _get_hybrid_bridge_name(vif):
    """Derive the hybrid bridge device name for a VIF, truncated to NIC_NAME_LEN."""
    bridge_name = 'qbr' + vif['id']
    return bridge_name[:model.NIC_NAME_LEN]
| [
"def",
"_get_hybrid_bridge_name",
"(",
"vif",
")",
":",
"return",
"(",
"'qbr'",
"+",
"vif",
"[",
"'id'",
"]",
")",
"[",
":",
"model",
".",
"NIC_NAME_LEN",
"]"
] | get a bridge device name . | train | false |
def authenticationAndCipheringResponse(AuthenticationParameterSRES_presence=0, MobileId_presence=0):
    """Build an AUTHENTICATION AND CIPHERING RESPONSE message (section 9).

    Optional information elements are appended when their *_presence flag
    is 1.  Returns the assembled packet.
    """
    a = TpPd(pd=3)
    b = MessageType(mesType=19)
    c = AcReferenceNumberAndSpareHalfOctets()
    packet = ((a / b) / c)
    # FIX: "is 1" compares object identity with an int literal (fragile,
    # SyntaxWarning on modern Python); use equality instead.
    if (AuthenticationParameterSRES_presence == 1):
        e = AuthenticationParameterSRES(ieiAPS=34)
        packet = (packet / e)
    if (MobileId_presence == 1):
        f = MobileIdHdr(ieiMI=35, eightBitMI=0)
        packet = (packet / f)
    return packet
| [
"def",
"authenticationAndCipheringResponse",
"(",
"AuthenticationParameterSRES_presence",
"=",
"0",
",",
"MobileId_presence",
"=",
"0",
")",
":",
"a",
"=",
"TpPd",
"(",
"pd",
"=",
"3",
")",
"b",
"=",
"MessageType",
"(",
"mesType",
"=",
"19",
")",
"c",
"=",
... | authentication and ciphering response section 9 . | train | true |
def instance_tag_set(context, instance_uuid, tags):
    """Replace all of the instance's tags with the specified list of tags."""
    return IMPL.instance_tag_set(context, instance_uuid, tags)
| [
"def",
"instance_tag_set",
"(",
"context",
",",
"instance_uuid",
",",
"tags",
")",
":",
"return",
"IMPL",
".",
"instance_tag_set",
"(",
"context",
",",
"instance_uuid",
",",
"tags",
")"
] | replace all of the instance tags with specified list of tags . | train | false |
def node_attribute_xy(G, attribute, nodes=None):
    """Yield (attribute_u, attribute_v) pairs for edges in G.

    When *nodes* is given, only edges whose source node is in *nodes* are
    considered.  Nodes missing the attribute yield None.  For multigraphs,
    each parallel edge yields its own pair.
    """
    if (nodes is None):
        nodes = set(G)
    else:
        nodes = set(nodes)
    node = G.node
    for (u, nbrsdict) in G.adjacency():
        if (u not in nodes):
            continue
        uattr = node[u].get(attribute, None)
        if G.is_multigraph():
            for (v, keys) in nbrsdict.items():
                vattr = node[v].get(attribute, None)
                # One pair per parallel edge; edge data itself is unused.
                for (k, d) in keys.items():
                    (yield (uattr, vattr))
        else:
            for (v, eattr) in nbrsdict.items():
                vattr = node[v].get(attribute, None)
                (yield (uattr, vattr))
| [
"def",
"node_attribute_xy",
"(",
"G",
",",
"attribute",
",",
"nodes",
"=",
"None",
")",
":",
"if",
"(",
"nodes",
"is",
"None",
")",
":",
"nodes",
"=",
"set",
"(",
"G",
")",
"else",
":",
"nodes",
"=",
"set",
"(",
"nodes",
")",
"node",
"=",
"G",
... | return iterator of node-attribute pairs for all edges in g . | train | false |
def sigquit_handler(sig, frame):
    """Debug aid for deadlocks: print stack traces for every thread on SIGQUIT."""
    print('Dumping stack traces for all threads in PID {}'.format(os.getpid()))
    names_by_ident = {thread.ident: thread.name for thread in threading.enumerate()}
    lines = []
    for thread_id, stack in sys._current_frames().items():
        lines.append('\n# Thread: {}({})'.format(names_by_ident.get(thread_id, ''), thread_id))
        for filename, line_number, func_name, text in traceback.extract_stack(stack):
            lines.append('File: "{}", line {}, in {}'.format(filename, line_number, func_name))
            if text:
                lines.append(' {}'.format(text.strip()))
    print('\n'.join(lines))
| [
"def",
"sigquit_handler",
"(",
"sig",
",",
"frame",
")",
":",
"print",
"(",
"'Dumping stack traces for all threads in PID {}'",
".",
"format",
"(",
"os",
".",
"getpid",
"(",
")",
")",
")",
"id_to_name",
"=",
"dict",
"(",
"[",
"(",
"th",
".",
"ident",
",",
... | helps debug deadlocks by printing stacktraces when this gets a sigquit e . | train | true |
def getAreaLoops(loops):
    """Get the total area of a list of complex polygons."""
    return sum((getAreaLoop(loop) for loop in loops), 0.0)
| [
"def",
"getAreaLoops",
"(",
"loops",
")",
":",
"areaLoops",
"=",
"0.0",
"for",
"loop",
"in",
"loops",
":",
"areaLoops",
"+=",
"getAreaLoop",
"(",
"loop",
")",
"return",
"areaLoops"
] | get the area of a list of complex polygons . | train | false |
def dmp_convert(f, u, K0, K1):
    """Convert the ground domain of multivariate polynomial *f* from K0 to K1.

    *u* is the number of variables minus one; u == 0 delegates to the
    univariate ``dup_convert``.  No-op when K0 already equals K1.
    """
    if (not u):
        return dup_convert(f, K0, K1)
    if ((K0 is not None) and (K0 == K1)):
        return f
    v = (u - 1)
    # Recurse into coefficients (each one level lower), then strip leading zeros.
    return dmp_strip([dmp_convert(c, v, K0, K1) for c in f], u)
| [
"def",
"dmp_convert",
"(",
"f",
",",
"u",
",",
"K0",
",",
"K1",
")",
":",
"if",
"(",
"not",
"u",
")",
":",
"return",
"dup_convert",
"(",
"f",
",",
"K0",
",",
"K1",
")",
"if",
"(",
"(",
"K0",
"is",
"not",
"None",
")",
"and",
"(",
"K0",
"==",... | convert the ground domain of f from k0 to k1 . | train | false |
def generateCoincMatrix(nCoinc=10, length=500, activity=50):
    """Generate a random sparse coincidence matrix (nCoinc rows x length cols).

    Each row receives *activity* ones at sorted random column positions.
    """
    coincMatrix0 = SM32(int(nCoinc), int(length))
    theOnes = numpy.array(([1.0] * activity), dtype=numpy.float32)
    for rowIdx in xrange(nCoinc):
        coinc = numpy.array(random.sample(xrange(length), activity), dtype=numpy.uint32)
        coinc.sort()
        coincMatrix0.setRowFromSparse(rowIdx, coinc, theOnes)
    # NOTE(review): coincMatrix below is built but never returned -- the
    # function returns coincMatrix0.  Confirm whether this second matrix is
    # dead code or intentionally exercises initializeWithFixedNNZR.
    coincMatrix = SM32(int(nCoinc), int(length))
    coincMatrix.initializeWithFixedNNZR(activity)
    return coincMatrix0
| [
"def",
"generateCoincMatrix",
"(",
"nCoinc",
"=",
"10",
",",
"length",
"=",
"500",
",",
"activity",
"=",
"50",
")",
":",
"coincMatrix0",
"=",
"SM32",
"(",
"int",
"(",
"nCoinc",
")",
",",
"int",
"(",
"length",
")",
")",
"theOnes",
"=",
"numpy",
".",
... | generate a coincidence matrix . | train | true |
def s2n_motorola(string):
    """Extract a big-endian (Motorola byte order) multi-byte integer from *string*."""
    value = 0
    for char in string:
        value = (value << 8) | ord_(char)
    return value
| [
"def",
"s2n_motorola",
"(",
"string",
")",
":",
"x",
"=",
"0",
"for",
"c",
"in",
"string",
":",
"x",
"=",
"(",
"(",
"x",
"<<",
"8",
")",
"|",
"ord_",
"(",
"c",
")",
")",
"return",
"x"
] | extract multi-byte integer in motorola format . | train | true |
def _paginate(request, queryset):
    """Paginate *queryset* using the 'page' and 'limit' query parameters."""
    params = request.GET
    page_number = int(params.get('page', 1))
    page_limit = int(params.get('limit', 0))
    return __paginate(page_number, page_limit, queryset)
| [
"def",
"_paginate",
"(",
"request",
",",
"queryset",
")",
":",
"page",
"=",
"int",
"(",
"request",
".",
"GET",
".",
"get",
"(",
"'page'",
",",
"1",
")",
")",
"limit",
"=",
"int",
"(",
"request",
".",
"GET",
".",
"get",
"(",
"'limit'",
",",
"0",
... | paginates the results from func by continuously passing in the returned marker if the results were truncated . | train | false |
def render_template_with_system_context(value, context=None, prefix=None):
    """Render *value* as a template with the default system datastore context.

    Installs system-scope key/value lookups under both the system scope and
    the datastore parent scope before rendering.
    """
    context = context or {}
    context[SYSTEM_SCOPE] = KeyValueLookup(prefix=prefix, scope=SYSTEM_SCOPE)
    context[DATASTORE_PARENT_SCOPE] = {SYSTEM_SCOPE: KeyValueLookup(prefix=prefix, scope=SYSTEM_SCOPE)}
    return render_template(value=value, context=context)
| [
"def",
"render_template_with_system_context",
"(",
"value",
",",
"context",
"=",
"None",
",",
"prefix",
"=",
"None",
")",
":",
"context",
"=",
"(",
"context",
"or",
"{",
"}",
")",
"context",
"[",
"SYSTEM_SCOPE",
"]",
"=",
"KeyValueLookup",
"(",
"prefix",
"... | render provided template with a default system context . | train | false |
def setup_app(command, conf, vars):
    """Place any commands to set up CKAN here: load config and create DB tables."""
    load_environment(conf.global_conf, conf.local_conf)
    # Imported late so the environment is configured before models load.
    from ckan import model
    log.debug('Creating tables')
    model.repo.create_db()
    log.info('Creating tables: SUCCESS')
| [
"def",
"setup_app",
"(",
"command",
",",
"conf",
",",
"vars",
")",
":",
"load_environment",
"(",
"conf",
".",
"global_conf",
",",
"conf",
".",
"local_conf",
")",
"from",
"ckan",
"import",
"model",
"log",
".",
"debug",
"(",
"'Creating tables'",
")",
"model"... | place any commands to setup ckan here . | train | false |
def half_secret(d, k):
    """Mask the secret at d[k] in place, keeping at most its first two characters."""
    secret = d[k]
    length = len(secret)
    if length > 2:
        d[k] = secret[:2] + ('*' * (length - 2))
    else:
        d[k] = '*' * length
| [
"def",
"half_secret",
"(",
"d",
",",
"k",
")",
":",
"l",
"=",
"len",
"(",
"d",
"[",
"k",
"]",
")",
"if",
"(",
"l",
">",
"2",
")",
":",
"d",
"[",
"k",
"]",
"=",
"(",
"d",
"[",
"k",
"]",
"[",
":",
"2",
"]",
"+",
"(",
"'*'",
"*",
"(",
... | hidden part of the secret . | train | false |
def get_coord(coord_fname, method='IQR'):
    """Open and return a coords location matrix plus metadata.

    A plain file is parsed directly; a directory is treated as jackknifed
    support PCoAs summarized with *method* (e.g. 'IQR'), yielding low/high
    bounds too.  Returns [header, coords, eigvals, pct_var, low, high].
    """
    if (not os.path.isdir(coord_fname)):
        try:
            coord_f = open(coord_fname, 'U')
        except (TypeError, IOError):
            raise MissingFileError('Coord file required for this analysis')
        (coord_header, coords, eigvals, pct_var) = parse_coords(coord_f)
        # Single PCoA: no low/high support intervals available.
        return [coord_header, coords, eigvals, pct_var, None, None]
    else:
        (master_pcoa, support_pcoas) = load_pcoa_files(coord_fname)
        (coords, coords_low, coords_high, eigval_average, coord_header) = summarize_pcoas(master_pcoa, support_pcoas, method=method)
        # Percent variation comes from the master PCoA, not the summary.
        pct_var = master_pcoa[3]
        coord_header = list(master_pcoa[0])
        return [coord_header, coords, eigval_average, pct_var, coords_low, coords_high]
| [
"def",
"get_coord",
"(",
"coord_fname",
",",
"method",
"=",
"'IQR'",
")",
":",
"if",
"(",
"not",
"os",
".",
"path",
".",
"isdir",
"(",
"coord_fname",
")",
")",
":",
"try",
":",
"coord_f",
"=",
"open",
"(",
"coord_fname",
",",
"'U'",
")",
"except",
... | opens and returns coords location matrix and metadata . | train | false |
def get_log_line(req, res, trans_time, additional_info):
    """Make a line for logging that matches the documented backend log format.

    Fields: remote addr, UTC timestamp, method/path, status code, content
    length, referer, transaction id, user agent, duration, extra info, pid,
    and storage-policy index -- with '-' substituted for missing values.
    """
    policy_index = get_policy_index(req.headers, res.headers)
    return ('%s - - [%s] "%s %s" %s %s "%s" "%s" "%s" %.4f "%s" %d %s' % (req.remote_addr, time.strftime('%d/%b/%Y:%H:%M:%S +0000', time.gmtime()), req.method, req.path, res.status.split()[0], (res.content_length or '-'), (req.referer or '-'), req.headers.get('x-trans-id', '-'), (req.user_agent or '-'), trans_time, (additional_info or '-'), os.getpid(), (policy_index or '-')))
| [
"def",
"get_log_line",
"(",
"req",
",",
"res",
",",
"trans_time",
",",
"additional_info",
")",
":",
"policy_index",
"=",
"get_policy_index",
"(",
"req",
".",
"headers",
",",
"res",
".",
"headers",
")",
"return",
"(",
"'%s - - [%s] \"%s %s\" %s %s \"%s\" \"%s\" \"%... | make a line for logging that matches the documented log line format for backend servers . | train | false |
@task
def manylinux(ctx, vs, upload=False):
    """Build manylinux wheels with Matthew Brett's manylinux-builds images.

    Clones or updates the builder repo, pulls the x86_64 and i686 docker
    images, runs the wheel build for pyzmq versions *vs*, and optionally
    uploads the results with twine.
    """
    manylinux = '/tmp/manylinux-builds'
    if (not os.path.exists(manylinux)):
        with cd('/tmp'):
            run('git clone --recursive https://github.com/minrk/manylinux-builds -b pyzmq')
    else:
        with cd(manylinux):
            run('git pull')
            run('git submodule update')
    run('docker pull quay.io/pypa/manylinux1_x86_64')
    run('docker pull quay.io/pypa/manylinux1_i686')
    base_cmd = "docker run --dns 8.8.8.8 --rm -e PYZMQ_VERSIONS='{vs}' -e PYTHON_VERSIONS='{pys}' -e ZMQ_VERSION='{zmq}' -v $PWD:/io".format(vs=vs, pys='2.7 3.4 3.5 3.6', zmq=libzmq_vs)
    with cd(manylinux):
        run((base_cmd + ' quay.io/pypa/manylinux1_x86_64 /io/build_pyzmqs.sh'))
        # i686 build runs under linux32 for a 32-bit personality.
        run((base_cmd + ' quay.io/pypa/manylinux1_i686 linux32 /io/build_pyzmqs.sh'))
    if upload:
        # NOTE(review): `py` appears unused, and run() is called with a list
        # here while every other call passes a string -- confirm the intended
        # run() API before relying on the upload path.
        py = make_env('3.5', 'twine')
        run(['twine', 'upload', os.path.join(manylinux, 'wheelhouse', '*')])
| [
"@",
"task",
"def",
"manylinux",
"(",
"ctx",
",",
"vs",
",",
"upload",
"=",
"False",
")",
":",
"manylinux",
"=",
"'/tmp/manylinux-builds'",
"if",
"(",
"not",
"os",
".",
"path",
".",
"exists",
"(",
"manylinux",
")",
")",
":",
"with",
"cd",
"(",
"'/tmp... | build manylinux wheels with matthew bretts manylinux-builds . | train | false |
def runCPU():
    """Poll CPU usage forever, feeding an HTM model and plotting actual vs predicted."""
    model = ModelFactory.create(model_params.MODEL_PARAMS)
    model.enableInference({'predictedField': 'cpu'})
    # Shifter aligns multi-step predictions with the inputs they predict.
    shifter = InferenceShifter()
    actHistory = deque(([0.0] * WINDOW), maxlen=60)
    predHistory = deque(([0.0] * WINDOW), maxlen=60)
    (actline,) = plt.plot(range(WINDOW), actHistory)
    (predline,) = plt.plot(range(WINDOW), predHistory)
    actline.axes.set_ylim(0, 100)
    predline.axes.set_ylim(0, 100)
    while True:
        s = time.time()
        cpu = psutil.cpu_percent()
        modelInput = {'cpu': cpu}
        result = shifter.shift(model.run(modelInput))
        # 5-step-ahead best prediction.
        inference = result.inferences['multiStepBestPredictions'][5]
        if (inference is not None):
            actHistory.append(result.rawInput['cpu'])
            predHistory.append(inference)
            actline.set_ydata(actHistory)
            predline.set_ydata(predHistory)
            plt.draw()
            plt.legend(('actual', 'predicted'))
        try:
            plt.pause(SECONDS_PER_STEP)
        except:
            # plt.pause can raise when the figure is closed; keep polling anyway.
            pass
| [
"def",
"runCPU",
"(",
")",
":",
"model",
"=",
"ModelFactory",
".",
"create",
"(",
"model_params",
".",
"MODEL_PARAMS",
")",
"model",
".",
"enableInference",
"(",
"{",
"'predictedField'",
":",
"'cpu'",
"}",
")",
"shifter",
"=",
"InferenceShifter",
"(",
")",
... | poll cpu usage . | train | true |
def get_uploaded_file_mimetype(uploaded_file):
    """Return the mimetype of an uploaded file.

    Trusts the declared content type when it looks like a real type/subtype
    pair and is not the generic octet-stream; otherwise sniffs the content.
    """
    declared = uploaded_file.content_type
    if (declared and (len(declared.split(u'/')) == 2) and (declared != u'application/octet-stream')):
        return declared
    return guess_mimetype(uploaded_file)
| [
"def",
"get_uploaded_file_mimetype",
"(",
"uploaded_file",
")",
":",
"if",
"(",
"uploaded_file",
".",
"content_type",
"and",
"(",
"len",
"(",
"uploaded_file",
".",
"content_type",
".",
"split",
"(",
"u'/'",
")",
")",
"==",
"2",
")",
"and",
"(",
"uploaded_fil... | return the mimetype of a file that was uploaded . | train | false |
def load_plugin(path_to_zip_file):
    """Find and import a plugin module from a zip file so it can be registered."""
    return loader.load(path_to_zip_file)
| [
"def",
"load_plugin",
"(",
"path_to_zip_file",
")",
":",
"return",
"loader",
".",
"load",
"(",
"path_to_zip_file",
")"
] | find and import a plugin module so that it can be registered . | train | false |
def date_to_str(date):
    """Format *date* using the configured DATE_FORMAT; None for a falsy date."""
    if not date:
        return None
    return datetime.strftime(date, config.DATE_FORMAT)
| [
"def",
"date_to_str",
"(",
"date",
")",
":",
"return",
"(",
"datetime",
".",
"strftime",
"(",
"date",
",",
"config",
".",
"DATE_FORMAT",
")",
"if",
"date",
"else",
"None",
")"
] | converts a datetime value to the format defined in the configuration file . | train | false |
def s_repeat(block_name, min_reps=0, max_reps=None, step=1, variable=None, fuzzable=True, name=None):
    """Repeat the rendered contents of *block_name*, cycling min_reps..max_reps by *step*.

    The repeat primitive is pushed onto the block currently being defined.
    """
    repeat = blocks.repeat(block_name, blocks.CURRENT, min_reps, max_reps, step, variable, fuzzable, name)
    blocks.CURRENT.push(repeat)
| [
"def",
"s_repeat",
"(",
"block_name",
",",
"min_reps",
"=",
"0",
",",
"max_reps",
"=",
"None",
",",
"step",
"=",
"1",
",",
"variable",
"=",
"None",
",",
"fuzzable",
"=",
"True",
",",
"name",
"=",
"None",
")",
":",
"repeat",
"=",
"blocks",
".",
"rep... | repeat the rendered contents of the specified block cycling from min_reps to max_reps counting by step . | train | false |
11,042 | def _compute_content_grad(F, F_content, layer):
Fl = F[layer]
El = (Fl - F_content[layer])
loss = ((El ** 2).sum() / 2)
grad = (El * (Fl > 0))
return (loss, grad)
| [
"def",
"_compute_content_grad",
"(",
"F",
",",
"F_content",
",",
"layer",
")",
":",
"Fl",
"=",
"F",
"[",
"layer",
"]",
"El",
"=",
"(",
"Fl",
"-",
"F_content",
"[",
"layer",
"]",
")",
"loss",
"=",
"(",
"(",
"El",
"**",
"2",
")",
".",
"sum",
"(",... | computes content gradient and loss from activation features . | train | false |
def getSequenceIndexFromProcedure(procedure):
    """Return the craft-sequence index of *procedure*, or 0 when absent."""
    craftSequence = getReadCraftSequence()
    if procedure in craftSequence:
        return craftSequence.index(procedure)
    return 0
| [
"def",
"getSequenceIndexFromProcedure",
"(",
"procedure",
")",
":",
"craftSequence",
"=",
"getReadCraftSequence",
"(",
")",
"if",
"(",
"procedure",
"not",
"in",
"craftSequence",
")",
":",
"return",
"0",
"return",
"craftSequence",
".",
"index",
"(",
"procedure",
... | get the profile sequence index of the procedure . | train | false |
def reload_rules():
    """Reload the firewall rules via ``firewall-cmd --reload``."""
    return __firewall_cmd('--reload')
| [
"def",
"reload_rules",
"(",
")",
":",
"return",
"__firewall_cmd",
"(",
"'--reload'",
")"
] | reload the firewall rules . | train | false |
def HexToByte(hexStr):
    """Convert a string of (optionally space-separated) hex byte values into a byte string."""
    compact = ''.join(hexStr.split(' '))
    chars = [chr(int(compact[i:(i + 2)], 16)) for i in range(0, len(compact), 2)]
    return ''.join(chars)
| [
"def",
"HexToByte",
"(",
"hexStr",
")",
":",
"bytes",
"=",
"[",
"]",
"hexStr",
"=",
"''",
".",
"join",
"(",
"hexStr",
".",
"split",
"(",
"' '",
")",
")",
"for",
"i",
"in",
"range",
"(",
"0",
",",
"len",
"(",
"hexStr",
")",
",",
"2",
")",
":",... | convert a string hex byte values into a byte string . | train | false |
def _interfaces_config(interfaces):
    """Write the shorewall interfaces configuration file.

    Missing per-entry fields get defaults (zone=net, broadcast=detect,
    empty options); entries are mutated in place via setdefault.
    """
    if interfaces is None:
        interfaces = DEFAULT_INTERFACES
    lines = [INTERFACES_HEADER]
    for entry in interfaces:
        for key, default in (('zone', 'net'), ('broadcast', 'detect'), ('options', '')):
            entry.setdefault(key, default)
        lines.append(INTERFACES_FORMAT % entry)
    file('/etc/shorewall/interfaces', contents=''.join(lines), use_sudo=True)
| [
"def",
"_interfaces_config",
"(",
"interfaces",
")",
":",
"if",
"(",
"interfaces",
"is",
"None",
")",
":",
"interfaces",
"=",
"DEFAULT_INTERFACES",
"lines",
"=",
"[",
"INTERFACES_HEADER",
"]",
"for",
"entry",
"in",
"interfaces",
":",
"entry",
".",
"setdefault"... | interfaces configuration . | train | false |
@public
def field_isomorphism(a, b, **args):
    """Construct an isomorphism between the number fields Q(a) and Q(b).

    Returns the coefficients expressing *a* in terms of *b*, or None when
    no embedding exists.  Tries the PSLQ-based search first (unless
    ``fast=False``), falling back to factorization.
    """
    (a, b) = (sympify(a), sympify(b))
    if (not a.is_AlgebraicNumber):
        a = AlgebraicNumber(a)
    if (not b.is_AlgebraicNumber):
        b = AlgebraicNumber(b)
    if (a == b):
        return a.coeffs()
    n = a.minpoly.degree()
    m = b.minpoly.degree()
    if (n == 1):
        # a is rational: it embeds trivially as its own root.
        return [a.root]
    if ((m % n) != 0):
        # The degree of Q(a) must divide that of Q(b) for an embedding.
        return None
    if args.get('fast', True):
        try:
            result = field_isomorphism_pslq(a, b)
            if (result is not None):
                return result
        except NotImplementedError:
            # PSLQ path unavailable for these fields; use factorization.
            pass
    return field_isomorphism_factor(a, b)
| [
"@",
"public",
"def",
"field_isomorphism",
"(",
"a",
",",
"b",
",",
"**",
"args",
")",
":",
"(",
"a",
",",
"b",
")",
"=",
"(",
"sympify",
"(",
"a",
")",
",",
"sympify",
"(",
"b",
")",
")",
"if",
"(",
"not",
"a",
".",
"is_AlgebraicNumber",
")",
... | construct an isomorphism between two number fields . | train | false |
def _has_db_updated_with_new_score_bwc_v2(user_id, scored_block_usage_key, expected_modified_time, score_deleted):
    """Deprecated v2-task check that the DB reflects the new score.

    Returns True when the stored score's modified time is at least
    *expected_modified_time*; a missing score counts as updated only when
    the score was deleted.  ORA blocks are looked up via the submissions API.
    """
    score = get_score(user_id, scored_block_usage_key)
    if (score is None):
        return score_deleted
    elif (score.module_type == 'openassessment'):
        # ORA scores live in the submissions service, keyed by anonymous id.
        anon_id = anonymous_id_for_user(User.objects.get(id=user_id), scored_block_usage_key.course_key)
        course_id = unicode(scored_block_usage_key.course_key)
        item_id = unicode(scored_block_usage_key)
        api_score = sub_api.get_score({'student_id': anon_id, 'course_id': course_id, 'item_id': item_id, 'item_type': 'openassessment'})
        if (api_score is None):
            return score_deleted
        reported_modified_time = api_score['created_at']
    else:
        reported_modified_time = score.modified
    return (reported_modified_time >= expected_modified_time)
| [
"def",
"_has_db_updated_with_new_score_bwc_v2",
"(",
"user_id",
",",
"scored_block_usage_key",
",",
"expected_modified_time",
",",
"score_deleted",
")",
":",
"score",
"=",
"get_score",
"(",
"user_id",
",",
"scored_block_usage_key",
")",
"if",
"(",
"score",
"is",
"None... | deprecated version for backward compatibility with v2 tasks . | train | false |
def get_queue_list(queue_list=None):
    """Return the validated queue list, defaulting to all known queues."""
    default_queue_list = queue_timeout.keys()
    if not queue_list:
        return default_queue_list
    if isinstance(queue_list, basestring):
        queue_list = [queue_list]
    for queue in queue_list:
        validate_queue(queue, default_queue_list)
    return queue_list
| [
"def",
"get_queue_list",
"(",
"queue_list",
"=",
"None",
")",
":",
"default_queue_list",
"=",
"queue_timeout",
".",
"keys",
"(",
")",
"if",
"queue_list",
":",
"if",
"isinstance",
"(",
"queue_list",
",",
"basestring",
")",
":",
"queue_list",
"=",
"[",
"queue_... | defines possible queues . | train | false |
def get_model_label(model):
    """Return the 'app_label.ModelName' label for a model class, or pass a label through."""
    if isinstance(model, six.string_types):
        return model
    return '%s.%s' % (model._meta.app_label, model.__name__)
| [
"def",
"get_model_label",
"(",
"model",
")",
":",
"if",
"isinstance",
"(",
"model",
",",
"six",
".",
"string_types",
")",
":",
"return",
"model",
"else",
":",
"return",
"(",
"'%s.%s'",
"%",
"(",
"model",
".",
"_meta",
".",
"app_label",
",",
"model",
".... | take a model class or model label and return its model label . | train | true |
def safe_version(version):
    """Convert a version string to an identifier-safe form (dots become underscores)."""
    return '_'.join(version.split('.'))
| [
"def",
"safe_version",
"(",
"version",
")",
":",
"return",
"version",
".",
"replace",
"(",
"'.'",
",",
"'_'",
")"
] | convert an arbitrary string to a standard version string spaces become dots . | train | false |
def getLevelName(level):
    """Return the textual name of logging level *level*, or 'Level <n>' when unknown."""
    fallback = 'Level %s' % level
    return _levelNames.get(level, fallback)
| [
"def",
"getLevelName",
"(",
"level",
")",
":",
"return",
"_levelNames",
".",
"get",
"(",
"level",
",",
"(",
"'Level %s'",
"%",
"level",
")",
")"
] | return the textual representation of logging level level . | train | false |
def shquote(arg):
    """Quote *arg* for later parsing by shlex, using repr() when needed."""
    has_special = any(c in arg for c in ('"', "'", '\\', '#'))
    if has_special or arg.split() != [arg]:
        return repr(arg)
    return arg
| [
"def",
"shquote",
"(",
"arg",
")",
":",
"for",
"c",
"in",
"(",
"'\"'",
",",
"\"'\"",
",",
"'\\\\'",
",",
"'#'",
")",
":",
"if",
"(",
"c",
"in",
"arg",
")",
":",
"return",
"repr",
"(",
"arg",
")",
"if",
"(",
"arg",
".",
"split",
"(",
")",
"!... | quote an argument for later parsing by shlex . | train | true |
def get_name_and_placement(request, minsegs=1, maxsegs=None, rest_with_last=False):
    """Split and validate the request path, appending the storage policy.

    Resolves the X-Backend-Storage-Policy-Index header to a policy object
    (503 when unknown) and returns the validated path segments with the
    policy appended.
    """
    policy_index = request.headers.get('X-Backend-Storage-Policy-Index')
    policy = POLICIES.get_by_index(policy_index)
    if (not policy):
        raise HTTPServiceUnavailable(body=(_('No policy with index %s') % policy_index), request=request, content_type='text/plain')
    results = split_and_validate_path(request, minsegs=minsegs, maxsegs=maxsegs, rest_with_last=rest_with_last)
    results.append(policy)
    return results
| [
"def",
"get_name_and_placement",
"(",
"request",
",",
"minsegs",
"=",
"1",
",",
"maxsegs",
"=",
"None",
",",
"rest_with_last",
"=",
"False",
")",
":",
"policy_index",
"=",
"request",
".",
"headers",
".",
"get",
"(",
"'X-Backend-Storage-Policy-Index'",
")",
"po... | utility function to split and validate the request path and storage policy . | train | false |
11,055 | def _get_plugin_specs_as_list(specs):
if (specs is not None):
if isinstance(specs, str):
specs = (specs.split(',') if specs else [])
if (not isinstance(specs, (list, tuple))):
raise UsageError(("Plugin specs must be a ','-separated string or a list/tuple of strings for plugin names. Given: %r" % specs))
return list(specs)
return []
| [
"def",
"_get_plugin_specs_as_list",
"(",
"specs",
")",
":",
"if",
"(",
"specs",
"is",
"not",
"None",
")",
":",
"if",
"isinstance",
"(",
"specs",
",",
"str",
")",
":",
"specs",
"=",
"(",
"specs",
".",
"split",
"(",
"','",
")",
"if",
"specs",
"else",
... | parses a list of "plugin specs" and returns a list of plugin names . | train | false |
def selWorst(individuals, k):
    """Select the *k* worst individuals (lowest fitness) among *individuals*."""
    ranked = sorted(individuals, key=attrgetter('fitness'))
    return ranked[:k]
| [
"def",
"selWorst",
"(",
"individuals",
",",
"k",
")",
":",
"return",
"sorted",
"(",
"individuals",
",",
"key",
"=",
"attrgetter",
"(",
"'fitness'",
")",
")",
"[",
":",
"k",
"]"
] | select the *k* worst individuals among the input *individuals* . | train | false |
def requiresOAuth(fun):
    """Decorator for request handlers to gain authentication via OAuth.

    Redirects to the token-request flow when no access token is present or
    when the wrapped call fails with HTTP 401/403; other request errors
    propagate unchanged.
    """
    def decorate(self, *args, **kwargs):
        if self.client.has_access_token():
            try:
                fun(self, *args, **kwargs)
            except gdata.service.RequestError as error:
                if (error.code in [401, 403]):
                    # Token expired/revoked: restart the OAuth dance.
                    self.redirect('/oauth/request_token')
                else:
                    raise
        else:
            self.redirect('/oauth/request_token')
    return decorate
| [
"def",
"requiresOAuth",
"(",
"fun",
")",
":",
"def",
"decorate",
"(",
"self",
",",
"*",
"args",
",",
"**",
"kwargs",
")",
":",
"if",
"self",
".",
"client",
".",
"has_access_token",
"(",
")",
":",
"try",
":",
"fun",
"(",
"self",
",",
"*",
"args",
... | decorator for request handlers to gain authentication via oauth . | train | false |
11,059 | def _parse_string(data, start, stop_at_equals=False, must_have_content=False):
if (start == len(data)):
raise ParsingError(u'Expecting string, but found end of input!')
char = data[start]
if ((char == u'"') or (char == u"'")):
(end, value) = _parse_quoted_string(data, start)
has_content = True
else:
(end, value) = _parse_unquoted_string(data, start, stop_at_equals)
has_content = (len(value) > 0)
if (must_have_content and (not has_content)):
raise ParsingError(u'String starting at {0} must be non-empty!'.format(_format_position(data, start)))
next_is_equals = False
if (stop_at_equals and ((end + 1) < len(data))):
next_is_equals = (data[end] == u'=')
return (end, value, next_is_equals)
| [
"def",
"_parse_string",
"(",
"data",
",",
"start",
",",
"stop_at_equals",
"=",
"False",
",",
"must_have_content",
"=",
"False",
")",
":",
"if",
"(",
"start",
"==",
"len",
"(",
"data",
")",
")",
":",
"raise",
"ParsingError",
"(",
"u'Expecting string, but foun... | parse a string starting at position start in data . | train | false |
11,060 | def pop_key(data):
items = sorted(data.items(), key=(lambda item: (len(item[1]), item[0])))
key = items[0][0]
data.pop(key)
for dep in data.values():
dep.discard(key)
return key
| [
"def",
"pop_key",
"(",
"data",
")",
":",
"items",
"=",
"sorted",
"(",
"data",
".",
"items",
"(",
")",
",",
"key",
"=",
"(",
"lambda",
"item",
":",
"(",
"len",
"(",
"item",
"[",
"1",
"]",
")",
",",
"item",
"[",
"0",
"]",
")",
")",
")",
"key"... | pop an item from the graph that has the fewest dependencies in the case of a tie the winners will be sorted alphabetically . | train | false |
11,063 | def bw_normal_reference(x, kernel=kernels.Gaussian):
C = kernel.normal_reference_constant
A = _select_sigma(x)
n = len(x)
return ((C * A) * (n ** (-0.2)))
| [
"def",
"bw_normal_reference",
"(",
"x",
",",
"kernel",
"=",
"kernels",
".",
"Gaussian",
")",
":",
"C",
"=",
"kernel",
".",
"normal_reference_constant",
"A",
"=",
"_select_sigma",
"(",
"x",
")",
"n",
"=",
"len",
"(",
"x",
")",
"return",
"(",
"(",
"C",
... | plug-in bandwidth with kernel specific constant based on normal reference . | train | false |
11,064 | def write_png(filename, data):
data = np.asarray(data)
if ((not (data.ndim == 3)) and (data.shape[(-1)] in (3, 4))):
raise ValueError('data must be a 3D array with last dimension 3 or 4')
with open(filename, 'wb') as f:
f.write(_make_png(data))
| [
"def",
"write_png",
"(",
"filename",
",",
"data",
")",
":",
"data",
"=",
"np",
".",
"asarray",
"(",
"data",
")",
"if",
"(",
"(",
"not",
"(",
"data",
".",
"ndim",
"==",
"3",
")",
")",
"and",
"(",
"data",
".",
"shape",
"[",
"(",
"-",
"1",
")",
... | write a png file unlike imsave . | train | true |
11,065 | def liftRepositoryDialogs(repositoryDialogs):
for repositoryDialog in repositoryDialogs:
repositoryDialog.root.withdraw()
repositoryDialog.root.lift()
repositoryDialog.root.deiconify()
repositoryDialog.root.lift()
repositoryDialog.root.update_idletasks()
| [
"def",
"liftRepositoryDialogs",
"(",
"repositoryDialogs",
")",
":",
"for",
"repositoryDialog",
"in",
"repositoryDialogs",
":",
"repositoryDialog",
".",
"root",
".",
"withdraw",
"(",
")",
"repositoryDialog",
".",
"root",
".",
"lift",
"(",
")",
"repositoryDialog",
"... | lift the repository dialogs . | train | false |
11,068 | def implicit_multiplication_application(result, local_dict, global_dict):
for step in (split_symbols, implicit_multiplication, implicit_application, function_exponentiation):
result = step(result, local_dict, global_dict)
return result
| [
"def",
"implicit_multiplication_application",
"(",
"result",
",",
"local_dict",
",",
"global_dict",
")",
":",
"for",
"step",
"in",
"(",
"split_symbols",
",",
"implicit_multiplication",
",",
"implicit_application",
",",
"function_exponentiation",
")",
":",
"result",
"=... | allows a slightly relaxed syntax . | train | false |
11,069 | def _group_activity_query(group_id):
import ckan.model as model
group = model.Group.get(group_id)
if (not group):
return model.Session.query(model.Activity).filter('0=1')
dataset_ids = [dataset.id for dataset in group.packages()]
q = model.Session.query(model.Activity)
if dataset_ids:
q = q.filter(or_((model.Activity.object_id == group_id), model.Activity.object_id.in_(dataset_ids)))
else:
q = q.filter((model.Activity.object_id == group_id))
return q
| [
"def",
"_group_activity_query",
"(",
"group_id",
")",
":",
"import",
"ckan",
".",
"model",
"as",
"model",
"group",
"=",
"model",
".",
"Group",
".",
"get",
"(",
"group_id",
")",
"if",
"(",
"not",
"group",
")",
":",
"return",
"model",
".",
"Session",
"."... | return an sqlalchemy query for all activities about group_id . | train | false |
11,070 | def gettempdir():
global tempdir
if (tempdir is None):
_once_lock.acquire()
try:
if (tempdir is None):
tempdir = _get_default_tempdir()
finally:
_once_lock.release()
return tempdir
| [
"def",
"gettempdir",
"(",
")",
":",
"global",
"tempdir",
"if",
"(",
"tempdir",
"is",
"None",
")",
":",
"_once_lock",
".",
"acquire",
"(",
")",
"try",
":",
"if",
"(",
"tempdir",
"is",
"None",
")",
":",
"tempdir",
"=",
"_get_default_tempdir",
"(",
")",
... | accessor for tempfile . | train | true |
11,071 | def get_configured_provider():
return config.is_provider_configured(__opts__, (__active_provider_name__ or __virtualname__), ('user', 'tenant', 'identity_url', 'compute_region'))
| [
"def",
"get_configured_provider",
"(",
")",
":",
"return",
"config",
".",
"is_provider_configured",
"(",
"__opts__",
",",
"(",
"__active_provider_name__",
"or",
"__virtualname__",
")",
",",
"(",
"'user'",
",",
"'tenant'",
",",
"'identity_url'",
",",
"'compute_region... | return the first configured instance . | train | false |
11,072 | def matrixPoints(points, prefix, xmlElement):
matrixMatrixTetragrid = matrix.getMatrixTetragrid(prefix, xmlElement)
if (matrixMatrixTetragrid == None):
print 'Warning, matrixMatrixTetragrid was None in matrix so nothing will be done for:'
print xmlElement
return
for point in points:
transformVector3ByMatrix(matrixMatrixTetragrid, point)
| [
"def",
"matrixPoints",
"(",
"points",
",",
"prefix",
",",
"xmlElement",
")",
":",
"matrixMatrixTetragrid",
"=",
"matrix",
".",
"getMatrixTetragrid",
"(",
"prefix",
",",
"xmlElement",
")",
"if",
"(",
"matrixMatrixTetragrid",
"==",
"None",
")",
":",
"print",
"'W... | rotate the points . | train | false |
11,073 | def _set_coordinator_properties(coordinator, root, namespace):
coordinator.name = root.get('name')
coordinator.timezone = root.get('timezone')
coordinator.start = oozie_to_django_datetime(root.get('start'))
coordinator.end = oozie_to_django_datetime(root.get('end'))
(coordinator.frequency_unit, coordinator.frequency_number) = oozie_to_hue_frequency(root.get('frequency'))
| [
"def",
"_set_coordinator_properties",
"(",
"coordinator",
",",
"root",
",",
"namespace",
")",
":",
"coordinator",
".",
"name",
"=",
"root",
".",
"get",
"(",
"'name'",
")",
"coordinator",
".",
"timezone",
"=",
"root",
".",
"get",
"(",
"'timezone'",
")",
"co... | get coordinator properties from coordinator xml set properties on coordinator with attributes from xml etree root . | train | false |
11,074 | @verbose
def activate_proj(projs, copy=True, verbose=None):
if copy:
projs = deepcopy(projs)
for proj in projs:
proj['active'] = True
logger.info(('%d projection items activated' % len(projs)))
return projs
| [
"@",
"verbose",
"def",
"activate_proj",
"(",
"projs",
",",
"copy",
"=",
"True",
",",
"verbose",
"=",
"None",
")",
":",
"if",
"copy",
":",
"projs",
"=",
"deepcopy",
"(",
"projs",
")",
"for",
"proj",
"in",
"projs",
":",
"proj",
"[",
"'active'",
"]",
... | set all projections to active . | train | false |
11,076 | def unregister_class(alias):
try:
x = CLASS_CACHE[alias]
except KeyError:
raise UnknownClassAlias(('Unknown alias %r' % (alias,)))
if (not x.anonymous):
del CLASS_CACHE[x.alias]
del CLASS_CACHE[x.klass]
return x
| [
"def",
"unregister_class",
"(",
"alias",
")",
":",
"try",
":",
"x",
"=",
"CLASS_CACHE",
"[",
"alias",
"]",
"except",
"KeyError",
":",
"raise",
"UnknownClassAlias",
"(",
"(",
"'Unknown alias %r'",
"%",
"(",
"alias",
",",
")",
")",
")",
"if",
"(",
"not",
... | unregister class instrumentation . | train | true |
11,077 | @loader_option()
def subqueryload(loadopt, attr):
return loadopt.set_relationship_strategy(attr, {'lazy': 'subquery'})
| [
"@",
"loader_option",
"(",
")",
"def",
"subqueryload",
"(",
"loadopt",
",",
"attr",
")",
":",
"return",
"loadopt",
".",
"set_relationship_strategy",
"(",
"attr",
",",
"{",
"'lazy'",
":",
"'subquery'",
"}",
")"
] | indicate that the given attribute should be loaded using subquery eager loading . | train | false |
11,078 | def _shuffle(y, groups, random_state):
if (groups is None):
indices = random_state.permutation(len(y))
else:
indices = np.arange(len(groups))
for group in np.unique(groups):
this_mask = (groups == group)
indices[this_mask] = random_state.permutation(indices[this_mask])
return safe_indexing(y, indices)
| [
"def",
"_shuffle",
"(",
"y",
",",
"groups",
",",
"random_state",
")",
":",
"if",
"(",
"groups",
"is",
"None",
")",
":",
"indices",
"=",
"random_state",
".",
"permutation",
"(",
"len",
"(",
"y",
")",
")",
"else",
":",
"indices",
"=",
"np",
".",
"ara... | return a shuffled copy of y eventually shuffle among same labels . | train | false |
11,079 | def commit():
connection._commit()
set_clean()
| [
"def",
"commit",
"(",
")",
":",
"connection",
".",
"_commit",
"(",
")",
"set_clean",
"(",
")"
] | interface to git-commit(1)_ cwd the path to the git checkout message commit message opts any additional options to add to the command line . | train | false |
11,080 | def next_multiple(n, k):
return (div_ceil(n, k) * k)
| [
"def",
"next_multiple",
"(",
"n",
",",
"k",
")",
":",
"return",
"(",
"div_ceil",
"(",
"n",
",",
"k",
")",
"*",
"k",
")"
] | the smallest multiple of k which is >= n . | train | false |
11,082 | def loop_until_passed(pr_url, sleep_between, session, jenkins_session, max_retries):
retry_counts = Counter()
for _ in infinite_sleeps(sleep_between):
resp = session.get(pr_url)
if (resp.status_code != 200):
print('PR not found: {}'.format(resp.content))
return (None, None)
pr = resp.json()
if (pr['state'] != u'open'):
print('Merge request not open: {}'.format(pr['state']))
return (None, None)
else:
statuses = get_statuses(pr, session)
if (len(statuses) < MINIMUM_STATUSES):
print("Can't merge PR yet because there aren't enough statuses reporting ({} so far)".format(len(statuses)))
else:
needed = filter(not_success, statuses)
if (not needed):
return (pr, statuses)
print("Can't merge PR yet because these {} checks haven't succeeded:".format(len(needed)))
maybe_retry_jobs(needed, retry_counts, max_retries, jenkins_session)
print('Sleeping for {} seconds and trying again.\n\n'.format(sleep_between))
| [
"def",
"loop_until_passed",
"(",
"pr_url",
",",
"sleep_between",
",",
"session",
",",
"jenkins_session",
",",
"max_retries",
")",
":",
"retry_counts",
"=",
"Counter",
"(",
")",
"for",
"_",
"in",
"infinite_sleeps",
"(",
"sleep_between",
")",
":",
"resp",
"=",
... | loop until all the statuses for the target pull request are green . | train | false |
11,083 | def adjust_gamma(image, gamma=1, gain=1):
_assert_non_negative(image)
dtype = image.dtype.type
if (gamma < 0):
raise ValueError('Gamma should be a non-negative real number.')
scale = float((dtype_limits(image, True)[1] - dtype_limits(image, True)[0]))
out = ((((image / scale) ** gamma) * scale) * gain)
return dtype(out)
| [
"def",
"adjust_gamma",
"(",
"image",
",",
"gamma",
"=",
"1",
",",
"gain",
"=",
"1",
")",
":",
"_assert_non_negative",
"(",
"image",
")",
"dtype",
"=",
"image",
".",
"dtype",
".",
"type",
"if",
"(",
"gamma",
"<",
"0",
")",
":",
"raise",
"ValueError",
... | performs gamma correction on the input image . | train | false |
11,084 | def _send_instance_update_notification(context, instance, old_vm_state=None, old_task_state=None, new_vm_state=None, new_task_state=None, service='compute', host=None):
payload = info_from_instance(context, instance, None, None)
if (not new_vm_state):
new_vm_state = instance['vm_state']
if (not new_task_state):
new_task_state = instance['task_state']
states_payload = {'old_state': old_vm_state, 'state': new_vm_state, 'old_task_state': old_task_state, 'new_task_state': new_task_state}
payload.update(states_payload)
(audit_start, audit_end) = audit_period_bounds(current_period=True)
payload['audit_period_beginning'] = audit_start
payload['audit_period_ending'] = audit_end
bw = bandwidth_usage(instance, audit_start)
payload['bandwidth'] = bw
publisher_id = notifier_api.publisher_id(service, host)
notifier_api.notify(context, publisher_id, 'compute.instance.update', notifier_api.INFO, payload)
| [
"def",
"_send_instance_update_notification",
"(",
"context",
",",
"instance",
",",
"old_vm_state",
"=",
"None",
",",
"old_task_state",
"=",
"None",
",",
"new_vm_state",
"=",
"None",
",",
"new_task_state",
"=",
"None",
",",
"service",
"=",
"'compute'",
",",
"host... | send compute . | train | false |
11,085 | def enable_parallel(processnum=None):
global pool, dt, cut, cut_for_search
from multiprocessing import cpu_count
if (os.name == u'nt'):
raise NotImplementedError(u'jieba: parallel mode only supports posix system')
else:
from multiprocessing import Pool
dt.check_initialized()
if (processnum is None):
processnum = cpu_count()
pool = Pool(processnum)
cut = _pcut
cut_for_search = _pcut_for_search
| [
"def",
"enable_parallel",
"(",
"processnum",
"=",
"None",
")",
":",
"global",
"pool",
",",
"dt",
",",
"cut",
",",
"cut_for_search",
"from",
"multiprocessing",
"import",
"cpu_count",
"if",
"(",
"os",
".",
"name",
"==",
"u'nt'",
")",
":",
"raise",
"NotImplem... | change the modules cut and cut_for_search functions to the parallel version . | train | true |
11,087 | def safe_ip_format(ip):
try:
if (netaddr.IPAddress(ip).version == 6):
return ('[%s]' % ip)
except (TypeError, netaddr.AddrFormatError):
pass
return ip
| [
"def",
"safe_ip_format",
"(",
"ip",
")",
":",
"try",
":",
"if",
"(",
"netaddr",
".",
"IPAddress",
"(",
"ip",
")",
".",
"version",
"==",
"6",
")",
":",
"return",
"(",
"'[%s]'",
"%",
"ip",
")",
"except",
"(",
"TypeError",
",",
"netaddr",
".",
"AddrFo... | transform ip string to "safe" format . | train | false |
11,088 | def get_step_state(emr_connection, jobflowid, step_name, update=False):
g.reset_caches()
steps = get_step_states(emr_connection, jobflowid, _update=update)
for (name, state, start) in sorted(steps, key=(lambda t: t[2]), reverse=True):
if (name == step_name):
return state
else:
return NOTFOUND
| [
"def",
"get_step_state",
"(",
"emr_connection",
",",
"jobflowid",
",",
"step_name",
",",
"update",
"=",
"False",
")",
":",
"g",
".",
"reset_caches",
"(",
")",
"steps",
"=",
"get_step_states",
"(",
"emr_connection",
",",
"jobflowid",
",",
"_update",
"=",
"upd... | return the state of a step . | train | false |
11,089 | def libvlc_log_unset(p_instance):
f = (_Cfunctions.get('libvlc_log_unset', None) or _Cfunction('libvlc_log_unset', ((1,),), None, None, Instance))
return f(p_instance)
| [
"def",
"libvlc_log_unset",
"(",
"p_instance",
")",
":",
"f",
"=",
"(",
"_Cfunctions",
".",
"get",
"(",
"'libvlc_log_unset'",
",",
"None",
")",
"or",
"_Cfunction",
"(",
"'libvlc_log_unset'",
",",
"(",
"(",
"1",
",",
")",
",",
")",
",",
"None",
",",
"Non... | unsets the logging callback for a libvlc instance . | train | false |
11,090 | def test_history_import_abspath_in_archive():
dest_parent = mkdtemp()
arcname_prefix = os.path.abspath(os.path.join(dest_parent, 'insecure'))
with HistoryArchive(arcname_prefix=arcname_prefix) as history_archive:
history_archive.write_metafiles()
history_archive.write_file('datasets/Pasted_Entry_1.txt', 'foo')
history_archive.finalize()
_run_unpack(history_archive, dest_parent, 'Absolute path in import archive allowed')
| [
"def",
"test_history_import_abspath_in_archive",
"(",
")",
":",
"dest_parent",
"=",
"mkdtemp",
"(",
")",
"arcname_prefix",
"=",
"os",
".",
"path",
".",
"abspath",
"(",
"os",
".",
"path",
".",
"join",
"(",
"dest_parent",
",",
"'insecure'",
")",
")",
"with",
... | ensure that a history import archive cannot reference a absolute path outside the archive . | train | false |
11,091 | def write_ros_handshake_header(sock, header):
s = encode_ros_handshake_header(header)
sock.sendall(s)
return len(s)
| [
"def",
"write_ros_handshake_header",
"(",
"sock",
",",
"header",
")",
":",
"s",
"=",
"encode_ros_handshake_header",
"(",
"header",
")",
"sock",
".",
"sendall",
"(",
"s",
")",
"return",
"len",
"(",
"s",
")"
] | write ros handshake header header to socket sock . | train | false |
11,093 | def test_multiset_partitions_taocp():
multiplicities = [2, 2]
compare_multiset_w_baseline(multiplicities)
multiplicities = [4, 3, 1]
compare_multiset_w_baseline(multiplicities)
| [
"def",
"test_multiset_partitions_taocp",
"(",
")",
":",
"multiplicities",
"=",
"[",
"2",
",",
"2",
"]",
"compare_multiset_w_baseline",
"(",
"multiplicities",
")",
"multiplicities",
"=",
"[",
"4",
",",
"3",
",",
"1",
"]",
"compare_multiset_w_baseline",
"(",
"mult... | compares the output of multiset_partitions_taocp with a baseline implementation . | train | false |
11,094 | def custom_key_func(key, key_prefix, version):
return (u'CUSTOM-' + u'-'.join([key_prefix, str(version), key]))
| [
"def",
"custom_key_func",
"(",
"key",
",",
"key_prefix",
",",
"version",
")",
":",
"return",
"(",
"u'CUSTOM-'",
"+",
"u'-'",
".",
"join",
"(",
"[",
"key_prefix",
",",
"str",
"(",
"version",
")",
",",
"key",
"]",
")",
")"
] | a customized cache key function . | train | false |
11,095 | def remove_prerequisite_course(course_key, milestone):
if (not is_prerequisite_courses_enabled()):
return None
milestones_api.remove_course_milestone(course_key, milestone)
| [
"def",
"remove_prerequisite_course",
"(",
"course_key",
",",
"milestone",
")",
":",
"if",
"(",
"not",
"is_prerequisite_courses_enabled",
"(",
")",
")",
":",
"return",
"None",
"milestones_api",
".",
"remove_course_milestone",
"(",
"course_key",
",",
"milestone",
")"
... | it would remove pre-requisite course milestone for course referred by course_key . | train | false |
11,097 | def getTricomplexscale(transformWords):
scale = euclidean.getComplexByWords(transformWords)
return [complex(scale.real, 0.0), complex(0.0, scale.imag), complex()]
| [
"def",
"getTricomplexscale",
"(",
"transformWords",
")",
":",
"scale",
"=",
"euclidean",
".",
"getComplexByWords",
"(",
"transformWords",
")",
"return",
"[",
"complex",
"(",
"scale",
".",
"real",
",",
"0.0",
")",
",",
"complex",
"(",
"0.0",
",",
"scale",
"... | get matrixsvg by transformwords . | train | false |
11,099 | def save_collection_summary(collection_summary):
collection_summary_model = collection_models.CollectionSummaryModel(id=collection_summary.id, title=collection_summary.title, category=collection_summary.category, objective=collection_summary.objective, language_code=collection_summary.language_code, tags=collection_summary.tags, status=collection_summary.status, community_owned=collection_summary.community_owned, owner_ids=collection_summary.owner_ids, editor_ids=collection_summary.editor_ids, viewer_ids=collection_summary.viewer_ids, contributor_ids=collection_summary.contributor_ids, contributors_summary=collection_summary.contributors_summary, version=collection_summary.version, node_count=collection_summary.node_count, collection_model_last_updated=collection_summary.collection_model_last_updated, collection_model_created_on=collection_summary.collection_model_created_on)
collection_summary_model.put()
| [
"def",
"save_collection_summary",
"(",
"collection_summary",
")",
":",
"collection_summary_model",
"=",
"collection_models",
".",
"CollectionSummaryModel",
"(",
"id",
"=",
"collection_summary",
".",
"id",
",",
"title",
"=",
"collection_summary",
".",
"title",
",",
"ca... | save a collection summary domain object as a collectionsummarymodel entity in the datastore . | train | false |
11,100 | def RATINGS_BY_NAME():
all_ratings = ALL_RATINGS()
ratings_choices = []
for rb in RATINGS_BODIES.values():
for r in rb.ratings:
ratings_choices.append((all_ratings.index(r), (u'%s - %s' % (rb.name, dehydrate_rating(r).name))))
return ratings_choices
| [
"def",
"RATINGS_BY_NAME",
"(",
")",
":",
"all_ratings",
"=",
"ALL_RATINGS",
"(",
")",
"ratings_choices",
"=",
"[",
"]",
"for",
"rb",
"in",
"RATINGS_BODIES",
".",
"values",
"(",
")",
":",
"for",
"r",
"in",
"rb",
".",
"ratings",
":",
"ratings_choices",
"."... | create a list of tuples after we know the locale since this attempts to concatenate two lazy translations in constants file . | train | false |
11,101 | def load_mappings(app):
now = int(time.time())
cache_time = (now - (app.config.intersphinx_cache_limit * 86400))
env = app.builder.env
if (not hasattr(env, 'intersphinx_cache')):
env.intersphinx_cache = {}
cache = env.intersphinx_cache
update = False
for (uri, inv) in app.config.intersphinx_mapping.iteritems():
if (not inv):
inv = posixpath.join(uri, INVENTORY_FILENAME)
if (('://' not in inv) or (uri not in cache) or (cache[uri][0] < cache_time)):
invdata = fetch_inventory(app, uri, inv)
cache[uri] = (now, invdata)
update = True
if update:
env.intersphinx_inventory = {}
for (_, invdata) in cache.itervalues():
if invdata:
env.intersphinx_inventory.update(invdata)
| [
"def",
"load_mappings",
"(",
"app",
")",
":",
"now",
"=",
"int",
"(",
"time",
".",
"time",
"(",
")",
")",
"cache_time",
"=",
"(",
"now",
"-",
"(",
"app",
".",
"config",
".",
"intersphinx_cache_limit",
"*",
"86400",
")",
")",
"env",
"=",
"app",
".",... | load name mappings encoded in command-line arguments . | train | false |
11,102 | def position_messages_from_tlog(filename):
messages = []
mlog = mavutil.mavlink_connection(filename)
while True:
try:
m = mlog.recv_match(type=['GLOBAL_POSITION_INT'])
if (m is None):
break
except Exception:
break
if (m.lat == 0):
continue
messages.append(m)
num_points = len(messages)
keep_point_distance = 3
kept_messages = []
kept_messages.append(messages[0])
pt1num = 0
pt2num = 1
while True:
if ((pt2num == (num_points - 1)) or (len(kept_messages) == 99)):
kept_messages.append(messages[pt2num])
break
pt1 = LocationGlobalRelative((messages[pt1num].lat / 10000000.0), (messages[pt1num].lon / 10000000.0), 0)
pt2 = LocationGlobalRelative((messages[pt2num].lat / 10000000.0), (messages[pt2num].lon / 10000000.0), 0)
distance_between_points = get_distance_metres(pt1, pt2)
if (distance_between_points > keep_point_distance):
kept_messages.append(messages[pt2num])
pt1num = pt2num
pt2num = (pt2num + 1)
return kept_messages
| [
"def",
"position_messages_from_tlog",
"(",
"filename",
")",
":",
"messages",
"=",
"[",
"]",
"mlog",
"=",
"mavutil",
".",
"mavlink_connection",
"(",
"filename",
")",
"while",
"True",
":",
"try",
":",
"m",
"=",
"mlog",
".",
"recv_match",
"(",
"type",
"=",
... | given telemetry log . | train | true |
11,103 | @contextfunction
def resolve_ctx(context):
g._admin_render_ctx = context
| [
"@",
"contextfunction",
"def",
"resolve_ctx",
"(",
"context",
")",
":",
"g",
".",
"_admin_render_ctx",
"=",
"context"
] | resolve current jinja2 context and store it for general consumption . | train | false |
11,104 | def setDevice(dev, kind=None):
if (not hasattr(backend, 'defaultOutput')):
raise IOError('Attempting to SetDevice (audio) but not supported by the current audio library ({!r})'.format(audioLib))
if hasattr(dev, 'name'):
dev = dev['name']
if (kind is None):
backend.defaultInput = backend.defaultOutput = dev
elif (kind == 'input'):
backend.defaultInput = dev
elif (kind == 'output'):
backend.defaultOutput = dev
elif travisCI:
return
else:
raise TypeError("`kind` should be one of [None, 'output', 'input']not {!r}".format(kind))
| [
"def",
"setDevice",
"(",
"dev",
",",
"kind",
"=",
"None",
")",
":",
"if",
"(",
"not",
"hasattr",
"(",
"backend",
",",
"'defaultOutput'",
")",
")",
":",
"raise",
"IOError",
"(",
"'Attempting to SetDevice (audio) but not supported by the current audio library ({!r})'",
... | sets the device to be used for new streams being created . | train | false |
11,105 | def set_identity_providers_if_unset(facts):
if ('master' in facts):
deployment_type = facts['common']['deployment_type']
if ('identity_providers' not in facts['master']):
identity_provider = dict(name='allow_all', challenge=True, login=True, kind='AllowAllPasswordIdentityProvider')
if (deployment_type in ['enterprise', 'atomic-enterprise', 'openshift-enterprise']):
identity_provider = dict(name='deny_all', challenge=True, login=True, kind='DenyAllPasswordIdentityProvider')
facts['master']['identity_providers'] = [identity_provider]
return facts
| [
"def",
"set_identity_providers_if_unset",
"(",
"facts",
")",
":",
"if",
"(",
"'master'",
"in",
"facts",
")",
":",
"deployment_type",
"=",
"facts",
"[",
"'common'",
"]",
"[",
"'deployment_type'",
"]",
"if",
"(",
"'identity_providers'",
"not",
"in",
"facts",
"["... | set identity_providers fact if not already present in facts dict args: facts : existing facts returns: dict: the facts dict updated with the generated identity providers facts if they were not already present . | train | false |
11,107 | @gen.coroutine
def OldRemovePhotos(client, obj_store, user_id, device_id, request):
remove_episodes = []
hide_episodes = []
for ep_dict in request['episodes']:
episode = (yield gen.Task(Episode.Query, client, ep_dict['episode_id'], None, must_exist=False))
if ((episode is None) or (episode.viewpoint_id == base.ViewfinderContext.current().user.private_vp_id)):
remove_episodes.append(ep_dict)
else:
hide_episodes.append(ep_dict)
hide_request = deepcopy(request)
if (len(hide_episodes) > 0):
hide_request['episodes'] = hide_episodes
(yield HidePhotos(client, obj_store, user_id, device_id, hide_request))
remove_request = deepcopy(request)
remove_request['episodes'] = remove_episodes
(yield RemovePhotos(client, obj_store, user_id, device_id, remove_request))
raise gen.Return({})
| [
"@",
"gen",
".",
"coroutine",
"def",
"OldRemovePhotos",
"(",
"client",
",",
"obj_store",
",",
"user_id",
",",
"device_id",
",",
"request",
")",
":",
"remove_episodes",
"=",
"[",
"]",
"hide_episodes",
"=",
"[",
"]",
"for",
"ep_dict",
"in",
"request",
"[",
... | used by older clients to remove photos from showing in a users personal library . | train | false |
11,108 | def codeDescription(status_code):
if (status_code in http_error_code):
return http_error_code[status_code]
else:
sickrage.srCore.srLogger.error((u'Unknown error code: %s. Please submit an issue' % status_code))
return u'unknown'
| [
"def",
"codeDescription",
"(",
"status_code",
")",
":",
"if",
"(",
"status_code",
"in",
"http_error_code",
")",
":",
"return",
"http_error_code",
"[",
"status_code",
"]",
"else",
":",
"sickrage",
".",
"srCore",
".",
"srLogger",
".",
"error",
"(",
"(",
"u'Unk... | returns the description of the url error code . | train | false |
def adapt_rgb(apply_to_rgb):
    """Decorator factory that lets a gray-scale filter handle RGB images.

    *apply_to_rgb* is the strategy invoked (with the wrapped filter and the
    image) whenever the incoming image looks RGB-like; non-RGB images are
    passed straight through to the wrapped filter.
    """
    def decorator(image_filter):
        @functools.wraps(image_filter)
        def image_filter_adapted(image, *args, **kwargs):
            # Guard clause: gray-scale images bypass the RGB adapter.
            if not is_rgb_like(image):
                return image_filter(image, *args, **kwargs)
            return apply_to_rgb(image_filter, image, *args, **kwargs)
        return image_filter_adapted
    return decorator
| [
"def",
"adapt_rgb",
"(",
"apply_to_rgb",
")",
":",
"def",
"decorator",
"(",
"image_filter",
")",
":",
"@",
"functools",
".",
"wraps",
"(",
"image_filter",
")",
"def",
"image_filter_adapted",
"(",
"image",
",",
"*",
"args",
",",
"**",
"kwargs",
")",
":",
... | return decorator that adapts to rgb images to a gray-scale filter . | train | false |
11,111 | def _get_allocated_vnc_ports(session):
    """Return the set of VNC port numbers already allocated to VMs.

    Queries every VirtualMachine object for its VNC_CONFIG_KEY property via
    the given session, paging through results with 'continue_retrieval'
    until the retrieval is exhausted (loop exits when result is falsy).

    :param session: API session providing ``_call_method``.
    :returns: set of ints, one per allocated VNC port.
    """
    vnc_ports = set()
    result = session._call_method(vim_util, 'get_objects', 'VirtualMachine', [VNC_CONFIG_KEY])
    while result:
        for obj in result.objects:
            # Objects without a propSet did not report the VNC option; skip.
            if (not hasattr(obj, 'propSet')):
                continue
            dynamic_prop = obj.propSet[0]
            option_value = dynamic_prop.val
            vnc_port = option_value.value
            vnc_ports.add(int(vnc_port))
        # Fetch the next page of results.
        result = session._call_method(vutil, 'continue_retrieval', result)
    return vnc_ports
| [
"def",
"_get_allocated_vnc_ports",
"(",
"session",
")",
":",
"vnc_ports",
"=",
"set",
"(",
")",
"result",
"=",
"session",
".",
"_call_method",
"(",
"vim_util",
",",
"'get_objects'",
",",
"'VirtualMachine'",
",",
"[",
"VNC_CONFIG_KEY",
"]",
")",
"while",
"resul... | return an integer set of all allocated vnc ports . | train | false |
11,112 | def has_ccx_coach_role(user, course_key):
    """Return True only if *user* coaches the specific CCX in *course_key*.

    Keys without a ``ccx`` attribute always yield False.

    :raises CCXLocatorValidationException: if the user holds the CCX coach
        role for the course but no matching CustomCourseForEdX row exists.
    """
    if hasattr(course_key, 'ccx'):
        ccx_id = course_key.ccx
        role = CourseCcxCoachRole(course_key)
        if role.has_user(user):
            # Cross-check the role against CCX records owned by this coach.
            list_ccx = CustomCourseForEdX.objects.filter(course_id=course_key.to_course_locator(), coach=user)
            if list_ccx.exists():
                coach_ccx = list_ccx[0]
                # Must coach the exact CCX named in the key, not just any CCX.
                return (str(coach_ccx.id) == ccx_id)
            else:
                raise CCXLocatorValidationException('Invalid CCX key. To verify that user is a coach on CCX, you must provide key to CCX')
    return False
| [
"def",
"has_ccx_coach_role",
"(",
"user",
",",
"course_key",
")",
":",
"if",
"hasattr",
"(",
"course_key",
",",
"'ccx'",
")",
":",
"ccx_id",
"=",
"course_key",
".",
"ccx",
"role",
"=",
"CourseCcxCoachRole",
"(",
"course_key",
")",
"if",
"role",
".",
"has_u... | check if user is a coach on this ccx . | train | false |
11,113 | def fixed_ip_get_all(context):
    """Fetch all defined fixed IPs by delegating to the configured DB backend (IMPL)."""
    return IMPL.fixed_ip_get_all(context)
| [
"def",
"fixed_ip_get_all",
"(",
"context",
")",
":",
"return",
"IMPL",
".",
"fixed_ip_get_all",
"(",
"context",
")"
] | get all defined fixed ips . | train | false |
def unicode_to_html(text):
    """Encode *text* as a string of decimal HTML character references.

    Every character (ASCII included) becomes ``&#NNN;`` where NNN is its
    Unicode code point, e.g. ``'A'`` -> ``'&#65;'``.
    """
    return ''.join(u'&#%s;' % ord(ch) for ch in text)
| [
"def",
"unicode_to_html",
"(",
"text",
")",
":",
"return",
"''",
".",
"join",
"(",
"[",
"(",
"u'&#%s;'",
"%",
"ord",
"(",
"i",
")",
")",
"for",
"i",
"in",
"text",
"]",
")"
] | turns all unicode into html entities . | train | false |
11,115 | def _from_soap(in_envelope_xml, xmlids=None, **kwargs):
    """Split a parsed SOAP envelope into its header children and body payload.

    :param in_envelope_xml: parsed XML element (lxml-style API) expected to
        be a SOAP Envelope.
    :param xmlids: optional id map; when given, href references are resolved
        in place before the envelope is inspected.
    :param kwargs: ``ns`` may override the SOAP envelope namespace
        (defaults to SOAP 1.1).
    :returns: ``(header, body)`` where header is the list of Header child
        elements (or None) and body is the first Body child (or None).
    :raises Fault: if the root is not an Envelope, or the envelope has
        neither Header nor Body.
    """
    ns_soap = kwargs.pop('ns', ns.soap11_env)
    if xmlids:
        resolve_hrefs(in_envelope_xml, xmlids)
    if (in_envelope_xml.tag != ('{%s}Envelope' % ns_soap)):
        raise Fault('Client.SoapError', ('No {%s}Envelope element was found!' % ns_soap))
    header_envelope = in_envelope_xml.xpath('e:Header', namespaces={'e': ns_soap})
    body_envelope = in_envelope_xml.xpath('e:Body', namespaces={'e': ns_soap})
    if ((len(header_envelope) == 0) and (len(body_envelope) == 0)):
        raise Fault('Client.SoapError', 'Soap envelope is empty!')
    header = None
    if (len(header_envelope) > 0):
        header = header_envelope[0].getchildren()
    body = None
    # Body payload is only extracted when the Body element has a child.
    if ((len(body_envelope) > 0) and (len(body_envelope[0]) > 0)):
        body = body_envelope[0][0]
    return (header, body)
| [
"def",
"_from_soap",
"(",
"in_envelope_xml",
",",
"xmlids",
"=",
"None",
",",
"**",
"kwargs",
")",
":",
"ns_soap",
"=",
"kwargs",
".",
"pop",
"(",
"'ns'",
",",
"ns",
".",
"soap11_env",
")",
"if",
"xmlids",
":",
"resolve_hrefs",
"(",
"in_envelope_xml",
",... | parses the xml string into the header and payload . | train | false |
def format_header_param(name, value):
    """Format and quote a single header parameter (RFC 2231 aware).

    Values with no quote/backslash/CR/LF that encode cleanly as ASCII come
    back as ``name="value"``; anything else is encoded with
    ``email.utils.encode_rfc2231`` and emitted as ``name*=value``.
    """
    needs_rfc2231 = any(ch in value for ch in '"\\\r\n')
    if not needs_rfc2231:
        candidate = '%s="%s"' % (name, value)
        try:
            candidate.encode('ascii')
        except (UnicodeEncodeError, UnicodeDecodeError):
            pass  # non-ASCII: fall through to RFC 2231 encoding
        else:
            return candidate
    # NOTE(review): the six branch only applies to Python 2 text values.
    if (not six.PY3) and isinstance(value, six.text_type):
        value = value.encode('utf-8')
    value = email.utils.encode_rfc2231(value, 'utf-8')
    return '%s*=%s' % (name, value)
| [
"def",
"format_header_param",
"(",
"name",
",",
"value",
")",
":",
"if",
"(",
"not",
"any",
"(",
"(",
"(",
"ch",
"in",
"value",
")",
"for",
"ch",
"in",
"'\"\\\\\\r\\n'",
")",
")",
")",
":",
"result",
"=",
"(",
"'%s=\"%s\"'",
"%",
"(",
"name",
",",
... | helper function to format and quote a single header parameter . | train | true |
11,117 | def clear_info_cache(app, env, account, container=None):
    """Invalidate cached account/container info by storing None via set_info_cache."""
    set_info_cache(app, env, account, container, None)
| [
"def",
"clear_info_cache",
"(",
"app",
",",
"env",
",",
"account",
",",
"container",
"=",
"None",
")",
":",
"set_info_cache",
"(",
"app",
",",
"env",
",",
"account",
",",
"container",
",",
"None",
")"
] | clear the cached info in both memcache and env . | train | false |
def ne_chunk(tagged_tokens, binary=False):
    """Chunk *tagged_tokens* with the recommended named-entity chunker.

    With ``binary=True`` the binary chunker pickle is used; otherwise the
    multiclass chunker pickle is loaded.  Returns the chunker's parse of
    the tagged tokens.
    """
    pickle_name = _BINARY_NE_CHUNKER if binary else _MULTICLASS_NE_CHUNKER
    return load(pickle_name).parse(tagged_tokens)
| [
"def",
"ne_chunk",
"(",
"tagged_tokens",
",",
"binary",
"=",
"False",
")",
":",
"if",
"binary",
":",
"chunker_pickle",
"=",
"_BINARY_NE_CHUNKER",
"else",
":",
"chunker_pickle",
"=",
"_MULTICLASS_NE_CHUNKER",
"chunker",
"=",
"load",
"(",
"chunker_pickle",
")",
"r... | use nltks currently recommended named entity chunker to chunk the given list of tagged tokens . | train | false |
def is_fcntl_available(check_sunos=False):
    """Report whether the ``fcntl`` module is available (module flag HAS_FCNTL).

    When *check_sunos* is true, SunOS hosts are always reported as
    unavailable regardless of the import result.
    """
    sunos_excluded = check_sunos and is_sunos()
    return False if sunos_excluded else HAS_FCNTL
| [
"def",
"is_fcntl_available",
"(",
"check_sunos",
"=",
"False",
")",
":",
"if",
"(",
"check_sunos",
"and",
"is_sunos",
"(",
")",
")",
":",
"return",
"False",
"return",
"HAS_FCNTL"
] | simple function to check if the fcntl module is available or not . | train | false |
def computed_values(d, *args, **kwargs):
    """Return a copy of *d* with callable values replaced by their results.

    Each callable value is invoked with ``*args``/``**kwargs``; if the
    (possibly resolved) value is a dict, it is processed recursively.
    """
    def _resolve(value):
        # One value: call it if callable, then recurse if it is a dict.
        if callable(value):
            value = value(*args, **kwargs)
        if isinstance(value, dict):
            value = computed_values(value, *args, **kwargs)
        return value
    return {key: _resolve(value) for key, value in six.iteritems(d)}
| [
"def",
"computed_values",
"(",
"d",
",",
"*",
"args",
",",
"**",
"kwargs",
")",
":",
"result",
"=",
"{",
"}",
"for",
"(",
"k",
",",
"v",
")",
"in",
"six",
".",
"iteritems",
"(",
"d",
")",
":",
"if",
"callable",
"(",
"v",
")",
":",
"v",
"=",
... | returns a new dict that has callable values replaced with the return values . | train | false |
11,121 | def _GetDatastoreStats(kinds_list, use_stats_kinds=False):
    """Collect per-kind datastore statistics for presentation.

    Reads the GlobalStat row to find the latest stats timestamp, then
    gathers KindStat rows from that same snapshot.  Requested kinds with no
    stats row still appear in the result (with only 'kind_name' set) so
    callers can render a complete list.

    :param kinds_list: kind names to report on; may be None/empty.
    :param use_stats_kinds: when True, include every kind found in the
        stats (except dunder-prefixed internal kinds), not just kinds_list.
    :returns: (timestamp, stats) with stats sorted by kind name; falls back
        to _KindsListToTuple(kinds_list) when no stats exist yet.
    """
    global_stat = stats.GlobalStat.all().fetch(1)
    if (not global_stat):
        return _KindsListToTuple(kinds_list)
    global_ts = global_stat[0].timestamp
    # Only kind stats from the same snapshot as the global timestamp count.
    kind_stats = stats.KindStat.all().filter('timestamp =', global_ts).fetch(1000)
    if (not kind_stats):
        return _KindsListToTuple(kinds_list)
    results = {}
    for kind_ent in kind_stats:
        # Skip internal (__*) kinds, kinds outside the requested list
        # (unless use_stats_kinds), and kinds with zero entities.
        if ((not kind_ent.kind_name.startswith('__')) and (use_stats_kinds or (kind_ent.kind_name in kinds_list)) and (kind_ent.count > 0)):
            results[kind_ent.kind_name] = _PresentatableKindStats(kind_ent)
    utils.CacheStats(results.values())
    # Ensure every requested kind shows up even without stats data.
    for kind_str in (kinds_list or []):
        if (kind_str not in results):
            results[kind_str] = {'kind_name': kind_str}
    return (global_ts, sorted(results.values(), key=(lambda x: x['kind_name'])))
| [
"def",
"_GetDatastoreStats",
"(",
"kinds_list",
",",
"use_stats_kinds",
"=",
"False",
")",
":",
"global_stat",
"=",
"stats",
".",
"GlobalStat",
".",
"all",
"(",
")",
".",
"fetch",
"(",
"1",
")",
"if",
"(",
"not",
"global_stat",
")",
":",
"return",
"_Kind... | retrieves stats for kinds . | train | false |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.