| id_within_dataset (int64, 1–55.5k) | snippet (string, 19–14.2k chars) | tokens (list, 6–1.63k items) | nl (string, 6–352 chars) | split_within_dataset (string, 1 class) | is_duplicated (bool, 2 classes) |
|---|---|---|---|---|---|
9,028 | def prepare_bearer_uri(token, uri):
return add_params_to_uri(uri, [(u'access_token', token)])
| [
"def",
"prepare_bearer_uri",
"(",
"token",
",",
"uri",
")",
":",
"return",
"add_params_to_uri",
"(",
"uri",
",",
"[",
"(",
"u'access_token'",
",",
"token",
")",
"]",
")"
] | add a bearer token_ to the request uri . | train | false |
9,029 | def get_comments_file(worksheet_path, archive, valid_files):
sheet_codename = os.path.split(worksheet_path)[(-1)]
rels_file = (((PACKAGE_WORKSHEET_RELS + '/') + sheet_codename) + '.rels')
if (rels_file not in valid_files):
return None
rels_source = archive.read(rels_file)
root = fromstring(rels_source)
for i in... | [
"def",
"get_comments_file",
"(",
"worksheet_path",
",",
"archive",
",",
"valid_files",
")",
":",
"sheet_codename",
"=",
"os",
".",
"path",
".",
"split",
"(",
"worksheet_path",
")",
"[",
"(",
"-",
"1",
")",
"]",
"rels_file",
"=",
"(",
"(",
"(",
"PACKAGE_W... | returns the xml filename in the archive which contains the comments for the spreadsheet with codename sheet_codename . | train | false |
9,030 | def check_args(args):
if (args.presharedkey and ((len(args.presharedkey) < 8) or (len(args.presharedkey) > 64))):
sys.exit((((('[' + R) + '-') + W) + '] Pre-shared key must be between 8 and 63 printable characters.'))
if (((args.jamminginterface and (not args.apinterface)) or ((not args.jamminginterface) and args.a... | [
"def",
"check_args",
"(",
"args",
")",
":",
"if",
"(",
"args",
".",
"presharedkey",
"and",
"(",
"(",
"len",
"(",
"args",
".",
"presharedkey",
")",
"<",
"8",
")",
"or",
"(",
"len",
"(",
"args",
".",
"presharedkey",
")",
">",
"64",
")",
")",
")",
... | fail fast if paths we explicitly want to copy do not exist . | train | false |
9,031 | def utfstr(stuff):
if isinstance(stuff, basestring):
return stuff
else:
return str(stuff)
| [
"def",
"utfstr",
"(",
"stuff",
")",
":",
"if",
"isinstance",
"(",
"stuff",
",",
"basestring",
")",
":",
"return",
"stuff",
"else",
":",
"return",
"str",
"(",
"stuff",
")"
] | converts stuff to string and does without failing if stuff is a utf8 string . | train | false |
9,032 | def unregister_fsa_session_signals():
if (not has_flask_sqlalchemy):
return
version = parse_version(flask_sqlalchemy.__version__)
if (version >= (2, 0)):
return
events = flask_sqlalchemy._SessionSignalEvents
signal_names = ('before_commit', 'after_commit', 'after_rollback')
for signal_name in signal_names:
... | [
"def",
"unregister_fsa_session_signals",
"(",
")",
":",
"if",
"(",
"not",
"has_flask_sqlalchemy",
")",
":",
"return",
"version",
"=",
"parse_version",
"(",
"flask_sqlalchemy",
".",
"__version__",
")",
"if",
"(",
"version",
">=",
"(",
"2",
",",
"0",
")",
")",... | unregisters flask-sqlalchemy session commit and rollback signal handlers . | train | false |
9,033 | def with_ioloop(method, expect_success=True):
def test_method(self):
r = method(self)
loop = self.io_loop
if expect_success:
self.pullstream.on_recv(self.on_message_succeed)
else:
self.pullstream.on_recv(self.on_message_fail)
loop.call_later(1, self.attempt_connection)
loop.call_later(1.2, self.send_... | [
"def",
"with_ioloop",
"(",
"method",
",",
"expect_success",
"=",
"True",
")",
":",
"def",
"test_method",
"(",
"self",
")",
":",
"r",
"=",
"method",
"(",
"self",
")",
"loop",
"=",
"self",
".",
"io_loop",
"if",
"expect_success",
":",
"self",
".",
"pullst... | decorator for running tests with an ioloop . | train | false |
9,036 | def demo_legacy_grammar():
from nltk.grammar import FeatureGrammar
g = FeatureGrammar.fromstring(u"\n % start S\n S[sem=<hello>] -> 'hello'\n ")
print((u'Reading grammar: %s' % g))
print((u'*' * 20))
for reading in interpret_sents([u'hello'], g, semkey=u'sem'):
(syn, sem) = reading[0]
print()
print(... | [
"def",
"demo_legacy_grammar",
"(",
")",
":",
"from",
"nltk",
".",
"grammar",
"import",
"FeatureGrammar",
"g",
"=",
"FeatureGrammar",
".",
"fromstring",
"(",
"u\"\\n % start S\\n S[sem=<hello>] -> 'hello'\\n \"",
")",
"print",
"(",
"(",
"u'Reading grammar: %s'",
... | check that interpret_sents() is compatible with legacy grammars that use a lowercase sem feature . | train | false |
9,037 | def to_utf8(obj):
if isinstance(obj, str):
try:
return obj.decode('utf-8')
except AttributeError:
return obj
try:
if isinstance(obj, unicode):
return obj
else:
return obj.__str__().decode('utf-8')
except NameError:
if isinstance(obj, bytes):
return obj.decode('utf-8')
else:
return obj._... | [
"def",
"to_utf8",
"(",
"obj",
")",
":",
"if",
"isinstance",
"(",
"obj",
",",
"str",
")",
":",
"try",
":",
"return",
"obj",
".",
"decode",
"(",
"'utf-8'",
")",
"except",
"AttributeError",
":",
"return",
"obj",
"try",
":",
"if",
"isinstance",
"(",
"obj... | return a unicode string representing a python object . | train | false |
9,039 | def getprog():
codetbl = db.code
acid = request.vars.acid
sid = request.vars.sid
if sid:
query = (((codetbl.sid == sid) & (codetbl.acid == acid)) & (codetbl.timestamp != None))
elif auth.user:
query = (((codetbl.sid == auth.user.username) & (codetbl.acid == acid)) & (codetbl.timestamp != None))
else:
query ... | [
"def",
"getprog",
"(",
")",
":",
"codetbl",
"=",
"db",
".",
"code",
"acid",
"=",
"request",
".",
"vars",
".",
"acid",
"sid",
"=",
"request",
".",
"vars",
".",
"sid",
"if",
"sid",
":",
"query",
"=",
"(",
"(",
"(",
"codetbl",
".",
"sid",
"==",
"s... | return the program code for a particular acid . | train | false |
9,041 | def GetBase64EncodedHTML5ZipFromUrl(url):
response = urllib2.urlopen(url)
return base64.b64encode(response.read())
| [
"def",
"GetBase64EncodedHTML5ZipFromUrl",
"(",
"url",
")",
":",
"response",
"=",
"urllib2",
".",
"urlopen",
"(",
"url",
")",
"return",
"base64",
".",
"b64encode",
"(",
"response",
".",
"read",
"(",
")",
")"
] | retrieve zip file from the given url . | train | false |
9,044 | def load_test_config():
conf.load_test_config()
| [
"def",
"load_test_config",
"(",
")",
":",
"conf",
".",
"load_test_config",
"(",
")"
] | load the unit test configuration . | train | false |
9,045 | def subsequence_match(ref, typed, csc):
if csc:
return _subsequence_match_iter(ref, typed)
else:
return _subsequence_match_iter(ref.lower(), typed.lower())
| [
"def",
"subsequence_match",
"(",
"ref",
",",
"typed",
",",
"csc",
")",
":",
"if",
"csc",
":",
"return",
"_subsequence_match_iter",
"(",
"ref",
",",
"typed",
")",
"else",
":",
"return",
"_subsequence_match_iter",
"(",
"ref",
".",
"lower",
"(",
")",
",",
"... | detects whether typed is a subsequence of ref . | train | false |
9,046 | def resource_name_package(name):
if (not (PRN_SEPARATOR in name)):
return None
return name[:name.find(PRN_SEPARATOR)]
| [
"def",
"resource_name_package",
"(",
"name",
")",
":",
"if",
"(",
"not",
"(",
"PRN_SEPARATOR",
"in",
"name",
")",
")",
":",
"return",
"None",
"return",
"name",
"[",
":",
"name",
".",
"find",
"(",
"PRN_SEPARATOR",
")",
"]"
] | pkg/typename -> pkg . | train | false |
9,048 | def query_package(module, name):
(rc, out, err) = module.run_command(('%s -p -v' % PKGIN_PATH))
if (rc == 0):
pflag = '-p'
splitchar = ';'
else:
pflag = ''
splitchar = ' '
(rc, out, err) = module.run_command(('%s %s search "^%s$"' % (PKGIN_PATH, pflag, name)))
if (rc == 0):
packages = out.split('\n')
f... | [
"def",
"query_package",
"(",
"module",
",",
"name",
")",
":",
"(",
"rc",
",",
"out",
",",
"err",
")",
"=",
"module",
".",
"run_command",
"(",
"(",
"'%s -p -v'",
"%",
"PKGIN_PATH",
")",
")",
"if",
"(",
"rc",
"==",
"0",
")",
":",
"pflag",
"=",
"'-p... | returns package info . | train | false |
9,050 | def load_key_file(path):
try:
key_file = path.open()
except IOError as e:
(code, failure) = e
raise PathError('Private key file could not be opened.', e.filename, code, failure)
keypair = ComparableKeyPair(keypair=KeyPair.load(key_file.read(), format=crypto.FILETYPE_PEM))
return keypair
| [
"def",
"load_key_file",
"(",
"path",
")",
":",
"try",
":",
"key_file",
"=",
"path",
".",
"open",
"(",
")",
"except",
"IOError",
"as",
"e",
":",
"(",
"code",
",",
"failure",
")",
"=",
"e",
"raise",
"PathError",
"(",
"'Private key file could not be opened.'"... | load a private key from a specified path . | train | false |
9,051 | def merge_events(events, ids, new_id, replace_events=True):
events = np.asarray(events)
events_out = events.copy()
idx_touched = []
for col in [1, 2]:
for i in ids:
mask = (events[:, col] == i)
events_out[(mask, col)] = new_id
idx_touched.append(np.where(mask)[0])
if (not replace_events):
idx_touched ... | [
"def",
"merge_events",
"(",
"events",
",",
"ids",
",",
"new_id",
",",
"replace_events",
"=",
"True",
")",
":",
"events",
"=",
"np",
".",
"asarray",
"(",
"events",
")",
"events_out",
"=",
"events",
".",
"copy",
"(",
")",
"idx_touched",
"=",
"[",
"]",
... | merge a set of events . | train | false |
9,053 | def send_mail_to_admins(sender, subject, body, make_sync_call=apiproxy_stub_map.MakeSyncCall, **kw):
kw['sender'] = sender
kw['subject'] = subject
kw['body'] = body
message = AdminEmailMessage(**kw)
message.send(make_sync_call)
| [
"def",
"send_mail_to_admins",
"(",
"sender",
",",
"subject",
",",
"body",
",",
"make_sync_call",
"=",
"apiproxy_stub_map",
".",
"MakeSyncCall",
",",
"**",
"kw",
")",
":",
"kw",
"[",
"'sender'",
"]",
"=",
"sender",
"kw",
"[",
"'subject'",
"]",
"=",
"subject... | sends mail to admins on behalf of application . | train | false |
9,055 | def clear_all(tgt=None, tgt_type='glob', expr_form=None):
if (expr_form is not None):
salt.utils.warn_until('Fluorine', "the target type should be passed using the 'tgt_type' argument instead of 'expr_form'. Support for using 'expr_form' will be removed in Salt Fluorine.")
tgt_type = expr_form
return _clear_cache... | [
"def",
"clear_all",
"(",
"tgt",
"=",
"None",
",",
"tgt_type",
"=",
"'glob'",
",",
"expr_form",
"=",
"None",
")",
":",
"if",
"(",
"expr_form",
"is",
"not",
"None",
")",
":",
"salt",
".",
"utils",
".",
"warn_until",
"(",
"'Fluorine'",
",",
"\"the target ... | clears all the placeholder variables of keep prob . | train | false |
9,057 | def _get_default_unit_format(config):
return u'cds'
| [
"def",
"_get_default_unit_format",
"(",
"config",
")",
":",
"return",
"u'cds'"
] | get the default unit format as specified in the votable spec . | train | false |
9,058 | def args_for_opt_dest_subset(option_parser, args, dests=None):
for (dest, value) in _args_for_opt_dest_subset(option_parser, args, dests):
(yield value)
| [
"def",
"args_for_opt_dest_subset",
"(",
"option_parser",
",",
"args",
",",
"dests",
"=",
"None",
")",
":",
"for",
"(",
"dest",
",",
"value",
")",
"in",
"_args_for_opt_dest_subset",
"(",
"option_parser",
",",
"args",
",",
"dests",
")",
":",
"(",
"yield",
"v... | for the given :py:class:optionparser and list of command line arguments *args* . | train | false |
9,060 | def invert_mapping(mapping):
invert_map = {}
for key in mapping.keys():
invert_map[key] = key
for id in mapping[key]:
invert_map[id] = key
return invert_map
| [
"def",
"invert_mapping",
"(",
"mapping",
")",
":",
"invert_map",
"=",
"{",
"}",
"for",
"key",
"in",
"mapping",
".",
"keys",
"(",
")",
":",
"invert_map",
"[",
"key",
"]",
"=",
"key",
"for",
"id",
"in",
"mapping",
"[",
"key",
"]",
":",
"invert_map",
... | inverts a dictionary mapping . | train | false |
9,061 | def STOCHRSI(ds, count, timeperiod=(- (2 ** 31)), fastk_period=(- (2 ** 31)), fastd_period=(- (2 ** 31)), fastd_matype=0):
ret = call_talib_with_ds(ds, count, talib.STOCHRSI, timeperiod, fastk_period, fastd_period, fastd_matype)
if (ret is None):
ret = (None, None)
return ret
| [
"def",
"STOCHRSI",
"(",
"ds",
",",
"count",
",",
"timeperiod",
"=",
"(",
"-",
"(",
"2",
"**",
"31",
")",
")",
",",
"fastk_period",
"=",
"(",
"-",
"(",
"2",
"**",
"31",
")",
")",
",",
"fastd_period",
"=",
"(",
"-",
"(",
"2",
"**",
"31",
")",
... | stochastic relative strength index . | train | false |
9,063 | def table_from_samples(samples, start=0, stop=(-1), rate=44100):
if (type(samples) == np.ndarray):
samples = samples.tolist()
if (type(samples) != list):
raise TypeError('samples should be a list or np.array')
if ((start, stop) != (0, (-1))):
if (stop > start):
samples = samples[(start * rate):(stop * rate)... | [
"def",
"table_from_samples",
"(",
"samples",
",",
"start",
"=",
"0",
",",
"stop",
"=",
"(",
"-",
"1",
")",
",",
"rate",
"=",
"44100",
")",
":",
"if",
"(",
"type",
"(",
"samples",
")",
"==",
"np",
".",
"ndarray",
")",
":",
"samples",
"=",
"samples... | return a pyo datatable constructed from samples . | train | false |
9,064 | def record_has(inrec, fieldvals):
retval = False
for field in fieldvals:
if isinstance(inrec[field], str):
set1 = {inrec[field]}
else:
set1 = set(inrec[field])
if (set1 & fieldvals[field]):
retval = True
break
return retval
| [
"def",
"record_has",
"(",
"inrec",
",",
"fieldvals",
")",
":",
"retval",
"=",
"False",
"for",
"field",
"in",
"fieldvals",
":",
"if",
"isinstance",
"(",
"inrec",
"[",
"field",
"]",
",",
"str",
")",
":",
"set1",
"=",
"{",
"inrec",
"[",
"field",
"]",
... | accepts a record . | train | false |
9,065 | def lookup_scopes(service_name):
if (service_name in CLIENT_LOGIN_SCOPES):
return CLIENT_LOGIN_SCOPES[service_name]
return None
| [
"def",
"lookup_scopes",
"(",
"service_name",
")",
":",
"if",
"(",
"service_name",
"in",
"CLIENT_LOGIN_SCOPES",
")",
":",
"return",
"CLIENT_LOGIN_SCOPES",
"[",
"service_name",
"]",
"return",
"None"
] | finds the scope urls for the desired service . | train | false |
9,066 | def expose_api_anonymous(func, to_json=True):
return expose_api(func, to_json=to_json, user_required=False)
| [
"def",
"expose_api_anonymous",
"(",
"func",
",",
"to_json",
"=",
"True",
")",
":",
"return",
"expose_api",
"(",
"func",
",",
"to_json",
"=",
"to_json",
",",
"user_required",
"=",
"False",
")"
] | expose this function via the api but dont require a set user . | train | false |
9,068 | def _extract_doc_comment_simple(content, line, column, markers):
align_column = (column - len(markers[0]))
pos = content[line].find(markers[2], column)
if (pos != (-1)):
return (line, (pos + len(markers[2])), content[line][column:pos])
doc_comment = content[line][column:]
line += 1
while (line < len(content)):
... | [
"def",
"_extract_doc_comment_simple",
"(",
"content",
",",
"line",
",",
"column",
",",
"markers",
")",
":",
"align_column",
"=",
"(",
"column",
"-",
"len",
"(",
"markers",
"[",
"0",
"]",
")",
")",
"pos",
"=",
"content",
"[",
"line",
"]",
".",
"find",
... | extract a documentation that starts at given beginning with simple layout . | train | false |
9,069 | def test_round(method, prec, exprange, restricted_range, itr, stat):
for op in all_unary(prec, 9999, itr):
n = random.randrange(10)
roundop = (op[0], n)
t = TestSet(method, roundop)
try:
if (not convert(t)):
continue
callfuncs(t)
verify(t, stat)
except VerifyError as err:
log(err)
| [
"def",
"test_round",
"(",
"method",
",",
"prec",
",",
"exprange",
",",
"restricted_range",
",",
"itr",
",",
"stat",
")",
":",
"for",
"op",
"in",
"all_unary",
"(",
"prec",
",",
"9999",
",",
"itr",
")",
":",
"n",
"=",
"random",
".",
"randrange",
"(",
... | iterate the __round__ method through many test cases . | train | false |
9,070 | def get_ancestor_paths_from_ent_key(ent_key):
ancestor_list = []
tokens = str(ent_key).split(dbconstants.KIND_SEPARATOR)
tokens = tokens[:(-2)]
for num_elements in range(0, len(tokens)):
ancestor = ''
for token in tokens[0:(num_elements + 1)]:
ancestor += (token + dbconstants.KIND_SEPARATOR)
ancestor_list.... | [
"def",
"get_ancestor_paths_from_ent_key",
"(",
"ent_key",
")",
":",
"ancestor_list",
"=",
"[",
"]",
"tokens",
"=",
"str",
"(",
"ent_key",
")",
".",
"split",
"(",
"dbconstants",
".",
"KIND_SEPARATOR",
")",
"tokens",
"=",
"tokens",
"[",
":",
"(",
"-",
"2",
... | get a list of key string for the ancestor portion of a composite key . | train | false |
9,072 | def get_paramfile(path):
data = None
if isinstance(path, six.string_types):
for (prefix, function_spec) in PREFIX_MAP.items():
if path.startswith(prefix):
(function, kwargs) = function_spec
data = function(prefix, path, **kwargs)
return data
| [
"def",
"get_paramfile",
"(",
"path",
")",
":",
"data",
"=",
"None",
"if",
"isinstance",
"(",
"path",
",",
"six",
".",
"string_types",
")",
":",
"for",
"(",
"prefix",
",",
"function_spec",
")",
"in",
"PREFIX_MAP",
".",
"items",
"(",
")",
":",
"if",
"p... | load parameter based on a resource uri . | train | false |
9,073 | def deepcopy_return_value_class_decorator(cls):
class NewClass(cls, ):
def __getattribute__(self, attr_name):
obj = super(NewClass, self).__getattribute__(attr_name)
if (hasattr(obj, '__call__') and (not attr_name.startswith('_')) and (not isinstance(obj, mock.Mock))):
return deepcopy_return_value_method_d... | [
"def",
"deepcopy_return_value_class_decorator",
"(",
"cls",
")",
":",
"class",
"NewClass",
"(",
"cls",
",",
")",
":",
"def",
"__getattribute__",
"(",
"self",
",",
"attr_name",
")",
":",
"obj",
"=",
"super",
"(",
"NewClass",
",",
"self",
")",
".",
"__getatt... | wraps non-protected methods of a class with decorator . | train | false |
9,074 | def clear_db_env(*args, **kwargs):
pass
| [
"def",
"clear_db_env",
"(",
"*",
"args",
",",
"**",
"kwargs",
")",
":",
"pass"
] | unset global configuration variables for database . | train | false |
9,075 | def valid_csrf_token(req, session_id, csrf_token):
try:
when = int(csrf_token.split('-')[0], 16)
return ((when > (time.time() - CSRF_VALIDITY)) and (csrf_token == make_csrf_token(req, session_id, ts=when)))
except (ValueError, IndexError):
return False
| [
"def",
"valid_csrf_token",
"(",
"req",
",",
"session_id",
",",
"csrf_token",
")",
":",
"try",
":",
"when",
"=",
"int",
"(",
"csrf_token",
".",
"split",
"(",
"'-'",
")",
"[",
"0",
"]",
",",
"16",
")",
"return",
"(",
"(",
"when",
">",
"(",
"time",
... | check the validity of a csrf token . | train | false |
9,076 | def linebreaks(value):
value = re.sub('\\r\\n|\\r|\\n', '\n', value)
paras = re.split('\n{2,}', value)
paras = [('<p>%s</p>' % p.strip().replace('\n', '<br />')) for p in paras]
return '\n\n'.join(paras)
| [
"def",
"linebreaks",
"(",
"value",
")",
":",
"value",
"=",
"re",
".",
"sub",
"(",
"'\\\\r\\\\n|\\\\r|\\\\n'",
",",
"'\\n'",
",",
"value",
")",
"paras",
"=",
"re",
".",
"split",
"(",
"'\\n{2,}'",
",",
"value",
")",
"paras",
"=",
"[",
"(",
"'<p>%s</p>'",... | converts newlines into <p> and <br />s . | train | false |
9,077 | @commands(u'chairs')
@example(u'.chairs Tyrope Jason elad')
def chairs(bot, trigger):
if (not ismeetingrunning(trigger.sender)):
bot.say(u"Can't do that, start meeting first")
return
if (not trigger.group(2)):
bot.say(u'Who are the chairs?')
return
if (trigger.nick.lower() == meetings_dict[trigger.sender][u'... | [
"@",
"commands",
"(",
"u'chairs'",
")",
"@",
"example",
"(",
"u'.chairs Tyrope Jason elad'",
")",
"def",
"chairs",
"(",
"bot",
",",
"trigger",
")",
":",
"if",
"(",
"not",
"ismeetingrunning",
"(",
"trigger",
".",
"sender",
")",
")",
":",
"bot",
".",
"say"... | set the meeting chairs . | train | false |
9,081 | def filter_matches(match_text, candidates, case_sensitive, sort_key=(lambda x: x)):
if case_sensitive:
case_transform = _identity
else:
case_transform = _lower
if match_text:
match_text = case_transform(match_text)
matches = [r for r in candidates if (match_text in case_transform(r))]
else:
matches = list... | [
"def",
"filter_matches",
"(",
"match_text",
",",
"candidates",
",",
"case_sensitive",
",",
"sort_key",
"=",
"(",
"lambda",
"x",
":",
"x",
")",
")",
":",
"if",
"case_sensitive",
":",
"case_transform",
"=",
"_identity",
"else",
":",
"case_transform",
"=",
"_lo... | filter candidates and return the matches . | train | false |
9,084 | def eulerian_circuit(G, source=None):
if (not is_eulerian(G)):
raise nx.NetworkXError('G is not Eulerian.')
g = G.__class__(G)
if (source is None):
v = arbitrary_element(g)
else:
v = source
if g.is_directed():
degree = g.in_degree
edges = g.in_edges
get_vertex = itemgetter(0)
else:
degree = g.degree... | [
"def",
"eulerian_circuit",
"(",
"G",
",",
"source",
"=",
"None",
")",
":",
"if",
"(",
"not",
"is_eulerian",
"(",
"G",
")",
")",
":",
"raise",
"nx",
".",
"NetworkXError",
"(",
"'G is not Eulerian.'",
")",
"g",
"=",
"G",
".",
"__class__",
"(",
"G",
")"... | returns an iterator over the edges of an eulerian circuit in g . | train | false |
9,086 | @login_required
def show_receipt(request, ordernum):
try:
order = Order.objects.get(id=ordernum)
except Order.DoesNotExist:
raise Http404('Order not found!')
if ((order.user != request.user) or (order.status not in ['purchased', 'refunded'])):
raise Http404('Order not found!')
if ('application/json' in reques... | [
"@",
"login_required",
"def",
"show_receipt",
"(",
"request",
",",
"ordernum",
")",
":",
"try",
":",
"order",
"=",
"Order",
".",
"objects",
".",
"get",
"(",
"id",
"=",
"ordernum",
")",
"except",
"Order",
".",
"DoesNotExist",
":",
"raise",
"Http404",
"(",... | displays a receipt for a particular order . | train | false |
9,089 | def impute_ros(observations, censorship, df=None, min_uncensored=2, max_fraction_censored=0.8, substitution_fraction=0.5, transform_in=numpy.log, transform_out=numpy.exp, as_array=True):
if (df is None):
df = pandas.DataFrame({'obs': observations, 'cen': censorship})
observations = 'obs'
censorship = 'cen'
N_ob... | [
"def",
"impute_ros",
"(",
"observations",
",",
"censorship",
",",
"df",
"=",
"None",
",",
"min_uncensored",
"=",
"2",
",",
"max_fraction_censored",
"=",
"0.8",
",",
"substitution_fraction",
"=",
"0.5",
",",
"transform_in",
"=",
"numpy",
".",
"log",
",",
"tra... | impute censored dataset using regression on order statistics . | train | false |
9,091 | def art_for_asin(album):
if album.asin:
for index in AMAZON_INDICES:
(yield (AMAZON_URL % (album.asin, index)))
| [
"def",
"art_for_asin",
"(",
"album",
")",
":",
"if",
"album",
".",
"asin",
":",
"for",
"index",
"in",
"AMAZON_INDICES",
":",
"(",
"yield",
"(",
"AMAZON_URL",
"%",
"(",
"album",
".",
"asin",
",",
"index",
")",
")",
")"
] | generate urls using amazon id string . | train | false |
9,092 | def setUnicodeValue(glyph, glyphList):
if (glyph.name in glyphList):
glyph.unicode = int(glyphList[glyph.name], 16)
else:
uvNameMatch = re.match('uni([\\dA-F]{4})$', glyph.name)
if uvNameMatch:
glyph.unicode = int(uvNameMatch.group(1), 16)
| [
"def",
"setUnicodeValue",
"(",
"glyph",
",",
"glyphList",
")",
":",
"if",
"(",
"glyph",
".",
"name",
"in",
"glyphList",
")",
":",
"glyph",
".",
"unicode",
"=",
"int",
"(",
"glyphList",
"[",
"glyph",
".",
"name",
"]",
",",
"16",
")",
"else",
":",
"u... | try to ensure glyph has a unicode value -- used by fdk to make otfs . | train | false |
9,093 | def entry_breadcrumbs(entry):
date = entry.publication_date
if is_aware(date):
date = localtime(date)
return [year_crumb(date), month_crumb(date), day_crumb(date), Crumb(entry.title)]
| [
"def",
"entry_breadcrumbs",
"(",
"entry",
")",
":",
"date",
"=",
"entry",
".",
"publication_date",
"if",
"is_aware",
"(",
"date",
")",
":",
"date",
"=",
"localtime",
"(",
"date",
")",
"return",
"[",
"year_crumb",
"(",
"date",
")",
",",
"month_crumb",
"("... | breadcrumbs for an entry . | train | true |
9,094 | def test_dae_yaml():
limited_epoch_train(os.path.join(pylearn2.__path__[0], 'scripts/autoencoder_example/dae.yaml'))
| [
"def",
"test_dae_yaml",
"(",
")",
":",
"limited_epoch_train",
"(",
"os",
".",
"path",
".",
"join",
"(",
"pylearn2",
".",
"__path__",
"[",
"0",
"]",
",",
"'scripts/autoencoder_example/dae.yaml'",
")",
")"
] | train a denoising autoencoder for a single epoch . | train | false |
9,095 | @must_be_logged_in
def user_choose_mailing_lists(auth, **kwargs):
user = auth.user
json_data = escape_html(request.get_json())
if json_data:
for (list_name, subscribe) in json_data.items():
if (list_name == settings.OSF_HELP_LIST):
update_osf_help_mails_subscription(user=user, subscribe=subscribe)
else:
... | [
"@",
"must_be_logged_in",
"def",
"user_choose_mailing_lists",
"(",
"auth",
",",
"**",
"kwargs",
")",
":",
"user",
"=",
"auth",
".",
"user",
"json_data",
"=",
"escape_html",
"(",
"request",
".",
"get_json",
"(",
")",
")",
"if",
"json_data",
":",
"for",
"(",... | update mailing list subscription on user model and in mailchimp example input: "open science framework general": true . | train | false |
9,096 | @events.route('/<int:event_id>/role-invite/decline/<hash>', methods=['GET', 'POST'])
def user_role_invite_decline(event_id, hash):
event = DataGetter.get_event(event_id)
user = current_user
role_invite = DataGetter.get_event_role_invite(event.id, hash, email=user.email)
if role_invite:
if role_invite.has_expired(... | [
"@",
"events",
".",
"route",
"(",
"'/<int:event_id>/role-invite/decline/<hash>'",
",",
"methods",
"=",
"[",
"'GET'",
",",
"'POST'",
"]",
")",
"def",
"user_role_invite_decline",
"(",
"event_id",
",",
"hash",
")",
":",
"event",
"=",
"DataGetter",
".",
"get_event",... | decline user-role invite for the event . | train | false |
9,097 | def _cmpBottom(a, b):
return _cmpTop(a, b, what='bottom 10 rank')
| [
"def",
"_cmpBottom",
"(",
"a",
",",
"b",
")",
":",
"return",
"_cmpTop",
"(",
"a",
",",
"b",
",",
"what",
"=",
"'bottom 10 rank'",
")"
] | compare function used to sort top 250/bottom 10 rank . | train | false |
9,100 | def getSimInfo():
try:
mContext = autoclass('android.content.Context')
pythonActivity = autoclass('org.renpy.android.PythonService')
telephonyManager = cast('android.telephony.TelephonyManager', pythonActivity.mService.getSystemService(mContext.TELEPHONY_SERVICE))
phoneCount = telephonyManager.getPhoneCount()
... | [
"def",
"getSimInfo",
"(",
")",
":",
"try",
":",
"mContext",
"=",
"autoclass",
"(",
"'android.content.Context'",
")",
"pythonActivity",
"=",
"autoclass",
"(",
"'org.renpy.android.PythonService'",
")",
"telephonyManager",
"=",
"cast",
"(",
"'android.telephony.TelephonyMan... | returns 0 if none of voice . | train | false |
9,101 | def _ContainsComments(node):
if isinstance(node, pytree.Leaf):
return (node.type == grammar_token.COMMENT)
for child in node.children:
if _ContainsComments(child):
return True
return False
| [
"def",
"_ContainsComments",
"(",
"node",
")",
":",
"if",
"isinstance",
"(",
"node",
",",
"pytree",
".",
"Leaf",
")",
":",
"return",
"(",
"node",
".",
"type",
"==",
"grammar_token",
".",
"COMMENT",
")",
"for",
"child",
"in",
"node",
".",
"children",
":"... | return true if the list has a comment in it . | train | false |
9,102 | def signed_varint(i):
if (i >= 0):
return varint((i << 1))
return varint(((i << 1) ^ (~ 0)))
| [
"def",
"signed_varint",
"(",
"i",
")",
":",
"if",
"(",
"i",
">=",
"0",
")",
":",
"return",
"varint",
"(",
"(",
"i",
"<<",
"1",
")",
")",
"return",
"varint",
"(",
"(",
"(",
"i",
"<<",
"1",
")",
"^",
"(",
"~",
"0",
")",
")",
")"
] | zig-zag encodes a signed integer into a varint . | train | false |
9,103 | def reload_from_cwd(module, reloader=None):
if (reloader is None):
reloader = reload
with cwd_in_path():
return reloader(module)
| [
"def",
"reload_from_cwd",
"(",
"module",
",",
"reloader",
"=",
"None",
")",
":",
"if",
"(",
"reloader",
"is",
"None",
")",
":",
"reloader",
"=",
"reload",
"with",
"cwd_in_path",
"(",
")",
":",
"return",
"reloader",
"(",
"module",
")"
] | reload module . | train | false |
9,104 | def inodeusage(args=None):
flags = _clean_flags(args, 'disk.inodeusage')
cmd = 'df -iP'
if flags:
cmd += ' -{0}'.format(flags)
ret = {}
out = __salt__['cmd.run'](cmd, python_shell=False).splitlines()
for line in out:
if line.startswith('Filesystem'):
continue
comps = line.split()
if (not comps):
con... | [
"def",
"inodeusage",
"(",
"args",
"=",
"None",
")",
":",
"flags",
"=",
"_clean_flags",
"(",
"args",
",",
"'disk.inodeusage'",
")",
"cmd",
"=",
"'df -iP'",
"if",
"flags",
":",
"cmd",
"+=",
"' -{0}'",
".",
"format",
"(",
"flags",
")",
"ret",
"=",
"{",
... | return inode usage information for volumes mounted on this minion cli example: . | train | true |
9,105 | def getPegCenterXs(numberOfSteps, pegCenterX, stepX):
pegCenterXs = []
for stepIndex in xrange(numberOfSteps):
pegCenterXs.append(pegCenterX)
pegCenterX += stepX
return pegCenterXs
| [
"def",
"getPegCenterXs",
"(",
"numberOfSteps",
",",
"pegCenterX",
",",
"stepX",
")",
":",
"pegCenterXs",
"=",
"[",
"]",
"for",
"stepIndex",
"in",
"xrange",
"(",
"numberOfSteps",
")",
":",
"pegCenterXs",
".",
"append",
"(",
"pegCenterX",
")",
"pegCenterX",
"+... | get the peg center x list . | train | false |
9,108 | def dumb_css_parser(data):
importIndex = data.find('@import')
while (importIndex != (-1)):
data = (data[0:importIndex] + data[(data.find(';', importIndex) + 1):])
importIndex = data.find('@import')
elements = [x.split('{') for x in data.split('}') if ('{' in x.strip())]
try:
elements = dict([(a.strip(), dumb_... | [
"def",
"dumb_css_parser",
"(",
"data",
")",
":",
"importIndex",
"=",
"data",
".",
"find",
"(",
"'@import'",
")",
"while",
"(",
"importIndex",
"!=",
"(",
"-",
"1",
")",
")",
":",
"data",
"=",
"(",
"data",
"[",
"0",
":",
"importIndex",
"]",
"+",
"dat... | returns a hash of css selectors . | train | true |
9,109 | def is_repo_link(val):
return val.startswith(u'weblate://')
| [
"def",
"is_repo_link",
"(",
"val",
")",
":",
"return",
"val",
".",
"startswith",
"(",
"u'weblate://'",
")"
] | checks whether repository is just a link for other one . | train | false |
9,110 | @requires_application()
def test_basics_desktop():
_test_basics('gl2')
with Canvas():
_test_setting_parameters()
_test_enabling_disabling()
_test_setting_stuff()
_test_object_creation_and_deletion()
_test_fbo()
try:
gl.gl2._get_gl_func('foo', None, ())
except RuntimeError as exp:
exp = str(exp)
... | [
"@",
"requires_application",
"(",
")",
"def",
"test_basics_desktop",
"(",
")",
":",
"_test_basics",
"(",
"'gl2'",
")",
"with",
"Canvas",
"(",
")",
":",
"_test_setting_parameters",
"(",
")",
"_test_enabling_disabling",
"(",
")",
"_test_setting_stuff",
"(",
")",
"... | test desktop gl backend for basic functionality . | train | false |
9,111 | def logical_and(image1, image2):
image1.load()
image2.load()
return image1._new(image1.im.chop_and(image2.im))
| [
"def",
"logical_and",
"(",
"image1",
",",
"image2",
")",
":",
"image1",
".",
"load",
"(",
")",
"image2",
".",
"load",
"(",
")",
"return",
"image1",
".",
"_new",
"(",
"image1",
".",
"im",
".",
"chop_and",
"(",
"image2",
".",
"im",
")",
")"
] | logical and between two images . | train | false |
9,113 | def getMatrixKey(row, column, prefix=''):
return (((prefix + 'm') + str((row + 1))) + str((column + 1)))
| [
"def",
"getMatrixKey",
"(",
"row",
",",
"column",
",",
"prefix",
"=",
"''",
")",
":",
"return",
"(",
"(",
"(",
"prefix",
"+",
"'m'",
")",
"+",
"str",
"(",
"(",
"row",
"+",
"1",
")",
")",
")",
"+",
"str",
"(",
"(",
"column",
"+",
"1",
")",
"... | get the key string from row & column . | train | false |
9,116 | @webob.dec.wsgify
@util.check_accept('application/json')
def list_for_resource_provider(req):
context = req.environ['placement.context']
uuid = util.wsgi_path_item(req.environ, 'uuid')
try:
resource_provider = objects.ResourceProvider.get_by_uuid(context, uuid)
except exception.NotFound as exc:
raise webob.exc.... | [
"@",
"webob",
".",
"dec",
".",
"wsgify",
"@",
"util",
".",
"check_accept",
"(",
"'application/json'",
")",
"def",
"list_for_resource_provider",
"(",
"req",
")",
":",
"context",
"=",
"req",
".",
"environ",
"[",
"'placement.context'",
"]",
"uuid",
"=",
"util",... | list allocations associated with a resource provider . | train | false |
9,117 | def xblock_primary_child_category(xblock):
category = xblock.category
if (category == 'course'):
return 'chapter'
elif (category == 'chapter'):
return 'sequential'
elif (category == 'sequential'):
return 'vertical'
return None
| [
"def",
"xblock_primary_child_category",
"(",
"xblock",
")",
":",
"category",
"=",
"xblock",
".",
"category",
"if",
"(",
"category",
"==",
"'course'",
")",
":",
"return",
"'chapter'",
"elif",
"(",
"category",
"==",
"'chapter'",
")",
":",
"return",
"'sequential'... | returns the primary child category for the specified xblock . | train | false |
9,118 | @app.teardown_appcontext
def close_db(error):
if hasattr(g, 'sqlite_db'):
g.sqlite_db.close()
| [
"@",
"app",
".",
"teardown_appcontext",
"def",
"close_db",
"(",
"error",
")",
":",
"if",
"hasattr",
"(",
"g",
",",
"'sqlite_db'",
")",
":",
"g",
".",
"sqlite_db",
".",
"close",
"(",
")"
] | you might want to call odoo . | train | false |
9,120 | def _get_notifier():
global _notifier
if (_notifier is None):
host = (CONF.default_publisher_id or socket.gethostname())
try:
transport = oslo_messaging.get_notification_transport(CONF)
_notifier = oslo_messaging.Notifier(transport, ('identity.%s' % host))
except Exception:
LOG.exception(_LE('Failed to... | [
"def",
"_get_notifier",
"(",
")",
":",
"global",
"_notifier",
"if",
"(",
"_notifier",
"is",
"None",
")",
":",
"host",
"=",
"(",
"CONF",
".",
"default_publisher_id",
"or",
"socket",
".",
"gethostname",
"(",
")",
")",
"try",
":",
"transport",
"=",
"oslo_me... | check the context for the notifier and construct it if not present . | train | false |
9,121 | def to_records(df):
from ...array.core import Array
if (not isinstance(df, (DataFrame, Series))):
raise TypeError('df must be either DataFrame or Series')
name = ('to-records-' + tokenize(df))
dsk = {(name, i): (M.to_records, key) for (i, key) in enumerate(df._keys())}
x = df._meta.to_records()
chunks = (((np.n... | [
"def",
"to_records",
"(",
"df",
")",
":",
"from",
"...",
"array",
".",
"core",
"import",
"Array",
"if",
"(",
"not",
"isinstance",
"(",
"df",
",",
"(",
"DataFrame",
",",
"Series",
")",
")",
")",
":",
"raise",
"TypeError",
"(",
"'df must be either DataFram... | create dask array from a dask dataframe warning: this creates a dask . | train | false |
9,123 | def milestones_achieved_by_user(user, namespace):
if (not settings.FEATURES.get('MILESTONES_APP')):
return None
return milestones_api.get_user_milestones({'id': user.id}, namespace)
| [
"def",
"milestones_achieved_by_user",
"(",
"user",
",",
"namespace",
")",
":",
"if",
"(",
"not",
"settings",
".",
"FEATURES",
".",
"get",
"(",
"'MILESTONES_APP'",
")",
")",
":",
"return",
"None",
"return",
"milestones_api",
".",
"get_user_milestones",
"(",
"{"... | it would fetch list of milestones completed by user . | train | false |
9,124 | def p_command_for_bad_initial(p):
p[0] = 'BAD INITIAL VALUE IN FOR STATEMENT'
| [
"def",
"p_command_for_bad_initial",
"(",
"p",
")",
":",
"p",
"[",
"0",
"]",
"=",
"'BAD INITIAL VALUE IN FOR STATEMENT'"
] | command : for id equals error to expr optstep . | train | false |
9,125 | def navigation_index(position):
if (position is None):
return None
try:
navigation_position = int(position.split('_', 1)[0])
except (ValueError, TypeError):
LOGGER.exception(u'Bad position %r passed to navigation_index, will assume first position', position)
navigation_position = 1
return navigation_positio... | [
"def",
"navigation_index",
"(",
"position",
")",
":",
"if",
"(",
"position",
"is",
"None",
")",
":",
"return",
"None",
"try",
":",
"navigation_position",
"=",
"int",
"(",
"position",
".",
"split",
"(",
"'_'",
",",
"1",
")",
"[",
"0",
"]",
")",
"excep... | get the navigation index from the position argument argument: position - result of position returned from call to path_to_location . | train | false |
9,127 | def url_path_join(*pieces):
initial = pieces[0].startswith('/')
final = pieces[(-1)].endswith('/')
stripped = [s.strip('/') for s in pieces]
result = '/'.join((s for s in stripped if s))
if initial:
result = ('/' + result)
if final:
result = (result + '/')
if (result == '//'):
result = '/'
return result
| [
"def",
"url_path_join",
"(",
"*",
"pieces",
")",
":",
"initial",
"=",
"pieces",
"[",
"0",
"]",
".",
"startswith",
"(",
"'/'",
")",
"final",
"=",
"pieces",
"[",
"(",
"-",
"1",
")",
"]",
".",
"endswith",
"(",
"'/'",
")",
"stripped",
"=",
"[",
"s",
... | join components of url into a relative url use to prevent double slash when joining subpath . | train | false |
9,128 | def _normalize_utf8_keys(kwargs):
if any(((type(key) is six.binary_type) for key in kwargs.keys())):
dict_type = type(kwargs)
return dict_type([(six.text_type(k), v) for (k, v) in kwargs.items()])
return kwargs
| [
"def",
"_normalize_utf8_keys",
"(",
"kwargs",
")",
":",
"if",
"any",
"(",
"(",
"(",
"type",
"(",
"key",
")",
"is",
"six",
".",
"binary_type",
")",
"for",
"key",
"in",
"kwargs",
".",
"keys",
"(",
")",
")",
")",
":",
"dict_type",
"=",
"type",
"(",
... | when kwargs are passed literally in a source file . | train | true |
9,129 | def memorized_datetime(seconds):
try:
return _datetime_cache[seconds]
except KeyError:
dt = (_epoch + timedelta(seconds=seconds))
_datetime_cache[seconds] = dt
return dt
| [
"def",
"memorized_datetime",
"(",
"seconds",
")",
":",
"try",
":",
"return",
"_datetime_cache",
"[",
"seconds",
"]",
"except",
"KeyError",
":",
"dt",
"=",
"(",
"_epoch",
"+",
"timedelta",
"(",
"seconds",
"=",
"seconds",
")",
")",
"_datetime_cache",
"[",
"s... | create only one instance of each distinct datetime . | train | true |
9,130 | def displayable_links_js(request):
links = []
if (u'mezzanine.pages' in settings.INSTALLED_APPS):
from mezzanine.pages.models import Page
is_page = (lambda obj: isinstance(obj, Page))
else:
is_page = (lambda obj: False)
for (url, obj) in Displayable.objects.url_map(for_user=request.user).items():
title = ge... | [
"def",
"displayable_links_js",
"(",
"request",
")",
":",
"links",
"=",
"[",
"]",
"if",
"(",
"u'mezzanine.pages'",
"in",
"settings",
".",
"INSTALLED_APPS",
")",
":",
"from",
"mezzanine",
".",
"pages",
".",
"models",
"import",
"Page",
"is_page",
"=",
"(",
"l... | renders a list of url/title pairs for all displayable subclass instances into json thats used to populate a list of links in tinymce . | train | false |
9,133 | def render_include(content):
content = cstr(content)
for i in xrange(5):
if (u'{% include' in content):
paths = re.findall(u'{% include\\s[\'"](.*)[\'"]\\s%}', content)
if (not paths):
frappe.throw(u'Invalid include path', InvalidIncludePath)
for path in paths:
(app, app_path) = path.split(u'/', 1)... | [
"def",
"render_include",
"(",
"content",
")",
":",
"content",
"=",
"cstr",
"(",
"content",
")",
"for",
"i",
"in",
"xrange",
"(",
"5",
")",
":",
"if",
"(",
"u'{% include'",
"in",
"content",
")",
":",
"paths",
"=",
"re",
".",
"findall",
"(",
"u'{% incl... | render {% raw %}{% include "app/path/filename" %}{% endraw %} in js file . | train | false |
9,134 | def minify_image(data, minify_to=(1200, 1600), preserve_aspect_ratio=True):
img = _data_to_image(data)
(owidth, oheight) = img.size
(nwidth, nheight) = minify_to
if ((owidth <= nwidth) and (oheight <= nheight)):
return img
if preserve_aspect_ratio:
(scaled, nwidth, nheight) = fit_image(owidth, oheight, nwidth,... | [
"def",
"minify_image",
"(",
"data",
",",
"minify_to",
"=",
"(",
"1200",
",",
"1600",
")",
",",
"preserve_aspect_ratio",
"=",
"True",
")",
":",
"img",
"=",
"_data_to_image",
"(",
"data",
")",
"(",
"owidth",
",",
"oheight",
")",
"=",
"img",
".",
"size",
... | minify image to specified size if image is bigger than specified size and return minified image . | train | false |
9,135 | def parse_identity(identity):
try:
identity = _identities[identity]
except KeyError:
raise ValueError(('Invalid identity value %r' % (identity,)))
return identity
| [
"def",
"parse_identity",
"(",
"identity",
")",
":",
"try",
":",
"identity",
"=",
"_identities",
"[",
"identity",
"]",
"except",
"KeyError",
":",
"raise",
"ValueError",
"(",
"(",
"'Invalid identity value %r'",
"%",
"(",
"identity",
",",
")",
")",
")",
"return... | parse an identity value and return the corresponding low-level value for numpy . | train | false |
9,137 | def sumofsq(x, axis=0):
return np.sum((x ** 2), axis=0)
| [
"def",
"sumofsq",
"(",
"x",
",",
"axis",
"=",
"0",
")",
":",
"return",
"np",
".",
"sum",
"(",
"(",
"x",
"**",
"2",
")",
",",
"axis",
"=",
"0",
")"
] | helper function to calculate sum of squares along first axis . | train | false |
9,138 | def oo_filter_list(data, filter_attr=None):
if (not isinstance(data, list)):
raise errors.AnsibleFilterError('|failed expects to filter on a list')
if (not isinstance(filter_attr, string_types)):
raise errors.AnsibleFilterError('|failed expects filter_attr is a str or unicode')
return [x for x in data if ((filte... | [
"def",
"oo_filter_list",
"(",
"data",
",",
"filter_attr",
"=",
"None",
")",
":",
"if",
"(",
"not",
"isinstance",
"(",
"data",
",",
"list",
")",
")",
":",
"raise",
"errors",
".",
"AnsibleFilterError",
"(",
"'|failed expects to filter on a list'",
")",
"if",
"... | this returns a list . | train | false |
9,139 | @transaction.non_atomic_requests
@require_POST
@ensure_csrf_cookie
@cache_control(no_cache=True, no_store=True, must_revalidate=True)
@require_level('staff')
def get_students_who_may_enroll(request, course_id):
course_key = CourseKey.from_string(course_id)
query_features = ['email']
try:
lms.djangoapps.instructor_... | [
"@",
"transaction",
".",
"non_atomic_requests",
"@",
"require_POST",
"@",
"ensure_csrf_cookie",
"@",
"cache_control",
"(",
"no_cache",
"=",
"True",
",",
"no_store",
"=",
"True",
",",
"must_revalidate",
"=",
"True",
")",
"@",
"require_level",
"(",
"'staff'",
")",... | initiate generation of a csv file containing information about students who may enroll in a course . | train | false |
9,140 | def wrap_errors(errors, function, args, kwargs):
try:
return (True, function(*args, **kwargs))
except errors as ex:
return (False, ex)
| [
"def",
"wrap_errors",
"(",
"errors",
",",
"function",
",",
"args",
",",
"kwargs",
")",
":",
"try",
":",
"return",
"(",
"True",
",",
"function",
"(",
"*",
"args",
",",
"**",
"kwargs",
")",
")",
"except",
"errors",
"as",
"ex",
":",
"return",
"(",
"Fa... | ensure errors are not passed along . | train | false |
9,141 | @check_job_permission
def task_attempt_counters(request, job, taskid, attemptid):
job_link = JobLinkage(request.jt, job.jobId)
task = job_link.get_task(taskid)
attempt = task.get_attempt(attemptid)
counters = {}
if attempt:
counters = attempt.counters
return render('counters.html', request, {'counters': counter... | [
"@",
"check_job_permission",
"def",
"task_attempt_counters",
"(",
"request",
",",
"job",
",",
"taskid",
",",
"attemptid",
")",
":",
"job_link",
"=",
"JobLinkage",
"(",
"request",
".",
"jt",
",",
"job",
".",
"jobId",
")",
"task",
"=",
"job_link",
".",
"get_... | we get here from /jobs/jobid/tasks/taskid/attempts/attemptid/counters . | train | false |
9,142 | def filter(names, pat):
result = []
pat = os.path.normcase(pat)
match = _compile_pattern(pat)
if (os.path is posixpath):
for name in names:
if match(name):
result.append(name)
else:
for name in names:
if match(os.path.normcase(name)):
result.append(name)
return result
| [
"def",
"filter",
"(",
"names",
",",
"pat",
")",
":",
"result",
"=",
"[",
"]",
"pat",
"=",
"os",
".",
"path",
".",
"normcase",
"(",
"pat",
")",
"match",
"=",
"_compile_pattern",
"(",
"pat",
")",
"if",
"(",
"os",
".",
"path",
"is",
"posixpath",
")"... | filters out unwanted items using the specified function . | train | false |
9,143 | def from_time(year=None, month=None, day=None, hours=None, minutes=None, seconds=None, microseconds=None, timezone=None):
def str_or_stars(i, length):
if (i is None):
return ('*' * length)
else:
return str(i).rjust(length, '0')
wmi_time = ''
wmi_time += str_or_stars(year, 4)
wmi_time += str_or_stars(month... | [
"def",
"from_time",
"(",
"year",
"=",
"None",
",",
"month",
"=",
"None",
",",
"day",
"=",
"None",
",",
"hours",
"=",
"None",
",",
"minutes",
"=",
"None",
",",
"seconds",
"=",
"None",
",",
"microseconds",
"=",
"None",
",",
"timezone",
"=",
"None",
"... | convenience wrapper to take a series of date/time elements and return a wmi time of the form yyyymmddhhmmss . | train | true |
9,144 | @utils.decorator
def non_transactional(func, args, kwds, allow_existing=True):
from . import tasklets
ctx = tasklets.get_context()
if (not ctx.in_transaction()):
return func(*args, **kwds)
if (not allow_existing):
raise datastore_errors.BadRequestError(('%s cannot be called within a transaction.' % func.__name_... | [
"@",
"utils",
".",
"decorator",
"def",
"non_transactional",
"(",
"func",
",",
"args",
",",
"kwds",
",",
"allow_existing",
"=",
"True",
")",
":",
"from",
".",
"import",
"tasklets",
"ctx",
"=",
"tasklets",
".",
"get_context",
"(",
")",
"if",
"(",
"not",
... | a decorator that ensures a function is run outside a transaction . | train | true |
9,145 | def tokens(doc):
return (tok.lower() for tok in re.findall('\\w+', doc))
| [
"def",
"tokens",
"(",
"doc",
")",
":",
"return",
"(",
"tok",
".",
"lower",
"(",
")",
"for",
"tok",
"in",
"re",
".",
"findall",
"(",
"'\\\\w+'",
",",
"doc",
")",
")"
] | extract tokens from doc . | train | false |
9,146 | def CreateXsrfToken(action):
user_str = _MakeUserStr()
token = base64.b64encode(''.join((chr(int((random.random() * 255))) for _ in range(0, 64))))
memcache.set(token, (user_str, action), time=XSRF_VALIDITY_TIME, namespace=MEMCACHE_NAMESPACE)
return token
| [
"def",
"CreateXsrfToken",
"(",
"action",
")",
":",
"user_str",
"=",
"_MakeUserStr",
"(",
")",
"token",
"=",
"base64",
".",
"b64encode",
"(",
"''",
".",
"join",
"(",
"(",
"chr",
"(",
"int",
"(",
"(",
"random",
".",
"random",
"(",
")",
"*",
"255",
")... | generate a token to be passed with a form for xsrf protection . | train | false |
9,147 | def parse_redaction_policy_from_file(filename):
with open(filename) as f:
s = f.read().strip()
if (not s):
return RedactionPolicy([])
scheme = json.loads(s)
try:
version = str(scheme.pop('version'))
except KeyError:
raise ValueError('Redaction policy is missing `version` field')
if (version != '1'... | [
"def",
"parse_redaction_policy_from_file",
"(",
"filename",
")",
":",
"with",
"open",
"(",
"filename",
")",
"as",
"f",
":",
"s",
"=",
"f",
".",
"read",
"(",
")",
".",
"strip",
"(",
")",
"if",
"(",
"not",
"s",
")",
":",
"return",
"RedactionPolicy",
"(... | parse a file into a redactionpolicy . | train | false |
9,148 | def strip(path, count):
path = '/'.join(path.split(os.sep))
return path.split('/', count)[(-1)]
| [
"def",
"strip",
"(",
"path",
",",
"count",
")",
":",
"path",
"=",
"'/'",
".",
"join",
"(",
"path",
".",
"split",
"(",
"os",
".",
"sep",
")",
")",
"return",
"path",
".",
"split",
"(",
"'/'",
",",
"count",
")",
"[",
"(",
"-",
"1",
")",
"]"
] | strip -> string return a copy of the string s with leading and trailing whitespace removed . | train | false |
9,149 | @conf.commands.register
def is_promisc(ip, fake_bcast='ff:ff:00:00:00:00', **kargs):
responses = srp1((Ether(dst=fake_bcast) / ARP(op='who-has', pdst=ip)), type=ETH_P_ARP, iface_hint=ip, timeout=1, verbose=0, **kargs)
return (responses is not None)
| [
"@",
"conf",
".",
"commands",
".",
"register",
"def",
"is_promisc",
"(",
"ip",
",",
"fake_bcast",
"=",
"'ff:ff:00:00:00:00'",
",",
"**",
"kargs",
")",
":",
"responses",
"=",
"srp1",
"(",
"(",
"Ether",
"(",
"dst",
"=",
"fake_bcast",
")",
"/",
"ARP",
"("... | try to guess if target is in promisc mode . | train | true |
9,150 | def auto_model(model):
return getattr(model._meta, 'auto_created', False)
| [
"def",
"auto_model",
"(",
"model",
")",
":",
"return",
"getattr",
"(",
"model",
".",
"_meta",
",",
"'auto_created'",
",",
"False",
")"
] | returns if the given model was automatically generated . | train | false |
9,151 | def test_replace_update_column_via_setitem_warnings_refcount():
t = table.Table([[1, 2, 3], [4, 5, 6]], names=['a', 'b'])
ta = t['a']
with catch_warnings() as w:
with table.conf.set_temp('replace_warnings', ['refcount', 'attributes', 'slice']):
t['a'] = [10, 20, 30]
assert (len(w) == 1)
assert ("replaced co... | [
"def",
"test_replace_update_column_via_setitem_warnings_refcount",
"(",
")",
":",
"t",
"=",
"table",
".",
"Table",
"(",
"[",
"[",
"1",
",",
"2",
",",
"3",
"]",
",",
"[",
"4",
",",
"5",
",",
"6",
"]",
"]",
",",
"names",
"=",
"[",
"'a'",
",",
"'b'",
... | test warnings related to table replace change in #5556: reference count changes . | train | false |
9,152 | def validate_manifest(filepath):
manifest_data = return_json(filepath)
dependencies = manifest_data['dependencies']
for (_, dependency) in dependencies.items():
for (_, dependency_contents) in dependency.items():
if ('downloadFormat' not in dependency_contents):
raise Exception(('downloadFormat not specifie... | [
"def",
"validate_manifest",
"(",
"filepath",
")",
":",
"manifest_data",
"=",
"return_json",
"(",
"filepath",
")",
"dependencies",
"=",
"manifest_data",
"[",
"'dependencies'",
"]",
"for",
"(",
"_",
",",
"dependency",
")",
"in",
"dependencies",
".",
"items",
"("... | this validates syntax of the manifest . | train | false |
9,153 | def get_build_maps():
build_maps = {}
for app_path in app_paths:
path = os.path.join(app_path, u'public', u'build.json')
if os.path.exists(path):
with open(path) as f:
try:
for (target, sources) in json.loads(f.read()).iteritems():
source_paths = []
for source in sources:
if isinsta... | [
"def",
"get_build_maps",
"(",
")",
":",
"build_maps",
"=",
"{",
"}",
"for",
"app_path",
"in",
"app_paths",
":",
"path",
"=",
"os",
".",
"path",
".",
"join",
"(",
"app_path",
",",
"u'public'",
",",
"u'build.json'",
")",
"if",
"os",
".",
"path",
".",
"... | get all build . | train | false |
9,156 | def safe_float(x):
if ('?' in x):
return np.nan
else:
return float(x)
| [
"def",
"safe_float",
"(",
"x",
")",
":",
"if",
"(",
"'?'",
"in",
"x",
")",
":",
"return",
"np",
".",
"nan",
"else",
":",
"return",
"float",
"(",
"x",
")"
] | given a string x . | train | false |
9,157 | def vsstrrepr(expr, **settings):
p = VectorStrReprPrinter(settings)
return p.doprint(expr)
| [
"def",
"vsstrrepr",
"(",
"expr",
",",
"**",
"settings",
")",
":",
"p",
"=",
"VectorStrReprPrinter",
"(",
"settings",
")",
"return",
"p",
".",
"doprint",
"(",
"expr",
")"
] | function for displaying expression representations with vector printing enabled . | train | false |
9,158 | @register.tag
def recursetree(parser, token):
bits = token.contents.split()
if (len(bits) != 2):
raise template.TemplateSyntaxError((_(u'%s tag requires a queryset') % bits[0]))
queryset_var = template.Variable(bits[1])
template_nodes = parser.parse((u'endrecursetree',))
parser.delete_first_token()
return Recur... | [
"@",
"register",
".",
"tag",
"def",
"recursetree",
"(",
"parser",
",",
"token",
")",
":",
"bits",
"=",
"token",
".",
"contents",
".",
"split",
"(",
")",
"if",
"(",
"len",
"(",
"bits",
")",
"!=",
"2",
")",
":",
"raise",
"template",
".",
"TemplateSyn... | iterates over the nodes in the tree . | train | false |
9,159 | def report_diff_keyword_attr(fileobj, attr, diffs, keyword, ind=0):
if (keyword in diffs):
vals = diffs[keyword]
for (idx, val) in enumerate(vals):
if (val is None):
continue
if (idx == 0):
dup = ''
else:
dup = '[{}]'.format((idx + 1))
fileobj.write(indent(u(' Keyword {:8}{} has different {... | [
"def",
"report_diff_keyword_attr",
"(",
"fileobj",
",",
"attr",
",",
"diffs",
",",
"keyword",
",",
"ind",
"=",
"0",
")",
":",
"if",
"(",
"keyword",
"in",
"diffs",
")",
":",
"vals",
"=",
"diffs",
"[",
"keyword",
"]",
"for",
"(",
"idx",
",",
"val",
"... | write a diff between two header keyword values or comments to the specified file-like object . | train | false |
9,160 | @contextlib.contextmanager
def _ignore_deprecated_imports(ignore=True):
if ignore:
with warnings.catch_warnings():
warnings.filterwarnings('ignore', '.+ (module|package)', DeprecationWarning)
(yield)
else:
(yield)
| [
"@",
"contextlib",
".",
"contextmanager",
"def",
"_ignore_deprecated_imports",
"(",
"ignore",
"=",
"True",
")",
":",
"if",
"ignore",
":",
"with",
"warnings",
".",
"catch_warnings",
"(",
")",
":",
"warnings",
".",
"filterwarnings",
"(",
"'ignore'",
",",
"'.+ (m... | context manager to suppress package and module deprecation warnings when importing them . | train | false |
9,162 | def avg_pool_3d(incoming, kernel_size, strides=None, padding='same', name='AvgPool3D'):
input_shape = utils.get_incoming_shape(incoming)
assert (len(input_shape) == 5), 'Incoming Tensor shape must be 5-D'
kernel = utils.autoformat_kernel_3d(kernel_size)
strides = utils.autoformat_stride_3d(strides)
padding = utils... | [
"def",
"avg_pool_3d",
"(",
"incoming",
",",
"kernel_size",
",",
"strides",
"=",
"None",
",",
"padding",
"=",
"'same'",
",",
"name",
"=",
"'AvgPool3D'",
")",
":",
"input_shape",
"=",
"utils",
".",
"get_incoming_shape",
"(",
"incoming",
")",
"assert",
"(",
"... | average pooling 3d . | train | false |
9,163 | def bfs_predecessors(G, source):
for (s, t) in bfs_edges(G, source):
(yield (t, s))
| [
"def",
"bfs_predecessors",
"(",
"G",
",",
"source",
")",
":",
"for",
"(",
"s",
",",
"t",
")",
"in",
"bfs_edges",
"(",
"G",
",",
"source",
")",
":",
"(",
"yield",
"(",
"t",
",",
"s",
")",
")"
] | returns an iterator of predecessors in breadth-first-search from source . | train | false |
9,164 | def test_empty(empty_history):
items = []
(stream, _data, user_data) = tabhistory.serialize(items)
qtutils.deserialize_stream(stream, empty_history)
assert (empty_history.count() == 0)
assert (empty_history.currentItemIndex() == 0)
assert (not user_data)
| [
"def",
"test_empty",
"(",
"empty_history",
")",
":",
"items",
"=",
"[",
"]",
"(",
"stream",
",",
"_data",
",",
"user_data",
")",
"=",
"tabhistory",
".",
"serialize",
"(",
"items",
")",
"qtutils",
".",
"deserialize_stream",
"(",
"stream",
",",
"empty_histor... | check tabhistory . | train | false |
9,165 | def ruby_metrics(registry, xml_parent, data):
metrics = XML.SubElement(xml_parent, 'hudson.plugins.rubyMetrics.rcov.RcovPublisher')
report_dir = data.get('report-dir', '')
XML.SubElement(metrics, 'reportDir').text = report_dir
targets = XML.SubElement(metrics, 'targets')
if ('target' in data):
for t in data['tar... | [
"def",
"ruby_metrics",
"(",
"registry",
",",
"xml_parent",
",",
"data",
")",
":",
"metrics",
"=",
"XML",
".",
"SubElement",
"(",
"xml_parent",
",",
"'hudson.plugins.rubyMetrics.rcov.RcovPublisher'",
")",
"report_dir",
"=",
"data",
".",
"get",
"(",
"'report-dir'",
... | yaml: ruby-metrics rcov plugin parses rcov html report files and shows it in jenkins with a trend graph . | train | false |
9,167 | def test_stratified_validation_k_fold():
skip_if_no_sklearn()
from pylearn2.cross_validation.subset_iterators import StratifiedValidationKFold
n = 30
y = np.concatenate((np.zeros((n / 2), dtype=int), np.ones((n / 2), dtype=int)))
cv = StratifiedValidationKFold(y)
for (train, valid, test) in cv:
assert (np.uniqu... | [
"def",
"test_stratified_validation_k_fold",
"(",
")",
":",
"skip_if_no_sklearn",
"(",
")",
"from",
"pylearn2",
".",
"cross_validation",
".",
"subset_iterators",
"import",
"StratifiedValidationKFold",
"n",
"=",
"30",
"y",
"=",
"np",
".",
"concatenate",
"(",
"(",
"n... | test stratifiedvalidationkfold . | train | false |
9,168 | def is_auth(nodes):
cmd = ['pcs', 'cluster', 'auth']
cmd += nodes
return __salt__['cmd.run_all'](cmd, stdin='\n\n', output_loglevel='trace', python_shell=False)
| [
"def",
"is_auth",
"(",
"nodes",
")",
":",
"cmd",
"=",
"[",
"'pcs'",
",",
"'cluster'",
",",
"'auth'",
"]",
"cmd",
"+=",
"nodes",
"return",
"__salt__",
"[",
"'cmd.run_all'",
"]",
"(",
"cmd",
",",
"stdin",
"=",
"'\\n\\n'",
",",
"output_loglevel",
"=",
"'t... | check if nodes are already authorized nodes a list of nodes to be checked for authorization to the cluster cli example: . | train | true |