id_within_dataset int64 1 55.5k | snippet stringlengths 19 14.2k | tokens listlengths 6 1.63k | nl stringlengths 6 352 | split_within_dataset stringclasses 1
value | is_duplicated bool 2
classes |
|---|---|---|---|---|---|
38,736 | def get_repository_for_hook(repository_id, hosting_service_id, local_site_name, hooks_uuid=None):
q = (Q(pk=repository_id) & Q(hosting_account__service_name=hosting_service_id))
if hooks_uuid:
q &= Q(hooks_uuid=hooks_uuid)
if local_site_name:
q &= Q(local_site__name=local_site_name)
else:
q &= Q(local_site__isnull=True)
return get_object_or_404(Repository, q)
| [
"def",
"get_repository_for_hook",
"(",
"repository_id",
",",
"hosting_service_id",
",",
"local_site_name",
",",
"hooks_uuid",
"=",
"None",
")",
":",
"q",
"=",
"(",
"Q",
"(",
"pk",
"=",
"repository_id",
")",
"&",
"Q",
"(",
"hosting_account__service_name",
"=",
... | returns a repository for the given hook parameters . | train | false |
38,737 | def quota_update_resource(context, old_res, new_res):
return IMPL.quota_update_resource(context, old_res, new_res)
| [
"def",
"quota_update_resource",
"(",
"context",
",",
"old_res",
",",
"new_res",
")",
":",
"return",
"IMPL",
".",
"quota_update_resource",
"(",
"context",
",",
"old_res",
",",
"new_res",
")"
] | update resource of quotas . | train | false |
38,738 | def quote_identifier(identifier, for_grants=False):
if for_grants:
return (('`' + identifier.replace('`', '``').replace('_', '\\_').replace('%', '\\%%')) + '`')
else:
return (('`' + identifier.replace('`', '``').replace('%', '%%')) + '`')
| [
"def",
"quote_identifier",
"(",
"identifier",
",",
"for_grants",
"=",
"False",
")",
":",
"if",
"for_grants",
":",
"return",
"(",
"(",
"'`'",
"+",
"identifier",
".",
"replace",
"(",
"'`'",
",",
"'``'",
")",
".",
"replace",
"(",
"'_'",
",",
"'\\\\_'",
")... | return an identifier name escaped for mysql this means surrounded by "" character and escaping this character inside . | train | false |
38,740 | def HashFile(f):
if (type(f) is list):
f = f[0]
try:
return hashlib.sha256(open(f, 'rb').read()).hexdigest()
except:
return 'UNKNOWN'
| [
"def",
"HashFile",
"(",
"f",
")",
":",
"if",
"(",
"type",
"(",
"f",
")",
"is",
"list",
")",
":",
"f",
"=",
"f",
"[",
"0",
"]",
"try",
":",
"return",
"hashlib",
".",
"sha256",
"(",
"open",
"(",
"f",
",",
"'rb'",
")",
".",
"read",
"(",
")",
... | returns sha-256 hash of a given file . | train | false |
38,742 | def attach_bumper_transcript(item, filename, lang='en'):
item.video_bumper['transcripts'][lang] = filename
| [
"def",
"attach_bumper_transcript",
"(",
"item",
",",
"filename",
",",
"lang",
"=",
"'en'",
")",
":",
"item",
".",
"video_bumper",
"[",
"'transcripts'",
"]",
"[",
"lang",
"]",
"=",
"filename"
] | attach bumper transcript . | train | false |
38,743 | def metadef_object_get_all(context, namespace_name, session=None):
session = (session or get_session())
return metadef_object_api.get_all(context, namespace_name, session)
| [
"def",
"metadef_object_get_all",
"(",
"context",
",",
"namespace_name",
",",
"session",
"=",
"None",
")",
":",
"session",
"=",
"(",
"session",
"or",
"get_session",
"(",
")",
")",
"return",
"metadef_object_api",
".",
"get_all",
"(",
"context",
",",
"namespace_n... | get a metadata-schema object or raise if it does not exist . | train | false |
38,744 | def _verify_revision_is_published(location):
assert (location.revision == MongoRevisionKey.published)
| [
"def",
"_verify_revision_is_published",
"(",
"location",
")",
":",
"assert",
"(",
"location",
".",
"revision",
"==",
"MongoRevisionKey",
".",
"published",
")"
] | asserts that the revision set on the given location is mongorevisionkey . | train | false |
38,745 | def do_list_extensions(cs, _args):
extensions = cs.list_extensions.show_all()
fields = ['Name', 'Summary', 'Alias', 'Updated']
utils.print_list(extensions, fields)
| [
"def",
"do_list_extensions",
"(",
"cs",
",",
"_args",
")",
":",
"extensions",
"=",
"cs",
".",
"list_extensions",
".",
"show_all",
"(",
")",
"fields",
"=",
"[",
"'Name'",
",",
"'Summary'",
",",
"'Alias'",
",",
"'Updated'",
"]",
"utils",
".",
"print_list",
... | list all the os-api extensions that are available . | train | false |
38,746 | def bellman_ford(G, source, weight='weight'):
_warnings.warn('Function bellman_ford() is deprecated, use function bellman_ford_predecessor_and_distance() instead.', DeprecationWarning)
return bellman_ford_predecessor_and_distance(G, source, weight=weight)
| [
"def",
"bellman_ford",
"(",
"G",
",",
"source",
",",
"weight",
"=",
"'weight'",
")",
":",
"_warnings",
".",
"warn",
"(",
"'Function bellman_ford() is deprecated, use function bellman_ford_predecessor_and_distance() instead.'",
",",
"DeprecationWarning",
")",
"return",
"bell... | deprecated: has been replaced by function bellman_ford_predecessor_and_distance() . | train | false |
38,748 | def system_specific_scripts(system=None):
if is_windows(system):
return ['.bat', '.vbs']
elif is_osx(system):
return ['.command', '.sh']
else:
return ['.sh']
| [
"def",
"system_specific_scripts",
"(",
"system",
"=",
"None",
")",
":",
"if",
"is_windows",
"(",
"system",
")",
":",
"return",
"[",
"'.bat'",
",",
"'.vbs'",
"]",
"elif",
"is_osx",
"(",
"system",
")",
":",
"return",
"[",
"'.command'",
",",
"'.sh'",
"]",
... | all scripting types for that platform . | train | false |
38,749 | def test_update_contact(contacts_provider, contact_sync, db):
contacts_provider.supply_contact('Old Name', 'old@email.address')
contact_sync.provider = contacts_provider
contact_sync.sync()
results = db.session.query(Contact).all()
email_addresses = [r.email_address for r in results]
assert ('old@email.address' in email_addresses)
contacts_provider.__init__()
contacts_provider.supply_contact('New Name', 'new@email.address')
contact_sync.sync()
db.session.commit()
results = db.session.query(Contact).all()
names = [r.name for r in results]
assert ('New Name' in names)
email_addresses = [r.email_address for r in results]
assert ('new@email.address' in email_addresses)
| [
"def",
"test_update_contact",
"(",
"contacts_provider",
",",
"contact_sync",
",",
"db",
")",
":",
"contacts_provider",
".",
"supply_contact",
"(",
"'Old Name'",
",",
"'old@email.address'",
")",
"contact_sync",
".",
"provider",
"=",
"contacts_provider",
"contact_sync",
... | test that subsequent contact updates get stored . | train | false |
38,750 | def CleanList(list, folder, files):
for path in sorted(list.keys()):
(fld, name) = os.path.split(path)
if (fld == folder):
present = False
for name in files:
if (os.path.join(folder, name) == path):
present = True
break
if (not present):
del list[path]
| [
"def",
"CleanList",
"(",
"list",
",",
"folder",
",",
"files",
")",
":",
"for",
"path",
"in",
"sorted",
"(",
"list",
".",
"keys",
"(",
")",
")",
":",
"(",
"fld",
",",
"name",
")",
"=",
"os",
".",
"path",
".",
"split",
"(",
"path",
")",
"if",
"... | remove elements of "list" not found in "files" . | train | false |
38,751 | def run_simulation(session, users):
for (n, user) in enumerate(users):
if (((n % 100) == 0) and (n != 0)):
print 'Simulated data for {} users'.format(n)
simulate_user_history(session, user)
print 'COMPLETE: Simulated data for {} users'.format(len(users))
| [
"def",
"run_simulation",
"(",
"session",
",",
"users",
")",
":",
"for",
"(",
"n",
",",
"user",
")",
"in",
"enumerate",
"(",
"users",
")",
":",
"if",
"(",
"(",
"(",
"n",
"%",
"100",
")",
"==",
"0",
")",
"and",
"(",
"n",
"!=",
"0",
")",
")",
... | simulates app activity for all users . | train | false |
38,752 | def get_platform_info_dict():
platform_dict = {}
(sysname, nodename, release, version, machine) = os.uname()
platform_dict['os'] = sysname.lower()
platform_dict['architecture'] = machine.lower()
return platform_dict
| [
"def",
"get_platform_info_dict",
"(",
")",
":",
"platform_dict",
"=",
"{",
"}",
"(",
"sysname",
",",
"nodename",
",",
"release",
",",
"version",
",",
"machine",
")",
"=",
"os",
".",
"uname",
"(",
")",
"platform_dict",
"[",
"'os'",
"]",
"=",
"sysname",
... | return a dict with information about the current platform . | train | false |
38,753 | def get_all_types_by_group(context, group_id):
vol_types = db.volume_type_get_all_by_group(context, group_id)
return vol_types
| [
"def",
"get_all_types_by_group",
"(",
"context",
",",
"group_id",
")",
":",
"vol_types",
"=",
"db",
".",
"volume_type_get_all_by_group",
"(",
"context",
",",
"group_id",
")",
"return",
"vol_types"
] | get all volume_types in a group . | train | false |
38,754 | def test_unit_summary_prefixes():
from .. import astrophys
for summary in utils._iter_unit_summary(astrophys.__dict__):
(unit, _, _, _, prefixes) = summary
if (unit.name == u'lyr'):
assert prefixes
elif (unit.name == u'pc'):
assert prefixes
elif (unit.name == u'barn'):
assert prefixes
elif (unit.name == u'cycle'):
assert (not prefixes)
elif (unit.name == u'vox'):
assert prefixes
| [
"def",
"test_unit_summary_prefixes",
"(",
")",
":",
"from",
".",
".",
"import",
"astrophys",
"for",
"summary",
"in",
"utils",
".",
"_iter_unit_summary",
"(",
"astrophys",
".",
"__dict__",
")",
":",
"(",
"unit",
",",
"_",
",",
"_",
",",
"_",
",",
"prefixe... | test for a few units that the unit summary table correctly reports whether or not that unit supports prefixes . | train | false |
38,756 | def _FixPaths(paths):
return [_FixPath(i) for i in paths]
| [
"def",
"_FixPaths",
"(",
"paths",
")",
":",
"return",
"[",
"_FixPath",
"(",
"i",
")",
"for",
"i",
"in",
"paths",
"]"
] | fix each of the paths of the list . | train | false |
38,759 | def blankline_separated_blocks(text):
block = []
for line in text.splitlines(True):
block.append(line)
line = line.strip()
if ((not line) or (line.startswith('===') and (not line.strip('=')))):
(yield ''.join(block))
block = []
if block:
(yield ''.join(block))
| [
"def",
"blankline_separated_blocks",
"(",
"text",
")",
":",
"block",
"=",
"[",
"]",
"for",
"line",
"in",
"text",
".",
"splitlines",
"(",
"True",
")",
":",
"block",
".",
"append",
"(",
"line",
")",
"line",
"=",
"line",
".",
"strip",
"(",
")",
"if",
... | a bunch of === characters is also considered a blank line . | train | false |
38,760 | def frequencies(seq):
d = collections.defaultdict(int)
for item in seq:
d[item] += 1
return dict(d)
| [
"def",
"frequencies",
"(",
"seq",
")",
":",
"d",
"=",
"collections",
".",
"defaultdict",
"(",
"int",
")",
"for",
"item",
"in",
"seq",
":",
"d",
"[",
"item",
"]",
"+=",
"1",
"return",
"dict",
"(",
"d",
")"
] | find number of occurrences of each value in seq . | train | false |
38,761 | def sort_results(results):
results.sort(key=(lambda x: unicode_sorter(force_text(x))))
| [
"def",
"sort_results",
"(",
"results",
")",
":",
"results",
".",
"sort",
"(",
"key",
"=",
"(",
"lambda",
"x",
":",
"unicode_sorter",
"(",
"force_text",
"(",
"x",
")",
")",
")",
")"
] | performs in-place sort of filterchain results . | train | false |
38,762 | def get_feedback_message_references(user_id):
model = feedback_models.UnsentFeedbackEmailModel.get(user_id, strict=False)
if (model is None):
return []
return [feedback_domain.FeedbackMessageReference(reference['exploration_id'], reference['thread_id'], reference['message_id']) for reference in model.feedback_message_references]
| [
"def",
"get_feedback_message_references",
"(",
"user_id",
")",
":",
"model",
"=",
"feedback_models",
".",
"UnsentFeedbackEmailModel",
".",
"get",
"(",
"user_id",
",",
"strict",
"=",
"False",
")",
"if",
"(",
"model",
"is",
"None",
")",
":",
"return",
"[",
"]"... | fetches all feedbackmessagereference objects written by the given user。 args: user_id: str . | train | false |
38,763 | def compute_node_get_all_by_pagination(context, limit=None, marker=None):
return IMPL.compute_node_get_all_by_pagination(context, limit=limit, marker=marker)
| [
"def",
"compute_node_get_all_by_pagination",
"(",
"context",
",",
"limit",
"=",
"None",
",",
"marker",
"=",
"None",
")",
":",
"return",
"IMPL",
".",
"compute_node_get_all_by_pagination",
"(",
"context",
",",
"limit",
"=",
"limit",
",",
"marker",
"=",
"marker",
... | get compute nodes by pagination . | train | false |
38,764 | def _split_conditional(string):
bracks_open = 0
args = []
carg = ''
escaped = False
for (idx, char) in enumerate(string):
if (char == '('):
if (not escaped):
bracks_open += 1
elif (char == ')'):
if (not escaped):
bracks_open -= 1
elif ((char == ':') and (not bracks_open) and (not escaped)):
args.append(carg)
carg = ''
escaped = False
continue
carg += char
if (char == '\\'):
escaped = (not escaped)
else:
escaped = False
args.append(carg)
return args
| [
"def",
"_split_conditional",
"(",
"string",
")",
":",
"bracks_open",
"=",
"0",
"args",
"=",
"[",
"]",
"carg",
"=",
"''",
"escaped",
"=",
"False",
"for",
"(",
"idx",
",",
"char",
")",
"in",
"enumerate",
"(",
"string",
")",
":",
"if",
"(",
"char",
"=... | split the given conditional string into its arguments . | train | false |
38,766 | @contextlib.contextmanager
def ignored(*exceptions):
try:
(yield)
except exceptions:
pass
| [
"@",
"contextlib",
".",
"contextmanager",
"def",
"ignored",
"(",
"*",
"exceptions",
")",
":",
"try",
":",
"(",
"yield",
")",
"except",
"exceptions",
":",
"pass"
] | context manager that ignores all of the specified exceptions . | train | false |
38,769 | def publish_programmatically(source_class, source, source_path, destination_class, destination, destination_path, reader, reader_name, parser, parser_name, writer, writer_name, settings, settings_spec, settings_overrides, config_section, enable_exit_status):
pub = Publisher(reader, parser, writer, settings=settings, source_class=source_class, destination_class=destination_class)
pub.set_components(reader_name, parser_name, writer_name)
pub.process_programmatic_settings(settings_spec, settings_overrides, config_section)
pub.set_source(source, source_path)
pub.set_destination(destination, destination_path)
output = pub.publish(enable_exit_status=enable_exit_status)
return (output, pub)
| [
"def",
"publish_programmatically",
"(",
"source_class",
",",
"source",
",",
"source_path",
",",
"destination_class",
",",
"destination",
",",
"destination_path",
",",
"reader",
",",
"reader_name",
",",
"parser",
",",
"parser_name",
",",
"writer",
",",
"writer_name",... | set up & run a publisher for custom programmatic use . | train | false |
38,770 | def preorder_traversal(task):
for item in task:
if istask(item):
for i in preorder_traversal(item):
(yield i)
elif isinstance(item, list):
(yield list)
for i in preorder_traversal(item):
(yield i)
else:
(yield item)
| [
"def",
"preorder_traversal",
"(",
"task",
")",
":",
"for",
"item",
"in",
"task",
":",
"if",
"istask",
"(",
"item",
")",
":",
"for",
"i",
"in",
"preorder_traversal",
"(",
"item",
")",
":",
"(",
"yield",
"i",
")",
"elif",
"isinstance",
"(",
"item",
","... | a generator to preorder-traverse a task . | train | false |
38,771 | def _find_baremetal_node(cs, node):
return utils.find_resource(cs.baremetal, node)
| [
"def",
"_find_baremetal_node",
"(",
"cs",
",",
"node",
")",
":",
"return",
"utils",
".",
"find_resource",
"(",
"cs",
".",
"baremetal",
",",
"node",
")"
] | get a node by id . | train | false |
38,772 | def activity_detail_list(context, data_dict):
model = context['model']
activity_id = _get_or_bust(data_dict, 'id')
activity_detail_objects = model.ActivityDetail.by_activity_id(activity_id)
return model_dictize.activity_detail_list_dictize(activity_detail_objects, context)
| [
"def",
"activity_detail_list",
"(",
"context",
",",
"data_dict",
")",
":",
"model",
"=",
"context",
"[",
"'model'",
"]",
"activity_id",
"=",
"_get_or_bust",
"(",
"data_dict",
",",
"'id'",
")",
"activity_detail_objects",
"=",
"model",
".",
"ActivityDetail",
".",
... | return an activitys list of activity detail items . | train | false |
38,774 | def group_by_node(notifications):
emails = NotificationsDict()
for notification in notifications:
emails.add_message(notification['node_lineage'], notification['message'])
return emails
| [
"def",
"group_by_node",
"(",
"notifications",
")",
":",
"emails",
"=",
"NotificationsDict",
"(",
")",
"for",
"notification",
"in",
"notifications",
":",
"emails",
".",
"add_message",
"(",
"notification",
"[",
"'node_lineage'",
"]",
",",
"notification",
"[",
"'me... | take list of notifications and group by node . | train | false |
38,777 | def p_struct_or_union_specifier_3(t):
pass
| [
"def",
"p_struct_or_union_specifier_3",
"(",
"t",
")",
":",
"pass"
] | struct_or_union_specifier : struct_or_union id . | train | false |
38,778 | def add_users(caller, role, *users):
_check_caller_authority(caller, role)
role.add_users(*users)
| [
"def",
"add_users",
"(",
"caller",
",",
"role",
",",
"*",
"users",
")",
":",
"_check_caller_authority",
"(",
"caller",
",",
"role",
")",
"role",
".",
"add_users",
"(",
"*",
"users",
")"
] | the caller requests adding the given users to the role . | train | false |
38,779 | def p_postfix_expression_4(t):
pass
| [
"def",
"p_postfix_expression_4",
"(",
"t",
")",
":",
"pass"
] | postfix_expression : postfix_expression lparen rparen . | train | false |
38,780 | def dmp_clear_denoms(f, u, K0, K1=None, convert=False):
if (not u):
return dup_clear_denoms(f, K0, K1, convert=convert)
if (K1 is None):
if K0.has_assoc_Ring:
K1 = K0.get_ring()
else:
K1 = K0
common = _rec_clear_denoms(f, u, K0, K1)
if (not K1.is_one(common)):
f = dmp_mul_ground(f, common, u, K0)
if (not convert):
return (common, f)
else:
return (common, dmp_convert(f, u, K0, K1))
| [
"def",
"dmp_clear_denoms",
"(",
"f",
",",
"u",
",",
"K0",
",",
"K1",
"=",
"None",
",",
"convert",
"=",
"False",
")",
":",
"if",
"(",
"not",
"u",
")",
":",
"return",
"dup_clear_denoms",
"(",
"f",
",",
"K0",
",",
"K1",
",",
"convert",
"=",
"convert... | clear denominators . | train | false |
38,781 | def getNumberOfIntersectionsToLeft(loop, point):
if (point == None):
return 0
numberOfIntersectionsToLeft = 0
for pointIndex in xrange(len(loop)):
firstPointComplex = loop[pointIndex]
secondPointComplex = loop[((pointIndex + 1) % len(loop))]
xIntersection = getXIntersectionIfExists(firstPointComplex, secondPointComplex, point.imag)
if (xIntersection != None):
if (xIntersection < point.real):
numberOfIntersectionsToLeft += 1
return numberOfIntersectionsToLeft
| [
"def",
"getNumberOfIntersectionsToLeft",
"(",
"loop",
",",
"point",
")",
":",
"if",
"(",
"point",
"==",
"None",
")",
":",
"return",
"0",
"numberOfIntersectionsToLeft",
"=",
"0",
"for",
"pointIndex",
"in",
"xrange",
"(",
"len",
"(",
"loop",
")",
")",
":",
... | get the number of intersections through the loop for the line going left . | train | false |
38,783 | @pytest.mark.skipif('not HAS_BEAUTIFUL_SOUP')
def test_htmlinputter():
f = 't/html.html'
with open(f) as fd:
table = fd.read()
inputter = html.HTMLInputter()
inputter.html = {}
expected = ['<tr><th>Column 1</th><th>Column 2</th><th>Column 3</th></tr>', '<tr><td>1</td><td>a</td><td>1.05</td></tr>', '<tr><td>2</td><td>b</td><td>2.75</td></tr>', '<tr><td>3</td><td>c</td><td>-1.25</td></tr>']
assert ([str(x) for x in inputter.get_lines(table)] == expected)
inputter.html = {'table_id': 4}
with pytest.raises(core.InconsistentTableError):
inputter.get_lines(table)
inputter.html['table_id'] = 'second'
expected = ['<tr><th>Column A</th><th>Column B</th><th>Column C</th></tr>', '<tr><td>4</td><td>d</td><td>10.5</td></tr>', '<tr><td>5</td><td>e</td><td>27.5</td></tr>', '<tr><td>6</td><td>f</td><td>-12.5</td></tr>']
assert ([str(x) for x in inputter.get_lines(table)] == expected)
inputter.html['table_id'] = 3
expected = ['<tr><th>C1</th><th>C2</th><th>C3</th></tr>', '<tr><td>7</td><td>g</td><td>105.0</td></tr>', '<tr><td>8</td><td>h</td><td>275.0</td></tr>', '<tr><td>9</td><td>i</td><td>-125.0</td></tr>']
assert ([str(x) for x in inputter.get_lines(table)] == expected)
| [
"@",
"pytest",
".",
"mark",
".",
"skipif",
"(",
"'not HAS_BEAUTIFUL_SOUP'",
")",
"def",
"test_htmlinputter",
"(",
")",
":",
"f",
"=",
"'t/html.html'",
"with",
"open",
"(",
"f",
")",
"as",
"fd",
":",
"table",
"=",
"fd",
".",
"read",
"(",
")",
"inputter"... | test to ensure that htmlinputter correctly converts input into a list of soupstrings representing table elements . | train | false |
38,784 | def validate_hatch(s):
if (not isinstance(s, six.string_types)):
raise ValueError(u'Hatch pattern must be a string')
unknown = (set(s) - {u'\\', u'/', u'|', u'-', u'+', u'*', u'.', u'x', u'o', u'O'})
if unknown:
raise ValueError((u'Unknown hatch symbol(s): %s' % list(unknown)))
return s
| [
"def",
"validate_hatch",
"(",
"s",
")",
":",
"if",
"(",
"not",
"isinstance",
"(",
"s",
",",
"six",
".",
"string_types",
")",
")",
":",
"raise",
"ValueError",
"(",
"u'Hatch pattern must be a string'",
")",
"unknown",
"=",
"(",
"set",
"(",
"s",
")",
"-",
... | validate a hatch pattern . | train | false |
38,785 | def rational_independent(terms, x):
if (not terms):
return []
ind = terms[0:1]
for t in terms[1:]:
n = t.as_independent(x)[1]
for (i, term) in enumerate(ind):
d = term.as_independent(x)[1]
q = (n / d).cancel()
if q.is_rational_function(x):
ind[i] += t
break
else:
ind.append(t)
return ind
| [
"def",
"rational_independent",
"(",
"terms",
",",
"x",
")",
":",
"if",
"(",
"not",
"terms",
")",
":",
"return",
"[",
"]",
"ind",
"=",
"terms",
"[",
"0",
":",
"1",
"]",
"for",
"t",
"in",
"terms",
"[",
"1",
":",
"]",
":",
"n",
"=",
"t",
".",
... | returns a list of all the rationally independent terms . | train | false |
38,786 | def getNumberOfIntersectionsToLeftOfLoops(loops, point):
totalNumberOfIntersectionsToLeft = 0
for loop in loops:
totalNumberOfIntersectionsToLeft += getNumberOfIntersectionsToLeft(loop, point)
return totalNumberOfIntersectionsToLeft
| [
"def",
"getNumberOfIntersectionsToLeftOfLoops",
"(",
"loops",
",",
"point",
")",
":",
"totalNumberOfIntersectionsToLeft",
"=",
"0",
"for",
"loop",
"in",
"loops",
":",
"totalNumberOfIntersectionsToLeft",
"+=",
"getNumberOfIntersectionsToLeft",
"(",
"loop",
",",
"point",
... | get the number of intersections through the loop for the line starting from the left point and going left . | train | false |
38,787 | def whiten_ar(x, ar_coefs):
rho = ar_coefs
x = np.array(x, np.float64)
if (x.ndim == 2):
rho = rho[:, None]
for i in range(self.order):
_x[(i + 1):] = (_x[(i + 1):] - (rho[i] * x[0:(- (i + 1))]))
return _x[self.order:]
| [
"def",
"whiten_ar",
"(",
"x",
",",
"ar_coefs",
")",
":",
"rho",
"=",
"ar_coefs",
"x",
"=",
"np",
".",
"array",
"(",
"x",
",",
"np",
".",
"float64",
")",
"if",
"(",
"x",
".",
"ndim",
"==",
"2",
")",
":",
"rho",
"=",
"rho",
"[",
":",
",",
"No... | whiten a series of columns according to an ar(p) covariance structure . | train | false |
38,789 | def pat_matrix(m, dx, dy, dz):
dxdy = ((- dx) * dy)
dydz = ((- dy) * dz)
dzdx = ((- dz) * dx)
dxdx = (dx ** 2)
dydy = (dy ** 2)
dzdz = (dz ** 2)
mat = (((dydy + dzdz), dxdy, dzdx), (dxdy, (dxdx + dzdz), dydz), (dzdx, dydz, (dydy + dxdx)))
return (m * Matrix(mat))
| [
"def",
"pat_matrix",
"(",
"m",
",",
"dx",
",",
"dy",
",",
"dz",
")",
":",
"dxdy",
"=",
"(",
"(",
"-",
"dx",
")",
"*",
"dy",
")",
"dydz",
"=",
"(",
"(",
"-",
"dy",
")",
"*",
"dz",
")",
"dzdx",
"=",
"(",
"(",
"-",
"dz",
")",
"*",
"dx",
... | returns the parallel axis theorem matrix to translate the inertia matrix a distance of for a body of mass m . | train | false |
38,791 | def trim_gentoo_mirrors(value):
return trim_var('GENTOO_MIRRORS', value)
| [
"def",
"trim_gentoo_mirrors",
"(",
"value",
")",
":",
"return",
"trim_var",
"(",
"'GENTOO_MIRRORS'",
",",
"value",
")"
] | remove a value from gentoo_mirrors variable in the make . | train | false |
38,793 | def get_visible_pages(request, pages, site=None):
pages = get_visible_page_objects(request, pages, site)
return [page.pk for page in pages]
| [
"def",
"get_visible_pages",
"(",
"request",
",",
"pages",
",",
"site",
"=",
"None",
")",
":",
"pages",
"=",
"get_visible_page_objects",
"(",
"request",
",",
"pages",
",",
"site",
")",
"return",
"[",
"page",
".",
"pk",
"for",
"page",
"in",
"pages",
"]"
] | returns the ids of all visible pages . | train | false |
38,796 | def test_hsl_to_rgb_part_17():
assert (hsl_to_rgb(240, 100, 0) == (0, 0, 0))
assert (hsl_to_rgb(240, 100, 10) == (0, 0, 51))
assert (hsl_to_rgb(240, 100, 20) == (0, 0, 102))
assert (hsl_to_rgb(240, 100, 30) == (0, 0, 153))
assert (hsl_to_rgb(240, 100, 40) == (0, 0, 204))
assert (hsl_to_rgb(240, 100, 50) == (0, 0, 255))
assert (hsl_to_rgb(240, 100, 60) == (51, 51, 255))
assert (hsl_to_rgb(240, 100, 70) == (102, 102, 255))
assert (hsl_to_rgb(240, 100, 80) == (153, 153, 255))
assert (hsl_to_rgb(240, 100, 90) == (204, 204, 255))
assert (hsl_to_rgb(240, 100, 100) == (255, 255, 255))
| [
"def",
"test_hsl_to_rgb_part_17",
"(",
")",
":",
"assert",
"(",
"hsl_to_rgb",
"(",
"240",
",",
"100",
",",
"0",
")",
"==",
"(",
"0",
",",
"0",
",",
"0",
")",
")",
"assert",
"(",
"hsl_to_rgb",
"(",
"240",
",",
"100",
",",
"10",
")",
"==",
"(",
"... | test hsl to rgb color function . | train | false |
38,797 | def program_exists(program):
proc = Popen(['which', program], stdout=PIPE, stderr=PIPE)
txt = proc.communicate()
if ((txt[0].strip() == '') and (txt[1].strip() == '')):
return False
if ((txt[0].strip() != '') and (txt[1].strip() == '')):
return True
return (not ((txt[1].strip() == '') or (txt[1].find(('no %s in' % program)) != (-1))))
| [
"def",
"program_exists",
"(",
"program",
")",
":",
"proc",
"=",
"Popen",
"(",
"[",
"'which'",
",",
"program",
"]",
",",
"stdout",
"=",
"PIPE",
",",
"stderr",
"=",
"PIPE",
")",
"txt",
"=",
"proc",
".",
"communicate",
"(",
")",
"if",
"(",
"(",
"txt",... | uses which to check if a program is installed . | train | false |
38,798 | def _get_zone(gcdns, zone_name, zone_id):
if (zone_id is not None):
try:
return gcdns.get_zone(zone_id)
except ZoneDoesNotExistError:
return None
available_zones = gcdns.iterate_zones()
found_zone = None
for zone in available_zones:
if (zone.domain == zone_name):
found_zone = zone
break
return found_zone
| [
"def",
"_get_zone",
"(",
"gcdns",
",",
"zone_name",
",",
"zone_id",
")",
":",
"if",
"(",
"zone_id",
"is",
"not",
"None",
")",
":",
"try",
":",
"return",
"gcdns",
".",
"get_zone",
"(",
"zone_id",
")",
"except",
"ZoneDoesNotExistError",
":",
"return",
"Non... | gets the zone object for a given domain name . | train | false |
38,799 | def get_urls():
try:
(__, __, port) = get_pid()
urls = []
for addr in get_ip_addresses():
urls.append('http://{}:{}/'.format(addr, port))
return (STATUS_RUNNING, urls)
except NotRunning as e:
return (e.status_code, [])
| [
"def",
"get_urls",
"(",
")",
":",
"try",
":",
"(",
"__",
",",
"__",
",",
"port",
")",
"=",
"get_pid",
"(",
")",
"urls",
"=",
"[",
"]",
"for",
"addr",
"in",
"get_ip_addresses",
"(",
")",
":",
"urls",
".",
"append",
"(",
"'http://{}:{}/'",
".",
"fo... | fetch a list of urls :returns: status_code . | train | false |
38,800 | def ip_host(value, options=None, version=None):
ipaddr_filter_out = _filter_ipaddr(value, options=options, version=version)
if (not ipaddr_filter_out):
return
return [ipaddress.ip_interface(ip_a) for ip_a in ipaddr_filter_out]
| [
"def",
"ip_host",
"(",
"value",
",",
"options",
"=",
"None",
",",
"version",
"=",
"None",
")",
":",
"ipaddr_filter_out",
"=",
"_filter_ipaddr",
"(",
"value",
",",
"options",
"=",
"options",
",",
"version",
"=",
"version",
")",
"if",
"(",
"not",
"ipaddr_f... | returns the interfaces ip address . | train | false |
38,801 | def fill_paragraphs(s, width, sep=u'\n'):
return sep.join((fill(p, width) for p in s.split(sep)))
| [
"def",
"fill_paragraphs",
"(",
"s",
",",
"width",
",",
"sep",
"=",
"u'\\n'",
")",
":",
"return",
"sep",
".",
"join",
"(",
"(",
"fill",
"(",
"p",
",",
"width",
")",
"for",
"p",
"in",
"s",
".",
"split",
"(",
"sep",
")",
")",
")"
] | fill paragraphs with newlines . | train | false |
38,802 | def _natsort_key_case_insensitive(item):
item = str(item).lower()
try:
chunks = re.split('(\\d+(?:\\.\\d+)?)', item)
except TypeError:
chunks = re.split('(\\d+(?:\\.\\d+)?)', item[0])
for ii in range(len(chunks)):
if (chunks[ii] and (chunks[ii][0] in '0123456789')):
if ('.' in chunks[ii]):
numtype = float
else:
numtype = int
chunks[ii] = (0, numtype(chunks[ii]))
else:
chunks[ii] = (1, chunks[ii])
return (chunks, item)
| [
"def",
"_natsort_key_case_insensitive",
"(",
"item",
")",
":",
"item",
"=",
"str",
"(",
"item",
")",
".",
"lower",
"(",
")",
"try",
":",
"chunks",
"=",
"re",
".",
"split",
"(",
"'(\\\\d+(?:\\\\.\\\\d+)?)'",
",",
"item",
")",
"except",
"TypeError",
":",
"... | provides normalized version of item for sorting with digits . | train | false |
38,803 | def skeletonize(image):
image = image.astype(np.uint8)
if (image.ndim != 2):
raise ValueError('Skeletonize requires a 2D array')
if (not np.all(np.in1d(image.flat, (0, 1)))):
raise ValueError('Image contains values other than 0 and 1')
return _fast_skeletonize(image)
| [
"def",
"skeletonize",
"(",
"image",
")",
":",
"image",
"=",
"image",
".",
"astype",
"(",
"np",
".",
"uint8",
")",
"if",
"(",
"image",
".",
"ndim",
"!=",
"2",
")",
":",
"raise",
"ValueError",
"(",
"'Skeletonize requires a 2D array'",
")",
"if",
"(",
"no... | return the skeleton of a binary image . | train | false |
38,805 | def elemwise_mul(a, b):
return (a * b)
| [
"def",
"elemwise_mul",
"(",
"a",
",",
"b",
")",
":",
"return",
"(",
"a",
"*",
"b",
")"
] | a: a theano matrix b: a theano matrix returns the elementwise product of a and b . | train | false |
38,806 | def find_or_create_node(branch, key):
for node in branch:
if (not isinstance(node, dict)):
continue
if (key in node):
return node[key]
new_branch = []
node = {key: new_branch}
branch.append(node)
return new_branch
| [
"def",
"find_or_create_node",
"(",
"branch",
",",
"key",
")",
":",
"for",
"node",
"in",
"branch",
":",
"if",
"(",
"not",
"isinstance",
"(",
"node",
",",
"dict",
")",
")",
":",
"continue",
"if",
"(",
"key",
"in",
"node",
")",
":",
"return",
"node",
... | given a list . | train | false |
38,809 | def test_import_error_reporting():
    """Make sure compiling invalid Hy import syntax is reported as HyTypeError."""
    def _import_error_test():
        # '(import "sys")' is invalid Hy (string module name) and must raise.
        try:
            import_buffer_to_ast('(import "sys")', '')
        except HyTypeError:
            return 'Error reported'
    assert (_import_error_test() == 'Error reported')
    assert (_import_error_test() is not None)
| [
"def",
"test_import_error_reporting",
"(",
")",
":",
"def",
"_import_error_test",
"(",
")",
":",
"try",
":",
"import_buffer_to_ast",
"(",
"'(import \"sys\")'",
",",
"''",
")",
"except",
"HyTypeError",
":",
"return",
"'Error reported'",
"assert",
"(",
"_import_error_... | make sure that reports errors correctly . | train | false |
def get_entry_ids(entry):
    """Build a dict of external ids (trakt/tmdb/tvdb/imdb/tvrage) from an entry.

    First tries non-lazy fields; only when none are present does it fall
    back to evaluating lazy fields.  The trakt id is taken from the first
    available of movie, show, then episode id.
    """
    trakt_fields = (u'trakt_movie_id', u'trakt_show_id', u'trakt_episode_id')
    simple_fields = (
        (u'tmdb', u'tmdb_id'),
        (u'tvdb', u'tvdb_id'),
        (u'imdb', u'imdb_id'),
        (u'tvrage', u'tvrage_id'),
    )
    ids = {}
    for lazy in (False, True):
        for field in trakt_fields:
            if entry.get(field, eval_lazy=lazy):
                ids[u'trakt'] = entry[field]
                break
        for key, field in simple_fields:
            if entry.get(field, eval_lazy=lazy):
                ids[key] = entry[field]
        if ids:
            break
    return ids
| [
"def",
"get_entry_ids",
"(",
"entry",
")",
":",
"ids",
"=",
"{",
"}",
"for",
"lazy",
"in",
"[",
"False",
",",
"True",
"]",
":",
"if",
"entry",
".",
"get",
"(",
"u'trakt_movie_id'",
",",
"eval_lazy",
"=",
"lazy",
")",
":",
"ids",
"[",
"u'trakt'",
"]... | creates a trakt ids dict from id fields on an entry . | train | false |
38,814 | def _parse_life_span_string(life_span_string):
try:
life_span = int(life_span_string)
except:
life_span = _convert_life_span_string(life_span_string)
if (life_span <= 0):
raise Exception('Life span must be greater than 0')
return life_span
| [
"def",
"_parse_life_span_string",
"(",
"life_span_string",
")",
":",
"try",
":",
"life_span",
"=",
"int",
"(",
"life_span_string",
")",
"except",
":",
"life_span",
"=",
"_convert_life_span_string",
"(",
"life_span_string",
")",
"if",
"(",
"life_span",
"<=",
"0",
... | parses a life span string . | train | false |
def import_book(stream):
    """Return a Databook built from *stream*, or None when the detected
    format does not support book import."""
    format, stream = detect(stream)
    try:
        book = Databook()
        format.import_book(book, stream)
    except AttributeError:
        # The detected format has no import_book capability.
        return None
    return book
| [
"def",
"import_book",
"(",
"stream",
")",
":",
"(",
"format",
",",
"stream",
")",
"=",
"detect",
"(",
"stream",
")",
"try",
":",
"databook",
"=",
"Databook",
"(",
")",
"format",
".",
"import_book",
"(",
"databook",
",",
"stream",
")",
"return",
"databo... | return dataset of given stream . | train | false |
def min_dist(coord, surface):
    """Return the minimum Euclidean distance between *coord* and the
    points (rows) of *surface*."""
    delta = surface - coord
    sq_dists = (delta * delta).sum(axis=1)
    return numpy.sqrt(sq_dists.min())
| [
"def",
"min_dist",
"(",
"coord",
",",
"surface",
")",
":",
"d",
"=",
"(",
"surface",
"-",
"coord",
")",
"d2",
"=",
"numpy",
".",
"sum",
"(",
"(",
"d",
"*",
"d",
")",
",",
"1",
")",
"return",
"numpy",
".",
"sqrt",
"(",
"min",
"(",
"d2",
")",
... | return minimum distance between coord and surface . | train | false |
38,820 | def ec2_snap_id_to_uuid(ec2_id):
    """Get the corresponding internal UUID for the given EC2-style snapshot id."""
    ctxt = context.get_admin_context()
    int_id = ec2_id_to_id(ec2_id)
    return get_snapshot_uuid_from_int_id(ctxt, int_id)
| [
"def",
"ec2_snap_id_to_uuid",
"(",
"ec2_id",
")",
":",
"ctxt",
"=",
"context",
".",
"get_admin_context",
"(",
")",
"int_id",
"=",
"ec2_id_to_id",
"(",
"ec2_id",
")",
"return",
"get_snapshot_uuid_from_int_id",
"(",
"ctxt",
",",
"int_id",
")"
] | get the corresponding uuid for the given ec2-id . | train | false |
def _get_bbox_regression_labels(bbox_target_data, num_classes):
    """Expand compact bbox regression targets into the network's 4-per-class form.

    bbox_target_data is an N x 5 array whose first column is the class
    label (stored as a float) and remaining columns are the 4 targets.
    Returns (bbox_targets, bbox_inside_weights), each N x 4K, with the
    targets written into the 4-column slot of each row's class.
    """
    clss = bbox_target_data[:, 0]
    bbox_targets = np.zeros((clss.size, 4 * num_classes), dtype=np.float32)
    bbox_inside_weights = np.zeros(bbox_targets.shape, dtype=np.float32)
    inds = np.where(clss > 0)[0]
    for ind in inds:
        # Class labels live in a float array; slice bounds must be ints
        # (modern NumPy raises on float indices).
        cls = int(clss[ind])
        start = 4 * cls
        end = start + 4
        bbox_targets[ind, start:end] = bbox_target_data[ind, 1:]
        bbox_inside_weights[ind, start:end] = cfg.TRAIN.BBOX_INSIDE_WEIGHTS
    return (bbox_targets, bbox_inside_weights)
| [
"def",
"_get_bbox_regression_labels",
"(",
"bbox_target_data",
",",
"num_classes",
")",
":",
"clss",
"=",
"bbox_target_data",
"[",
":",
",",
"0",
"]",
"bbox_targets",
"=",
"np",
".",
"zeros",
"(",
"(",
"clss",
".",
"size",
",",
"(",
"4",
"*",
"num_classes"... | bounding-box regression targets are stored in a compact form in the roidb . | train | false |
38,822 | def volume_absent(name, driver=None):
    """Ensure that the Docker volume *name* does not exist.

    name
        Name of the volume to remove.
    driver
        Accepted but unused here; presumably kept for interface symmetry
        with volume_present -- TODO confirm.

    Returns a standard Salt state dict (name/changes/result/comment).
    """
    ret = {'name': name, 'changes': {}, 'result': False, 'comment': ''}
    volume = _find_volume(name)
    if (not volume):
        ret['result'] = True
        ret['comment'] = "Volume '{0}' already absent".format(name)
        return ret
    try:
        ret['changes']['removed'] = __salt__['dockerng.remove_volume'](name)
        ret['result'] = True
    except Exception as exc:
        # Removal failure is reported in the comment; result stays False.
        ret['comment'] = "Failed to remove volume '{0}': {1}".format(name, exc)
    return ret
| [
"def",
"volume_absent",
"(",
"name",
",",
"driver",
"=",
"None",
")",
":",
"ret",
"=",
"{",
"'name'",
":",
"name",
",",
"'changes'",
":",
"{",
"}",
",",
"'result'",
":",
"False",
",",
"'comment'",
":",
"''",
"}",
"volume",
"=",
"_find_volume",
"(",
... | check that a block volume exists . | train | true |
38,824 | def unlock_keychain(keychain, password):
    """Unlock *keychain* with *password* via the macOS ``security`` tool.

    NOTE(review): the password is interpolated unquoted into a shell
    command line, so it is visible in the process list and special
    characters would break (or inject into) the command -- confirm
    whether callers sanitize it.
    """
    cmd = 'security unlock-keychain -p {0} {1}'.format(password, keychain)
    __salt__['cmd.run'](cmd)
| [
"def",
"unlock_keychain",
"(",
"keychain",
",",
"password",
")",
":",
"cmd",
"=",
"'security unlock-keychain -p {0} {1}'",
".",
"format",
"(",
"password",
",",
"keychain",
")",
"__salt__",
"[",
"'cmd.run'",
"]",
"(",
"cmd",
")"
] | unlock the given keychain with the password keychain the keychain to unlock password the password to use to unlock the keychain . | train | false |
38,825 | def create_vm(session, instance, vm_folder, config_spec, res_pool_ref):
    """Create a VM on the ESX host and return the create task's result.

    On VMwareDriverException the exception is re-raised; when the guest OS
    type in *config_spec* is not a recognised value, a warning is logged
    first since an invalid OS type is a likely cause of the failure.
    """
    LOG.debug('Creating VM on the ESX host', instance=instance)
    vm_create_task = session._call_method(session.vim, 'CreateVM_Task', vm_folder, config=config_spec, pool=res_pool_ref)
    try:
        task_info = session._wait_for_task(vm_create_task)
    except vexc.VMwareDriverException:
        with excutils.save_and_reraise_exception():
            if (config_spec.guestId not in constants.VALID_OS_TYPES):
                LOG.warning(_LW("vmware_ostype from image is not recognised: '%(ostype)s'. An invalid os type may be one cause of this instance creation failure"), {'ostype': config_spec.guestId})
    LOG.debug('Created VM on the ESX host', instance=instance)
    return task_info.result
| [
"def",
"create_vm",
"(",
"session",
",",
"instance",
",",
"vm_folder",
",",
"config_spec",
",",
"res_pool_ref",
")",
":",
"LOG",
".",
"debug",
"(",
"'Creating VM on the ESX host'",
",",
"instance",
"=",
"instance",
")",
"vm_create_task",
"=",
"session",
".",
"... | create vm on esx host . | train | false |
38,828 | def _unpack_actions(b, length, offset=0):
    """Parse OpenFlow actions from buffer *b*.

    Reads *length* bytes of actions starting at *offset*.  Each action
    begins with a network-order (type, length) header; unknown type codes
    fall back to ofp_action_generic.  Returns (new_offset, actions) and
    raises UnderrunError when the buffer is shorter than declared.
    """
    if ((len(b) - offset) < length):
        raise UnderrunError
    actions = []
    end = (length + offset)
    while (offset < end):
        (t, l) = struct.unpack_from('!HH', b, offset)
        if ((len(b) - offset) < l):
            raise UnderrunError
        a = _action_type_to_class.get(t)
        if (a is None):
            # Unknown action type: keep the raw bytes in a generic wrapper.
            a = ofp_action_generic()
        else:
            a = a()
        a.unpack(b[offset:(offset + l)])
        assert (len(a) == l)
        actions.append(a)
        offset += l
    return (offset, actions)
| [
"def",
"_unpack_actions",
"(",
"b",
",",
"length",
",",
"offset",
"=",
"0",
")",
":",
"if",
"(",
"(",
"len",
"(",
"b",
")",
"-",
"offset",
")",
"<",
"length",
")",
":",
"raise",
"UnderrunError",
"actions",
"=",
"[",
"]",
"end",
"=",
"(",
"length"... | parses actions from a buffer b is a buffer offset . | train | false |
def load_sprite_image(img_path, rows_cols, n_sprites=None):
    """Load a 2D sprite sheet where (rows, cols) = rows_cols.

    The image at *img_path* is a grid of rows x cols equally sized
    sprites; the first *n_sprites* of them (row-major order) are returned
    as an array of shape (n_sprites, sprite_height, sprite_width, channels).
    When *n_sprites* is None, all rows*cols sprites are returned.
    """
    rows, cols = rows_cols
    if n_sprites is None:
        n_sprites = rows * cols
    img = caffe_load_image(img_path, color=True, as_uint=True)
    # Bug fix: the original assert messages ended in a bare '%' instead of
    # '%s', so a failing assert raised a formatting ValueError rather than
    # showing the intended message.
    assert img.shape[0] % rows == 0, ('sprite image has shape %s which is not divisible by rows_cols %s' % (img.shape, rows_cols))
    assert img.shape[1] % cols == 0, ('sprite image has shape %s which is not divisible by rows_cols %s' % (img.shape, rows_cols))
    sprite_height = img.shape[0] / rows
    sprite_width = img.shape[1] / cols
    sprite_channels = img.shape[2]
    ret = np.zeros((n_sprites, sprite_height, sprite_width, sprite_channels), dtype=img.dtype)
    for idx in xrange(n_sprites):
        ii = idx / cols
        jj = idx % cols
        ret[idx] = img[(ii * sprite_height):((ii + 1) * sprite_height), (jj * sprite_width):((jj + 1) * sprite_width), :]
    return ret
| [
"def",
"load_sprite_image",
"(",
"img_path",
",",
"rows_cols",
",",
"n_sprites",
"=",
"None",
")",
":",
"(",
"rows",
",",
"cols",
")",
"=",
"rows_cols",
"if",
"(",
"n_sprites",
"is",
"None",
")",
":",
"n_sprites",
"=",
"(",
"rows",
"*",
"cols",
")",
... | load a 2d sprite image where = rows_cols . | train | false |
38,830 | def _ListUnion(list_1, list_2):
if (not (isinstance(list_1, list) and isinstance(list_2, list))):
raise TypeError('Arguments must be lists.')
union = []
for x in (list_1 + list_2):
if (x not in union):
union.append(x)
return union
| [
"def",
"_ListUnion",
"(",
"list_1",
",",
"list_2",
")",
":",
"if",
"(",
"not",
"(",
"isinstance",
"(",
"list_1",
",",
"list",
")",
"and",
"isinstance",
"(",
"list_2",
",",
"list",
")",
")",
")",
":",
"raise",
"TypeError",
"(",
"'Arguments must be lists.'... | returns the union of two lists . | train | false |
def check_doubled_words(physical_line, filename):
    """N343: flag common doubled-word typos (e.g. "the the") in a line.

    Returns (0, message) when a doubled word is found, otherwise None.
    """
    match = re.search(doubled_words_re, physical_line)
    if match is None:
        return None
    return (0, "N343: Doubled word '%(word)s' typo found" % {'word': match.group(1)})
| [
"def",
"check_doubled_words",
"(",
"physical_line",
",",
"filename",
")",
":",
"msg",
"=",
"\"N343: Doubled word '%(word)s' typo found\"",
"match",
"=",
"re",
".",
"search",
"(",
"doubled_words_re",
",",
"physical_line",
")",
"if",
"match",
":",
"return",
"(",
"0"... | check for the common doubled-word typos n343 . | train | false |
38,832 | def _comp_sort_keys(c):
return (int(c['coeff_type']), int(c['scanno']))
| [
"def",
"_comp_sort_keys",
"(",
"c",
")",
":",
"return",
"(",
"int",
"(",
"c",
"[",
"'coeff_type'",
"]",
")",
",",
"int",
"(",
"c",
"[",
"'scanno'",
"]",
")",
")"
] | sort the compensation data . | train | false |
38,836 | def depends(**dependencies):
    """Return a decorator declaring the field dependencies of a "compute" method.

    Each keyword maps an argument name to a dependency spec.  At call
    time, any dependency the caller did not pass explicitly is resolved
    through get_dependency_for() and merged into the keyword arguments.
    """
    def decorator(func):
        @wraps(func)
        def wrapper(*a, **explicit):
            # Only resolve dependencies not supplied by the caller.
            deps = dict([(k, get_dependency_for(k, v)) for (k, v) in dependencies.iteritems() if (k not in explicit)])
            return func(*a, **union(deps, explicit))
        return wrapper
    return decorator
| [
"def",
"depends",
"(",
"**",
"dependencies",
")",
":",
"def",
"decorator",
"(",
"func",
")",
":",
"@",
"wraps",
"(",
"func",
")",
"def",
"wrapper",
"(",
"*",
"a",
",",
"**",
"explicit",
")",
":",
"deps",
"=",
"dict",
"(",
"[",
"(",
"k",
",",
"g... | return a decorator that specifies the field dependencies of a "compute" method . | train | false |
38,839 | def filter_comments(tree):
    """Filter comment nodes from a parsed nginx configuration tree.

    Drops '#' comment entries and whitespace tokens, recursing into block
    sections (entries whose key is itself a list).
    """
    def traverse(tree):
        'Generator dropping comment nodes'
        for entry in tree:
            spaceless = [e for e in entry if (not nginxparser.spacey(e))]
            if spaceless:
                key = spaceless[0]
                values = (spaceless[1] if (len(spaceless) > 1) else None)
            else:
                key = values = ''
            if isinstance(key, list):
                # Block section: keep it but filter its body recursively.
                new = copy.deepcopy(entry)
                new[1] = filter_comments(values)
                (yield new)
            elif ((key != '#') and spaceless):
                (yield spaceless)
    return list(traverse(tree))
| [
"def",
"filter_comments",
"(",
"tree",
")",
":",
"def",
"traverse",
"(",
"tree",
")",
":",
"for",
"entry",
"in",
"tree",
":",
"spaceless",
"=",
"[",
"e",
"for",
"e",
"in",
"entry",
"if",
"(",
"not",
"nginxparser",
".",
"spacey",
"(",
"e",
")",
")",... | filter comment nodes from parsed configurations . | train | false |
def get_default_stylesheet_location():
    """Return the path of the default CSS file installed next to this module.

    Raises Exception when the file is missing.
    """
    import os
    css_path = os.path.join(os.path.dirname(__file__), 'default.css')
    if not os.path.exists(css_path):
        raise Exception('Default CSS file could not be found at %s' % css_path)
    return css_path
| [
"def",
"get_default_stylesheet_location",
"(",
")",
":",
"import",
"os",
"css_file",
"=",
"os",
".",
"path",
".",
"join",
"(",
"os",
".",
"path",
".",
"dirname",
"(",
"__file__",
")",
",",
"'default.css'",
")",
"if",
"(",
"not",
"os",
".",
"path",
".",... | get_default_stylesheet_location() => file path returns the path of the default cascading style sheet file that is installed with the rest of the silvercity package . | train | false |
38,844 | def make_basic(app, global_conf, realm, authfunc, **kw):
    """Paste filter factory granting access via Basic authentication.

    Config looks like this::

        [filter:grant]
        use = egg:paste#auth_basic
        realm = myrealm
        authfunc = somepackage.somemodule:somefunction

    *authfunc* is a dotted-name string resolved to the credential check
    function.
    """
    from paste.util.import_string import eval_import
    import types
    authfunc = eval_import(authfunc)
    # NOTE(review): ``assert`` is stripped under ``python -O``, and this
    # check rejects non-function callables -- confirm both are intended.
    assert isinstance(authfunc, types.FunctionType), 'authfunc must resolve to a function'
    return AuthBasicHandler(app, realm, authfunc)
| [
"def",
"make_basic",
"(",
"app",
",",
"global_conf",
",",
"realm",
",",
"authfunc",
",",
"**",
"kw",
")",
":",
"from",
"paste",
".",
"util",
".",
"import_string",
"import",
"eval_import",
"import",
"types",
"authfunc",
"=",
"eval_import",
"(",
"authfunc",
... | grant access via basic authentication config looks like this:: [filter:grant] use = egg:paste#auth_basic realm=myrealm authfunc=somepackage . | train | false |
38,845 | def h5_deconv_data(filename):
    """Read deconv visualization data from an HDF5 file.

    Returns a list of (layer_name, layer_data) tuples, where layer_data
    is a list of (feature_map_index, deconv_rgba, source_img_rgba)
    tuples, or None when the file has no 'deconv' group.
    """
    ret = list()
    with h5py.File(filename, 'r') as f:
        if ('deconv' not in list(f.keys())):
            return None
        act_data = f['deconv/max_act']
        img_data = f['deconv/img']
        for layer in list(act_data.keys()):
            layer_data = list()
            for fm in range(act_data[layer]['vis'].shape[0]):
                # Locate the cached source image that maximally activated
                # this feature map.
                (batch_ind, img_ind) = act_data[layer]['batch_img'][fm]
                img_store = img_data['batch_{}'.format(batch_ind)]
                img_cache_ofs = img_store.attrs[str(img_ind)]
                plot_img = convert_rgb_to_bokehrgba(img_store['HWC_uint8'][:, :, :, img_cache_ofs])
                plot_deconv = convert_rgb_to_bokehrgba(act_data[layer]['vis'][fm])
                layer_data.append((fm, plot_deconv, plot_img))
            ret.append((layer, layer_data))
    return ret
| [
"def",
"h5_deconv_data",
"(",
"filename",
")",
":",
"ret",
"=",
"list",
"(",
")",
"with",
"h5py",
".",
"File",
"(",
"filename",
",",
"'r'",
")",
"as",
"f",
":",
"if",
"(",
"'deconv'",
"not",
"in",
"list",
"(",
"f",
".",
"keys",
"(",
")",
")",
"... | read deconv visualization data from hdf5 file . | train | false |
38,846 | def _log_record_from_hadoop(record):
    """Log a log4j record parsed from Hadoop stderr, skipping counter
    records (which are handled elsewhere)."""
    if (not _is_counter_log4j_record(record)):
        # Map the record's level name (e.g. 'INFO') to a logging constant;
        # unknown or missing levels fall back to None.
        level = getattr(logging, (record.get('level') or ''), None)
        _log_line_from_hadoop(record['message'], level=level)
| [
"def",
"_log_record_from_hadoop",
"(",
"record",
")",
":",
"if",
"(",
"not",
"_is_counter_log4j_record",
"(",
"record",
")",
")",
":",
"level",
"=",
"getattr",
"(",
"logging",
",",
"(",
"record",
".",
"get",
"(",
"'level'",
")",
"or",
"''",
")",
",",
"... | log log4j record parsed from hadoop stderr . | train | false |
38,848 | @json_view
@non_atomic_requests
def ajax_search(request):
    """JSON endpoint returning add-ons for the request's query; currently
    used only to populate a new collection."""
    search_obj = BaseAjaxSearch(request)
    search_obj.types = amo.ADDON_SEARCH_TYPES
    return search_obj.items
| [
"@",
"json_view",
"@",
"non_atomic_requests",
"def",
"ajax_search",
"(",
"request",
")",
":",
"search_obj",
"=",
"BaseAjaxSearch",
"(",
"request",
")",
"search_obj",
".",
"types",
"=",
"amo",
".",
"ADDON_SEARCH_TYPES",
"return",
"search_obj",
".",
"items"
] | this is currently used only to return add-ons for populating a new collection . | train | false |
38,849 | def _linear_jac(t, y, a):
    """Jacobian of the linear right-hand side a * y: the constant a."""
    return a
| [
"def",
"_linear_jac",
"(",
"t",
",",
"y",
",",
"a",
")",
":",
"return",
"a"
] | jacobian of a * y is a . | train | false |
38,851 | def get_oauth_url(scope, redirect_uri, extra_params=None):
    """Return the Facebook OAuth dialog URL for the given scope and redirect_uri.

    NOTE(review): *extra_params* is accepted but never used -- confirm
    whether it should be merged into the query string.
    """
    scope = parse_scope(scope)
    query_dict = QueryDict('', True)
    query_dict['scope'] = ','.join(scope)
    query_dict['client_id'] = facebook_settings.FACEBOOK_APP_ID
    query_dict['redirect_uri'] = redirect_uri
    oauth_url = 'https://www.facebook.com/dialog/oauth?'
    oauth_url += query_dict.urlencode()
    return oauth_url
| [
"def",
"get_oauth_url",
"(",
"scope",
",",
"redirect_uri",
",",
"extra_params",
"=",
"None",
")",
":",
"scope",
"=",
"parse_scope",
"(",
"scope",
")",
"query_dict",
"=",
"QueryDict",
"(",
"''",
",",
"True",
")",
"query_dict",
"[",
"'scope'",
"]",
"=",
"'... | returns the oauth url for the given scope and redirect_uri . | train | false |
38,852 | def _filterout_legal(lines):
return [line for line in lines[5:] if ((not ('This experiment was created using PsychoPy2 Experiment Builder (' in line)) and (not (('trialList=data.importConditions(' in line) and (".xlsx'))" in line))))]
| [
"def",
"_filterout_legal",
"(",
"lines",
")",
":",
"return",
"[",
"line",
"for",
"line",
"in",
"lines",
"[",
"5",
":",
"]",
"if",
"(",
"(",
"not",
"(",
"'This experiment was created using PsychoPy2 Experiment Builder ('",
"in",
"line",
")",
")",
"and",
"(",
... | ignore first 5 lines: header info . | train | false |
38,853 | def get_training_roidb(imdb):
    """Return a roidb (region-of-interest database) for use in training.

    Optionally augments the imdb with horizontally flipped copies of its
    images, then precomputes roidb metadata.
    """
    if cfg.TRAIN.USE_FLIPPED:
        print 'Appending horizontally-flipped training examples...'
        imdb.append_flipped_images()
        print 'done'
    print 'Preparing training data...'
    rdl_roidb.prepare_roidb(imdb)
    print 'done'
    return imdb.roidb
| [
"def",
"get_training_roidb",
"(",
"imdb",
")",
":",
"if",
"cfg",
".",
"TRAIN",
".",
"USE_FLIPPED",
":",
"print",
"'Appending horizontally-flipped training examples...'",
"imdb",
".",
"append_flipped_images",
"(",
")",
"print",
"'done'",
"print",
"'Preparing training dat... | returns a roidb for use in training . | train | false |
38,854 | def construct_parser(magic_func):
    """Construct an argument parser using the function's decorations.

    Decorators recorded on *magic_func* (in ``.decorators``) register the
    parser's arguments and groups.  As a side effect the function's
    ``__doc__`` is replaced with the parser's formatted help text.
    """
    kwds = getattr(magic_func, 'argcmd_kwds', {})
    if ('description' not in kwds):
        kwds['description'] = getattr(magic_func, '__doc__', None)
    arg_name = real_name(magic_func)
    parser = MagicArgumentParser(arg_name, **kwds)
    group = None
    # Decorators apply bottom-up, so walk them in reverse order.
    for deco in magic_func.decorators[::(-1)]:
        result = deco.add_to_parser(parser, group)
        if (result is not None):
            group = result
    magic_func.__doc__ = parser.format_help()
    return parser
| [
"def",
"construct_parser",
"(",
"magic_func",
")",
":",
"kwds",
"=",
"getattr",
"(",
"magic_func",
",",
"'argcmd_kwds'",
",",
"{",
"}",
")",
"if",
"(",
"'description'",
"not",
"in",
"kwds",
")",
":",
"kwds",
"[",
"'description'",
"]",
"=",
"getattr",
"("... | construct an argument parser using the function decorations . | train | true |
def repeat_first_axis(array, count):
    """Return a zero-copy strided view of *array* repeated *count* times
    along a new first axis (stride 0, so all repeats share memory)."""
    new_shape = (count,) + array.shape
    new_strides = (0,) + array.strides
    return as_strided(array, new_shape, new_strides)
| [
"def",
"repeat_first_axis",
"(",
"array",
",",
"count",
")",
":",
"return",
"as_strided",
"(",
"array",
",",
"(",
"(",
"count",
",",
")",
"+",
"array",
".",
"shape",
")",
",",
"(",
"(",
"0",
",",
")",
"+",
"array",
".",
"strides",
")",
")"
] | restride array to repeat count times along the first axis . | train | true |
def build_request_type(direction, type, recipient):
    """Compose a bmRequestType byte by OR-ing the direction, type and
    recipient bit fields of a USB control request."""
    return direction | type | recipient
| [
"def",
"build_request_type",
"(",
"direction",
",",
"type",
",",
"recipient",
")",
":",
"return",
"(",
"(",
"recipient",
"|",
"type",
")",
"|",
"direction",
")"
] | build a bmrequesttype field for control requests . | train | false |
38,857 | def identity(x):
    """Identity function: return *x* unchanged."""
    return x
| [
"def",
"identity",
"(",
"x",
")",
":",
"return",
"x"
] | identity function . | train | false |
def name_lookup(qualified_name):
    """Return the object referenced by a dotted qualified name.

    Bare names (no dot) are resolved against __builtin__ (Python 2).
    """
    if '.' not in qualified_name:
        qualified_name = '__builtin__.' + qualified_name
    module_name, attr_name = qualified_name.rsplit('.', 1)
    module = __import__(module_name, fromlist=[attr_name])
    return getattr(module, attr_name)
| [
"def",
"name_lookup",
"(",
"qualified_name",
")",
":",
"if",
"(",
"'.'",
"not",
"in",
"qualified_name",
")",
":",
"qualified_name",
"=",
"(",
"'__builtin__.'",
"+",
"qualified_name",
")",
"(",
"module_name",
",",
"class_name",
")",
"=",
"qualified_name",
".",
... | return the object referenced by a qualified name . | train | false |
def Compilable(filename):
    """Return True if *filename* ends with one of the compilable extensions."""
    # str.endswith accepts a tuple of suffixes, replacing the manual
    # generator-and-flag loop with a single call.
    return filename.endswith(tuple(COMPILABLE_EXTENSIONS))
| [
"def",
"Compilable",
"(",
"filename",
")",
":",
"for",
"res",
"in",
"(",
"filename",
".",
"endswith",
"(",
"e",
")",
"for",
"e",
"in",
"COMPILABLE_EXTENSIONS",
")",
":",
"if",
"res",
":",
"return",
"True",
"return",
"False"
] | return true if the file is compilable . | train | false |
38,861 | def bufsize_validator(kwargs):
    """Validate that custom in/out buffer sizes are not combined with pipe
    or TTY targets, where buffering is controlled elsewhere.

    Returns a list of ((offending_kwarg_names), error_message) tuples;
    an empty list means the kwargs are valid.
    """
    invalid = []
    in_ob = kwargs.get('in', None)
    out_ob = kwargs.get('out', None)
    in_buf = kwargs.get('in_bufsize', None)
    out_buf = kwargs.get('out_bufsize', None)
    in_no_buf = (ob_is_tty(in_ob) or ob_is_pipe(in_ob))
    out_no_buf = (ob_is_tty(out_ob) or ob_is_pipe(out_ob))
    err = "Can't specify an {target} bufsize if the {target} target is a pipe or TTY"
    if (in_no_buf and (in_buf is not None)):
        invalid.append((('in', 'in_bufsize'), err.format(target='in')))
    if (out_no_buf and (out_buf is not None)):
        invalid.append((('out', 'out_bufsize'), err.format(target='out')))
    return invalid
| [
"def",
"bufsize_validator",
"(",
"kwargs",
")",
":",
"invalid",
"=",
"[",
"]",
"in_ob",
"=",
"kwargs",
".",
"get",
"(",
"'in'",
",",
"None",
")",
"out_ob",
"=",
"kwargs",
".",
"get",
"(",
"'out'",
",",
"None",
")",
"in_buf",
"=",
"kwargs",
".",
"ge... | a validator to prevent a user from saying that they want custom buffering when theyre using an in/out object that will be os . | train | true |
def to_table(data, bins=None):
    """Convert raw 2-D data into a contingency table via np.histogramdd.

    bins=None treats values as categories (mapped to consecutive ints);
    a scalar *bins* uses that many unit-width bins centred on integers;
    otherwise *bins* is used directly as the bin edges.
    Returns (table, bin_edges).
    """
    data = np.asarray(data)
    n_rows, n_cols = data.shape
    if bins is None:
        # Map arbitrary category labels to consecutive integers first.
        cat_uni, cat_int = np.unique(data.ravel(), return_inverse=True)
        data_ = cat_int.reshape(data.shape)
        bins_ = np.arange(len(cat_uni) + 1) - 0.5
    elif np.isscalar(bins):
        data_ = data
        bins_ = np.arange(bins + 1) - 0.5
    else:
        data_ = data
        bins_ = bins
    counts, _ = np.histogramdd(data_, (bins_,) * n_cols)
    return (counts, bins_)
| [
"def",
"to_table",
"(",
"data",
",",
"bins",
"=",
"None",
")",
":",
"data",
"=",
"np",
".",
"asarray",
"(",
"data",
")",
"(",
"n_rows",
",",
"n_cols",
")",
"=",
"data",
".",
"shape",
"if",
"(",
"bins",
"is",
"None",
")",
":",
"(",
"cat_uni",
",... | convert raw data with shape to brings data into correct format for cohens_kappa parameters data : array_like . | train | false |
38,863 | def postBuildStatic(static):
    """Hook called after building a static file; the default does nothing."""
    pass
| [
"def",
"postBuildStatic",
"(",
"static",
")",
":",
"pass"
] | called after building a static file . | train | false |
38,864 | def _get_user_agent():
    """Return the User-Agent header value for update requests.

    Joins product tokens for the SDK release ('unknown' when the version
    object is unavailable), the platform, and the running Python version.
    """
    product_tokens = []
    version = sdk_update_checker.GetVersionObject()
    if (version is None):
        release = 'unknown'
    else:
        release = version['release']
    product_tokens.append(('%s/%s' % (SDK_PRODUCT, release)))
    product_tokens.append(appengine_rpc.GetPlatformToken())
    python_version = '.'.join((str(i) for i in sys.version_info))
    product_tokens.append(('Python/%s' % python_version))
    return ' '.join(product_tokens)
| [
"def",
"_get_user_agent",
"(",
")",
":",
"product_tokens",
"=",
"[",
"]",
"version",
"=",
"sdk_update_checker",
".",
"GetVersionObject",
"(",
")",
"if",
"(",
"version",
"is",
"None",
")",
":",
"release",
"=",
"'unknown'",
"else",
":",
"release",
"=",
"vers... | returns the value of the user-agent header to use for update requests . | train | false |
38,865 | def delete_datasource(datasourceid, orgname=None, profile='grafana'):
    """Delete a Grafana datasource by its numeric id.

    *profile* may be a Salt config profile name or an already-resolved
    dict.  *orgname* is accepted but unused here.  Raises
    requests.HTTPError on HTTP error responses; returns the decoded JSON
    body otherwise.
    """
    if isinstance(profile, string_types):
        profile = __salt__['config.option'](profile)
    response = requests.delete('{0}/api/datasources/{1}'.format(profile['grafana_url'], datasourceid), auth=_get_auth(profile), headers=_get_headers(profile), timeout=profile.get('grafana_timeout', 3))
    if (response.status_code >= 400):
        response.raise_for_status()
    return response.json()
| [
"def",
"delete_datasource",
"(",
"datasourceid",
",",
"orgname",
"=",
"None",
",",
"profile",
"=",
"'grafana'",
")",
":",
"if",
"isinstance",
"(",
"profile",
",",
"string_types",
")",
":",
"profile",
"=",
"__salt__",
"[",
"'config.option'",
"]",
"(",
"profil... | delete a datasource . | train | true |
38,866 | @with_setup(reset)
def test_invalid_call():
    """register() must raise TypeError for no-argument and single
    positional-argument calls."""
    assert_raises(TypeError, register)
    assert_raises(TypeError, register, 'one-argument-only')
| [
"@",
"with_setup",
"(",
"reset",
")",
"def",
"test_invalid_call",
"(",
")",
":",
"assert_raises",
"(",
"TypeError",
",",
"register",
")",
"assert_raises",
"(",
"TypeError",
",",
"register",
",",
"'one-argument-only'",
")"
] | test calling register with an invalid syntax . | train | false |
38,868 | @register.filter()
def startswith(value, arg):
    """Template filter: test whether str(value) starts with *arg*."""
    return str(value).startswith(arg)
| [
"@",
"register",
".",
"filter",
"(",
")",
"def",
"startswith",
"(",
"value",
",",
"arg",
")",
":",
"return",
"str",
"(",
"value",
")",
".",
"startswith",
"(",
"arg",
")"
] | test whether a string starts with the given argument . | train | false |
def is_bogus_ip(ip):
    """Return True if *ip* is a bogus address: unspecified (0.x.x.x) or
    loopback (127.x.x.x)."""
    # str.startswith accepts a tuple of prefixes; one call replaces the
    # chained ``or``.
    return ip.startswith(('0.', '127.'))
| [
"def",
"is_bogus_ip",
"(",
"ip",
")",
":",
"return",
"(",
"ip",
".",
"startswith",
"(",
"'0.'",
")",
"or",
"ip",
".",
"startswith",
"(",
"'127.'",
")",
")"
] | checks if the given ip address is bogus . | train | false |
38,871 | @cronjobs.register
def enqueue_lag_monitor_task():
    """Cron entry point: fire a task that measures the queue lag, stamped
    with the enqueue time."""
    measure_queue_lag.delay(datetime.now())
| [
"@",
"cronjobs",
".",
"register",
"def",
"enqueue_lag_monitor_task",
"(",
")",
":",
"measure_queue_lag",
".",
"delay",
"(",
"datetime",
".",
"now",
"(",
")",
")"
] | fires a task that measures the queue lag . | train | false |
38,872 | def servicegroup_server_up(sg_name, s_name, s_port, **connection_args):
    """Check if the server:port combination is in state UP in the given
    servicegroup; returns False when the server is not found.

    CLI Example:

        salt '*' netscaler.servicegroup_server_up sg_name server_name port
    """
    server = _servicegroup_get_server(sg_name, s_name, s_port, **connection_args)
    return ((server is not None) and (server.get_svrstate() == 'UP'))
| [
"def",
"servicegroup_server_up",
"(",
"sg_name",
",",
"s_name",
",",
"s_port",
",",
"**",
"connection_args",
")",
":",
"server",
"=",
"_servicegroup_get_server",
"(",
"sg_name",
",",
"s_name",
",",
"s_port",
",",
"**",
"connection_args",
")",
"return",
"(",
"(... | check if a server:port combination is in state up in a servicegroup cli example: . | train | true |
38,873 | def _build_align_cmdline(cmdline, pair, output_filename, kbyte=None, force_type=None, quiet=False):
    """Helper building an alignment command line string.

    cmdline
        Base command as a list of arguments (copied, not mutated).
    pair
        Pair of input file names appended to the command.
    output_filename
        File that stdout is redirected to.
    kbyte
        Memory limit; when None, falls back to the WISE_KBYTE environment
        variable if set.
    force_type
        NOTE(review): accepted but never used -- confirm intended.
    quiet
        When True, stderr is discarded to /dev/null.
    """
    cmdline = cmdline[:]
    if (kbyte is None):
        try:
            cmdline.extend(('-kbyte', os.environ['WISE_KBYTE']))
        except KeyError:
            pass
    else:
        cmdline.extend(('-kbyte', str(kbyte)))
    # Suppress progress output when stderr is not an interactive terminal.
    if (not os.isatty(sys.stderr.fileno())):
        cmdline.append('-quiet')
    cmdline.extend(pair)
    cmdline.extend(('>', output_filename))
    if quiet:
        cmdline.extend(('2>', '/dev/null'))
    cmdline_str = ' '.join(cmdline)
    return cmdline_str
| [
"def",
"_build_align_cmdline",
"(",
"cmdline",
",",
"pair",
",",
"output_filename",
",",
"kbyte",
"=",
"None",
",",
"force_type",
"=",
"None",
",",
"quiet",
"=",
"False",
")",
":",
"cmdline",
"=",
"cmdline",
"[",
":",
"]",
"if",
"(",
"kbyte",
"is",
"No... | helper function to build a command line string . | train | false |
38,874 | @requires_segment_info
def jobnum(pl, segment_info, show_zero=False):
    """Return the number of background jobs as a string, or None when the
    count is unknown or (unless show_zero) zero."""
    jobnum = segment_info[u'args'].jobnum
    if ((jobnum is None) or ((not show_zero) and (jobnum == 0))):
        return None
    else:
        return str(jobnum)
| [
"@",
"requires_segment_info",
"def",
"jobnum",
"(",
"pl",
",",
"segment_info",
",",
"show_zero",
"=",
"False",
")",
":",
"jobnum",
"=",
"segment_info",
"[",
"u'args'",
"]",
".",
"jobnum",
"if",
"(",
"(",
"jobnum",
"is",
"None",
")",
"or",
"(",
"(",
"no... | return the number of jobs . | train | false |
38,878 | def timed(registry, xml_parent, data):
    """yaml: timed -- trigger builds at certain times.

    *data* is the cron-style schedule string written into the Jenkins
    TimerTrigger ``spec`` element.
    """
    scmtrig = XML.SubElement(xml_parent, 'hudson.triggers.TimerTrigger')
    XML.SubElement(scmtrig, 'spec').text = data
| [
"def",
"timed",
"(",
"registry",
",",
"xml_parent",
",",
"data",
")",
":",
"scmtrig",
"=",
"XML",
".",
"SubElement",
"(",
"xml_parent",
",",
"'hudson.triggers.TimerTrigger'",
")",
"XML",
".",
"SubElement",
"(",
"scmtrig",
",",
"'spec'",
")",
".",
"text",
"... | yaml: timed trigger builds at certain times . | train | false |
38,879 | def get_default_dest_dir():
    """Return the default directory to save downloaded content to.

    Prefers an existing 'TriblerDownloads' directory relative to the
    current working directory, then a 'TriblerDownloads' subdirectory of
    the user's Downloads folder, then one under the home directory.
    """
    t_downloaddir = u'TriblerDownloads'
    if os.path.isdir(t_downloaddir):
        return os.path.abspath(t_downloaddir)
    downloads_dir = os.path.join(get_home_dir(), u'Downloads')
    if os.path.isdir(downloads_dir):
        return os.path.join(downloads_dir, t_downloaddir)
    else:
        return os.path.join(get_home_dir(), t_downloaddir)
| [
"def",
"get_default_dest_dir",
"(",
")",
":",
"t_downloaddir",
"=",
"u'TriblerDownloads'",
"if",
"os",
".",
"path",
".",
"isdir",
"(",
"t_downloaddir",
")",
":",
"return",
"os",
".",
"path",
".",
"abspath",
"(",
"t_downloaddir",
")",
"downloads_dir",
"=",
"o... | returns the default dir to save content to . | train | false |
def xl_rowcol_to_cell(row, col, row_abs=False, col_abs=False):
    """Convert a zero-indexed (row, col) reference to an A1-style string.

    row_abs / col_abs add '$' absolute markers to the row / column part.
    """
    row_prefix = '$' if row_abs else ''
    # Rows are 1-based in A1 notation.
    return '{0}{1}{2}'.format(xl_col_to_name(col, col_abs), row_prefix, str(row + 1))
| [
"def",
"xl_rowcol_to_cell",
"(",
"row",
",",
"col",
",",
"row_abs",
"=",
"False",
",",
"col_abs",
"=",
"False",
")",
":",
"row",
"+=",
"1",
"row_abs",
"=",
"(",
"'$'",
"if",
"row_abs",
"else",
"''",
")",
"col_str",
"=",
"xl_col_to_name",
"(",
"col",
... | convert a zero indexed row and column cell reference to a a1 style string . | train | false |
38,882 | def _replaceIf(condition, alternative):
def decorator(func):
if (condition is True):
call = alternative
elif (condition is False):
call = func
else:
raise ValueError('condition argument to _replaceIf requires a bool, not {}'.format(repr(condition)))
@wraps(func)
def wrapped(*args, **kwargs):
return call(*args, **kwargs)
return wrapped
return decorator
| [
"def",
"_replaceIf",
"(",
"condition",
",",
"alternative",
")",
":",
"def",
"decorator",
"(",
"func",
")",
":",
"if",
"(",
"condition",
"is",
"True",
")",
":",
"call",
"=",
"alternative",
"elif",
"(",
"condition",
"is",
"False",
")",
":",
"call",
"=",
... | if c{condition} . | train | false |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.