| id_within_dataset (int64, 1–55.5k) | snippet (string, lengths 19–14.2k) | tokens (list, lengths 6–1.63k) | nl (string, lengths 6–352) | split_within_dataset (string, 1 value) | is_duplicated (bool, 2 classes) |
|---|---|---|---|---|---|
14,075 | def build_projection_operator(l_x, n_dir):
    (X, Y) = _generate_center_coordinates(l_x)
    angles = np.linspace(0, np.pi, n_dir, endpoint=False)
    (data_inds, weights, camera_inds) = ([], [], [])
    data_unravel_indices = np.arange((l_x ** 2))
    data_unravel_indices = np.hstack((data_unravel_indices, data_unravel_indices))
    for (i, angle) in enumerate(angles):
        Xrot = ((np.cos(angle) * X) - (np.sin(angle) * Y))
        (inds, w) = _weights(Xrot, dx=1, orig=X.min())
        mask = np.logical_and((inds >= 0), (inds < l_x))
        weights += list(w[mask])
        camera_inds += list((inds[mask] + (i * l_x)))
        data_inds += list(data_unravel_indices[mask])
    proj_operator = sparse.coo_matrix((weights, (camera_inds, data_inds)))
    return proj_operator
| [
"def",
"build_projection_operator",
"(",
"l_x",
",",
"n_dir",
")",
":",
"(",
"X",
",",
"Y",
")",
"=",
"_generate_center_coordinates",
"(",
"l_x",
")",
"angles",
"=",
"np",
".",
"linspace",
"(",
"0",
",",
"np",
".",
"pi",
",",
"n_dir",
",",
"endpoint",
... | compute the tomography design matrix . | train | false |
14,076 | def delete_rax_clb(args):
print ("--- Cleaning Cloud Load Balancers matching '%s'" % args.match_re)
for region in pyrax.identity.services.load_balancer.regions:
clb = pyrax.connect_to_cloud_loadbalancers(region=region)
for lb in rax_list_iterator(clb):
if re.search(args.match_re, lb.name):
prompt_and_delete(lb, ('Delete matching %s? [y/n]: ' % lb), args.assumeyes)
| [
"def",
"delete_rax_clb",
"(",
"args",
")",
":",
"print",
"(",
"\"--- Cleaning Cloud Load Balancers matching '%s'\"",
"%",
"args",
".",
"match_re",
")",
"for",
"region",
"in",
"pyrax",
".",
"identity",
".",
"services",
".",
"load_balancer",
".",
"regions",
":",
"... | function for deleting cloud load balancers . | train | false |
14,079 | @_ensure_exists
def pid(name):
    try:
        return int(info(name).get('PID'))
    except (TypeError, ValueError) as exc:
        raise CommandExecutionError("Unable to get PID for container '{0}': {1}".format(name, exc))
| [
"@",
"_ensure_exists",
"def",
"pid",
"(",
"name",
")",
":",
"try",
":",
"return",
"int",
"(",
"info",
"(",
"name",
")",
".",
"get",
"(",
"'PID'",
")",
")",
"except",
"(",
"TypeError",
",",
"ValueError",
")",
"as",
"exc",
":",
"raise",
"CommandExecuti... | returns the pid of a container name container name or id cli example: . | train | true |
14,080 | def extra_padding_x_keep_ratio(original_size, padding):
    return _resize(original_size, 0, padding=padding, keep_aspect_ratio=True)
| [
"def",
"extra_padding_x_keep_ratio",
"(",
"original_size",
",",
"padding",
")",
":",
"return",
"_resize",
"(",
"original_size",
",",
"0",
",",
"padding",
"=",
"padding",
",",
"keep_aspect_ratio",
"=",
"True",
")"
] | reduce the width of original_size by padding . | train | false |
14,082 | def test_distance_to_quantity_when_not_units_of_length():
    d = Distance((15 * u.kpc))
    twice = (2.0 * d)
    assert isinstance(twice, Distance)
    area = ((4.0 * np.pi) * (d ** 2))
    assert area.unit.is_equivalent((u.m ** 2))
    assert (not isinstance(area, Distance))
    assert (type(area) is u.Quantity)
| [
"def",
"test_distance_to_quantity_when_not_units_of_length",
"(",
")",
":",
"d",
"=",
"Distance",
"(",
"(",
"15",
"*",
"u",
".",
"kpc",
")",
")",
"twice",
"=",
"(",
"2.0",
"*",
"d",
")",
"assert",
"isinstance",
"(",
"twice",
",",
"Distance",
")",
"area",... | any operatation that leaves units other than those of length should turn a distance into a quantity . | train | false |
14,083 | def tableload(datafile, cdfile, hfile=None):
    return BinTableHDU.load(datafile, cdfile, hfile, replace=True)
| [
"def",
"tableload",
"(",
"datafile",
",",
"cdfile",
",",
"hfile",
"=",
"None",
")",
":",
"return",
"BinTableHDU",
".",
"load",
"(",
"datafile",
",",
"cdfile",
",",
"hfile",
",",
"replace",
"=",
"True",
")"
] | create a table from the input ascii files . | train | false |
14,084 | def _unpack_simple(fid, dtype, out_dtype):
    dtype = np.dtype(dtype)
    string = fid.read(dtype.itemsize)
    out = np.fromstring(string, dtype=dtype).astype(out_dtype)
    if (len(out) > 0):
        out = out[0]
    return out
| [
"def",
"_unpack_simple",
"(",
"fid",
",",
"dtype",
",",
"out_dtype",
")",
":",
"dtype",
"=",
"np",
".",
"dtype",
"(",
"dtype",
")",
"string",
"=",
"fid",
".",
"read",
"(",
"dtype",
".",
"itemsize",
")",
"out",
"=",
"np",
".",
"fromstring",
"(",
"st... | unpack a numpy type . | train | false |
14,086 | @gen.engine
def HandleOneUser(client_store, user_id, callback):
    logs_paths = logs_util.UserAnalyticsLogsPaths(user_id)
    base_path = logs_paths.RawDirectory()
    marker = (os.path.join(base_path, options.options.start_date) if (options.options.start_date is not None) else None)
    files = (yield gen.Task(store_utils.ListAllKeys, client_store, prefix=base_path, marker=marker))
    analytics_files = []
    crash_files = []
    for f in sorted(files):
        if f.endswith('.analytics.gz'):
            analytics_files.append(f)
        elif (f.endswith('.crash') or f.endswith('.crash.gz')):
            crash_files.append(f)
    if (analytics_files and options.options.process_analytics):
        (yield gen.Task(HandleAnalytics, client_store, user_id, logs_paths, analytics_files))
    if (crash_files and options.options.process_crashes):
        (yield gen.Task(HandleCrashes, client_store, user_id, crash_files))
    callback()
| [
"@",
"gen",
".",
"engine",
"def",
"HandleOneUser",
"(",
"client_store",
",",
"user_id",
",",
"callback",
")",
":",
"logs_paths",
"=",
"logs_util",
".",
"UserAnalyticsLogsPaths",
"(",
"user_id",
")",
"base_path",
"=",
"logs_paths",
".",
"RawDirectory",
"(",
")"... | process client logs for a single user . | train | false |
14,087 | def getTile(layer, coord, extension, ignore_cached=False):
    (status_code, headers, body) = layer.getTileResponse(coord, extension, ignore_cached)
    mime = headers.get('Content-Type')
    return (mime, body)
| [
"def",
"getTile",
"(",
"layer",
",",
"coord",
",",
"extension",
",",
"ignore_cached",
"=",
"False",
")",
":",
"(",
"status_code",
",",
"headers",
",",
"body",
")",
"=",
"layer",
".",
"getTileResponse",
"(",
"coord",
",",
"extension",
",",
"ignore_cached",
... | get a type string and tile binary for a given request layer tile . | train | false |
14,088 | def getobj(txt, last=False):
    txt_end = ''
    for (startchar, endchar) in ['[]', '()']:
        if txt.endswith(endchar):
            pos = txt.rfind(startchar)
            if pos:
                txt_end = txt[pos:]
                txt = txt[:pos]
    tokens = re.split(SYMBOLS, txt)
    token = None
    try:
        while ((token is None) or re.match(SYMBOLS, token)):
            token = tokens.pop()
        if token.endswith('.'):
            token = token[:(-1)]
        if token.startswith('.'):
            return None
        if last:
            token += txt[(txt.rfind(token) + len(token))]
        token += txt_end
        if token:
            return token
    except IndexError:
        return None
| [
"def",
"getobj",
"(",
"txt",
",",
"last",
"=",
"False",
")",
":",
"txt_end",
"=",
"''",
"for",
"(",
"startchar",
",",
"endchar",
")",
"in",
"[",
"'[]'",
",",
"'()'",
"]",
":",
"if",
"txt",
".",
"endswith",
"(",
"endchar",
")",
":",
"pos",
"=",
... | return the last valid object name in string . | train | true |
14,091 | def dmp_sqf_list_include(f, u, K, all=False):
    if (not u):
        return dup_sqf_list_include(f, K, all=all)
    (coeff, factors) = dmp_sqf_list(f, u, K, all=all)
    if (factors and (factors[0][1] == 1)):
        g = dmp_mul_ground(factors[0][0], coeff, u, K)
        return ([(g, 1)] + factors[1:])
    else:
        g = dmp_ground(coeff, u)
        return ([(g, 1)] + factors)
| [
"def",
"dmp_sqf_list_include",
"(",
"f",
",",
"u",
",",
"K",
",",
"all",
"=",
"False",
")",
":",
"if",
"(",
"not",
"u",
")",
":",
"return",
"dup_sqf_list_include",
"(",
"f",
",",
"K",
",",
"all",
"=",
"all",
")",
"(",
"coeff",
",",
"factors",
")"... | return square-free decomposition of a polynomial in k[x] . | train | false |
14,092 | def form_for_model(model):
    parts = model.__module__.split('.')
    parts[parts.index('models')] = 'forms'
    module_name = '.'.join(parts)
    form_name = (model.__name__ + 'Form')
    module = try_import(module_name)
    if (module is not None):
        form = getattr(module, form_name, None)
        if (form is not None):
            return form
    meta_dict = LocationForm.Meta.__dict__
    meta_dict['model'] = model
    return type(form_name, (LocationForm,), {'Meta': type('Meta', (), meta_dict)})
| [
"def",
"form_for_model",
"(",
"model",
")",
":",
"parts",
"=",
"model",
".",
"__module__",
".",
"split",
"(",
"'.'",
")",
"parts",
"[",
"parts",
".",
"index",
"(",
"'models'",
")",
"]",
"=",
"'forms'",
"module_name",
"=",
"'.'",
".",
"join",
"(",
"pa... | returns a form class for the given django model class . | train | false |
14,093 | def get_file_client(opts, pillar=False):
    client = opts.get('file_client', 'remote')
    if (pillar and (client == 'local')):
        client = 'pillar'
    return {'remote': RemoteClient, 'local': FSClient, 'pillar': LocalClient}.get(client, RemoteClient)(opts)
| [
"def",
"get_file_client",
"(",
"opts",
",",
"pillar",
"=",
"False",
")",
":",
"client",
"=",
"opts",
".",
"get",
"(",
"'file_client'",
",",
"'remote'",
")",
"if",
"(",
"pillar",
"and",
"(",
"client",
"==",
"'local'",
")",
")",
":",
"client",
"=",
"'p... | read in the file_client option and return the correct type of file server . | train | true |
14,094 | def volume_metadata_delete(context, volume_id, key, meta_type=common.METADATA_TYPES.user):
    return IMPL.volume_metadata_delete(context, volume_id, key, meta_type)
| [
"def",
"volume_metadata_delete",
"(",
"context",
",",
"volume_id",
",",
"key",
",",
"meta_type",
"=",
"common",
".",
"METADATA_TYPES",
".",
"user",
")",
":",
"return",
"IMPL",
".",
"volume_metadata_delete",
"(",
"context",
",",
"volume_id",
",",
"key",
",",
... | delete the given metadata item . | train | false |
14,096 | def add_introspection_rules(rules=[], patterns=[]):
    assert isinstance(rules, (list, tuple))
    assert isinstance(patterns, (list, tuple))
    allowed_fields.extend(patterns)
    introspection_details.extend(rules)
| [
"def",
"add_introspection_rules",
"(",
"rules",
"=",
"[",
"]",
",",
"patterns",
"=",
"[",
"]",
")",
":",
"assert",
"isinstance",
"(",
"rules",
",",
"(",
"list",
",",
"tuple",
")",
")",
"assert",
"isinstance",
"(",
"patterns",
",",
"(",
"list",
",",
"... | allows you to add some introspection rules at runtime . | train | false |
14,098 | def _write_morph_map(fname, subject_from, subject_to, mmap_1, mmap_2):
    fid = start_file(fname)
    assert (len(mmap_1) == 2)
    assert (len(mmap_2) == 2)
    hemis = [FIFF.FIFFV_MNE_SURF_LEFT_HEMI, FIFF.FIFFV_MNE_SURF_RIGHT_HEMI]
    for (m, hemi) in zip(mmap_1, hemis):
        start_block(fid, FIFF.FIFFB_MNE_MORPH_MAP)
        write_string(fid, FIFF.FIFF_MNE_MORPH_MAP_FROM, subject_from)
        write_string(fid, FIFF.FIFF_MNE_MORPH_MAP_TO, subject_to)
        write_int(fid, FIFF.FIFF_MNE_HEMI, hemi)
        write_float_sparse_rcs(fid, FIFF.FIFF_MNE_MORPH_MAP, m)
        end_block(fid, FIFF.FIFFB_MNE_MORPH_MAP)
    for (m, hemi) in zip(mmap_2, hemis):
        start_block(fid, FIFF.FIFFB_MNE_MORPH_MAP)
        write_string(fid, FIFF.FIFF_MNE_MORPH_MAP_FROM, subject_to)
        write_string(fid, FIFF.FIFF_MNE_MORPH_MAP_TO, subject_from)
        write_int(fid, FIFF.FIFF_MNE_HEMI, hemi)
        write_float_sparse_rcs(fid, FIFF.FIFF_MNE_MORPH_MAP, m)
        end_block(fid, FIFF.FIFFB_MNE_MORPH_MAP)
    end_file(fid)
| [
"def",
"_write_morph_map",
"(",
"fname",
",",
"subject_from",
",",
"subject_to",
",",
"mmap_1",
",",
"mmap_2",
")",
":",
"fid",
"=",
"start_file",
"(",
"fname",
")",
"assert",
"(",
"len",
"(",
"mmap_1",
")",
"==",
"2",
")",
"assert",
"(",
"len",
"(",
... | write a morph map to disk . | train | false |
14,099 | def getNewRepository():
    return ExportRepository()
| [
"def",
"getNewRepository",
"(",
")",
":",
"return",
"ExportRepository",
"(",
")"
] | get the repository constructor . | train | false |
14,100 | def _read_uint32(f):
    return np.uint32(struct.unpack('>I', f.read(4))[0])
| [
"def",
"_read_uint32",
"(",
"f",
")",
":",
"return",
"np",
".",
"uint32",
"(",
"struct",
".",
"unpack",
"(",
"'>I'",
",",
"f",
".",
"read",
"(",
"4",
")",
")",
"[",
"0",
"]",
")"
] | read an unsigned 32-bit integer . | train | false |
14,101 | def chmod_plus_x(path):
    path_mode = os.stat(path).st_mode
    path_mode &= int(u'777', 8)
    if (path_mode & stat.S_IRUSR):
        path_mode |= stat.S_IXUSR
    if (path_mode & stat.S_IRGRP):
        path_mode |= stat.S_IXGRP
    if (path_mode & stat.S_IROTH):
        path_mode |= stat.S_IXOTH
    os.chmod(path, path_mode)
| [
"def",
"chmod_plus_x",
"(",
"path",
")",
":",
"path_mode",
"=",
"os",
".",
"stat",
"(",
"path",
")",
".",
"st_mode",
"path_mode",
"&=",
"int",
"(",
"u'777'",
",",
"8",
")",
"if",
"(",
"path_mode",
"&",
"stat",
".",
"S_IRUSR",
")",
":",
"path_mode",
... | equivalent of unix chmod a+x path . | train | true |
14,102 | def _update_perf_events_xml(xml_doc, migrate_data):
    supported_perf_events = []
    old_xml_has_perf = True
    if ('supported_perf_events' in migrate_data):
        supported_perf_events = migrate_data.supported_perf_events
    perf_events = xml_doc.findall('./perf')
    if (not perf_events):
        perf_events = etree.Element('perf')
        old_xml_has_perf = False
    else:
        perf_events = perf_events[0]
        for (_, event) in enumerate(perf_events):
            perf_events.remove(event)
    if (not supported_perf_events):
        return xml_doc
    for e in supported_perf_events:
        new_event = etree.Element('event', enabled='yes', name=e)
        perf_events.append(new_event)
    if (not old_xml_has_perf):
        xml_doc.append(perf_events)
    return xml_doc
| [
"def",
"_update_perf_events_xml",
"(",
"xml_doc",
",",
"migrate_data",
")",
":",
"supported_perf_events",
"=",
"[",
"]",
"old_xml_has_perf",
"=",
"True",
"if",
"(",
"'supported_perf_events'",
"in",
"migrate_data",
")",
":",
"supported_perf_events",
"=",
"migrate_data"... | update xml by the supported events of destination host . | train | false |
14,103 | def gid_to_group(gid):
    try:
        gid = int(gid)
    except ValueError:
        gid = group_to_gid(gid)
        if (gid == ''):
            return ''
    try:
        return grp.getgrgid(gid).gr_name
    except (KeyError, NameError):
        return gid
| [
"def",
"gid_to_group",
"(",
"gid",
")",
":",
"try",
":",
"gid",
"=",
"int",
"(",
"gid",
")",
"except",
"ValueError",
":",
"gid",
"=",
"group_to_gid",
"(",
"gid",
")",
"if",
"(",
"gid",
"==",
"''",
")",
":",
"return",
"''",
"try",
":",
"return",
"... | convert the group id to the group name on this system under windows . | train | true |
14,104 | def _uncache_ours_upto(fd, offset, first_region, remaining_regions):
    (rstart, rlen) = first_region
    while ((rstart is not None) and (((rstart + rlen) * sc_page_size) <= offset)):
        _fadvise_pages_done(fd, rstart, rlen)
        (rstart, rlen) = next(remaining_regions, (None, None))
    return (rstart, rlen)
| [
"def",
"_uncache_ours_upto",
"(",
"fd",
",",
"offset",
",",
"first_region",
",",
"remaining_regions",
")",
":",
"(",
"rstart",
",",
"rlen",
")",
"=",
"first_region",
"while",
"(",
"(",
"rstart",
"is",
"not",
"None",
")",
"and",
"(",
"(",
"(",
"rstart",
... | uncache the pages of fd indicated by first_region and remaining_regions that are before offset . | train | false |
14,105 | def _make_transform_card(fro, to, r_lpa, r_nasion, r_rpa):
    diff_1 = (r_nasion - r_lpa)
    ex = (r_rpa - r_lpa)
    alpha = (np.dot(diff_1, ex) / np.dot(ex, ex))
    ex /= np.sqrt(np.sum((ex * ex)))
    trans = np.eye(4)
    move = (((1.0 - alpha) * r_lpa) + (alpha * r_rpa))
    trans[:3, 3] = move
    trans[:3, 0] = ex
    ey = (r_nasion - move)
    ey /= np.sqrt(np.sum((ey * ey)))
    trans[:3, 1] = ey
    trans[:3, 2] = np.cross(ex, ey)
    return Transform(fro, to, trans)
| [
"def",
"_make_transform_card",
"(",
"fro",
",",
"to",
",",
"r_lpa",
",",
"r_nasion",
",",
"r_rpa",
")",
":",
"diff_1",
"=",
"(",
"r_nasion",
"-",
"r_lpa",
")",
"ex",
"=",
"(",
"r_rpa",
"-",
"r_lpa",
")",
"alpha",
"=",
"(",
"np",
".",
"dot",
"(",
... | make a transform from cardinal landmarks . | train | false |
14,108 | @register.filter(name='crispy')
def as_crispy_form(form, template_pack=TEMPLATE_PACK, label_class='', field_class=''):
    if isinstance(form, BaseFormSet):
        template = uni_formset_template(template_pack)
        c = Context({'formset': form, 'form_show_errors': True, 'form_show_labels': True, 'label_class': label_class, 'field_class': field_class}).flatten()
    else:
        template = uni_form_template(template_pack)
        c = Context({'form': form, 'form_show_errors': True, 'form_show_labels': True, 'label_class': label_class, 'field_class': field_class}).flatten()
    return template.render(c)
| [
"@",
"register",
".",
"filter",
"(",
"name",
"=",
"'crispy'",
")",
"def",
"as_crispy_form",
"(",
"form",
",",
"template_pack",
"=",
"TEMPLATE_PACK",
",",
"label_class",
"=",
"''",
",",
"field_class",
"=",
"''",
")",
":",
"if",
"isinstance",
"(",
"form",
... | the original and still very useful way to generate a div elegant form/formset:: {% load crispy_forms_tags %} <form class="uniform" method="post"> {% csrf_token %} {{ myform|crispy }} </form> or . | train | true |
14,109 | def iterencode(iter):
    return ','.join((as_unicode(v).replace(CHAR_ESCAPE, (CHAR_ESCAPE + CHAR_ESCAPE)).replace(CHAR_SEPARATOR, (CHAR_ESCAPE + CHAR_SEPARATOR)) for v in iter))
| [
"def",
"iterencode",
"(",
"iter",
")",
":",
"return",
"','",
".",
"join",
"(",
"(",
"as_unicode",
"(",
"v",
")",
".",
"replace",
"(",
"CHAR_ESCAPE",
",",
"(",
"CHAR_ESCAPE",
"+",
"CHAR_ESCAPE",
")",
")",
".",
"replace",
"(",
"CHAR_SEPARATOR",
",",
"(",... | encoding iterator . | train | false |
14,110 | def record_stat(key, request, **data):
    if ('__recorded' in data):
        recorded = data.pop('__recorded')
    else:
        recorded = datetime.datetime.utcnow()
    if (not data):
        raise ValueError('You should at least define one value')
    record = MonolithRecord(key=key, user_hash=get_user_hash(request), recorded=recorded, value=json.dumps(data))
    record.save()
    return record
| [
"def",
"record_stat",
"(",
"key",
",",
"request",
",",
"**",
"data",
")",
":",
"if",
"(",
"'__recorded'",
"in",
"data",
")",
":",
"recorded",
"=",
"data",
".",
"pop",
"(",
"'__recorded'",
")",
"else",
":",
"recorded",
"=",
"datetime",
".",
"datetime",
... | create a new record in the database with the given values . | train | false |
14,111 | def p_postfix_expression_1(t):
    pass
| [
"def",
"p_postfix_expression_1",
"(",
"t",
")",
":",
"pass"
] | postfix_expression : primary_expression . | train | false |
14,112 | def indicator_data():
    return s3_rest_controller()
| [
"def",
"indicator_data",
"(",
")",
":",
"return",
"s3_rest_controller",
"(",
")"
] | restful crud controller . | train | false |
14,114 | def organization_autocomplete(context, data_dict):
    _check_access('organization_autocomplete', context, data_dict)
    q = data_dict['q']
    limit = data_dict.get('limit', 20)
    model = context['model']
    query = model.Group.search_by_name_or_title(q, group_type=None, is_org=True)
    organization_list = []
    for organization in query.all():
        result_dict = {}
        for k in ['id', 'name', 'title']:
            result_dict[k] = getattr(organization, k)
        organization_list.append(result_dict)
    return organization_list
| [
"def",
"organization_autocomplete",
"(",
"context",
",",
"data_dict",
")",
":",
"_check_access",
"(",
"'organization_autocomplete'",
",",
"context",
",",
"data_dict",
")",
"q",
"=",
"data_dict",
"[",
"'q'",
"]",
"limit",
"=",
"data_dict",
".",
"get",
"(",
"'li... | return a list of organization names that contain a string . | train | false |
14,115 | def maxabs_scale(X, axis=0, copy=True):
    X = check_array(X, accept_sparse=('csr', 'csc'), copy=False, ensure_2d=False, dtype=FLOAT_DTYPES)
    original_ndim = X.ndim
    if (original_ndim == 1):
        X = X.reshape(X.shape[0], 1)
    s = MaxAbsScaler(copy=copy)
    if (axis == 0):
        X = s.fit_transform(X)
    else:
        X = s.fit_transform(X.T).T
    if (original_ndim == 1):
        X = X.ravel()
    return X
| [
"def",
"maxabs_scale",
"(",
"X",
",",
"axis",
"=",
"0",
",",
"copy",
"=",
"True",
")",
":",
"X",
"=",
"check_array",
"(",
"X",
",",
"accept_sparse",
"=",
"(",
"'csr'",
",",
"'csc'",
")",
",",
"copy",
"=",
"False",
",",
"ensure_2d",
"=",
"False",
... | scale each feature to the [-1 . | train | false |
14,116 | def dmp_quo_ground(f, c, u, K):
    if (not u):
        return dup_quo_ground(f, c, K)
    v = (u - 1)
    return [dmp_quo_ground(cf, c, v, K) for cf in f]
| [
"def",
"dmp_quo_ground",
"(",
"f",
",",
"c",
",",
"u",
",",
"K",
")",
":",
"if",
"(",
"not",
"u",
")",
":",
"return",
"dup_quo_ground",
"(",
"f",
",",
"c",
",",
"K",
")",
"v",
"=",
"(",
"u",
"-",
"1",
")",
"return",
"[",
"dmp_quo_ground",
"("... | quotient by a constant in k[x] . | train | false |
14,118 | def topic_weight(d, word, k):
    return (p_word_given_topic(word, k) * p_topic_given_document(k, d))
| [
"def",
"topic_weight",
"(",
"d",
",",
"word",
",",
"k",
")",
":",
"return",
"(",
"p_word_given_topic",
"(",
"word",
",",
"k",
")",
"*",
"p_topic_given_document",
"(",
"k",
",",
"d",
")",
")"
] | given a document and a word in that document . | train | false |
14,119 | def _GenerateActionsForMSBuild(spec, actions_to_add):
    sources_handled_by_action = OrderedSet()
    actions_spec = []
    for (primary_input, actions) in actions_to_add.iteritems():
        inputs = OrderedSet()
        outputs = OrderedSet()
        descriptions = []
        commands = []
        for action in actions:
            inputs.update(OrderedSet(action['inputs']))
            outputs.update(OrderedSet(action['outputs']))
            descriptions.append(action['description'])
            cmd = action['command']
            if action.get('msbuild_use_call', True):
                cmd = ('call ' + cmd)
            commands.append(cmd)
        description = ', and also '.join(descriptions)
        command = '\r\n'.join([(c + '\r\nif %errorlevel% neq 0 exit /b %errorlevel%') for c in commands])
        _AddMSBuildAction(spec, primary_input, inputs, outputs, command, description, sources_handled_by_action, actions_spec)
    return (actions_spec, sources_handled_by_action)
| [
"def",
"_GenerateActionsForMSBuild",
"(",
"spec",
",",
"actions_to_add",
")",
":",
"sources_handled_by_action",
"=",
"OrderedSet",
"(",
")",
"actions_spec",
"=",
"[",
"]",
"for",
"(",
"primary_input",
",",
"actions",
")",
"in",
"actions_to_add",
".",
"iteritems",
... | add actions accumulated into an actions_to_add . | train | false |
14,120 | def drilldown_tree_for_node(node, rel_cls=None, rel_field=None, count_attr=None, cumulative=False):
    if (rel_cls and rel_field and count_attr):
        children = node._tree_manager.add_related_count(node.get_children(), rel_cls, rel_field, count_attr, cumulative)
    else:
        children = node.get_children()
    return itertools.chain(node.get_ancestors(), [node], children)
| [
"def",
"drilldown_tree_for_node",
"(",
"node",
",",
"rel_cls",
"=",
"None",
",",
"rel_field",
"=",
"None",
",",
"count_attr",
"=",
"None",
",",
"cumulative",
"=",
"False",
")",
":",
"if",
"(",
"rel_cls",
"and",
"rel_field",
"and",
"count_attr",
")",
":",
... | creates a drilldown tree for the given node . | train | false |
14,121 | def _retrieve_dbxrefs(adaptor, primary_id):
    _dbxrefs = []
    dbxrefs = adaptor.execute_and_fetchall('SELECT dbname, accession, version FROM bioentry_dbxref join dbxref using (dbxref_id) WHERE bioentry_id = %s ORDER BY rank', (primary_id,))
    for (dbname, accession, version) in dbxrefs:
        if (version and (version != '0')):
            v = ('%s.%s' % (accession, version))
        else:
            v = accession
        _dbxrefs.append(('%s:%s' % (dbname, v)))
    return _dbxrefs
| [
"def",
"_retrieve_dbxrefs",
"(",
"adaptor",
",",
"primary_id",
")",
":",
"_dbxrefs",
"=",
"[",
"]",
"dbxrefs",
"=",
"adaptor",
".",
"execute_and_fetchall",
"(",
"'SELECT dbname, accession, version FROM bioentry_dbxref join dbxref using (dbxref_id) WHERE bioentry_id = %s ORDER BY ... | retrieve the database cross references for the sequence . | train | false |
14,122 | def test_tabs():
    ignore = _tab_ignores[:]
    try:
        import mayavi
    except ImportError:
        ignore.extend((('mne.gui.' + name) for name in ('_coreg_gui', '_fiducials_gui', '_file_traits', '_help', '_kit2fiff_gui', '_marker_gui', '_viewer')))
    for (importer, modname, ispkg) in walk_packages(mne.__path__, prefix='mne.'):
        if ((not ispkg) and (modname not in ignore)):
            try:
                __import__(modname)
            except Exception:
                continue
            mod = sys.modules[modname]
            source = getsource(mod)
            assert_true((' DCTB ' not in source), ('"%s" has tabs, please remove them or add it to theignore list' % modname))
| [
"def",
"test_tabs",
"(",
")",
":",
"ignore",
"=",
"_tab_ignores",
"[",
":",
"]",
"try",
":",
"import",
"mayavi",
"except",
"ImportError",
":",
"ignore",
".",
"extend",
"(",
"(",
"(",
"'mne.gui.'",
"+",
"name",
")",
"for",
"name",
"in",
"(",
"'_coreg_gu... | test that there are no tabs in our source files . | train | false |
14,124 | def compute_item_matrix(items, row_first=False, empty=None, *args, **kwargs):
    info = _find_optimal(list(map(len, items)), row_first, *args, **kwargs)
    (nrow, ncol) = (info['max_rows'], info['num_columns'])
    if row_first:
        return ([[_get_or_default(items, ((r * ncol) + c), default=empty) for c in range(ncol)] for r in range(nrow)], info)
    else:
        return ([[_get_or_default(items, ((c * nrow) + r), default=empty) for c in range(ncol)] for r in range(nrow)], info)
| [
"def",
"compute_item_matrix",
"(",
"items",
",",
"row_first",
"=",
"False",
",",
"empty",
"=",
"None",
",",
"*",
"args",
",",
"**",
"kwargs",
")",
":",
"info",
"=",
"_find_optimal",
"(",
"list",
"(",
"map",
"(",
"len",
",",
"items",
")",
")",
",",
... | returns a nested list . | train | false |
14,125 | def setupWorkerTransition():
    default_warn_method = getWarningMethod()
    def custom_warn_method(message, category, stacklevel):
        if (stacklevel is not None):
            stacklevel += 1
        if (_WORKER_WARNING_MARK in message):
            message = message.replace(_WORKER_WARNING_MARK, '')
            warnings.warn(DeprecatedWorkerNameWarning(message), message, stacklevel)
        else:
            default_warn_method(message, category, stacklevel)
    setWarningMethod(custom_warn_method)
| [
"def",
"setupWorkerTransition",
"(",
")",
":",
"default_warn_method",
"=",
"getWarningMethod",
"(",
")",
"def",
"custom_warn_method",
"(",
"message",
",",
"category",
",",
"stacklevel",
")",
":",
"if",
"(",
"stacklevel",
"is",
"not",
"None",
")",
":",
"stackle... | hook twisted deprecation machinery to use custom warning class for worker api deprecation warnings . | train | true |
14,126 | def complain(message):
    print >>sys.__stderr__, ((isinstance(message, str) and message) or repr(message))
| [
"def",
"complain",
"(",
"message",
")",
":",
"print",
">>",
"sys",
".",
"__stderr__",
",",
"(",
"(",
"isinstance",
"(",
"message",
",",
"str",
")",
"and",
"message",
")",
"or",
"repr",
"(",
"message",
")",
")"
] | if any exception occurs in this file . | train | false |
14,127 | @with_setup(step_runner_environ)
def test_steps_that_match_groups_takes_them_as_parameters():
    @step('Given a ([^\\s]+) called "(.*)"')
    def given_what_named(step, what, name):
        assert_equals(what, 'person')
        assert_equals(name, 'John Doe')
    f = Feature.from_string(FEATURE4)
    feature_result = f.run()
    scenario_result = feature_result.scenario_results[0]
    assert_equals(len(scenario_result.steps_passed), 1)
    assert_equals(scenario_result.total_steps, 1)
| [
"@",
"with_setup",
"(",
"step_runner_environ",
")",
"def",
"test_steps_that_match_groups_takes_them_as_parameters",
"(",
")",
":",
"@",
"step",
"(",
"'Given a ([^\\\\s]+) called \"(.*)\"'",
")",
"def",
"given_what_named",
"(",
"step",
",",
"what",
",",
"name",
")",
":... | steps that match groups takes them as parameters . | train | false |
14,128 | def instance_fault_get_by_instance_uuids(context, instance_uuids):
    rows = model_query(context, models.InstanceFault, read_deleted='no').filter(models.InstanceFault.instance_uuid.in_(instance_uuids)).order_by(desc('created_at'), desc('id')).all()
    output = {}
    for instance_uuid in instance_uuids:
        output[instance_uuid] = []
    for row in rows:
        data = dict(row.iteritems())
        output[row['instance_uuid']].append(data)
    return output
| [
"def",
"instance_fault_get_by_instance_uuids",
"(",
"context",
",",
"instance_uuids",
")",
":",
"rows",
"=",
"model_query",
"(",
"context",
",",
"models",
".",
"InstanceFault",
",",
"read_deleted",
"=",
"'no'",
")",
".",
"filter",
"(",
"models",
".",
"InstanceFa... | get all instance faults for the provided instance_uuids . | train | false |
14,130 | def _new_Index(cls, d):
    return cls.__new__(cls, **d)
| [
"def",
"_new_Index",
"(",
"cls",
",",
"d",
")",
":",
"return",
"cls",
".",
"__new__",
"(",
"cls",
",",
"**",
"d",
")"
] | this is called upon unpickling . | train | false |
14,131 | @pytest.mark.django_db
def test_project_save_no_fullname(project0):
    project0.fullname = ''
    with pytest.raises(ValidationError):
        project0.save()
| [
"@",
"pytest",
".",
"mark",
".",
"django_db",
"def",
"test_project_save_no_fullname",
"(",
"project0",
")",
":",
"project0",
".",
"fullname",
"=",
"''",
"with",
"pytest",
".",
"raises",
"(",
"ValidationError",
")",
":",
"project0",
".",
"save",
"(",
")"
] | test that an existing project cant be removed its fullname . | train | false |
14,132 | def init(mpstate):
    return SerialModule(mpstate)
| [
"def",
"init",
"(",
"mpstate",
")",
":",
"return",
"SerialModule",
"(",
"mpstate",
")"
] | initialise module . | train | false |
14,133 | def reversed_lower_upper_bounded_changelog(repo, excluded_lower_bounds_changeset_revision, included_upper_bounds_changeset_revision):
    if (excluded_lower_bounds_changeset_revision == INITIAL_CHANGELOG_HASH):
        appending_started = True
    else:
        appending_started = False
    reversed_changelog = []
    for changeset in repo.changelog:
        changeset_hash = str(repo.changectx(changeset))
        if appending_started:
            reversed_changelog.insert(0, changeset)
        if ((changeset_hash == excluded_lower_bounds_changeset_revision) and (not appending_started)):
            appending_started = True
        if (changeset_hash == included_upper_bounds_changeset_revision):
            break
    return reversed_changelog
| [
"def",
"reversed_lower_upper_bounded_changelog",
"(",
"repo",
",",
"excluded_lower_bounds_changeset_revision",
",",
"included_upper_bounds_changeset_revision",
")",
":",
"if",
"(",
"excluded_lower_bounds_changeset_revision",
"==",
"INITIAL_CHANGELOG_HASH",
")",
":",
"appending_star... | return a reversed list of changesets in the repository changelog after the excluded_lower_bounds_changeset_revision . | train | false |
14,134 | def find_submodule_git_dir(d):
    if is_git_dir(d):
        return d
    try:
        with open(d) as fp:
            content = fp.read().rstrip()
    except (IOError, OSError):
        pass
    else:
        if content.startswith('gitdir: '):
            path = content[8:]
            if Git.is_cygwin():
                path = decygpath(path)
            if (not osp.isabs(path)):
                path = osp.join(osp.dirname(d), path)
            return find_submodule_git_dir(path)
    return None
| [
"def",
"find_submodule_git_dir",
"(",
"d",
")",
":",
"if",
"is_git_dir",
"(",
"d",
")",
":",
"return",
"d",
"try",
":",
"with",
"open",
"(",
"d",
")",
"as",
"fp",
":",
"content",
"=",
"fp",
".",
"read",
"(",
")",
".",
"rstrip",
"(",
")",
"except"... | search for a submodule repo . | train | true |
14,135 | def common_meta_backup_mocks(f):
    def _common_inner_inner1(inst, *args, **kwargs):
        @mock.patch('cinder.backup.drivers.ceph.rbd')
        @mock.patch('cinder.backup.drivers.ceph.rados')
        def _common_inner_inner2(mock_rados, mock_rbd):
            inst.mock_rados = mock_rados
            inst.mock_rbd = mock_rbd
            inst.mock_rados.ObjectNotFound = MockObjectNotFoundException
            return f(inst, *args, **kwargs)
        return _common_inner_inner2()
    return _common_inner_inner1
| [
"def",
"common_meta_backup_mocks",
"(",
"f",
")",
":",
"def",
"_common_inner_inner1",
"(",
"inst",
",",
"*",
"args",
",",
"**",
"kwargs",
")",
":",
"@",
"mock",
".",
"patch",
"(",
"'cinder.backup.drivers.ceph.rbd'",
")",
"@",
"mock",
".",
"patch",
"(",
"'c... | decorator to set mocks common to all metadata backup tests . | train | false |
14,136 | def determineDefaultFunctionName():
    try:
        (1 / 0)
    except:
        return traceback.extract_stack()[(-2)][2]
| [
"def",
"determineDefaultFunctionName",
"(",
")",
":",
"try",
":",
"(",
"1",
"/",
"0",
")",
"except",
":",
"return",
"traceback",
".",
"extract_stack",
"(",
")",
"[",
"(",
"-",
"2",
")",
"]",
"[",
"2",
"]"
] | return the string used by python as the name for code objects which are compiled from interactive input or at the top-level of modules . | train | false |
14,137 | def get_rect_ymax(data):
    return max(data[0][1], data[1][1], data[2][1], data[3][1])
| [
"def",
"get_rect_ymax",
"(",
"data",
")",
":",
"return",
"max",
"(",
"data",
"[",
"0",
"]",
"[",
"1",
"]",
",",
"data",
"[",
"1",
"]",
"[",
"1",
"]",
",",
"data",
"[",
"2",
"]",
"[",
"1",
"]",
",",
"data",
"[",
"3",
"]",
"[",
"1",
"]",
... | find maximum y value from four vertices . | train | false |
14,138 | def pytest_generate_tests(metafunc):
    test_files = dict(map(parse_test_files_option, metafunc.config.option.test_files))
    if ('case' in metafunc.fixturenames):
        base_dir = metafunc.config.option.integration_case_dir
        thirdparty = metafunc.config.option.thirdparty
        cases = list(run.collect_dir_tests(base_dir, test_files))
        if thirdparty:
            cases.extend(run.collect_dir_tests(os.path.join(base_dir, 'thirdparty'), test_files, True))
        ids = [('%s:%s' % (c.module_name, c.line_nr_test)) for c in cases]
        metafunc.parametrize('case', cases, ids=ids)
    if ('refactor_case' in metafunc.fixturenames):
        base_dir = metafunc.config.option.refactor_case_dir
        metafunc.parametrize('refactor_case', refactor.collect_dir_tests(base_dir, test_files))
    if ('static_analysis_case' in metafunc.fixturenames):
        base_dir = os.path.join(os.path.dirname(__file__), 'static_analysis')
        metafunc.parametrize('static_analysis_case', collect_static_analysis_tests(base_dir, test_files))
| [
"def",
"pytest_generate_tests",
"(",
"metafunc",
")",
":",
"test_files",
"=",
"dict",
"(",
"map",
"(",
"parse_test_files_option",
",",
"metafunc",
".",
"config",
".",
"option",
".",
"test_files",
")",
")",
"if",
"(",
"'case'",
"in",
"metafunc",
".",
"fixture... | build a list of test arguments for test_copy_transpose . | train | false |
14,139 | def getUserSid(username):
    domain = win32api.GetComputerName()
    if (username.find(u'\\') != (-1)):
        domain = username.split(u'\\')[0]
        username = username.split(u'\\')[(-1)]
    domain = domain.upper()
    return win32security.ConvertSidToStringSid(win32security.LookupAccountName(None, ((domain + u'\\') + username))[0])
| [
"def",
"getUserSid",
"(",
"username",
")",
":",
"domain",
"=",
"win32api",
".",
"GetComputerName",
"(",
")",
"if",
"(",
"username",
".",
"find",
"(",
"u'\\\\'",
")",
"!=",
"(",
"-",
"1",
")",
")",
":",
"domain",
"=",
"username",
".",
"split",
"(",
... | get the security id for the user . | train | false |
14,140 | def test_if_marathon_app_can_be_deployed(dcos_api_session):
    dcos_api_session.marathon.deploy_test_app_and_check(*get_test_app())
| [
"def",
"test_if_marathon_app_can_be_deployed",
"(",
"dcos_api_session",
")",
":",
"dcos_api_session",
".",
"marathon",
".",
"deploy_test_app_and_check",
"(",
"*",
"get_test_app",
"(",
")",
")"
] | marathon app deployment integration test this test verifies that marathon app can be deployed . | train | false |
14,141 | def sync_job_reset(r, **attr):
    if r.interactive:
        if (r.component and (r.component.alias == 'job')):
            job_id = r.component_id
            if job_id:
                S3Task.reset(job_id)
                current.session.confirmation = current.T('Job reactivated')
    r.component_id = None
    redirect(r.url(method=''))
| [
"def",
"sync_job_reset",
"(",
"r",
",",
"**",
"attr",
")",
":",
"if",
"r",
".",
"interactive",
":",
"if",
"(",
"r",
".",
"component",
"and",
"(",
"r",
".",
"component",
".",
"alias",
"==",
"'job'",
")",
")",
":",
"job_id",
"=",
"r",
".",
"compone... | restful method to reset a job status from failed to queued . | train | false |
14,142 | def Pareto(name, xm, alpha):
    return rv(name, ParetoDistribution, (xm, alpha))
| [
"def",
"Pareto",
"(",
"name",
",",
"xm",
",",
"alpha",
")",
":",
"return",
"rv",
"(",
"name",
",",
"ParetoDistribution",
",",
"(",
"xm",
",",
"alpha",
")",
")"
] | create a continuous random variable with the pareto distribution . | train | false |
14,144 | def load_after_create(request, database):
    tablename = request.REQUEST.get('table')
    path = request.REQUEST.get('path')
    if ((not tablename) or (not path)):
        msg = _('Internal error: Missing needed parameter to load data into table.')
        LOG.error(msg)
        raise PopupException(msg)
    LOG.debug(('Auto loading data from %s into table %s' % (path, tablename)))
    hql = ("LOAD DATA INPATH '%s' INTO TABLE `%s.%s`" % (path, database, tablename))
    query = hql_query(hql)
    on_success_url = reverse('metastore:describe_table', kwargs={'database': database, 'table': tablename})
    try:
        return execute_directly(request, query, on_success_url=on_success_url)
    except Exception as e:
        message = 'The table data could not be loaded'
        LOG.exception(message)
        detail = (e.message if (hasattr(e, 'message') and e.message) else None)
        raise PopupException(_(message), detail=detail)
| [
"def",
"load_after_create",
"(",
"request",
",",
"database",
")",
":",
"tablename",
"=",
"request",
".",
"REQUEST",
".",
"get",
"(",
"'table'",
")",
"path",
"=",
"request",
".",
"REQUEST",
".",
"get",
"(",
"'path'",
")",
"if",
"(",
"(",
"not",
"tablena... | automatically load data into a newly created table . | train | false |
14,145 | def load_language(cr, lang):
    env = odoo.api.Environment(cr, SUPERUSER_ID, {})
    installer = env['base.language.install'].create({'lang': lang})
    installer.lang_install()
| [
"def",
"load_language",
"(",
"cr",
",",
"lang",
")",
":",
"env",
"=",
"odoo",
".",
"api",
".",
"Environment",
"(",
"cr",
",",
"SUPERUSER_ID",
",",
"{",
"}",
")",
"installer",
"=",
"env",
"[",
"'base.language.install'",
"]",
".",
"create",
"(",
"{",
"... | loads a translation terms for a language . | train | false |
14,146 | def register_atexit_function(func):
    atexit.register(func)
| [
"def",
"register_atexit_function",
"(",
"func",
")",
":",
"atexit",
".",
"register",
"(",
"func",
")"
] | register a function for processing at program exit . | train | false |
14,147 | def _valid_composer(composer):
    if salt.utils.which(composer):
        return True
    return False
| [
"def",
"_valid_composer",
"(",
"composer",
")",
":",
"if",
"salt",
".",
"utils",
".",
"which",
"(",
"composer",
")",
":",
"return",
"True",
"return",
"False"
] | validate the composer file is indeed there . | train | false |
14,148 | def get_text_list(list_, last_word='or'):
    if (len(list_) == 0):
        return ''
    if (len(list_) == 1):
        return list_[0]
    return ('%s %s %s' % (', '.join([str(i) for i in list_][:(-1)]), last_word, list_[(-1)]))
| [
"def",
"get_text_list",
"(",
"list_",
",",
"last_word",
"=",
"'or'",
")",
":",
"if",
"(",
"len",
"(",
"list_",
")",
"==",
"0",
")",
":",
"return",
"''",
"if",
"(",
"len",
"(",
"list_",
")",
"==",
"1",
")",
":",
"return",
"list_",
"[",
"0",
"]",... | return a string with a natural enumeration of items . | train | false |
14,149 | def update_classpath(path):
    for (prefix, replacement) in DEPRECATION_RULES:
        if path.startswith(prefix):
            new_path = path.replace(prefix, replacement, 1)
            warnings.warn('`{}` class is deprecated, use `{}` instead'.format(path, new_path), ScrapyDeprecationWarning)
            return new_path
    return path
| [
"def",
"update_classpath",
"(",
"path",
")",
":",
"for",
"(",
"prefix",
",",
"replacement",
")",
"in",
"DEPRECATION_RULES",
":",
"if",
"path",
".",
"startswith",
"(",
"prefix",
")",
":",
"new_path",
"=",
"path",
".",
"replace",
"(",
"prefix",
",",
"repla... | update a deprecated path from an object with its new location . | train | false |
14,150 | def test_batch_normalized_mlp_learn_shift_propagated_at_alloc():
    mlp = BatchNormalizedMLP([Tanh(), Tanh()], [5, 7, 9], learn_shift=False)
    assert (not mlp.learn_shift)
    assert all((act.children[0].learn_shift for act in mlp.activations))
    mlp.allocate()
    assert (not any((act.children[0].learn_shift for act in mlp.activations)))
| [
"def",
"test_batch_normalized_mlp_learn_shift_propagated_at_alloc",
"(",
")",
":",
"mlp",
"=",
"BatchNormalizedMLP",
"(",
"[",
"Tanh",
"(",
")",
",",
"Tanh",
"(",
")",
"]",
",",
"[",
"5",
",",
"7",
",",
"9",
"]",
",",
"learn_shift",
"=",
"False",
")",
"a... | test that setting learn_shift on a batchnormalizedmlp works . | train | false |
14,151 | def compare_lists(old=None, new=None):
    ret = dict()
    for item in new:
        if (item not in old):
            ret['new'] = item
    for item in old:
        if (item not in new):
            ret['old'] = item
    return ret
| [
"def",
"compare_lists",
"(",
"old",
"=",
"None",
",",
"new",
"=",
"None",
")",
":",
"ret",
"=",
"dict",
"(",
")",
"for",
"item",
"in",
"new",
":",
"if",
"(",
"item",
"not",
"in",
"old",
")",
":",
"ret",
"[",
"'new'",
"]",
"=",
"item",
"for",
... | compare before and after results from various salt functions . | train | true |
14,152 | def disable_failed_job(registry, xml_parent, data):
    xml_element = XML.SubElement(xml_parent, 'disableFailedJob.disableFailedJob.DisableFailedJob', {'plugin': 'disable-failed-job'})
    valid_conditions = ['Only Failure', 'Failure and Unstable', 'Only Unstable']
    mapping = [('when-to-disable', 'whenDisable', None, valid_conditions)]
    helpers.convert_mapping_to_xml(xml_element, data, mapping, fail_required=True)
    if ('no-of-failures' in data):
        XML.SubElement(xml_element, 'failureTimes').text = str(data.get('no-of-failures'))
        XML.SubElement(xml_element, 'optionalBrockChecked').text = 'true'
    else:
        XML.SubElement(xml_element, 'optionalBrockChecked').text = 'false'
| [
"def",
"disable_failed_job",
"(",
"registry",
",",
"xml_parent",
",",
"data",
")",
":",
"xml_element",
"=",
"XML",
".",
"SubElement",
"(",
"xml_parent",
",",
"'disableFailedJob.disableFailedJob.DisableFailedJob'",
",",
"{",
"'plugin'",
":",
"'disable-failed-job'",
"}"... | yaml: disable-failed-job automatically disable failed jobs . | train | false |
14,153 | def sdm_nf_buchberger(f, G, O, K, phantom=None):
    from itertools import repeat
    h = f
    T = list(G)
    if (phantom is not None):
        hp = phantom[0]
        Tp = list(phantom[1])
        phantom = True
    else:
        Tp = repeat([])
        phantom = False
    while h:
        try:
            (g, gp) = next(((g, gp) for (g, gp) in zip(T, Tp) if sdm_monomial_divides(sdm_LM(g), sdm_LM(h))))
        except StopIteration:
            break
        if phantom:
            (h, hp) = sdm_spoly(h, g, O, K, phantom=(hp, gp))
        else:
            h = sdm_spoly(h, g, O, K)
    if phantom:
        return (h, hp)
    return h
| [
"def",
"sdm_nf_buchberger",
"(",
"f",
",",
"G",
",",
"O",
",",
"K",
",",
"phantom",
"=",
"None",
")",
":",
"from",
"itertools",
"import",
"repeat",
"h",
"=",
"f",
"T",
"=",
"list",
"(",
"G",
")",
"if",
"(",
"phantom",
"is",
"not",
"None",
")",
... | compute a weak normal form of f with respect to g and order o . | train | false |
14,154 | def lookupResponsibility(name, timeout=None):
    return getResolver().lookupResponsibility(name, timeout)
| [
"def",
"lookupResponsibility",
"(",
"name",
",",
"timeout",
"=",
"None",
")",
":",
"return",
"getResolver",
"(",
")",
".",
"lookupResponsibility",
"(",
"name",
",",
"timeout",
")"
] | perform an rp record lookup . | train | false |
14,155 | def to_bokeh(fig=None, tools=DEFAULT_TOOLS, use_pandas=True, xkcd=False):
    if (fig is None):
        fig = plt.gcf()
    renderer = BokehRenderer(tools, use_pandas, xkcd)
    exporter = BokehExporter(renderer)
    exporter.run(fig)
    return renderer.fig
| [
"def",
"to_bokeh",
"(",
"fig",
"=",
"None",
",",
"tools",
"=",
"DEFAULT_TOOLS",
",",
"use_pandas",
"=",
"True",
",",
"xkcd",
"=",
"False",
")",
":",
"if",
"(",
"fig",
"is",
"None",
")",
":",
"fig",
"=",
"plt",
".",
"gcf",
"(",
")",
"renderer",
"=... | uses bokeh to display a matplotlib figure . | train | false |
14,156 | def require_level(level):
    if (level not in ['instructor', 'staff']):
        raise ValueError("unrecognized level '{}'".format(level))
    def decorator(func):
        def wrapped(*args, **kwargs):
            request = args[0]
            course = get_course_by_id(CourseKey.from_string(kwargs['course_id']))
            if has_access(request.user, level, course):
                return func(*args, **kwargs)
            else:
                return HttpResponseForbidden()
        return wrapped
    return decorator
| [
"def",
"require_level",
"(",
"level",
")",
":",
"if",
"(",
"level",
"not",
"in",
"[",
"'instructor'",
",",
"'staff'",
"]",
")",
":",
"raise",
"ValueError",
"(",
"\"unrecognized level '{}'\"",
".",
"format",
"(",
"level",
")",
")",
"def",
"decorator",
"(",
... | decorator with argument that requires an access level of the requesting user . | train | false |
14,157 | def _iter_service_names():
    found = set()
    for line in glob.glob('/etc/init.d/*'):
        name = os.path.basename(line)
        found.add(name)
        (yield name)
    init_root = '/etc/init/'
    for (root, dirnames, filenames) in os.walk(init_root):
        relpath = os.path.relpath(root, init_root)
        for filename in fnmatch.filter(filenames, '*.conf'):
            if (relpath == '.'):
                name = filename[:(-5)]
            else:
                name = os.path.join(relpath, filename[:(-5)])
            if (name in found):
                continue
            (yield name)
| [
"def",
"_iter_service_names",
"(",
")",
":",
"found",
"=",
"set",
"(",
")",
"for",
"line",
"in",
"glob",
".",
"glob",
"(",
"'/etc/init.d/*'",
")",
":",
"name",
"=",
"os",
".",
"path",
".",
"basename",
"(",
"line",
")",
"found",
".",
"add",
"(",
"na... | detect all of the service names available to upstart via init configuration files and via classic sysv init scripts . | train | true |
14,158 | def map_unreachable_exception(e):
    exception_class = FacebookUnreachable
    if isinstance(e, ssl.SSLError):
        exception_class = FacebookSSLError
    elif isinstance(e, HTTPError):
        exception_class = FacebookHTTPError
    elif isinstance(e, URLError):
        exception_class = FacebookURLError
    return exception_class
| [
"def",
"map_unreachable_exception",
"(",
"e",
")",
":",
"exception_class",
"=",
"FacebookUnreachable",
"if",
"isinstance",
"(",
"e",
",",
"ssl",
".",
"SSLError",
")",
":",
"exception_class",
"=",
"FacebookSSLError",
"elif",
"isinstance",
"(",
"e",
",",
"HTTPErro... | we always raise the original and new subclass to - preserve backwards compatibility . | train | false |
14,159 | @FileSystem.in_directory(current_directory, 'django', 'bamboo')
def test_mail_content():
    (status, out) = run_scenario('leaves', 'content', 1)
    assert_equals(status, 0, out)
    (status, out) = run_scenario('leaves', 'content', 2)
    assert_equals(status, 0, out)
    (status, out) = run_scenario('leaves', 'content', 3)
    assert_not_equals(status, 0)
    assert ('An email contained expected text in the body' in out)
| [
"@",
"FileSystem",
".",
"in_directory",
"(",
"current_directory",
",",
"'django'",
",",
"'bamboo'",
")",
"def",
"test_mail_content",
"(",
")",
":",
"(",
"status",
",",
"out",
")",
"=",
"run_scenario",
"(",
"'leaves'",
",",
"'content'",
",",
"1",
")",
"asse... | mail content is checked through lettuce steps . | train | false |
14,160 | def get_tax_summary(source):
    all_line_taxes = []
    untaxed = TaxlessPrice(source.create_price(0).amount)
    for line in source.get_final_lines():
        line_taxes = list(line.taxes)
        all_line_taxes.extend(line_taxes)
        if (not line_taxes):
            untaxed += line.taxless_price
    return TaxSummary.from_line_taxes(all_line_taxes, untaxed)
| [
"def",
"get_tax_summary",
"(",
"source",
")",
":",
"all_line_taxes",
"=",
"[",
"]",
"untaxed",
"=",
"TaxlessPrice",
"(",
"source",
".",
"create_price",
"(",
"0",
")",
".",
"amount",
")",
"for",
"line",
"in",
"source",
".",
"get_final_lines",
"(",
")",
":... | get tax summary of given source lines . | train | false |
14,161 | def check_for_multiple(files):
    for regex in _RE_MULTIPLE:
        matched_files = check_for_sequence(regex, files)
        if matched_files:
            return matched_files
    return ''
| [
"def",
"check_for_multiple",
"(",
"files",
")",
":",
"for",
"regex",
"in",
"_RE_MULTIPLE",
":",
"matched_files",
"=",
"check_for_sequence",
"(",
"regex",
",",
"files",
")",
"if",
"matched_files",
":",
"return",
"matched_files",
"return",
"''"
] | return list of files that looks like a multi-part post . | train | false |
14,163 | def gcp_connect(module, provider, get_driver, user_agent_product, user_agent_version):
    if (not HAS_LIBCLOUD_BASE):
        module.fail_json(msg='libcloud must be installed to use this module')
    creds = _get_gcp_credentials(module, require_valid_json=False, check_libcloud=True)
    try:
        gcp = get_driver(provider)(creds['service_account_email'], creds['credentials_file'], datacenter=module.params.get('zone', None), project=creds['project_id'])
        gcp.connection.user_agent_append(('%s/%s' % (user_agent_product, user_agent_version)))
    except (RuntimeError, ValueError) as e:
        module.fail_json(msg=str(e), changed=False)
    except Exception as e:
        module.fail_json(msg=unexpected_error_msg(e), changed=False)
    return gcp
| [
"def",
"gcp_connect",
"(",
"module",
",",
"provider",
",",
"get_driver",
",",
"user_agent_product",
",",
"user_agent_version",
")",
":",
"if",
"(",
"not",
"HAS_LIBCLOUD_BASE",
")",
":",
"module",
".",
"fail_json",
"(",
"msg",
"=",
"'libcloud must be installed to u... | return a google libcloud driver connection . | train | false |
14,164 | @handle_response_format
@treeio_login_required
def queue_edit(request, queue_id, response_format='html'):
    queue = get_object_or_404(TicketQueue, pk=queue_id)
    if (not request.user.profile.has_permission(queue, mode='w')):
        return user_denied(request, message="You don't have access to this Queue")
    if request.POST:
        if ('cancel' not in request.POST):
            form = QueueForm(request.user.profile, request.POST, instance=queue)
            if form.is_valid():
                queue = form.save()
                return HttpResponseRedirect(reverse('services_queue_view', args=[queue.id]))
        else:
            return HttpResponseRedirect(reverse('services_queue_view', args=[queue.id]))
    else:
        form = QueueForm(request.user.profile, instance=queue)
    context = _get_default_context(request)
    context.update({'queue': queue, 'form': form})
    return render_to_response('services/queue_edit', context, context_instance=RequestContext(request), response_format=response_format)
| [
"@",
"handle_response_format",
"@",
"treeio_login_required",
"def",
"queue_edit",
"(",
"request",
",",
"queue_id",
",",
"response_format",
"=",
"'html'",
")",
":",
"queue",
"=",
"get_object_or_404",
"(",
"TicketQueue",
",",
"pk",
"=",
"queue_id",
")",
"if",
"(",... | queue edit . | train | false |
14,166 | def onLoseChargeCB(ordersID, dbid, success, datas):
    DEBUG_MSG(('onLoseChargeCB: ordersID=%s, dbid=%i, success=%i, datas=%s' % (ordersID, dbid, success, datas)))
| [
"def",
"onLoseChargeCB",
"(",
"ordersID",
",",
"dbid",
",",
"success",
",",
"datas",
")",
":",
"DEBUG_MSG",
"(",
"(",
"'onLoseChargeCB: ordersID=%s, dbid=%i, success=%i, datas=%s'",
"%",
"(",
"ordersID",
",",
"dbid",
",",
"success",
",",
"datas",
")",
")",
")"
] | kbengine method . | train | false |
14,167 | def parse_exclude_devices(exclude_list):
    exclude_mapping = {}
    for dev_mapping in exclude_list:
        try:
            (dev_name, exclude_devices) = dev_mapping.split(':', 1)
        except ValueError:
            raise ValueError((_("Invalid mapping: '%s'") % dev_mapping))
        dev_name = dev_name.strip()
        if (not dev_name):
            raise ValueError((_("Missing key in mapping: '%s'") % dev_mapping))
        if (dev_name in exclude_mapping):
            raise ValueError((_('Device %(dev_name)s in mapping: %(mapping)s not unique') % {'dev_name': dev_name, 'mapping': dev_mapping}))
        exclude_devices_list = exclude_devices.split(';')
        exclude_devices_set = set()
        for dev in exclude_devices_list:
            dev = dev.strip()
            if dev:
                exclude_devices_set.add(dev)
        exclude_mapping[dev_name] = exclude_devices_set
    return exclude_mapping
| [
"def",
"parse_exclude_devices",
"(",
"exclude_list",
")",
":",
"exclude_mapping",
"=",
"{",
"}",
"for",
"dev_mapping",
"in",
"exclude_list",
":",
"try",
":",
"(",
"dev_name",
",",
"exclude_devices",
")",
"=",
"dev_mapping",
".",
"split",
"(",
"':'",
",",
"1"... | parse exclude devices list parses excluded device list in the form: dev_name:pci_dev_1;pci_dev_2 . | train | false |
14,169 | def bulk_stop(workers, lbn, profile='default'):
    ret = {}
    if isinstance(workers, str):
        workers = workers.split(',')
    for worker in workers:
        try:
            ret[worker] = worker_stop(worker, lbn, profile)
        except Exception:
            ret[worker] = False
    return ret
| [
"def",
"bulk_stop",
"(",
"workers",
",",
"lbn",
",",
"profile",
"=",
"'default'",
")",
":",
"ret",
"=",
"{",
"}",
"if",
"isinstance",
"(",
"workers",
",",
"str",
")",
":",
"workers",
"=",
"workers",
".",
"split",
"(",
"','",
")",
"for",
"worker",
"... | stop all the given workers in the specific load balancer cli examples: . | train | false |
14,171 | def worker_stopped(name, workers=None, profile='default'):
    if (workers is None):
        workers = []
    return _bulk_state('modjk.bulk_stop', name, workers, profile)
| [
"def",
"worker_stopped",
"(",
"name",
",",
"workers",
"=",
"None",
",",
"profile",
"=",
"'default'",
")",
":",
"if",
"(",
"workers",
"is",
"None",
")",
":",
"workers",
"=",
"[",
"]",
"return",
"_bulk_state",
"(",
"'modjk.bulk_stop'",
",",
"name",
",",
... | stop all the workers in the modjk load balancer example: . | train | true |
14,172 | def coerce_to_list(val):
    if val:
        if (not isinstance(val, (list, tuple))):
            val = [val]
    else:
        val = []
    return val
| [
"def",
"coerce_to_list",
"(",
"val",
")",
":",
"if",
"val",
":",
"if",
"(",
"not",
"isinstance",
"(",
"val",
",",
"(",
"list",
",",
"tuple",
")",
")",
")",
":",
"val",
"=",
"[",
"val",
"]",
"else",
":",
"val",
"=",
"[",
"]",
"return",
"val"
] | for parameters that can take either a single string or a list of strings . | train | true |
14,174 | def course_start_date_is_default(start, advertised_start):
    return ((advertised_start is None) and (start == DEFAULT_START_DATE))
| [
"def",
"course_start_date_is_default",
"(",
"start",
",",
"advertised_start",
")",
":",
"return",
"(",
"(",
"advertised_start",
"is",
"None",
")",
"and",
"(",
"start",
"==",
"DEFAULT_START_DATE",
")",
")"
] | returns whether a courses start date hasnt yet been set . | train | false |
14,176 | def major_version(best=False):
    return _distro.major_version(best)
| [
"def",
"major_version",
"(",
"best",
"=",
"False",
")",
":",
"return",
"_distro",
".",
"major_version",
"(",
"best",
")"
] | return the major version of the current linux distribution . | train | false |
14,177 | def _plot_update_evoked(params, bools):
    (picks, evoked) = [params[k] for k in ('picks', 'evoked')]
    times = (evoked.times * 1000.0)
    projs = [proj for (ii, proj) in enumerate(params['projs']) if (ii in np.where(bools)[0])]
    params['proj_bools'] = bools
    new_evoked = evoked.copy()
    new_evoked.info['projs'] = []
    new_evoked.add_proj(projs)
    new_evoked.apply_proj()
    for (ax, t) in zip(params['axes'], params['ch_types_used']):
        this_scaling = params['scalings'][t]
        idx = [picks[i] for i in range(len(picks)) if (params['types'][i] == t)]
        D = (this_scaling * new_evoked.data[idx, :])
        if (params['plot_type'] == 'butterfly'):
            for (line, di) in zip(ax.lines, D):
                line.set_data(times, di)
        else:
            ax.images[0].set_data(D)
    params['fig'].canvas.draw()
| [
"def",
"_plot_update_evoked",
"(",
"params",
",",
"bools",
")",
":",
"(",
"picks",
",",
"evoked",
")",
"=",
"[",
"params",
"[",
"k",
"]",
"for",
"k",
"in",
"(",
"'picks'",
",",
"'evoked'",
")",
"]",
"times",
"=",
"(",
"evoked",
".",
"times",
"*",
... | update the plot evoked lines . | train | false |
14,178 | def add_serializer_errors(serializer, data, field_errors):
    if (not serializer.is_valid()):
        errors = serializer.errors
        for (key, error) in errors.iteritems():
            field_errors[key] = {'developer_message': u"Value '{field_value}' is not valid for field '{field_name}': {error}".format(field_value=data.get(key, ''), field_name=key, error=error), 'user_message': _(u'This value is invalid.')}
    return field_errors
| [
"def",
"add_serializer_errors",
"(",
"serializer",
",",
"data",
",",
"field_errors",
")",
":",
"if",
"(",
"not",
"serializer",
".",
"is_valid",
"(",
")",
")",
":",
"errors",
"=",
"serializer",
".",
"errors",
"for",
"(",
"key",
",",
"error",
")",
"in",
... | adds errors from serializer validation to field_errors . | train | false |
14,179 | def ipexec(fname, options=None, commands=()):
    if (options is None):
        options = []
    cmdargs = (default_argv() + options)
    test_dir = os.path.dirname(__file__)
    ipython_cmd = get_ipython_cmd()
    full_fname = os.path.join(test_dir, fname)
    full_cmd = ((ipython_cmd + cmdargs) + [full_fname])
    env = os.environ.copy()
    env['PYTHONWARNINGS'] = 'ignore'
    for (k, v) in env.items():
        if (not isinstance(v, str)):
            print (k, v)
    p = Popen(full_cmd, stdout=PIPE, stderr=PIPE, stdin=PIPE, env=env)
    (out, err) = p.communicate(input=(py3compat.str_to_bytes('\n'.join(commands)) or None))
    (out, err) = (py3compat.bytes_to_str(out), py3compat.bytes_to_str(err))
    if out:
        out = re.sub('\\x1b\\[[^h]+h', '', out)
    return (out, err)
| [
"def",
"ipexec",
"(",
"fname",
",",
"options",
"=",
"None",
",",
"commands",
"=",
"(",
")",
")",
":",
"if",
"(",
"options",
"is",
"None",
")",
":",
"options",
"=",
"[",
"]",
"cmdargs",
"=",
"(",
"default_argv",
"(",
")",
"+",
"options",
")",
"tes... | utility to call ipython filename . | train | false |
14,180 | @parallel
def logs():
    sudo('tail -f {logdir}* /var/log/nginx/*.log'.format(logdir=LOG_DIR))
| [
"@",
"parallel",
"def",
"logs",
"(",
")",
":",
"sudo",
"(",
"'tail -f {logdir}* /var/log/nginx/*.log'",
".",
"format",
"(",
"logdir",
"=",
"LOG_DIR",
")",
")"
] | returns the logs for the container . | train | false |
14,181 | def default_keychain(name, domain='user', user=None):
    ret = {'name': name, 'result': True, 'comment': '', 'changes': {}}
    if (not os.path.exists(name)):
        ret['result'] = False
        ret['comment'] += 'Keychain not found at {0}'.format(name)
    else:
        out = __salt__['keychain.get_default_keychain'](user, domain)
        if (name in out):
            ret['comment'] += '{0} was already the default keychain.'.format(name)
        else:
            out = __salt__['keychain.set_default_keychain'](name, domain, user)
            if (len(out) == 0):
                ret['changes']['default'] = name
            else:
                ret['result'] = False
                ret['comment'] = 'Failed to install keychain. {0}'.format(out)
    return ret
| [
"def",
"default_keychain",
"(",
"name",
",",
"domain",
"=",
"'user'",
",",
"user",
"=",
"None",
")",
":",
"ret",
"=",
"{",
"'name'",
":",
"name",
",",
"'result'",
":",
"True",
",",
"'comment'",
":",
"''",
",",
"'changes'",
":",
"{",
"}",
"}",
"if",... | set the default keychain to use name the chain in which to use as the default domain the domain to use valid values are user|system|common|dynamic . | train | true |
14,182 | def create_temporary_ca_path(anchor_list, folder):
try:
if (not os.path.isdir(folder)):
os.makedirs(folder)
except:
return None
l = len(anchor_list)
if (l == 0):
return None
fmtstr = ('%%0%sd.pem' % math.ceil(math.log(l, 10)))
i = 0
try:
for a in anchor_list:
fname = os.path.join(folder, (fmtstr % i))
f = open(fname, 'w')
s = a.output(fmt='PEM')
f.write(s)
f.close()
i += 1
except:
return None
(r, w) = popen2.popen2(('c_rehash %s' % folder))
r.close()
w.close()
return l
| [
"def",
"create_temporary_ca_path",
"(",
"anchor_list",
",",
"folder",
")",
":",
"try",
":",
"if",
"(",
"not",
"os",
".",
"path",
".",
"isdir",
"(",
"folder",
")",
")",
":",
"os",
".",
"makedirs",
"(",
"folder",
")",
"except",
":",
"return",
"None",
"... | create a ca path folder as defined in openssl terminology . | train | true |
14,184 | def rowcol_to_a1(row, col):
row = int(row)
col = int(col)
if ((row < 1) or (col < 1)):
raise IncorrectCellLabel(('(%s, %s)' % (row, col)))
div = col
column_label = ''
while div:
(div, mod) = divmod(div, 26)
if (mod == 0):
mod = 26
div -= 1
column_label = (chr((mod + MAGIC_NUMBER)) + column_label)
label = ('%s%s' % (column_label, row))
return label
| [
"def",
"rowcol_to_a1",
"(",
"row",
",",
"col",
")",
":",
"row",
"=",
"int",
"(",
"row",
")",
"col",
"=",
"int",
"(",
"col",
")",
"if",
"(",
"(",
"row",
"<",
"1",
")",
"or",
"(",
"col",
"<",
"1",
")",
")",
":",
"raise",
"IncorrectCellLabel",
"... | translates a row and column cell address to a1 notation . | train | true |
14,185 | def reset():
_runtime.reset()
| [
"def",
"reset",
"(",
")",
":",
"_runtime",
".",
"reset",
"(",
")"
] | reset the registry of providers . | train | false |
14,187 | def object_compat(function):
@functools.wraps(function)
def decorated_function(self, context, *args, **kwargs):
def _load_instance(instance_or_dict):
if isinstance(instance_or_dict, dict):
metas = [meta for meta in ('metadata', 'system_metadata') if (meta in instance_or_dict)]
instance = objects.Instance._from_db_object(context, objects.Instance(), instance_or_dict, expected_attrs=metas)
instance._context = context
return instance
return instance_or_dict
try:
kwargs['instance'] = _load_instance(kwargs['instance'])
except KeyError:
args = ((_load_instance(args[0]),) + args[1:])
migration = kwargs.get('migration')
if isinstance(migration, dict):
migration = objects.Migration._from_db_object(context.elevated(), objects.Migration(), migration)
kwargs['migration'] = migration
return function(self, context, *args, **kwargs)
return decorated_function
| [
"def",
"object_compat",
"(",
"function",
")",
":",
"@",
"functools",
".",
"wraps",
"(",
"function",
")",
"def",
"decorated_function",
"(",
"self",
",",
"context",
",",
"*",
"args",
",",
"**",
"kwargs",
")",
":",
"def",
"_load_instance",
"(",
"instance_or_d... | wraps a method that expects a new-world instance this provides compatibility for callers passing old-style dict instances . | train | false |
14,189 | @pytest.mark.parametrize('i', range(len(ITEMS)))
def test_no_userdata(objects, i):
assert (objects.history.itemAt(i).userData() is None)
| [
"@",
"pytest",
".",
"mark",
".",
"parametrize",
"(",
"'i'",
",",
"range",
"(",
"len",
"(",
"ITEMS",
")",
")",
")",
"def",
"test_no_userdata",
"(",
"objects",
",",
"i",
")",
":",
"assert",
"(",
"objects",
".",
"history",
".",
"itemAt",
"(",
"i",
")"... | check if all items have no user data . | train | false |
14,190 | @register.tag('static')
def do_static(parser, token):
return StaticNode.handle_token(parser, token)
| [
"@",
"register",
".",
"tag",
"(",
"'static'",
")",
"def",
"do_static",
"(",
"parser",
",",
"token",
")",
":",
"return",
"StaticNode",
".",
"handle_token",
"(",
"parser",
",",
"token",
")"
] | a template tag that returns the url to a file using staticfiles storage backend usage:: {% static path [as varname] %} examples:: {% static "myapp/css/base . | train | false |
14,191 | def module_path(local_function):
return os.path.abspath(inspect.getsourcefile(local_function))
| [
"def",
"module_path",
"(",
"local_function",
")",
":",
"return",
"os",
".",
"path",
".",
"abspath",
"(",
"inspect",
".",
"getsourcefile",
"(",
"local_function",
")",
")"
] | returns the module path without the use of __file__ . | train | false |
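Usage is trivial but worth spelling out: pass any function defined in a module on disk (inspect.getsourcefile fails for functions typed into a bare REPL):

    def _probe():
        pass

    module_path(_probe)  # absolute path of the .py file that defines _probe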
14,192 | def p_constant_expression_opt_1(t):
pass
| [
"def",
"p_constant_expression_opt_1",
"(",
"t",
")",
":",
"pass"
] | constant_expression_opt : empty . | train | false |
14,194 | def _dict_values_match(*args, **kwargs):
matchers = dict(*args, **kwargs)
def extract_val(key):
def extract_val_for_key(d):
return d.get(key)
return extract_val_for_key
return MatchesAll(*list((AfterPreprocessing(extract_val(key), value) for (key, value) in matchers.iteritems())))
| [
"def",
"_dict_values_match",
"(",
"*",
"args",
",",
"**",
"kwargs",
")",
":",
"matchers",
"=",
"dict",
"(",
"*",
"args",
",",
"**",
"kwargs",
")",
"def",
"extract_val",
"(",
"key",
")",
":",
"def",
"extract_val_for_key",
"(",
"d",
")",
":",
"return",
... | matcher that matches a dict where each of they keys match the matcher passed in . | train | false |
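A short sketch of the matcher in use (assumes the testtools matcher API the snippet's MatchesAll/AfterPreprocessing names come from):

    from testtools.matchers import Equals

    matcher = _dict_values_match(name=Equals('web'), port=Equals(80))
    matcher.match({'name': 'web', 'port': 80, 'extra': 'ignored'})  # None: all keys matched
    matcher.match({'name': 'db', 'port': 80})                       # a mismatch object for 'name'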
14,196 | def should_bypass_proxies(url):
get_proxy = (lambda k: (os.environ.get(k) or os.environ.get(k.upper())))
no_proxy = get_proxy('no_proxy')
netloc = urlparse(url).netloc
if no_proxy:
no_proxy = no_proxy.replace(' ', '').split(',')
ip = netloc.split(':')[0]
if is_ipv4_address(ip):
for proxy_ip in no_proxy:
if is_valid_cidr(proxy_ip):
if address_in_network(ip, proxy_ip):
return True
else:
for host in no_proxy:
if (netloc.endswith(host) or netloc.split(':')[0].endswith(host)):
return True
try:
bypass = proxy_bypass(netloc)
except (TypeError, socket.gaierror):
bypass = False
if bypass:
return True
return False
| [
"def",
"should_bypass_proxies",
"(",
"url",
")",
":",
"get_proxy",
"=",
"(",
"lambda",
"k",
":",
"(",
"os",
".",
"environ",
".",
"get",
"(",
"k",
")",
"or",
"os",
".",
"environ",
".",
"get",
"(",
"k",
".",
"upper",
"(",
")",
")",
")",
")",
"no_... | returns whether we should bypass proxies or not . | train | true |
14,197 | def is_mm_32_format(msg_string):
return (msg_string.startswith('mm') and (len(msg_string) == 34))
| [
"def",
"is_mm_32_format",
"(",
"msg_string",
")",
":",
"return",
"(",
"msg_string",
".",
"startswith",
"(",
"'mm'",
")",
"and",
"(",
"len",
"(",
"msg_string",
")",
"==",
"34",
")",
")"
] | missed message strings are formatted with a little "mm" prefix followed by a randomly generated 32-character string . | train | false |
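Two quick checks make the 34-character contract concrete:

    is_mm_32_format('mm' + 'a' * 32)  # True: 'mm' prefix + 32 chars == 34 total
    is_mm_32_format('mm' + 'a' * 31)  # False: wrong overall length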
14,199 | def unescape_entities(value):
    safe_characters = {'&amp;': '&'}
if isinstance(value, dict):
return {key: unescape_entities(value) for (key, value) in value.iteritems()}
if is_iterable_but_not_string(value):
return [unescape_entities(each) for each in value]
if isinstance(value, basestring):
for (escape_sequence, character) in safe_characters.items():
value = value.replace(escape_sequence, character)
return value
return value
| [
"def",
"unescape_entities",
"(",
"value",
")",
":",
"safe_characters",
"=",
"{",
"'&'",
":",
"'&'",
"}",
"if",
"isinstance",
"(",
"value",
",",
"dict",
")",
":",
"return",
"{",
"key",
":",
"unescape_entities",
"(",
"value",
")",
"for",
"(",
"key",
... | convert html-encoded data to literal characters . | train | false |
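With the '&amp;' key restored above, behaviour looks like this (strings are replaced directly; dict values and non-string iterables are handled recursively):

    unescape_entities('Fish &amp; Chips')             # 'Fish & Chips'
    unescape_entities({'title': 'Fish &amp; Chips'})  # {'title': 'Fish & Chips'}
    unescape_entities(['a &amp; b', 'c'])             # ['a & b', 'c']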
14,200 | def get_debtpaying_data(year, quarter):
if (ct._check_input(year, quarter) is True):
ct._write_head()
df = _get_debtpaying_data(year, quarter, 1, pd.DataFrame())
if (df is not None):
df['code'] = df['code'].map((lambda x: str(x).zfill(6)))
return df
| [
"def",
"get_debtpaying_data",
"(",
"year",
",",
"quarter",
")",
":",
"if",
"(",
"ct",
".",
"_check_input",
"(",
"year",
",",
"quarter",
")",
"is",
"True",
")",
":",
"ct",
".",
"_write_head",
"(",
")",
"df",
"=",
"_get_debtpaying_data",
"(",
"year",
","... | parameters year:int 年度 e . | train | false |
14,201 | def _find_missing_edge(G):
nodes = set(G)
for u in G:
missing = (nodes - set((list(G[u].keys()) + [u])))
if missing:
return (u, missing.pop())
| [
"def",
"_find_missing_edge",
"(",
"G",
")",
":",
"nodes",
"=",
"set",
"(",
"G",
")",
"for",
"u",
"in",
"G",
":",
"missing",
"=",
"(",
"nodes",
"-",
"set",
"(",
"(",
"list",
"(",
"G",
"[",
"u",
"]",
".",
"keys",
"(",
")",
")",
"+",
"[",
"u",... | given a non-complete graph g . | train | false |