id_within_dataset int64 1 55.5k | snippet stringlengths 19 14.2k | tokens listlengths 6 1.63k | nl stringlengths 6 352 | split_within_dataset stringclasses 1
value | is_duplicated bool 2
classes |
|---|---|---|---|---|---|
42,116 | def serialize_argument_at_idx(idx, all_args, accessor, types):
    """Serialize the argument at position ``idx`` of ``all_args``.

    Looks up the argument's declared 'type' entry and delegates to the
    module-level ``serialize_type`` helper.
    """
    type_name = all_args[idx]['type']
    return serialize_type(type_name, accessor, types)
| [
"def",
"serialize_argument_at_idx",
"(",
"idx",
",",
"all_args",
",",
"accessor",
",",
"types",
")",
":",
"type_name",
"=",
"all_args",
"[",
"idx",
"]",
"[",
"'type'",
"]",
"return",
"serialize_type",
"(",
"type_name",
",",
"accessor",
",",
"types",
")"
] | for an argument at the given index . | train | false |
42,118 | def get_permission(perms, p):
    """Populate ``perms`` with one boolean entry per known permission name.

    For every name returned by ``permlist()``, stores whether ``p`` holds
    the corresponding ``PERMS`` attribute.  Mutates ``perms`` in place and
    returns None.
    """
    for name in permlist():
        perms[name] = has_permission(p, getattr(PERMS, name))
| [
"def",
"get_permission",
"(",
"perms",
",",
"p",
")",
":",
"for",
"name",
"in",
"permlist",
"(",
")",
":",
"perms",
"[",
"name",
"]",
"=",
"has_permission",
"(",
"p",
",",
"getattr",
"(",
"PERMS",
",",
"name",
")",
")"
] | returns a dict with permission key . | train | false |
def sort_queryset(queryset, request, allowed_sorts, default=None):
    """Order ``queryset`` using the ``sort``/``dir`` GET parameters.

    Only fields listed in ``allowed_sorts`` are honoured ('dir=desc'
    reverses the ordering); otherwise the optional ``default`` ordering is
    applied.  Returns the (possibly re-ordered) queryset.
    """
    requested = request.GET.get('sort', None)
    if requested in allowed_sorts:
        descending = request.GET.get('dir', 'asc') == 'desc'
        return queryset.order_by(('-' if descending else '') + requested)
    if default:
        return queryset.order_by(default)
    return queryset
| [
"def",
"sort_queryset",
"(",
"queryset",
",",
"request",
",",
"allowed_sorts",
",",
"default",
"=",
"None",
")",
":",
"sort",
"=",
"request",
".",
"GET",
".",
"get",
"(",
"'sort'",
",",
"None",
")",
"if",
"(",
"sort",
"in",
"allowed_sorts",
")",
":",
... | sorts the queryset by one of allowed_sorts based on parameters sort and dir from request . | train | false |
42,120 | def url_to_file_path(url, filecache):
    """Return the file-cache path that ``filecache`` would use for ``url``.

    The URL is normalised into a cache key via CacheController.cache_url
    and then mapped through the cache's private ``_fn`` path function.
    """
    key = CacheController.cache_url(url)
    return filecache._fn(key)
| [
"def",
"url_to_file_path",
"(",
"url",
",",
"filecache",
")",
":",
"key",
"=",
"CacheController",
".",
"cache_url",
"(",
"url",
")",
"return",
"filecache",
".",
"_fn",
"(",
"key",
")"
] | return the file cache path based on the url . | train | true |
42,122 | def change_DOWNLOAD_PROPERS(download_propers):
    """Enable or disable the proper-download finder thread.

    The checkbox value is normalised to a bool; nothing happens when the
    setting is unchanged.  Enabling starts properFinderScheduler (unless
    already running); disabling stops it.
    """
    download_propers = checkbox_to_value(download_propers)
    if (sickbeard.DOWNLOAD_PROPERS == download_propers):
        return
    sickbeard.DOWNLOAD_PROPERS = download_propers
    if sickbeard.DOWNLOAD_PROPERS:
        if (not sickbeard.properFinderScheduler.enable):
            logger.log(u'Starting PROPERFINDER thread', logger.INFO)
            sickbeard.properFinderScheduler.silent = False
            sickbeard.properFinderScheduler.enable = True
        else:
            logger.log(u'Unable to start PROPERFINDER thread. Already running', logger.INFO)
    else:
        sickbeard.properFinderScheduler.enable = False
        # NOTE(review): silencing traktCheckerScheduler here looks like a
        # copy/paste slip -- properFinderScheduler.silent seems intended.
        # Confirm before changing.
        sickbeard.traktCheckerScheduler.silent = True
        logger.log(u'Stopping PROPERFINDER thread', logger.INFO)
| [
"def",
"change_DOWNLOAD_PROPERS",
"(",
"download_propers",
")",
":",
"download_propers",
"=",
"checkbox_to_value",
"(",
"download_propers",
")",
"if",
"(",
"sickbeard",
".",
"DOWNLOAD_PROPERS",
"==",
"download_propers",
")",
":",
"return",
"sickbeard",
".",
"DOWNLOAD_... | enable/disable proper download thread todo: make this return true/false on success/failure . | train | false |
42,123 | def instance_metadata_get(context, instance_uuid):
    """Get all metadata for an instance (delegates to the DB backend IMPL)."""
    return IMPL.instance_metadata_get(context, instance_uuid)
| [
"def",
"instance_metadata_get",
"(",
"context",
",",
"instance_uuid",
")",
":",
"return",
"IMPL",
".",
"instance_metadata_get",
"(",
"context",
",",
"instance_uuid",
")"
] | get all metadata for an instance . | train | false |
42,124 | def parse_content_range(content_range):
    """Parse a Content-Range header value into a tuple of ints.

    Returns the integer groups captured by the module-level
    ``_content_range_pattern``.

    Raises:
        ValueError: if the header does not match the expected pattern.
    """
    found = re.search(_content_range_pattern, content_range)
    if (not found):
        raise ValueError(('malformed Content-Range %r' % (content_range,)))
    return tuple((int(x) for x in found.groups()))
| [
"def",
"parse_content_range",
"(",
"content_range",
")",
":",
"found",
"=",
"re",
".",
"search",
"(",
"_content_range_pattern",
",",
"content_range",
")",
"if",
"(",
"not",
"found",
")",
":",
"raise",
"ValueError",
"(",
"(",
"'malformed Content-Range %r'",
"%",
... | parse a content-range header into . | train | false |
42,125 | def _minpoly_pow(ex, pw, x, dom, mp=None):
    """Return the minimal polynomial of ``ex**pw`` as an expression in x.

    ex : algebraic element
    pw : rational exponent
    x  : indeterminate of the polynomial
    dom: ground domain
    mp : minimal polynomial of ``ex``; computed when not supplied

    Raises NotAlgebraic for irrational exponents and ZeroDivisionError when
    a negative power of zero is requested.
    """
    pw = sympify(pw)
    if (not mp):
        mp = _minpoly_compose(ex, x, dom)
    if (not pw.is_rational):
        raise NotAlgebraic(("%s doesn't seem to be an algebraic element" % ex))
    if (pw < 0):
        # Negative exponent: invert the element first (minpoly of 1/ex).
        if (mp == x):
            raise ZeroDivisionError(('%s is zero' % ex))
        mp = _invertx(mp, x)
        if (pw == (-1)):
            return mp
        pw = (- pw)
        ex = (1 / ex)
    y = Dummy(str(x))
    mp = mp.subs({x: y})
    (n, d) = pw.as_numer_denom()
    # Eliminate y between mp(y) and x**d - y**n via the resultant, then
    # pick the irreducible factor that vanishes at ex**pw.
    res = Poly(resultant(mp, ((x ** d) - (y ** n)), gens=[y]), x, domain=dom)
    (_, factors) = res.factor_list()
    res = _choose_factor(factors, x, (ex ** pw), dom)
    return res.as_expr()
| [
"def",
"_minpoly_pow",
"(",
"ex",
",",
"pw",
",",
"x",
",",
"dom",
",",
"mp",
"=",
"None",
")",
":",
"pw",
"=",
"sympify",
"(",
"pw",
")",
"if",
"(",
"not",
"mp",
")",
":",
"mp",
"=",
"_minpoly_compose",
"(",
"ex",
",",
"x",
",",
"dom",
")",
... | returns minpoly parameters ex : algebraic element pw : rational number x : indeterminate of the polynomial dom: ground domain mp : minimal polynomial of p examples . | train | false |
def my_sleeping_function(random_base):
    """Block the current thread for ``random_base`` seconds.

    Used as simulated work inside a DAG execution.
    """
    duration = random_base
    time.sleep(duration)
| [
"def",
"my_sleeping_function",
"(",
"random_base",
")",
":",
"time",
".",
"sleep",
"(",
"random_base",
")"
] | this is a function that will run within the dag execution . | train | false |
def get_display_label(choices, status):
    """Map a raw status value onto its display label.

    ``choices`` is an iterable of (value, label) pairs; matching is done
    against the lower-cased ``status`` (None treated as '').  The raw
    status is returned unchanged when no pair matches.
    """
    needle = (status or '').lower()
    for value, label in choices:
        if value == needle:
            return label
    return status
| [
"def",
"get_display_label",
"(",
"choices",
",",
"status",
")",
":",
"for",
"(",
"value",
",",
"label",
")",
"in",
"choices",
":",
"if",
"(",
"value",
"==",
"(",
"status",
"or",
"''",
")",
".",
"lower",
"(",
")",
")",
":",
"display_label",
"=",
"la... | this method is used in places where a resources status or admin state labels need to assigned before they are sent to the view template . | train | true |
42,129 | def _interpret_emr_bootstrap_stderr(fs, matches, partial=True):
    """Extract task errors from EMR bootstrap-action stderr logs.

    ``matches`` are dicts with 'path', 'action_num' and 'node_id' keys.
    Each stderr file is parsed with ``_parse_task_stderr``; any error found
    is collected under result['errors'].  With ``partial`` True, parsing
    stops after the first error and result['partial'] is set.
    """
    result = {}
    for match in matches:
        stderr_path = match['path']
        task_error = _parse_task_stderr(_cat_log(fs, stderr_path))
        if task_error:
            # Copy before annotating so the parser's dict is not mutated.
            task_error = dict(task_error)
            task_error['path'] = stderr_path
            error = dict(action_num=match['action_num'], node_id=match['node_id'], task_error=task_error)
            result.setdefault('errors', [])
            result['errors'].append(error)
            if partial:
                result['partial'] = True
                break
    return result
| [
"def",
"_interpret_emr_bootstrap_stderr",
"(",
"fs",
",",
"matches",
",",
"partial",
"=",
"True",
")",
":",
"result",
"=",
"{",
"}",
"for",
"match",
"in",
"matches",
":",
"stderr_path",
"=",
"match",
"[",
"'path'",
"]",
"task_error",
"=",
"_parse_task_stderr... | extract errors from bootstrap stderr . | train | false |
42,130 | def write_worksheet_rels(worksheet, idx):
    """Serialize the worksheet's relationships to an XML document string.

    Emits one <Relationship> element per entry in worksheet.relationships
    and, when the sheet has charts, an extra drawing relationship pointing
    at ../drawings/drawing<idx>.xml.
    """
    root = Element('Relationships', {'xmlns': 'http://schemas.openxmlformats.org/package/2006/relationships'})
    for rel in worksheet.relationships:
        attrs = {'Id': rel.id, 'Type': rel.type, 'Target': rel.target}
        if rel.target_mode:
            attrs['TargetMode'] = rel.target_mode
        SubElement(root, 'Relationship', attrs)
    if worksheet._charts:
        # NOTE(review): 'rId1' is hard-coded and could collide with an id
        # already present in worksheet.relationships -- confirm upstream
        # guarantees this cannot happen.
        attrs = {'Id': 'rId1', 'Type': 'http://schemas.openxmlformats.org/officeDocument/2006/relationships/drawing', 'Target': ('../drawings/drawing%s.xml' % idx)}
        SubElement(root, 'Relationship', attrs)
    return get_document_content(root)
| [
"def",
"write_worksheet_rels",
"(",
"worksheet",
",",
"idx",
")",
":",
"root",
"=",
"Element",
"(",
"'Relationships'",
",",
"{",
"'xmlns'",
":",
"'http://schemas.openxmlformats.org/package/2006/relationships'",
"}",
")",
"for",
"rel",
"in",
"worksheet",
".",
"relati... | write relationships for the worksheet to xml . | train | false |
42,131 | def current_resource_name(request):
    """Return the resource name of the service matched by ``request``."""
    service = current_service(request)
    resource_name = service.viewset.get_name(service.resource)
    return resource_name
| [
"def",
"current_resource_name",
"(",
"request",
")",
":",
"service",
"=",
"current_service",
"(",
"request",
")",
"resource_name",
"=",
"service",
".",
"viewset",
".",
"get_name",
"(",
"service",
".",
"resource",
")",
"return",
"resource_name"
] | return the name used when the kinto . | train | false |
def make_equiv(lo, li):
    """Build the equivalence lists for duplicated outer inputs.

    Walks ``(o, i)`` pairs from ``zip(lo, li)``; the first inner input seen
    for each outer input is remembered, and every later repetition of that
    outer input contributes ``i`` to ``left`` and ``o`` to ``right``.
    Returns ``(left, right)``.
    """
    first_seen = OrderedDict()
    left = []
    right = []
    for outer, inner in zip(lo, li):
        if outer in first_seen:
            left.append(inner)
            right.append(outer)
        else:
            first_seen[outer] = inner
    return (left, right)
| [
"def",
"make_equiv",
"(",
"lo",
",",
"li",
")",
":",
"seeno",
"=",
"OrderedDict",
"(",
")",
"left",
"=",
"[",
"]",
"right",
"=",
"[",
"]",
"for",
"(",
"o",
",",
"i",
")",
"in",
"zip",
"(",
"lo",
",",
"li",
")",
":",
"if",
"(",
"o",
"in",
... | builds a dictionary of equivalences between inner inputs based on the equivalence of their corresponding outer inputs . | train | false |
42,133 | def enqueue_feedback_message_instant_email_task(user_id, reference):
    """Queue an instant feedback-email task for ``user_id``.

    The reference is serialized with ``to_dict`` and the task is enqueued
    with zero delay.
    """
    payload = {'user_id': user_id, 'reference_dict': reference.to_dict()}
    taskqueue_services.enqueue_task(feconf.TASK_URL_INSTANT_FEEDBACK_EMAILS, payload, 0)
| [
"def",
"enqueue_feedback_message_instant_email_task",
"(",
"user_id",
",",
"reference",
")",
":",
"payload",
"=",
"{",
"'user_id'",
":",
"user_id",
",",
"'reference_dict'",
":",
"reference",
".",
"to_dict",
"(",
")",
"}",
"taskqueue_services",
".",
"enqueue_task",
... | adds a send feedback email task into the task queue . | train | false |
42,134 | @register.function
@jinja2.contextfunction
def logs_tabnav(context):
    """Build the tab navigation for the reviewer log pages.

    Returns a list of (url name, tab id, label) tuples, filtered by the
    requesting user's 'Review' and 'ModerateReview' permissions.
    """
    request = context['request']
    if acl.action_allowed(request, 'Apps', 'Review'):
        rv = [('reviewers.apps.logs', 'logs', _('Reviews'))]
    else:
        rv = []
    if acl.action_allowed(request, 'Apps', 'ModerateReview'):
        rv.append(('reviewers.apps.moderatelog', 'moderatelog', _('Moderated Reviews')))
    return rv
| [
"@",
"register",
".",
"function",
"@",
"jinja2",
".",
"contextfunction",
"def",
"logs_tabnav",
"(",
"context",
")",
":",
"request",
"=",
"context",
"[",
"'request'",
"]",
"if",
"acl",
".",
"action_allowed",
"(",
"request",
",",
"'Apps'",
",",
"'Review'",
"... | returns tuple of tab navigation for the log pages . | train | false |
42,135 | def optimalWriteOrder():
    """Yield (metric, datapoints, dbFileExists) tuples in write order.

    Drains MetricCache (most-cached metrics first) and soft rate-limits
    the creation of new database files via the CREATE_BUCKET token bucket.
    """
    while MetricCache:
        (metric, datapoints) = MetricCache.drain_metric()
        dbFileExists = state.database.exists(metric)
        if ((not dbFileExists) and CREATE_BUCKET):
            if CREATE_BUCKET.drain(1):
                (yield (metric, datapoints, dbFileExists))
            # NOTE(review): when no token is available the drained
            # datapoints appear to be dropped for this pass -- confirm.
            continue
        (yield (metric, datapoints, dbFileExists))
| [
"def",
"optimalWriteOrder",
"(",
")",
":",
"while",
"MetricCache",
":",
"(",
"metric",
",",
"datapoints",
")",
"=",
"MetricCache",
".",
"drain_metric",
"(",
")",
"dbFileExists",
"=",
"state",
".",
"database",
".",
"exists",
"(",
"metric",
")",
"if",
"(",
... | generates metrics with the most cached values first and applies a soft rate limit on new metrics . | train | false |
42,136 | def check_that_blanks_fail(problem):
    """Verify that submitting blank answers is always graded 'incorrect'.

    Grades an all-empty answer set; when any answer id is accepted, the
    offending ids are logged as an error rather than raised.
    """
    blank_answers = dict(((answer_id, u'') for answer_id in problem.get_question_answers()))
    grading_results = problem.grade_answers(blank_answers)
    try:
        assert all(((result == u'incorrect') for result in grading_results.values()))
    except AssertionError:
        log.error(u'Blank accepted as correct answer in {0} for {1}'.format(problem, [answer_id for (answer_id, result) in sorted(grading_results.items()) if (result != u'incorrect')]))
| [
"def",
"check_that_blanks_fail",
"(",
"problem",
")",
":",
"blank_answers",
"=",
"dict",
"(",
"(",
"(",
"answer_id",
",",
"u''",
")",
"for",
"answer_id",
"in",
"problem",
".",
"get_question_answers",
"(",
")",
")",
")",
"grading_results",
"=",
"problem",
"."... | leaving it blank should never work . | train | false |
def flatten_tree(elem, path, branches):
    """Flatten nested dicts/lists into string paths collected in branches.

    Dict keys extend the current path; list items share it; any other leaf
    terminates a branch as path + [text(leaf)].  ``branches`` is mutated
    in place.
    """
    path = path or []
    if isinstance(elem, dict):
        for key, sub in elem.items():
            flatten_tree(sub, path + [key], branches)
    elif isinstance(elem, list):
        for sub in elem:
            flatten_tree(sub, path, branches)
    else:
        branches.append(path + [six.text_type(elem)])
| [
"def",
"flatten_tree",
"(",
"elem",
",",
"path",
",",
"branches",
")",
":",
"if",
"(",
"not",
"path",
")",
":",
"path",
"=",
"[",
"]",
"if",
"isinstance",
"(",
"elem",
",",
"dict",
")",
":",
"for",
"(",
"k",
",",
"v",
")",
"in",
"elem",
".",
... | flatten nested lists/dictionaries into lists of strings . | train | false |
def get_rarefaction_data(rarefaction_data, col_headers):
    """Convert a parsed rarefaction table into plotting-friendly pieces.

    Returns (rare_mat_trans, seqs_per_samp, sampleIDs): the data columns
    (everything past the first two per row) transposed so each row holds
    one sample's series, the first column (seqs per sample), and the
    sample ids taken from col_headers[3:].
    """
    raw = array(rarefaction_data)
    per_row_series = [row[2:] for row in raw]
    seqs_per_samp = [row[0] for row in raw]
    sample_ids = col_headers[3:]
    rare_mat_trans = transpose(array(per_row_series)).tolist()
    return (rare_mat_trans, seqs_per_samp, sample_ids)
| [
"def",
"get_rarefaction_data",
"(",
"rarefaction_data",
",",
"col_headers",
")",
":",
"rare_mat_raw",
"=",
"array",
"(",
"rarefaction_data",
")",
"rare_mat_min",
"=",
"[",
"rare_mat_raw",
"[",
"x",
"]",
"[",
"2",
":",
"]",
"for",
"x",
"in",
"range",
"(",
"... | this function takes a rarefaction file and converts it into an array . | train | false |
42,139 | def chost_contains(value):
    """Return whether the CHOST make.conf variable contains ``value``."""
    return var_contains('CHOST', value)
| [
"def",
"chost_contains",
"(",
"value",
")",
":",
"return",
"var_contains",
"(",
"'CHOST'",
",",
"value",
")"
] | verify if chost variable contains a value in make . | train | false |
42,140 | def is_vulnerable(host, timeout, port=443):
    """Probe ``host`` for the Heartbleed (CVE-2014-0160) vulnerability.

    Returns:
        None  -- connection failed or no usable TLS handshake.
        False -- TLS present but the heartbeat did not leak memory.
        True  -- host appears vulnerable (per ``hit_hb``).
    """
    s = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
    s.settimeout(int(timeout))
    try:
        s.connect((host, int(port)))
    except Exception as e:
        return None
    s.send(hello)
    # Read handshake records (type 22) until ServerHelloDone (msg type 14).
    while True:
        (typ, ver, pay) = recvmsg(s)
        if (typ is None):
            return None
        if (typ == 22):
            payarr = unpack_handshake(pay)
            finddone = [t for (t, l, p) in payarr if (t == 14)]
            if (len(finddone) > 0):
                break
    ver_chr = chr((ver & 255))
    # Craft heartbeat records (type 0x18) whose claimed payload length
    # exceeds the data actually sent, matching the negotiated version.
    hb = (((h2bin('18 03') + ver_chr) + h2bin('40 00 01 3f fd')) + ('\x01' * 16381))
    hb += ((h2bin('18 03') + ver_chr) + h2bin('00 03 01 00 00'))
    s.send(hb)
    return hit_hb(s)
| [
"def",
"is_vulnerable",
"(",
"host",
",",
"timeout",
",",
"port",
"=",
"443",
")",
":",
"s",
"=",
"socket",
".",
"socket",
"(",
"socket",
".",
"AF_INET",
",",
"socket",
".",
"SOCK_STREAM",
")",
"s",
".",
"settimeout",
"(",
"int",
"(",
"timeout",
")",... | check if remote host is vulnerable to heartbleed returns: none -- if remote host has no ssl false -- remote host has ssl but likely not vulnerable true -- remote host might be vulnerable . | train | false |
def generatedPointGrid(pixel_width, pixel_height, width_scalar=1.0, height_scalar=1.0, horiz_points=5, vert_points=5):
    """Return an (N, 2) array of grid points centred on the origin.

    The grid spans the scaled pixel area and contains
    horiz_points * vert_points points, listed row by row.
    """
    scaled_w = pixel_width * width_scalar
    scaled_h = pixel_height * height_scalar
    xs, ys = np.meshgrid(
        np.linspace((-scaled_w) / 2.0, scaled_w / 2.0, horiz_points),
        np.linspace((-scaled_h) / 2.0, scaled_h / 2.0, vert_points),
    )
    return np.column_stack((xs.flatten(), ys.flatten()))
| [
"def",
"generatedPointGrid",
"(",
"pixel_width",
",",
"pixel_height",
",",
"width_scalar",
"=",
"1.0",
",",
"height_scalar",
"=",
"1.0",
",",
"horiz_points",
"=",
"5",
",",
"vert_points",
"=",
"5",
")",
":",
"swidth",
"=",
"(",
"pixel_width",
"*",
"width_sca... | generate a set of points in a nxm grid . | train | false |
def centroid(y):
    """Run centroid-method hierarchical clustering on the condensed
    distance matrix ``y`` (thin wrapper around ``linkage`` with the
    euclidean metric)."""
    return linkage(y, metric='euclidean', method='centroid')
| [
"def",
"centroid",
"(",
"y",
")",
":",
"return",
"linkage",
"(",
"y",
",",
"method",
"=",
"'centroid'",
",",
"metric",
"=",
"'euclidean'",
")"
] | returns the center of the given list of vectors . | train | false |
42,145 | def test_illegal_event_id():
    """Epochs must reject an event_id dict naming events absent from data."""
    (raw, events, picks) = _get_data()
    event_id_illegal = dict(aud_l=1, does_not_exist=12345678)
    assert_raises(ValueError, Epochs, raw, events, event_id_illegal, tmin, tmax, picks=picks, proj=False)
| [
"def",
"test_illegal_event_id",
"(",
")",
":",
"(",
"raw",
",",
"events",
",",
"picks",
")",
"=",
"_get_data",
"(",
")",
"event_id_illegal",
"=",
"dict",
"(",
"aud_l",
"=",
"1",
",",
"does_not_exist",
"=",
"12345678",
")",
"assert_raises",
"(",
"ValueError... | test handling of invalid events ids . | train | false |
42,146 | def get_domainvalue(link):
    """Draw a random value inside the domain of the given link function.

    Starts from an exponential sample (-log U) and adjusts it for links
    with restricted domains: capped at 3 for cloglog, negated for
    negbinom.
    """
    z = (- np.log(np.random.uniform(0, 1)))
    if (type(link) == type(cloglog)):
        z = min(z, 3)
    elif (type(link) == type(negbinom)):
        z = (- z)
    return z
| [
"def",
"get_domainvalue",
"(",
"link",
")",
":",
"z",
"=",
"(",
"-",
"np",
".",
"log",
"(",
"np",
".",
"random",
".",
"uniform",
"(",
"0",
",",
"1",
")",
")",
")",
"if",
"(",
"type",
"(",
"link",
")",
"==",
"type",
"(",
"cloglog",
")",
")",
... | get a value in the domain for a given family . | train | false |
42,148 | def add_method(func, *combined_args):
    """Class decorator factory that attaches generated test methods.

    Each ``combined_arg`` may be (args, kwargs), a lone mapping (kwargs
    only) or a lone args sequence; ``make_method`` turns ``func`` plus
    those arguments into a method that is set on the decorated class.
    """
    def wrapper(cls):
        for combined_arg in combined_args:
            # Accept (args, kwargs), a mapping, or a plain args sequence.
            if (len(combined_arg) == 2):
                args = combined_arg[0]
                kwargs = combined_arg[1]
            elif isinstance(combined_arg[0], Mapping):
                args = []
                kwargs = combined_arg[0]
            else:
                args = combined_arg[0]
                kwargs = {}
            test_method = make_method(func, args, kwargs)
            setattr(cls, test_method.__name__, test_method)
        return cls
    return wrapper
| [
"def",
"add_method",
"(",
"func",
",",
"*",
"combined_args",
")",
":",
"def",
"wrapper",
"(",
"cls",
")",
":",
"for",
"combined_arg",
"in",
"combined_args",
":",
"if",
"(",
"len",
"(",
"combined_arg",
")",
"==",
"2",
")",
":",
"args",
"=",
"combined_ar... | add a test case via a class decorator . | train | false |
42,149 | def clear_task_instances(tis, session, activate_dag_runs=True):
    """Clear a set of task instances so they can be re-run.

    Running instances (and their jobs) are flagged SHUTDOWN; all others
    are deleted from the session.  With ``activate_dag_runs`` the affected
    DagRuns are set back to RUNNING with a fresh start date.
    """
    job_ids = []
    for ti in tis:
        if (ti.state == State.RUNNING):
            if ti.job_id:
                ti.state = State.SHUTDOWN
                job_ids.append(ti.job_id)
        else:
            session.delete(ti)
    if job_ids:
        from airflow.jobs import BaseJob as BJ
        for job in session.query(BJ).filter(BJ.id.in_(job_ids)).all():
            job.state = State.SHUTDOWN
    if activate_dag_runs:
        execution_dates = {ti.execution_date for ti in tis}
        dag_ids = {ti.dag_id for ti in tis}
        drs = session.query(DagRun).filter(DagRun.dag_id.in_(dag_ids), DagRun.execution_date.in_(execution_dates)).all()
        for dr in drs:
            dr.state = State.RUNNING
            dr.start_date = datetime.now()
| [
"def",
"clear_task_instances",
"(",
"tis",
",",
"session",
",",
"activate_dag_runs",
"=",
"True",
")",
":",
"job_ids",
"=",
"[",
"]",
"for",
"ti",
"in",
"tis",
":",
"if",
"(",
"ti",
".",
"state",
"==",
"State",
".",
"RUNNING",
")",
":",
"if",
"ti",
... | clears a set of task instances . | train | false |
42,150 | def read_random_int(nbits):
    """Read a random integer of approximately ``nbits`` bits.

    The top bit is forced to 1 so the result always has exactly ``nbits``
    significant bits.
    """
    randomdata = read_random_bits(nbits)
    value = transform.bytes2int(randomdata)
    value |= (1 << (nbits - 1))
    return value
| [
"def",
"read_random_int",
"(",
"nbits",
")",
":",
"randomdata",
"=",
"read_random_bits",
"(",
"nbits",
")",
"value",
"=",
"transform",
".",
"bytes2int",
"(",
"randomdata",
")",
"value",
"|=",
"(",
"1",
"<<",
"(",
"nbits",
"-",
"1",
")",
")",
"return",
... | reads a random integer of approximately nbits bits . | train | false |
42,151 | def _parse_date_perforce(aDateString):
    """Parse a Perforce-style date ('Fri, 2006/09/15 08:19:53 EDT').

    The date is rewritten into RFC 822 form and parsed with rfc822;
    returns a UTC time.struct_time, or None when the string does not
    match or cannot be parsed.
    """
    _my_date_pattern = re.compile(u'(\\w{,3}), (\\d{,4})/(\\d{,2})/(\\d{2}) (\\d{,2}):(\\d{2}):(\\d{2}) (\\w{,3})')
    m = _my_date_pattern.search(aDateString)
    if (m is None):
        return None
    (dow, year, month, day, hour, minute, second, tz) = m.groups()
    months = [u'Jan', u'Feb', u'Mar', u'Apr', u'May', u'Jun', u'Jul', u'Aug', u'Sep', u'Oct', u'Nov', u'Dec']
    dateString = (u'%s, %s %s %s %s:%s:%s %s' % (dow, day, months[(int(month) - 1)], year, hour, minute, second, tz))
    tm = rfc822.parsedate_tz(dateString)
    if tm:
        # Falls through (returns None implicitly) when parsing fails.
        return time.gmtime(rfc822.mktime_tz(tm))
| [
"def",
"_parse_date_perforce",
"(",
"aDateString",
")",
":",
"_my_date_pattern",
"=",
"re",
".",
"compile",
"(",
"u'(\\\\w{,3}), (\\\\d{,4})/(\\\\d{,2})/(\\\\d{2}) (\\\\d{,2}):(\\\\d{2}):(\\\\d{2}) (\\\\w{,3})'",
")",
"m",
"=",
"_my_date_pattern",
".",
"search",
"(",
"aDateSt... | parse a date in yyyy/mm/dd hh:mm:ss ttt format . | train | false |
def get_rand_name(max_length=None, prefix='test'):
    """Return a single random string derived from ``prefix``."""
    names = get_related_rand_names([prefix], max_length)
    return names[0]
| [
"def",
"get_rand_name",
"(",
"max_length",
"=",
"None",
",",
"prefix",
"=",
"'test'",
")",
":",
"return",
"get_related_rand_names",
"(",
"[",
"prefix",
"]",
",",
"max_length",
")",
"[",
"0",
"]"
] | return a random string . | train | false |
42,153 | def make_or_verify_core_dir(directory, mode, uid, strict):
    """Ensure ``directory`` exists with the given mode and owner.

    Wraps util.make_or_verify_dir, translating OSError into the
    client-facing errors.Error with a permission hint.
    """
    try:
        util.make_or_verify_dir(directory, mode, uid, strict)
    except OSError as error:
        raise errors.Error(_PERM_ERR_FMT.format(error))
| [
"def",
"make_or_verify_core_dir",
"(",
"directory",
",",
"mode",
",",
"uid",
",",
"strict",
")",
":",
"try",
":",
"util",
".",
"make_or_verify_dir",
"(",
"directory",
",",
"mode",
",",
"uid",
",",
"strict",
")",
"except",
"OSError",
"as",
"error",
":",
"... | make sure directory exists with proper permissions . | train | false |
42,154 | def test_settings():
    """settings() must override env only inside its context block."""
    env.testval = 'outer value'
    with settings(testval='inner value'):
        eq_(env.testval, 'inner value')
    eq_(env.testval, 'outer value')
| [
"def",
"test_settings",
"(",
")",
":",
"env",
".",
"testval",
"=",
"'outer value'",
"with",
"settings",
"(",
"testval",
"=",
"'inner value'",
")",
":",
"eq_",
"(",
"env",
".",
"testval",
",",
"'inner value'",
")",
"eq_",
"(",
"env",
".",
"testval",
",",
... | settings() should temporarily override env dict with given key/value pair . | train | false |
def format_cpu_spec(cpuset, allow_ranges=True):
    """Format a CPU set as a libvirt CPU range specification.

    Args:
        cpuset: iterable of integer CPU indexes, in any order.
        allow_ranges: when True, runs of consecutive indexes are collapsed
            into "a-b" ranges; otherwise every index is listed.

    Returns:
        A comma-separated string such as "0-3,8,10-11".
    """
    # Fix vs original: the per-CPU loop variable was named `id`, shadowing
    # the builtin; also use entry[-1] rather than entry[len(entry) - 1].
    if not allow_ranges:
        return ','.join(str(cpu_id) for cpu_id in sorted(cpuset))

    # Group the sorted indexes into runs of consecutive values.
    runs = []
    previndex = None
    for cpu_id in sorted(cpuset):
        if previndex is None or previndex != cpu_id - 1:
            runs.append([])
        runs[-1].append(cpu_id)
        previndex = cpu_id

    parts = []
    for run in runs:
        if len(run) == 1:
            parts.append(str(run[0]))
        else:
            parts.append('%d-%d' % (run[0], run[-1]))
    return ','.join(parts)
| [
"def",
"format_cpu_spec",
"(",
"cpuset",
",",
"allow_ranges",
"=",
"True",
")",
":",
"if",
"allow_ranges",
":",
"ranges",
"=",
"[",
"]",
"previndex",
"=",
"None",
"for",
"cpuindex",
"in",
"sorted",
"(",
"cpuset",
")",
":",
"if",
"(",
"(",
"previndex",
... | format a libvirt cpu range specification . | train | false |
def isnum(x):
    """Return 1 when x is an instance of a built-in numeric type, else 0."""
    numeric_types = (int, float, complex)
    if isinstance(x, numeric_types):
        return 1
    return 0
| [
"def",
"isnum",
"(",
"x",
")",
":",
"for",
"T",
"in",
"(",
"int",
",",
"float",
",",
"complex",
")",
":",
"if",
"isinstance",
"(",
"x",
",",
"T",
")",
":",
"return",
"1",
"return",
"0"
] | test whether an object is an instance of a built-in numeric type . | train | false |
42,158 | def shadow_hash(crypt_salt=None, password=None, algorithm='sha512'):
    """Generate a salted crypt hash suitable for /etc/shadow."""
    return salt.utils.pycrypto.gen_hash(crypt_salt, password, algorithm)
| [
"def",
"shadow_hash",
"(",
"crypt_salt",
"=",
"None",
",",
"password",
"=",
"None",
",",
"algorithm",
"=",
"'sha512'",
")",
":",
"return",
"salt",
".",
"utils",
".",
"pycrypto",
".",
"gen_hash",
"(",
"crypt_salt",
",",
"password",
",",
"algorithm",
")"
] | generates a salted hash suitable for /etc/shadow . | train | true |
42,159 | def _list_hosts():
    """Return the entries of the hosts file as an OrderedDict.

    Keys are IP addresses mapped to their alias lists; comment lines are
    stored under synthetic 'comment-N' keys.  Returns an empty dict when
    the hosts file does not exist.
    """
    count = 0
    hfn = __get_hosts_filename()
    ret = odict.OrderedDict()
    if (not os.path.isfile(hfn)):
        return ret
    with salt.utils.fopen(hfn) as ifile:
        for line in ifile:
            line = line.strip()
            if (not line):
                continue
            if line.startswith('#'):
                # NOTE(review): extend() with a string stores the comment
                # one character at a time -- confirm this is intended by
                # the consumers of the 'comment-N' entries.
                ret.setdefault('comment-{0}'.format(count), []).extend(line)
                count += 1
                continue
            if ('#' in line):
                # Strip trailing inline comments.
                line = line[:line.index('#')].strip()
            comps = line.split()
            ip = comps.pop(0)
            ret.setdefault(ip, []).extend(comps)
    return ret
| [
"def",
"_list_hosts",
"(",
")",
":",
"count",
"=",
"0",
"hfn",
"=",
"__get_hosts_filename",
"(",
")",
"ret",
"=",
"odict",
".",
"OrderedDict",
"(",
")",
"if",
"(",
"not",
"os",
".",
"path",
".",
"isfile",
"(",
"hfn",
")",
")",
":",
"return",
"ret",... | return the hosts found in the hosts file in as an ordereddict . | train | false |
42,163 | def get_site_dict(app_name='filebrowser'):
    """Return the *deployed* FileBrowser sites registered under app_name.

    Filters the module-level sites cache against the instance namespaces
    currently present in the active URL resolver.
    """
    if (app_name not in _sites_cache):
        return {}
    deployed = get_resolver(get_urlconf()).app_dict[app_name]
    return dict(((k, v) for (k, v) in _sites_cache[app_name].items() if (k in deployed)))
| [
"def",
"get_site_dict",
"(",
"app_name",
"=",
"'filebrowser'",
")",
":",
"if",
"(",
"app_name",
"not",
"in",
"_sites_cache",
")",
":",
"return",
"{",
"}",
"deployed",
"=",
"get_resolver",
"(",
"get_urlconf",
"(",
")",
")",
".",
"app_dict",
"[",
"app_name",... | return a dict with all *deployed* filebrowser sites that have a given app_name . | train | false |
def get_gc_config():
    """Return the default GC log collector configuration.

    prefix: metric-name prefix; log_dir and log_name_pattern locate the
    garbage-collection log files to scan.
    """
    return dict(
        prefix='jvm',
        log_dir='/var/log/gc',
        log_name_pattern='*gc*.log',
    )
| [
"def",
"get_gc_config",
"(",
")",
":",
"return",
"{",
"'prefix'",
":",
"'jvm'",
",",
"'log_dir'",
":",
"'/var/log/gc'",
",",
"'log_name_pattern'",
":",
"'*gc*.log'",
"}"
] | prefix: the full metric name will be prefix . | train | false |
def ClearUserInfoCookie(cookie_name=COOKIE_NAME):
    """Build a Set-Cookie header line that clears the user-info cookie.

    Expires the cookie immediately (max-age=0) for the whole site and
    returns the serialized header terminated by CRLF.
    """
    clearing = Cookie.SimpleCookie()
    clearing[cookie_name] = ''
    morsel = clearing[cookie_name]
    morsel['path'] = '/'
    morsel['max-age'] = '0'
    return ('%s\r\n' % clearing)
| [
"def",
"ClearUserInfoCookie",
"(",
"cookie_name",
"=",
"COOKIE_NAME",
")",
":",
"set_cookie",
"=",
"Cookie",
".",
"SimpleCookie",
"(",
")",
"set_cookie",
"[",
"cookie_name",
"]",
"=",
"''",
"set_cookie",
"[",
"cookie_name",
"]",
"[",
"'path'",
"]",
"=",
"'/'... | clears the user info cookie from the requestor . | train | false |
42,167 | def mask_and(clip, other_clip):
    """Return the logical AND of two masks (pixel-wise minimum).

    ``other_clip`` may be an ImageClip, a raw numpy frame, or another
    clip; the result mirrors ``clip`` with each frame min-combined.
    """
    if isinstance(other_clip, ImageClip):
        other_clip = other_clip.img
    if isinstance(other_clip, np.ndarray):
        # Static mask: combine every frame with the same image.
        return clip.fl_image((lambda f: np.minimum(f, other_clip)))
    else:
        # Animated mask: combine frame-by-frame at matching times.
        return clip.fl((lambda gf, t: np.minimum(gf(t), other_clip.get_frame(t))))
| [
"def",
"mask_and",
"(",
"clip",
",",
"other_clip",
")",
":",
"if",
"isinstance",
"(",
"other_clip",
",",
"ImageClip",
")",
":",
"other_clip",
"=",
"other_clip",
".",
"img",
"if",
"isinstance",
"(",
"other_clip",
",",
"np",
".",
"ndarray",
")",
":",
"retu... | returns the logical and between two masks . | train | false |
def has_variety(seq):
    """Return True if ``seq`` contains at least two differing elements.

    Empty and single-element sequences have no variety.  Elements need not
    be hashable; only inequality against the first element is tested.
    """
    iterator = iter(seq)
    try:
        first = next(iterator)
    except StopIteration:
        return False
    for item in iterator:
        if item != first:
            return True
    return False
| [
"def",
"has_variety",
"(",
"seq",
")",
":",
"for",
"(",
"i",
",",
"s",
")",
"in",
"enumerate",
"(",
"seq",
")",
":",
"if",
"(",
"i",
"==",
"0",
")",
":",
"sentinel",
"=",
"s",
"elif",
"(",
"s",
"!=",
"sentinel",
")",
":",
"return",
"True",
"r... | return true if there are any different elements in seq . | train | false |
42,169 | @unbox(types.SliceType)
def unbox_slice(typ, obj, c):
    """Convert a Python slice object *obj* to a native slice structure.

    Extracts start/stop/step as machine integers; the returned value's
    error flag is the negation of the extraction's ok status.
    """
    from . import slicing
    (ok, start, stop, step) = c.pyapi.slice_as_ints(obj)
    sli = c.context.make_helper(c.builder, typ)
    sli.start = start
    sli.stop = stop
    sli.step = step
    return NativeValue(sli._getvalue(), is_error=c.builder.not_(ok))
| [
"@",
"unbox",
"(",
"types",
".",
"SliceType",
")",
"def",
"unbox_slice",
"(",
"typ",
",",
"obj",
",",
"c",
")",
":",
"from",
".",
"import",
"slicing",
"(",
"ok",
",",
"start",
",",
"stop",
",",
"step",
")",
"=",
"c",
".",
"pyapi",
".",
"slice_as_... | convert object *obj* to a native slice structure . | train | false |
42,170 | @map_project_slug
@map_subproject_slug
def redirect_project_slug(request, project, subproject):
    """Redirect a bare project/subproject URL to its resolved docs URL.

    The subproject takes precedence when present.
    """
    return HttpResponseRedirect(resolve((subproject or project)))
| [
"@",
"map_project_slug",
"@",
"map_subproject_slug",
"def",
"redirect_project_slug",
"(",
"request",
",",
"project",
",",
"subproject",
")",
":",
"return",
"HttpResponseRedirect",
"(",
"resolve",
"(",
"(",
"subproject",
"or",
"project",
")",
")",
")"
] | handle / -> /en/latest/ directs on subdomains . | train | false |
42,172 | def getAroundsFromLoop(loop, radius, thresholdRatio=0.9):
    """Get the arounds derived from ``loop`` at the given radius.

    Points are sampled slightly wider (1.01 * |radius|) before being
    converted into arounds.
    """
    return getAroundsFromPoints(getPointsFromLoop(loop, (1.01 * abs(radius)), thresholdRatio), radius)
| [
"def",
"getAroundsFromLoop",
"(",
"loop",
",",
"radius",
",",
"thresholdRatio",
"=",
"0.9",
")",
":",
"return",
"getAroundsFromPoints",
"(",
"getPointsFromLoop",
"(",
"loop",
",",
"(",
"1.01",
"*",
"abs",
"(",
"radius",
")",
")",
",",
"thresholdRatio",
")",
... | get the arounds from the loop . | train | false |
42,173 | def _handle_head(gs_stub, filename):
    """Serve a fake HEAD response for a Google Storage object.

    Returns a 404 result when the object is unknown; otherwise a 200 whose
    headers carry size, content type, etag, last-modified time and any
    custom metadata.
    """
    filestat = gs_stub.head_object(filename)
    if (not filestat):
        return _FakeUrlFetchResult(404, {}, '')
    http_time = common.posix_time_to_http(filestat.st_ctime)
    response_headers = {'content-length': filestat.st_size, 'content-type': filestat.content_type, 'etag': filestat.etag, 'last-modified': http_time}
    if filestat.metadata:
        response_headers.update(filestat.metadata)
    return _FakeUrlFetchResult(200, response_headers, '')
| [
"def",
"_handle_head",
"(",
"gs_stub",
",",
"filename",
")",
":",
"filestat",
"=",
"gs_stub",
".",
"head_object",
"(",
"filename",
")",
"if",
"(",
"not",
"filestat",
")",
":",
"return",
"_FakeUrlFetchResult",
"(",
"404",
",",
"{",
"}",
",",
"''",
")",
... | handle head request . | train | false |
def get_course_display_string(descriptor):
    """Return "<org> <number>" for a course or course overview."""
    parts = [
        descriptor.display_org_with_default,
        descriptor.display_number_with_default,
    ]
    return ' '.join(parts)
| [
"def",
"get_course_display_string",
"(",
"descriptor",
")",
":",
"return",
"' '",
".",
"join",
"(",
"[",
"descriptor",
".",
"display_org_with_default",
",",
"descriptor",
".",
"display_number_with_default",
"]",
")"
] | returns a string to display for a course or course overview . | train | false |
42,175 | def reload_constraints():
    """Parse the swift conf file and reset module-level constraint globals.

    Values read from the [swift-constraints] section override the
    defaults; each effective value is also exported as an UPPERCASE
    module global.
    """
    global SWIFT_CONSTRAINTS_LOADED, OVERRIDE_CONSTRAINTS
    SWIFT_CONSTRAINTS_LOADED = False
    OVERRIDE_CONSTRAINTS = {}
    constraints_conf = ConfigParser()
    if constraints_conf.read(utils.SWIFT_CONF_FILE):
        SWIFT_CONSTRAINTS_LOADED = True
        for name in DEFAULT_CONSTRAINTS:
            try:
                value = constraints_conf.get('swift-constraints', name)
            except NoOptionError:
                pass
            except NoSectionError:
                # No [swift-constraints] section at all: keep defaults.
                break
            else:
                try:
                    value = int(value)
                except ValueError:
                    # Non-numeric constraints are comma-separated lists.
                    value = utils.list_from_csv(value)
                OVERRIDE_CONSTRAINTS[name] = value
    for (name, default) in DEFAULT_CONSTRAINTS.items():
        value = OVERRIDE_CONSTRAINTS.get(name, default)
        EFFECTIVE_CONSTRAINTS[name] = value
        globals()[name.upper()] = value
| [
"def",
"reload_constraints",
"(",
")",
":",
"global",
"SWIFT_CONSTRAINTS_LOADED",
",",
"OVERRIDE_CONSTRAINTS",
"SWIFT_CONSTRAINTS_LOADED",
"=",
"False",
"OVERRIDE_CONSTRAINTS",
"=",
"{",
"}",
"constraints_conf",
"=",
"ConfigParser",
"(",
")",
"if",
"constraints_conf",
"... | parse swift_conf_file and reset module level global constraint attrs . | train | false |
42,176 | def seterr(divide=False):
    """Set whether 0/0 raises an exception (divide=True) or returns nan.

    Clears the cache when the flag actually changes so results cached
    under the old policy are discarded.
    """
    if (_errdict['divide'] != divide):
        clear_cache()
        _errdict['divide'] = divide
| [
"def",
"seterr",
"(",
"divide",
"=",
"False",
")",
":",
"if",
"(",
"_errdict",
"[",
"'divide'",
"]",
"!=",
"divide",
")",
":",
"clear_cache",
"(",
")",
"_errdict",
"[",
"'divide'",
"]",
"=",
"divide"
] | should sympy raise an exception on 0/0 or return a nan? divide == true . | train | false |
42,177 | def pack_session(item):
    """Serialize a session object safely (sessions hide DB references).

    Returns a ('__packed_session__', sessid, conn_time) tuple when the
    session is still current (same conn_time registered in the session
    handler), otherwise None.
    """
    _init_globals()
    session = _SESSION_HANDLER.get(item.sessid)
    if (session and (session.conn_time == item.conn_time)):
        return (item.conn_time and item.sessid and ('__packed_session__', _GA(item, 'sessid'), _GA(item, 'conn_time')))
    return None
| [
"def",
"pack_session",
"(",
"item",
")",
":",
"_init_globals",
"(",
")",
"session",
"=",
"_SESSION_HANDLER",
".",
"get",
"(",
"item",
".",
"sessid",
")",
"if",
"(",
"session",
"and",
"(",
"session",
".",
"conn_time",
"==",
"item",
".",
"conn_time",
")",
... | handle the safe serializion of sessions objects (these contain hidden references to database objects so they cant be safely serialized) . | train | false |
42,178 | def optimize_clones(clones, optimizer, regularization_losses=None, **kwargs):
    """Compute total loss and summed gradients for a list of model clones.

    Regularization losses (taken from the default collection when not
    supplied) are applied to the first clone only; per-clone gradients are
    combined via _sum_clones_gradients.  Returns (total_loss,
    grads_and_vars).
    """
    grads_and_vars = []
    clones_losses = []
    num_clones = len(clones)
    if (regularization_losses is None):
        regularization_losses = tf.get_collection(tf.GraphKeys.REGULARIZATION_LOSSES)
    for clone in clones:
        with tf.name_scope(clone.scope):
            (clone_loss, clone_grad) = _optimize_clone(optimizer, clone, num_clones, regularization_losses, **kwargs)
            if (clone_loss is not None):
                clones_losses.append(clone_loss)
                grads_and_vars.append(clone_grad)
            # Only the first clone carries the regularization losses.
            regularization_losses = None
    total_loss = tf.add_n(clones_losses, name='total_loss')
    grads_and_vars = _sum_clones_gradients(grads_and_vars)
    return (total_loss, grads_and_vars)
| [
"def",
"optimize_clones",
"(",
"clones",
",",
"optimizer",
",",
"regularization_losses",
"=",
"None",
",",
"**",
"kwargs",
")",
":",
"grads_and_vars",
"=",
"[",
"]",
"clones_losses",
"=",
"[",
"]",
"num_clones",
"=",
"len",
"(",
"clones",
")",
"if",
"(",
... | compute clone losses and gradients for the given list of clones . | train | false |
def isTestCase(obj):
    """Return True when ``obj`` is a class deriving from pyunit.TestCase."""
    try:
        result = issubclass(obj, pyunit.TestCase)
    except TypeError:
        # issubclass raises for non-class arguments.
        result = False
    return result
| [
"def",
"isTestCase",
"(",
"obj",
")",
":",
"try",
":",
"return",
"issubclass",
"(",
"obj",
",",
"pyunit",
".",
"TestCase",
")",
"except",
"TypeError",
":",
"return",
"False"
] | returns c{true} if c{obj} is a class that contains test cases . | train | false |
42,184 | @require_POST
@login_required
def watch_approved(request, product=None):
    """Start watching approved revisions in the request's locale.

    404s for locales outside SUMO_LANGUAGES; the watch is optionally
    scoped to a product.  Returns an empty 200 response.
    """
    if (request.LANGUAGE_CODE not in settings.SUMO_LANGUAGES):
        raise Http404
    kwargs = {'locale': request.LANGUAGE_CODE}
    if (product is not None):
        kwargs['product'] = product
    ApproveRevisionInLocaleEvent.notify(request.user, **kwargs)
    statsd.incr('wiki.watches.approved')
    return HttpResponse()
| [
"@",
"require_POST",
"@",
"login_required",
"def",
"watch_approved",
"(",
"request",
",",
"product",
"=",
"None",
")",
":",
"if",
"(",
"request",
".",
"LANGUAGE_CODE",
"not",
"in",
"settings",
".",
"SUMO_LANGUAGES",
")",
":",
"raise",
"Http404",
"kwargs",
"=... | start watching approved revisions in a locale for a given product . | train | false |
42,185 | def Timestamp(year, month, day, hour, minute, second):
return dateconverter.Timestamp(year, month, day, hour, minute, second)
| [
"def",
"Timestamp",
"(",
"year",
",",
"month",
",",
"day",
",",
"hour",
",",
"minute",
",",
"second",
")",
":",
"return",
"dateconverter",
".",
"Timestamp",
"(",
"year",
",",
"month",
",",
"day",
",",
"hour",
",",
"minute",
",",
"second",
")"
] | construct an object holding a timestamp value . | train | false |
42,186 | def activity_group_type():
return s3_rest_controller()
| [
"def",
"activity_group_type",
"(",
")",
":",
"return",
"s3_rest_controller",
"(",
")"
] | activity group types: restful crud controller . | train | false |
42,187 | def long2raw(value, endian, size=None):
assert (((not size) and (0 < value)) or (0 <= value))
assert (endian in (LITTLE_ENDIAN, BIG_ENDIAN))
text = []
while ((value != 0) or (text == '')):
byte = (value % 256)
text.append(chr(byte))
value >>= 8
if size:
need = max((size - len(text)), 0)
else:
need = 0
if need:
if (endian is BIG_ENDIAN):
text = chain(repeat('\x00', need), reversed(text))
else:
text = chain(text, repeat('\x00', need))
elif (endian is BIG_ENDIAN):
text = reversed(text)
return ''.join(text)
| [
"def",
"long2raw",
"(",
"value",
",",
"endian",
",",
"size",
"=",
"None",
")",
":",
"assert",
"(",
"(",
"(",
"not",
"size",
")",
"and",
"(",
"0",
"<",
"value",
")",
")",
"or",
"(",
"0",
"<=",
"value",
")",
")",
"assert",
"(",
"endian",
"in",
... | convert a number to a raw string . | train | false |
42,188 | def possible_links(source_node, sink_node):
possible = []
for source in source_node.output_channels():
for sink in sink_node.input_channels():
if compatible_channels(source, sink):
possible.append((source, sink))
return possible
| [
"def",
"possible_links",
"(",
"source_node",
",",
"sink_node",
")",
":",
"possible",
"=",
"[",
"]",
"for",
"source",
"in",
"source_node",
".",
"output_channels",
"(",
")",
":",
"for",
"sink",
"in",
"sink_node",
".",
"input_channels",
"(",
")",
":",
"if",
... | return a list of tuples . | train | false |
42,189 | def dashCapitalize(s):
return '-'.join([x.capitalize() for x in s.split('-')])
| [
"def",
"dashCapitalize",
"(",
"s",
")",
":",
"return",
"'-'",
".",
"join",
"(",
"[",
"x",
".",
"capitalize",
"(",
")",
"for",
"x",
"in",
"s",
".",
"split",
"(",
"'-'",
")",
"]",
")"
] | capitalize a string . | train | false |
42,191 | @snippet
def client_list_topics(client, to_delete):
def do_something_with(sub):
pass
for topic in client.list_topics():
do_something_with(topic)
| [
"@",
"snippet",
"def",
"client_list_topics",
"(",
"client",
",",
"to_delete",
")",
":",
"def",
"do_something_with",
"(",
"sub",
")",
":",
"pass",
"for",
"topic",
"in",
"client",
".",
"list_topics",
"(",
")",
":",
"do_something_with",
"(",
"topic",
")"
] | list topics for a project . | train | false |
42,193 | def append_host(host, pool):
if ((not host) or (not pool)):
return host
new_host = '#'.join([host, pool])
return new_host
| [
"def",
"append_host",
"(",
"host",
",",
"pool",
")",
":",
"if",
"(",
"(",
"not",
"host",
")",
"or",
"(",
"not",
"pool",
")",
")",
":",
"return",
"host",
"new_host",
"=",
"'#'",
".",
"join",
"(",
"[",
"host",
",",
"pool",
"]",
")",
"return",
"ne... | encode pool into host info . | train | false |
42,195 | def _FindApiJars(lib_dir):
result = {}
for jar_file in _FilesMatching(lib_dir, (lambda f: f.endswith('.jar'))):
manifest = jarfile.ReadManifest(jar_file)
if manifest:
section = manifest.sections.get('com/google/appengine/api/')
if (section and ('Specification-Version' in section)):
result[jar_file] = section['Specification-Version']
return result
| [
"def",
"_FindApiJars",
"(",
"lib_dir",
")",
":",
"result",
"=",
"{",
"}",
"for",
"jar_file",
"in",
"_FilesMatching",
"(",
"lib_dir",
",",
"(",
"lambda",
"f",
":",
"f",
".",
"endswith",
"(",
"'.jar'",
")",
")",
")",
":",
"manifest",
"=",
"jarfile",
".... | find the appengine-api-* . | train | false |
42,196 | def test_convert_to_prover9(expr):
for t in expr:
e = Expression.fromstring(t)
print(convert_to_prover9(e))
| [
"def",
"test_convert_to_prover9",
"(",
"expr",
")",
":",
"for",
"t",
"in",
"expr",
":",
"e",
"=",
"Expression",
".",
"fromstring",
"(",
"t",
")",
"print",
"(",
"convert_to_prover9",
"(",
"e",
")",
")"
] | test that parsing works ok . | train | false |
42,197 | def StartFlowAndWait(client_id, token=None, timeout=DEFAULT_TIMEOUT, **flow_args):
flow_urn = flow.GRRFlow.StartFlow(client_id=client_id, token=token, sync=True, **flow_args)
WaitForFlow(flow_urn, token=token, timeout=timeout)
return flow_urn
| [
"def",
"StartFlowAndWait",
"(",
"client_id",
",",
"token",
"=",
"None",
",",
"timeout",
"=",
"DEFAULT_TIMEOUT",
",",
"**",
"flow_args",
")",
":",
"flow_urn",
"=",
"flow",
".",
"GRRFlow",
".",
"StartFlow",
"(",
"client_id",
"=",
"client_id",
",",
"token",
"... | runs a flow and waits for it to finish . | train | false |
42,198 | def listen(opts):
event = salt.utils.event.get_event(opts['node'], sock_dir=opts['sock_dir'], transport=opts['transport'], opts=opts, listen=True)
check_access_and_print_warning(opts['sock_dir'])
print(event.puburi)
jid_counter = 0
found_minions = []
while True:
ret = event.get_event(full=True)
if (ret is None):
continue
if opts['func_count']:
data = ret.get('data', False)
if data:
if (('id' in six.iterkeys(data)) and (data.get('id', False) not in found_minions)):
if (data['fun'] == opts['func_count']):
jid_counter += 1
found_minions.append(data['id'])
print('Reply received from [{0}]. Total replies now: [{1}].'.format(ret['data']['id'], jid_counter))
continue
else:
print('Event fired at {0}'.format(time.asctime()))
print(('*' * 25))
print('Tag: {0}'.format(ret['tag']))
print('Data:')
pprint.pprint(ret['data'])
| [
"def",
"listen",
"(",
"opts",
")",
":",
"event",
"=",
"salt",
".",
"utils",
".",
"event",
".",
"get_event",
"(",
"opts",
"[",
"'node'",
"]",
",",
"sock_dir",
"=",
"opts",
"[",
"'sock_dir'",
"]",
",",
"transport",
"=",
"opts",
"[",
"'transport'",
"]",... | creates a new thread listening to that port defaults to ipaddr=127 . | train | false |
42,199 | def setmem(vm_, memory, config=False):
if (vm_state(vm_)[vm_] != 'shutdown'):
return False
dom = _get_domain(vm_)
flags = libvirt.VIR_DOMAIN_MEM_MAXIMUM
if config:
flags = (flags | libvirt.VIR_DOMAIN_AFFECT_CONFIG)
ret1 = dom.setMemoryFlags((memory * 1024), flags)
ret2 = dom.setMemoryFlags((memory * 1024), libvirt.VIR_DOMAIN_AFFECT_CURRENT)
return (ret1 == ret2 == 0)
| [
"def",
"setmem",
"(",
"vm_",
",",
"memory",
",",
"config",
"=",
"False",
")",
":",
"if",
"(",
"vm_state",
"(",
"vm_",
")",
"[",
"vm_",
"]",
"!=",
"'shutdown'",
")",
":",
"return",
"False",
"dom",
"=",
"_get_domain",
"(",
"vm_",
")",
"flags",
"=",
... | change the amount of memory allocated to vm . | train | false |
42,200 | def test_renn_fit():
renn = RepeatedEditedNearestNeighbours(random_state=RND_SEED)
renn.fit(X, Y)
assert_equal(renn.min_c_, 0)
assert_equal(renn.maj_c_, 2)
assert_equal(renn.stats_c_[0], 4)
assert_equal(renn.stats_c_[1], 16)
assert_equal(renn.stats_c_[2], 20)
| [
"def",
"test_renn_fit",
"(",
")",
":",
"renn",
"=",
"RepeatedEditedNearestNeighbours",
"(",
"random_state",
"=",
"RND_SEED",
")",
"renn",
".",
"fit",
"(",
"X",
",",
"Y",
")",
"assert_equal",
"(",
"renn",
".",
"min_c_",
",",
"0",
")",
"assert_equal",
"(",
... | test the fitting method . | train | false |
42,201 | def cloned_traverse(obj, opts, visitors):
cloned = {}
stop_on = set(opts.get('stop_on', []))
def clone(elem):
if (elem in stop_on):
return elem
else:
if (id(elem) not in cloned):
cloned[id(elem)] = newelem = elem._clone()
newelem._copy_internals(clone=clone)
meth = visitors.get(newelem.__visit_name__, None)
if meth:
meth(newelem)
return cloned[id(elem)]
if (obj is not None):
obj = clone(obj)
return obj
| [
"def",
"cloned_traverse",
"(",
"obj",
",",
"opts",
",",
"visitors",
")",
":",
"cloned",
"=",
"{",
"}",
"stop_on",
"=",
"set",
"(",
"opts",
".",
"get",
"(",
"'stop_on'",
",",
"[",
"]",
")",
")",
"def",
"clone",
"(",
"elem",
")",
":",
"if",
"(",
... | clone the given expression structure . | train | false |
42,202 | def run_selected_algorithm(timeseries, metric_name):
if (len(timeseries) < MIN_TOLERABLE_LENGTH):
raise TooShort()
if ((time() - timeseries[(-1)][0]) > STALE_PERIOD):
raise Stale()
if (len(set((item[1] for item in timeseries[(- MAX_TOLERABLE_BOREDOM):]))) == BOREDOM_SET_SIZE):
raise Boring()
try:
ensemble = [globals()[algorithm](timeseries) for algorithm in ALGORITHMS]
threshold = (len(ensemble) - CONSENSUS)
if (ensemble.count(False) <= threshold):
if ENABLE_SECOND_ORDER:
if is_anomalously_anomalous(metric_name, ensemble, timeseries[(-1)][1]):
return (True, ensemble, timeseries[(-1)][1])
else:
return (True, ensemble, timeseries[(-1)][1])
return (False, ensemble, timeseries[(-1)][1])
except:
logging.error(('Algorithm error: ' + traceback.format_exc()))
return (False, [], 1)
| [
"def",
"run_selected_algorithm",
"(",
"timeseries",
",",
"metric_name",
")",
":",
"if",
"(",
"len",
"(",
"timeseries",
")",
"<",
"MIN_TOLERABLE_LENGTH",
")",
":",
"raise",
"TooShort",
"(",
")",
"if",
"(",
"(",
"time",
"(",
")",
"-",
"timeseries",
"[",
"(... | filter timeseries and run selected algorithm . | train | false |
42,203 | @cache_permission
def can_ignore_check(user, project):
return check_permission(user, project, 'trans.ignore_check')
| [
"@",
"cache_permission",
"def",
"can_ignore_check",
"(",
"user",
",",
"project",
")",
":",
"return",
"check_permission",
"(",
"user",
",",
"project",
",",
"'trans.ignore_check'",
")"
] | checks whether user can ignore check . | train | false |
42,204 | def pinv2(a, cond=None, rcond=None, return_rank=False, check_finite=True):
a = _asarray_validated(a, check_finite=check_finite)
(u, s, vh) = decomp_svd.svd(a, full_matrices=False, check_finite=False)
if (rcond is not None):
cond = rcond
if (cond in [None, (-1)]):
t = u.dtype.char.lower()
factor = {'f': 1000.0, 'd': 1000000.0}
cond = (factor[t] * np.finfo(t).eps)
rank = np.sum((s > (cond * np.max(s))))
u = u[:, :rank]
u /= s[:rank]
B = np.transpose(np.conjugate(np.dot(u, vh[:rank])))
if return_rank:
return (B, rank)
else:
return B
| [
"def",
"pinv2",
"(",
"a",
",",
"cond",
"=",
"None",
",",
"rcond",
"=",
"None",
",",
"return_rank",
"=",
"False",
",",
"check_finite",
"=",
"True",
")",
":",
"a",
"=",
"_asarray_validated",
"(",
"a",
",",
"check_finite",
"=",
"check_finite",
")",
"(",
... | compute the pseudo-inverse of a matrix . | train | false |
42,205 | def subPost(sub, a, b):
sub.calledSubPost = (sub.calledSubPost + 1)
| [
"def",
"subPost",
"(",
"sub",
",",
"a",
",",
"b",
")",
":",
"sub",
".",
"calledSubPost",
"=",
"(",
"sub",
".",
"calledSubPost",
"+",
"1",
")"
] | a post-hook for the subclass . | train | false |
42,207 | def long_substr(data):
substr = ''
if ((len(data) > 1) and (len(data[0]) > 0)):
for i in range(len(data[0])):
for j in range(((len(data[0]) - i) + 1)):
if ((j > len(substr)) and all(((data[0][i:(i + j)] in x) for x in data))):
substr = data[0][i:(i + j)]
elif (len(data) == 1):
substr = data[0]
return substr
| [
"def",
"long_substr",
"(",
"data",
")",
":",
"substr",
"=",
"''",
"if",
"(",
"(",
"len",
"(",
"data",
")",
">",
"1",
")",
"and",
"(",
"len",
"(",
"data",
"[",
"0",
"]",
")",
">",
"0",
")",
")",
":",
"for",
"i",
"in",
"range",
"(",
"len",
... | return the longest common substring in a list of strings . | train | true |
42,208 | def cov_hc0(results):
het_scale = (results.resid ** 2)
cov_hc0 = _HCCM(results, het_scale)
return cov_hc0
| [
"def",
"cov_hc0",
"(",
"results",
")",
":",
"het_scale",
"=",
"(",
"results",
".",
"resid",
"**",
"2",
")",
"cov_hc0",
"=",
"_HCCM",
"(",
"results",
",",
"het_scale",
")",
"return",
"cov_hc0"
] | see statsmodels . | train | false |
42,209 | @cache_result()
@register.simple_tag
def primary_avatar(user, size=settings.AVATAR_DEFAULT_SIZE):
alt = six.text_type(user)
url = reverse('avatar_render_primary', kwargs={'user': user, 'size': size})
return ('<img src="%s" alt="%s" width="%s" height="%s" />' % (url, alt, size, size))
| [
"@",
"cache_result",
"(",
")",
"@",
"register",
".",
"simple_tag",
"def",
"primary_avatar",
"(",
"user",
",",
"size",
"=",
"settings",
".",
"AVATAR_DEFAULT_SIZE",
")",
":",
"alt",
"=",
"six",
".",
"text_type",
"(",
"user",
")",
"url",
"=",
"reverse",
"("... | this tag tries to get the default avatar for a user without doing any db requests . | train | false |
42,210 | def generate_nonce():
return random.randrange(1000000000, 2000000000)
| [
"def",
"generate_nonce",
"(",
")",
":",
"return",
"random",
".",
"randrange",
"(",
"1000000000",
",",
"2000000000",
")"
] | generate pseudorandom nonce that is unlikely to repeat . | train | false |
42,212 | def usecase1(arr1, arr2):
n1 = arr1.size
n2 = arr2.size
for i1 in range(n1):
st1 = arr1[i1]
for i2 in range(n2):
st2 = arr2[i2]
st2.row += (((st1.p * st2.p) + st1.row) - st1.col)
st1.p += st2.p
st1.col -= st2.col
| [
"def",
"usecase1",
"(",
"arr1",
",",
"arr2",
")",
":",
"n1",
"=",
"arr1",
".",
"size",
"n2",
"=",
"arr2",
".",
"size",
"for",
"i1",
"in",
"range",
"(",
"n1",
")",
":",
"st1",
"=",
"arr1",
"[",
"i1",
"]",
"for",
"i2",
"in",
"range",
"(",
"n2",... | base on URL modified to add test-able side effect . | train | false |
42,213 | def fsbsize(path):
path = encode(path)
if (os.name == 'nt'):
import ctypes
drive = ('%s\\' % os.path.splitdrive(path)[0])
(cluster_sectors, sector_size) = ctypes.c_longlong(0)
ctypes.windll.kernel32.GetDiskFreeSpaceW(ctypes.c_wchar_p(drive), ctypes.pointer(cluster_sectors), ctypes.pointer(sector_size), None, None)
return (cluster_sectors * sector_size)
else:
return os.statvfs(path).f_frsize
| [
"def",
"fsbsize",
"(",
"path",
")",
":",
"path",
"=",
"encode",
"(",
"path",
")",
"if",
"(",
"os",
".",
"name",
"==",
"'nt'",
")",
":",
"import",
"ctypes",
"drive",
"=",
"(",
"'%s\\\\'",
"%",
"os",
".",
"path",
".",
"splitdrive",
"(",
"path",
")"... | get optimal file system buffer size for i/o calls . | train | false |
42,214 | def _get_random_string():
return ''.join((random.choice((string.ascii_uppercase + string.digits)) for _ in range(RAND_LENGTH_SIZE)))
| [
"def",
"_get_random_string",
"(",
")",
":",
"return",
"''",
".",
"join",
"(",
"(",
"random",
".",
"choice",
"(",
"(",
"string",
".",
"ascii_uppercase",
"+",
"string",
".",
"digits",
")",
")",
"for",
"_",
"in",
"range",
"(",
"RAND_LENGTH_SIZE",
")",
")"... | return a securely generated random string . | train | false |
42,215 | def set_cors_middleware_defaults():
cors.set_defaults(allow_headers=['X-Auth-Token', 'X-Identity-Status', 'X-Roles', 'X-Service-Catalog', 'X-User-Id', 'X-Tenant-Id', 'X-Openstack-Request-Id'], expose_headers=['X-Auth-Token', 'X-Subject-Token', 'X-Service-Token', 'X-Openstack-Request-Id'], allow_methods=['GET', 'PUT', 'POST', 'DELETE', 'PATCH'])
| [
"def",
"set_cors_middleware_defaults",
"(",
")",
":",
"cors",
".",
"set_defaults",
"(",
"allow_headers",
"=",
"[",
"'X-Auth-Token'",
",",
"'X-Identity-Status'",
",",
"'X-Roles'",
",",
"'X-Service-Catalog'",
",",
"'X-User-Id'",
",",
"'X-Tenant-Id'",
",",
"'X-Openstack-... | update default configuration options for oslo . | train | false |
42,217 | def sluu99(n):
if (n < 0):
n = (n * (-1))
while (n > 0):
n = (n - 2)
return (n == 0)
| [
"def",
"sluu99",
"(",
"n",
")",
":",
"if",
"(",
"n",
"<",
"0",
")",
":",
"n",
"=",
"(",
"n",
"*",
"(",
"-",
"1",
")",
")",
"while",
"(",
"n",
">",
"0",
")",
":",
"n",
"=",
"(",
"n",
"-",
"2",
")",
"return",
"(",
"n",
"==",
"0",
")"
... | this function checks if a number is odd or even . | train | false |
42,219 | def GetFeedItemIdsForCampaign(campaign_feed):
feed_item_ids = set()
try:
lhs_operand = campaign_feed['matchingFunction']['lhsOperand']
except KeyError:
lhs_operand = None
if (lhs_operand and (lhs_operand[0]['FunctionArgumentOperand.Type'] == 'RequestContextOperand')):
request_context_operand = lhs_operand[0]
if ((request_context_operand['contextType'] == 'FEED_ITEM_ID') and (campaign_feed['matchingFunction']['operator'] == 'IN')):
for argument in campaign_feed['matchingFunction']['rhsOperand']:
if (argument['xsi_type'] == 'ConstantOperand'):
feed_item_ids.add(argument['longValue'])
return feed_item_ids
| [
"def",
"GetFeedItemIdsForCampaign",
"(",
"campaign_feed",
")",
":",
"feed_item_ids",
"=",
"set",
"(",
")",
"try",
":",
"lhs_operand",
"=",
"campaign_feed",
"[",
"'matchingFunction'",
"]",
"[",
"'lhsOperand'",
"]",
"except",
"KeyError",
":",
"lhs_operand",
"=",
"... | gets the feed item ids used by a campaign through a given campaign feed . | train | true |
42,220 | def getNewRepository():
return ExportRepository()
| [
"def",
"getNewRepository",
"(",
")",
":",
"return",
"ExportRepository",
"(",
")"
] | get new repository . | train | false |
42,222 | @contextlib.contextmanager
def HandleServerException(display=True, truncate=False):
try:
try:
(yield)
except UnknownExtraConf as e:
if vimsupport.Confirm(str(e)):
_LoadExtraConfFile(e.extra_conf_file)
else:
_IgnoreExtraConfFile(e.extra_conf_file)
except Exception as e:
_logger.exception(u'Error while handling server response')
if display:
DisplayServerException(e, truncate)
| [
"@",
"contextlib",
".",
"contextmanager",
"def",
"HandleServerException",
"(",
"display",
"=",
"True",
",",
"truncate",
"=",
"False",
")",
":",
"try",
":",
"try",
":",
"(",
"yield",
")",
"except",
"UnknownExtraConf",
"as",
"e",
":",
"if",
"vimsupport",
"."... | catch any exception raised through server communication . | train | false |
42,224 | def create_requirements_index_file(venv_path, requirements_file):
index_filename = get_index_filename(venv_path)
packages = get_package_names(requirements_file)
with open(index_filename, 'w') as writer:
writer.write('\n'.join(packages))
writer.write('\n')
return index_filename
| [
"def",
"create_requirements_index_file",
"(",
"venv_path",
",",
"requirements_file",
")",
":",
"index_filename",
"=",
"get_index_filename",
"(",
"venv_path",
")",
"packages",
"=",
"get_package_names",
"(",
"requirements_file",
")",
"with",
"open",
"(",
"index_filename",... | creates a file . | train | false |
42,228 | def put_acl(Bucket, ACL=None, AccessControlPolicy=None, GrantFullControl=None, GrantRead=None, GrantReadACP=None, GrantWrite=None, GrantWriteACP=None, region=None, key=None, keyid=None, profile=None):
try:
conn = _get_conn(region=region, key=key, keyid=keyid, profile=profile)
kwargs = {}
if (AccessControlPolicy is not None):
if isinstance(AccessControlPolicy, six.string_types):
AccessControlPolicy = json.loads(AccessControlPolicy)
kwargs['AccessControlPolicy'] = AccessControlPolicy
for arg in ('ACL', 'GrantFullControl', 'GrantRead', 'GrantReadACP', 'GrantWrite', 'GrantWriteACP'):
if (locals()[arg] is not None):
kwargs[arg] = str(locals()[arg])
conn.put_bucket_acl(Bucket=Bucket, **kwargs)
return {'updated': True, 'name': Bucket}
except ClientError as e:
return {'updated': False, 'error': __utils__['boto3.get_error'](e)}
| [
"def",
"put_acl",
"(",
"Bucket",
",",
"ACL",
"=",
"None",
",",
"AccessControlPolicy",
"=",
"None",
",",
"GrantFullControl",
"=",
"None",
",",
"GrantRead",
"=",
"None",
",",
"GrantReadACP",
"=",
"None",
",",
"GrantWrite",
"=",
"None",
",",
"GrantWriteACP",
... | given a valid config . | train | true |
42,230 | def widgets_sorter(widget):
return WIDGETS[widget].order
| [
"def",
"widgets_sorter",
"(",
"widget",
")",
":",
"return",
"WIDGETS",
"[",
"widget",
"]",
".",
"order"
] | provides better ordering of widgets . | train | false |
42,231 | def runctx(statement, globals, locals, filename=None, sort=(-1)):
prof = Profile()
result = None
try:
prof = prof.runctx(statement, globals, locals)
except SystemExit:
pass
finally:
if (filename is not None):
prof.dump_stats(filename)
else:
result = prof.print_stats(sort)
return result
| [
"def",
"runctx",
"(",
"statement",
",",
"globals",
",",
"locals",
",",
"filename",
"=",
"None",
",",
"sort",
"=",
"(",
"-",
"1",
")",
")",
":",
"prof",
"=",
"Profile",
"(",
")",
"result",
"=",
"None",
"try",
":",
"prof",
"=",
"prof",
".",
"runctx... | run statement under profiler . | train | false |
42,232 | @require_POST
@login_required
def update_coupon(request, course_id):
coupon_id = request.POST.get('coupon_id', None)
if (not coupon_id):
return JsonResponse({'message': _('coupon id not found')}, status=400)
try:
coupon = Coupon.objects.get(pk=coupon_id)
except ObjectDoesNotExist:
return JsonResponse({'message': _('coupon with the coupon id ({coupon_id}) DoesNotExist').format(coupon_id=coupon_id)}, status=400)
description = request.POST.get('description')
coupon.description = description
coupon.save()
return JsonResponse({'message': _('coupon with the coupon id ({coupon_id}) updated Successfully').format(coupon_id=coupon_id)})
| [
"@",
"require_POST",
"@",
"login_required",
"def",
"update_coupon",
"(",
"request",
",",
"course_id",
")",
":",
"coupon_id",
"=",
"request",
".",
"POST",
".",
"get",
"(",
"'coupon_id'",
",",
"None",
")",
"if",
"(",
"not",
"coupon_id",
")",
":",
"return",
... | update the coupon object in the database . | train | false |
42,233 | def __getMasterPassword():
global MasterPassword
(pw, ok) = QInputDialog.getText(None, QCoreApplication.translate(u'Crypto', u'Master Password'), QCoreApplication.translate(u'Crypto', u'Enter the master password:'), QLineEdit.Password)
if ok:
from .py3PBKDF2 import verifyPassword
masterPassword = Preferences.getUser(u'MasterPassword')
try:
if masterPassword:
if verifyPassword(pw, masterPassword):
MasterPassword = pwEncode(pw)
else:
E5MessageBox.warning(None, QCoreApplication.translate(u'Crypto', u'Master Password'), QCoreApplication.translate(u'Crypto', u'The given password is incorrect.'))
else:
E5MessageBox.critical(None, QCoreApplication.translate(u'Crypto', u'Master Password'), QCoreApplication.translate(u'Crypto', u'There is no master password registered.'))
except ValueError as why:
E5MessageBox.warning(None, QCoreApplication.translate(u'Crypto', u'Master Password'), QCoreApplication.translate(u'Crypto', u'<p>The given password cannot be verified.</p><p>Reason: {0}'.format(str(why))))
| [
"def",
"__getMasterPassword",
"(",
")",
":",
"global",
"MasterPassword",
"(",
"pw",
",",
"ok",
")",
"=",
"QInputDialog",
".",
"getText",
"(",
"None",
",",
"QCoreApplication",
".",
"translate",
"(",
"u'Crypto'",
",",
"u'Master Password'",
")",
",",
"QCoreApplic... | private module function to get the password from the user . | train | false |
42,234 | def test_replace_update_column_via_setitem_warnings_attributes():
t = table.Table([[1, 2, 3], [4, 5, 6]], names=['a', 'b'])
t['a'].unit = 'm'
with catch_warnings() as w:
with table.conf.set_temp('replace_warnings', ['refcount', 'attributes', 'slice']):
t['a'] = [10, 20, 30]
assert (len(w) == 1)
assert ("replaced column 'a' and column attributes ['unit']" in str(w[0].message))
| [
"def",
"test_replace_update_column_via_setitem_warnings_attributes",
"(",
")",
":",
"t",
"=",
"table",
".",
"Table",
"(",
"[",
"[",
"1",
",",
"2",
",",
"3",
"]",
",",
"[",
"4",
",",
"5",
",",
"6",
"]",
"]",
",",
"names",
"=",
"[",
"'a'",
",",
"'b'"... | test warnings related to table replace change in #5556: lost attributes . | train | false |
42,236 | def validate_index(index_vals):
from numbers import Number
if isinstance(index_vals[0], Number):
if (not all((isinstance(item, Number) for item in index_vals))):
raise exceptions.PlotlyError('Error in indexing column. Make sure all entries of each column are all numbers or all strings.')
elif isinstance(index_vals[0], str):
if (not all((isinstance(item, str) for item in index_vals))):
raise exceptions.PlotlyError('Error in indexing column. Make sure all entries of each column are all numbers or all strings.')
| [
"def",
"validate_index",
"(",
"index_vals",
")",
":",
"from",
"numbers",
"import",
"Number",
"if",
"isinstance",
"(",
"index_vals",
"[",
"0",
"]",
",",
"Number",
")",
":",
"if",
"(",
"not",
"all",
"(",
"(",
"isinstance",
"(",
"item",
",",
"Number",
")"... | validates if a list contains all numbers or all strings :raises: if there are any two items in the list whose types differ . | train | false |
42,237 | def test_neg_clrtype_wrong_case():
global called
called = False
class MyType(type, ):
def __clrType__(self):
global called
called = True
return super(MyType, self).__clrtype__()
class X(object, ):
__metaclass__ = MyType
AreEqual(called, False)
| [
"def",
"test_neg_clrtype_wrong_case",
"(",
")",
":",
"global",
"called",
"called",
"=",
"False",
"class",
"MyType",
"(",
"type",
",",
")",
":",
"def",
"__clrType__",
"(",
"self",
")",
":",
"global",
"called",
"called",
"=",
"True",
"return",
"super",
"(",
... | define the __clrtype__ function using the wrong case and see what happens . | train | false |
42,239 | def check_predicate(result, func, cargs):
val = ord(result)
if (val == 1):
return True
elif (val == 0):
return False
else:
raise GEOSException(('Error encountered on GEOS C predicate function "%s".' % func.__name__))
| [
"def",
"check_predicate",
"(",
"result",
",",
"func",
",",
"cargs",
")",
":",
"val",
"=",
"ord",
"(",
"result",
")",
"if",
"(",
"val",
"==",
"1",
")",
":",
"return",
"True",
"elif",
"(",
"val",
"==",
"0",
")",
":",
"return",
"False",
"else",
":",... | error checking for unary/binary predicate functions . | train | false |
42,240 | def flatpages_link_list(request):
from django.contrib.flatpages.models import FlatPage
link_list = [(page.title, page.url) for page in FlatPage.objects.all()]
return render_to_link_list(link_list)
| [
"def",
"flatpages_link_list",
"(",
"request",
")",
":",
"from",
"django",
".",
"contrib",
".",
"flatpages",
".",
"models",
"import",
"FlatPage",
"link_list",
"=",
"[",
"(",
"page",
".",
"title",
",",
"page",
".",
"url",
")",
"for",
"page",
"in",
"FlatPag... | returns a httpresponse whose content is a javascript file representing a list of links to flatpages . | train | true |
42,241 | def linux_standby():
if (not HAVE_DBUS):
return
(proxy, interface) = _get_sessionproxy()
if proxy:
if proxy.CanSuspend():
proxy.Suspend(dbus_interface=interface)
else:
(proxy, interface, pinterface) = _get_systemproxy('UPower')
if (not proxy):
(proxy, interface, pinterface) = _get_systemproxy('DeviceKit')
if proxy:
if proxy.Get(interface, 'can-suspend', dbus_interface=pinterface):
try:
proxy.Suspend(dbus_interface=interface)
except dbus.exceptions.DBusException as msg:
logging.info('Received a DBus exception %s', msg)
else:
logging.info('DBus does not support Suspend (standby)')
time.sleep(10)
| [
"def",
"linux_standby",
"(",
")",
":",
"if",
"(",
"not",
"HAVE_DBUS",
")",
":",
"return",
"(",
"proxy",
",",
"interface",
")",
"=",
"_get_sessionproxy",
"(",
")",
"if",
"proxy",
":",
"if",
"proxy",
".",
"CanSuspend",
"(",
")",
":",
"proxy",
".",
"Sus... | make linux system go into standby . | train | false |
42,244 | def fuzzy_url(urlstr, cwd=None, relative=False, do_search=True, force_search=False):
urlstr = urlstr.strip()
path = get_path_if_valid(urlstr, cwd=cwd, relative=relative, check_exists=True)
if ((not force_search) and (path is not None)):
url = QUrl.fromLocalFile(path)
elif (force_search or (do_search and (not is_url(urlstr)))):
log.url.debug('URL is a fuzzy search term')
try:
url = _get_search_url(urlstr)
except ValueError:
url = qurl_from_user_input(urlstr)
else:
log.url.debug('URL is a fuzzy address')
url = qurl_from_user_input(urlstr)
log.url.debug('Converting fuzzy term {!r} to URL -> {}'.format(urlstr, url.toDisplayString()))
if (do_search and config.get('general', 'auto-search') and urlstr):
qtutils.ensure_valid(url)
elif (not url.isValid()):
raise InvalidUrlError(url)
return url
| [
"def",
"fuzzy_url",
"(",
"urlstr",
",",
"cwd",
"=",
"None",
",",
"relative",
"=",
"False",
",",
"do_search",
"=",
"True",
",",
"force_search",
"=",
"False",
")",
":",
"urlstr",
"=",
"urlstr",
".",
"strip",
"(",
")",
"path",
"=",
"get_path_if_valid",
"(... | get a qurl based on a user input which is url or search term . | train | false |
42,245 | def test_nm1_sample_wrong_X():
nm1 = NearMiss(random_state=RND_SEED)
nm1.fit(X, Y)
assert_raises(RuntimeError, nm1.sample, np.random.random((100, 40)), np.array((([0] * 50) + ([1] * 50))))
| [
"def",
"test_nm1_sample_wrong_X",
"(",
")",
":",
"nm1",
"=",
"NearMiss",
"(",
"random_state",
"=",
"RND_SEED",
")",
"nm1",
".",
"fit",
"(",
"X",
",",
"Y",
")",
"assert_raises",
"(",
"RuntimeError",
",",
"nm1",
".",
"sample",
",",
"np",
".",
"random",
"... | test either if an error is raised when x is different at fitting and sampling . | train | false |
42,246 | @conf.commands.register
def srbt1(peer, pkts, *args, **kargs):
(a, b) = srbt(peer, pkts, *args, **kargs)
if (len(a) > 0):
return a[0][1]
| [
"@",
"conf",
".",
"commands",
".",
"register",
"def",
"srbt1",
"(",
"peer",
",",
"pkts",
",",
"*",
"args",
",",
"**",
"kargs",
")",
":",
"(",
"a",
",",
"b",
")",
"=",
"srbt",
"(",
"peer",
",",
"pkts",
",",
"*",
"args",
",",
"**",
"kargs",
")"... | send and receive 1 packet using a bluetooth socket . | train | false |
42,249 | def test_only_one_value(Chart):
chart = Chart()
chart.add('S', [1])
q = chart.render_pyquery()
assert (len(q('.legend')) == 1)
| [
"def",
"test_only_one_value",
"(",
"Chart",
")",
":",
"chart",
"=",
"Chart",
"(",
")",
"chart",
".",
"add",
"(",
"'S'",
",",
"[",
"1",
"]",
")",
"q",
"=",
"chart",
".",
"render_pyquery",
"(",
")",
"assert",
"(",
"len",
"(",
"q",
"(",
"'.legend'",
... | test chart rendering with only one value . | train | false |
42,250 | def import_from_stdlib(module):
old_path = sys.path
sys.path = [d for d in sys.path if (d not in ('', '.'))]
m = __import__(module)
sys.path = old_path
return m
| [
"def",
"import_from_stdlib",
"(",
"module",
")",
":",
"old_path",
"=",
"sys",
".",
"path",
"sys",
".",
"path",
"=",
"[",
"d",
"for",
"d",
"in",
"sys",
".",
"path",
"if",
"(",
"d",
"not",
"in",
"(",
"''",
",",
"'.'",
")",
")",
"]",
"m",
"=",
"... | when python is run from within the nltk/ directory tree . | train | false |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.