id_within_dataset int64 1 55.5k | snippet stringlengths 19 14.2k | tokens listlengths 6 1.63k | nl stringlengths 6 352 | split_within_dataset stringclasses 1 value | is_duplicated bool 2 classes |
|---|---|---|---|---|---|
48,559 | def _accumulateFrequencyCounts(values, freqCounts=None):
values = numpy.array(values)
numEntries = (values.max() + 1)
if (freqCounts is not None):
numEntries = max(numEntries, freqCounts.size)
if (freqCounts is not None):
if (freqCounts.size != numEntries):
newCounts = numpy.zeros(numEntries, dtype='int32')
newCounts[0:freqCounts.size] = freqCounts
else:
newCounts = freqCounts
else:
newCounts = numpy.zeros(numEntries, dtype='int32')
for v in values:
newCounts[v] += 1
return newCounts
| [
"def",
"_accumulateFrequencyCounts",
"(",
"values",
",",
"freqCounts",
"=",
"None",
")",
":",
"values",
"=",
"numpy",
".",
"array",
"(",
"values",
")",
"numEntries",
"=",
"(",
"values",
".",
"max",
"(",
")",
"+",
"1",
")",
"if",
"(",
"freqCounts",
"is"... | accumulate a list of values values into the frequency counts freqcounts . | train | true |
48,564 | def is_spam(request, form, url):
for spam_filter_path in settings.SPAM_FILTERS:
spam_filter = import_dotted_path(spam_filter_path)
if spam_filter(request, form, url):
return True
| [
"def",
"is_spam",
"(",
"request",
",",
"form",
",",
"url",
")",
":",
"for",
"spam_filter_path",
"in",
"settings",
".",
"SPAM_FILTERS",
":",
"spam_filter",
"=",
"import_dotted_path",
"(",
"spam_filter_path",
")",
"if",
"spam_filter",
"(",
"request",
",",
"form"... | main entry point for spam handling - called from the comment view and page processor for mezzanine . | train | true |
48,565 | def mask_color(clip, color=[0, 0, 0], thr=0, s=1):
hill = (lambda x: ((1.0 * (x != 0)) if (thr == 0) else ((x ** s) / ((thr ** s) + (x ** s)))))
color = np.array(color)
flim = (lambda im: hill(np.sqrt(((im - color) ** 2).sum(axis=2))))
mask = clip.fl_image(flim)
mask.ismask = True
newclip = clip.set_mask(mask)
return newclip
| [
"def",
"mask_color",
"(",
"clip",
",",
"color",
"=",
"[",
"0",
",",
"0",
",",
"0",
"]",
",",
"thr",
"=",
"0",
",",
"s",
"=",
"1",
")",
":",
"hill",
"=",
"(",
"lambda",
"x",
":",
"(",
"(",
"1.0",
"*",
"(",
"x",
"!=",
"0",
")",
")",
"if",... | returns a new clip with a mask for transparency where the original clip is of the given color . | train | false |
48,567 | def _is_in_range(value, min_value, max_value):
if (value is None):
return False
if ((not (min_value or max_value)) or (min_value == max_value == value)):
return True
if (((not min_value) or (value > min_value)) and ((max_value is None) or (value <= max_value))):
return True
return False
| [
"def",
"_is_in_range",
"(",
"value",
",",
"min_value",
",",
"max_value",
")",
":",
"if",
"(",
"value",
"is",
"None",
")",
":",
"return",
"False",
"if",
"(",
"(",
"not",
"(",
"min_value",
"or",
"max_value",
")",
")",
"or",
"(",
"min_value",
"==",
"max... | help function to check if the range matches with value if min_value is none the max_value determines if the range matches . | train | false |
48,568 | def is_pubkey(string):
pgp_regex = '.*?(-----BEGIN PGP PUBLIC KEY BLOCK-----.*?-----END PGP PUBLIC KEY BLOCK-----).*'
return re.match(pgp_regex, string, re.DOTALL)
| [
"def",
"is_pubkey",
"(",
"string",
")",
":",
"pgp_regex",
"=",
"'.*?(-----BEGIN PGP PUBLIC KEY BLOCK-----.*?-----END PGP PUBLIC KEY BLOCK-----).*'",
"return",
"re",
".",
"match",
"(",
"pgp_regex",
",",
"string",
",",
"re",
".",
"DOTALL",
")"
] | verifies if string is a pubkey . | train | false |
48,570 | def update_comment_in_doc(doc):
if (doc.communication_type not in (u'Comment', u'Communication')):
return
if ((doc.communication_type == u'Comment') and (doc.comment_type != u'Comment')):
return
def get_content(doc):
return ((doc.content[:97] + u'...') if (len(doc.content) > 100) else doc.content)
if (doc.reference_doctype and doc.reference_name and doc.content):
_comments = get_comments_from_parent(doc)
updated = False
for c in _comments:
if (c.get(u'name') == doc.name):
c[u'comment'] = get_content(doc)
updated = True
if (not updated):
_comments.append({u'comment': get_content(doc), u'by': (doc.sender or doc.owner), u'name': doc.name})
update_comments_in_parent(doc.reference_doctype, doc.reference_name, _comments)
| [
"def",
"update_comment_in_doc",
"(",
"doc",
")",
":",
"if",
"(",
"doc",
".",
"communication_type",
"not",
"in",
"(",
"u'Comment'",
",",
"u'Communication'",
")",
")",
":",
"return",
"if",
"(",
"(",
"doc",
".",
"communication_type",
"==",
"u'Comment'",
")",
... | updates _comments property in parent document . | train | false |
48,571 | @step(u'The "([^"]*)" problem displays a "([^"]*)" answer')
def assert_problem_has_answer(step, problem_type, answer_class):
assert (answer_class in ['correct', 'incorrect', 'blank'])
assert (problem_type in PROBLEM_DICT)
problem_has_answer(world.scenario_dict['COURSE'].number, problem_type, answer_class)
| [
"@",
"step",
"(",
"u'The \"([^\"]*)\" problem displays a \"([^\"]*)\" answer'",
")",
"def",
"assert_problem_has_answer",
"(",
"step",
",",
"problem_type",
",",
"answer_class",
")",
":",
"assert",
"(",
"answer_class",
"in",
"[",
"'correct'",
",",
"'incorrect'",
",",
"'... | assert that the problem is displaying a particular answer . | train | false |
48,572 | def approximate_fst(desired_fst, simulated_fst, parameter_fst, max_run_fst=1, min_run_fst=0, limit=0.005):
if (abs((simulated_fst - desired_fst)) < limit):
return (parameter_fst, max_run_fst, min_run_fst)
if (simulated_fst > desired_fst):
max_run_fst = parameter_fst
next_parameter_fst = ((min_run_fst + parameter_fst) / 2)
else:
min_run_fst = parameter_fst
next_parameter_fst = ((max_run_fst + parameter_fst) / 2)
return (next_parameter_fst, max_run_fst, min_run_fst)
| [
"def",
"approximate_fst",
"(",
"desired_fst",
",",
"simulated_fst",
",",
"parameter_fst",
",",
"max_run_fst",
"=",
"1",
",",
"min_run_fst",
"=",
"0",
",",
"limit",
"=",
"0.005",
")",
":",
"if",
"(",
"abs",
"(",
"(",
"simulated_fst",
"-",
"desired_fst",
")"... | calculates the next fst attempt in order to approximate a desired fst . | train | false |
48,573 | def random_y_given_x(x):
return (x + roll_a_die())
| [
"def",
"random_y_given_x",
"(",
"x",
")",
":",
"return",
"(",
"x",
"+",
"roll_a_die",
"(",
")",
")"
] | equally likely to be x + 1 . | train | false |
48,574 | def genBhLobe(x):
N = 512
f = (((x * np.pi) * 2) / N)
df = ((2 * np.pi) / N)
y = np.zeros(x.size)
consts = [0.35875, 0.48829, 0.14128, 0.01168]
for m in range(0, 4):
y += ((consts[m] / 2) * (sinc((f - (df * m)), N) + sinc((f + (df * m)), N)))
y = ((y / N) / consts[0])
return y
| [
"def",
"genBhLobe",
"(",
"x",
")",
":",
"N",
"=",
"512",
"f",
"=",
"(",
"(",
"(",
"x",
"*",
"np",
".",
"pi",
")",
"*",
"2",
")",
"/",
"N",
")",
"df",
"=",
"(",
"(",
"2",
"*",
"np",
".",
"pi",
")",
"/",
"N",
")",
"y",
"=",
"np",
".",... | generate the main lobe of a blackman-harris window x: bin positions to compute returns y: main lobe os spectrum of a blackman-harris window . | train | false |
48,575 | def _close_to_num(o1, o2, epsilon=5):
delta = abs(((o2 - o1) * MUSECONDS_PER_DAY))
assert (delta < epsilon)
| [
"def",
"_close_to_num",
"(",
"o1",
",",
"o2",
",",
"epsilon",
"=",
"5",
")",
":",
"delta",
"=",
"abs",
"(",
"(",
"(",
"o2",
"-",
"o1",
")",
"*",
"MUSECONDS_PER_DAY",
")",
")",
"assert",
"(",
"delta",
"<",
"epsilon",
")"
] | assert that float ordinals *o1* and *o2* are within *epsilon* microseconds . | train | false |
48,576 | def make_attendance_records(student, student_name, status, course_schedule=None, student_batch=None, date=None):
student_attendance = frappe.new_doc(u'Student Attendance')
student_attendance.student = student
student_attendance.student_name = student_name
student_attendance.course_schedule = course_schedule
student_attendance.student_batch = student_batch
student_attendance.date = date
student_attendance.status = status
student_attendance.submit()
| [
"def",
"make_attendance_records",
"(",
"student",
",",
"student_name",
",",
"status",
",",
"course_schedule",
"=",
"None",
",",
"student_batch",
"=",
"None",
",",
"date",
"=",
"None",
")",
":",
"student_attendance",
"=",
"frappe",
".",
"new_doc",
"(",
"u'Stude... | creates attendance record . | train | false |
48,578 | def _get_window(start, end):
from scipy.signal import hann
window = (1 - np.r_[(hann(4)[:2], np.ones((np.abs((end - start)) - 4)), hann(4)[(-2):])].T)
return window
| [
"def",
"_get_window",
"(",
"start",
",",
"end",
")",
":",
"from",
"scipy",
".",
"signal",
"import",
"hann",
"window",
"=",
"(",
"1",
"-",
"np",
".",
"r_",
"[",
"(",
"hann",
"(",
"4",
")",
"[",
":",
"2",
"]",
",",
"np",
".",
"ones",
"(",
"(",
... | return window which has length as much as parameter start - end . | train | false |
48,579 | @pytest.mark.parametrize('value', [['foo\\bar'], ['fo\xc3\xb6\r\nb\xc3\xa4r'], ['fo\xc3\xb6\\r\\nb\xc3\xa4r'], ['fo\xc3\xb6\r\n\\r\\nb\xc3\xa4r', 'b\xc3\xa4r\r\n\\r\\nb\xc3\xa4z'], ['nfo\xc3\xb6\nb\xc3\xa4r'], ['nfo\xc3\xb6\\nb\xc3\xa4r'], ['fo\xc3\xb6\n\\nb\xc3\xa4r', 'b\xc3\xa4r\n\\nb\xc3\xa4z']])
def test_multistringwidget_decompress_list_of_values(value):
widget = MultiStringWidget()
assert (widget.decompress(value) == value)
| [
"@",
"pytest",
".",
"mark",
".",
"parametrize",
"(",
"'value'",
",",
"[",
"[",
"'foo\\\\bar'",
"]",
",",
"[",
"'fo\\xc3\\xb6\\r\\nb\\xc3\\xa4r'",
"]",
",",
"[",
"'fo\\xc3\\xb6\\\\r\\\\nb\\xc3\\xa4r'",
"]",
",",
"[",
"'fo\\xc3\\xb6\\r\\n\\\\r\\\\nb\\xc3\\xa4r'",
",",
... | tests units multistringwidget decompresses a list of values . | train | false |
48,580 | def _local_server_get(url, session):
request = HttpRequest()
request.method = 'GET'
request.session = session
(view, args, kwargs) = resolve(url)
response = view(request, *args, **kwargs)
return response.content
| [
"def",
"_local_server_get",
"(",
"url",
",",
"session",
")",
":",
"request",
"=",
"HttpRequest",
"(",
")",
"request",
".",
"method",
"=",
"'GET'",
"request",
".",
"session",
"=",
"session",
"(",
"view",
",",
"args",
",",
"kwargs",
")",
"=",
"resolve",
... | simulate a server-server get request for an in-process api . | train | false |
48,581 | def walk_outputs(object):
out = []
if isinstance(object, dict):
for (key, val) in sorted(object.items()):
if isdefined(val):
out.extend(walk_outputs(val))
elif isinstance(object, (list, tuple)):
for val in object:
if isdefined(val):
out.extend(walk_outputs(val))
elif (isdefined(object) and isinstance(object, (str, bytes))):
if (os.path.islink(object) or os.path.isfile(object)):
out = [(filename, u'f') for filename in get_all_files(object)]
elif os.path.isdir(object):
out = [(object, u'd')]
return out
| [
"def",
"walk_outputs",
"(",
"object",
")",
":",
"out",
"=",
"[",
"]",
"if",
"isinstance",
"(",
"object",
",",
"dict",
")",
":",
"for",
"(",
"key",
",",
"val",
")",
"in",
"sorted",
"(",
"object",
".",
"items",
"(",
")",
")",
":",
"if",
"isdefined"... | extract every file and directory from a python structure . | train | false |
48,582 | def install_reactor(explicit_reactor=None, verbose=False):
import sys
log = txaio.make_logger()
if explicit_reactor:
from twisted.application.reactors import installReactor
log.info("Trying to install explicitly specified Twisted reactor '{reactor}'", reactor=explicit_reactor)
try:
installReactor(explicit_reactor)
except:
log.failure('Could not install Twisted reactor {reactor}\n{log_failure.value}', reactor=explicit_reactor)
sys.exit(1)
else:
log.debug('Automatically choosing optimal Twisted reactor')
install_optimal_reactor(verbose)
from twisted.internet import reactor
txaio.config.loop = reactor
if verbose:
from twisted.python.reflect import qual
log.debug('Running Twisted reactor {reactor}', reactor=qual(reactor.__class__))
return reactor
| [
"def",
"install_reactor",
"(",
"explicit_reactor",
"=",
"None",
",",
"verbose",
"=",
"False",
")",
":",
"import",
"sys",
"log",
"=",
"txaio",
".",
"make_logger",
"(",
")",
"if",
"explicit_reactor",
":",
"from",
"twisted",
".",
"application",
".",
"reactors",... | install twisted reactor . | train | false |
48,584 | def cookie_date(epoch_seconds=None):
rfcdate = formatdate(epoch_seconds)
return ('%s-%s-%s GMT' % (rfcdate[:7], rfcdate[8:11], rfcdate[12:25]))
| [
"def",
"cookie_date",
"(",
"epoch_seconds",
"=",
"None",
")",
":",
"rfcdate",
"=",
"formatdate",
"(",
"epoch_seconds",
")",
"return",
"(",
"'%s-%s-%s GMT'",
"%",
"(",
"rfcdate",
"[",
":",
"7",
"]",
",",
"rfcdate",
"[",
"8",
":",
"11",
"]",
",",
"rfcdat... | formats the time to ensure compatibility with netscapes cookie standard . | train | false |
48,585 | @not_implemented_for('undirected')
def number_attracting_components(G):
n = len(list(attracting_components(G)))
return n
| [
"@",
"not_implemented_for",
"(",
"'undirected'",
")",
"def",
"number_attracting_components",
"(",
"G",
")",
":",
"n",
"=",
"len",
"(",
"list",
"(",
"attracting_components",
"(",
"G",
")",
")",
")",
"return",
"n"
] | returns the number of attracting components in g . | train | false |
48,586 | def _find_guids(guid_string):
guids = []
for found_guid in re.finditer(GUID_REGEX, guid_string):
if found_guid.groups():
guids.append(found_guid.group(0).strip('{}'))
return sorted(list(set(guids)))
| [
"def",
"_find_guids",
"(",
"guid_string",
")",
":",
"guids",
"=",
"[",
"]",
"for",
"found_guid",
"in",
"re",
".",
"finditer",
"(",
"GUID_REGEX",
",",
"guid_string",
")",
":",
"if",
"found_guid",
".",
"groups",
"(",
")",
":",
"guids",
".",
"append",
"("... | return the set of guids found in guid_string . | train | true |
48,587 | def parent_is_init():
if (os.getppid() == 1):
return True
return False
| [
"def",
"parent_is_init",
"(",
")",
":",
"if",
"(",
"os",
".",
"getppid",
"(",
")",
"==",
"1",
")",
":",
"return",
"True",
"return",
"False"
] | check if parent is init check if the parent process is init . | train | false |
48,588 | def is_extension_type(value):
if is_categorical(value):
return True
elif is_sparse(value):
return True
elif is_datetimetz(value):
return True
return False
| [
"def",
"is_extension_type",
"(",
"value",
")",
":",
"if",
"is_categorical",
"(",
"value",
")",
":",
"return",
"True",
"elif",
"is_sparse",
"(",
"value",
")",
":",
"return",
"True",
"elif",
"is_datetimetz",
"(",
"value",
")",
":",
"return",
"True",
"return"... | if we are a klass that is preserved by the internals these are internal klasses that we represent . | train | false |
48,589 | def add_doc_link(app, pagename, templatename, context, doctree):
if ((not app.config.github_user) and app.config.github_project):
return
source_suffix = app.config.source_suffix
source_suffix = (source_suffix if isinstance(source_suffix, basestring) else source_suffix[0])
context['github_link'] = (lambda mode='edit': make_github_link(app, ('doc/%s%s' % (pagename, source_suffix)), mode=mode))
| [
"def",
"add_doc_link",
"(",
"app",
",",
"pagename",
",",
"templatename",
",",
"context",
",",
"doctree",
")",
":",
"if",
"(",
"(",
"not",
"app",
".",
"config",
".",
"github_user",
")",
"and",
"app",
".",
"config",
".",
"github_project",
")",
":",
"retu... | add github_link function linking to the current page on github . | train | false |
48,590 | def search_python(python_code, template_name):
retval = []
for (fn, content) in python_code:
for (ln, line) in enumerate(content):
if (((u'"%s"' % template_name) in line) or ((u"'%s'" % template_name) in line)):
retval.append((fn, (ln + 1)))
return retval
| [
"def",
"search_python",
"(",
"python_code",
",",
"template_name",
")",
":",
"retval",
"=",
"[",
"]",
"for",
"(",
"fn",
",",
"content",
")",
"in",
"python_code",
":",
"for",
"(",
"ln",
",",
"line",
")",
"in",
"enumerate",
"(",
"content",
")",
":",
"if... | searches python code for a template name . | train | false |
48,591 | @pytest.fixture(scope=u'function')
def remove_test_dir(request):
def fin_remove_test_dir():
if os.path.exists(u'test_copy_without_render'):
utils.rmtree(u'test_copy_without_render')
request.addfinalizer(fin_remove_test_dir)
| [
"@",
"pytest",
".",
"fixture",
"(",
"scope",
"=",
"u'function'",
")",
"def",
"remove_test_dir",
"(",
"request",
")",
":",
"def",
"fin_remove_test_dir",
"(",
")",
":",
"if",
"os",
".",
"path",
".",
"exists",
"(",
"u'test_copy_without_render'",
")",
":",
"ut... | remove the folder that is created by the test . | train | false |
48,592 | def _clean_order(order):
order = re.findall('\\([0-9 ]*?\\)', order)
clean = (lambda x: tuple(map(int, re.sub('[()]', '', x).split(' '))))
if (len(order) > 1):
(order, sorder) = map(clean, order)
else:
order = clean(order[0])
sorder = (0, 0, 0)
return (order, sorder)
| [
"def",
"_clean_order",
"(",
"order",
")",
":",
"order",
"=",
"re",
".",
"findall",
"(",
"'\\\\([0-9 ]*?\\\\)'",
",",
"order",
")",
"clean",
"=",
"(",
"lambda",
"x",
":",
"tuple",
"(",
"map",
"(",
"int",
",",
"re",
".",
"sub",
"(",
"'[()]'",
",",
"'... | takes something like and returns a arma order . | train | false |
48,593 | def test_printwcs():
h = get_pkg_data_contents(u'spectra/orion-freq-1.hdr', encoding=u'binary')
w = wcs.WCS(h)
w.printwcs()
h = get_pkg_data_contents(u'data/3d_cd.hdr', encoding=u'binary')
w = wcs.WCS(h)
w.printwcs()
| [
"def",
"test_printwcs",
"(",
")",
":",
"h",
"=",
"get_pkg_data_contents",
"(",
"u'spectra/orion-freq-1.hdr'",
",",
"encoding",
"=",
"u'binary'",
")",
"w",
"=",
"wcs",
".",
"WCS",
"(",
"h",
")",
"w",
".",
"printwcs",
"(",
")",
"h",
"=",
"get_pkg_data_conten... | just make sure that it runs . | train | false |
48,594 | def get_switchport(port, module):
command = 'show interface {0} switchport'.format(port)
body = execute_show_command(command, module)
try:
body = execute_show_command(command, module)[0]
except IndexError:
body = []
if body:
key_map = {'interface': 'interface', 'oper_mode': 'mode', 'switchport': 'switchport', 'access_vlan': 'access_vlan', 'access_vlan_name': 'access_vlan_name', 'native_vlan': 'native_vlan', 'native_vlan_name': 'native_vlan_name', 'trunk_vlans': 'trunk_vlans'}
sp_table = body['TABLE_interface']['ROW_interface']
sp = apply_key_map(key_map, sp_table)
return sp
else:
return {}
| [
"def",
"get_switchport",
"(",
"port",
",",
"module",
")",
":",
"command",
"=",
"'show interface {0} switchport'",
".",
"format",
"(",
"port",
")",
"body",
"=",
"execute_show_command",
"(",
"command",
",",
"module",
")",
"try",
":",
"body",
"=",
"execute_show_c... | gets current config of l2 switchport args: device : this is the device object of an nx-api enabled device using the device class within device . | train | false |
48,595 | def OutHeader(text, dash):
n = (64 - len(text))
Output()
Output('/* %s %s %s */', (dash * (n / 2)), text, (dash * (n - (n / 2))))
Output()
| [
"def",
"OutHeader",
"(",
"text",
",",
"dash",
")",
":",
"n",
"=",
"(",
"64",
"-",
"len",
"(",
"text",
")",
")",
"Output",
"(",
")",
"Output",
"(",
"'/* %s %s %s */'",
",",
"(",
"dash",
"*",
"(",
"n",
"/",
"2",
")",
")",
",",
"text",
",",
"(",... | output a header comment using a given dash character . | train | false |
48,596 | def format_exc(limit=None):
try:
(etype, value, tb) = sys.exc_info()
return ''.join(traceback.format_exception(etype, value, tb, limit))
finally:
etype = value = tb = None
| [
"def",
"format_exc",
"(",
"limit",
"=",
"None",
")",
":",
"try",
":",
"(",
"etype",
",",
"value",
",",
"tb",
")",
"=",
"sys",
".",
"exc_info",
"(",
")",
"return",
"''",
".",
"join",
"(",
"traceback",
".",
"format_exception",
"(",
"etype",
",",
"val... | like print_exc() but return a string . | train | true |
48,597 | def _aggregate(input, options, output, timeFieldName):
aggregator = Aggregator(aggregationInfo=options, inputFields=input.getFields(), timeFieldName=timeFieldName)
while True:
inRecord = input.getNextRecord()
print 'Feeding in: ', inRecord
(outRecord, aggBookmark) = aggregator.next(record=inRecord, curInputBookmark=None)
print 'Record out: ', outRecord
if (outRecord is not None):
output.appendRecord(outRecord, None)
if ((inRecord is None) and (outRecord is None)):
break
| [
"def",
"_aggregate",
"(",
"input",
",",
"options",
",",
"output",
",",
"timeFieldName",
")",
":",
"aggregator",
"=",
"Aggregator",
"(",
"aggregationInfo",
"=",
"options",
",",
"inputFields",
"=",
"input",
".",
"getFields",
"(",
")",
",",
"timeFieldName",
"="... | aggregate the input stream and write aggregated records to the output stream . | train | false |
48,598 | @frappe.whitelist()
def apply_price_list(args, as_doc=False):
args = process_args(args)
parent = get_price_list_currency_and_exchange_rate(args)
children = []
if (u'items' in args):
item_list = args.get(u'items')
args.update(parent)
for item in item_list:
args_copy = frappe._dict(args.copy())
args_copy.update(item)
item_details = apply_price_list_on_item(args_copy)
children.append(item_details)
if as_doc:
args.price_list_currency = parent.price_list_currency
args.plc_conversion_rate = parent.plc_conversion_rate
if args.get(u'items'):
for (i, item) in enumerate(args.get(u'items')):
for fieldname in children[i]:
if ((fieldname in item) and (fieldname not in (u'name', u'doctype'))):
item[fieldname] = children[i][fieldname]
return args
else:
return {u'parent': parent, u'children': children}
| [
"@",
"frappe",
".",
"whitelist",
"(",
")",
"def",
"apply_price_list",
"(",
"args",
",",
"as_doc",
"=",
"False",
")",
":",
"args",
"=",
"process_args",
"(",
"args",
")",
"parent",
"=",
"get_price_list_currency_and_exchange_rate",
"(",
"args",
")",
"children",
... | apply pricelist on a document-like dict object and return as {parent: dict . | train | false |
48,599 | def _fake_is_smpl(*args):
return True
| [
"def",
"_fake_is_smpl",
"(",
"*",
"args",
")",
":",
"return",
"True"
] | assume the shadowimage pair status is smpl . | train | false |
48,600 | def split_low_tag(tag):
(state, id_, name, fun) = tag.split('_|-')
return {'state': state, '__id__': id_, 'name': name, 'fun': fun}
| [
"def",
"split_low_tag",
"(",
"tag",
")",
":",
"(",
"state",
",",
"id_",
",",
"name",
",",
"fun",
")",
"=",
"tag",
".",
"split",
"(",
"'_|-'",
")",
"return",
"{",
"'state'",
":",
"state",
",",
"'__id__'",
":",
"id_",
",",
"'name'",
":",
"name",
",... | take a low tag and split it back into the low dict that it came from . | train | true |
48,601 | def list_bans(banlist):
if (not banlist):
return 'No active bans were found.'
table = prettytable.PrettyTable(['{wid', '{wname/ip', '{wdate', '{wreason'])
for (inum, ban) in enumerate(banlist):
table.add_row([str((inum + 1)), ((ban[0] and ban[0]) or ban[1]), ban[3], ban[4]])
string = ('{wActive bans:{n\n%s' % table)
return string
| [
"def",
"list_bans",
"(",
"banlist",
")",
":",
"if",
"(",
"not",
"banlist",
")",
":",
"return",
"'No active bans were found.'",
"table",
"=",
"prettytable",
".",
"PrettyTable",
"(",
"[",
"'{wid'",
",",
"'{wname/ip'",
",",
"'{wdate'",
",",
"'{wreason'",
"]",
"... | helper function to display a list of active bans . | train | false |
48,602 | @pytest.mark.django_db
def test_service_methods_with_long_name(rf):
MAX_LENGTH = 100
long_name = ('X' * MAX_LENGTH)
assert (len(long_name) == MAX_LENGTH)
sm = ShippingMethod.objects.language('en').create(shop=get_default_shop(), name=long_name, enabled=True, tax_class=get_default_tax_class())
pm = PaymentMethod.objects.language('en').create(shop=get_default_shop(), name=long_name, enabled=True, tax_class=get_default_tax_class())
order = create_empty_order()
order.shipping_method = sm
order.payment_method = pm
order.full_clean()
order.save()
| [
"@",
"pytest",
".",
"mark",
".",
"django_db",
"def",
"test_service_methods_with_long_name",
"(",
"rf",
")",
":",
"MAX_LENGTH",
"=",
"100",
"long_name",
"=",
"(",
"'X'",
"*",
"MAX_LENGTH",
")",
"assert",
"(",
"len",
"(",
"long_name",
")",
"==",
"MAX_LENGTH",
... | make sure that service methods with long names dont cause exceptions when creating an order . | train | false |
48,604 | def salt_refs(data):
proto = 'salt://'
ret = []
if isinstance(data, str):
if data.startswith(proto):
return [data]
if isinstance(data, list):
for comp in data:
if isinstance(comp, str):
if comp.startswith(proto):
ret.append(comp)
return ret
| [
"def",
"salt_refs",
"(",
"data",
")",
":",
"proto",
"=",
"'salt://'",
"ret",
"=",
"[",
"]",
"if",
"isinstance",
"(",
"data",
",",
"str",
")",
":",
"if",
"data",
".",
"startswith",
"(",
"proto",
")",
":",
"return",
"[",
"data",
"]",
"if",
"isinstanc... | pull salt file references out of the states . | train | false |
48,608 | def test_statsd_fail():
logger = Statsd(Config())
logger.sock = MockSocket(True)
logger.info('No impact on logging')
logger.debug('No impact on logging')
logger.critical('No impact on logging')
logger.error('No impact on logging')
logger.warning('No impact on logging')
logger.exception('No impact on logging')
| [
"def",
"test_statsd_fail",
"(",
")",
":",
"logger",
"=",
"Statsd",
"(",
"Config",
"(",
")",
")",
"logger",
".",
"sock",
"=",
"MockSocket",
"(",
"True",
")",
"logger",
".",
"info",
"(",
"'No impact on logging'",
")",
"logger",
".",
"debug",
"(",
"'No impa... | udp socket fails . | train | false |
48,609 | def quote_slashes(text):
return re.sub(u'[;/]', _quote_slashes, text)
| [
"def",
"quote_slashes",
"(",
"text",
")",
":",
"return",
"re",
".",
"sub",
"(",
"u'[;/]'",
",",
"_quote_slashes",
",",
"text",
")"
] | quote / characters so that they arent visible to djangos url quoting . | train | false |
48,610 | def migrate_uuid(node, wname):
db = share_db()
old_sharejs_uuid = get_sharejs_uuid(node, wname)
broadcast_to_sharejs('lock', old_sharejs_uuid)
generate_private_uuid(node, wname)
new_sharejs_uuid = get_sharejs_uuid(node, wname)
doc_item = db['docs'].find_one({'_id': old_sharejs_uuid})
if doc_item:
doc_item['_id'] = new_sharejs_uuid
db['docs'].insert(doc_item)
db['docs'].remove({'_id': old_sharejs_uuid})
ops_items = [item for item in db['docs_ops'].find({'name': old_sharejs_uuid})]
if ops_items:
for item in ops_items:
item['_id'] = item['_id'].replace(old_sharejs_uuid, new_sharejs_uuid)
item['name'] = new_sharejs_uuid
db['docs_ops'].insert(ops_items)
db['docs_ops'].remove({'name': old_sharejs_uuid})
write_contributors = [user._id for user in node.contributors if node.has_permission(user, 'write')]
broadcast_to_sharejs('unlock', old_sharejs_uuid, data=write_contributors)
| [
"def",
"migrate_uuid",
"(",
"node",
",",
"wname",
")",
":",
"db",
"=",
"share_db",
"(",
")",
"old_sharejs_uuid",
"=",
"get_sharejs_uuid",
"(",
"node",
",",
"wname",
")",
"broadcast_to_sharejs",
"(",
"'lock'",
",",
"old_sharejs_uuid",
")",
"generate_private_uuid"... | migrates uuid to new namespace . | train | false |
48,612 | def _series_inversion1(p, x, prec):
if rs_is_puiseux(p, x):
return rs_puiseux(_series_inversion1, p, x, prec)
R = p.ring
zm = R.zero_monom
c = p[zm]
if (prec == int(prec)):
prec = int(prec)
if (zm not in p):
raise ValueError('No constant term in series')
if _has_constant_term((p - c), x):
raise ValueError('p cannot contain a constant term depending on parameters')
one = R(1)
if (R.domain is EX):
one = 1
if (c != one):
p1 = (R(1) / c)
else:
p1 = R(1)
for precx in _giant_steps(prec):
t = (1 - rs_mul(p1, p, x, precx))
p1 = (p1 + rs_mul(p1, t, x, precx))
return p1
| [
"def",
"_series_inversion1",
"(",
"p",
",",
"x",
",",
"prec",
")",
":",
"if",
"rs_is_puiseux",
"(",
"p",
",",
"x",
")",
":",
"return",
"rs_puiseux",
"(",
"_series_inversion1",
",",
"p",
",",
"x",
",",
"prec",
")",
"R",
"=",
"p",
".",
"ring",
"zm",
... | univariate series inversion 1/p modulo o . | train | false |
48,613 | def _InternalDeclareKeyFlags(flag_names, flag_values=FLAGS, key_flag_values=None):
key_flag_values = (key_flag_values or flag_values)
module = _GetCallingModule()
for flag_name in flag_names:
if (flag_name not in flag_values):
raise UnrecognizedFlagError(flag_name)
flag = flag_values.FlagDict()[flag_name]
key_flag_values._RegisterKeyFlagForModule(module, flag)
| [
"def",
"_InternalDeclareKeyFlags",
"(",
"flag_names",
",",
"flag_values",
"=",
"FLAGS",
",",
"key_flag_values",
"=",
"None",
")",
":",
"key_flag_values",
"=",
"(",
"key_flag_values",
"or",
"flag_values",
")",
"module",
"=",
"_GetCallingModule",
"(",
")",
"for",
... | declares a flag as key for the calling module . | train | false |
48,614 | def default_from_email():
global _default_from_email
if (_default_from_email is None):
try:
fqdn = socket.getfqdn()
except IOError:
fqdn = 'localhost'
_default_from_email = ('hue@' + fqdn)
return _default_from_email
| [
"def",
"default_from_email",
"(",
")",
":",
"global",
"_default_from_email",
"if",
"(",
"_default_from_email",
"is",
"None",
")",
":",
"try",
":",
"fqdn",
"=",
"socket",
".",
"getfqdn",
"(",
")",
"except",
"IOError",
":",
"fqdn",
"=",
"'localhost'",
"_defaul... | email for hue@<host-fqdn> . | train | false |
48,616 | def r_squared(alpha, beta, x, y):
return (1.0 - (sum_of_squared_errors(alpha, beta, x, y) / total_sum_of_squares(y)))
| [
"def",
"r_squared",
"(",
"alpha",
",",
"beta",
",",
"x",
",",
"y",
")",
":",
"return",
"(",
"1.0",
"-",
"(",
"sum_of_squared_errors",
"(",
"alpha",
",",
"beta",
",",
"x",
",",
"y",
")",
"/",
"total_sum_of_squares",
"(",
"y",
")",
")",
")"
] | the fraction of variation in y captured by the model . | train | false |
48,617 | def get_pixels():
return _sensehat.get_pixels()
| [
"def",
"get_pixels",
"(",
")",
":",
"return",
"_sensehat",
".",
"get_pixels",
"(",
")"
] | returns a list of 64 smaller lists of [r . | train | false |
48,618 | def get_yaml_loader(argline):
def yaml_loader(*args):
return SaltYamlSafeLoader(dictclass=OrderedDict, *args)
return yaml_loader
| [
"def",
"get_yaml_loader",
"(",
"argline",
")",
":",
"def",
"yaml_loader",
"(",
"*",
"args",
")",
":",
"return",
"SaltYamlSafeLoader",
"(",
"dictclass",
"=",
"OrderedDict",
",",
"*",
"args",
")",
"return",
"yaml_loader"
] | return the ordered dict yaml loader . | train | false |
48,619 | def remove_image(image):
client = _get_client()
status = base_status.copy()
try:
infos = _get_image_infos(image)
if infos:
status['id'] = infos['Id']
try:
client.remove_image(infos['Id'])
except Exception:
_invalid(status, id_=image, out=traceback.format_exc(), comment='Image could not be deleted')
try:
infos = _get_image_infos(image)
_invalid(status, comment='Image marked to be deleted but not deleted yet')
except Exception:
_valid(status, id_=image, comment='Image deleted')
else:
_invalid(status)
except Exception:
_invalid(status, out=traceback.format_exc(), comment='Image does not exist: {0}'.format(image))
return status
| [
"def",
"remove_image",
"(",
"image",
")",
":",
"client",
"=",
"_get_client",
"(",
")",
"status",
"=",
"base_status",
".",
"copy",
"(",
")",
"try",
":",
"infos",
"=",
"_get_image_infos",
"(",
"image",
")",
"if",
"infos",
":",
"status",
"[",
"'id'",
"]",... | remove an image from a system . | train | false |
48,621 | def syncloop(agents, sleep=2):
while True:
for agent in agents:
agent.sync()
time.sleep(sleep)
| [
"def",
"syncloop",
"(",
"agents",
",",
"sleep",
"=",
"2",
")",
":",
"while",
"True",
":",
"for",
"agent",
"in",
"agents",
":",
"agent",
".",
"sync",
"(",
")",
"time",
".",
"sleep",
"(",
"sleep",
")"
] | synchronize periodically the agents . | train | false |
48,622 | def dup_prs_resultant(f, g, K):
if ((not f) or (not g)):
return (K.zero, [])
(R, S) = dup_inner_subresultants(f, g, K)
if (dup_degree(R[(-1)]) > 0):
return (K.zero, R)
return (S[(-1)], R)
| [
"def",
"dup_prs_resultant",
"(",
"f",
",",
"g",
",",
"K",
")",
":",
"if",
"(",
"(",
"not",
"f",
")",
"or",
"(",
"not",
"g",
")",
")",
":",
"return",
"(",
"K",
".",
"zero",
",",
"[",
"]",
")",
"(",
"R",
",",
"S",
")",
"=",
"dup_inner_subresu... | resultant algorithm in k[x] using subresultant prs . | train | false |
48,623 | def _pattern_of(index):
return np.array([[(index & (2 ** 0)), (index & (2 ** 1)), (index & (2 ** 2))], [(index & (2 ** 3)), (index & (2 ** 4)), (index & (2 ** 5))], [(index & (2 ** 6)), (index & (2 ** 7)), (index & (2 ** 8))]], bool)
| [
"def",
"_pattern_of",
"(",
"index",
")",
":",
"return",
"np",
".",
"array",
"(",
"[",
"[",
"(",
"index",
"&",
"(",
"2",
"**",
"0",
")",
")",
",",
"(",
"index",
"&",
"(",
"2",
"**",
"1",
")",
")",
",",
"(",
"index",
"&",
"(",
"2",
"**",
"2... | return the pattern represented by an index value byte decomposition of index . | train | false |
48,624 | def mathjax_for_rst(pelicanobj, mathjax_script):
docutils_settings = pelicanobj.settings.get('DOCUTILS_SETTINGS', {})
docutils_settings['math_output'] = 'MathJax'
pelicanobj.settings['DOCUTILS_SETTINGS'] = docutils_settings
rst_add_mathjax.mathjax_script = mathjax_script
| [
"def",
"mathjax_for_rst",
"(",
"pelicanobj",
",",
"mathjax_script",
")",
":",
"docutils_settings",
"=",
"pelicanobj",
".",
"settings",
".",
"get",
"(",
"'DOCUTILS_SETTINGS'",
",",
"{",
"}",
")",
"docutils_settings",
"[",
"'math_output'",
"]",
"=",
"'MathJax'",
"... | setup math for rst . | train | false |
48,628 | def angular_momentum(point, frame, *body):
if (not isinstance(frame, ReferenceFrame)):
raise TypeError('Please enter a valid ReferenceFrame')
if (not isinstance(point, Point)):
raise TypeError('Please specify a valid Point')
else:
angular_momentum_sys = Vector(0)
for e in body:
if isinstance(e, (RigidBody, Particle)):
angular_momentum_sys += e.angular_momentum(point, frame)
else:
raise TypeError('*body must have only Particle or RigidBody')
return angular_momentum_sys
| [
"def",
"angular_momentum",
"(",
"point",
",",
"frame",
",",
"*",
"body",
")",
":",
"if",
"(",
"not",
"isinstance",
"(",
"frame",
",",
"ReferenceFrame",
")",
")",
":",
"raise",
"TypeError",
"(",
"'Please enter a valid ReferenceFrame'",
")",
"if",
"(",
"not",
... | angular momentum of a system this function returns the angular momentum of a system of particles and/or rigidbodys . | train | false |
48,629 | def hypersimp(f, k):
f = sympify(f)
g = (f.subs(k, (k + 1)) / f)
g = g.rewrite(gamma)
g = expand_func(g)
g = powsimp(g, deep=True, combine='exp')
if g.is_rational_function(k):
return simplify(g, ratio=S.Infinity)
else:
return None
| [
"def",
"hypersimp",
"(",
"f",
",",
"k",
")",
":",
"f",
"=",
"sympify",
"(",
"f",
")",
"g",
"=",
"(",
"f",
".",
"subs",
"(",
"k",
",",
"(",
"k",
"+",
"1",
")",
")",
"/",
"f",
")",
"g",
"=",
"g",
".",
"rewrite",
"(",
"gamma",
")",
"g",
... | given combinatorial term f(k) simplify its consecutive term ratio i . | train | false |
@utils.arg('address', metavar='<address>', help=_('IP of Floating IP.'))
@deprecated_network
def do_floating_ip_delete(cs, args):
    """De-allocate the floating IP whose address matches ``args.address``."""
    for fip in cs.floating_ips.list():
        if fip.ip == args.address:
            return cs.floating_ips.delete(fip.id)
    raise exceptions.CommandError(_('Floating IP %s not found.') % args.address)
| [
"@",
"utils",
".",
"arg",
"(",
"'address'",
",",
"metavar",
"=",
"'<address>'",
",",
"help",
"=",
"_",
"(",
"'IP of Floating IP.'",
")",
")",
"@",
"deprecated_network",
"def",
"do_floating_ip_delete",
"(",
"cs",
",",
"args",
")",
":",
"floating_ips",
"=",
... | de-allocate a floating ip . | train | false |
def nice_exit(ret=0):
    # Drop-in replacement for sys.exit(): when exiting with a non-zero status
    # from a "bare console" (a console window that would vanish immediately,
    # e.g. a GUI launch), pause so the user can read any error output first.
    #
    # ret: process exit status (0 = success); only a non-zero status in a
    # bare console triggers the pause.  Python 2 code (print statement,
    # raw_input builtin).
    if (ret and is_bare_console()):
        print
        print 'Press [Enter] to close this window.'
        raw_input()  # block until the user acknowledges
    sys.exit(ret)
| [
"def",
"nice_exit",
"(",
"ret",
"=",
"0",
")",
":",
"if",
"(",
"ret",
"and",
"is_bare_console",
"(",
")",
")",
":",
"print",
"print",
"'Press [Enter] to close this window.'",
"raw_input",
"(",
")",
"sys",
".",
"exit",
"(",
"ret",
")"
] | drop-in replacement for sys . | train | false |
def test_kde_integer_input():
    """Regression test for #1181: gaussian_kde must accept integer input."""
    samples = np.arange(5)
    density = stats.gaussian_kde(samples)
    expected = [0.13480721, 0.18222869, 0.19514935, 0.18222869, 0.13480721]
    assert_array_almost_equal(density(samples), expected, decimal=6)
| [
"def",
"test_kde_integer_input",
"(",
")",
":",
"x1",
"=",
"np",
".",
"arange",
"(",
"5",
")",
"kde",
"=",
"stats",
".",
"gaussian_kde",
"(",
"x1",
")",
"y_expected",
"=",
"[",
"0.13480721",
",",
"0.18222869",
",",
"0.19514935",
",",
"0.18222869",
",",
... | regression test for #1181 . | train | false |
def resume(vm_):
    """Unpause the VM labelled ``vm_`` via XAPI.

    Returns True on success, False when the VM cannot be found or the
    unpause call fails.
    """
    with _get_xapi_session() as xapi:
        uuid = _get_label_uuid(xapi, 'VM', vm_)
        if uuid is False:
            return False
        try:
            xapi.VM.unpause(uuid)
        except Exception:
            return False
        return True
| [
"def",
"resume",
"(",
"vm_",
")",
":",
"with",
"_get_xapi_session",
"(",
")",
"as",
"xapi",
":",
"vm_uuid",
"=",
"_get_label_uuid",
"(",
"xapi",
",",
"'VM'",
",",
"vm_",
")",
"if",
"(",
"vm_uuid",
"is",
"False",
")",
":",
"return",
"False",
"try",
":... | resumes processing of queues . | train | true |
def sh_margin_details(date='', symbol='', start='', end='', retry_count=3, pause=0.001):
    """Fetch Shanghai margin-trading detail data.

    Either ``date`` or a ``start``/``end`` range may be given as
    YYYY-MM-DD strings; when both endpoints of a range are supplied,
    ``date`` is ignored.  ``retry_count``/``pause`` control the retry
    behaviour of the underlying fetch.
    """
    normalize = lambda d: d if d == '' else d.replace('-', '')
    date, start, end = normalize(date), normalize(start), normalize(end)
    if (start != '') & (end != ''):
        date = ''
    ct._write_head()
    return _sh_mx(pd.DataFrame(), date=date, start=start, end=end,
                  symbol=symbol, retry_count=retry_count, pause=pause)
| [
"def",
"sh_margin_details",
"(",
"date",
"=",
"''",
",",
"symbol",
"=",
"''",
",",
"start",
"=",
"''",
",",
"end",
"=",
"''",
",",
"retry_count",
"=",
"3",
",",
"pause",
"=",
"0.001",
")",
":",
"date",
"=",
"(",
"date",
"if",
"(",
"date",
"==",
... | parameters date:string 明细数据日期 format:yyyy-mm-dd 默认为空 symbol:string 标的代码,6位数字e . | train | false |
def format_log_context(msg, connection=None, keyspace=None):
    """Prefix *msg* with connection (and optional keyspace) context.

    A falsy *connection* is reported as 'DEFAULT_CONNECTION'.
    """
    conn_label = connection if connection else 'DEFAULT_CONNECTION'
    if keyspace:
        return '[Connection: {0}, Keyspace: {1}] {2}'.format(conn_label, keyspace, msg)
    return '[Connection: {0}] {1}'.format(conn_label, msg)
| [
"def",
"format_log_context",
"(",
"msg",
",",
"connection",
"=",
"None",
",",
"keyspace",
"=",
"None",
")",
":",
"connection_info",
"=",
"(",
"connection",
"or",
"'DEFAULT_CONNECTION'",
")",
"if",
"keyspace",
":",
"msg",
"=",
"'[Connection: {0}, Keyspace: {1}] {2}... | format log message to add keyspace and connection context . | train | true |
def cms_to_token(cms_text):
    """Convert a PEM CMS-signed token to the custom URL-safe token format.

    Replaces '/' with '-', then strips the BEGIN/END CMS delimiters and
    all newlines.
    """
    token = cms_text.replace('/', '-')
    for fragment in ('-----BEGIN CMS-----', '-----END CMS-----', '\n'):
        token = token.replace(fragment, '')
    return token
| [
"def",
"cms_to_token",
"(",
"cms_text",
")",
":",
"start_delim",
"=",
"'-----BEGIN CMS-----'",
"end_delim",
"=",
"'-----END CMS-----'",
"signed_text",
"=",
"cms_text",
"signed_text",
"=",
"signed_text",
".",
"replace",
"(",
"'/'",
",",
"'-'",
")",
"signed_text",
"... | convert a cms-signed token in pem format to a custom url-safe format . | train | false |
@then(u'the command output should contain')
def step_command_output_should_contain(context):
    """Behave step: the step's multi-line doc-string must appear in the output."""
    expected = context.text
    assert expected is not None, 'REQUIRE: multi-line text'
    step_command_output_should_contain_text(context, expected)
| [
"@",
"then",
"(",
"u'the command output should contain'",
")",
"def",
"step_command_output_should_contain",
"(",
"context",
")",
":",
"assert",
"(",
"context",
".",
"text",
"is",
"not",
"None",
")",
",",
"'REQUIRE: multi-line text'",
"step_command_output_should_contain_te... | example: when i run "behave . | train | false |
@simple_decorator
def check_local_site_access(view_func):
    """View decorator enforcing access rules for a requested LocalSite.

    When a local-site name is in the URL: 404 if the request has no
    matching local site; 403 (rendered permission page) if the user is
    denied on a public site or is already authenticated; otherwise a
    redirect to the login page.  The resolved site (or None) is passed
    to the wrapped view as ``local_site``.
    """
    def _wrapped(request, local_site_name=None, *args, **kwargs):
        local_site = None
        if local_site_name:
            local_site = request.local_site
            if not local_site:
                raise Http404
            if not local_site.is_accessible_by(request.user):
                if local_site.public or request.user.is_authenticated():
                    denied = render_to_response('permission_denied.html',
                                                RequestContext(request))
                    denied.status_code = 403
                    return denied
                return HttpResponseRedirect(
                    '%s?next_page=%s' % (reverse('login'),
                                         request.get_full_path()))
        return view_func(request, local_site=local_site, *args, **kwargs)
    return _wrapped
| [
"@",
"simple_decorator",
"def",
"check_local_site_access",
"(",
"view_func",
")",
":",
"def",
"_check",
"(",
"request",
",",
"local_site_name",
"=",
"None",
",",
"*",
"args",
",",
"**",
"kwargs",
")",
":",
"if",
"local_site_name",
":",
"if",
"(",
"not",
"r... | checks if a user has access to a local site . | train | false |
def digestAuth(realm, algorithm=MD5, nonce=None, qop=AUTH):
    """Build a Digest authentication challenge header value.

    The algorithm and qop must be supported; a fresh nonce is calculated
    when none is supplied.
    """
    global SUPPORTED_ALGORITHM, DIGEST_AUTH_ENCODERS, SUPPORTED_QOP
    assert algorithm in SUPPORTED_ALGORITHM
    assert qop in SUPPORTED_QOP
    if nonce is None:
        nonce = calculateNonce(realm, algorithm)
    return 'Digest realm="%s", nonce="%s", algorithm="%s", qop="%s"' % (
        realm, nonce, algorithm, qop)
| [
"def",
"digestAuth",
"(",
"realm",
",",
"algorithm",
"=",
"MD5",
",",
"nonce",
"=",
"None",
",",
"qop",
"=",
"AUTH",
")",
":",
"global",
"SUPPORTED_ALGORITHM",
",",
"DIGEST_AUTH_ENCODERS",
",",
"SUPPORTED_QOP",
"assert",
"(",
"algorithm",
"in",
"SUPPORTED_ALGO... | challenges the client for a digest authentication . | train | false |
def assert_sp_series_equal(left, right, check_dtype=True, exact_indices=True,
                           check_series_type=True, check_names=True,
                           obj='SparseSeries'):
    """Assert that the left and right SparseSeries are equal.

    Compares class (optionally), index, sparse block values, name and
    dtype (each optionally) and the dense values.
    NOTE(review): ``exact_indices`` is accepted but unused here — confirm
    against callers before removing it.
    """
    assertIsInstance(left, pd.SparseSeries, '[SparseSeries]')
    assertIsInstance(right, pd.SparseSeries, '[SparseSeries]')
    if check_series_type:
        assert_class_equal(left, right, obj=obj)
    assert_index_equal(left.index, right.index, obj='{0}.index'.format(obj))
    assert_sp_array_equal(left.block.values, right.block.values)
    for enabled, attr in ((check_names, 'name'), (check_dtype, 'dtype')):
        if enabled:
            assert_attr_equal(attr, left, right)
    assert_numpy_array_equal(left.values, right.values)
| [
"def",
"assert_sp_series_equal",
"(",
"left",
",",
"right",
",",
"check_dtype",
"=",
"True",
",",
"exact_indices",
"=",
"True",
",",
"check_series_type",
"=",
"True",
",",
"check_names",
"=",
"True",
",",
"obj",
"=",
"'SparseSeries'",
")",
":",
"assertIsInstan... | check that the left and right sparseseries are equal . | train | false |
def respond_as_web_page(title, html, success=None, http_status_code=None,
                        context=None, indicator_color=None,
                        primary_action=u'/', primary_label=None,
                        fullpage=False):
    """Send the response as a rendered message page rather than JSON.

    The indicator colour defaults to green on success, red for HTTP
    status codes above 300, and blue otherwise.
    """
    local.message_title = title
    local.message = html
    local.response[u'type'] = u'page'
    local.response[u'route'] = u'message'
    if http_status_code:
        local.response[u'http_status_code'] = http_status_code
    context = context or {}
    if not indicator_color:
        if success:
            indicator_color = u'green'
        elif http_status_code and http_status_code > 300:
            indicator_color = u'red'
        else:
            indicator_color = u'blue'
    context.update({
        u'indicator_color': indicator_color,
        u'primary_label': primary_label,
        u'primary_action': primary_action,
        u'error_code': http_status_code,
        u'fullpage': fullpage,
    })
    local.response[u'context'] = context
| [
"def",
"respond_as_web_page",
"(",
"title",
",",
"html",
",",
"success",
"=",
"None",
",",
"http_status_code",
"=",
"None",
",",
"context",
"=",
"None",
",",
"indicator_color",
"=",
"None",
",",
"primary_action",
"=",
"u'/'",
",",
"primary_label",
"=",
"None... | send response as a web page with a message rather than json . | train | false |
def apphook_pre_title_checker(instance, **kwargs):
    """pre-save handler: stash the previous Title row on the instance.

    Draft titles are ignored.  For public titles the old database row
    (with its related page) is stored as ``_old_data`` so later code can
    detect application_urls/path changes; ``_old_data`` is None for rows
    that do not yet exist.
    """
    if instance.publisher_is_draft:
        return
    old_rows = Title.objects.filter(pk=instance.pk).select_related('page')
    try:
        instance._old_data = old_rows[0]
    except IndexError:
        instance._old_data = None
| [
"def",
"apphook_pre_title_checker",
"(",
"instance",
",",
"**",
"kwargs",
")",
":",
"if",
"instance",
".",
"publisher_is_draft",
":",
"return",
"try",
":",
"instance",
".",
"_old_data",
"=",
"Title",
".",
"objects",
".",
"filter",
"(",
"pk",
"=",
"instance",... | store the old application_urls and path on the instance . | train | false |
def gf_multi_eval(f, A, p, K):
    """Evaluate the polynomial ``f`` over GF(p) at each point of ``A``."""
    results = []
    for a in A:
        results.append(gf_eval(f, a, p, K))
    return results
| [
"def",
"gf_multi_eval",
"(",
"f",
",",
"A",
",",
"p",
",",
"K",
")",
":",
"return",
"[",
"gf_eval",
"(",
"f",
",",
"a",
",",
"p",
",",
"K",
")",
"for",
"a",
"in",
"A",
"]"
] | evaluate f(a) for a in [a_1 . | train | false |
@contextmanager
def dummy():
    """A no-op context manager: yields None and does nothing special."""
    yield
| [
"@",
"contextmanager",
"def",
"dummy",
"(",
")",
":",
"(",
"yield",
")"
] | a context manager that does nothing special . | train | false |
def set_edge_attributes(G, name, values):
    """Set the edge attribute ``name`` on every edge of ``G``.

    ``values`` may be a dict keyed by edge tuple — (u, v), or (u, v, key)
    for multigraphs — or a single value applied uniformly to all edges.
    """
    if not isinstance(values, dict):
        # Broadcast a scalar value over every edge of the graph.
        if G.is_multigraph():
            edge_iter = G.edges(keys=True)
        else:
            edge_iter = G.edges()
        values = dict.fromkeys(edge_iter, values)
    if G.is_multigraph():
        for (u, v, key), value in values.items():
            G[u][v][key][name] = value
    else:
        for (u, v), value in values.items():
            G[u][v][name] = value
| [
"def",
"set_edge_attributes",
"(",
"G",
",",
"name",
",",
"values",
")",
":",
"if",
"(",
"not",
"isinstance",
"(",
"values",
",",
"dict",
")",
")",
":",
"if",
"G",
".",
"is_multigraph",
"(",
")",
":",
"edges",
"=",
"G",
".",
"edges",
"(",
"keys",
... | sets edge attributes from a given value or dictionary of values . | train | false |
def orderedSet(iterable):
    """Return the items of *iterable* with duplicates removed, preserving
    first-occurrence order.  Works for unhashable elements too (linear
    membership scan)."""
    unique = []
    for item in iterable:
        if item not in unique:
            unique.append(item)
    return unique
| [
"def",
"orderedSet",
"(",
"iterable",
")",
":",
"res",
"=",
"[",
"]",
"for",
"el",
"in",
"iterable",
":",
"if",
"(",
"el",
"not",
"in",
"res",
")",
":",
"res",
".",
"append",
"(",
"el",
")",
"return",
"res"
] | remove all duplicates from the input iterable . | train | false |
def equals_lf(line):
    """Return True if *line* equals a single linefeed character.

    The original conditional expression had two identical branches
    (the str and non-str cases both produced the same linefeed string),
    so it collapses to a plain comparison.  NOTE(review): the dead branch
    was presumably meant to compare bytes input against a bytes linefeed —
    confirm against callers before restoring that behaviour.
    """
    return line == '\n'
| [
"def",
"equals_lf",
"(",
"line",
")",
":",
"return",
"(",
"line",
"==",
"(",
"'\\n'",
"if",
"isinstance",
"(",
"line",
",",
"str",
")",
"else",
"'\\n'",
")",
")"
] | return true if line equals . | train | false |
def meanexcess(thresh, shape, scale):
    """Mean excess function of the generalized Pareto distribution.

    The inequality conditions (from de Zea Bermudez) are checked with
    warnings rather than hard assertions.
    """
    warnif(shape > -1, 'shape > -1')
    warnif(thresh >= 0, 'thresh >= 0')
    warnif(scale - shape * thresh > 0, '(scale - shape*thresh) > 0')
    return (scale - shape * thresh) / (1 + shape)
| [
"def",
"meanexcess",
"(",
"thresh",
",",
"shape",
",",
"scale",
")",
":",
"warnif",
"(",
"(",
"shape",
">",
"(",
"-",
"1",
")",
")",
",",
"'shape > -1'",
")",
"warnif",
"(",
"(",
"thresh",
">=",
"0",
")",
",",
"'thresh >= 0'",
")",
"warnif",
"(",
... | mean excess function of genpareto assert are inequality conditions in de zea bermudez . | train | false |
def allow_access(course, user, level, send_email=True):
    """Grant ``user`` the given access ``level`` on ``course``.

    Thin wrapper around ``_change_access`` with the 'allow' action;
    ``send_email`` controls whether a notification email is sent.
    """
    _change_access(course, user, level, 'allow', send_email)
| [
"def",
"allow_access",
"(",
"course",
",",
"user",
",",
"level",
",",
"send_email",
"=",
"True",
")",
":",
"_change_access",
"(",
"course",
",",
"user",
",",
"level",
",",
"'allow'",
",",
"send_email",
")"
] | allow user access to course modification . | train | false |
@login_required
@ensure_csrf_cookie
@require_http_methods(['GET'])
def course_rerun_handler(request, course_key_string):
    """RESTful GET handler rendering the course re-run page.

    Only global staff may re-run courses; anyone else gets
    PermissionDenied.
    """
    if not GlobalStaff().has_user(request.user):
        raise PermissionDenied()
    course_key = CourseKey.from_string(course_key_string)
    with modulestore().bulk_operations(course_key):
        course_module = get_course_and_check_access(course_key, request.user, depth=3)
        if request.method == 'GET':
            context = {
                'source_course_key': course_key,
                'display_name': course_module.display_name,
                'user': request.user,
                'course_creator_status': _get_course_creator_status(request.user),
                'allow_unicode_course_id': settings.FEATURES.get('ALLOW_UNICODE_COURSE_ID', False),
            }
            return render_to_response('course-create-rerun.html', context)
| [
"@",
"login_required",
"@",
"ensure_csrf_cookie",
"@",
"require_http_methods",
"(",
"[",
"'GET'",
"]",
")",
"def",
"course_rerun_handler",
"(",
"request",
",",
"course_key_string",
")",
":",
"if",
"(",
"not",
"GlobalStaff",
"(",
")",
".",
"has_user",
"(",
"req... | the restful handler for course reruns . | train | false |
def my_update_subtask_status(entry_id, current_task_id, new_subtask_status):
    """Forward a subtask-status update only when it is not stale.

    Loads the stored status for ``current_task_id`` from the
    InstructorTask row and skips the update when the stored status comes
    from a later retry than the incoming one.
    """
    entry = InstructorTask.objects.get(pk=entry_id)
    status_by_task = json.loads(entry.subtasks)['status']
    stored = SubtaskStatus.from_dict(status_by_task[current_task_id])
    if stored.get_retry_count() <= new_subtask_status.get_retry_count():
        update_subtask_status(entry_id, current_task_id, new_subtask_status)
| [
"def",
"my_update_subtask_status",
"(",
"entry_id",
",",
"current_task_id",
",",
"new_subtask_status",
")",
":",
"entry",
"=",
"InstructorTask",
".",
"objects",
".",
"get",
"(",
"pk",
"=",
"entry_id",
")",
"subtask_dict",
"=",
"json",
".",
"loads",
"(",
"entry... | check whether a subtask has been updated before really updating . | train | false |
def logmps(params, xsorted, dist):
    """Negative mean log product-of-spacings.

    params : array_like
        Distribution parameters, splatted into ``dist.cdf``.
    xsorted : array_like
        Sample sorted in increasing order.
    dist : distribution object
        Must expose a ``cdf`` method.
    """
    cdf_vals = np.r_[0.0, dist.cdf(xsorted, *params), 1.0]
    spacings = np.diff(cdf_vals)
    return -np.log(spacings).mean()
| [
"def",
"logmps",
"(",
"params",
",",
"xsorted",
",",
"dist",
")",
":",
"xcdf",
"=",
"np",
".",
"r_",
"[",
"(",
"0.0",
",",
"dist",
".",
"cdf",
"(",
"xsorted",
",",
"*",
"params",
")",
",",
"1.0",
")",
"]",
"D",
"=",
"np",
".",
"diff",
"(",
... | calculate negative log of product-of-spacings parameters params : array_like . | train | false |
def test_ipv6():
    """Check whether the configured self-test host is reachable over IPv6.

    Returns True when IPv6 should be enabled.  Any failure — address
    lookup error, unreachable host, or other socket problem — disables
    IPv6 and is logged at debug level.
    """
    if not cfg.selftest_host():
        # No test host configured: nothing to probe, assume IPv6 works.
        return True
    try:
        info = getipaddress.addresslookup6(cfg.selftest_host())
    except Exception:
        # FIX: was a bare `except:` which also swallowed
        # KeyboardInterrupt/SystemExit; narrowed to Exception.
        logging.debug("Test IPv6: Disabling IPv6, because it looks like it's not available. Reason: %s", sys.exc_info()[0])
        return False
    try:
        (af, socktype, proto, canonname, sa) = info[0]
        sock = socket.socket(af, socktype, proto)
        sock.settimeout(2)
        sock.connect(sa[0:2])
        sock.close()
        logging.debug('Test IPv6: IPv6 test successful. Enabling IPv6')
        return True
    except socket.error:
        logging.debug('Test IPv6: Cannot reach IPv6 test host. Disabling IPv6')
        return False
    except Exception:
        # FIX: narrowed from a bare `except:` as above.
        logging.debug('Test IPv6: Problem during IPv6 connect. Disabling IPv6. Reason: %s', sys.exc_info()[0])
        return False
| [
"def",
"test_ipv6",
"(",
")",
":",
"if",
"(",
"not",
"cfg",
".",
"selftest_host",
"(",
")",
")",
":",
"return",
"True",
"try",
":",
"info",
"=",
"getipaddress",
".",
"addresslookup6",
"(",
"cfg",
".",
"selftest_host",
"(",
")",
")",
"except",
":",
"l... | check if external ipv6 addresses are reachable . | train | false |
def addCircleIntersectionLoop(circleIntersectionLoop, circleIntersections):
    # Grow circleIntersectionLoop in place by walking "ahead" from its first
    # intersection until the walk closes back on the start (or dead-ends).
    # circleIntersections is only used to bound the walk to len+1 steps so a
    # malformed intersection graph cannot loop forever.  Python 2 code
    # (xrange, print statements).
    firstCircleIntersection = circleIntersectionLoop[0]
    circleIntersectionAhead = firstCircleIntersection
    for circleIntersectionIndex in xrange((len(circleIntersections) + 1)):
        circleIntersectionAhead = circleIntersectionAhead.getCircleIntersectionAhead()
        if ((circleIntersectionAhead == firstCircleIntersection) or (circleIntersectionAhead == None)):
            # Walk closed (or hit a dead end): mark the start as visited.
            firstCircleIntersection.steppedOn = True
            return
        circleIntersectionAhead.addToList(circleIntersectionLoop)
    # Fallthrough: the walk never closed within the step budget — mark the
    # start anyway and dump diagnostic state.
    firstCircleIntersection.steppedOn = True
    print 'Warning, addCircleIntersectionLoop would have gone into an endless loop.'
    print 'circleIntersectionLoop'
    for circleIntersection in circleIntersectionLoop:
        print circleIntersection
        print circleIntersection.circleNodeAhead
        print circleIntersection.circleNodeBehind
    print 'firstCircleIntersection'
    print firstCircleIntersection
    print 'circleIntersections'
    for circleIntersection in circleIntersections:
        print circleIntersection
| [
"def",
"addCircleIntersectionLoop",
"(",
"circleIntersectionLoop",
",",
"circleIntersections",
")",
":",
"firstCircleIntersection",
"=",
"circleIntersectionLoop",
"[",
"0",
"]",
"circleIntersectionAhead",
"=",
"firstCircleIntersection",
"for",
"circleIntersectionIndex",
"in",
... | add a circle intersection loop . | train | false |
def sync_with_om(func):
    """Decorator: flush all pending output-manager messages before *func*.

    The output manager follows a producer/consumer model, so queued
    messages must be processed first to keep output ordering sane.
    """
    def om_wrapper(*args, **kwargs):
        om.manager.process_all_messages()
        return func(*args, **kwargs)
    return om_wrapper
| [
"def",
"sync_with_om",
"(",
"func",
")",
":",
"def",
"om_wrapper",
"(",
"*",
"args",
",",
"**",
"kwds",
")",
":",
"om",
".",
"manager",
".",
"process_all_messages",
"(",
")",
"return",
"func",
"(",
"*",
"args",
",",
"**",
"kwds",
")",
"return",
"om_w... | given that the output manager has been migrated into a producer/consumer model . | train | false |
48,663 | def _append_comment(ret, comment):
if len(ret['comment']):
ret['comment'] = ((ret['comment'].rstrip() + '\n') + comment)
else:
ret['comment'] = comment
return ret
| [
"def",
"_append_comment",
"(",
"ret",
",",
"comment",
")",
":",
"if",
"len",
"(",
"ret",
"[",
"'comment'",
"]",
")",
":",
"ret",
"[",
"'comment'",
"]",
"=",
"(",
"(",
"ret",
"[",
"'comment'",
"]",
".",
"rstrip",
"(",
")",
"+",
"'\\n'",
")",
"+",
... | append comment to ret[comment] . | train | true |
def construct_query_part(model_cls, prefixes, query_part):
    """Create a Query from a single query component string.

    An empty component matches everything.  A component without a key is
    searched across the model's search fields (for field queries) or used
    to construct a non-field query directly.
    """
    if not query_part:
        return query.TrueQuery()
    # Map every known field and type name to its query class.
    query_classes = {}
    for (k, t) in itertools.chain(model_cls._fields.items(),
                                  model_cls._types.items()):
        query_classes[k] = t.query
    (key, pattern, query_class) = parse_query_part(query_part, query_classes,
                                                   prefixes)
    if key is None:
        if issubclass(query_class, query.FieldQuery):
            return query.AnyFieldQuery(pattern, model_cls._search_fields,
                                       query_class)
        return query_class(pattern)
    key = key.lower()
    # FIX: the original called key.lower() a second time here even though
    # key was already lowercased above.
    return query_class(key, pattern, key in model_cls._fields)
| [
"def",
"construct_query_part",
"(",
"model_cls",
",",
"prefixes",
",",
"query_part",
")",
":",
"if",
"(",
"not",
"query_part",
")",
":",
"return",
"query",
".",
"TrueQuery",
"(",
")",
"query_classes",
"=",
"{",
"}",
"for",
"(",
"k",
",",
"t",
")",
"in"... | create a query from a single query component . | train | false |
def get_console_size():
    """Return the console size as (display_width, display_height).

    Option values take precedence.  In an interactive non-IPython session
    the real terminal size fills in unset options; in an IPython frontend
    the option defaults are used instead (terminal size is not meaningful
    there).  Non-interactive sessions fall back to the option values only.
    """
    display_width = get_option('display.width')
    display_height = get_option('display.height', silent=True)
    terminal_width = terminal_height = None
    if com.in_interactive_session():
        if com.in_ipython_frontend():
            from pandas.core.config import get_default_val
            terminal_width = get_default_val('display.width')
            terminal_height = get_default_val('display.height')
        else:
            terminal_width, terminal_height = get_terminal_size()
    return (display_width or terminal_width,
            display_height or terminal_height)
| [
"def",
"get_console_size",
"(",
")",
":",
"display_width",
"=",
"get_option",
"(",
"'display.width'",
")",
"display_height",
"=",
"get_option",
"(",
"'display.height'",
",",
"silent",
"=",
"True",
")",
"if",
"com",
".",
"in_interactive_session",
"(",
")",
":",
... | return console size as tuple = . | train | false |
def patch_environ(case, key, value):
    """Set ``os.environ[key] = value`` for the duration of a test case.

    A snapshot of the whole environment is taken first, and a cleanup is
    registered on *case* that restores it.
    """
    snapshot = os.environ.copy()
    def restore():
        os.environ.clear()
        os.environ.update(snapshot)
    os.environ[key] = value
    case.addCleanup(restore)
| [
"def",
"patch_environ",
"(",
"case",
",",
"key",
",",
"value",
")",
":",
"old_environ",
"=",
"os",
".",
"environ",
".",
"copy",
"(",
")",
"def",
"cleanup",
"(",
")",
":",
"os",
".",
"environ",
".",
"clear",
"(",
")",
"os",
".",
"environ",
".",
"u... | add an environment variable for the duration of a test . | train | false |
def get_course_info_section(request, user, course, section_key):
    """Return the HTML snippet for one section of the course info page.

    A missing module yields an empty string; rendering errors are logged
    and replaced with the standard error-message snippet.
    """
    info_module = get_course_info_section_module(request, user, course, section_key)
    if info_module is None:
        return ''
    try:
        return info_module.render(STUDENT_VIEW).content
    except Exception:
        html = render_to_string('courseware/error-message.html', None)
        log.exception(u'Error rendering course_id=%s, section_key=%s',
                      unicode(course.id), section_key)
        return html
| [
"def",
"get_course_info_section",
"(",
"request",
",",
"user",
",",
"course",
",",
"section_key",
")",
":",
"info_module",
"=",
"get_course_info_section_module",
"(",
"request",
",",
"user",
",",
"course",
",",
"section_key",
")",
"html",
"=",
"''",
"if",
"(",... | this returns the snippet of html to be rendered on the course info page . | train | false |
def get_page(self, suffix):
    """Monkeypatched onto the request: current page number for *suffix*.

    Reads ``page<suffix>`` from ``self.REQUEST``; a missing or
    non-integer value defaults to page 1.
    """
    key = 'page%s' % suffix
    try:
        return int(self.REQUEST[key])
    except (KeyError, ValueError, TypeError):
        return 1
| [
"def",
"get_page",
"(",
"self",
",",
"suffix",
")",
":",
"try",
":",
"return",
"int",
"(",
"self",
".",
"REQUEST",
"[",
"(",
"'page%s'",
"%",
"suffix",
")",
"]",
")",
"except",
"(",
"KeyError",
",",
"ValueError",
",",
"TypeError",
")",
":",
"return",... | a function which will be monkeypatched onto the request to get the current integer representing the current page . | train | false |
def rsync_ip(ip):
    """Return *ip* in rsync-compatible form.

    IPv6 addresses are wrapped in square brackets; anything that does not
    parse as IPv6 (including IPv4 addresses) is returned unchanged.
    """
    try:
        socket.inet_pton(socket.AF_INET6, ip)
    except socket.error:
        return ip
    return '[%s]' % ip
| [
"def",
"rsync_ip",
"(",
"ip",
")",
":",
"try",
":",
"socket",
".",
"inet_pton",
"(",
"socket",
".",
"AF_INET6",
",",
"ip",
")",
"except",
"socket",
".",
"error",
":",
"return",
"ip",
"else",
":",
"return",
"(",
"'[%s]'",
"%",
"ip",
")"
] | transform ip string to an rsync-compatible form will return ipv4 addresses unchanged . | train | false |
@require_GET
def user_lookup(request):
    """AJAX endpoint returning partial username matches as JSON.

    Non-AJAX requests (or an empty 'user' parameter) yield an empty list.
    """
    matches = []
    if request.is_ajax():
        prefix = request.GET.get('user', '')
        if prefix:
            users = get_user_model().objects.filter(username__istartswith=prefix)
            matches = [{'label': u.username} for u in users]
    return HttpResponse(json.dumps(matches),
                        content_type='application/json; charset=utf-8')
| [
"@",
"require_GET",
"def",
"user_lookup",
"(",
"request",
")",
":",
"userlist",
"=",
"[",
"]",
"if",
"request",
".",
"is_ajax",
"(",
")",
":",
"user",
"=",
"request",
".",
"GET",
".",
"get",
"(",
"'user'",
",",
"''",
")",
"if",
"user",
":",
"matche... | returns partial username matches . | train | false |
@not_implemented_for('undirected')
def stochastic_graph(G, copy=True, weight='weight'):
    """Return a right-stochastic representation of directed graph *G*.

    Each edge weight is divided by the weighted out-degree of its source
    node (edges from zero-out-degree nodes get weight 0).  With *copy*
    true a new graph is built; otherwise *G* is normalized in place.
    """
    if copy:
        G = MultiDiGraph(G) if G.is_multigraph() else DiGraph(G)
    out_deg = dict(G.out_degree(weight=weight))
    for u, v, data in G.edges(data=True):
        denom = out_deg[u]
        data[weight] = 0 if denom == 0 else data.get(weight, 1) / denom
    return G
| [
"@",
"not_implemented_for",
"(",
"'undirected'",
")",
"def",
"stochastic_graph",
"(",
"G",
",",
"copy",
"=",
"True",
",",
"weight",
"=",
"'weight'",
")",
":",
"if",
"copy",
":",
"G",
"=",
"(",
"MultiDiGraph",
"(",
"G",
")",
"if",
"G",
".",
"is_multigra... | returns a right-stochastic representation of directed graph g . | train | false |
def relocate_vm_spec(client_factory, datastore=None, host=None,
                     disk_move_type='moveAllDiskBackingsAndAllowSharing'):
    """Build a VirtualMachineRelocateSpec for relocating a VM."""
    spec = client_factory.create('ns0:VirtualMachineRelocateSpec')
    spec.datastore = datastore
    spec.diskMoveType = disk_move_type
    spec.host = host
    return spec
| [
"def",
"relocate_vm_spec",
"(",
"client_factory",
",",
"datastore",
"=",
"None",
",",
"host",
"=",
"None",
",",
"disk_move_type",
"=",
"'moveAllDiskBackingsAndAllowSharing'",
")",
":",
"rel_spec",
"=",
"client_factory",
".",
"create",
"(",
"'ns0:VirtualMachineRelocate... | builds the vm relocation spec . | train | false |
@cache_permission
def can_accept_suggestion(user, translation):
    """Check whether *user* may accept suggestions on *translation*.

    Delegates to can_edit with the 'trans.accept_suggestion' permission;
    results are cached by the cache_permission decorator.
    """
    return can_edit(user, translation, 'trans.accept_suggestion')
| [
"@",
"cache_permission",
"def",
"can_accept_suggestion",
"(",
"user",
",",
"translation",
")",
":",
"return",
"can_edit",
"(",
"user",
",",
"translation",
",",
"'trans.accept_suggestion'",
")"
] | checks whether user can accept suggestions to given translation . | train | false |
def site_disabled(config):
    """Require the given Apache site to be disabled.

    Disables the site configuration and reloads apache2 so the change
    takes effect.
    """
    disable_site(config)
    reload_service('apache2')
| [
"def",
"site_disabled",
"(",
"config",
")",
":",
"disable_site",
"(",
"config",
")",
"reload_service",
"(",
"'apache2'",
")"
] | require an apache site to be disabled . | train | false |
def test_for(item, min_version=None, callback=extract_version):
    """Test whether *item* is importable, optionally at *min_version*.

    *callback* transforms the imported object into something comparable
    to *min_version* (by default extract_version).  Import failures
    return False; without a min_version any successful import passes.
    """
    try:
        imported = import_item(item)
    except (ImportError, RuntimeError):
        return False
    if not min_version:
        return True
    if callback:
        imported = callback(imported)
    return imported >= min_version
| [
"def",
"test_for",
"(",
"item",
",",
"min_version",
"=",
"None",
",",
"callback",
"=",
"extract_version",
")",
":",
"try",
":",
"check",
"=",
"import_item",
"(",
"item",
")",
"except",
"(",
"ImportError",
",",
"RuntimeError",
")",
":",
"return",
"False",
... | test to see if item is importable . | train | false |
def _get_instance_changes(current, state):
    """Return the modified properties between *current* and *state*.

    Differences whose old or new value is the empty string are dropped —
    they represent unset properties rather than real changes.
    """
    # FIX: removed the unused current_keys/state_keys locals.
    changed = salt.utils.compare_dicts(current, state)
    # FIX: iterate over a snapshot of the keys — deleting from a dict while
    # iterating its live keys() view raises RuntimeError on Python 3.
    for key in list(changed):
        diff = changed[key]
        if diff['old'] == '' or diff['new'] == '':
            del changed[key]
    return changed
| [
"def",
"_get_instance_changes",
"(",
"current",
",",
"state",
")",
":",
"current_keys",
"=",
"set",
"(",
"current",
".",
"keys",
"(",
")",
")",
"state_keys",
"=",
"set",
"(",
"state",
".",
"keys",
"(",
")",
")",
"changed",
"=",
"salt",
".",
"utils",
... | get modified properties . | train | true |
def _ParseResponse(response):
    # Parse an HTTP response stream into a dict with 'body', 'headers' and
    # 'response_code' keys.
    #
    # The header block is fed line-by-line to a headers-only FeedParser.
    # The first line that is neither a header nor the blank separator ends
    # the loop; if it is also not blank it is treated as the start of the
    # body and fed to the parser before breaking.
    response.reset()
    parser = feedparser.FeedParser()
    parser._set_headersonly()  # parse headers only; body is read below
    while True:
        line = response.readline()
        if (not feedparser.headerRE.match(line)):
            if (not feedparser.NLCRE.match(line)):
                parser.feed(line)
            break
        parser.feed(line)
    parsed_response = parser.close()
    # A 'Status' pseudo-header (CGI convention, e.g. "404 Not Found")
    # overrides the default 200 response code and is removed from headers.
    if ('Status' in parsed_response):
        status = int(parsed_response['Status'].split(' ', 1)[0])
        del parsed_response['Status']
    else:
        status = 200
    return {'body': (parsed_response.get_payload() + response.read()), 'headers': parsed_response.items(), 'response_code': status}
| [
"def",
"_ParseResponse",
"(",
"response",
")",
":",
"response",
".",
"reset",
"(",
")",
"parser",
"=",
"feedparser",
".",
"FeedParser",
"(",
")",
"parser",
".",
"_set_headersonly",
"(",
")",
"while",
"True",
":",
"line",
"=",
"response",
".",
"readline",
... | parses an http response into a dict . | train | false |
def servers():
    """Return the NTP servers configured on the device.

    On success the proxy's per-server mapping in 'out' is flattened to a
    plain list of server addresses; on failure the raw proxy result is
    returned unchanged.
    """
    result = __proxy__['napalm.call']('get_ntp_servers', **{})
    if not result.get('result'):
        return result
    result['out'] = list(result.get('out', {}).keys())
    return result
| [
"def",
"servers",
"(",
")",
":",
"ntp_servers",
"=",
"__proxy__",
"[",
"'napalm.call'",
"]",
"(",
"'get_ntp_servers'",
",",
"**",
"{",
"}",
")",
"if",
"(",
"not",
"ntp_servers",
".",
"get",
"(",
"'result'",
")",
")",
":",
"return",
"ntp_servers",
"ntp_se... | returns a list of the configured ntp servers on the device . | train | false |
def get_secrets(namespace, name='', apiserver_url=None, decode=False, brief=False):
    """Get Kubernetes secrets from *namespace* (optionally a single *name*).

    When *decode* is true the secret data is decoded before being
    returned.  Returns False when no API server URL can be determined.
    NOTE(review): ``brief`` is accepted but unused in this body — confirm
    against callers before removing it.
    """
    apiserver_url = _guess_apiserver(apiserver_url)
    if apiserver_url is None:
        return False
    raw = _get_secrets(namespace, name, apiserver_url)
    return _decode_secrets(raw) if decode else raw
| [
"def",
"get_secrets",
"(",
"namespace",
",",
"name",
"=",
"''",
",",
"apiserver_url",
"=",
"None",
",",
"decode",
"=",
"False",
",",
"brief",
"=",
"False",
")",
":",
"apiserver_url",
"=",
"_guess_apiserver",
"(",
"apiserver_url",
")",
"if",
"(",
"apiserver... | get k8s namespaces cli example: . | train | true |
def Laplace(name, mu, b):
    """Create a continuous random variable with a Laplace distribution.

    ``mu`` is the location parameter and ``b`` the scale, forwarded to
    LaplaceDistribution via the rv constructor.
    """
    return rv(name, LaplaceDistribution, (mu, b))
| [
"def",
"Laplace",
"(",
"name",
",",
"mu",
",",
"b",
")",
":",
"return",
"rv",
"(",
"name",
",",
"LaplaceDistribution",
",",
"(",
"mu",
",",
"b",
")",
")"
] | create a continuous random variable with a laplace distribution . | train | false |
def direct_delete_object(node, part, account, container, obj,
                         conn_timeout=5, response_timeout=15, headers=None):
    """Delete an object directly on the given object server node.

    An X-Timestamp header is generated unless the caller already supplied
    one (matched case-insensitively).
    """
    if headers is None:
        headers = {}
    need_ts = 'x-timestamp' not in (k.lower() for k in headers)
    headers = gen_headers(headers, add_ts=need_ts)
    path = '/%s/%s/%s' % (account, container, obj)
    _make_req(node, part, 'DELETE', path, headers, 'Object',
              conn_timeout, response_timeout)
| [
"def",
"direct_delete_object",
"(",
"node",
",",
"part",
",",
"account",
",",
"container",
",",
"obj",
",",
"conn_timeout",
"=",
"5",
",",
"response_timeout",
"=",
"15",
",",
"headers",
"=",
"None",
")",
":",
"if",
"(",
"headers",
"is",
"None",
")",
":... | delete object directly from the object server . | train | false |
def p_boolean_sliding_window(texts, segmented_topics, dictionary, window_size):
    """Boolean sliding-window probability estimation.

    For every top word id appearing in ``segmented_topics``, record the
    set of sliding-window ids (windows of width ``window_size`` advanced
    one token at a time over each document in ``texts``) in which the
    word occurs.

    Returns (per_topic_postings, num_windows): a mapping of word id to
    the set of window ids containing it, and the total window count
    across all documents.
    """
    top_ids = _ret_top_ids(segmented_topics)
    window_id = 0  # global window counter across all documents
    per_topic_postings = {}
    token2id_dict = dictionary.token2id
    def add_topic_posting(top_ids, window, per_topic_postings, window_id, token2id_dict):
        # Record window_id for every top word the window contains, then
        # advance the counter; window_id is threaded through the return
        # value (it is a parameter here, not a closure variable).
        for word in window:
            word_id = token2id_dict[word]
            if (word_id in top_ids):
                if (word_id in per_topic_postings):
                    per_topic_postings[word_id].add(window_id)
                else:
                    per_topic_postings[word_id] = set([window_id])
        window_id += 1
        return (window_id, per_topic_postings)
    for document in texts:
        it = iter(document)
        # First window: the initial window_size tokens (shorter when the
        # document itself is shorter than the window).
        window = tuple(islice(it, window_size))
        (window_id, per_topic_postings) = add_topic_posting(top_ids, window, per_topic_postings, window_id, token2id_dict)
        for elem in it:
            # Slide by one token: drop the oldest, append the next.
            window = (window[1:] + (elem,))
            (window_id, per_topic_postings) = add_topic_posting(top_ids, window, per_topic_postings, window_id, token2id_dict)
    return (per_topic_postings, window_id)
| [
"def",
"p_boolean_sliding_window",
"(",
"texts",
",",
"segmented_topics",
",",
"dictionary",
",",
"window_size",
")",
":",
"top_ids",
"=",
"_ret_top_ids",
"(",
"segmented_topics",
")",
"window_id",
"=",
"0",
"per_topic_postings",
"=",
"{",
"}",
"token2id_dict",
"=... | this function performs the boolean sliding window probability estimation . | train | false |
def check_fill_values(data):
    """Compare array columns of *data* against expected masks and values.

    Column 'a': fully unmasked, string values ['1', 'a'].
    Column 'b': second entry masked with fill value -999; clearing the
    mask must reveal the underlying data [2, 1].
    """
    assert_true((data['a'].mask == [False, False]).all())
    assert_true((data['a'] == ['1', 'a']).all())
    assert_true((data['b'].mask == [False, True]).all())
    assert_true((data['b'] == [2, (-999)]).all())
    data['b'].mask = False
    assert_true((data['b'] == [2, 1]).all())
| [
"def",
"check_fill_values",
"(",
"data",
")",
":",
"assert_true",
"(",
"(",
"data",
"[",
"'a'",
"]",
".",
"mask",
"==",
"[",
"False",
",",
"False",
"]",
")",
".",
"all",
"(",
")",
")",
"assert_true",
"(",
"(",
"data",
"[",
"'a'",
"]",
"==",
"[",
... | compare array column by column with expectation . | train | false |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.