id_within_dataset int64 1 55.5k | snippet stringlengths 19 14.2k | tokens listlengths 6 1.63k | nl stringlengths 6 352 | split_within_dataset stringclasses 1
value | is_duplicated bool 2
classes |
|---|---|---|---|---|---|
def test_install_package_with_prefix(script, data):
    """Install 'simple==1.0' with --prefix and check the .egg-info lands under it.

    ``script`` and ``data`` are presumably pip's functional-test fixtures, and
    ``pyversion`` a module-level "X.Y" string -- confirm in the file header.
    """
    prefix_path = (script.scratch_path / 'prefix')
    result = script.pip('install', '--prefix', prefix_path, '-f', data.find_links, '--no-binary', 'simple', '--no-index', 'simple==1.0')
    # PyPy installs directly under the prefix; CPython nests lib/pythonX.Y.
    if hasattr(sys, 'pypy_version_info'):
        path = (script.scratch / 'prefix')
    else:
        path = (((script.scratch / 'prefix') / 'lib') / 'python{0}'.format(pyversion))
    install_path = ((path / 'site-packages') / 'simple-1.0-py{0}.egg-info'.format(pyversion))
    assert (install_path in result.files_created), str(result)
| [
"def",
"test_install_package_with_prefix",
"(",
"script",
",",
"data",
")",
":",
"prefix_path",
"=",
"(",
"script",
".",
"scratch_path",
"/",
"'prefix'",
")",
"result",
"=",
"script",
".",
"pip",
"(",
"'install'",
",",
"'--prefix'",
",",
"prefix_path",
",",
... | test installing a package using pip install --prefix . | train | false |
def _require_user(username, fullname, password=None, is_superuser=False, email=None, alt_src_lang=None):
    """Get or create an active user with the given attributes.

    Password, email and verification are only applied when the user is newly
    created; an existing matching user is returned untouched except for the
    alternative source language.
    """
    from accounts.utils import verify_user
    from django.contrib.auth import get_user_model
    User = get_user_model()
    criteria = {'username': username, 'full_name': fullname, 'is_active': True, 'is_superuser': is_superuser}
    (user, created) = User.objects.get_or_create(**criteria)
    if created:
        if (password is None):
            user.set_unusable_password()
        else:
            user.set_password(password)
        if email:
            user.email = email
        user.save()
        if email:
            verify_user(user)
    if (alt_src_lang is not None):
        # NOTE(review): alt_src_lang is called here, so it appears to be a
        # factory/callable rather than a language object -- confirm at call sites.
        user.alt_src_langs.add(alt_src_lang())
    return user
| [
"def",
"_require_user",
"(",
"username",
",",
"fullname",
",",
"password",
"=",
"None",
",",
"is_superuser",
"=",
"False",
",",
"email",
"=",
"None",
",",
"alt_src_lang",
"=",
"None",
")",
":",
"from",
"accounts",
".",
"utils",
"import",
"verify_user",
"fr... | helper to get/create a new user . | train | false |
def shutdown_hard():
    """Shut down the running system immediately: delegate to shutdown() with a zero timeout (no warning period)."""
    return shutdown(timeout=0)
| [
"def",
"shutdown_hard",
"(",
")",
":",
"return",
"shutdown",
"(",
"timeout",
"=",
"0",
")"
] | shutdown a running system with no timeout or warning . | train | false |
def user_passes_test(test_func, login_url=LOGIN_URL):
    """Decorator for views: run the view only if ``test_func(request.user)`` is truthy.

    Otherwise redirect to ``login_url``, appending the current path in the
    REDIRECT_FIELD_NAME query parameter so the user can return after login.
    """
    def _dec(view_func):
        def _checklogin(request, *args, **kwargs):
            if test_func(request.user):
                return view_func(request, *args, **kwargs)
            return HttpResponseRedirect(('%s?%s=%s' % (login_url, REDIRECT_FIELD_NAME, quote(request.get_full_path()))))
        # Preserve the wrapped view's docstring/attributes (pre-functools.wraps style).
        _checklogin.__doc__ = view_func.__doc__
        _checklogin.__dict__ = view_func.__dict__
        return _checklogin
    return _dec
| [
"def",
"user_passes_test",
"(",
"test_func",
",",
"login_url",
"=",
"LOGIN_URL",
")",
":",
"def",
"_dec",
"(",
"view_func",
")",
":",
"def",
"_checklogin",
"(",
"request",
",",
"*",
"args",
",",
"**",
"kwargs",
")",
":",
"if",
"test_func",
"(",
"request"... | decorator for views that checks that the user passes the given test . | train | false |
def _TestGenerateAccessToken(action, tester, device_dict, auth_info_dict, user_cookie=None, use_short_token=True):
    """Drive the auth API that sends a Viewfinder access token and validate the result.

    Checks the reported token digit count (4 for short tokens or Phone
    identities, else 9), then validates the updated identity row.
    Returns the queried Identity object.
    """
    response_dict = _GenerateAccessToken(action, tester, device_dict, auth_info_dict, user_cookie, use_short_token=use_short_token)
    (identity_type, value) = Identity.SplitKey(auth_info_dict['identity'])
    expected_digits = (4 if (use_short_token or (identity_type == 'Phone')) else 9)
    assert (response_dict['token_digits'] == expected_digits), response_dict
    identity = tester._RunAsync(Identity.Query, tester.validator.client, auth_info_dict['identity'], None)
    tester.validator.ValidateUpdateDBObject(Identity, key=auth_info_dict['identity'], authority='Viewfinder', user_id=identity.user_id, access_token=identity.access_token, expires=identity.expires)
    return identity
| [
"def",
"_TestGenerateAccessToken",
"(",
"action",
",",
"tester",
",",
"device_dict",
",",
"auth_info_dict",
",",
"user_cookie",
"=",
"None",
",",
"use_short_token",
"=",
"True",
")",
":",
"response_dict",
"=",
"_GenerateAccessToken",
"(",
"action",
",",
"tester",
... | invokes the auth api that triggers the email of a viewfinder access token . | train | false |
@non_atomic_requests
def site_series(request, format, group, start, end, field):
    """Pull a single ``field`` from the site-query data as a date series.

    Responds as CSV or JSON depending on ``format``; a ``group`` of 'day' is
    normalized to 'date' for the underlying query.
    """
    (start, end) = get_daterange_or_404(start, end)
    group = ('date' if (group == 'day') else group)
    series = []
    (full_series, keys) = _site_query(group, start, end, field, request)
    for row in full_series:
        # Only emit rows that actually carry the requested field.
        if (field in row['data']):
            series.append({'date': row['date'], 'count': row['data'][field], 'data': {}})
    if (format == 'csv'):
        (series, fields) = csv_fields(series)
        return render_csv(request, None, series, (['date', 'count'] + list(fields)), title=('%s week Site Statistics' % settings.DOMAIN), show_disclaimer=True)
    return render_json(request, None, series)
| [
"@",
"non_atomic_requests",
"def",
"site_series",
"(",
"request",
",",
"format",
",",
"group",
",",
"start",
",",
"end",
",",
"field",
")",
":",
"(",
"start",
",",
"end",
")",
"=",
"get_daterange_or_404",
"(",
"start",
",",
"end",
")",
"group",
"=",
"(... | pull a single field from the site_query data . | train | false |
def add_dummy_padding(x, depth, boundary):
    """Pad array ``x`` along every axis whose boundary type is 'none'.

    For each such axis ``k`` an empty block of width ``depth[k]`` is
    concatenated on both ends, then the result is rechunked so the padding is
    absorbed into the first and last chunks of that axis.
    """
    for (k, v) in boundary.items():
        if (v == 'none'):
            d = depth[k]
            empty_shape = list(x.shape)
            empty_shape[k] = d
            empty_chunks = list(x.chunks)
            empty_chunks[k] = (d,)
            empty = wrap.empty(empty_shape, chunks=empty_chunks, dtype=x.dtype)
            out_chunks = list(x.chunks)
            ax_chunks = list(out_chunks[k])
            # Fold the padding into the outermost chunks of axis k.
            ax_chunks[0] += d
            ax_chunks[(-1)] += d
            out_chunks[k] = ax_chunks
            x = concatenate([empty, x, empty], axis=k)
            x = x.rechunk(out_chunks)
    return x
| [
"def",
"add_dummy_padding",
"(",
"x",
",",
"depth",
",",
"boundary",
")",
":",
"for",
"(",
"k",
",",
"v",
")",
"in",
"boundary",
".",
"items",
"(",
")",
":",
"if",
"(",
"v",
"==",
"'none'",
")",
":",
"d",
"=",
"depth",
"[",
"k",
"]",
"empty_sha... | pads an array which has none as the boundary type . | train | false |
def rand_text_alpha_upper(length, bad=''):
    """Generate a random string of ``length`` upper-case alpha chars, excluding any in ``bad``."""
    return rand_base(length, bad, set(upperAlpha))
| [
"def",
"rand_text_alpha_upper",
"(",
"length",
",",
"bad",
"=",
"''",
")",
":",
"return",
"rand_base",
"(",
"length",
",",
"bad",
",",
"set",
"(",
"upperAlpha",
")",
")"
] | generate a random upper string with alpha chars . | train | false |
def deferToThread(f, *args, **kwargs):
    """Run ``f(*args, **kwargs)`` in the reactor's thread pool and return a Deferred firing with the result."""
    # Imported lazily so importing this module does not install a reactor.
    from twisted.internet import reactor
    return deferToThreadPool(reactor, reactor.getThreadPool(), f, *args, **kwargs)
| [
"def",
"deferToThread",
"(",
"f",
",",
"*",
"args",
",",
"**",
"kwargs",
")",
":",
"from",
"twisted",
".",
"internet",
"import",
"reactor",
"return",
"deferToThreadPool",
"(",
"reactor",
",",
"reactor",
".",
"getThreadPool",
"(",
")",
",",
"f",
",",
"*",... | run a function in a thread and return the result as a deferred . | train | false |
def getIntFromFloatString(value):
    """Convert *value* to an int, rounding when it is written as a float.

    Returns None for a blank or whitespace-only string; a plain integer
    representation is converted directly, a decimal one is rounded to the
    nearest integer.
    """
    text = str(value).strip()
    if not text:
        return None
    if '.' not in text:
        return int(value)
    return int(round(float(text)))
| [
"def",
"getIntFromFloatString",
"(",
"value",
")",
":",
"floatString",
"=",
"str",
"(",
"value",
")",
".",
"strip",
"(",
")",
"if",
"(",
"floatString",
"==",
"''",
")",
":",
"return",
"None",
"dotIndex",
"=",
"floatString",
".",
"find",
"(",
"'.'",
")"... | get the int from the string . | train | false |
def _get_processor_decline_html(params):
    """Return HTML telling the user the payment processor declined their payment.

    ``params`` must contain 'decision' and 'reason_code'; the reason code is
    mapped to human-readable text through the module-level REASONCODE_MAP.
    """
    payment_support_email = configuration_helpers.get_value('payment_support_email', settings.PAYMENT_SUPPORT_EMAIL)
    return _format_error_html(_('Sorry! Our payment processor did not accept your payment. The decision they returned was {decision}, and the reason was {reason}. You were not charged. Please try a different form of payment. Contact us with payment-related questions at {email}.').format(decision='<span class="decision">{decision}</span>'.format(decision=params['decision']), reason='<span class="reason">{reason_code}:{reason_msg}</span>'.format(reason_code=params['reason_code'], reason_msg=REASONCODE_MAP.get(params['reason_code'])), email=payment_support_email))
| [
"def",
"_get_processor_decline_html",
"(",
"params",
")",
":",
"payment_support_email",
"=",
"configuration_helpers",
".",
"get_value",
"(",
"'payment_support_email'",
",",
"settings",
".",
"PAYMENT_SUPPORT_EMAIL",
")",
"return",
"_format_error_html",
"(",
"_",
"(",
"'S... | return html indicating that the users payment was declined . | train | false |
def _fix_osmesa_gl_lib_if_testing():
    """If the test-suite runs with the 'osmesa' backend, patch the GL library.

    Backend detection is via the _VISPY_TESTING_APP environment variable.
    """
    test_name = os.getenv('_VISPY_TESTING_APP', None)
    if (test_name == 'osmesa'):
        from ...util.osmesa_gl import fix_osmesa_gl_lib
        fix_osmesa_gl_lib()
| [
"def",
"_fix_osmesa_gl_lib_if_testing",
"(",
")",
":",
"test_name",
"=",
"os",
".",
"getenv",
"(",
"'_VISPY_TESTING_APP'",
",",
"None",
")",
"if",
"(",
"test_name",
"==",
"'osmesa'",
")",
":",
"from",
"...",
"util",
".",
"osmesa_gl",
"import",
"fix_osmesa_gl_l... | this functions checks if we a running test with the osmesa backends and fix the gl library if needed . | train | false |
def all_correlations_book_version(bait, target):
    """Return an array of Pearson correlations between ``bait`` and each row of ``target``.

    ``corrs[i]`` is the correlation coefficient of ``bait`` with ``target[i]``.
    """
    coefficients = []
    for row in target:
        coefficients.append(np.corrcoef(bait, row)[0, 1])
    return np.array(coefficients)
| [
"def",
"all_correlations_book_version",
"(",
"bait",
",",
"target",
")",
":",
"return",
"np",
".",
"array",
"(",
"[",
"np",
".",
"corrcoef",
"(",
"bait",
",",
"c",
")",
"[",
"(",
"0",
",",
"1",
")",
"]",
"for",
"c",
"in",
"target",
"]",
")"
] | corrs = all_correlations corrs[i] is the correlation between bait and target[i] . | train | false |
def allow_remote_invocation(func, method='auto'):
    """Mark ``func`` as callable through a remote AJAX action.

    Stores ``method`` on the function under the ``allow_rmi`` attribute and
    returns the function unchanged, so this works as a decorator.
    """
    func.allow_rmi = method
    return func
| [
"def",
"allow_remote_invocation",
"(",
"func",
",",
"method",
"=",
"'auto'",
")",
":",
"setattr",
"(",
"func",
",",
"'allow_rmi'",
",",
"method",
")",
"return",
"func"
] | all methods which shall be callable through a given ajax action must be decorated with @allowed_action . | train | false |
def dic(trace, model=None):
    """Calculate the Deviance Information Criterion of the samples in ``trace`` from ``model``.

    DIC = 2 * mean(deviance) - deviance(posterior mean), where
    deviance = -2 * log-probability.
    """
    model = modelcontext(model)
    mean_deviance = ((-2) * np.mean([model.logp(pt) for pt in trace]))
    # Deviance evaluated at the posterior mean of each free random variable.
    free_rv_means = {rv.name: trace[rv.name].mean(axis=0) for rv in model.free_RVs}
    deviance_at_mean = ((-2) * model.logp(free_rv_means))
    return ((2 * mean_deviance) - deviance_at_mean)
| [
"def",
"dic",
"(",
"trace",
",",
"model",
"=",
"None",
")",
":",
"model",
"=",
"modelcontext",
"(",
"model",
")",
"mean_deviance",
"=",
"(",
"(",
"-",
"2",
")",
"*",
"np",
".",
"mean",
"(",
"[",
"model",
".",
"logp",
"(",
"pt",
")",
"for",
"pt"... | calculate the deviance information criterion of the samples in trace from model read more theory here - in a paper by some of the leading authorities on model selection - dx . | train | false |
def getMax(first, second):
    """Return the larger of ``first`` and ``second``."""
    if second > first:
        return second
    return first
| [
"def",
"getMax",
"(",
"first",
",",
"second",
")",
":",
"return",
"max",
"(",
"first",
",",
"second",
")"
] | get the max . | train | false |
def get_limit():
    """Return the current connection-pool limit (first element of the module-level ``_limit``)."""
    return _limit[0]
| [
"def",
"get_limit",
"(",
")",
":",
"return",
"_limit",
"[",
"0",
"]"
] | get current connection pool limit . | train | false |
def rar_extract(rarfile_path, numrars, one_folder, nzo, setname, extraction_path):
    """Unpack a single RAR set to ``extraction_path``, trying each known password.

    Returns a ``(fail, new_files, rars)`` tuple; ``fail`` is 2 when every
    password was rejected (archive requires a password).
    """
    fail = 0
    new_files = None
    rars = []
    passwords = get_all_passwords(nzo)
    for password in passwords:
        if password:
            logging.debug('Trying unrar with password "%s"', password)
            msg = (T('Trying unrar with password "%s"') % unicoder(password))
            nzo.fail_msg = msg
            nzo.set_unpack_info('Unpack', msg)
        (fail, new_files, rars) = rar_extract_core(rarfile_path, numrars, one_folder, nzo, setname, extraction_path, password)
        # fail == 2 means "wrong password"; any other outcome ends the loop.
        if (fail != 2):
            break
    if (fail == 2):
        logging.error('%s (%s)', T('Unpacking failed, archive requires a password'), os.path.split(rarfile_path)[1])
    return (fail, new_files, rars)
| [
"def",
"rar_extract",
"(",
"rarfile_path",
",",
"numrars",
",",
"one_folder",
",",
"nzo",
",",
"setname",
",",
"extraction_path",
")",
":",
"fail",
"=",
"0",
"new_files",
"=",
"None",
"rars",
"=",
"[",
"]",
"passwords",
"=",
"get_all_passwords",
"(",
"nzo"... | unpack single rar set rarfile to extraction_path . | train | false |
def post_order_list(node, filter_func=no_filter):
    """Return tree nodes in post-order, restricted to nodes accepted by ``filter_func``.

    Iterative traversal: ``stack`` holds (node, child-index) pairs for the
    path back to the root; ``poped`` flags that we just came back up from a
    subtree, so the node is emitted instead of descended into again.
    """
    (l, stack) = ([], [])
    (poped, index) = (0, 0)
    while node:
        if filter_func(node):
            if (node.children and (not poped)):
                # First visit of an internal node: remember it, descend into
                # its first child.
                stack.append((node, index))
                index = 0
                node = node.children[0]
            else:
                # Leaf, or returning from the last child: emit the node and
                # try to move to the next sibling.
                l.append(node)
                index += 1
                try:
                    node = stack[(-1)][0].children[index]
                except IndexError:
                    node = None
        else:
            # Rejected node: prune this whole subtree.
            node = None
        poped = 0
        if ((node is None) and stack):
            # No sibling left: pop back up to the parent and emit it next.
            (node, index) = stack.pop()
            poped = 1
    return l
| [
"def",
"post_order_list",
"(",
"node",
",",
"filter_func",
"=",
"no_filter",
")",
":",
"(",
"l",
",",
"stack",
")",
"=",
"(",
"[",
"]",
",",
"[",
"]",
")",
"(",
"poped",
",",
"index",
")",
"=",
"(",
"0",
",",
"0",
")",
"while",
"node",
":",
"... | create a list with tree nodes for which the <filter> function returned true in a post order fashion . | train | false |
def do_votes_by_user(parser, token):
    """Template tag parser for ``{% votes_by_user user on object_list as varname %}``.

    Validates the 6-part token shape and returns a VotesByUserNode that will
    store the user's votes (keyed by object id) in the named context variable.
    """
    bits = token.contents.split()
    if (len(bits) != 6):
        raise template.TemplateSyntaxError(("'%s' tag takes exactly four arguments" % bits[0]))
    if (bits[2] != 'on'):
        raise template.TemplateSyntaxError(("second argument to '%s' tag must be 'on'" % bits[0]))
    if (bits[4] != 'as'):
        raise template.TemplateSyntaxError(("fourth argument to '%s' tag must be 'as'" % bits[0]))
    return VotesByUserNode(bits[1], bits[3], bits[5])
| [
"def",
"do_votes_by_user",
"(",
"parser",
",",
"token",
")",
":",
"bits",
"=",
"token",
".",
"contents",
".",
"split",
"(",
")",
"if",
"(",
"len",
"(",
"bits",
")",
"!=",
"6",
")",
":",
"raise",
"template",
".",
"TemplateSyntaxError",
"(",
"(",
"\"'%... | retrieves the votes cast by a user on a list of objects as a dictionary keyed with object ids and stores it in a context variable . | train | false |
def testNames(tests):
    """Return the id() of every test within the given test suite or case."""
    return [test.id() for test in _iterateTests(tests)]
| [
"def",
"testNames",
"(",
"tests",
")",
":",
"names",
"=",
"[",
"]",
"for",
"test",
"in",
"_iterateTests",
"(",
"tests",
")",
":",
"names",
".",
"append",
"(",
"test",
".",
"id",
"(",
")",
")",
"return",
"names"
] | return the id of each test within the given test suite or case . | train | false |
def get_category_value(label):
    """Return the category name ('animal', 'human', ...) represented by a category label int."""
    return _get_array_element('category', label, ('animal', 'human', 'airplane', 'truck', 'car', 'blank'))
| [
"def",
"get_category_value",
"(",
"label",
")",
":",
"return",
"_get_array_element",
"(",
"'category'",
",",
"label",
",",
"(",
"'animal'",
",",
"'human'",
",",
"'airplane'",
",",
"'truck'",
",",
"'car'",
",",
"'blank'",
")",
")"
] | returns the category name represented by a category label int . | train | false |
def get_data_cache_dir(data_dir, subdir=None):
    """Return (creating it if needed) the directory used to cache data files.

    The base directory is taken from the NEON_DATA_CACHE_DIR environment
    variable when set, otherwise ``data_dir``.  ``subdir`` may be a single
    path component or a list of components appended beneath the base.

    Arguments:
        data_dir (str): fallback base directory.
        subdir (str or list, optional): sub-path beneath the base.

    Returns:
        str: path of the (now existing) cache directory.
    """
    data_cache_dir = os.environ.get('NEON_DATA_CACHE_DIR')
    if data_cache_dir is None:
        data_cache_dir = data_dir
    if subdir:
        subdir = subdir if isinstance(subdir, list) else [subdir]
        data_cache_dir = os.path.join(data_cache_dir, *subdir)
    # exist_ok avoids the check-then-create race of the original
    # os.path.exists()/os.makedirs() pair (another process could create the
    # directory between the two calls and crash makedirs).
    os.makedirs(data_cache_dir, exist_ok=True)
    return data_cache_dir
| [
"def",
"get_data_cache_dir",
"(",
"data_dir",
",",
"subdir",
"=",
"None",
")",
":",
"data_cache_dir",
"=",
"os",
".",
"environ",
".",
"get",
"(",
"'NEON_DATA_CACHE_DIR'",
")",
"if",
"(",
"data_cache_dir",
"is",
"None",
")",
":",
"data_cache_dir",
"=",
"data_... | function for getting cache directory to store data cache files . | train | false |
def _lessbroken_deepcopy(a):
    """Return a copy of ``a`` that shares no internal storage with the original.

    numpy ndarrays/memmaps are copied via ``.copy()``; Theano ``_cdata_type``
    objects are returned as-is (not copyable); everything else goes through
    ``copy.deepcopy``.
    """
    from theano.gof.type import _cdata_type
    if (type(a) in (numpy.ndarray, numpy.memmap)):
        rval = a.copy()
    elif (type(a) is _cdata_type):
        rval = a
    else:
        rval = copy.deepcopy(a)
    # Sanity checks: the copy must preserve the exact type (and dtype for arrays).
    assert (type(rval) == type(a)), (type(rval), type(a))
    if isinstance(rval, numpy.ndarray):
        assert (rval.dtype == a.dtype)
    return rval
| [
"def",
"_lessbroken_deepcopy",
"(",
"a",
")",
":",
"from",
"theano",
".",
"gof",
".",
"type",
"import",
"_cdata_type",
"if",
"(",
"type",
"(",
"a",
")",
"in",
"(",
"numpy",
".",
"ndarray",
",",
"numpy",
".",
"memmap",
")",
")",
":",
"rval",
"=",
"a... | parameters a any object returns object a copy of a that shares no internal storage with the original . | train | false |
def fast_relpath(path, start):
    """A prefix-based relpath: return ``path`` relative to directory ``start``.

    Unlike os.path.relpath this never walks upward; it raises ValueError when
    ``start`` is not a directory prefix of ``path``.
    """
    if not path.startswith(start):
        raise ValueError(u'{} is not a prefix of {}'.format(start, path))
    prefix_len = len(start)
    if prefix_len == len(path):
        return u''
    if prefix_len == 0:
        return path
    if start.endswith(u'/'):
        return path[prefix_len:]
    if path[prefix_len] == u'/':
        return path[prefix_len + 1:]
    raise ValueError(u'{} is not a directory containing {}'.format(start, path))
| [
"def",
"fast_relpath",
"(",
"path",
",",
"start",
")",
":",
"if",
"(",
"not",
"path",
".",
"startswith",
"(",
"start",
")",
")",
":",
"raise",
"ValueError",
"(",
"u'{} is not a prefix of {}'",
".",
"format",
"(",
"start",
",",
"path",
")",
")",
"if",
"... | a prefix-based relpath . | train | false |
def isInf(value):
    """Determine whether ``value`` is an infinite IEEE-754 double (max exponent 2047, zero mantissa)."""
    return ((_exponent(value) == 2047) and _zero_mantissa(value))
| [
"def",
"isInf",
"(",
"value",
")",
":",
"return",
"(",
"(",
"_exponent",
"(",
"value",
")",
"==",
"2047",
")",
"and",
"_zero_mantissa",
"(",
"value",
")",
")"
] | determine if the argument is an infinite ieee 754 value . | train | false |
@forum.route('/post/<int:post_id>')
def view_post(post_id):
    """Redirect to the topic page (with anchor) that contains the given post."""
    post = Post.query.filter_by(id=post_id).first_or_404()
    # 1-based position of the post within its topic determines the page number.
    post_in_topic = Post.query.filter((Post.topic_id == post.topic_id), (Post.id <= post_id)).order_by(Post.id.asc()).count()
    page = math.ceil((post_in_topic / float(flaskbb_config['POSTS_PER_PAGE'])))
    return redirect((post.topic.url + ('?page=%d#pid%s' % (page, post.id))))
| [
"@",
"forum",
".",
"route",
"(",
"'/post/<int:post_id>'",
")",
"def",
"view_post",
"(",
"post_id",
")",
":",
"post",
"=",
"Post",
".",
"query",
".",
"filter_by",
"(",
"id",
"=",
"post_id",
")",
".",
"first_or_404",
"(",
")",
"post_in_topic",
"=",
"Post",... | returns post data . | train | false |
def get_memcached(key):
    """Return the deserialized (pickled) object stored in the cache under ``key``.

    When the key is absent an empty dict is first stored via set_memcached,
    so there is always a pickled value to load.
    """
    data = cache.get(key)
    if not data:
        # Seed the key so the fetch below finds a pickled value.
        set_memcached(key, {})
        data = cache.get(key)
    # Reuse the already-fetched value instead of hitting the cache a second
    # time unconditionally (the original re-fetched on every call, doubling
    # round-trips and racing with concurrent writers).
    return cPickle.loads(data)
| [
"def",
"get_memcached",
"(",
"key",
")",
":",
"data",
"=",
"cache",
".",
"get",
"(",
"key",
")",
"if",
"(",
"not",
"data",
")",
":",
"set_memcached",
"(",
"key",
",",
"{",
"}",
")",
"obj",
"=",
"cPickle",
".",
"loads",
"(",
"cache",
".",
"get",
... | return deserialize object from memcached . | train | false |
def _create_and_add_option(option):
    """Parse one option mapping, build an Option instance and attach it to the current statement.

    ``option`` is a single-key dict ``{type: params}``; this mutates the
    module-level parser state (_current_option / _current_statement).
    """
    global _current_option
    _current_option = Option()
    (type_, params) = _expand_one_key_dictionary(option)
    _current_option.type = type_
    _create_and_add_parameters(params)
    _current_statement.add_child(_current_option)
| [
"def",
"_create_and_add_option",
"(",
"option",
")",
":",
"global",
"_current_option",
"_current_option",
"=",
"Option",
"(",
")",
"(",
"type_",
",",
"params",
")",
"=",
"_expand_one_key_dictionary",
"(",
"option",
")",
"_current_option",
".",
"type",
"=",
"type... | parses the configuration and creates an option instance . | train | true |
def check_modify_host_locking(host, update_data):
    """Validate a requested lock/unlock against the host's current state.

    Raises ValidationError when asked to lock an already-locked host or to
    unlock an already-unlocked one; a missing 'locked' key is a no-op.
    """
    locked = update_data.get('locked', None)
    if (locked is not None):
        if (locked and host.locked):
            raise model_logic.ValidationError({'locked': ('Host already locked by %s on %s.' % (host.locked_by, host.lock_time))})
        if ((not locked) and (not host.locked)):
            raise model_logic.ValidationError({'locked': 'Host already unlocked.'})
| [
"def",
"check_modify_host_locking",
"(",
"host",
",",
"update_data",
")",
":",
"locked",
"=",
"update_data",
".",
"get",
"(",
"'locked'",
",",
"None",
")",
"if",
"(",
"locked",
"is",
"not",
"None",
")",
":",
"if",
"(",
"locked",
"and",
"host",
".",
"lo... | checks when locking/unlocking has been requested if the host is already locked/unlocked . | train | false |
@handle_response_format
@treeio_login_required
def dashboard_widget_arrange(request, panel='left', response_format='html'):
    """Persist a new ordering of the user's dashboard widgets (AJAX request).

    Left-panel widgets get negative weights (shift -100) so they sort before
    right-panel widgets (shift +100); the widget's position within the
    submitted id list breaks ties.
    """
    user = request.user.profile
    if ((panel == 'left') or (not panel)):
        shift = (-100)
    else:
        shift = 100
    if (request.GET and ('id_widget[]' in request.GET)):
        widget_ids = request.GET.getlist('id_widget[]')
        widgets = Widget.objects.filter(user=user, pk__in=widget_ids)
        for widget in widgets:
            if (unicode(widget.id) in widget_ids):
                widget.weight = (shift + widget_ids.index(unicode(widget.id)))
                widget.save()
    return HttpResponseRedirect(reverse('core_dashboard_index'))
| [
"@",
"handle_response_format",
"@",
"treeio_login_required",
"def",
"dashboard_widget_arrange",
"(",
"request",
",",
"panel",
"=",
"'left'",
",",
"response_format",
"=",
"'html'",
")",
":",
"user",
"=",
"request",
".",
"user",
".",
"profile",
"if",
"(",
"(",
"... | arrange widgets with ajax request . | train | false |
@given(u'a run having mixed text content')
def given_a_run_having_mixed_text_content(context):
    """BDD step: build a Run whose XML mixes <w:t> text with tab/cr/drawing/br children ("mixed" content)."""
    r_xml = (u' <w:r %s>\n <w:t>abc</w:t>\n <w:tab/>\n <w:t>def</w:t>\n <w:cr/>\n <w:t>ghi</w:t>\n <w:drawing/>\n <w:br/>\n <w:t>jkl</w:t>\n </w:r>' % nsdecls(u'w'))
    r = parse_xml(r_xml)
    context.run = Run(r, None)
| [
"@",
"given",
"(",
"u'a run having mixed text content'",
")",
"def",
"given_a_run_having_mixed_text_content",
"(",
"context",
")",
":",
"r_xml",
"=",
"(",
"u' <w:r %s>\\n <w:t>abc</w:t>\\n <w:tab/>\\n <w:t>def</w:t>\\n <w:cr/>\\n <w:t>g... | mixed here meaning it contains <w:tab/> . | train | false |
def chi_square_test(observed, expected, alpha=0.05, df=None):
    """Pearson's chi-squared goodness-of-fit test.

    Returns True when the statistic falls below the critical value looked up
    in the module-level ``chi_square_table`` for the given significance level.
    Only alpha of 0.05 or 0.01 is supported; ``df`` defaults to
    ``observed.size - 1``.
    """
    if (df is None):
        df = (observed.size - 1)
    if (alpha == 0.01):
        alpha_idx = 0
    elif (alpha == 0.05):
        alpha_idx = 1
    else:
        raise ValueError('support only alpha == 0.05 or 0.01')
    chi_square = numpy.sum((((observed - expected) ** 2) / expected))
    return (chi_square < chi_square_table[alpha_idx][df])
| [
"def",
"chi_square_test",
"(",
"observed",
",",
"expected",
",",
"alpha",
"=",
"0.05",
",",
"df",
"=",
"None",
")",
":",
"if",
"(",
"df",
"is",
"None",
")",
":",
"df",
"=",
"(",
"observed",
".",
"size",
"-",
"1",
")",
"if",
"(",
"alpha",
"==",
... | testing goodness-of-fit test with pearsons chi-squared test . | train | false |
def insert_pure_function(module, fnty, name):
    """Get or insert a function ``name`` of type ``fnty`` in ``module``, marked pure (readonly, nounwind)."""
    fn = module.get_or_insert_function(fnty, name=name)
    fn.attributes.add('readonly')
    fn.attributes.add('nounwind')
    return fn
| [
"def",
"insert_pure_function",
"(",
"module",
",",
"fnty",
",",
"name",
")",
":",
"fn",
"=",
"module",
".",
"get_or_insert_function",
"(",
"fnty",
",",
"name",
"=",
"name",
")",
"fn",
".",
"attributes",
".",
"add",
"(",
"'readonly'",
")",
"fn",
".",
"a... | insert a pure function in the given module . | train | false |
def security_group_count_by_project(context, project_id, session=None):
    """Count security groups in a project (delegates to the DB implementation layer)."""
    return IMPL.security_group_count_by_project(context, project_id, session=session)
| [
"def",
"security_group_count_by_project",
"(",
"context",
",",
"project_id",
",",
"session",
"=",
"None",
")",
":",
"return",
"IMPL",
".",
"security_group_count_by_project",
"(",
"context",
",",
"project_id",
",",
"session",
"=",
"session",
")"
] | count number of security groups in a project . | train | false |
def get_listing_content_type(req):
    """Determine the content type for an account/container listing response.

    An explicit ?format= query parameter overrides the Accept header (unknown
    formats fall back to plain text); raises HTTPNotAcceptable when no
    supported type matches.
    """
    query_format = get_param(req, 'format')
    if query_format:
        req.accept = FORMAT2CONTENT_TYPE.get(query_format.lower(), FORMAT2CONTENT_TYPE['plain'])
    out_content_type = req.accept.best_match(['text/plain', 'application/json', 'application/xml', 'text/xml'])
    if (not out_content_type):
        raise HTTPNotAcceptable(request=req)
    return out_content_type
| [
"def",
"get_listing_content_type",
"(",
"req",
")",
":",
"query_format",
"=",
"get_param",
"(",
"req",
",",
"'format'",
")",
"if",
"query_format",
":",
"req",
".",
"accept",
"=",
"FORMAT2CONTENT_TYPE",
".",
"get",
"(",
"query_format",
".",
"lower",
"(",
")",... | determine the content type to use for an account or container listing response . | train | false |
def tag_(name, image, force=False):
    """Tag image ``name`` into the repository/tag given by ``image`` and return the client response.

    The cached context is cleared afterwards so later lookups see the new tag.
    """
    image_id = inspect_image(name)['Id']
    (repo_name, repo_tag) = _get_repo_tag(image)
    response = _client_wrapper('tag', image_id, repo_name, tag=repo_tag, force=force)
    _clear_context()
    return response
| [
"def",
"tag_",
"(",
"name",
",",
"image",
",",
"force",
"=",
"False",
")",
":",
"image_id",
"=",
"inspect_image",
"(",
"name",
")",
"[",
"'Id'",
"]",
"(",
"repo_name",
",",
"repo_tag",
")",
"=",
"_get_repo_tag",
"(",
"image",
")",
"response",
"=",
"_... | tag an image into a repository and return true . | train | false |
def setup_redis():
    """Build a dict of redis ConnectionPool objects, one per entry in settings.STREAM_REDIS_CONFIG."""
    pools = {}
    for (name, config) in settings.STREAM_REDIS_CONFIG.items():
        pool = redis.ConnectionPool(host=config['host'], port=config['port'], password=config.get('password'), db=config['db'], decode_responses=True)
        pools[name] = pool
    return pools
| [
"def",
"setup_redis",
"(",
")",
":",
"pools",
"=",
"{",
"}",
"for",
"(",
"name",
",",
"config",
")",
"in",
"settings",
".",
"STREAM_REDIS_CONFIG",
".",
"items",
"(",
")",
":",
"pool",
"=",
"redis",
".",
"ConnectionPool",
"(",
"host",
"=",
"config",
"... | generate config for redis cache . | train | false |
def get_job_info(name=None):
    """Return information about the named Jenkins job, or False when the server returns nothing.

    Raises SaltInvocationError when ``name`` is missing or the job does not exist.
    """
    if (not name):
        raise SaltInvocationError('Required parameter `name` is missing.')
    server = _connect()
    if (not job_exists(name)):
        raise SaltInvocationError('Job `{0}` does not exist.'.format(name))
    job_info = server.get_job_info(name)
    if job_info:
        return job_info
    return False
| [
"def",
"get_job_info",
"(",
"name",
"=",
"None",
")",
":",
"if",
"(",
"not",
"name",
")",
":",
"raise",
"SaltInvocationError",
"(",
"'Required parameter `name` is missing.'",
")",
"server",
"=",
"_connect",
"(",
")",
"if",
"(",
"not",
"job_exists",
"(",
"nam... | return information about the jenkins job . | train | true |
def decorated_with_abc(func):
    """Return True when the ``func`` node is decorated with one of the abc abstract-method decorators.

    Decorators whose inference fails are skipped; implicitly returns None
    (falsy) when no abc decorator is found or the node has no decorators.
    """
    if func.decorators:
        for node in func.decorators.nodes:
            try:
                infered = next(node.infer())
            except astroid.InferenceError:
                continue
            if (infered and (infered.qname() in ABC_METHODS)):
                return True
| [
"def",
"decorated_with_abc",
"(",
"func",
")",
":",
"if",
"func",
".",
"decorators",
":",
"for",
"node",
"in",
"func",
".",
"decorators",
".",
"nodes",
":",
"try",
":",
"infered",
"=",
"next",
"(",
"node",
".",
"infer",
"(",
")",
")",
"except",
"astr... | determine if the func node is decorated with abc decorators . | train | false |
def post_save_profile(instance, sender, **kwargs):
    """post_save signal handler: add every newly created regular user to the current site.

    Skips superusers, raw (fixture) saves, updates to existing users and the
    AnonymousUser placeholder.
    """
    if ((not instance.is_superuser) and kwargs['created'] and (not kwargs['raw']) and (instance.username != 'AnonymousUser')):
        current_site = Site.objects.get_current()
        SitePeople.objects.get(site=current_site).people.add(instance)
| [
"def",
"post_save_profile",
"(",
"instance",
",",
"sender",
",",
"**",
"kwargs",
")",
":",
"if",
"(",
"(",
"not",
"instance",
".",
"is_superuser",
")",
"and",
"kwargs",
"[",
"'created'",
"]",
"and",
"(",
"not",
"kwargs",
"[",
"'raw'",
"]",
")",
"and",
... | signal to ensure that every created user is assigned to the current site only . | train | false |
def check_boolean_field_default_value():
    """Warn about model BooleanFields that have no explicit default.

    Returns a warning message string listing the offending fields (their
    default changed from False to None in Django 1.6), or None implicitly
    when every BooleanField has a default.
    """
    fields = []
    for cls in models.get_models():
        opts = cls._meta
        for f in opts.local_fields:
            if (isinstance(f, models.BooleanField) and (not f.has_default())):
                fields.append((u'%s.%s: "%s"' % (opts.app_label, opts.object_name, f.name)))
    if fields:
        fieldnames = u', '.join(fields)
        message = [u'You have not set a default value for one or more BooleanFields:', (u'%s.' % fieldnames), u'In Django 1.6 the default value of BooleanField was changed from', u"False to Null when Field.default isn't defined. See", u'https://docs.djangoproject.com/en/1.6/ref/models/fields/#booleanfield', u'for more information.']
        return u' '.join(message)
| [
"def",
"check_boolean_field_default_value",
"(",
")",
":",
"fields",
"=",
"[",
"]",
"for",
"cls",
"in",
"models",
".",
"get_models",
"(",
")",
":",
"opts",
"=",
"cls",
".",
"_meta",
"for",
"f",
"in",
"opts",
".",
"local_fields",
":",
"if",
"(",
"isinst... | checks if there are any booleanfields without a default value . | train | false |
def test_finally_execution():
    """When one WAL segment upload fails, parallel segment greenlets must still run their cleanup.

    The OK segment blocks forever inside a try/finally; the bad segment
    raises, which should kill the group and fire the finally block.
    """
    segBad = FakeWalSegment((('1' * 8) * 3))
    segOK = FakeWalSegment((('2' * 8) * 3))
    class CleanupCheckingUploader(object, ):
        # Records whether the blocked upload's finally clause ran.
        def __init__(self):
            self.cleaned_up = False
        def __call__(self, segment):
            if (segment is segOK):
                try:
                    while True:
                        gevent.sleep(0.1)
                finally:
                    self.cleaned_up = True
            elif (segment is segBad):
                raise Explosion('fail')
            else:
                assert False, 'Expect only two segments'
            segment._uploaded = True
            return segment
    uploader = CleanupCheckingUploader()
    group = worker.WalTransferGroup(uploader)
    group.start(segOK)
    group.start(segBad)
    with pytest.raises(Explosion):
        group.join()
    assert (uploader.cleaned_up is True)
| [
"def",
"test_finally_execution",
"(",
")",
":",
"segBad",
"=",
"FakeWalSegment",
"(",
"(",
"(",
"'1'",
"*",
"8",
")",
"*",
"3",
")",
")",
"segOK",
"=",
"FakeWalSegment",
"(",
"(",
"(",
"'2'",
"*",
"8",
")",
"*",
"3",
")",
")",
"class",
"CleanupChec... | when one segment fails ensure parallel segments clean up . | train | false |
def accept_vpc_peering_connection(conn_id='', name='', region=None, key=None, keyid=None, profile=None, dry_run=False):
    """Accept a VPC peering connection request between two VPCs.

    Exactly one of ``conn_id`` or ``name`` must be given; a name is resolved
    to a connection id first.  Returns ``{'msg': ...}`` on success or
    ``{'error': ...}`` when boto raises a ClientError.
    """
    if (not _exactly_one((conn_id, name))):
        raise SaltInvocationError('One (but not both) of vpc_peering_connection_id or name must be provided.')
    conn = _get_conn3(region=region, key=key, keyid=keyid, profile=profile)
    if name:
        conn_id = _vpc_peering_conn_id_for_name(name, conn)
    if (not conn_id):
        raise SaltInvocationError('No ID found for this VPC peering connection! ({0}) Please make sure this VPC peering connection exists or invoke this function with a VPC peering connection ID'.format(name))
    try:
        log.debug('Trying to accept vpc peering connection')
        conn.accept_vpc_peering_connection(DryRun=dry_run, VpcPeeringConnectionId=conn_id)
        return {'msg': 'VPC peering connection accepted.'}
    except botocore.exceptions.ClientError as err:
        log.error('Got an error while trying to accept vpc peering')
        return {'error': salt.utils.boto.get_error(err)}
| [
"def",
"accept_vpc_peering_connection",
"(",
"conn_id",
"=",
"''",
",",
"name",
"=",
"''",
",",
"region",
"=",
"None",
",",
"key",
"=",
"None",
",",
"keyid",
"=",
"None",
",",
"profile",
"=",
"None",
",",
"dry_run",
"=",
"False",
")",
":",
"if",
"(",... | request a vpc peering connection between two vpcs . | train | true |
def test_empty_locale_html():
    """locale_html(None) must return an empty/falsy value."""
    s = helpers.locale_html(None)
    assert (not s), 'locale_html on None must be empty.'
| [
"def",
"test_empty_locale_html",
"(",
")",
":",
"s",
"=",
"helpers",
".",
"locale_html",
"(",
"None",
")",
"assert",
"(",
"not",
"s",
")",
",",
"'locale_html on None must be empty.'"
] | locale_html must still work if field is none . | train | false |
def can_access_others_blocks(requesting_user, course_key):
    """Return whether ``requesting_user`` has course-staff access for the course (and so may view other users' blocks)."""
    return has_access(requesting_user, CourseStaffRole.ROLE, course_key)
| [
"def",
"can_access_others_blocks",
"(",
"requesting_user",
",",
"course_key",
")",
":",
"return",
"has_access",
"(",
"requesting_user",
",",
"CourseStaffRole",
".",
"ROLE",
",",
"course_key",
")"
] | returns whether the requesting_user can access the blocks for other users in the given course . | train | false |
def olympus_special_mode(v):
    """Decode the Olympus SpecialMode MakerNote tag.

    ``v`` is a 3-sequence (shooting mode, sequence number, panorama
    direction).  Returns a human-readable string, or ``v`` unchanged when
    either code is unknown.
    """
    shooting_modes = {0: 'Normal', 1: 'Unknown', 2: 'Fast', 3: 'Panorama'}
    pan_directions = {0: 'Non-panoramic', 1: 'Left to right', 2: 'Right to left', 3: 'Bottom to top', 4: 'Top to bottom'}
    if v[0] not in shooting_modes or v[2] not in pan_directions:
        return v
    return '%s - sequence %d - %s' % (shooting_modes[v[0]], v[1], pan_directions[v[2]])
| [
"def",
"olympus_special_mode",
"(",
"v",
")",
":",
"mode1",
"=",
"{",
"0",
":",
"'Normal'",
",",
"1",
":",
"'Unknown'",
",",
"2",
":",
"'Fast'",
",",
"3",
":",
"'Panorama'",
"}",
"mode2",
"=",
"{",
"0",
":",
"'Non-panoramic'",
",",
"1",
":",
"'Left... | decode olympus specialmode tag in makernote . | train | false |
def _openstack_logged_method(method_name, original_name):
    """Build a wrapper method that logs OpenStack client calls and their failures.

    The returned function looks up ``self.<original_name>.<method_name>``,
    runs it inside an OPENSTACK_ACTION Eliot action, and records detailed
    Eliot messages for Nova and Keystone client exceptions before re-raising.
    """
    def _run_with_logging(self, *args, **kwargs):
        original = getattr(self, original_name)
        method = getattr(original, method_name)
        with OPENSTACK_ACTION(operation=[method_name, args, kwargs]):
            try:
                return method(*args, **kwargs)
            except NovaClientException as e:
                NOVA_CLIENT_EXCEPTION(code=e.code, message=e.message, details=e.details, request_id=e.request_id, url=e.url, method=e.method).write()
                raise
            except KeystoneHttpError as e:
                KEYSTONE_HTTP_ERROR(code=e.http_status, message=e.message, details=e.details, request_id=e.request_id, url=e.url, method=e.method, response=e.response.text).write()
                raise
    return _run_with_logging
| [
"def",
"_openstack_logged_method",
"(",
"method_name",
",",
"original_name",
")",
":",
"def",
"_run_with_logging",
"(",
"self",
",",
"*",
"args",
",",
"**",
"kwargs",
")",
":",
"original",
"=",
"getattr",
"(",
"self",
",",
"original_name",
")",
"method",
"="... | run a method and log additional information about any exceptions that are raised . | train | false |
19,220 | def col(loc, strg):
return ((((loc < len(strg)) and (strg[loc] == '\n')) and 1) or (loc - strg.rfind('\n', 0, loc)))
| [
"def",
"col",
"(",
"loc",
",",
"strg",
")",
":",
"return",
"(",
"(",
"(",
"(",
"loc",
"<",
"len",
"(",
"strg",
")",
")",
"and",
"(",
"strg",
"[",
"loc",
"]",
"==",
"'\\n'",
")",
")",
"and",
"1",
")",
"or",
"(",
"loc",
"-",
"strg",
".",
"r... | return a symbolic column variable . | train | true |
19,221 | def _str_extract_frame(arr, pat, flags=0):
from pandas import DataFrame
regex = re.compile(pat, flags=flags)
groups_or_na = _groups_or_na_fun(regex)
names = dict(zip(regex.groupindex.values(), regex.groupindex.keys()))
columns = [names.get((1 + i), i) for i in range(regex.groups)]
if (len(arr) == 0):
return DataFrame(columns=columns, dtype=object)
try:
result_index = arr.index
except AttributeError:
result_index = None
return DataFrame([groups_or_na(val) for val in arr], columns=columns, index=result_index, dtype=object)
| [
"def",
"_str_extract_frame",
"(",
"arr",
",",
"pat",
",",
"flags",
"=",
"0",
")",
":",
"from",
"pandas",
"import",
"DataFrame",
"regex",
"=",
"re",
".",
"compile",
"(",
"pat",
",",
"flags",
"=",
"flags",
")",
"groups_or_na",
"=",
"_groups_or_na_fun",
"("... | for each subject string in the series . | train | true |
19,222 | def printout(queries, things, default=None, f=sys.stdout, **kwargs):
results = denorm(queries, things, default=None)
fields = set(itertools.chain(*(x.keys() for x in results)))
W = csv.DictWriter(f=f, fieldnames=fields, **kwargs)
W.writeheader()
for r in results:
W.writerow(r)
| [
"def",
"printout",
"(",
"queries",
",",
"things",
",",
"default",
"=",
"None",
",",
"f",
"=",
"sys",
".",
"stdout",
",",
"**",
"kwargs",
")",
":",
"results",
"=",
"denorm",
"(",
"queries",
",",
"things",
",",
"default",
"=",
"None",
")",
"fields",
... | will print header and objects **kwargs go to csv . | train | false |
19,224 | def printable_name(string, code_style=False):
if (code_style and ('_' in string)):
string = string.replace('_', ' ')
parts = string.split()
if (code_style and (len(parts) == 1) and (not (string.isalpha() and string.islower()))):
parts = _split_camel_case(parts[0])
return ' '.join(((part[0].upper() + part[1:]) for part in parts))
| [
"def",
"printable_name",
"(",
"string",
",",
"code_style",
"=",
"False",
")",
":",
"if",
"(",
"code_style",
"and",
"(",
"'_'",
"in",
"string",
")",
")",
":",
"string",
"=",
"string",
".",
"replace",
"(",
"'_'",
",",
"' '",
")",
"parts",
"=",
"string"... | generates and returns printable name from the given string . | train | false |
19,226 | @raises(ValueError)
def test_desaturation_prop():
utils.desaturate('blue', 50)
| [
"@",
"raises",
"(",
"ValueError",
")",
"def",
"test_desaturation_prop",
"(",
")",
":",
"utils",
".",
"desaturate",
"(",
"'blue'",
",",
"50",
")"
] | test that pct outside of [0 . | train | false |
19,227 | def _sh_real_to_complex(shs, order):
if (order == 0):
return shs[0]
else:
return ((shs[0] + ((1j * np.sign(order)) * shs[1])) / np.sqrt(2.0))
| [
"def",
"_sh_real_to_complex",
"(",
"shs",
",",
"order",
")",
":",
"if",
"(",
"order",
"==",
"0",
")",
":",
"return",
"shs",
"[",
"0",
"]",
"else",
":",
"return",
"(",
"(",
"shs",
"[",
"0",
"]",
"+",
"(",
"(",
"1j",
"*",
"np",
".",
"sign",
"("... | convert real spherical harmonic pair to complex . | train | false |
19,228 | def save_backend_configuration(dataset_backend_name, dataset_backend_configuration):
dataset_path = FilePath(mkdtemp()).child('dataset-backend.yml')
print 'Saving dataset backend config to: {}'.format(dataset_path.path)
dataset_path.setContent(yaml.safe_dump({dataset_backend_name.name: dataset_backend_configuration}))
return dataset_path
| [
"def",
"save_backend_configuration",
"(",
"dataset_backend_name",
",",
"dataset_backend_configuration",
")",
":",
"dataset_path",
"=",
"FilePath",
"(",
"mkdtemp",
"(",
")",
")",
".",
"child",
"(",
"'dataset-backend.yml'",
")",
"print",
"'Saving dataset backend config to: ... | saves the backend configuration to a local file for consumption by the trial process . | train | false |
19,229 | def req_items_for_inv(site_id, quantity_type):
if (not settings.has_module('req')):
return Storage()
table = s3db.req_req
itable = s3db.req_req_item
query = (((((((table.site_id == site_id) & (table.id == itable.req_id)) & (itable.item_pack_id == itable.item_pack_id)) & (itable[('quantity_%s' % quantity_type)] < itable.quantity)) & (table.cancel == False)) & (table.deleted == False)) & (itable.deleted == False))
req_items = db(query).select(itable.id, itable.req_id, itable.item_id, itable.quantity, itable[('quantity_%s' % quantity_type)], itable.item_pack_id, orderby=(table.date_required | table.date))
req_item_ids = []
unique_req_items = Storage()
for req_item in req_items:
if (req_item.item_id not in req_item_ids):
unique_req_items[req_item.item_id] = Storage(req_item.as_dict())
req_item_ids.append(req_item.item_id)
return unique_req_items
| [
"def",
"req_items_for_inv",
"(",
"site_id",
",",
"quantity_type",
")",
":",
"if",
"(",
"not",
"settings",
".",
"has_module",
"(",
"'req'",
")",
")",
":",
"return",
"Storage",
"(",
")",
"table",
"=",
"s3db",
".",
"req_req",
"itable",
"=",
"s3db",
".",
"... | used by recv_process & send_process returns a dict of unique req items key = item_id . | train | false |
19,230 | def str_distance(a, b):
(n, m) = (len(a), len(b))
if (n > m):
(a, b) = (b, a)
(n, m) = (m, n)
current = range((n + 1))
for i in range(1, (m + 1)):
(previous, current) = (current, ([i] + ([0] * n)))
for j in range(1, (n + 1)):
(add, delete) = ((previous[j] + 1), (current[(j - 1)] + 1))
change = previous[(j - 1)]
if (a[(j - 1)] != b[(i - 1)]):
change += 1
current[j] = min(add, delete, change)
return current[n]
| [
"def",
"str_distance",
"(",
"a",
",",
"b",
")",
":",
"(",
"n",
",",
"m",
")",
"=",
"(",
"len",
"(",
"a",
")",
",",
"len",
"(",
"b",
")",
")",
"if",
"(",
"n",
">",
"m",
")",
":",
"(",
"a",
",",
"b",
")",
"=",
"(",
"b",
",",
"a",
")",... | calculates the levenshtein distance between a and b . | train | true |
19,231 | def line_collection_2d_to_3d(col, zs=0, zdir=u'z'):
segments3d = paths_to_3d_segments(col.get_paths(), zs, zdir)
col.__class__ = Line3DCollection
col.set_segments(segments3d)
| [
"def",
"line_collection_2d_to_3d",
"(",
"col",
",",
"zs",
"=",
"0",
",",
"zdir",
"=",
"u'z'",
")",
":",
"segments3d",
"=",
"paths_to_3d_segments",
"(",
"col",
".",
"get_paths",
"(",
")",
",",
"zs",
",",
"zdir",
")",
"col",
".",
"__class__",
"=",
"Line3... | convert a linecollection to a line3dcollection object . | train | false |
19,232 | def load_grammar(gt='Grammar.txt', gp=None, save=True, force=False, logger=None):
if (logger is None):
logger = logging.getLogger()
if (gp is None):
(head, tail) = os.path.splitext(gt)
if (tail == '.txt'):
tail = ''
gp = (((head + tail) + '.'.join(map(str, sys.version_info))) + '.pickle')
if (force or (not _newer(gp, gt))):
logger.info('Generating grammar tables from %s', gt)
g = pgen.generate_grammar(gt)
if save:
logger.info('Writing grammar tables to %s', gp)
try:
g.dump(gp)
except IOError as e:
logger.info(('Writing failed:' + str(e)))
else:
g = grammar.Grammar()
g.load(gp)
return g
| [
"def",
"load_grammar",
"(",
"gt",
"=",
"'Grammar.txt'",
",",
"gp",
"=",
"None",
",",
"save",
"=",
"True",
",",
"force",
"=",
"False",
",",
"logger",
"=",
"None",
")",
":",
"if",
"(",
"logger",
"is",
"None",
")",
":",
"logger",
"=",
"logging",
".",
... | load the grammar . | train | true |
19,234 | def file_move_safe(old_file_name, new_file_name, chunk_size=(1024 * 64), allow_overwrite=False):
if _samefile(old_file_name, new_file_name):
return
try:
os.rename(old_file_name, new_file_name)
return
except OSError:
pass
old_file = open(old_file_name, 'rb')
try:
fd = os.open(new_file_name, (((os.O_WRONLY | os.O_CREAT) | getattr(os, 'O_BINARY', 0)) | (((not allow_overwrite) and os.O_EXCL) or 0)))
try:
locks.lock(fd, locks.LOCK_EX)
current_chunk = None
while (current_chunk != ''):
current_chunk = old_file.read(chunk_size)
os.write(fd, current_chunk)
finally:
locks.unlock(fd)
os.close(fd)
finally:
old_file.close()
copystat(old_file_name, new_file_name)
try:
os.remove(old_file_name)
except OSError as e:
if ((getattr(e, 'winerror', 0) != 32) and (getattr(e, 'errno', 0) != 13)):
raise
| [
"def",
"file_move_safe",
"(",
"old_file_name",
",",
"new_file_name",
",",
"chunk_size",
"=",
"(",
"1024",
"*",
"64",
")",
",",
"allow_overwrite",
"=",
"False",
")",
":",
"if",
"_samefile",
"(",
"old_file_name",
",",
"new_file_name",
")",
":",
"return",
"try"... | moves a file from one location to another in the safest way possible . | train | false |
19,236 | def _urljoin(base, url):
parsed = urlparse(base)
scheme = parsed.scheme
return urlparse(urljoin(parsed._replace(scheme='http').geturl(), url))._replace(scheme=scheme).geturl()
| [
"def",
"_urljoin",
"(",
"base",
",",
"url",
")",
":",
"parsed",
"=",
"urlparse",
"(",
"base",
")",
"scheme",
"=",
"parsed",
".",
"scheme",
"return",
"urlparse",
"(",
"urljoin",
"(",
"parsed",
".",
"_replace",
"(",
"scheme",
"=",
"'http'",
")",
".",
"... | construct a full url by combining a "base url" with another url . | train | true |
19,238 | def advance_rest(clock):
clock.advance((EXPIRATION_TIME.total_seconds() - 1))
| [
"def",
"advance_rest",
"(",
"clock",
")",
":",
"clock",
".",
"advance",
"(",
"(",
"EXPIRATION_TIME",
".",
"total_seconds",
"(",
")",
"-",
"1",
")",
")"
] | move the clock forward by a lot of time . | train | false |
19,239 | def get_updated_cache_time():
cache_mtime = get_cache_mtime()
mtimestamp = datetime.datetime.fromtimestamp(cache_mtime)
updated_cache_time = int(time.mktime(mtimestamp.timetuple()))
return (mtimestamp, updated_cache_time)
| [
"def",
"get_updated_cache_time",
"(",
")",
":",
"cache_mtime",
"=",
"get_cache_mtime",
"(",
")",
"mtimestamp",
"=",
"datetime",
".",
"datetime",
".",
"fromtimestamp",
"(",
"cache_mtime",
")",
"updated_cache_time",
"=",
"int",
"(",
"time",
".",
"mktime",
"(",
"... | return the mtime time stamp and the updated cache time . | train | false |
19,240 | def public_factory(target, location):
if isinstance(target, type):
fn = target.__init__
callable_ = target
doc = ('Construct a new :class:`.%s` object. \n\nThis constructor is mirrored as a public API function; see :func:`~%s` for a full usage and argument description.' % (target.__name__, location))
else:
fn = callable_ = target
doc = ('This function is mirrored; see :func:`~%s` for a description of arguments.' % location)
location_name = location.split('.')[(-1)]
spec = compat.inspect_getfullargspec(fn)
del spec[0][0]
metadata = format_argspec_plus(spec, grouped=False)
metadata['name'] = location_name
code = ('def %(name)s(%(args)s):\n return cls(%(apply_kw)s)\n' % metadata)
env = {'cls': callable_, 'symbol': symbol}
exec code in env
decorated = env[location_name]
decorated.__doc__ = fn.__doc__
decorated.__module__ = ('sqlalchemy' + location.rsplit('.', 1)[0])
if (compat.py2k or hasattr(fn, '__func__')):
fn.__func__.__doc__ = doc
else:
fn.__doc__ = doc
return decorated
| [
"def",
"public_factory",
"(",
"target",
",",
"location",
")",
":",
"if",
"isinstance",
"(",
"target",
",",
"type",
")",
":",
"fn",
"=",
"target",
".",
"__init__",
"callable_",
"=",
"target",
"doc",
"=",
"(",
"'Construct a new :class:`.%s` object. \\n\\nThis cons... | produce a wrapping function for the given cls or classmethod . | train | false |
19,241 | def systemInformationType2():
a = L2PseudoLength(l2pLength=22)
b = TpPd(pd=6)
c = MessageType(mesType=26)
d = NeighbourCellsDescription()
e = NccPermitted()
f = RachControlParameters()
packet = (((((a / b) / c) / d) / e) / f)
return packet
| [
"def",
"systemInformationType2",
"(",
")",
":",
"a",
"=",
"L2PseudoLength",
"(",
"l2pLength",
"=",
"22",
")",
"b",
"=",
"TpPd",
"(",
"pd",
"=",
"6",
")",
"c",
"=",
"MessageType",
"(",
"mesType",
"=",
"26",
")",
"d",
"=",
"NeighbourCellsDescription",
"(... | system information type 2 section 9 . | train | true |
19,242 | def transaction_teardown_request(error=None):
if view_has_annotation(NO_AUTO_TRANSACTION_ATTR):
return
if ((error is not None) and current_atomic):
current_atomic.__exit__(error.__class__, error, None)
| [
"def",
"transaction_teardown_request",
"(",
"error",
"=",
"None",
")",
":",
"if",
"view_has_annotation",
"(",
"NO_AUTO_TRANSACTION_ATTR",
")",
":",
"return",
"if",
"(",
"(",
"error",
"is",
"not",
"None",
")",
"and",
"current_atomic",
")",
":",
"current_atomic",
... | rollback transaction on uncaught error . | train | false |
19,244 | def is_scriptable(application):
if os.path.isdir(application):
plistfile = os.path.join(application, 'Contents', 'Info.plist')
if (not os.path.exists(plistfile)):
return False
plist = plistlib.Plist.fromFile(plistfile)
return plist.get('NSAppleScriptEnabled', False)
currf = CurResFile()
try:
refno = macresource.open_pathname(application)
except MacOS.Error:
return False
UseResFile(refno)
n_terminology = (((Count1Resources('aete') + Count1Resources('aeut')) + Count1Resources('scsz')) + Count1Resources('osiz'))
CloseResFile(refno)
UseResFile(currf)
return (n_terminology > 0)
| [
"def",
"is_scriptable",
"(",
"application",
")",
":",
"if",
"os",
".",
"path",
".",
"isdir",
"(",
"application",
")",
":",
"plistfile",
"=",
"os",
".",
"path",
".",
"join",
"(",
"application",
",",
"'Contents'",
",",
"'Info.plist'",
")",
"if",
"(",
"no... | return true if the application is scriptable . | train | false |
19,246 | def get_example_data(fname):
datadir = os.path.join(get_data_path(), 'example')
fullpath = os.path.join(datadir, fname)
if (not os.path.exists(fullpath)):
raise IOError(('could not find matplotlib example file "%s" in data directory "%s"' % (fname, datadir)))
return file(fullpath, 'rb')
| [
"def",
"get_example_data",
"(",
"fname",
")",
":",
"datadir",
"=",
"os",
".",
"path",
".",
"join",
"(",
"get_data_path",
"(",
")",
",",
"'example'",
")",
"fullpath",
"=",
"os",
".",
"path",
".",
"join",
"(",
"datadir",
",",
"fname",
")",
"if",
"(",
... | return a filehandle to one of the example files in mpl-data/example *fname* the name of one of the files in mpl-data/example . | train | false |
19,247 | def get_gssapi_token(principal, host, domain):
if (not HAS_GSSAPI):
raise ImportError('The gssapi library is not imported.')
service = '{0}/{1}@{2}'.format(principal, host, domain)
log.debug('Retrieving gsspi token for service {0}'.format(service))
service_name = gssapi.Name(service, gssapi.C_NT_USER_NAME)
ctx = gssapi.InitContext(service_name)
in_token = None
while (not ctx.established):
out_token = ctx.step(in_token)
if out_token:
encoded_token = base64.b64encode(out_token)
return encoded_token
if ctx.established:
break
if (not in_token):
raise salt.exceptions.CommandExecutionError("Can't receive token, no response from server")
raise salt.exceptions.CommandExecutionError("Context established, but didn't receive token")
| [
"def",
"get_gssapi_token",
"(",
"principal",
",",
"host",
",",
"domain",
")",
":",
"if",
"(",
"not",
"HAS_GSSAPI",
")",
":",
"raise",
"ImportError",
"(",
"'The gssapi library is not imported.'",
")",
"service",
"=",
"'{0}/{1}@{2}'",
".",
"format",
"(",
"principa... | get the gssapi token for kerberos connection principal the service principal host host url where we would like to authenticate domain kerberos user domain . | train | true |
19,249 | @pytest.fixture(autouse=True)
def unpin_db(request):
request.addfinalizer(pinning.unpin_this_thread)
| [
"@",
"pytest",
".",
"fixture",
"(",
"autouse",
"=",
"True",
")",
"def",
"unpin_db",
"(",
"request",
")",
":",
"request",
".",
"addfinalizer",
"(",
"pinning",
".",
"unpin_this_thread",
")"
] | unpin the database from master in the current db . | train | false |
19,251 | def get_numpy_dtype(obj):
if (ndarray is not FakeObject):
import numpy as np
if (isinstance(obj, np.generic) or isinstance(obj, np.ndarray)):
try:
return obj.dtype.type
except (AttributeError, RuntimeError):
return
| [
"def",
"get_numpy_dtype",
"(",
"obj",
")",
":",
"if",
"(",
"ndarray",
"is",
"not",
"FakeObject",
")",
":",
"import",
"numpy",
"as",
"np",
"if",
"(",
"isinstance",
"(",
"obj",
",",
"np",
".",
"generic",
")",
"or",
"isinstance",
"(",
"obj",
",",
"np",
... | return numpy data type associated to obj return none if numpy is not available or if obj is not a numpy array or scalar . | train | true |
19,252 | def make_character(info, value, in_set=False):
if in_set:
return Character(value)
return Character(value, case_flags=make_case_flags(info))
| [
"def",
"make_character",
"(",
"info",
",",
"value",
",",
"in_set",
"=",
"False",
")",
":",
"if",
"in_set",
":",
"return",
"Character",
"(",
"value",
")",
"return",
"Character",
"(",
"value",
",",
"case_flags",
"=",
"make_case_flags",
"(",
"info",
")",
")... | makes a character literal . | train | false |
19,253 | def get_pem_entries(glob_path):
ret = {}
for path in glob.glob(glob_path):
if os.path.isfile(path):
try:
ret[path] = get_pem_entry(text=path)
except ValueError:
pass
return ret
| [
"def",
"get_pem_entries",
"(",
"glob_path",
")",
":",
"ret",
"=",
"{",
"}",
"for",
"path",
"in",
"glob",
".",
"glob",
"(",
"glob_path",
")",
":",
"if",
"os",
".",
"path",
".",
"isfile",
"(",
"path",
")",
":",
"try",
":",
"ret",
"[",
"path",
"]",
... | returns a dict containing pem entries in files matching a glob glob_path: a path to certificates to be read and returned . | train | false |
19,254 | def read_raw_brainvision(vhdr_fname, montage=None, eog=('HEOGL', 'HEOGR', 'VEOGb'), misc='auto', scale=1.0, preload=False, response_trig_shift=0, event_id=None, verbose=None):
return RawBrainVision(vhdr_fname=vhdr_fname, montage=montage, eog=eog, misc=misc, scale=scale, preload=preload, response_trig_shift=response_trig_shift, event_id=event_id, verbose=verbose)
| [
"def",
"read_raw_brainvision",
"(",
"vhdr_fname",
",",
"montage",
"=",
"None",
",",
"eog",
"=",
"(",
"'HEOGL'",
",",
"'HEOGR'",
",",
"'VEOGb'",
")",
",",
"misc",
"=",
"'auto'",
",",
"scale",
"=",
"1.0",
",",
"preload",
"=",
"False",
",",
"response_trig_s... | reader for brain vision eeg file . | train | false |
19,255 | def upload_template(filename, destination, context=None, use_jinja=False, template_dir=None, use_sudo=False, backup=True, mirror_local_mode=False, mode=None, pty=None, keep_trailing_newline=False, temp_dir=''):
func = ((use_sudo and sudo) or run)
if (pty is not None):
func = partial(func, pty=pty)
with settings(hide('everything'), warn_only=True):
if func(('test -d %s' % _expand_path(destination))).succeeded:
sep = ('' if destination.endswith('/') else '/')
destination += (sep + os.path.basename(filename))
if (mirror_local_mode and (mode is None)):
mode = os.stat(apply_lcwd(filename, env)).st_mode
mirror_local_mode = False
text = None
if use_jinja:
try:
template_dir = (template_dir or os.getcwd())
template_dir = apply_lcwd(template_dir, env)
from jinja2 import Environment, FileSystemLoader
jenv = Environment(loader=FileSystemLoader(template_dir), keep_trailing_newline=keep_trailing_newline)
text = jenv.get_template(filename).render(**(context or {}))
text = text.encode('utf-8')
except ImportError:
import traceback
tb = traceback.format_exc()
abort((tb + '\nUnable to import Jinja2 -- see above.'))
else:
if template_dir:
filename = os.path.join(template_dir, filename)
filename = apply_lcwd(filename, env)
with open(os.path.expanduser(filename)) as inputfile:
text = inputfile.read()
if context:
text = (text % context)
if (backup and exists(destination)):
func(('cp %s{,.bak}' % _expand_path(destination)))
return put(local_path=StringIO(text), remote_path=destination, use_sudo=use_sudo, mirror_local_mode=mirror_local_mode, mode=mode, temp_dir=temp_dir)
| [
"def",
"upload_template",
"(",
"filename",
",",
"destination",
",",
"context",
"=",
"None",
",",
"use_jinja",
"=",
"False",
",",
"template_dir",
"=",
"None",
",",
"use_sudo",
"=",
"False",
",",
"backup",
"=",
"True",
",",
"mirror_local_mode",
"=",
"False",
... | upload a template file . | train | false |
19,256 | def update_language_names():
with open(frappe.get_app_path(u'frappe', u'geo', u'languages.json'), u'r') as f:
data = json.loads(f.read())
for l in data:
frappe.db.set_value(u'Language', l[u'code'], u'language_name', l[u'name'])
| [
"def",
"update_language_names",
"(",
")",
":",
"with",
"open",
"(",
"frappe",
".",
"get_app_path",
"(",
"u'frappe'",
",",
"u'geo'",
",",
"u'languages.json'",
")",
",",
"u'r'",
")",
"as",
"f",
":",
"data",
"=",
"json",
".",
"loads",
"(",
"f",
".",
"read... | update frappe/geo/languages . | train | false |
19,257 | def test_locale_html_xss():
testfield = Mock()
testfield.locale = '<script>alert(1)</script>'
s = helpers.locale_html(testfield)
assert ('<script>' not in s)
assert ('<script>alert(1)</script>' in s)
| [
"def",
"test_locale_html_xss",
"(",
")",
":",
"testfield",
"=",
"Mock",
"(",
")",
"testfield",
".",
"locale",
"=",
"'<script>alert(1)</script>'",
"s",
"=",
"helpers",
".",
"locale_html",
"(",
"testfield",
")",
"assert",
"(",
"'<script>'",
"not",
"in",
"s",
"... | test for nastiness-removal in the transfields locale . | train | false |
19,258 | def writeOutputFileHeader(output_file, session_metadata_columns, log_entry_names):
allcols = ((session_metadata_columns + log_entry_names) + session_uservar_columns)
output_file.write(' DCTB '.join(allcols))
output_file.write('\n')
| [
"def",
"writeOutputFileHeader",
"(",
"output_file",
",",
"session_metadata_columns",
",",
"log_entry_names",
")",
":",
"allcols",
"=",
"(",
"(",
"session_metadata_columns",
"+",
"log_entry_names",
")",
"+",
"session_uservar_columns",
")",
"output_file",
".",
"write",
... | writes the header line at the top of the log file . | train | false |
19,259 | def datastore_fields(resource, valid_field_types):
data = {'resource_id': resource['id'], 'limit': 0}
fields = toolkit.get_action('datastore_search')({}, data)['fields']
return [{'value': f['id'], 'text': f['id']} for f in fields if (f['type'] in valid_field_types)]
| [
"def",
"datastore_fields",
"(",
"resource",
",",
"valid_field_types",
")",
":",
"data",
"=",
"{",
"'resource_id'",
":",
"resource",
"[",
"'id'",
"]",
",",
"'limit'",
":",
"0",
"}",
"fields",
"=",
"toolkit",
".",
"get_action",
"(",
"'datastore_search'",
")",
... | return a list of all datastore fields for a given resource . | train | false |
19,260 | def reflection_from_matrix(matrix):
M = numpy.array(matrix, dtype=numpy.float64, copy=False)
(w, V) = numpy.linalg.eig(M[:3, :3])
i = numpy.where((abs((numpy.real(w) + 1.0)) < 1e-08))[0]
if (not len(i)):
raise ValueError('no unit eigenvector corresponding to eigenvalue -1')
normal = numpy.real(V[:, i[0]]).squeeze()
(w, V) = numpy.linalg.eig(M)
i = numpy.where((abs((numpy.real(w) - 1.0)) < 1e-08))[0]
if (not len(i)):
raise ValueError('no unit eigenvector corresponding to eigenvalue 1')
point = numpy.real(V[:, i[(-1)]]).squeeze()
point /= point[3]
return (point, normal)
| [
"def",
"reflection_from_matrix",
"(",
"matrix",
")",
":",
"M",
"=",
"numpy",
".",
"array",
"(",
"matrix",
",",
"dtype",
"=",
"numpy",
".",
"float64",
",",
"copy",
"=",
"False",
")",
"(",
"w",
",",
"V",
")",
"=",
"numpy",
".",
"linalg",
".",
"eig",
... | return mirror plane point and normal vector from reflection matrix . | train | true |
19,261 | def unicode(s):
if PY3:
return s
else:
return __builtin__.unicode(s, 'utf-8')
| [
"def",
"unicode",
"(",
"s",
")",
":",
"if",
"PY3",
":",
"return",
"s",
"else",
":",
"return",
"__builtin__",
".",
"unicode",
"(",
"s",
",",
"'utf-8'",
")"
] | force conversion of s to unicode . | train | false |
19,262 | def _xml_escape(data):
from_symbols = '&><"\''
to_symbols = ((('&' + s) + ';') for s in 'amp gt lt quot apos'.split())
for (from_, to_) in zip(from_symbols, to_symbols):
data = data.replace(from_, to_)
return data
| [
"def",
"_xml_escape",
"(",
"data",
")",
":",
"from_symbols",
"=",
"'&><\"\\''",
"to_symbols",
"=",
"(",
"(",
"(",
"'&'",
"+",
"s",
")",
"+",
"';'",
")",
"for",
"s",
"in",
"'amp gt lt quot apos'",
".",
"split",
"(",
")",
")",
"for",
"(",
"from_",
",",... | escape & . | train | true |
19,266 | def test_oldstyle_getattr():
class C:
def __getattr__(self, name):
return globals()[name]
a = C()
| [
"def",
"test_oldstyle_getattr",
"(",
")",
":",
"class",
"C",
":",
"def",
"__getattr__",
"(",
"self",
",",
"name",
")",
":",
"return",
"globals",
"(",
")",
"[",
"name",
"]",
"a",
"=",
"C",
"(",
")"
] | verify we dont access __getattr__ while creating an old style class . | train | false |
19,267 | def load_configuration(arg_list, log_printer, arg_parser=None):
cli_sections = parse_cli(arg_list=arg_list, arg_parser=arg_parser)
check_conflicts(cli_sections)
if (bool(cli_sections['default'].get('find_config', 'False')) and (str(cli_sections['default'].get('config')) == '')):
cli_sections['default'].add_or_create_setting(Setting('config', re.escape(find_user_config(os.getcwd()))))
targets = []
for item in list(cli_sections['default'].contents.pop('targets', '')):
targets.append(item.lower())
if bool(cli_sections['default'].get('no_config', 'False')):
sections = cli_sections
else:
base_sections = load_config_file(Constants.system_coafile, log_printer)
user_sections = load_config_file(Constants.user_coafile, log_printer, silent=True)
default_config = str(base_sections['default'].get('config', '.coafile'))
user_config = str(user_sections['default'].get('config', default_config))
config = os.path.abspath(str(cli_sections['default'].get('config', user_config)))
try:
save = bool(cli_sections['default'].get('save', 'False'))
except ValueError:
save = True
coafile_sections = load_config_file(config, log_printer, silent=save)
sections = merge_section_dicts(base_sections, user_sections)
sections = merge_section_dicts(sections, coafile_sections)
sections = merge_section_dicts(sections, cli_sections)
for section in sections:
if (section != 'default'):
sections[section].defaults = sections['default']
str_log_level = str(sections['default'].get('log_level', '')).upper()
log_printer.log_level = LOG_LEVEL.str_dict.get(str_log_level, LOG_LEVEL.INFO)
return (sections, targets)
| [
"def",
"load_configuration",
"(",
"arg_list",
",",
"log_printer",
",",
"arg_parser",
"=",
"None",
")",
":",
"cli_sections",
"=",
"parse_cli",
"(",
"arg_list",
"=",
"arg_list",
",",
"arg_parser",
"=",
"arg_parser",
")",
"check_conflicts",
"(",
"cli_sections",
")"... | load a yaml rule file and fill in the relevant fields with objects . | train | false |
19,268 | def ratsimp(expr):
(f, g) = cancel(expr).as_numer_denom()
try:
(Q, r) = reduced(f, [g], field=True, expand=False)
except ComputationFailed:
return (f / g)
return (Add(*Q) + cancel((r / g)))
| [
"def",
"ratsimp",
"(",
"expr",
")",
":",
"(",
"f",
",",
"g",
")",
"=",
"cancel",
"(",
"expr",
")",
".",
"as_numer_denom",
"(",
")",
"try",
":",
"(",
"Q",
",",
"r",
")",
"=",
"reduced",
"(",
"f",
",",
"[",
"g",
"]",
",",
"field",
"=",
"True"... | put an expression over a common denominator . | train | false |
19,270 | def check_lowercase_bucketname(n):
if (not (n + 'a').islower()):
raise BotoClientError('Bucket names cannot contain upper-case characters when using either the sub-domain or virtual hosting calling format.')
return True
| [
"def",
"check_lowercase_bucketname",
"(",
"n",
")",
":",
"if",
"(",
"not",
"(",
"n",
"+",
"'a'",
")",
".",
"islower",
"(",
")",
")",
":",
"raise",
"BotoClientError",
"(",
"'Bucket names cannot contain upper-case characters when using either the sub-domain or virtual hos... | bucket names must not contain uppercase characters . | train | false |
19,271 | def get_ipython_cmd(as_string=False):
ipython_cmd = [sys.executable, '-m', 'IPython']
if as_string:
ipython_cmd = ' '.join(ipython_cmd)
return ipython_cmd
| [
"def",
"get_ipython_cmd",
"(",
"as_string",
"=",
"False",
")",
":",
"ipython_cmd",
"=",
"[",
"sys",
".",
"executable",
",",
"'-m'",
",",
"'IPython'",
"]",
"if",
"as_string",
":",
"ipython_cmd",
"=",
"' '",
".",
"join",
"(",
"ipython_cmd",
")",
"return",
... | return appropriate ipython command line name . | train | false |
19,273 | def sz_margin_details(date='', retry_count=3, pause=0.001):
for _ in range(retry_count):
time.sleep(pause)
try:
request = Request((rv.MAR_SZ_MX_URL % (ct.P_TYPE['http'], ct.DOMAINS['szse'], ct.PAGES['szsefc'], date)))
lines = urlopen(request, timeout=10).read()
if (len(lines) <= 200):
return pd.DataFrame()
df = pd.read_html(lines, skiprows=[0])[0]
df.columns = rv.MAR_SZ_MX_COLS
df['stockCode'] = df['stockCode'].map((lambda x: str(x).zfill(6)))
df['opDate'] = date
except Exception as e:
print e
else:
return df
raise IOError(ct.NETWORK_URL_ERROR_MSG)
| [
"def",
"sz_margin_details",
"(",
"date",
"=",
"''",
",",
"retry_count",
"=",
"3",
",",
"pause",
"=",
"0.001",
")",
":",
"for",
"_",
"in",
"range",
"(",
"retry_count",
")",
":",
"time",
".",
"sleep",
"(",
"pause",
")",
"try",
":",
"request",
"=",
"R... | parameters date:string 明细数据日期 format:yyyy-mm-dd 默认为空 retry_count : int . | train | false |
19,274 | def _ancestors_to_call(klass_node, method='__init__'):
to_call = {}
for base_node in klass_node.ancestors(recurs=False):
try:
to_call[base_node] = next(base_node.igetattr(method))
except astroid.InferenceError:
continue
return to_call
| [
"def",
"_ancestors_to_call",
"(",
"klass_node",
",",
"method",
"=",
"'__init__'",
")",
":",
"to_call",
"=",
"{",
"}",
"for",
"base_node",
"in",
"klass_node",
".",
"ancestors",
"(",
"recurs",
"=",
"False",
")",
":",
"try",
":",
"to_call",
"[",
"base_node",
... | return a dictionary where keys are the list of base classes providing the queried method . | train | true |
19,276 | def _find_yaml_path():
(current, last) = (os.getcwd(), None)
while (current != last):
for yaml_name in FILENAMES:
yaml_path = os.path.join(current, yaml_name)
if os.path.exists(yaml_path):
return yaml_path
last = current
(current, last) = (os.path.dirname(current), current)
return None
| [
"def",
"_find_yaml_path",
"(",
")",
":",
"(",
"current",
",",
"last",
")",
"=",
"(",
"os",
".",
"getcwd",
"(",
")",
",",
"None",
")",
"while",
"(",
"current",
"!=",
"last",
")",
":",
"for",
"yaml_name",
"in",
"FILENAMES",
":",
"yaml_path",
"=",
"os... | traverse directory trees to find conf . | train | false |
19,277 | def setExtendedPoint(lineSegmentEnd, pointOriginal, x):
if ((x > min(lineSegmentEnd.point.real, pointOriginal.real)) and (x < max(lineSegmentEnd.point.real, pointOriginal.real))):
lineSegmentEnd.point = complex(x, pointOriginal.imag)
| [
"def",
"setExtendedPoint",
"(",
"lineSegmentEnd",
",",
"pointOriginal",
",",
"x",
")",
":",
"if",
"(",
"(",
"x",
">",
"min",
"(",
"lineSegmentEnd",
".",
"point",
".",
"real",
",",
"pointOriginal",
".",
"real",
")",
")",
"and",
"(",
"x",
"<",
"max",
"... | set the point in the extended line segment . | train | false |
19,278 | def create_bootstrap_script(extra_text, python_version=''):
filename = __file__
if filename.endswith('.pyc'):
filename = filename[:(-1)]
with codecs.open(filename, 'r', encoding='utf-8') as f:
content = f.read()
py_exe = ('python%s' % python_version)
content = ((('#!/usr/bin/env %s\n' % py_exe) + '## WARNING: This file is generated\n') + content)
return content.replace('##EXTEND##', extra_text)
| [
"def",
"create_bootstrap_script",
"(",
"extra_text",
",",
"python_version",
"=",
"''",
")",
":",
"filename",
"=",
"__file__",
"if",
"filename",
".",
"endswith",
"(",
"'.pyc'",
")",
":",
"filename",
"=",
"filename",
"[",
":",
"(",
"-",
"1",
")",
"]",
"wit... | creates a bootstrap script . | train | true |
19,279 | def _parse_ipv6(a):
(l, _, r) = a.partition('::')
l_groups = list(chain(*[divmod(int(x, 16), 256) for x in l.split(':') if x]))
r_groups = list(chain(*[divmod(int(x, 16), 256) for x in r.split(':') if x]))
zeros = ([0] * ((16 - len(l_groups)) - len(r_groups)))
return tuple(((l_groups + zeros) + r_groups))
| [
"def",
"_parse_ipv6",
"(",
"a",
")",
":",
"(",
"l",
",",
"_",
",",
"r",
")",
"=",
"a",
".",
"partition",
"(",
"'::'",
")",
"l_groups",
"=",
"list",
"(",
"chain",
"(",
"*",
"[",
"divmod",
"(",
"int",
"(",
"x",
",",
"16",
")",
",",
"256",
")"... | parse ipv6 address . | train | false |
19,281 | def _build_config_tree(name, configuration):
(type_, id_, options) = _get_type_id_options(name, configuration)
global _INDENT, _current_statement
_INDENT = ''
if (type_ == 'config'):
_current_statement = GivenStatement(options)
elif (type_ == 'log'):
_current_statement = UnnamedStatement(type='log')
_parse_log_statement(options)
else:
if _is_statement_unnamed(type_):
_current_statement = UnnamedStatement(type=type_)
else:
_current_statement = NamedStatement(type=type_, id=id_)
_parse_statement(options)
| [
"def",
"_build_config_tree",
"(",
"name",
",",
"configuration",
")",
":",
"(",
"type_",
",",
"id_",
",",
"options",
")",
"=",
"_get_type_id_options",
"(",
"name",
",",
"configuration",
")",
"global",
"_INDENT",
",",
"_current_statement",
"_INDENT",
"=",
"''",
... | build the configuration tree . | train | true |
19,282 | def _MakeDispatchListIntoYaml(application, dispatch_list):
statements = [('application: %s' % application), 'dispatch:']
for entry in dispatch_list:
statements += entry.ToYaml()
return ('\n'.join(statements) + '\n')
| [
"def",
"_MakeDispatchListIntoYaml",
"(",
"application",
",",
"dispatch_list",
")",
":",
"statements",
"=",
"[",
"(",
"'application: %s'",
"%",
"application",
")",
",",
"'dispatch:'",
"]",
"for",
"entry",
"in",
"dispatch_list",
":",
"statements",
"+=",
"entry",
"... | converts list of dispatchentry objects into a yaml string . | train | false |
19,283 | def load_pyopenssl_private_key(*names):
loader = _guess_loader(names[(-1)], OpenSSL.crypto.FILETYPE_PEM, OpenSSL.crypto.FILETYPE_ASN1)
return OpenSSL.crypto.load_privatekey(loader, load_vector(*names))
| [
"def",
"load_pyopenssl_private_key",
"(",
"*",
"names",
")",
":",
"loader",
"=",
"_guess_loader",
"(",
"names",
"[",
"(",
"-",
"1",
")",
"]",
",",
"OpenSSL",
".",
"crypto",
".",
"FILETYPE_PEM",
",",
"OpenSSL",
".",
"crypto",
".",
"FILETYPE_ASN1",
")",
"r... | load pyopenssl private key . | train | false |
19,284 | def findlinestarts(code):
byte_increments = [ord(c) for c in code.co_lnotab[0::2]]
line_increments = [ord(c) for c in code.co_lnotab[1::2]]
lastlineno = None
lineno = code.co_firstlineno
addr = 0
for (byte_incr, line_incr) in zip(byte_increments, line_increments):
if byte_incr:
if (lineno != lastlineno):
(yield (addr, lineno))
lastlineno = lineno
addr += byte_incr
lineno += line_incr
if (lineno != lastlineno):
(yield (addr, lineno))
| [
"def",
"findlinestarts",
"(",
"code",
")",
":",
"byte_increments",
"=",
"[",
"ord",
"(",
"c",
")",
"for",
"c",
"in",
"code",
".",
"co_lnotab",
"[",
"0",
":",
":",
"2",
"]",
"]",
"line_increments",
"=",
"[",
"ord",
"(",
"c",
")",
"for",
"c",
"in",... | find the offsets in a byte code which are start of lines in the source . | train | true |
19,286 | def _genLoggingFilePath():
appName = (os.path.splitext(os.path.basename(sys.argv[0]))[0] or 'UnknownApp')
appLogDir = os.path.abspath(os.path.join(os.environ['NTA_LOG_DIR'], ('numenta-logs-%s' % (os.environ['USER'],)), appName))
appLogFileName = ('%s-%s-%s.log' % (appName, long(time.mktime(time.gmtime())), os.getpid()))
return os.path.join(appLogDir, appLogFileName)
| [
"def",
"_genLoggingFilePath",
"(",
")",
":",
"appName",
"=",
"(",
"os",
".",
"path",
".",
"splitext",
"(",
"os",
".",
"path",
".",
"basename",
"(",
"sys",
".",
"argv",
"[",
"0",
"]",
")",
")",
"[",
"0",
"]",
"or",
"'UnknownApp'",
")",
"appLogDir",
... | generate a filepath for the calling app . | train | true |
19,287 | def load_lua(filename, **kwargs):
with open(filename, 'rb') as f:
reader = T7Reader(f, **kwargs)
return reader.read()
| [
"def",
"load_lua",
"(",
"filename",
",",
"**",
"kwargs",
")",
":",
"with",
"open",
"(",
"filename",
",",
"'rb'",
")",
"as",
"f",
":",
"reader",
"=",
"T7Reader",
"(",
"f",
",",
"**",
"kwargs",
")",
"return",
"reader",
".",
"read",
"(",
")"
] | loads the given t7 file using default settings; kwargs are forwarded to t7reader . | train | false |
19,289 | @pytest.mark.parametrize('parallel', [True, False])
def test_include_names(parallel, read_basic):
table = read_basic('A B C D\n1 2 3 4\n5 6 7 8', include_names=['A', 'D'], parallel=parallel)
expected = Table([[1, 5], [4, 8]], names=('A', 'D'))
assert_table_equal(table, expected)
| [
"@",
"pytest",
".",
"mark",
".",
"parametrize",
"(",
"'parallel'",
",",
"[",
"True",
",",
"False",
"]",
")",
"def",
"test_include_names",
"(",
"parallel",
",",
"read_basic",
")",
":",
"table",
"=",
"read_basic",
"(",
"'A B C D\\n1 2 3 4\\n5 6 7 8'",
",",
"in... | if include_names is not none . | train | false |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.