id_within_dataset int64 1 55.5k | snippet stringlengths 19 14.2k | tokens listlengths 6 1.63k | nl stringlengths 6 352 | split_within_dataset stringclasses 1
value | is_duplicated bool 2
classes |
|---|---|---|---|---|---|
def compile_multiple(sources, options):
    """Compile each given Pyrex implementation file (duplicates skipped)
    and return a CompilationResultSet with one entry per compiled source.
    """
    abs_sources = [os.path.abspath(src) for src in sources]
    seen = set()
    result_set = CompilationResultSet()
    use_timestamps = options.timestamps
    verbose = options.verbose
    context = None
    base_dir = os.getcwd()
    for src in abs_sources:
        if src in seen:
            continue
        if context is None:
            # The context is created lazily and discarded after each actual
            # compilation below, so every compile gets a fresh one.
            context = options.create_context()
        out_name = get_output_filename(src, base_dir, options)
        stale = context.c_file_out_of_date(src, out_name)
        if stale or not use_timestamps:
            if verbose:
                sys.stderr.write('Compiling %s\n' % src)
            outcome = run_pipeline(src, options, context=context)
            result_set.add(src, outcome)
            context = None
        seen.add(src)
    return result_set
| [
"def",
"compile_multiple",
"(",
"sources",
",",
"options",
")",
":",
"sources",
"=",
"[",
"os",
".",
"path",
".",
"abspath",
"(",
"source",
")",
"for",
"source",
"in",
"sources",
"]",
"processed",
"=",
"set",
"(",
")",
"results",
"=",
"CompilationResultS... | compile_multiple compiles the given sequence of pyrex implementation files and returns a compilationresultset . | train | false |
def permutation_matrix(orig_vec, per_vec):
    """Compute the permutation matrix mapping the order of ``orig_vec``
    onto the order of ``per_vec``.

    Non-list/tuple arguments are flattened first; both sequences must
    contain exactly the same symbols.
    """
    if not isinstance(orig_vec, (list, tuple)):
        orig_vec = flatten(orig_vec)
    if not isinstance(per_vec, (list, tuple)):
        per_vec = flatten(per_vec)
    if set(orig_vec) != set(per_vec):
        raise ValueError('orig_vec and per_vec must be the same length, '
                         'and contain the same symbols.')
    # Position of each permuted entry within the original ordering.
    positions = [orig_vec.index(sym) for sym in per_vec]
    p_matrix = zeros(len(orig_vec))
    for row, col in enumerate(positions):
        p_matrix[row, col] = 1
    return p_matrix
| [
"def",
"permutation_matrix",
"(",
"orig_vec",
",",
"per_vec",
")",
":",
"if",
"(",
"not",
"isinstance",
"(",
"orig_vec",
",",
"(",
"list",
",",
"tuple",
")",
")",
")",
":",
"orig_vec",
"=",
"flatten",
"(",
"orig_vec",
")",
"if",
"(",
"not",
"isinstance... | compute the permutation matrix to change order of orig_vec into order of per_vec . | train | false |
def linear_fit_slope(ps):
    """Least-squares slope for single-independent-variable linear regression.

    ``ps`` is a sequence of (x, y) pairs; returns None when the x values
    have zero variance (slope undefined).
    """
    mean_x = ave([x for (x, y) in ps])
    mean_y = ave([y for (x, y) in ps])
    sxy = sum((x - mean_x) * (y - mean_y) for (x, y) in ps)
    sxx = sum((x - mean_x) ** 2 for (x, y) in ps)
    if sxx == 0:
        return None
    return sxy / sxx
| [
"def",
"linear_fit_slope",
"(",
"ps",
")",
":",
"avex",
"=",
"ave",
"(",
"[",
"x",
"for",
"(",
"x",
",",
"y",
")",
"in",
"ps",
"]",
")",
"avey",
"=",
"ave",
"(",
"[",
"y",
"for",
"(",
"x",
",",
"y",
")",
"in",
"ps",
"]",
")",
"sxy",
"=",
... | single-independent-variable linear regression -- least squares method . | train | false |
def modify_hosts(host_filter_data, update_data):
    """Apply ``update_data`` to every host matching ``host_filter_data``."""
    # Validate the requested changes once, before touching any host.
    rpc_utils.check_modify_host(update_data)
    for host in models.Host.query_objects(host_filter_data):
        host.update_object(update_data)
| [
"def",
"modify_hosts",
"(",
"host_filter_data",
",",
"update_data",
")",
":",
"rpc_utils",
".",
"check_modify_host",
"(",
"update_data",
")",
"hosts",
"=",
"models",
".",
"Host",
".",
"query_objects",
"(",
"host_filter_data",
")",
"for",
"host",
"in",
"hosts",
... | modify multiple hosts . | train | false |
def environ(env_key):
    """Return the requested environment variable, or '' when it is unset."""
    value = os.environ.get(env_key)
    return value if value is not None else ''
| [
"def",
"environ",
"(",
"env_key",
")",
":",
"return",
"os",
".",
"environ",
".",
"get",
"(",
"env_key",
",",
"''",
")"
] | return the requested environment variable . | train | false |
def read_sql_table(table_name, con, schema=None, index_col=None, coerce_float=True, parse_dates=None, columns=None, chunksize=None):
    """Read a SQL database table into a DataFrame.

    Requires an SQLAlchemy connectable; raises NotImplementedError for
    anything else and ValueError when the table cannot be reflected/read.
    """
    con = _engine_builder(con)
    if not _is_sqlalchemy_connectable(con):
        raise NotImplementedError('read_sql_table only supported for SQLAlchemy connectable.')
    import sqlalchemy
    from sqlalchemy.schema import MetaData

    meta = MetaData(con, schema=schema)
    try:
        # Reflect only the requested table; views are included.
        meta.reflect(only=[table_name], views=True)
    except sqlalchemy.exc.InvalidRequestError:
        raise ValueError('Table %s not found' % table_name)

    pandas_sql = SQLDatabase(con, meta=meta)
    table = pandas_sql.read_table(
        table_name,
        index_col=index_col,
        coerce_float=coerce_float,
        parse_dates=parse_dates,
        columns=columns,
        chunksize=chunksize,
    )
    if table is None:
        # NOTE(review): the extra ``con`` argument mirrors the original call.
        raise ValueError(('Table %s not found' % table_name), con)
    return table
| [
"def",
"read_sql_table",
"(",
"table_name",
",",
"con",
",",
"schema",
"=",
"None",
",",
"index_col",
"=",
"None",
",",
"coerce_float",
"=",
"True",
",",
"parse_dates",
"=",
"None",
",",
"columns",
"=",
"None",
",",
"chunksize",
"=",
"None",
")",
":",
... | read sql database table into a dataframe . | train | true |
def sliding_window(n, seq):
    """Yield every length-``n`` window of ``seq`` as a tuple.

    Consecutive windows overlap by n-1 items; if ``seq`` holds fewer than
    ``n`` items, nothing is yielded.
    """
    it = iter(seq)
    d = collections.deque(itertools.islice(it, n), n)
    if len(d) != n:
        # Bug fix: the original raised StopIteration here, which PEP 479
        # (Python 3.7+) converts to RuntimeError inside a generator.
        # A plain return ends the generator cleanly with no windows.
        return
    d_append = d.append
    for item in it:
        yield tuple(d)
        d_append(item)
    yield tuple(d)
| [
"def",
"sliding_window",
"(",
"n",
",",
"seq",
")",
":",
"it",
"=",
"iter",
"(",
"seq",
")",
"d",
"=",
"collections",
".",
"deque",
"(",
"itertools",
".",
"islice",
"(",
"it",
",",
"n",
")",
",",
"n",
")",
"if",
"(",
"len",
"(",
"d",
")",
"!=... | a sequence of overlapping subsequences . | train | false |
16,721 | @contextmanager
def _noop_context_manager(obj):
(yield obj)
| [
"@",
"contextmanager",
"def",
"_noop_context_manager",
"(",
"obj",
")",
":",
"(",
"yield",
"obj",
")"
] | context manager that has the same api as closing but does nothing . | train | false |
def test_human_readable():
    """Exercise formatters.human_readable across integers, floats, SI
    prefixes (k, m, micro, n, p), zero and negative values."""
    f = formatters.human_readable
    cases = [
        (1, '1'),
        (1.0, '1'),
        (10, '10'),
        (12.5, '12.5'),
        (1000, '1k'),
        (5000, '5k'),
        (100000, '100k'),
        (1253, '1.253k'),
        (1250, '1.25k'),
        (0.1, '100m'),
        (0.01, '10m'),
        (0.001, '1m'),
        (0.002, '2m'),
        (0.0025, '2.5m'),
        (0.0001, u('100\xc2\xb5')),
        (0.000123, u('123\xc2\xb5')),
        (1e-05, u('10\xc2\xb5')),
        (1e-06, u('1\xc2\xb5')),
        (1e-07, u('100n')),
        (1e-10, u('100p')),
        (0, '0'),
        (0.0, '0'),
        (-1337, '-1.337k'),
        (-4.2e-08, '-42n'),
    ]
    for value, text in cases:
        assert f(value) == text
| [
"def",
"test_human_readable",
"(",
")",
":",
"f",
"=",
"formatters",
".",
"human_readable",
"assert",
"(",
"f",
"(",
"1",
")",
"==",
"'1'",
")",
"assert",
"(",
"f",
"(",
"1.0",
")",
"==",
"'1'",
")",
"assert",
"(",
"f",
"(",
"10",
")",
"==",
"'10... | test human readable option . | train | false |
def refget(objs, level=1):
    """Get the referrers of the objects in ``objs``, chasing the referrer
    graph ``level`` hops up (level=1 means direct referrers).

    The probe sequence itself is filtered out of each hop's result so it
    does not show up as its own referrer.
    """
    for _hop in xrange(level):
        refs = gc.get_referrers(*objs)
        try:
            refs.remove(objs)
        except ValueError:
            pass  # objs was not among its own referrers
        objs = refs
    return refs
| [
"def",
"refget",
"(",
"objs",
",",
"level",
"=",
"1",
")",
":",
"for",
"_",
"in",
"xrange",
"(",
"level",
")",
":",
"refs",
"=",
"gc",
".",
"get_referrers",
"(",
"*",
"objs",
")",
"try",
":",
"refs",
".",
"remove",
"(",
"objs",
")",
"except",
"... | get the referrers to the sequence of objects passed in . | train | false |
def median_grouped(data, interval=1):
    """Median of grouped continuous data, interpolated within the class
    interval that contains the middle value.

    Raises StatisticsError on empty data; a single datum is returned
    as-is.  ``data`` values and ``interval`` must be numeric (str/bytes
    raise TypeError).
    """
    data = sorted(data)
    n = len(data)
    if n == 0:
        raise StatisticsError('no median for empty data')
    if n == 1:
        return data[0]
    # x is the middle value; it identifies the interval the median falls in.
    x = data[n // 2]
    for obj in (x, interval):
        if isinstance(obj, (str, bytes)):
            raise TypeError('expected number but got %r' % obj)
    try:
        L = x - interval / 2  # lower limit of the median interval
    except TypeError:
        # Mixed incompatible types: coerce both operands to float and retry.
        L = float(x) - float(interval) / 2
    l1 = _find_lteq(data, x)       # first index of x in data
    l2 = _find_rteq(data, l1, x)   # last index of x in data
    cf = l1                        # cumulative frequency below the interval
    f = l2 - l1 + 1                # frequency within the interval
    return L + interval * (n / 2 - cf) / f
| [
"def",
"median_grouped",
"(",
"data",
",",
"interval",
"=",
"1",
")",
":",
"data",
"=",
"sorted",
"(",
"data",
")",
"n",
"=",
"len",
"(",
"data",
")",
"if",
"(",
"n",
"==",
"0",
")",
":",
"raise",
"StatisticsError",
"(",
"'no median for empty data'",
... | calculates the grouped mean of the num most recent values . | train | false |
def _format_optdict(optdict, script=False, ignore=None):
    """Format ``optdict`` into a flat tuple of Tk option tokens.

    Every retained key contributes '-key', followed (for non-None values)
    by its formatted value; keys listed in ``ignore`` are skipped.
    """
    formatted = []
    for opt, value in optdict.iteritems():
        if ignore and opt in ignore:
            continue
        formatted.append('-%s' % opt)
        if value is not None:
            formatted.append(_format_optvalue(value, script))
    return _flatten(formatted)
| [
"def",
"_format_optdict",
"(",
"optdict",
",",
"script",
"=",
"False",
",",
"ignore",
"=",
"None",
")",
":",
"opts",
"=",
"[",
"]",
"for",
"(",
"opt",
",",
"value",
")",
"in",
"optdict",
".",
"iteritems",
"(",
")",
":",
"if",
"(",
"(",
"not",
"ig... | formats optdict to a tuple to pass it to tk . | train | false |
@register_canonicalize('local_setsubtensor_of_allocs')
@register_stabilize('local_setsubtensor_of_allocs')
@gof.local_optimizer([IncSubtensor])
def local_setsubtensor_of_constants(node):
    """Rewrite SetSubtensor(x, y) -> x when x and y are the same scalar
    constant (the assignment cannot change anything); returns False when
    the constants differ and None when the rewrite does not apply."""
    if not (isinstance(node.op, IncSubtensor) and node.op.set_instead_of_inc):
        return None
    x, y = node.inputs[0], node.inputs[1]
    try:
        replace_x = get_scalar_constant_value(x, elemwise=False)
        replace_y = get_scalar_constant_value(y, elemwise=False)
    except NotScalarConstantError:
        # Either side is not a scalar constant: nothing to do.
        return None
    if replace_x == replace_y:
        return [x]
    return False
| [
"@",
"register_canonicalize",
"(",
"'local_setsubtensor_of_allocs'",
")",
"@",
"register_stabilize",
"(",
"'local_setsubtensor_of_allocs'",
")",
"@",
"gof",
".",
"local_optimizer",
"(",
"[",
"IncSubtensor",
"]",
")",
"def",
"local_setsubtensor_of_constants",
"(",
"node",
... | setsubtensor -> x when x is constant or alloc . | train | false |
def estimate_optimal_with_K_and_M(num_kmers, mem_cap):
    """Estimate optimal countgraph parameters for ``num_kmers`` distinct
    k-mers under a total memory budget of ``mem_cap``.

    Returns a namedtuple (num_htables, htable_size, mem_use, fp_rate).
    """
    # Optimal table count for a Bloom-filter-like structure: ln(2) * M / N,
    # floored, with a minimum of one table.
    n_tables = int(math.log(2) * (mem_cap / float(num_kmers)))
    if n_tables == 0:
        n_tables = 1
    ht_size = int(mem_cap / n_tables)
    mem_use = ht_size * n_tables
    # False-positive rate: (1 - e^(-N/size)) ** tables.
    fp_rate = (1 - math.exp(-num_kmers / float(ht_size))) ** n_tables
    res = namedtuple(u'result',
                     [u'num_htables', u'htable_size', u'mem_use', u'fp_rate'])
    return res(n_tables, ht_size, mem_use, fp_rate)
| [
"def",
"estimate_optimal_with_K_and_M",
"(",
"num_kmers",
",",
"mem_cap",
")",
":",
"n_tables",
"=",
"(",
"math",
".",
"log",
"(",
"2",
")",
"*",
"(",
"mem_cap",
"/",
"float",
"(",
"num_kmers",
")",
")",
")",
"int_n_tables",
"=",
"int",
"(",
"n_tables",
... | estimate optimal countgraph args . | train | false |
def test_bounds_check():
    """Regression test for issue #4957: with bounds checking disabled,
    out-of-range pixels must still transform without clipping."""
    w = wcs.WCS(naxis=2)
    w.wcs.ctype = [u'RA---CAR', u'DEC--CAR']
    w.wcs.cdelt = [10, 10]
    w.wcs.crval = [-90, 90]
    w.wcs.crpix = [1, 1]
    # Disable both solution- and pixel-bounds checking.
    w.wcs.bounds_check(False, False)
    ra, dec = w.wcs_pix2world(300, 0, 0)
    assert_allclose(ra, -180)
    assert_allclose(dec, -30)
| [
"def",
"test_bounds_check",
"(",
")",
":",
"w",
"=",
"wcs",
".",
"WCS",
"(",
"naxis",
"=",
"2",
")",
"w",
".",
"wcs",
".",
"ctype",
"=",
"[",
"u'RA---CAR'",
",",
"u'DEC--CAR'",
"]",
"w",
".",
"wcs",
".",
"cdelt",
"=",
"[",
"10",
",",
"10",
"]",... | test for #4957 . | train | false |
def ssl_required(view_func):
    """View decorator that redirects plain-HTTP requests to HTTPS whenever
    secure session cookies are enabled in settings."""
    @wraps(view_func)
    def _checkssl(request, *args, **kwargs):
        if settings.SESSION_COOKIE_SECURE and not request.is_secure():
            # Rebuild the absolute URL with the https scheme and redirect.
            secure_url = request.build_absolute_uri().replace('http://', 'https://')
            return http.HttpResponseRedirect(secure_url)
        return view_func(request, *args, **kwargs)
    return _checkssl
| [
"def",
"ssl_required",
"(",
"view_func",
")",
":",
"@",
"wraps",
"(",
"view_func",
")",
"def",
"_checkssl",
"(",
"request",
",",
"*",
"args",
",",
"**",
"kwargs",
")",
":",
"if",
"(",
"settings",
".",
"SESSION_COOKIE_SECURE",
"and",
"(",
"not",
"request"... | a view decorator that enforces https . | train | false |
def _delete_orphans(course_usage_key, user_id, commit=False):
    """Find the orphaned blocks of a course and, when ``commit`` is True,
    delete them; returns the orphan usage keys as unicode strings."""
    store = modulestore()
    orphans = store.get_orphans(course_usage_key)
    if commit:
        published = (course_usage_key.branch ==
                     ModuleStoreEnum.BranchName.published)
        with store.bulk_operations(course_usage_key):
            for orphan in orphans:
                # On the published branch only the published revision is
                # removed; otherwise every revision goes.
                if published:
                    revision = ModuleStoreEnum.RevisionOption.published_only
                else:
                    revision = ModuleStoreEnum.RevisionOption.all
                store.delete_item(orphan, user_id, revision=revision)
    return [unicode(orphan) for orphan in orphans]
| [
"def",
"_delete_orphans",
"(",
"course_usage_key",
",",
"user_id",
",",
"commit",
"=",
"False",
")",
":",
"store",
"=",
"modulestore",
"(",
")",
"items",
"=",
"store",
".",
"get_orphans",
"(",
"course_usage_key",
")",
"branch",
"=",
"course_usage_key",
".",
... | helper function to delete orphans for a given course . | train | false |
def thin_path(cachedir):
    """Return the path of the thin tarball inside ``cachedir``."""
    thin_dir = os.path.join(cachedir, 'thin')
    return os.path.join(thin_dir, 'thin.tgz')
| [
"def",
"thin_path",
"(",
"cachedir",
")",
":",
"return",
"os",
".",
"path",
".",
"join",
"(",
"cachedir",
",",
"'thin'",
",",
"'thin.tgz'",
")"
] | return the path to the thin tarball . | train | false |
def import_vul_ui():
    """Controller to store a set of vulnerability indicator values entered
    directly through the GUI, grouped under a new source/document."""
    date = request.utcnow
    post_vars = request.post_vars
    location_id = post_vars.location
    update_super = s3db.update_super

    # Record where these numbers came from.
    ss_table = s3db.stats_source
    source_id = ss_table.insert(name='Vulnerability indicators submitted through UI')

    # One vulnerability document groups this whole submission.
    vdoc_table = s3db.vulnerability_document
    id = vdoc_table.insert(document_type='indicator',
                           date=date,
                           location_id=location_id,
                           source_id=source_id)
    update_super(vdoc_table, dict(id=id))

    # One data row per defined indicator; form fields are keyed by the
    # indicator's position (posn).
    itable = s3db.vulnerability_indicator
    rows = db(itable.deleted == False).select(itable.posn,
                                              itable.parameter_id,
                                              orderby=itable.posn)
    vd_table = db.vulnerability_data
    for row in rows:
        id = vd_table.insert(parameter_id=row.parameter_id,
                             location_id=location_id,
                             value=post_vars[str(row.posn)],
                             date=date,
                             source_id=source_id)
        update_super(vd_table, dict(id=id))
| [
"def",
"import_vul_ui",
"(",
")",
":",
"date",
"=",
"request",
".",
"utcnow",
"post_vars",
"=",
"request",
".",
"post_vars",
"location_id",
"=",
"post_vars",
".",
"location",
"update_super",
"=",
"s3db",
".",
"update_super",
"ss_table",
"=",
"s3db",
".",
"st... | controller to add a new set of vulnerability indicators which have been input direct into the gui . | train | false |
def get_kdb_reader(signature):
    """Look up the class used to process a KeePass file by its
    (base, sub) signature pair; raises IOError for unknown signatures."""
    base, sub = signature[0], signature[1]
    if base != BASE_SIGNATURE:
        raise IOError('Unknown base signature.')
    if sub not in _kdb_readers:
        raise IOError('Unknown sub signature.')
    return _kdb_readers[sub]
| [
"def",
"get_kdb_reader",
"(",
"signature",
")",
":",
"if",
"(",
"signature",
"[",
"0",
"]",
"!=",
"BASE_SIGNATURE",
")",
":",
"raise",
"IOError",
"(",
"'Unknown base signature.'",
")",
"if",
"(",
"signature",
"[",
"1",
"]",
"not",
"in",
"_kdb_readers",
")"... | retrieve the class used to process a keepass file by signature . | train | false |
def plugin_report():
    """Build a report of the plugin types present in the database.

    Each entry records the type name, its CMSPlugin instances, the plugin
    model (None for types no longer registered with the pool), and any
    instances lacking a saved concrete plugin row.
    """
    report = []
    all_plugins = CMSPlugin.objects.order_by(u'plugin_type')
    plugin_types = sorted(set(all_plugins.values_list(u'plugin_type', flat=True)))
    for plugin_type in plugin_types:
        entry = {u'type': plugin_type}
        plugins = CMSPlugin.objects.filter(plugin_type=plugin_type)
        entry[u'instances'] = plugins
        try:
            entry[u'model'] = plugin_pool.get_plugin(name=plugin_type).model
            # Instances whose concrete plugin row is missing from the DB.
            entry[u'unsaved_instances'] = [
                p for p in plugins if not p.get_plugin_instance()[0]
            ]
        except KeyError:
            # Plugin type is no longer registered with the pool.
            entry[u'model'] = None
            entry[u'unsaved_instances'] = []
        report.append(entry)
    return report
| [
"def",
"plugin_report",
"(",
")",
":",
"plugin_report",
"=",
"[",
"]",
"all_plugins",
"=",
"CMSPlugin",
".",
"objects",
".",
"order_by",
"(",
"u'plugin_type'",
")",
"plugin_types",
"=",
"list",
"(",
"set",
"(",
"all_plugins",
".",
"values_list",
"(",
"u'plug... | returns a report of existing plugins structure of report: type: cmsplugin class . | train | false |
def make_sign_blob_call(rpc, bytes_to_sign):
    """Issue the asynchronous App Identity RPC that signs ``bytes_to_sign``.

    ``rpc`` is a UserRPC; its result, once waited on, is the tuple
    (signing_key_name, signature_bytes).  Raises TypeError unless
    ``bytes_to_sign`` is a str.
    """
    if not isinstance(bytes_to_sign, str):
        raise TypeError('bytes_to_sign must be str: %s' % bytes_to_sign)
    request = app_identity_service_pb.SignForAppRequest()
    request.set_bytes_to_sign(bytes_to_sign)
    response = app_identity_service_pb.SignForAppResponse()

    def signing_for_app_result(rpc):
        """Check success, handle exceptions, and return converted RPC result.

        This method waits for the RPC if it has not yet finished, and calls
        the post-call hooks on the first invocation.

        Args:
          rpc: A UserRPC object.

        Returns:
          A tuple that contains signing key name and signature.
        """
        assert rpc.service == _APP_IDENTITY_SERVICE_NAME, repr(rpc.service)
        assert rpc.method == _SIGN_FOR_APP_METHOD_NAME, repr(rpc.method)
        try:
            rpc.check_success()
        except apiproxy_errors.ApplicationError as err:
            raise _to_app_identity_error(err)
        # ``response`` is captured from the enclosing call's scope.
        return (response.key_name(), response.signature_bytes())

    rpc.make_call(_SIGN_FOR_APP_METHOD_NAME, request, response,
                  signing_for_app_result)
| [
"def",
"make_sign_blob_call",
"(",
"rpc",
",",
"bytes_to_sign",
")",
":",
"if",
"(",
"not",
"isinstance",
"(",
"bytes_to_sign",
",",
"str",
")",
")",
":",
"raise",
"TypeError",
"(",
"(",
"'bytes_to_sign must be str: %s'",
"%",
"bytes_to_sign",
")",
")",
"reque... | executes the rpc call to sign a blob . | train | false |
def decimate_surface(points, triangles, n_triangles):
    """Decimate surface data down to approximately ``n_triangles`` faces."""
    # Fraction of triangles to remove so that ~n_triangles remain.
    reduction = 1.0 - n_triangles / float(len(triangles))
    return _decimate_surface(points, triangles, reduction)
| [
"def",
"decimate_surface",
"(",
"points",
",",
"triangles",
",",
"n_triangles",
")",
":",
"reduction",
"=",
"(",
"1",
"-",
"(",
"float",
"(",
"n_triangles",
")",
"/",
"len",
"(",
"triangles",
")",
")",
")",
"return",
"_decimate_surface",
"(",
"points",
"... | decimate surface data . | train | false |
def p_file_input(p):
    # Grammar rule: file_input : file_input NEWLINE | file_input stmt
    #                          | NEWLINE | stmt
    # (No docstring on purpose: PLY would interpret one as the grammar.)
    last = p[len(p) - 1]
    if isinstance(last, basestring):
        # Trailing NEWLINE token: keep the accumulated list, or start empty.
        p[0] = p[1] if len(p) == 3 else []
    elif len(p) == 3:
        p[0] = p[1] + p[2]
    else:
        p[0] = p[1]
| [
"def",
"p_file_input",
"(",
"p",
")",
":",
"if",
"isinstance",
"(",
"p",
"[",
"(",
"len",
"(",
"p",
")",
"-",
"1",
")",
"]",
",",
"basestring",
")",
":",
"if",
"(",
"len",
"(",
"p",
")",
"==",
"3",
")",
":",
"p",
"[",
"0",
"]",
"=",
"p",
... | file_input : file_input newline | file_input stmt | newline | stmt . | train | false |
def get_pip_path():
    """Best-effort absolute path to the ``pip`` script of the current
    virtualenv; falls back to the bare command name when virtualenv is
    not importable."""
    try:
        from virtualenv import path_locations
    except ImportError:
        return u'pip'
    home_dir, lib_dir, inc_dir, bin_dir = path_locations(sys.prefix)
    return os.path.join(bin_dir, u'pip')
| [
"def",
"get_pip_path",
"(",
")",
":",
"try",
":",
"from",
"virtualenv",
"import",
"path_locations",
"(",
"home_dir",
",",
"lib_dir",
",",
"inc_dir",
",",
"bin_dir",
")",
"=",
"path_locations",
"(",
"sys",
".",
"prefix",
")",
"return",
"os",
".",
"path",
... | try to figure out an explicit path to the pip executable script . | train | false |
def get_data_filename(filename):
    """Return the path of a test data file shipped with the
    certbot_nginx test package."""
    rel_path = os.path.join('testdata', 'etc_nginx', filename)
    return pkg_resources.resource_filename('certbot_nginx.tests', rel_path)
| [
"def",
"get_data_filename",
"(",
"filename",
")",
":",
"return",
"pkg_resources",
".",
"resource_filename",
"(",
"'certbot_nginx.tests'",
",",
"os",
".",
"path",
".",
"join",
"(",
"'testdata'",
",",
"'etc_nginx'",
",",
"filename",
")",
")"
] | gets the filename of a test data file . | train | false |
def adjusted_rand_score(labels_true, labels_pred):
    """Rand index adjusted for chance.

    Returns 1.0 for the degenerate clusterings where the adjustment is
    undefined (empty input, a single cluster, or one cluster per sample).
    """
    labels_true, labels_pred = check_clusterings(labels_true, labels_pred)
    n_samples = labels_true.shape[0]
    n_classes = np.unique(labels_true).shape[0]
    n_clusters = np.unique(labels_pred).shape[0]

    # Degenerate cases: the general formula divides by zero here, but the
    # two clusterings are trivially identical, so score them as perfect.
    if (n_classes == n_clusters == 1
            or n_classes == n_clusters == 0
            or n_classes == n_clusters == n_samples):
        return 1.0

    contingency = contingency_matrix(labels_true, labels_pred, sparse=True)

    # Pair counts within true classes, predicted clusters, and their cells.
    sum_comb_c = sum(comb2(n_c) for n_c in np.ravel(contingency.sum(axis=1)))
    sum_comb_k = sum(comb2(n_k) for n_k in np.ravel(contingency.sum(axis=0)))
    sum_comb = sum(comb2(n_ij) for n_ij in contingency.data)

    prod_comb = (sum_comb_c * sum_comb_k) / comb(n_samples, 2)
    mean_comb = (sum_comb_k + sum_comb_c) / 2.0
    return (sum_comb - prod_comb) / (mean_comb - prod_comb)
| [
"def",
"adjusted_rand_score",
"(",
"labels_true",
",",
"labels_pred",
")",
":",
"(",
"labels_true",
",",
"labels_pred",
")",
"=",
"check_clusterings",
"(",
"labels_true",
",",
"labels_pred",
")",
"n_samples",
"=",
"labels_true",
".",
"shape",
"[",
"0",
"]",
"n... | rand index adjusted for chance . | train | false |
def _easy_install(argv, python_cmd, use_sudo):
    """Install packages with easy_install, invoked through ``python_cmd``.

    We don't know whether the ``easy_install`` script on the PATH belongs
    to the right interpreter, so the setuptools entry point is loaded
    directly inside the chosen interpreter instead.

    Fixes: the original hard-coded ``python`` (ignoring ``python_cmd``
    despite interpolating from ``locals()``) and left the shell command's
    double quote unterminated.
    """
    command = (
        '%(python_cmd)s -c "from pkg_resources import load_entry_point; '
        "ez = load_entry_point('setuptools', 'console_scripts', 'easy_install'); "
        'ez(argv=%(argv)r)"'
    ) % locals()
    if use_sudo:
        run_as_root(command)
    else:
        run(command)
| [
"def",
"_easy_install",
"(",
"argv",
",",
"python_cmd",
",",
"use_sudo",
")",
":",
"command",
"=",
"(",
"'python -c \" from pkg_resources import load_entry_point; ez = load_entry_point(\\'setuptools\\', \\'console_scripts\\', \\'easy_install\\'); ez(argv=%(argv)r) '... | install packages using easy_install we dont know if the easy_install command in the path will be the right one . | train | true |
@login_required
def project_notifications(request, project_slug):
    """Project notification settings: manage email and webhook hooks."""
    project = get_object_or_404(
        Project.objects.for_admin_user(request.user), slug=project_slug)
    email_form = EmailHookForm(data=request.POST or None, project=project)
    webhook_form = WebHookForm(data=request.POST or None, project=project)

    if request.method == 'POST':
        # Each form saves independently; an invalid one is simply skipped.
        if email_form.is_valid():
            email_form.save()
        if webhook_form.is_valid():
            webhook_form.save()
        return HttpResponseRedirect(
            reverse('projects_notifications', args=[project.slug]))

    return render_to_response(
        'projects/project_notifications.html',
        {
            'email_form': email_form,
            'webhook_form': webhook_form,
            'project': project,
            'emails': project.emailhook_notifications.all(),
            'urls': project.webhook_notifications.all(),
        },
        context_instance=RequestContext(request),
    )
| [
"@",
"login_required",
"def",
"project_notifications",
"(",
"request",
",",
"project_slug",
")",
":",
"project",
"=",
"get_object_or_404",
"(",
"Project",
".",
"objects",
".",
"for_admin_user",
"(",
"request",
".",
"user",
")",
",",
"slug",
"=",
"project_slug",
... | project notification view and form view . | train | false |
def theme_url():
    """Return the full URL of the Bootstrap theme CSS file, as configured
    in the bootstrap settings."""
    return get_bootstrap_setting(u'theme_url')
| [
"def",
"theme_url",
"(",
")",
":",
"return",
"get_bootstrap_setting",
"(",
"u'theme_url'",
")"
] | return the full url to the theme css file . | train | false |
def font_is_installed(font):
    """Check whether ``font`` is installed: returns the (possibly empty)
    list of installed font family names exactly matching it."""
    families = QFontDatabase().families()
    return [family for family in families if to_text_string(family) == font]
| [
"def",
"font_is_installed",
"(",
"font",
")",
":",
"return",
"[",
"fam",
"for",
"fam",
"in",
"QFontDatabase",
"(",
")",
".",
"families",
"(",
")",
"if",
"(",
"to_text_string",
"(",
"fam",
")",
"==",
"font",
")",
"]"
] | check if font is installed . | train | false |
def customer(request):
    """Context processor adding the current customer (with optional staff
    emulation via the session) to the RequestContext."""
    msg = (u"The request object does not contain a customer. Edit your "
           u"MIDDLEWARE_CLASSES setting to insert "
           u"'shop.middlerware.CustomerMiddleware'.")
    assert hasattr(request, u'customer'), msg
    context = {
        u'customer': request.customer,
        u'site_header': app_settings.APP_LABEL.capitalize(),
    }
    if request.user.is_staff:
        # Staff may emulate another customer through the session key.
        try:
            emulated = CustomerModel.objects.get(
                pk=request.session[u'emulate_user_id'])
            context.update(customer=emulated)
        except (CustomerModel.DoesNotExist, KeyError, AttributeError):
            pass
    return context
| [
"def",
"customer",
"(",
"request",
")",
":",
"msg",
"=",
"u\"The request object does not contain a customer. Edit your MIDDLEWARE_CLASSES setting to insert 'shop.middlerware.CustomerMiddleware'.\"",
"assert",
"hasattr",
"(",
"request",
",",
"u'customer'",
")",
",",
"msg",
"contex... | add the customer to the requestcontext . | train | false |
def package_tree(pkgroot):
    """Return the sorted list of dotted sub-package names under ``pkgroot``
    (directories containing an __init__.py), relative to this file."""
    base = os.path.dirname(__file__)
    subpackages = []
    for dirpath, _dirnames, filenames in os.walk(os.path.join(base, pkgroot)):
        if '__init__.py' in filenames:
            rel = os.path.relpath(dirpath, base)
            subpackages.append(rel.replace(os.path.sep, '.'))
    return sorted(subpackages)
| [
"def",
"package_tree",
"(",
"pkgroot",
")",
":",
"path",
"=",
"os",
".",
"path",
".",
"dirname",
"(",
"__file__",
")",
"subdirs",
"=",
"[",
"os",
".",
"path",
".",
"relpath",
"(",
"i",
"[",
"0",
"]",
",",
"path",
")",
".",
"replace",
"(",
"os",
... | get the submodule list . | train | false |
def tex_coord(x, y, n=4):
    """Return the texture coordinates of the four corners of square (x, y)
    on an n-by-n texture atlas, as a flat 8-tuple ordered
    bottom-left, bottom-right, top-right, top-left."""
    size = 1.0 / n
    left = x * size
    bottom = y * size
    right = left + size
    top = bottom + size
    return (left, bottom, right, bottom, right, top, left, top)
| [
"def",
"tex_coord",
"(",
"x",
",",
"y",
",",
"n",
"=",
"4",
")",
":",
"m",
"=",
"(",
"1.0",
"/",
"n",
")",
"dx",
"=",
"(",
"x",
"*",
"m",
")",
"dy",
"=",
"(",
"y",
"*",
"m",
")",
"return",
"(",
"dx",
",",
"dy",
",",
"(",
"dx",
"+",
... | return the bounding vertices of the texture square . | train | false |
def modify_tags(gce, module, node, tags, state='present'):
    """Add (state='present') or remove (state='absent') ``tags`` on a GCE
    instance node.

    Returns (changed, tags_changed): whether anything was modified and the
    list of tags added/removed.  On API failure the Ansible module fails
    the task.

    Fix: dropped the unused ``zone`` local and the dead ``tags_changed``
    pre-initialization (it was always reassigned before use).
    """
    existing_tags = node.extra['tags']
    tags = [x.lower() for x in tags]
    if state == 'absent':
        # Remove only tags that are actually present on the node.
        tags_changed = _intersect_items(existing_tags, tags)
        if not tags_changed:
            return False, None
        node_tags = _get_changed_items(existing_tags, tags)
    else:
        # Add only tags that are not already set on the node.
        tags_changed = _get_changed_items(tags, existing_tags)
        if not tags_changed:
            return False, None
        node_tags = _union_items(existing_tags, tags)
    try:
        gce.ex_set_node_tags(node, node_tags)
        return True, tags_changed
    except (GoogleBaseError, InvalidRequestError) as e:
        module.fail_json(msg=str(e), changed=False)
| [
"def",
"modify_tags",
"(",
"gce",
",",
"module",
",",
"node",
",",
"tags",
",",
"state",
"=",
"'present'",
")",
":",
"zone",
"=",
"node",
".",
"extra",
"[",
"'zone'",
"]",
".",
"name",
"existing_tags",
"=",
"node",
".",
"extra",
"[",
"'tags'",
"]",
... | modify tags on an instance . | train | false |
def cpu():
    """Benchmark minion CPU performance with sysbench prime-number runs of
    increasing difficulty; returns parsed results keyed by prime limit."""
    results = {}
    for prime_limit in (500, 1000, 2500, 5000):
        command = 'sysbench --test=cpu --cpu-max-prime={0} run'.format(prime_limit)
        output = __salt__['cmd.run'](command)
        results['Prime numbers limit: {0}'.format(prime_limit)] = _parser(output)
    return results
| [
"def",
"cpu",
"(",
")",
":",
"max_primes",
"=",
"[",
"500",
",",
"1000",
",",
"2500",
",",
"5000",
"]",
"test_command",
"=",
"'sysbench --test=cpu --cpu-max-prime={0} run'",
"result",
"=",
"None",
"ret_val",
"=",
"{",
"}",
"for",
"primes",
"in",
"max_primes"... | tests for the cpu performance of minions . | train | true |
def reparam(string_, dictionary):
    """Interpolate ``string_`` with values looked up in ``dictionary``,
    SQL-quoting each interpolated value, and return an SQLQuery.

    NOTE: interpolated chunks are evaluated with ``eval`` against the
    given dictionary, so ``string_`` must come from trusted code, never
    from external input.
    """
    dictionary = dictionary.copy()
    pieces = []
    for live, chunk in _interpolate(string_):
        # ``live`` chunks are expressions to evaluate; others are literal SQL.
        if live:
            pieces.append(sqlquote(eval(chunk, dictionary)))
        else:
            pieces.append(chunk)
    return SQLQuery.join(pieces, '')
| [
"def",
"reparam",
"(",
"string_",
",",
"dictionary",
")",
":",
"dictionary",
"=",
"dictionary",
".",
"copy",
"(",
")",
"result",
"=",
"[",
"]",
"for",
"(",
"live",
",",
"chunk",
")",
"in",
"_interpolate",
"(",
"string_",
")",
":",
"if",
"live",
":",
... | takes a string and a dictionary and interpolates the string using values from the dictionary . | train | false |
def generate_timeout_series(timeout):
    """Infinite generator of strictly growing wait times: the i-th value
    (1-based) is i * timeout + i, so each value exceeds ``timeout`` times
    its index."""
    attempt = 1
    while True:
        # i*timeout + i == i*(timeout + 1)
        yield attempt * (timeout + 1)
        attempt += 1
| [
"def",
"generate_timeout_series",
"(",
"timeout",
")",
":",
"iteration",
"=",
"0",
"while",
"True",
":",
"iteration",
"+=",
"1",
"(",
"yield",
"(",
"(",
"iteration",
"*",
"timeout",
")",
"+",
"iteration",
")",
")"
] | generate a series of times that exceeds the given timeout . | train | false |
def get_liked(user_or_id, model):
    """Return a queryset of ``model`` objects liked by the given user
    (accepts either a user instance or a raw user id)."""
    content_type = apps.get_model(
        'contenttypes', 'ContentType').objects.get_for_model(model)
    if isinstance(user_or_id, get_user_model()):
        user_id = user_or_id.id
    else:
        user_id = user_or_id
    where = (
        'likes_like.content_type_id = %s',
        '%s.id = likes_like.object_id' % model._meta.db_table,
        'likes_like.user_id = %s',
    )
    return model.objects.extra(where=where, tables=('likes_like',),
                               params=(content_type.id, user_id))
| [
"def",
"get_liked",
"(",
"user_or_id",
",",
"model",
")",
":",
"obj_type",
"=",
"apps",
".",
"get_model",
"(",
"'contenttypes'",
",",
"'ContentType'",
")",
".",
"objects",
".",
"get_for_model",
"(",
"model",
")",
"conditions",
"=",
"(",
"'likes_like.content_ty... | get the objects liked by an user . | train | false |
def getFilePaths(fileInDirectory=''):
    """Get the file paths in the directory of ``fileInDirectory``, or of
    the current working directory when no file is given."""
    if fileInDirectory != '':
        directoryName = os.path.dirname(fileInDirectory)
    else:
        directoryName = os.getcwd()
    return getFilePathsByDirectory(directoryName)
| [
"def",
"getFilePaths",
"(",
"fileInDirectory",
"=",
"''",
")",
":",
"directoryName",
"=",
"os",
".",
"getcwd",
"(",
")",
"if",
"(",
"fileInDirectory",
"!=",
"''",
")",
":",
"directoryName",
"=",
"os",
".",
"path",
".",
"dirname",
"(",
"fileInDirectory",
... | get the file paths in the directory of the file in directory . | train | false |
def rotate_juggle(lst, dist):
    """Rotate ``lst`` by ``dist`` positions in place using the juggling
    algorithm: gcd(dist, n) independent cycles, each element moved once."""
    n = len(lst)
    for start in xrange(gcd(dist, n)):
        held = lst[start]
        j = start
        while True:
            k = (j + dist) % n
            if k == start:
                break
            lst[j] = lst[k]
            j = k
        lst[j] = held
| [
"def",
"rotate_juggle",
"(",
"lst",
",",
"dist",
")",
":",
"n",
"=",
"len",
"(",
"lst",
")",
"for",
"i",
"in",
"xrange",
"(",
"gcd",
"(",
"dist",
",",
"n",
")",
")",
":",
"t",
"=",
"lst",
"[",
"i",
"]",
"j",
"=",
"i",
"while",
"1",
":",
"... | an iterative juggle method . | train | false |
def format_date_time(request, value, format='shortdatetime'):
    """Format a date/datetime with Babel using the request's locale and
    the user's time zone; returns (formatted_string, tz_aware_value).

    Plain dates are promoted to midnight datetimes; any other type raises
    ValueError.
    """
    if not isinstance(value, datetime.datetime):
        if not isinstance(value, datetime.date):
            raise ValueError
        value = datetime.datetime.combine(value, datetime.datetime.min.time())

    # Localize in the site default zone, then shift to the user's zone.
    tzvalue = timezone(settings.TIME_ZONE).localize(value)
    user = request.user
    try:
        if user.is_authenticated() and user.timezone:
            user_tz = timezone(user.timezone)
            tzvalue = user_tz.normalize(tzvalue.astimezone(user_tz))
    except AttributeError:
        # Users without a timezone attribute keep the site default zone.
        pass

    locale = _babel_locale(_get_request_locale(request))
    try:
        formatted = format_date_value(value, tzvalue, locale, format)
    except KeyError:
        # Formatting failed for this locale; fall back to the site default.
        formatted = format_date_value(
            value, tzvalue, _babel_locale(settings.LANGUAGE_CODE), format)
    return formatted, tzvalue
| [
"def",
"format_date_time",
"(",
"request",
",",
"value",
",",
"format",
"=",
"'shortdatetime'",
")",
":",
"if",
"(",
"not",
"isinstance",
"(",
"value",
",",
"datetime",
".",
"datetime",
")",
")",
":",
"if",
"isinstance",
"(",
"value",
",",
"datetime",
".... | returns date/time formatted using babels locale settings . | train | false |
def strlist_minus(a, b):
    """Return the items of ``a`` that do not occur in ``b``, compared
    case-insensitively (via cidict), preserving the order of ``a``."""
    exclude = cidict()
    for item in b:
        exclude[item] = item
    return [item for item in a if not exclude.has_key(item)]
| [
"def",
"strlist_minus",
"(",
"a",
",",
"b",
")",
":",
"temp",
"=",
"cidict",
"(",
")",
"for",
"elt",
"in",
"b",
":",
"temp",
"[",
"elt",
"]",
"=",
"elt",
"result",
"=",
"[",
"elt",
"for",
"elt",
"in",
"a",
"if",
"(",
"not",
"temp",
".",
"has_... | return list of all items in a which are not in b . | train | false |
def check_param_val(param):
    """Accessor for a parameter's value that rejects unset parameters.

    Raises ValueError when ``param.value`` is None; otherwise returns it.
    """
    value = param.value
    if value is None:
        raise ValueError('Problem has missing parameter value.')
    return value
| [
"def",
"check_param_val",
"(",
"param",
")",
":",
"val",
"=",
"param",
".",
"value",
"if",
"(",
"val",
"is",
"None",
")",
":",
"raise",
"ValueError",
"(",
"'Problem has missing parameter value.'",
")",
"else",
":",
"return",
"val"
] | wrapper on accessing a parameter . | train | false |
def catch_integrity_errors(session):
    """Return a decorator wrapping view functions so that any SQLAlchemy
    error rolls back ``session`` and becomes an error response
    (409 for conflicts, 400 otherwise)."""
    def decorated(func):
        """Return ``func`` wrapped with rollback-on-database-error logic."""
        @wraps(func)
        def wrapped(*args, **kw):
            """Execute ``func(*args, **kw)``, converting exceptions that
            warrant a database rollback into error responses."""
            try:
                return func(*args, **kw)
            except SQLAlchemyError as exception:
                session.rollback()
                status = 409 if is_conflict(exception) else 400
                return error_response(
                    status,
                    cause=exception,
                    detail=str(exception),
                    title=un_camel_case(exception.__class__.__name__))
        return wrapped
    return decorated
| [
"def",
"catch_integrity_errors",
"(",
"session",
")",
":",
"def",
"decorated",
"(",
"func",
")",
":",
"@",
"wraps",
"(",
"func",
")",
"def",
"wrapped",
"(",
"*",
"args",
",",
"**",
"kw",
")",
":",
"try",
":",
"return",
"func",
"(",
"*",
"args",
","... | returns a decorator that catches database integrity errors . | train | false |
16,768 | def _get_request_obj(csr):
text = _text_or_file(csr)
text = get_pem_entry(text, pem_type='CERTIFICATE REQUEST')
return M2Crypto.X509.load_request_string(text)
| [
"def",
"_get_request_obj",
"(",
"csr",
")",
":",
"text",
"=",
"_text_or_file",
"(",
"csr",
")",
"text",
"=",
"get_pem_entry",
"(",
"text",
",",
"pem_type",
"=",
"'CERTIFICATE REQUEST'",
")",
"return",
"M2Crypto",
".",
"X509",
".",
"load_request_string",
"(",
... | returns a csr object based on pem text . | train | true |
16,769 | def _isnotsuite(test):
try:
iter(test)
except TypeError:
return True
return False
| [
"def",
"_isnotsuite",
"(",
"test",
")",
":",
"try",
":",
"iter",
"(",
"test",
")",
"except",
"TypeError",
":",
"return",
"True",
"return",
"False"
] | a crude way to tell apart testcases and suites with duck-typing . | train | false |
16,771 | def _scipy_sparse_matrix_to_zero(e):
if (not np):
raise ImportError
edense = e.todense()
test = np.zeros_like(edense)
if np.allclose(edense, test):
return 0.0
else:
return e
| [
"def",
"_scipy_sparse_matrix_to_zero",
"(",
"e",
")",
":",
"if",
"(",
"not",
"np",
")",
":",
"raise",
"ImportError",
"edense",
"=",
"e",
".",
"todense",
"(",
")",
"test",
"=",
"np",
".",
"zeros_like",
"(",
"edense",
")",
"if",
"np",
".",
"allclose",
... | convert a scipy . | train | false |
16,772 | def set_autostart(vm_, state='on'):
dom = _get_domain(vm_)
if (state == 'on'):
return (dom.setAutostart(1) == 0)
elif (state == 'off'):
return (dom.setAutostart(0) == 0)
else:
return False
| [
"def",
"set_autostart",
"(",
"vm_",
",",
"state",
"=",
"'on'",
")",
":",
"dom",
"=",
"_get_domain",
"(",
"vm_",
")",
"if",
"(",
"state",
"==",
"'on'",
")",
":",
"return",
"(",
"dom",
".",
"setAutostart",
"(",
"1",
")",
"==",
"0",
")",
"elif",
"("... | set the autostart flag on a vm so that the vm will start with the host system on reboot . | train | false |
16,773 | def egyptian_fraction(r, algorithm='Greedy'):
if (r <= 0):
raise ValueError('Value must be positive')
(prefix, rem) = egypt_harmonic(r)
if (rem == 0):
return prefix
(x, y) = rem.as_numer_denom()
if (algorithm == 'Greedy'):
return (prefix + egypt_greedy(x, y))
elif (algorithm == 'Graham Jewett'):
return (prefix + egypt_graham_jewett(x, y))
elif (algorithm == 'Takenouchi'):
return (prefix + egypt_takenouchi(x, y))
elif (algorithm == 'Golomb'):
return (prefix + egypt_golomb(x, y))
else:
raise ValueError('Entered invalid algorithm')
| [
"def",
"egyptian_fraction",
"(",
"r",
",",
"algorithm",
"=",
"'Greedy'",
")",
":",
"if",
"(",
"r",
"<=",
"0",
")",
":",
"raise",
"ValueError",
"(",
"'Value must be positive'",
")",
"(",
"prefix",
",",
"rem",
")",
"=",
"egypt_harmonic",
"(",
"r",
")",
"... | return the list of denominators of an egyptian fraction expansion [1]_ of the said rational r . | train | false |
16,775 | def make_and_verify_selection(server_root, temp_dir):
(copied_files, copied_dirs) = copy_config(server_root, temp_dir)
print(textwrap.fill('A secure copy of the files that have been selected for submission has been created under {0}. All comments have been removed and the files are only accessible by the current user. A list of the files that have been included is shown below. Please make sure that this selection does not contain private keys, passwords, or any other sensitive information.'.format(temp_dir)))
print('\nFiles:')
for copied_file in copied_files:
print(copied_file)
print('Directories (including all contained files):')
for copied_dir in copied_dirs:
print(copied_dir)
sys.stdout.write('\nIs it safe to submit these files? ')
while True:
ans = raw_input('(Y)es/(N)o: ').lower()
if ans.startswith('y'):
return
elif ans.startswith('n'):
sys.exit('Your files were not submitted')
| [
"def",
"make_and_verify_selection",
"(",
"server_root",
",",
"temp_dir",
")",
":",
"(",
"copied_files",
",",
"copied_dirs",
")",
"=",
"copy_config",
"(",
"server_root",
",",
"temp_dir",
")",
"print",
"(",
"textwrap",
".",
"fill",
"(",
"'A secure copy of the files ... | copies server_root to temp_dir and verifies selection with the user . | train | false |
16,776 | def memoizemethod_noargs(method):
cache = weakref.WeakKeyDictionary()
@wraps(method)
def new_method(self, *args, **kwargs):
if (self not in cache):
cache[self] = method(self, *args, **kwargs)
return cache[self]
return new_method
| [
"def",
"memoizemethod_noargs",
"(",
"method",
")",
":",
"cache",
"=",
"weakref",
".",
"WeakKeyDictionary",
"(",
")",
"@",
"wraps",
"(",
"method",
")",
"def",
"new_method",
"(",
"self",
",",
"*",
"args",
",",
"**",
"kwargs",
")",
":",
"if",
"(",
"self",... | decorator to cache the result of a method using a weak reference to its object . | train | false |
16,777 | def validate_rpc_host(ip):
if ((not is_valid_ipv4(ip)) and (not is_valid_ipv6(ip))):
raise ApplicationException(desc=('Invalid RPC ip address: %s' % ip))
return ip
| [
"def",
"validate_rpc_host",
"(",
"ip",
")",
":",
"if",
"(",
"(",
"not",
"is_valid_ipv4",
"(",
"ip",
")",
")",
"and",
"(",
"not",
"is_valid_ipv6",
"(",
"ip",
")",
")",
")",
":",
"raise",
"ApplicationException",
"(",
"desc",
"=",
"(",
"'Invalid RPC ip addr... | validates the given ip for use as rpc server address . | train | true |
16,778 | def normalize_languages(opf_languages, mi_languages):
def parse(x):
try:
return parse_lang_code(x)
except ValueError:
return None
opf_languages = filter(None, map(parse, opf_languages))
cc_map = {c.langcode: c.countrycode for c in opf_languages}
mi_languages = filter(None, map(parse, mi_languages))
def norm(x):
lc = x.langcode
cc = (x.countrycode or cc_map.get(lc, None))
lc = (lang_as_iso639_1(lc) or lc)
if cc:
lc += (u'-' + cc)
return lc
return list(map(norm, mi_languages))
| [
"def",
"normalize_languages",
"(",
"opf_languages",
",",
"mi_languages",
")",
":",
"def",
"parse",
"(",
"x",
")",
":",
"try",
":",
"return",
"parse_lang_code",
"(",
"x",
")",
"except",
"ValueError",
":",
"return",
"None",
"opf_languages",
"=",
"filter",
"(",... | preserve original country codes and use 2-letter lang codes where possible . | train | false |
16,779 | def GetIndexesAsync(**kwargs):
extra_hook = kwargs.pop('extra_hook', None)
config = _GetConfigFromKwargs(kwargs)
def local_extra_hook(result):
if extra_hook:
return extra_hook(result)
return result
return _GetConnection().async_get_indexes(config, local_extra_hook)
| [
"def",
"GetIndexesAsync",
"(",
"**",
"kwargs",
")",
":",
"extra_hook",
"=",
"kwargs",
".",
"pop",
"(",
"'extra_hook'",
",",
"None",
")",
"config",
"=",
"_GetConfigFromKwargs",
"(",
"kwargs",
")",
"def",
"local_extra_hook",
"(",
"result",
")",
":",
"if",
"e... | asynchronously retrieves the application indexes and their states . | train | false |
16,780 | def extract_events_from_keen(client, event_collection, start_date=None, end_date=None):
timeframe = VERY_LONG_TIMEFRAME
if (start_date and end_date):
logger.info('Gathering events from the {} collection between {} and {}'.format(event_collection, start_date, end_date))
timeframe = {'start': start_date.isoformat(), 'end': end_date.isoformat()}
else:
logger.info('Gathering events from the {} collection using timeframe {}'.format(event_collection, VERY_LONG_TIMEFRAME))
return client.extraction(event_collection, timeframe=timeframe)
| [
"def",
"extract_events_from_keen",
"(",
"client",
",",
"event_collection",
",",
"start_date",
"=",
"None",
",",
"end_date",
"=",
"None",
")",
":",
"timeframe",
"=",
"VERY_LONG_TIMEFRAME",
"if",
"(",
"start_date",
"and",
"end_date",
")",
":",
"logger",
".",
"in... | get analytics from keen to use as a starting point for smoothing or transferring . | train | false |
16,781 | def not_(clause):
return operators.inv(_literal_as_binds(clause))
| [
"def",
"not_",
"(",
"clause",
")",
":",
"return",
"operators",
".",
"inv",
"(",
"_literal_as_binds",
"(",
"clause",
")",
")"
] | same as not a . | train | false |
16,784 | def security_group_rule_create(context, values):
return IMPL.security_group_rule_create(context, values)
| [
"def",
"security_group_rule_create",
"(",
"context",
",",
"values",
")",
":",
"return",
"IMPL",
".",
"security_group_rule_create",
"(",
"context",
",",
"values",
")"
] | create a new security group . | train | false |
16,785 | def generate_java_binary(target, source, env):
target_name = str(target[0])
onejar_path = str(source[0])
return _generate_java_binary(target_name, onejar_path, '', '')
| [
"def",
"generate_java_binary",
"(",
"target",
",",
"source",
",",
"env",
")",
":",
"target_name",
"=",
"str",
"(",
"target",
"[",
"0",
"]",
")",
"onejar_path",
"=",
"str",
"(",
"source",
"[",
"0",
"]",
")",
"return",
"_generate_java_binary",
"(",
"target... | build function to generate wrapper shell script for java binary . | train | false |
16,786 | @pytest.mark.parametrize((u'model_class', u'mode'), list(itertools.product(test_models_1D, modes)))
def test_pixel_sum_1D(model_class, mode):
if ((model_class == Box1D) and (mode == u'center')):
pytest.skip(u'Non integrating mode. Skip integral test.')
parameters = models_1D[model_class]
model = create_model(model_class, parameters)
values = discretize_model(model, models_1D[model_class][u'x_lim'], mode=mode)
assert_allclose(values.sum(), models_1D[model_class][u'integral'], atol=0.0001)
| [
"@",
"pytest",
".",
"mark",
".",
"parametrize",
"(",
"(",
"u'model_class'",
",",
"u'mode'",
")",
",",
"list",
"(",
"itertools",
".",
"product",
"(",
"test_models_1D",
",",
"modes",
")",
")",
")",
"def",
"test_pixel_sum_1D",
"(",
"model_class",
",",
"mode",... | test if the sum of all pixels corresponds nearly to the integral . | train | false |
16,787 | def get_repository_by_name_and_owner(app, name, owner):
repository_query = get_repository_query(app)
if is_tool_shed_client(app):
return repository_query.filter(and_((app.install_model.ToolShedRepository.table.c.name == name), (app.install_model.ToolShedRepository.table.c.owner == owner))).first()
user = common_util.get_user_by_username(app, owner)
if user:
return repository_query.filter(and_((app.model.Repository.table.c.name == name), (app.model.Repository.table.c.user_id == user.id))).first()
return None
| [
"def",
"get_repository_by_name_and_owner",
"(",
"app",
",",
"name",
",",
"owner",
")",
":",
"repository_query",
"=",
"get_repository_query",
"(",
"app",
")",
"if",
"is_tool_shed_client",
"(",
"app",
")",
":",
"return",
"repository_query",
".",
"filter",
"(",
"an... | get a repository from the database via name and owner . | train | false |
16,788 | def get_aware_datetime(dt_string, tz=None):
if (not dt_string):
return None
try:
return make_aware(parse_datetime(dt_string), tz=tz)
except ValueError:
raise ArgumentTypeError(('The provided datetime/date string is not valid: "%s"' % dt_string))
| [
"def",
"get_aware_datetime",
"(",
"dt_string",
",",
"tz",
"=",
"None",
")",
":",
"if",
"(",
"not",
"dt_string",
")",
":",
"return",
"None",
"try",
":",
"return",
"make_aware",
"(",
"parse_datetime",
"(",
"dt_string",
")",
",",
"tz",
"=",
"tz",
")",
"ex... | return an aware datetime parsed from a datetime or date string . | train | false |
16,789 | def handle_missing_index_file(app, tool_path, sample_files, repository_tools_tups, sample_files_copied):
for (index, repository_tools_tup) in enumerate(repository_tools_tups):
(tup_path, guid, repository_tool) = repository_tools_tup
params_with_missing_index_file = repository_tool.params_with_missing_index_file
for param in params_with_missing_index_file:
options = param.options
missing_file_name = basic_util.strip_path(options.missing_index_file)
if (missing_file_name not in sample_files_copied):
for sample_file in sample_files:
sample_file_name = basic_util.strip_path(sample_file)
if (sample_file_name == ('%s.sample' % missing_file_name)):
copy_sample_file(app, sample_file)
if (options.tool_data_table and options.tool_data_table.missing_index_file):
options.tool_data_table.handle_found_index_file(options.missing_index_file)
sample_files_copied.append(options.missing_index_file)
break
repository_tool = app.toolbox.load_tool(os.path.join(tool_path, tup_path), guid=guid, use_cached=False)
repository_tools_tups[index] = (tup_path, guid, repository_tool)
return (repository_tools_tups, sample_files_copied)
| [
"def",
"handle_missing_index_file",
"(",
"app",
",",
"tool_path",
",",
"sample_files",
",",
"repository_tools_tups",
",",
"sample_files_copied",
")",
":",
"for",
"(",
"index",
",",
"repository_tools_tup",
")",
"in",
"enumerate",
"(",
"repository_tools_tups",
")",
":... | inspect each tool to see if it has any input parameters that are dynamically generated select lists that depend on a . | train | false |
16,790 | def get_response_object(url, method='GET', headers=None):
parsed_url = urlparse.urlparse(url)
parsed_qs = parse_qs(parsed_url.query)
secure = (parsed_url.scheme == 'https')
headers = (headers or {})
method = method.upper()
con = Connection(secure=secure, host=parsed_url.netloc)
response = con.request(action=parsed_url.path, params=parsed_qs, headers=headers, method=method)
return response
| [
"def",
"get_response_object",
"(",
"url",
",",
"method",
"=",
"'GET'",
",",
"headers",
"=",
"None",
")",
":",
"parsed_url",
"=",
"urlparse",
".",
"urlparse",
"(",
"url",
")",
"parsed_qs",
"=",
"parse_qs",
"(",
"parsed_url",
".",
"query",
")",
"secure",
"... | utility function which uses libclouds connection class to issue an http request . | train | false |
16,792 | def _get_encrypted_user_slug():
cname = False
data = [(c.user._id36 if c.user_is_loggedin else ''), get_srpath(), (c.lang or ''), cname]
return encrypt('|'.join((_force_utf8(s) for s in data)))
| [
"def",
"_get_encrypted_user_slug",
"(",
")",
":",
"cname",
"=",
"False",
"data",
"=",
"[",
"(",
"c",
".",
"user",
".",
"_id36",
"if",
"c",
".",
"user_is_loggedin",
"else",
"''",
")",
",",
"get_srpath",
"(",
")",
",",
"(",
"c",
".",
"lang",
"or",
"'... | return an encrypted string containing context info . | train | false |
16,793 | @pytest.mark.usefixtures(u'clean_system')
@pytest.mark.parametrize(u'input_params, expected_context', context_data())
def test_generate_context(input_params, expected_context):
assert (generate.generate_context(**input_params) == expected_context)
| [
"@",
"pytest",
".",
"mark",
".",
"usefixtures",
"(",
"u'clean_system'",
")",
"@",
"pytest",
".",
"mark",
".",
"parametrize",
"(",
"u'input_params, expected_context'",
",",
"context_data",
"(",
")",
")",
"def",
"test_generate_context",
"(",
"input_params",
",",
"... | test the generated context for several input parameters against the according expected context . | train | false |
16,796 | def endpointForTesting(fireImmediately=False):
@implementer(IStreamClientEndpoint)
class ClientTestEndpoint(object, ):
def connect(self, factory):
result = Deferred()
info.passedFactories.append(factory)
@result.addCallback
def createProtocol(ignored):
protocol = factory.buildProtocol(None)
info.constructedProtocols.append(protocol)
transport = StringTransport()
protocol.makeConnection(transport)
return protocol
info.connectQueue.append(result)
if fireImmediately:
result.callback(None)
return result
info = ConnectInformation()
return (info, ClientTestEndpoint())
| [
"def",
"endpointForTesting",
"(",
"fireImmediately",
"=",
"False",
")",
":",
"@",
"implementer",
"(",
"IStreamClientEndpoint",
")",
"class",
"ClientTestEndpoint",
"(",
"object",
",",
")",
":",
"def",
"connect",
"(",
"self",
",",
"factory",
")",
":",
"result",
... | make a sample endpoint for testing . | train | false |
16,797 | def momentcondunbound(distfn, params, mom2, quantile=None):
(shape, loc, scale) = params
mom2diff = (np.array(distfn.stats(shape, loc, scale)) - mom2)
if (not (quantile is None)):
(pq, xq) = quantile
cdfdiff = (distfn.cdf(xq, shape, loc, scale) - pq)
return np.concatenate([mom2diff, cdfdiff[:1]])
return mom2diff
| [
"def",
"momentcondunbound",
"(",
"distfn",
",",
"params",
",",
"mom2",
",",
"quantile",
"=",
"None",
")",
":",
"(",
"shape",
",",
"loc",
",",
"scale",
")",
"=",
"params",
"mom2diff",
"=",
"(",
"np",
".",
"array",
"(",
"distfn",
".",
"stats",
"(",
"... | moment conditions for estimating distribution parameters using method of moments . | train | false |
16,798 | def isInIOThread():
return (ioThread == getThreadID())
| [
"def",
"isInIOThread",
"(",
")",
":",
"return",
"(",
"ioThread",
"==",
"getThreadID",
"(",
")",
")"
] | are we in the thread responsable for i/o requests ? . | train | false |
16,799 | def allowed_gai_family():
family = socket.AF_INET
if HAS_IPV6:
family = socket.AF_UNSPEC
return family
| [
"def",
"allowed_gai_family",
"(",
")",
":",
"family",
"=",
"socket",
".",
"AF_INET",
"if",
"HAS_IPV6",
":",
"family",
"=",
"socket",
".",
"AF_UNSPEC",
"return",
"family"
] | this function is designed to work in the context of getaddrinfo . | train | false |
16,800 | def get_default_price_list(party):
if party.default_price_list:
return party.default_price_list
if (party.doctype == u'Customer'):
price_list = frappe.db.get_value(u'Customer Group', party.customer_group, u'default_price_list')
if price_list:
return price_list
return None
| [
"def",
"get_default_price_list",
"(",
"party",
")",
":",
"if",
"party",
".",
"default_price_list",
":",
"return",
"party",
".",
"default_price_list",
"if",
"(",
"party",
".",
"doctype",
"==",
"u'Customer'",
")",
":",
"price_list",
"=",
"frappe",
".",
"db",
"... | return default price list for party . | train | false |
16,801 | def serialize_user(user, node=None, admin=False, full=False, is_profile=False):
fullname = user.display_full_name(node=node)
ret = {'id': str(user._primary_key), 'registered': user.is_registered, 'surname': user.family_name, 'fullname': fullname, 'shortname': (fullname if (len(fullname) < 50) else ((fullname[:23] + '...') + fullname[(-23):])), 'gravatar_url': gravatar(user, use_ssl=True, size=settings.PROFILE_IMAGE_MEDIUM), 'active': user.is_active}
if (node is not None):
if admin:
flags = {'visible': False, 'permission': 'read'}
else:
flags = {'visible': node.contributor_set.filter(user=user, visible=True).exists(), 'permission': reduce_permissions(node.get_permissions(user))}
ret.update(flags)
if user.is_registered:
ret.update({'url': user.url, 'absolute_url': user.absolute_url, 'display_absolute_url': user.display_absolute_url, 'date_registered': user.date_registered.strftime('%Y-%m-%d')})
if full:
if is_profile:
ret['emails'] = ([{'address': each, 'primary': (each.strip().lower() == user.username.strip().lower()), 'confirmed': True} for each in user.emails] + [{'address': each, 'primary': (each.strip().lower() == user.username.strip().lower()), 'confirmed': False} for each in user.unconfirmed_emails])
if user.is_merged:
merger = user.merged_by
merged_by = {'id': str(merger._primary_key), 'url': merger.url, 'absolute_url': merger.absolute_url}
else:
merged_by = None
ret.update({'number_projects': get_projects(user).count(), 'number_public_projects': get_public_projects(user).count(), 'activity_points': user.get_activity_points(), 'gravatar_url': gravatar(user, use_ssl=True, size=settings.PROFILE_IMAGE_LARGE), 'is_merged': user.is_merged, 'merged_by': merged_by})
return ret
| [
"def",
"serialize_user",
"(",
"user",
",",
"node",
"=",
"None",
",",
"admin",
"=",
"False",
",",
"full",
"=",
"False",
",",
"is_profile",
"=",
"False",
")",
":",
"fullname",
"=",
"user",
".",
"display_full_name",
"(",
"node",
"=",
"node",
")",
"ret",
... | returns a milestones-friendly representation of a user object . | train | false |
16,802 | def trim_trailing_lines(lines):
lines = list(lines)
while (lines and (not lines[(-1)])):
lines.pop((-1))
return lines
| [
"def",
"trim_trailing_lines",
"(",
"lines",
")",
":",
"lines",
"=",
"list",
"(",
"lines",
")",
"while",
"(",
"lines",
"and",
"(",
"not",
"lines",
"[",
"(",
"-",
"1",
")",
"]",
")",
")",
":",
"lines",
".",
"pop",
"(",
"(",
"-",
"1",
")",
")",
... | trim trailing blank lines . | train | false |
16,804 | def lint_general(tool_source, lint_ctx):
version = tool_source.parse_version()
if (not version):
lint_ctx.error(ERROR_VERSION_MSG)
else:
lint_ctx.valid((VALID_VERSION_MSG % version))
name = tool_source.parse_name()
if (not name):
lint_ctx.error(ERROR_NAME_MSG)
else:
lint_ctx.valid((VALID_NAME_MSG % name))
tool_id = tool_source.parse_id()
if (not tool_id):
lint_ctx.error(ERROR_ID_MSG)
else:
lint_ctx.valid((VALID_ID_MSG % tool_id))
if re.search('\\s', tool_id):
lint_ctx.warn('Tool id contains a space - this is discouraged.')
| [
"def",
"lint_general",
"(",
"tool_source",
",",
"lint_ctx",
")",
":",
"version",
"=",
"tool_source",
".",
"parse_version",
"(",
")",
"if",
"(",
"not",
"version",
")",
":",
"lint_ctx",
".",
"error",
"(",
"ERROR_VERSION_MSG",
")",
"else",
":",
"lint_ctx",
".... | check tool version . | train | false |
16,805 | def strip_entities(value):
return re.sub('&(?:\\w+|#\\d);', '', value)
| [
"def",
"strip_entities",
"(",
"value",
")",
":",
"return",
"re",
".",
"sub",
"(",
"'&(?:\\\\w+|#\\\\d);'",
",",
"''",
",",
"value",
")"
] | returns the given html with all entities stripped . | train | false |
16,806 | def collect_registered_bears_dirs(entrypoint):
collected_dirs = []
for ep in pkg_resources.iter_entry_points(entrypoint):
registered_package = None
try:
registered_package = ep.load()
except pkg_resources.DistributionNotFound:
continue
collected_dirs.append(os.path.abspath(os.path.dirname(registered_package.__file__)))
return collected_dirs
| [
"def",
"collect_registered_bears_dirs",
"(",
"entrypoint",
")",
":",
"collected_dirs",
"=",
"[",
"]",
"for",
"ep",
"in",
"pkg_resources",
".",
"iter_entry_points",
"(",
"entrypoint",
")",
":",
"registered_package",
"=",
"None",
"try",
":",
"registered_package",
"=... | searches setuptools for the entrypoint and returns the bear directories given by the module . | train | false |
16,808 | def read_uint32(fid):
return _unpack_simple(fid, '>u4', np.uint32)
| [
"def",
"read_uint32",
"(",
"fid",
")",
":",
"return",
"_unpack_simple",
"(",
"fid",
",",
"'>u4'",
",",
"np",
".",
"uint32",
")"
] | read unsigned 32bit integer from bti file . | train | false |
16,809 | def create_target_index(lang):
create_index()
return STORAGE.create_index(TargetSchema(), ('target-%s' % lang))
| [
"def",
"create_target_index",
"(",
"lang",
")",
":",
"create_index",
"(",
")",
"return",
"STORAGE",
".",
"create_index",
"(",
"TargetSchema",
"(",
")",
",",
"(",
"'target-%s'",
"%",
"lang",
")",
")"
] | creates traget string index for given language . | train | false |
16,810 | def get_ndata(ofile):
data = [next(ofile)]
loc = 1
if (data[0].strip()[0] == '{'):
raise ValueError('This looks like a sparse ARFF: not supported yet')
for i in ofile:
loc += 1
return loc
| [
"def",
"get_ndata",
"(",
"ofile",
")",
":",
"data",
"=",
"[",
"next",
"(",
"ofile",
")",
"]",
"loc",
"=",
"1",
"if",
"(",
"data",
"[",
"0",
"]",
".",
"strip",
"(",
")",
"[",
"0",
"]",
"==",
"'{'",
")",
":",
"raise",
"ValueError",
"(",
"'This ... | read the whole file to get number of data attributes . | train | false |
16,811 | def _validate_customization_args_and_values(item_name, item_type, customization_args, ca_specs_to_validate_against):
ca_spec_names = [ca_spec.name for ca_spec in ca_specs_to_validate_against]
if (not isinstance(customization_args, dict)):
raise utils.ValidationError(('Expected customization args to be a dict, received %s' % customization_args))
customization_args = _get_full_customization_args(customization_args, ca_specs_to_validate_against)
extra_args = []
for arg_name in customization_args.keys():
if (not isinstance(arg_name, basestring)):
raise utils.ValidationError(('Invalid customization arg name: %s' % arg_name))
if (arg_name not in ca_spec_names):
extra_args.append(arg_name)
logging.warning(('%s %s does not support customization arg %s.' % (item_name.capitalize(), item_type, arg_name)))
for extra_arg in extra_args:
del customization_args[extra_arg]
for ca_spec in ca_specs_to_validate_against:
try:
customization_args[ca_spec.name]['value'] = schema_utils.normalize_against_schema(customization_args[ca_spec.name]['value'], ca_spec.schema)
except Exception:
pass
| [
"def",
"_validate_customization_args_and_values",
"(",
"item_name",
",",
"item_type",
",",
"customization_args",
",",
"ca_specs_to_validate_against",
")",
":",
"ca_spec_names",
"=",
"[",
"ca_spec",
".",
"name",
"for",
"ca_spec",
"in",
"ca_specs_to_validate_against",
"]",
... | validates the given customization_args dict against the specs set out in ca_specs_to_validate_against . | train | false |
16,812 | def exec_prompt_glib(bus, prompt):
from gi.repository import GLib
loop = GLib.MainLoop()
result = []
def callback(dismissed, unlocked):
result.append(dismissed)
result.append(unlocked)
loop.quit()
exec_prompt(bus, prompt, callback)
loop.run()
return (result[0], result[1])
| [
"def",
"exec_prompt_glib",
"(",
"bus",
",",
"prompt",
")",
":",
"from",
"gi",
".",
"repository",
"import",
"GLib",
"loop",
"=",
"GLib",
".",
"MainLoop",
"(",
")",
"result",
"=",
"[",
"]",
"def",
"callback",
"(",
"dismissed",
",",
"unlocked",
")",
":",
... | like :func:exec_prompt . | train | false |
16,813 | def markdown_column_params():
(yield (tables.Move, None, 'effect'))
(yield (tables.Move, None, 'short_effect'))
for cls in tables.mapped_classes:
for translation_cls in cls.translation_classes:
for column in translation_cls.__table__.c:
if (column.info.get('string_getter') == markdown.MarkdownString):
(yield (cls, translation_cls, column.name))
| [
"def",
"markdown_column_params",
"(",
")",
":",
"(",
"yield",
"(",
"tables",
".",
"Move",
",",
"None",
",",
"'effect'",
")",
")",
"(",
"yield",
"(",
"tables",
".",
"Move",
",",
"None",
",",
"'short_effect'",
")",
")",
"for",
"cls",
"in",
"tables",
".... | check all markdown values scans the database schema for markdown columns . | train | false |
16,814 | @docstring.dedent_interpd
def complex_spectrum(x, Fs=None, window=None, pad_to=None, sides=None):
return _single_spectrum_helper(x=x, Fs=Fs, window=window, pad_to=pad_to, sides=sides, mode=u'complex')
| [
"@",
"docstring",
".",
"dedent_interpd",
"def",
"complex_spectrum",
"(",
"x",
",",
"Fs",
"=",
"None",
",",
"window",
"=",
"None",
",",
"pad_to",
"=",
"None",
",",
"sides",
"=",
"None",
")",
":",
"return",
"_single_spectrum_helper",
"(",
"x",
"=",
"x",
... | compute the complex-valued frequency spectrum of *x* . | train | false |
16,816 | @pytest.mark.skipif((not PY3), reason='Only necessary on Python3')
def test_not_ascii():
try:
preferred_encoding = locale.getpreferredencoding()
fs_enc = codecs.lookup(preferred_encoding).name
except Exception:
fs_enc = 'ascii'
assert (fs_enc != 'ascii')
| [
"@",
"pytest",
".",
"mark",
".",
"skipif",
"(",
"(",
"not",
"PY3",
")",
",",
"reason",
"=",
"'Only necessary on Python3'",
")",
"def",
"test_not_ascii",
"(",
")",
":",
"try",
":",
"preferred_encoding",
"=",
"locale",
".",
"getpreferredencoding",
"(",
")",
... | make sure that the systems preferred encoding is not ascii . | train | false |
16,817 | @pytest.mark.parametrize('url_text', ['http://abc123.com/this/awesome/url.html', 'https://supersecret.gov/nsa/files.txt', None])
def test_set_hover_url(url_widget, url_text):
url_widget.set_hover_url(url_text)
if (url_text is not None):
assert (url_widget.text() == url_text)
assert (url_widget._urltype == url.UrlType.hover)
else:
assert (url_widget.text() == '')
assert (url_widget._urltype == url.UrlType.normal)
| [
"@",
"pytest",
".",
"mark",
".",
"parametrize",
"(",
"'url_text'",
",",
"[",
"'http://abc123.com/this/awesome/url.html'",
",",
"'https://supersecret.gov/nsa/files.txt'",
",",
"None",
"]",
")",
"def",
"test_set_hover_url",
"(",
"url_widget",
",",
"url_text",
")",
":",
... | test text when hovering over a link . | train | false |
16,818 | def assert_has_line(output, line):
match = re.search(('^%s$' % re.escape(line)), output, flags=re.MULTILINE)
assert (match is not None), ("No line of output file was '%s' (output was '%s') " % (line, output))
| [
"def",
"assert_has_line",
"(",
"output",
",",
"line",
")",
":",
"match",
"=",
"re",
".",
"search",
"(",
"(",
"'^%s$'",
"%",
"re",
".",
"escape",
"(",
"line",
")",
")",
",",
"output",
",",
"flags",
"=",
"re",
".",
"MULTILINE",
")",
"assert",
"(",
... | asserts the specified output contains the line specified the argument line . | train | false |
16,819 | def test_patched_no_errwindow(capfd, monkeypatch):
monkeypatch.setattr('qutebrowser.misc.checkpyver.sys.argv', [sys.argv[0], '--no-err-windows'])
monkeypatch.setattr('qutebrowser.misc.checkpyver.sys.hexversion', 50331648)
monkeypatch.setattr('qutebrowser.misc.checkpyver.sys.exit', (lambda status: None))
checkpyver.check_python_version()
(stdout, stderr) = capfd.readouterr()
assert (not stdout)
assert re.match(TEXT, stderr), stderr
| [
"def",
"test_patched_no_errwindow",
"(",
"capfd",
",",
"monkeypatch",
")",
":",
"monkeypatch",
".",
"setattr",
"(",
"'qutebrowser.misc.checkpyver.sys.argv'",
",",
"[",
"sys",
".",
"argv",
"[",
"0",
"]",
",",
"'--no-err-windows'",
"]",
")",
"monkeypatch",
".",
"s... | test with a patched sys . | train | false |
16,820 | def sample(prediction):
p = np.zeros(shape=[1, vocabulary_size], dtype=np.float)
p[(0, sample_distribution(prediction[0]))] = 1.0
return p
| [
"def",
"sample",
"(",
"prediction",
")",
":",
"p",
"=",
"np",
".",
"zeros",
"(",
"shape",
"=",
"[",
"1",
",",
"vocabulary_size",
"]",
",",
"dtype",
"=",
"np",
".",
"float",
")",
"p",
"[",
"(",
"0",
",",
"sample_distribution",
"(",
"prediction",
"["... | sample an index from a probability array . | train | false |
16,821 | def delete_all_thumbnails(path, recursive=True):
total = 0
for thumbs in all_thumbnails(path, recursive=recursive).values():
total += _delete_using_thumbs_list(thumbs)
return total
| [
"def",
"delete_all_thumbnails",
"(",
"path",
",",
"recursive",
"=",
"True",
")",
":",
"total",
"=",
"0",
"for",
"thumbs",
"in",
"all_thumbnails",
"(",
"path",
",",
"recursive",
"=",
"recursive",
")",
".",
"values",
"(",
")",
":",
"total",
"+=",
"_delete_... | delete all files within a path which match the thumbnails pattern . | train | true |
16,822 | def _ofport_retry(fn):
@six.wraps(fn)
def wrapped(*args, **kwargs):
self = args[0]
new_fn = tenacity.retry(reraise=True, retry=tenacity.retry_if_result(_ofport_result_pending), wait=tenacity.wait_exponential(multiplier=0.01, max=1), stop=tenacity.stop_after_delay(self.vsctl_timeout))(fn)
return new_fn(*args, **kwargs)
return wrapped
| [
"def",
"_ofport_retry",
"(",
"fn",
")",
":",
"@",
"six",
".",
"wraps",
"(",
"fn",
")",
"def",
"wrapped",
"(",
"*",
"args",
",",
"**",
"kwargs",
")",
":",
"self",
"=",
"args",
"[",
"0",
"]",
"new_fn",
"=",
"tenacity",
".",
"retry",
"(",
"reraise",... | decorator for retrying when ovs has yet to assign an ofport . | train | false |
16,823 | def init(mpstate):
return SerialModule(mpstate)
| [
"def",
"init",
"(",
"mpstate",
")",
":",
"return",
"SerialModule",
"(",
"mpstate",
")"
] | initialize all standard dirs . | train | false |
16,824 | def handdrawn():
table = s3db.vulnerability_document
s3.filter = ((s3db.doc_image.doc_id == table.doc_id) & (table.document_type == 'map'))
return s3_rest_controller('doc', 'image')
| [
"def",
"handdrawn",
"(",
")",
":",
"table",
"=",
"s3db",
".",
"vulnerability_document",
"s3",
".",
"filter",
"=",
"(",
"(",
"s3db",
".",
"doc_image",
".",
"doc_id",
"==",
"table",
".",
"doc_id",
")",
"&",
"(",
"table",
".",
"document_type",
"==",
"'map... | rest controller for hand-drawn maps . | train | false |
16,825 | def mult_matrix((a1, b1, c1, d1, e1, f1), (a0, b0, c0, d0, e0, f0)):
return (((a0 * a1) + (c0 * b1)), ((b0 * a1) + (d0 * b1)), ((a0 * c1) + (c0 * d1)), ((b0 * c1) + (d0 * d1)), (((a0 * e1) + (c0 * f1)) + e0), (((b0 * e1) + (d0 * f1)) + f0))
| [
"def",
"mult_matrix",
"(",
"(",
"a1",
",",
"b1",
",",
"c1",
",",
"d1",
",",
"e1",
",",
"f1",
")",
",",
"(",
"a0",
",",
"b0",
",",
"c0",
",",
"d0",
",",
"e0",
",",
"f0",
")",
")",
":",
"return",
"(",
"(",
"(",
"a0",
"*",
"a1",
")",
"+",
... | returns the multiplication of two matrices . | train | true |
16,827 | def test_optimization_minimizes_kl_divergence():
random_state = check_random_state(0)
(X, _) = make_blobs(n_features=3, random_state=random_state)
kl_divergences = []
for n_iter in [200, 250, 300]:
tsne = TSNE(n_components=2, perplexity=10, learning_rate=100.0, n_iter=n_iter, random_state=0)
tsne.fit_transform(X)
kl_divergences.append(tsne.kl_divergence_)
assert_less_equal(kl_divergences[1], kl_divergences[0])
assert_less_equal(kl_divergences[2], kl_divergences[1])
| [
"def",
"test_optimization_minimizes_kl_divergence",
"(",
")",
":",
"random_state",
"=",
"check_random_state",
"(",
"0",
")",
"(",
"X",
",",
"_",
")",
"=",
"make_blobs",
"(",
"n_features",
"=",
"3",
",",
"random_state",
"=",
"random_state",
")",
"kl_divergences",... | t-sne should give a lower kl divergence with more iterations . | train | false |
16,828 | def check_conv_gradinputs_shape(image_shape, kernel_shape, output_shape, border_mode, subsample, filter_dilation=None):
image_shape = tuple(image_shape)
kernel_shape = tuple(kernel_shape)
output_shape = tuple(output_shape)
if ((len(image_shape) != len(kernel_shape)) or (len(image_shape) != len(output_shape))):
return False
if ((len(image_shape) - 2) != len(subsample)):
return False
if ((filter_dilation is not None) and ((len(image_shape) - 2) != len(filter_dilation))):
return False
computed_output_shape = get_conv_output_shape(image_shape, kernel_shape, border_mode, subsample, filter_dilation)
def check_dim(given, computed):
if ((given is None) or (computed is None)):
return True
try:
given = get_scalar_constant_value(given)
computed = get_scalar_constant_value(computed)
return (int(given) == int(computed))
except NotScalarConstantError:
return True
return all((check_dim(given, computed) for (given, computed) in zip(output_shape, computed_output_shape)))
| [
"def",
"check_conv_gradinputs_shape",
"(",
"image_shape",
",",
"kernel_shape",
",",
"output_shape",
",",
"border_mode",
",",
"subsample",
",",
"filter_dilation",
"=",
"None",
")",
":",
"image_shape",
"=",
"tuple",
"(",
"image_shape",
")",
"kernel_shape",
"=",
"tup... | this function checks if the given image shapes are consistent . | train | false |
16,829 | def _filter_sample_ids_from_category_state_coverage(metadata_map, sample_ids, coverage_category, subject_category, consider_state_fn, min_num_states=None, required_states=None):
subjects = defaultdict(list)
for samp_id in sample_ids:
subject = metadata_map.getCategoryValue(samp_id, subject_category)
subjects[subject].append(samp_id)
samp_ids_to_keep = []
num_subjects_kept = 0
states_kept = []
for (subject, samp_ids) in subjects.items():
subject_covered_states = set(metadata_map.getCategoryValues(samp_ids, coverage_category))
keep_subject = True
if (min_num_states is not None):
if (sum([consider_state_fn(s) for s in subject_covered_states]) < min_num_states):
keep_subject = False
if (keep_subject and (required_states is not None)):
if (len((subject_covered_states & required_states)) != len(required_states)):
keep_subject = False
if keep_subject:
samp_ids_to_keep.extend(samp_ids)
states_kept.extend(subject_covered_states)
num_subjects_kept += 1
return (set(samp_ids_to_keep), num_subjects_kept, set(states_kept))
| [
"def",
"_filter_sample_ids_from_category_state_coverage",
"(",
"metadata_map",
",",
"sample_ids",
",",
"coverage_category",
",",
"subject_category",
",",
"consider_state_fn",
",",
"min_num_states",
"=",
"None",
",",
"required_states",
"=",
"None",
")",
":",
"subjects",
... | helper function to perform filtering based on category state coverage . | train | false |
16,830 | def test_feature_max_length_on_scenario_outline():
feature = Feature.from_string(FEATURE6)
assert_equals(feature.max_length, 79)
| [
"def",
"test_feature_max_length_on_scenario_outline",
"(",
")",
":",
"feature",
"=",
"Feature",
".",
"from_string",
"(",
"FEATURE6",
")",
"assert_equals",
"(",
"feature",
".",
"max_length",
",",
"79",
")"
] | the max length of a feature considering when the table of some of the scenario oulines is longer than the remaining things . | train | false |
16,831 | def percentError(out, true):
arrout = array(out).flatten()
wrong = where((arrout != array(true).flatten()))[0].size
return ((100.0 * float(wrong)) / float(arrout.size))
| [
"def",
"percentError",
"(",
"out",
",",
"true",
")",
":",
"arrout",
"=",
"array",
"(",
"out",
")",
".",
"flatten",
"(",
")",
"wrong",
"=",
"where",
"(",
"(",
"arrout",
"!=",
"array",
"(",
"true",
")",
".",
"flatten",
"(",
")",
")",
")",
"[",
"0... | return percentage of mismatch between out and target values . | train | false |
16,833 | def get_service_protocols(service):
cmd = '--permanent --service={0} --get-protocols'.format(service)
return __firewall_cmd(cmd).split()
| [
"def",
"get_service_protocols",
"(",
"service",
")",
":",
"cmd",
"=",
"'--permanent --service={0} --get-protocols'",
".",
"format",
"(",
"service",
")",
"return",
"__firewall_cmd",
"(",
"cmd",
")",
".",
"split",
"(",
")"
] | list protocols of a service . | train | false |
16,835 | def tunnel_request_data(host, port, proxy_auth_header=None):
host_value = ((to_bytes(host, encoding='ascii') + ':') + to_bytes(str(port)))
tunnel_req = (('CONNECT ' + host_value) + ' HTTP/1.1\r\n')
tunnel_req += (('Host: ' + host_value) + '\r\n')
if proxy_auth_header:
tunnel_req += (('Proxy-Authorization: ' + proxy_auth_header) + '\r\n')
tunnel_req += '\r\n'
return tunnel_req
| [
"def",
"tunnel_request_data",
"(",
"host",
",",
"port",
",",
"proxy_auth_header",
"=",
"None",
")",
":",
"host_value",
"=",
"(",
"(",
"to_bytes",
"(",
"host",
",",
"encoding",
"=",
"'ascii'",
")",
"+",
"':'",
")",
"+",
"to_bytes",
"(",
"str",
"(",
"por... | return binary content of a connect request . | train | false |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.