id_within_dataset
int64
1
55.5k
snippet
stringlengths
19
14.2k
tokens
listlengths
6
1.63k
nl
stringlengths
6
352
split_within_dataset
stringclasses
1 value
is_duplicated
bool
2 classes
18,484
def build_match_conditions(doctype, as_condition=True):
    """Return match conditions for *doctype* as a list or an SQL string.

    Thin delegation wrapper; the actual work is done by
    frappe.desk.reportview.build_match_conditions.
    """
    from frappe.desk import reportview
    return reportview.build_match_conditions(doctype, as_condition)
[ "def", "build_match_conditions", "(", "doctype", ",", "as_condition", "=", "True", ")", ":", "import", "frappe", ".", "desk", ".", "reportview", "return", "frappe", ".", "desk", ".", "reportview", ".", "build_match_conditions", "(", "doctype", ",", "as_condition...
return match for given doctype as list or sql .
train
false
18,485
def test_VAE_cost():
    """Smoke test: the VAE trains properly with the VAE criterion cost."""
    yaml_path = os.path.join(os.path.dirname(__file__),
                             'test_vae_cost_vae_criterion.yaml')
    trainer = yaml_parse.load_path(yaml_path)
    trainer.main_loop()
[ "def", "test_VAE_cost", "(", ")", ":", "yaml_src_path", "=", "os", ".", "path", ".", "join", "(", "os", ".", "path", ".", "dirname", "(", "__file__", ")", ",", "'test_vae_cost_vae_criterion.yaml'", ")", "train_object", "=", "yaml_parse", ".", "load_path", "(...
vae trains properly with the vae cost .
train
false
18,487
def _notebook_run(path):
    """Execute a notebook via nbconvert and collect output.

    Returns a (parsed_notebook, errors) tuple.  Cells whose traceback
    contains 'SKIP' are tolerated, as are kernel timeouts; the
    (possibly partially executed) notebook is always written back out.
    """
    kernel_name = 'python%d' % sys.version_info[0]
    this_file_directory = os.path.dirname(__file__)
    errors = []
    with tempfile.NamedTemporaryFile(suffix='.ipynb', mode='wt') as fout:
        with open(path) as f:
            nb = nbformat.read(f, as_version=4)
        nb.metadata.get('kernelspec', {})['name'] = kernel_name
        ep = ExecutePreprocessor(kernel_name=kernel_name, timeout=10)
        try:
            ep.preprocess(nb, {'metadata': {'path': this_file_directory}})
        except CellExecutionError as e:
            # Notebooks opt out of strict execution by embedding 'SKIP'
            # in the raising cell's traceback.
            if 'SKIP' in e.traceback:
                # Parenthesized single-arg print works on Python 2 AND 3;
                # the original used the Py2-only statement form.
                print(str(e.traceback).split('\n')[-2])
            else:
                # Bare raise preserves the original traceback.
                raise
        except TimeoutError as e:
            print(e)
        finally:
            nbformat.write(nb, fout)
    return (nb, errors)
[ "def", "_notebook_run", "(", "path", ")", ":", "kernel_name", "=", "(", "'python%d'", "%", "sys", ".", "version_info", "[", "0", "]", ")", "this_file_directory", "=", "os", ".", "path", ".", "dirname", "(", "__file__", ")", "errors", "=", "[", "]", "wi...
execute a notebook via nbconvert and collect output .
train
false
18,489
@profiler.trace
@memoized
def tenant_quota_usages(request, tenant_id=None):
    """Get our quotas and construct our usage object.

    Defaults to the request user's project when tenant_id is omitted.
    """
    tenant_id = tenant_id or request.user.project_id
    disabled_quotas = get_disabled_quotas(request)
    usages = QuotaUsage()
    quota_data = get_tenant_quota_data(
        request, disabled_quotas=disabled_quotas, tenant_id=tenant_id)
    for quota in quota_data:
        usages.add_quota(quota)
    for collect in (_get_tenant_compute_usages,
                    _get_tenant_network_usages,
                    _get_tenant_volume_usages):
        collect(request, usages, disabled_quotas, tenant_id)
    return usages
[ "@", "profiler", ".", "trace", "@", "memoized", "def", "tenant_quota_usages", "(", "request", ",", "tenant_id", "=", "None", ")", ":", "if", "(", "not", "tenant_id", ")", ":", "tenant_id", "=", "request", ".", "user", ".", "project_id", "disabled_quotas", ...
get our quotas and construct our usage object .
train
false
18,492
def _FileMatches(path, excl_regexps): for r in excl_regexps: if re.match(r, path): return False return True
[ "def", "_FileMatches", "(", "path", ",", "excl_regexps", ")", ":", "for", "r", "in", "excl_regexps", ":", "if", "re", ".", "match", "(", "r", ",", "path", ")", ":", "return", "False", "return", "True" ]
returns true if the specified path matches none of the specified exclude regular expressions .
train
false
18,493
def Canonicalize(node, output=None, **kw):
    """Canonicalize a DOM document/element node and all descendants to UTF-8.

    If *output* (a writable stream) is given, write the canonical form to
    it and return None; otherwise return the canonical text.
    """
    if output:
        # apply() is deprecated since Python 2.3 and removed in Python 3;
        # a direct call with unpacking is exactly equivalent.
        _implementation(node, output.write, **kw)
    else:
        s = StringIO.StringIO()
        _implementation(node, s.write, **kw)
        return s.getvalue()
[ "def", "Canonicalize", "(", "node", ",", "output", "=", "None", ",", "**", "kw", ")", ":", "if", "output", ":", "apply", "(", "_implementation", ",", "(", "node", ",", "output", ".", "write", ")", ",", "kw", ")", "else", ":", "s", "=", "StringIO", ...
canonicalize -> utf-8 canonicalize a dom document/element node and all descendents .
train
true
18,494
def CheckTransaction(request_trusted, request_app_id, transaction):
    """Check that this transaction is valid.

    Asserts the protobuf type, then delegates app-id validation.
    """
    assert isinstance(transaction, datastore_pb.Transaction)
    CheckAppId(request_trusted, request_app_id, transaction.app())
[ "def", "CheckTransaction", "(", "request_trusted", ",", "request_app_id", ",", "transaction", ")", ":", "assert", "isinstance", "(", "transaction", ",", "datastore_pb", ".", "Transaction", ")", "CheckAppId", "(", "request_trusted", ",", "request_app_id", ",", "trans...
check that this transaction is valid .
train
false
18,495
def DoesTestHaveLabels(cls, labels): labels = set(labels) for name in dir(cls): if name.startswith('test'): item = getattr(cls, name, None) if labels.intersection(getattr(item, 'labels', set(['small']))): return True return False
[ "def", "DoesTestHaveLabels", "(", "cls", ",", "labels", ")", ":", "labels", "=", "set", "(", "labels", ")", "for", "name", "in", "dir", "(", "cls", ")", ":", "if", "name", ".", "startswith", "(", "'test'", ")", ":", "item", "=", "getattr", "(", "cl...
returns true if any tests in cls have any of the labels .
train
false
18,496
def find_loader(fullname):
    """Find a PEP 302 loader object for *fullname*.

    Returns the first importer-provided loader, or None if no importer
    in the chain claims the module.
    """
    candidates = (imp.find_module(fullname)
                  for imp in iter_importers(fullname))
    return next((loader for loader in candidates if loader is not None),
                None)
[ "def", "find_loader", "(", "fullname", ")", ":", "for", "importer", "in", "iter_importers", "(", "fullname", ")", ":", "loader", "=", "importer", ".", "find_module", "(", "fullname", ")", "if", "(", "loader", "is", "not", "None", ")", ":", "return", "loa...
find a pep 302 "loader" object for fullname if fullname contains dots .
train
true
18,497
def prepareFailedName(release):
    """Standardize a release name for the failed-downloads DB."""
    fixed = urllib.unquote(release)
    if fixed.endswith('.nzb'):
        # Drop the extension: keep everything before the last dot.
        fixed = fixed.rpartition('.')[0]
    fixed = re.sub('[\\.\\-\\+\\ ]', '_', fixed)
    return ss(fixed)
[ "def", "prepareFailedName", "(", "release", ")", ":", "fixed", "=", "urllib", ".", "unquote", "(", "release", ")", "if", "fixed", ".", "endswith", "(", "'.nzb'", ")", ":", "fixed", "=", "fixed", ".", "rpartition", "(", "'.'", ")", "[", "0", "]", "fix...
standardizes release name for failed db .
train
false
18,498
def weight_variable(shape):
    """Create a tf.Variable of the given shape, initialized to zeros.

    NOTE(review): the original docstring claimed a normal-distribution
    initializer, but the code initializes with zeros — confirm intent.
    """
    return tf.Variable(tf.zeros(shape))
[ "def", "weight_variable", "(", "shape", ")", ":", "initial", "=", "tf", ".", "zeros", "(", "shape", ")", "return", "tf", ".", "Variable", "(", "initial", ")" ]
helper function to create a weight variable initialized with a normal distribution parameters shape : list size of weight variable .
train
false
18,499
def reload_modules():
    """Tell the minion to reload the execution modules.

    Always reports success.
    """
    return True
[ "def", "reload_modules", "(", ")", ":", "return", "True" ]
tell the minion to reload the execution modules cli example: .
train
false
18,501
def _get_enabled_provider(provider_id):
    """Get an enabled provider by its provider_id, or raise ValueError."""
    found = provider.Registry.get(provider_id)
    if not found:
        raise ValueError('Provider %s not enabled' % provider_id)
    return found
[ "def", "_get_enabled_provider", "(", "provider_id", ")", ":", "enabled_provider", "=", "provider", ".", "Registry", ".", "get", "(", "provider_id", ")", "if", "(", "not", "enabled_provider", ")", ":", "raise", "ValueError", "(", "(", "'Provider %s not enabled'", ...
gets an enabled provider by its provider_id member or throws .
train
false
18,502
def compareYAscending(point, pointOther):
    """Comparator for sorting points in ascending y.

    Returns -1, 0 or 1 as point.y is less than, equal to, or greater
    than pointOther.y.
    """
    lhs, rhs = point.y, pointOther.y
    if lhs < rhs:
        return -1
    return 1 if lhs > rhs else 0
[ "def", "compareYAscending", "(", "point", ",", "pointOther", ")", ":", "if", "(", "point", ".", "y", "<", "pointOther", ".", "y", ")", ":", "return", "(", "-", "1", ")", "return", "int", "(", "(", "point", ".", "y", ">", "pointOther", ".", "y", "...
get comparison in order to sort points in ascending y .
train
false
18,503
def get_params_type(descriptor):
    """Return the parameter types of a method descriptor as a list.

    E.g. '(int str)void' -> ['int', 'str'], '()void' -> [].

    The original rebuilt the list with a redundant identity comprehension
    and a dead empty-list branch; str.split() already returns [] when
    there are no parameters.
    """
    return descriptor.split(')')[0][1:].split()
[ "def", "get_params_type", "(", "descriptor", ")", ":", "params", "=", "descriptor", ".", "split", "(", "')'", ")", "[", "0", "]", "[", "1", ":", "]", ".", "split", "(", ")", "if", "params", ":", "return", "[", "param", "for", "param", "in", "params...
return the parameters type of a descriptor (e .
train
true
18,505
def escape_identifier(text, reg=KWD_RE):
    """Escape partial C identifiers so they can be used as attributes.

    Empty input becomes '_', a leading digit gains a '_' prefix, and any
    keyword matched by *reg* gets an underscore appended.
    """
    if not text:
        return '_'
    prefixed = ('_' + text) if text[0].isdigit() else text
    return reg.sub('\\1_', prefixed)
[ "def", "escape_identifier", "(", "text", ",", "reg", "=", "KWD_RE", ")", ":", "if", "(", "not", "text", ")", ":", "return", "'_'", "if", "text", "[", "0", "]", ".", "isdigit", "(", ")", ":", "text", "=", "(", "'_'", "+", "text", ")", "return", ...
escape partial c identifiers so they can be used as attributes/arguments .
train
true
18,506
def preview_loading(request):
    """Serve an intentionally blank HTML page."""
    blank = u'<html><head><title></title></head><body></body></html>'
    return HttpResponse(blank)
[ "def", "preview_loading", "(", "request", ")", ":", "return", "HttpResponse", "(", "u'<html><head><title></title></head><body></body></html>'", ")" ]
this page is blank .
train
false
18,507
def test_select_birthday_widget():
    """Exercise SelectBirthdayWidget choices, classes and rendering."""
    assert (SelectBirthdayWidget.FORMAT_CHOICES['%d'] ==
            [(x, str(x)) for x in range(1, 32)])
    assert (SelectBirthdayWidget.FORMAT_CHOICES['%m'] ==
            [(x, str(x)) for x in range(1, 13)])
    assert (SelectBirthdayWidget.FORMAT_CLASSES ==
            {'%d': 'select_date_day',
             '%m': 'select_date_month',
             '%Y': 'select_date_year'})
    widget = SelectBirthdayWidget(years=[0, 1])
    assert widget.FORMAT_CHOICES['%Y'] == [(0, '0'), (1, '1')]

    class Field(object):
        # Minimal stand-in for a form field.
        id = 'world'
        name = 'helloWorld'
        format = '%d %m %Y'
        data = None

    html = widget(field=Field(), surrounded_div='test-div')
    assert 'world' in html
    assert 'helloWorld' in html
    assert 'class="select_date_day"' in html
    assert 'class="select_date_month"' in html
    assert 'class="select_date_year"' in html
    assert '<div class="test-div">' in html
[ "def", "test_select_birthday_widget", "(", ")", ":", "assert", "(", "SelectBirthdayWidget", ".", "FORMAT_CHOICES", "[", "'%d'", "]", "==", "[", "(", "x", ",", "str", "(", "x", ")", ")", "for", "x", "in", "range", "(", "1", ",", "32", ")", "]", ")", ...
test the selectdatewidget .
train
false
18,509
def fontifyPython(document):
    """Syntax-color every <pre class="python"> listing in *document*."""
    def is_python_listing(node):
        return (node.nodeName == 'pre'
                and node.hasAttribute('class')
                and node.getAttribute('class') == 'python')

    for listing in domhelpers.findElements(document, is_python_listing):
        fontifyPythonNode(listing)
[ "def", "fontifyPython", "(", "document", ")", ":", "def", "matcher", "(", "node", ")", ":", "return", "(", "(", "node", ".", "nodeName", "==", "'pre'", ")", "and", "node", ".", "hasAttribute", "(", "'class'", ")", "and", "(", "node", ".", "getAttribute...
syntax color any node in the given document which contains a python source listing .
train
false
18,510
def solrctl():
    """Return the full path of the first 'solrctl' found on PATH, else None."""
    for directory in os.environ.get('PATH', '').split(os.path.pathsep):
        candidate = os.path.join(directory, 'solrctl')
        if os.path.exists(candidate):
            return candidate
    return None
[ "def", "solrctl", "(", ")", ":", "for", "dirname", "in", "os", ".", "environ", ".", "get", "(", "'PATH'", ",", "''", ")", ".", "split", "(", "os", ".", "path", ".", "pathsep", ")", ":", "path", "=", "os", ".", "path", ".", "join", "(", "dirname...
solrctl path .
train
false
18,511
def attach_const_node(node, name, value):
    """Register *value* as a Const in *node*'s locals under *name*.

    Names that are special attributes of the node are left alone.
    """
    if name not in node.special_attributes:
        _attach_local_node(node, const_factory(value), name)
[ "def", "attach_const_node", "(", "node", ",", "name", ",", "value", ")", ":", "if", "(", "not", "(", "name", "in", "node", ".", "special_attributes", ")", ")", ":", "_attach_local_node", "(", "node", ",", "const_factory", "(", "value", ")", ",", "name", ...
create a const node and register it in the locals of the given node with the specified name .
train
true
18,513
def image_get_all(context, filters=None, marker=None, limit=None,
                  sort_key=None, sort_dir=None, member_status='accepted',
                  is_public=None, admin_as_user=False, return_tag=False,
                  v1_mode=False):
    """Get all images that match zero or more filters.

    Supports pagination (marker/limit), multi-key sorting, visibility
    rules (including 'community' images), and optional tag loading and
    v1 dict mutation.
    """
    sort_key = ['created_at'] if not sort_key else sort_key
    default_sort_dir = 'desc'
    if not sort_dir:
        sort_dir = [default_sort_dir] * len(sort_key)
    elif len(sort_dir) == 1:
        default_sort_dir = sort_dir[0]
        sort_dir *= len(sort_key)
    filters = filters or {}
    visibility = filters.pop('visibility', None)
    showing_deleted = ('changes-since' in filters
                       or filters.get('deleted', False))
    (img_cond, prop_cond, tag_cond) = _make_conditions_from_filters(
        filters, is_public)
    query = _select_images_query(context, img_cond, admin_as_user,
                                 member_status, visibility)
    if visibility is not None:
        query = query.filter(models.Image.visibility == visibility)
    elif context.owner is None:
        # Anonymous callers never see community images.
        query = query.filter(models.Image.visibility != 'community')
    else:
        community_filters = [models.Image.owner == context.owner,
                             models.Image.visibility != 'community']
        query = query.filter(sa_sql.or_(*community_filters))
    if prop_cond:
        for prop_condition in prop_cond:
            query = query.join(models.ImageProperty, aliased=True).filter(
                sa_sql.and_(*prop_condition))
    if tag_cond:
        for tag_condition in tag_cond:
            query = query.join(models.ImageTag, aliased=True).filter(
                sa_sql.and_(*tag_condition))
    marker_image = None
    if marker is not None:
        marker_image = _image_get(context, marker,
                                  force_show_deleted=showing_deleted)
    # Guarantee a deterministic ordering by always sorting on
    # created_at and id last.
    for key in ['created_at', 'id']:
        if key not in sort_key:
            sort_key.append(key)
            sort_dir.append(default_sort_dir)
    query = _paginate_query(query, models.Image, limit, sort_key,
                            marker=marker_image, sort_dir=None,
                            sort_dirs=sort_dir)
    query = query.options(
        sa_orm.joinedload(models.Image.properties)).options(
            sa_orm.joinedload(models.Image.locations))
    if return_tag:
        query = query.options(sa_orm.joinedload(models.Image.tags))
    images = []
    for image in query.all():
        image_dict = image.to_dict()
        image_dict = _normalize_locations(
            context, image_dict, force_show_deleted=showing_deleted)
        if return_tag:
            image_dict = _normalize_tags(image_dict)
        if v1_mode:
            image_dict = db_utils.mutate_image_dict_to_v1(image_dict)
        images.append(image_dict)
    return images
[ "def", "image_get_all", "(", "context", ",", "filters", "=", "None", ",", "marker", "=", "None", ",", "limit", "=", "None", ",", "sort_key", "=", "None", ",", "sort_dir", "=", "None", ",", "member_status", "=", "'accepted'", ",", "is_public", "=", "None"...
get all images that match zero or more filters .
train
false
18,515
def get_profiler_results_dir(autodir):
    """Return the profiler-sync profiling directory under *autodir*."""
    subdirs = ('results', 'default', 'profiler_sync', 'profiling')
    return os.path.join(autodir, *subdirs)
[ "def", "get_profiler_results_dir", "(", "autodir", ")", ":", "return", "os", ".", "path", ".", "join", "(", "autodir", ",", "'results'", ",", "'default'", ",", "'profiler_sync'", ",", "'profiling'", ")" ]
given the directory of the autotest_remote client used to run a profiler .
train
false
18,516
def fitsopen(name, mode='readonly', memmap=None, save_backup=False,
             cache=True, lazy_load_hdus=None, **kwargs):
    """Factory function to open a FITS file and return an HDUList.

    Resolves memmap/lazy_load_hdus defaults from astropy configuration,
    translates the deprecated 'uint16' keyword to 'uint', and rejects
    empty filenames.
    """
    from .. import conf
    if memmap is None:
        # Distinguish "unset" (None) from explicitly disabled (False).
        memmap = None if conf.use_memmap else False
    else:
        memmap = bool(memmap)
    if lazy_load_hdus is None:
        lazy_load_hdus = conf.lazy_load_hdus
    else:
        lazy_load_hdus = bool(lazy_load_hdus)
    if 'uint16' in kwargs and 'uint' not in kwargs:
        kwargs['uint'] = kwargs.pop('uint16')
        warnings.warn('The uint16 keyword argument is deprecated since '
                      'v1.1.0. Use the uint argument instead.',
                      AstropyDeprecationWarning)
    if 'uint' not in kwargs:
        kwargs['uint'] = conf.enable_uint
    if not name:
        raise ValueError('Empty filename: {}'.format(repr(name)))
    return HDUList.fromfile(name, mode, memmap, save_backup, cache,
                            lazy_load_hdus, **kwargs)
[ "def", "fitsopen", "(", "name", ",", "mode", "=", "'readonly'", ",", "memmap", "=", "None", ",", "save_backup", "=", "False", ",", "cache", "=", "True", ",", "lazy_load_hdus", "=", "None", ",", "**", "kwargs", ")", ":", "from", ".", ".", "import", "c...
factory function to open a fits file and return an hdulist object .
train
false
18,517
def seconds_to_time(x):
    """Convert a number of seconds into a datetime.time.

    Fractional seconds are preserved as microseconds.
    """
    total_us = int(x * (10 ** 6))
    total_s, microseconds = divmod(total_us, 10 ** 6)
    total_m, seconds = divmod(total_s, 60)
    hours, minutes = divmod(total_m, 60)
    return time(hours, minutes, seconds, microseconds)
[ "def", "seconds_to_time", "(", "x", ")", ":", "t", "=", "int", "(", "(", "x", "*", "(", "10", "**", "6", ")", ")", ")", "ms", "=", "(", "t", "%", "(", "10", "**", "6", ")", ")", "t", "=", "(", "t", "//", "(", "10", "**", "6", ")", ")"...
convert a number of second into a time .
train
true
18,518
def task_update(context, task_id, values, session=None):
    """Update a task (and its task-info, if any) and return it."""
    session = session or get_session()
    with session.begin():
        task_info_values = _pop_task_info_values(values)
        task_ref = _task_get(context, task_id, session)
        _drop_protected_attrs(models.Task, values)
        values['updated_at'] = timeutils.utcnow()
        _task_update(context, task_ref, values, session)
        if task_info_values:
            _task_info_update(context, task_id, task_info_values, session)
        return task_get(context, task_id, session)
[ "def", "task_update", "(", "context", ",", "task_id", ",", "values", ",", "session", "=", "None", ")", ":", "session", "=", "(", "session", "or", "get_session", "(", ")", ")", "with", "session", ".", "begin", "(", ")", ":", "task_info_values", "=", "_p...
update a task object .
train
false
18,519
def convert_ListProperty(model, prop, kwargs):
    """ListProperty has no form-field equivalent; always return None."""
    return None
[ "def", "convert_ListProperty", "(", "model", ",", "prop", ",", "kwargs", ")", ":", "return", "None" ]
returns a form field for a db .
train
false
18,520
def ssh_command_as_root(ssh_connection, cmd, check_exit_code=True):
    """Execute a remote command as root over an existing SSH connection.

    Opens a raw channel, enters 'ioscli oem_setup_env', feeds *cmd*
    followed by 'exit', and returns (stdout, stderr) file objects.
    """
    LOG.debug(_('Running cmd (SSH-as-root): %s') % cmd)
    chan = ssh_connection._transport.open_session()
    chan.exec_command('ioscli oem_setup_env')
    bufsize = -1
    stdin = chan.makefile('wb', bufsize)
    stdout = chan.makefile('rb', bufsize)
    stderr = chan.makefile_stderr('rb', bufsize)
    stdin.write('%s\n%s\n' % (cmd, 'exit'))
    stdin.flush()
    exit_status = chan.recv_exit_status()
    if exit_status != -1:
        LOG.debug(_('Result was %s') % exit_status)
        if check_exit_code and exit_status != 0:
            # NOTE(review): ''.join(cmd) would concatenate without
            # separators if cmd were a list — presumably cmd is a
            # string here; confirm against callers.
            raise nova_exception.ProcessExecutionError(
                exit_code=exit_status, stdout=stdout, stderr=stderr,
                cmd=''.join(cmd))
    return (stdout, stderr)
[ "def", "ssh_command_as_root", "(", "ssh_connection", ",", "cmd", ",", "check_exit_code", "=", "True", ")", ":", "LOG", ".", "debug", "(", "(", "_", "(", "'Running cmd (SSH-as-root): %s'", ")", "%", "cmd", ")", ")", "chan", "=", "ssh_connection", ".", "_trans...
method to execute remote command as root .
train
false
18,521
def VaOutput(format, args):
    """Format *args* with *format* and write the result to the output file.

    When nested (_Level > 0), every non-empty, non-comment line is
    indented by the current level.
    """
    text = format % args
    if _Level > 0:
        indent = ' DCTB ' * _Level
        lines = text.split('\n')
        for i, line in enumerate(lines):
            if line and line[0] != '#':
                lines[i] = indent + line
        text = '\n'.join(lines)
    _File.write(text + '\n')
[ "def", "VaOutput", "(", "format", ",", "args", ")", ":", "text", "=", "(", "format", "%", "args", ")", "if", "(", "_Level", ">", "0", ")", ":", "indent", "=", "(", "' DCTB '", "*", "_Level", ")", "lines", "=", "text", ".", "split", "(", "'\\n'", ...
call this with a format string and argument tuple for the format .
train
false
18,523
def build_pillar_data(options):
    """Build a YAML-formatted string to properly pass pillar data."""
    pillar = {'test_transport': options.test_transport,
              'cloud_only': options.cloud_only,
              'with_coverage': options.test_without_coverage is False}
    # Settings that are emitted only when explicitly provided (not None).
    for attr in ('test_git_commit', 'test_git_url', 'bootstrap_salt_url',
                 'bootstrap_salt_commit'):
        value = getattr(options, attr)
        if value is not None:
            pillar[attr] = value
    # Settings that are emitted only when truthy.
    for attr in ('package_source_dir', 'package_build_dir',
                 'package_artifact_dir'):
        value = getattr(options, attr)
        if value:
            pillar[attr] = value
    if options.pillar:
        pillar.update(dict(options.pillar))
    return yaml.dump(pillar, default_flow_style=True, indent=0,
                     width=sys.maxint).rstrip()
[ "def", "build_pillar_data", "(", "options", ")", ":", "pillar", "=", "{", "'test_transport'", ":", "options", ".", "test_transport", ",", "'cloud_only'", ":", "options", ".", "cloud_only", ",", "'with_coverage'", ":", "(", "options", ".", "test_without_coverage", ...
build a yaml formatted string to properly pass pillar data .
train
false
18,524
def process_tex(lines):
    """Remove unnecessary scipy.* section titles from the LaTeX lines.

    Also rewrites standalone '\\strong{See Also:}' lines into a
    'See Also' paragraph.
    """
    scipy_section_prefixes = ('\\section{scipy.',
                              '\\subsection{scipy.',
                              '\\subsubsection{scipy.',
                              '\\paragraph{scipy.',
                              '\\subparagraph{scipy.')
    kept = []
    for line in lines:
        line = re.sub('^\\s*\\\\strong{See Also:}\\s*$',
                      '\\paragraph{See Also}', line)
        # startswith accepts a tuple: one call replaces the or-chain.
        if not line.startswith(scipy_section_prefixes):
            kept.append(line)
    return kept
[ "def", "process_tex", "(", "lines", ")", ":", "new_lines", "=", "[", "]", "for", "line", "in", "lines", ":", "line", "=", "re", ".", "sub", "(", "'^\\\\s*\\\\\\\\strong{See Also:}\\\\s*$'", ",", "'\\\\paragraph{See Also}'", ",", "line", ")", "if", "(", "line...
remove unnecessary section titles from the latex file .
train
false
18,525
def sample_func(v):
    """Return *v* added to itself (doubles numbers, repeats sequences)."""
    return v + v
[ "def", "sample_func", "(", "v", ")", ":", "return", "(", "v", "+", "v", ")" ]
blah blah .
train
false
18,526
def CMakeStringEscape(a):
    """Escape the string *a* for use inside a CMake string literal.

    Backslashes are doubled first so later escapes are not re-escaped.
    """
    escaped = a.replace('\\', '\\\\')
    escaped = escaped.replace(';', '\\;')
    return escaped.replace('"', '\\"')
[ "def", "CMakeStringEscape", "(", "a", ")", ":", "return", "a", ".", "replace", "(", "'\\\\'", ",", "'\\\\\\\\'", ")", ".", "replace", "(", "';'", ",", "'\\\\;'", ")", ".", "replace", "(", "'\"'", ",", "'\\\\\"'", ")" ]
escapes the string a for use inside a cmake string .
train
false
18,527
def function(inputs, outputs, updates=None, **kwargs):
    """A wrapper around theano.function for the TensorFlow backend.

    Extra keyword arguments are ignored by this backend; a warning is
    issued when any are passed.
    """
    # Fix: the original used a mutable default argument (updates=[]),
    # which is shared across calls; rebuild the default per call.
    if updates is None:
        updates = []
    if len(kwargs) > 0:
        msg = ['Expected no kwargs, you passed %s' % len(kwargs),
               'kwargs passed to function are ignored with Tensorflow backend']
        warnings.warn('\n'.join(msg))
    return Function(inputs, outputs, updates=updates)
[ "def", "function", "(", "inputs", ",", "outputs", ",", "updates", "=", "[", "]", ",", "**", "kwargs", ")", ":", "if", "(", "len", "(", "kwargs", ")", ">", "0", ")", ":", "msg", "=", "[", "(", "'Expected no kwargs, you passed %s'", "%", "len", "(", ...
a wrapper around theano .
train
false
18,528
def apply_single_tag_set(tag_set, selection):
    """Return the selection narrowed to servers matching one tag set.

    A server matches when every key/value pair in *tag_set* is present
    in its tags.
    """
    def matches(server_tags):
        return all(key in server_tags and server_tags[key] == value
                   for key, value in tag_set.items())

    matching = [sd for sd in selection.server_descriptions
                if matches(sd.tags)]
    return selection.with_server_descriptions(matching)
[ "def", "apply_single_tag_set", "(", "tag_set", ",", "selection", ")", ":", "def", "tags_match", "(", "server_tags", ")", ":", "for", "(", "key", ",", "value", ")", "in", "tag_set", ".", "items", "(", ")", ":", "if", "(", "(", "key", "not", "in", "ser...
all servers matching one tag set .
train
true
18,529
@utils.arg('snapshot', metavar='<snapshot>', help='ID of the snapshot.')
@utils.service_type('monitor')
def do_snapshot_show(cs, args):
    """Show details about a snapshot."""
    found = _find_monitor_snapshot(cs, args.snapshot)
    _print_monitor_snapshot(found)
[ "@", "utils", ".", "arg", "(", "'snapshot'", ",", "metavar", "=", "'<snapshot>'", ",", "help", "=", "'ID of the snapshot.'", ")", "@", "utils", ".", "service_type", "(", "'monitor'", ")", "def", "do_snapshot_show", "(", "cs", ",", "args", ")", ":", "snapsh...
show details about a snapshot .
train
false
18,530
def istest(func):
    """Decorator to mark a function or method as a test."""
    func.__test__ = True
    return func
[ "def", "istest", "(", "func", ")", ":", "func", ".", "__test__", "=", "True", "return", "func" ]
decorator to mark a function or method as a test .
train
false
18,531
def calc_text_angle(start, end):
    """Midpoint angle for wedge label text.

    Angles between pi/2 and 3*pi/2 (which would render upside-down)
    are rotated by pi.
    """
    text_angle = (start + end) / 2.0
    needs_flip = (text_angle > np.pi / 2) & (text_angle < 3 * np.pi / 2)
    text_angle[needs_flip] = text_angle[needs_flip] + np.pi
    return text_angle
[ "def", "calc_text_angle", "(", "start", ",", "end", ")", ":", "text_angle", "=", "(", "(", "start", "+", "end", ")", "/", "2.0", ")", "shift_angles", "=", "(", "(", "text_angle", ">", "(", "np", ".", "pi", "/", "2", ")", ")", "&", "(", "text_angl...
produce a column of text angle values based on the bounds of the wedge .
train
false
18,532
def printc(text, color):
    """Print *text* in *color* when stdout is a terminal.

    Falls back to plain output when not attached to a tty, so piped
    output stays free of ANSI escapes.
    """
    if sys.stdout.isatty():
        # This branch was already a parenthesized single-argument print
        # (valid on Python 2 and 3).
        print((((('\x1b[' + codeCodes[color]) + 'm') + text) + '\x1b[0m'))
    else:
        # Fix: the original used the Python-2-only statement form
        # 'print text'; the call form behaves identically on both.
        print(text)
[ "def", "printc", "(", "text", ",", "color", ")", ":", "if", "sys", ".", "stdout", ".", "isatty", "(", ")", ":", "print", "(", "(", "(", "(", "'\\x1b['", "+", "codeCodes", "[", "color", "]", ")", "+", "'m'", ")", "+", "text", ")", "+", "'\\x1b[0...
print in color .
train
false
18,533
def get_labels(fields, doctype):
    """Return display labels for a list of (possibly aliased) field keys.

    Keys may be '`tabDoc`.`fieldname`' pairs or bare fieldnames; labels
    come from the doctype meta, falling back to a title-cased fieldname.
    Duplicate labels are disambiguated with a doctype prefix.
    """
    labels = []
    for key in fields:
        key = key.split(u' as ')[0]
        if u'.' in key:
            (parenttype, fieldname) = (key.split(u'.')[0][4:(-1)],
                                       key.split(u'.')[1].strip(u'`'))
        else:
            parenttype = doctype
            # BUG FIX: the original stripped backticks from the
            # *previous* iteration's fieldname (NameError on the first
            # bare key); strip them from the current key instead.
            fieldname = key.strip(u'`')
        df = frappe.get_meta(parenttype).get_field(fieldname)
        label = df.label if df else fieldname.title()
        if label in labels:
            label = (doctype + u': ') + label
        labels.append(label)
    return labels
[ "def", "get_labels", "(", "fields", ",", "doctype", ")", ":", "labels", "=", "[", "]", "for", "key", "in", "fields", ":", "key", "=", "key", ".", "split", "(", "u' as '", ")", "[", "0", "]", "if", "(", "u'.'", "in", "key", ")", ":", "(", "paren...
get labels .
train
false
18,534
def getConnectionVertexes(geometryOutput):
    """Collect the connection vertexes of *geometryOutput* into a list."""
    collected = []
    addConnectionVertexes(collected, geometryOutput)
    return collected
[ "def", "getConnectionVertexes", "(", "geometryOutput", ")", ":", "connectionVertexes", "=", "[", "]", "addConnectionVertexes", "(", "connectionVertexes", ",", "geometryOutput", ")", "return", "connectionVertexes" ]
get the connections and vertexes .
train
false
18,536
def kstest_normal(x, pvalmethod='approx'):
    """Lilliefors test for normality.

    Standardizes *x*, computes the two-sided KS statistic against the
    normal CDF, and returns (d_ks, pval).

    Raises ValueError for an unrecognized *pvalmethod* — the original
    fell through and raised a confusing NameError from an unbound pval.
    """
    x = np.asarray(x)
    z = (x - x.mean()) / x.std(ddof=1)
    nobs = len(z)
    d_ks = ksstat(z, stats.norm.cdf, alternative='two_sided')
    if pvalmethod == 'approx':
        pval = pval_lf(d_ks, nobs)
    elif pvalmethod == 'table':
        pval = lilliefors_table.prob(d_ks, nobs)
    else:
        raise ValueError("pvalmethod must be 'approx' or 'table'")
    return (d_ks, pval)
[ "def", "kstest_normal", "(", "x", ",", "pvalmethod", "=", "'approx'", ")", ":", "x", "=", "np", ".", "asarray", "(", "x", ")", "z", "=", "(", "(", "x", "-", "x", ".", "mean", "(", ")", ")", "/", "x", ".", "std", "(", "ddof", "=", "1", ")", ...
lilliefors test for normality .
train
false
18,537
def format_job_instance(job):
    """Format the job instance correctly.

    Metadata comes from job['metadata'] if present, otherwise from
    job['kwargs']['metadata'].
    """
    formatted = {'Function': job.get('fun', 'unknown-function'),
                 'Arguments': list(job.get('arg', [])),
                 'Target': job.get('tgt', 'unknown-target'),
                 'Target-type': job.get('tgt_type', []),
                 'User': job.get('user', 'root')}
    if 'metadata' in job:
        formatted['Metadata'] = job.get('metadata', {})
    elif 'kwargs' in job and 'metadata' in job['kwargs']:
        formatted['Metadata'] = job['kwargs'].get('metadata', {})
    return formatted
[ "def", "format_job_instance", "(", "job", ")", ":", "ret", "=", "{", "'Function'", ":", "job", ".", "get", "(", "'fun'", ",", "'unknown-function'", ")", ",", "'Arguments'", ":", "list", "(", "job", ".", "get", "(", "'arg'", ",", "[", "]", ")", ")", ...
format the job instance correctly .
train
true
18,538
def _esc(code): return '\x1b[{}m'.format(code)
[ "def", "_esc", "(", "code", ")", ":", "return", "'\\x1b[{}m'", ".", "format", "(", "code", ")" ]
get an ansi color code based on a color number .
train
false
18,540
def find_language(locale):
    """Return a locale we support, or None.

    Tries the exact code, then the SHORTER_LANGUAGES mapping, then the
    language-only form of the locale.
    """
    if not locale:
        return None
    supported = settings.AMO_LANGUAGES
    if locale in supported:
        return locale
    shorter = settings.SHORTER_LANGUAGES.get(locale)
    if shorter:
        return shorter
    locale = to_language(locale)
    return locale if locale in supported else None
[ "def", "find_language", "(", "locale", ")", ":", "if", "(", "not", "locale", ")", ":", "return", "None", "LANGS", "=", "settings", ".", "AMO_LANGUAGES", "if", "(", "locale", "in", "LANGS", ")", ":", "return", "locale", "loc", "=", "settings", ".", "SHO...
return a locale we support .
train
false
18,542
def reset_all_users():
    """Reset to a clean state by deleting all users.

    Deletes one-by-one so per-instance delete() logic still runs.
    """
    for account in User.objects.all():
        account.delete()
[ "def", "reset_all_users", "(", ")", ":", "for", "user", "in", "User", ".", "objects", ".", "all", "(", ")", ":", "user", ".", "delete", "(", ")" ]
reset to a clean state by deleting all users .
train
false
18,543
def assert_logs(log_action, node_key, index=(-1)):
    """Decorator asserting a log entry is added during a unit test.

    After the wrapped test runs, the node named by *node_key* on the
    test case must have a fresh log at position *index* (counting from
    the newest) whose action equals *log_action*.
    """
    def outer_wrapper(func):
        @functools.wraps(func)
        def wrapper(self, *args, **kwargs):
            node = getattr(self, node_key)
            last_log = node.logs.latest()
            func(self, *args, **kwargs)
            node.reload()
            new_log = node.logs.order_by('-date')[(-index) - 1]
            assert_not_equal(last_log._id, new_log._id)
            assert_equal(new_log.action, log_action)
            node.save()
        return wrapper
    return outer_wrapper
[ "def", "assert_logs", "(", "log_action", ",", "node_key", ",", "index", "=", "(", "-", "1", ")", ")", ":", "def", "outer_wrapper", "(", "func", ")", ":", "@", "functools", ".", "wraps", "(", "func", ")", "def", "wrapper", "(", "self", ",", "*", "ar...
a decorator to ensure a log is added during a unit test .
train
false
18,544
def _verify_dict_keys(expected_keys, target_dict, strict=True):
    """Verify the keys of *target_dict*; return an error message or None.

    With strict=True the provided keys must equal *expected_keys*
    exactly; otherwise *expected_keys* only needs to be a subset.
    """
    if not isinstance(target_dict, dict):
        return (_("Invalid input. '%(target_dict)s' must be a dictionary "
                  "with keys: %(expected_keys)s")
                % dict(target_dict=target_dict,
                       expected_keys=expected_keys))
    expected_keys = set(expected_keys)
    provided_keys = set(target_dict.keys())
    predicate = expected_keys.__eq__ if strict else expected_keys.issubset
    if not predicate(provided_keys):
        # NOTE: interpolating with locals() relies on the local names
        # expected_keys / provided_keys above.
        return (_("Validation of dictionary's keys failed."
                  "Expected keys: %(expected_keys)s "
                  "Provided keys: %(provided_keys)s") % locals())
[ "def", "_verify_dict_keys", "(", "expected_keys", ",", "target_dict", ",", "strict", "=", "True", ")", ":", "if", "(", "not", "isinstance", "(", "target_dict", ",", "dict", ")", ")", ":", "msg", "=", "(", "_", "(", "\"Invalid input. '%(target_dict)s' must be a...
allows to verify keys in a dictionary .
train
false
18,546
def calc_expected_status_length(status, short_url_length=23):
    """Calculate the effective length of a tweet.

    Every URL counts as *short_url_length* characters; other words count
    at their literal length, plus one per whitespace separator.
    """
    words = re.split(u'\\s', status)
    total = sum(short_url_length if is_url(word) else len(word)
                for word in words)
    return total + len(re.findall(u'\\s', status))
[ "def", "calc_expected_status_length", "(", "status", ",", "short_url_length", "=", "23", ")", ":", "status_length", "=", "0", "for", "word", "in", "re", ".", "split", "(", "u'\\\\s'", ",", "status", ")", ":", "if", "is_url", "(", "word", ")", ":", "statu...
calculates the length of a tweet .
train
false
18,547
def tuplize_dict(data_dict):
    """Convert 'table__0__key'-style keys into tuple keys.

    E.g. 'table__0__key' becomes ('table', 0, 'key'); every odd-indexed
    segment must parse as an integer, else df.DataError is raised.
    """
    tuplized = {}
    for key, value in data_dict.iteritems():
        parts = key.split('__')
        for position, part in enumerate(parts):
            if position % 2 == 1:
                try:
                    parts[position] = int(part)
                except ValueError:
                    raise df.DataError('Bad key')
        tuplized[tuple(parts)] = value
    return tuplized
[ "def", "tuplize_dict", "(", "data_dict", ")", ":", "tuplized_dict", "=", "{", "}", "for", "(", "key", ",", "value", ")", "in", "data_dict", ".", "iteritems", "(", ")", ":", "key_list", "=", "key", ".", "split", "(", "'__'", ")", "for", "(", "num", ...
takes a dict with keys of the form table__0__key and converts them to a tuple like .
train
false
18,548
def elide(text, length):
    """Elide *text* so it uses at most *length* chars.

    Longer text is cut to length-1 characters plus a Unicode ellipsis.
    Raises ValueError when length < 1.
    """
    if length < 1:
        raise ValueError('length must be >= 1!')
    if len(text) <= length:
        return text
    return text[:length - 1] + '\u2026'
[ "def", "elide", "(", "text", ",", "length", ")", ":", "if", "(", "length", "<", "1", ")", ":", "raise", "ValueError", "(", "'length must be >= 1!'", ")", "if", "(", "len", "(", "text", ")", "<=", "length", ")", ":", "return", "text", "else", ":", "...
elide text so it uses a maximum of length chars .
train
false
18,549
def generate_data(n_subjects, n_conditions):
    """Deterministic random data of shape (n_subjects, n_conditions).

    Uses a fixed seed (42) so repeated calls return identical data.
    """
    rng = np.random.RandomState(42)
    flat = rng.randn(n_subjects * n_conditions)
    return flat.reshape(n_subjects, n_conditions)
[ "def", "generate_data", "(", "n_subjects", ",", "n_conditions", ")", ":", "rng", "=", "np", ".", "random", ".", "RandomState", "(", "42", ")", "data", "=", "rng", ".", "randn", "(", "(", "n_subjects", "*", "n_conditions", ")", ")", ".", "reshape", "(",...
generate random blob-ish data with noisy features .
train
false
18,550
def change_DAILYSEARCH_FREQUENCY(freq): sickbeard.DAILYSEARCH_FREQUENCY = try_int(freq, sickbeard.DEFAULT_DAILYSEARCH_FREQUENCY) if (sickbeard.DAILYSEARCH_FREQUENCY < sickbeard.MIN_DAILYSEARCH_FREQUENCY): sickbeard.DAILYSEARCH_FREQUENCY = sickbeard.MIN_DAILYSEARCH_FREQUENCY sickbeard.dailySearchScheduler.cycleTime = datetime.timedelta(minutes=sickbeard.DAILYSEARCH_FREQUENCY)
[ "def", "change_DAILYSEARCH_FREQUENCY", "(", "freq", ")", ":", "sickbeard", ".", "DAILYSEARCH_FREQUENCY", "=", "try_int", "(", "freq", ",", "sickbeard", ".", "DEFAULT_DAILYSEARCH_FREQUENCY", ")", "if", "(", "sickbeard", ".", "DAILYSEARCH_FREQUENCY", "<", "sickbeard", ...
change frequency of daily search thread .
train
false
18,551
def general_gaussian(M, p, sig, sym=True): if _len_guards(M): return np.ones(M) (M, needs_trunc) = _extend(M, sym) n = (np.arange(0, M) - ((M - 1.0) / 2.0)) w = np.exp(((-0.5) * (np.abs((n / sig)) ** (2 * p)))) return _truncate(w, needs_trunc)
[ "def", "general_gaussian", "(", "M", ",", "p", ",", "sig", ",", "sym", "=", "True", ")", ":", "if", "_len_guards", "(", "M", ")", ":", "return", "np", ".", "ones", "(", "M", ")", "(", "M", ",", "needs_trunc", ")", "=", "_extend", "(", "M", ",",...
return a window with a generalized gaussian shape .
train
false
18,552
def mean_squared_error(y_true, y_pred): return tf.reduce_mean(tf.square((y_pred - y_true)))
[ "def", "mean_squared_error", "(", "y_true", ",", "y_pred", ")", ":", "return", "tf", ".", "reduce_mean", "(", "tf", ".", "square", "(", "(", "y_pred", "-", "y_true", ")", ")", ")" ]
l2 distance between tensors true and pred .
train
false
18,553
def test_angle_format_roundtripping(): a1 = Angle(0, unit=u.radian) a2 = Angle(10, unit=u.degree) a3 = Angle(0.543, unit=u.degree) a4 = Angle(u'1d2m3.4s') assert (Angle(str(a1)).degree == a1.degree) assert (Angle(str(a2)).degree == a2.degree) assert (Angle(str(a3)).degree == a3.degree) assert (Angle(str(a4)).degree == a4.degree) ra = Longitude(u'1h2m3.4s') dec = Latitude(u'1d2m3.4s') assert_allclose(Angle(str(ra)).degree, ra.degree) assert_allclose(Angle(str(dec)).degree, dec.degree)
[ "def", "test_angle_format_roundtripping", "(", ")", ":", "a1", "=", "Angle", "(", "0", ",", "unit", "=", "u", ".", "radian", ")", "a2", "=", "Angle", "(", "10", ",", "unit", "=", "u", ".", "degree", ")", "a3", "=", "Angle", "(", "0.543", ",", "un...
ensures that the string representation of an angle can be used to create a new valid angle .
train
false
18,554
def _comment_line(line): line = line.rstrip() if line: return ('# ' + line) else: return '#'
[ "def", "_comment_line", "(", "line", ")", ":", "line", "=", "line", ".", "rstrip", "(", ")", "if", "line", ":", "return", "(", "'# '", "+", "line", ")", "else", ":", "return", "'#'" ]
return a commented form of the given line .
train
false
18,556
def list_agents(profile=None): conn = _auth(profile) return conn.list_agents()
[ "def", "list_agents", "(", "profile", "=", "None", ")", ":", "conn", "=", "_auth", "(", "profile", ")", "return", "conn", ".", "list_agents", "(", ")" ]
list agents .
train
false
18,558
def list_empty(strings): for string in strings: if (len(string) > 0): return False return True
[ "def", "list_empty", "(", "strings", ")", ":", "for", "string", "in", "strings", ":", "if", "(", "len", "(", "string", ")", ">", "0", ")", ":", "return", "False", "return", "True" ]
check if list is exclusively made of empty strings .
train
false
18,559
def device_to_device(dst, src, size, stream=0): varargs = [] if stream: assert isinstance(stream, Stream) fn = driver.cuMemcpyDtoDAsync varargs.append(stream.handle) else: fn = driver.cuMemcpyDtoD fn(device_pointer(dst), device_pointer(src), size, *varargs)
[ "def", "device_to_device", "(", "dst", ",", "src", ",", "size", ",", "stream", "=", "0", ")", ":", "varargs", "=", "[", "]", "if", "stream", ":", "assert", "isinstance", "(", "stream", ",", "Stream", ")", "fn", "=", "driver", ".", "cuMemcpyDtoDAsync", ...
note: the underlying data pointer from the host data buffer is used and it should not be changed until the operation which can be asynchronous completes .
train
false
18,560
def vector_product(v0, v1, axis=0): return numpy.cross(v0, v1, axis=axis)
[ "def", "vector_product", "(", "v0", ",", "v1", ",", "axis", "=", "0", ")", ":", "return", "numpy", ".", "cross", "(", "v0", ",", "v1", ",", "axis", "=", "axis", ")" ]
return vector perpendicular to vectors .
train
true
18,562
def guvectorize(ftylist, signature, **kws): if isinstance(ftylist, str): ftylist = [ftylist] def wrap(func): guvec = GUVectorize(func, signature, **kws) for fty in ftylist: guvec.add(fty) return guvec.build_ufunc() return wrap
[ "def", "guvectorize", "(", "ftylist", ",", "signature", ",", "**", "kws", ")", ":", "if", "isinstance", "(", "ftylist", ",", "str", ")", ":", "ftylist", "=", "[", "ftylist", "]", "def", "wrap", "(", "func", ")", ":", "guvec", "=", "GUVectorize", "(",...
guvectorize a decorator to create numpy generialized-ufunc object from numba compiled code .
train
false
18,563
def show_source(request, project, subproject): (obj, source) = get_source(request, project, subproject) return render(request, 'source.html', {'object': obj, 'project': obj.project, 'source': source, 'title': (_('Source strings in %s') % force_text(obj))})
[ "def", "show_source", "(", "request", ",", "project", ",", "subproject", ")", ":", "(", "obj", ",", "source", ")", "=", "get_source", "(", "request", ",", "project", ",", "subproject", ")", "return", "render", "(", "request", ",", "'source.html'", ",", "...
show source strings summary and checks .
train
false
18,564
def handleCheckIDRequest(request, openid_request): if (not openid_request.idSelect()): id_url = getViewURL(request, idPage) if (id_url != openid_request.identity): error_response = ProtocolError(openid_request.message, ('This server cannot verify the URL %r' % (openid_request.identity,))) return displayResponse(request, error_response) if openid_request.immediate: openid_response = openid_request.answer(False) return displayResponse(request, openid_response) else: setRequest(request, openid_request) return showDecidePage(request, openid_request)
[ "def", "handleCheckIDRequest", "(", "request", ",", "openid_request", ")", ":", "if", "(", "not", "openid_request", ".", "idSelect", "(", ")", ")", ":", "id_url", "=", "getViewURL", "(", "request", ",", "idPage", ")", "if", "(", "id_url", "!=", "openid_req...
handle checkid_* requests .
train
true
18,565
def version_dict(version): match = version_re.match((version or '')) letters = 'alpha pre'.split() numbers = 'major minor1 minor2 minor3 alpha_ver pre_ver'.split() if match: d = match.groupdict() for letter in letters: d[letter] = (d[letter] if d[letter] else None) for num in numbers: if (d[num] == '*'): d[num] = 99 else: d[num] = (int(d[num]) if d[num] else None) else: d = dict(((k, None) for k in numbers)) d.update(((k, None) for k in letters)) return d
[ "def", "version_dict", "(", "version", ")", ":", "match", "=", "version_re", ".", "match", "(", "(", "version", "or", "''", ")", ")", "letters", "=", "'alpha pre'", ".", "split", "(", ")", "numbers", "=", "'major minor1 minor2 minor3 alpha_ver pre_ver'", ".", ...
turn a version string into a dict with major/minor/ .
train
true
18,566
def encode_local(string): if is_python3(): return string return string.encode((sys.getfilesystemencoding() or 'utf-8'))
[ "def", "encode_local", "(", "string", ")", ":", "if", "is_python3", "(", ")", ":", "return", "string", "return", "string", ".", "encode", "(", "(", "sys", ".", "getfilesystemencoding", "(", ")", "or", "'utf-8'", ")", ")" ]
converts string into users preferred encoding .
train
false
18,568
def add_s3(command_table, session, **kwargs): utils.rename_command(command_table, 's3', 's3api') command_table['s3'] = S3(session)
[ "def", "add_s3", "(", "command_table", ",", "session", ",", "**", "kwargs", ")", ":", "utils", ".", "rename_command", "(", "command_table", ",", "'s3'", ",", "'s3api'", ")", "command_table", "[", "'s3'", "]", "=", "S3", "(", "session", ")" ]
this creates a new service object for the s3 plugin .
train
false
18,571
def get_file_systems(filesystemid=None, keyid=None, key=None, profile=None, region=None, **kwargs): result = None client = _get_conn(key=key, keyid=keyid, profile=profile, region=region) if filesystemid: response = client.describe_file_systems(FileSystemId=filesystemid) result = response['FileSystems'] else: response = client.describe_file_systems() result = response['FileSystems'] while ('NextMarker' in response): response = client.describe_file_systems(Marker=response['NextMarker']) result.extend(response['FileSystems']) return result
[ "def", "get_file_systems", "(", "filesystemid", "=", "None", ",", "keyid", "=", "None", ",", "key", "=", "None", ",", "profile", "=", "None", ",", "region", "=", "None", ",", "**", "kwargs", ")", ":", "result", "=", "None", "client", "=", "_get_conn", ...
get all efs properties or a specific instance property if filesystemid is specified filesystemid - id of the file system to retrieve properties returns - list of all elastic file system properties cli example: .
train
false
18,572
def json_formatall(filenames, indent=DEFAULT_INDENT_SIZE, dry_run=False): errors = 0 console = logging.getLogger('console') for filename in filenames: try: result = json_format(filename, indent=indent, console=console, dry_run=dry_run) if (not result): errors += 1 except Exception as e: console.error('ERROR %s: %s (filename: %s)', e.__class__.__name__, e, filename) errors += 1 return errors
[ "def", "json_formatall", "(", "filenames", ",", "indent", "=", "DEFAULT_INDENT_SIZE", ",", "dry_run", "=", "False", ")", ":", "errors", "=", "0", "console", "=", "logging", ".", "getLogger", "(", "'console'", ")", "for", "filename", "in", "filenames", ":", ...
format/beautify a json file .
train
false
18,573
def ImportAndCall(node, results, names): obj = results['obj'].clone() if (obj.type == syms.arglist): newarglist = obj.clone() else: newarglist = Node(syms.arglist, [obj.clone()]) after = results['after'] if after: after = [n.clone() for n in after] new = Node(syms.power, ((Attr(Name(names[0]), Name(names[1])) + [Node(syms.trailer, [results['lpar'].clone(), newarglist, results['rpar'].clone()])]) + after)) new.prefix = node.prefix return new
[ "def", "ImportAndCall", "(", "node", ",", "results", ",", "names", ")", ":", "obj", "=", "results", "[", "'obj'", "]", ".", "clone", "(", ")", "if", "(", "obj", ".", "type", "==", "syms", ".", "arglist", ")", ":", "newarglist", "=", "obj", ".", "...
returns an import statement and calls a method of the module: import module module .
train
false
18,574
def test_lessthan(value, other): return (value < other)
[ "def", "test_lessthan", "(", "value", ",", "other", ")", ":", "return", "(", "value", "<", "other", ")" ]
check if value is less than other .
train
false
18,575
def key_type(key, host=None, port=None, db=None, password=None): server = _connect(host, port, db, password) return server.type(key)
[ "def", "key_type", "(", "key", ",", "host", "=", "None", ",", "port", "=", "None", ",", "db", "=", "None", ",", "password", "=", "None", ")", ":", "server", "=", "_connect", "(", "host", ",", "port", ",", "db", ",", "password", ")", "return", "se...
get redis key type cli example: .
train
true
18,577
def is_redirection(status): return (300 <= status <= 399)
[ "def", "is_redirection", "(", "status", ")", ":", "return", "(", "300", "<=", "status", "<=", "399", ")" ]
check if http status code is redirection .
train
false
18,578
def add_model_for_resource(resource, model): _RESOURCE_TO_MODEL_MAP[resource] = model
[ "def", "add_model_for_resource", "(", "resource", ",", "model", ")", ":", "_RESOURCE_TO_MODEL_MAP", "[", "resource", "]", "=", "model" ]
adds a mapping between a callback resource and a db model .
train
false
18,580
def fmt_whitespace(value): value = WHITESPACE_RE.sub(u'<span class="hlspace">\\1</span>', value) value = value.replace(u' DCTB ', SPACE_TAB.format(_(u'Tab character'))) return value
[ "def", "fmt_whitespace", "(", "value", ")", ":", "value", "=", "WHITESPACE_RE", ".", "sub", "(", "u'<span class=\"hlspace\">\\\\1</span>'", ",", "value", ")", "value", "=", "value", ".", "replace", "(", "u' DCTB '", ",", "SPACE_TAB", ".", "format", "(", "_", ...
formats whitespace so that it is more visible .
train
false
18,581
@task(aliases=['pack']) def webpack(ctx, clean=False, watch=False, dev=False): if clean: clean_assets(ctx) if (os.getcwd() != HERE): os.chdir(HERE) webpack_bin = os.path.join(HERE, 'node_modules', 'webpack', 'bin', 'webpack.js') args = [webpack_bin] if (settings.DEBUG_MODE and dev): args += ['--colors'] else: args += ['--progress'] if watch: args += ['--watch'] config_file = ('webpack.admin.config.js' if dev else 'webpack.prod.config.js') args += ['--config {0}'.format(config_file)] command = ' '.join(args) ctx.run(command, echo=True)
[ "@", "task", "(", "aliases", "=", "[", "'pack'", "]", ")", "def", "webpack", "(", "ctx", ",", "clean", "=", "False", ",", "watch", "=", "False", ",", "dev", "=", "False", ")", ":", "if", "clean", ":", "clean_assets", "(", "ctx", ")", "if", "(", ...
build static assets with webpack .
train
false
18,582
@contextmanager def in_dir(dir=None): cwd = os.getcwd() if (dir is None): (yield cwd) return os.chdir(dir) (yield dir) os.chdir(cwd)
[ "@", "contextmanager", "def", "in_dir", "(", "dir", "=", "None", ")", ":", "cwd", "=", "os", ".", "getcwd", "(", ")", "if", "(", "dir", "is", "None", ")", ":", "(", "yield", "cwd", ")", "return", "os", ".", "chdir", "(", "dir", ")", "(", "yield...
change directory to given directory for duration of with block useful when you want to use in_tempdir for the final test .
train
false
18,583
def upgrade_master(ansible_module, config_base, from_version, to_version, backup): if (from_version == '3.0'): if (to_version == '3.1'): return upgrade_master_3_0_to_3_1(ansible_module, config_base, backup)
[ "def", "upgrade_master", "(", "ansible_module", ",", "config_base", ",", "from_version", ",", "to_version", ",", "backup", ")", ":", "if", "(", "from_version", "==", "'3.0'", ")", ":", "if", "(", "to_version", "==", "'3.1'", ")", ":", "return", "upgrade_mast...
upgrade entry point .
train
false
18,584
def connect_to_cloud_loadbalancers(region=None): return _create_client(ep_name='load_balancer', region=region)
[ "def", "connect_to_cloud_loadbalancers", "(", "region", "=", "None", ")", ":", "return", "_create_client", "(", "ep_name", "=", "'load_balancer'", ",", "region", "=", "region", ")" ]
creates a client for working with cloud loadbalancers .
train
false
18,585
def doctest_depends_on(exe=None, modules=None, disable_viewers=None): pyglet = False if ((modules is not None) and ('pyglet' in modules)): pyglet = True def depends_on_deco(fn): fn._doctest_depends_on = dict(exe=exe, modules=modules, disable_viewers=disable_viewers, pyglet=pyglet) if inspect.isclass(fn): fn._doctest_depdends_on = no_attrs_in_subclass(fn, fn._doctest_depends_on) return fn return depends_on_deco
[ "def", "doctest_depends_on", "(", "exe", "=", "None", ",", "modules", "=", "None", ",", "disable_viewers", "=", "None", ")", ":", "pyglet", "=", "False", "if", "(", "(", "modules", "is", "not", "None", ")", "and", "(", "'pyglet'", "in", "modules", ")",...
adds metadata about the depenencies which need to be met for doctesting the docstrings of the decorated objects .
train
false
18,587
def GetFirst(parameters, key, default=None): if (key in parameters): if parameters[key]: return parameters[key][0] return default
[ "def", "GetFirst", "(", "parameters", ",", "key", ",", "default", "=", "None", ")", ":", "if", "(", "key", "in", "parameters", ")", ":", "if", "parameters", "[", "key", "]", ":", "return", "parameters", "[", "key", "]", "[", "0", "]", "return", "de...
returns the first value of the given key .
train
false
18,588
def is_number_match(num1, num2): if (isinstance(num1, PhoneNumber) and isinstance(num2, PhoneNumber)): return _is_number_match_OO(num1, num2) elif isinstance(num1, PhoneNumber): return _is_number_match_OS(num1, num2) elif isinstance(num2, PhoneNumber): return _is_number_match_OS(num2, num1) else: return _is_number_match_SS(num1, num2)
[ "def", "is_number_match", "(", "num1", ",", "num2", ")", ":", "if", "(", "isinstance", "(", "num1", ",", "PhoneNumber", ")", "and", "isinstance", "(", "num2", ",", "PhoneNumber", ")", ")", ":", "return", "_is_number_match_OO", "(", "num1", ",", "num2", "...
takes two phone numbers and compares them for equality .
train
true
18,590
def write_delete_marker_file(directory): filepath = os.path.join(directory, PIP_DELETE_MARKER_FILENAME) marker_fp = open(filepath, 'w') marker_fp.write(DELETE_MARKER_MESSAGE) marker_fp.close()
[ "def", "write_delete_marker_file", "(", "directory", ")", ":", "filepath", "=", "os", ".", "path", ".", "join", "(", "directory", ",", "PIP_DELETE_MARKER_FILENAME", ")", "marker_fp", "=", "open", "(", "filepath", ",", "'w'", ")", "marker_fp", ".", "write", "...
write the pip delete marker file into this directory .
train
true
18,594
def hashvalue(arg): if isinstance(arg, XBlock): return unicode(arg.location) else: return unicode(arg)
[ "def", "hashvalue", "(", "arg", ")", ":", "if", "isinstance", "(", "arg", ",", "XBlock", ")", ":", "return", "unicode", "(", "arg", ".", "location", ")", "else", ":", "return", "unicode", "(", "arg", ")" ]
if arg is an xblock .
train
false
18,595
def select_app_name(): name = utils.generate_app_name() while App.objects.filter(id=name).exists(): name = utils.generate_app_name() return name
[ "def", "select_app_name", "(", ")", ":", "name", "=", "utils", ".", "generate_app_name", "(", ")", "while", "App", ".", "objects", ".", "filter", "(", "id", "=", "name", ")", ".", "exists", "(", ")", ":", "name", "=", "utils", ".", "generate_app_name",...
select a unique randomly generated app name .
train
false
18,596
def log_conditional_probability(segmented_topics, per_topic_postings, num_docs): m_lc = [] for s_i in segmented_topics: for (w_prime, w_star) in s_i: w_prime_docs = per_topic_postings[w_prime] w_star_docs = per_topic_postings[w_star] co_docs = w_prime_docs.intersection(w_star_docs) if w_star_docs: m_lc_i = np.log((((len(co_docs) / float(num_docs)) + EPSILON) / (len(w_star_docs) / float(num_docs)))) else: m_lc_i = 0.0 m_lc.append(m_lc_i) return m_lc
[ "def", "log_conditional_probability", "(", "segmented_topics", ",", "per_topic_postings", ",", "num_docs", ")", ":", "m_lc", "=", "[", "]", "for", "s_i", "in", "segmented_topics", ":", "for", "(", "w_prime", ",", "w_star", ")", "in", "s_i", ":", "w_prime_docs"...
this function calculates the log-conditional-probability measure which is used by coherence measures such as u_mass .
train
false
18,597
def TR0(rv): return rv.normal().factor().expand()
[ "def", "TR0", "(", "rv", ")", ":", "return", "rv", ".", "normal", "(", ")", ".", "factor", "(", ")", ".", "expand", "(", ")" ]
simplification of rational polynomials .
train
false
18,598
def func_accepts_var_args(func): return any((p for p in inspect.signature(func).parameters.values() if (p.kind == p.VAR_POSITIONAL)))
[ "def", "func_accepts_var_args", "(", "func", ")", ":", "return", "any", "(", "(", "p", "for", "p", "in", "inspect", ".", "signature", "(", "func", ")", ".", "parameters", ".", "values", "(", ")", "if", "(", "p", ".", "kind", "==", "p", ".", "VAR_PO...
return true if function func accepts positional arguments *args .
train
false
18,599
@click.group(cls=FlaskGroup, create_app=make_app, add_version_option=False) @click.option('--config', expose_value=False, callback=set_config, required=False, is_flag=False, is_eager=True, metavar='CONFIG', help='Specify the config to use in dotted module notation e.g. flaskbb.configs.default.DefaultConfig') @click.option('--version', expose_value=False, callback=get_version, is_flag=True, is_eager=True, help='Show the FlaskBB version.') def flaskbb(): pass
[ "@", "click", ".", "group", "(", "cls", "=", "FlaskGroup", ",", "create_app", "=", "make_app", ",", "add_version_option", "=", "False", ")", "@", "click", ".", "option", "(", "'--config'", ",", "expose_value", "=", "False", ",", "callback", "=", "set_confi...
this is the commandline interface for flaskbb .
train
false
18,600
def uniform_sequence(n): return [random.uniform(0, n) for i in range(n)]
[ "def", "uniform_sequence", "(", "n", ")", ":", "return", "[", "random", ".", "uniform", "(", "0", ",", "n", ")", "for", "i", "in", "range", "(", "n", ")", "]" ]
return sample sequence of length n from a uniform distribution .
train
false
18,601
def test_extras_after_wheel(script, data): simple = (script.site_packages / 'simple') no_extra = script.pip('install', '--no-index', '-f', data.find_links, 'requires_simple_extra', expect_stderr=True) assert (simple not in no_extra.files_created), no_extra.files_created extra = script.pip('install', '--no-index', '-f', data.find_links, 'requires_simple_extra[extra]', expect_stderr=True) assert (simple in extra.files_created), extra.files_created
[ "def", "test_extras_after_wheel", "(", "script", ",", "data", ")", ":", "simple", "=", "(", "script", ".", "site_packages", "/", "'simple'", ")", "no_extra", "=", "script", ".", "pip", "(", "'install'", ",", "'--no-index'", ",", "'-f'", ",", "data", ".", ...
test installing a package with extras after installing from a wheel .
train
false
18,602
def _base_ordering(base, degree): base_len = len(base) ordering = ([0] * degree) for i in range(base_len): ordering[base[i]] = i current = base_len for i in range(degree): if (i not in base): ordering[i] = current current += 1 return ordering
[ "def", "_base_ordering", "(", "base", ",", "degree", ")", ":", "base_len", "=", "len", "(", "base", ")", "ordering", "=", "(", "[", "0", "]", "*", "degree", ")", "for", "i", "in", "range", "(", "base_len", ")", ":", "ordering", "[", "base", "[", ...
order {0 .
train
false
18,603
def get_active_users(): return frappe.db.sql(u"select count(*) from `tabUser`\n DCTB DCTB where enabled = 1 and user_type != 'Website User'\n DCTB DCTB and name not in ({})\n DCTB DCTB and hour(timediff(now(), last_active)) < 72".format(u', '.join(([u'%s'] * len(STANDARD_USERS)))), STANDARD_USERS)[0][0]
[ "def", "get_active_users", "(", ")", ":", "return", "frappe", ".", "db", ".", "sql", "(", "u\"select count(*) from `tabUser`\\n DCTB DCTB where enabled = 1 and user_type != 'Website User'\\n DCTB DCTB and name not in ({})\\n DCTB DCTB and hour(timediff(now(), last_active)) < 72\"", ".",...
returns no .
train
false
18,604
@open_file(1, mode='wb') def write_edgelist(G, path, comments='#', delimiter=' ', data=True, encoding='utf-8'): for line in generate_edgelist(G, delimiter, data): line += '\n' path.write(line.encode(encoding))
[ "@", "open_file", "(", "1", ",", "mode", "=", "'wb'", ")", "def", "write_edgelist", "(", "G", ",", "path", ",", "comments", "=", "'#'", ",", "delimiter", "=", "' '", ",", "data", "=", "True", ",", "encoding", "=", "'utf-8'", ")", ":", "for", "line"...
write graph as a list of edges .
train
false
18,605
def enum(name, items, start=1, is_int=False): enums = [(v, i) for (i, v) in enumerate(items, start)] base = (pyenum.IntEnum if is_int else pyenum.Enum) base = pyenum.unique(base) return base(name, enums)
[ "def", "enum", "(", "name", ",", "items", ",", "start", "=", "1", ",", "is_int", "=", "False", ")", ":", "enums", "=", "[", "(", "v", ",", "i", ")", "for", "(", "i", ",", "v", ")", "in", "enumerate", "(", "items", ",", "start", ")", "]", "b...
construct a new enum object .
train
false
18,606
def get_bound(pts): (x0, y0, x1, y1) = (INF, INF, (- INF), (- INF)) for (x, y) in pts: x0 = min(x0, x) y0 = min(y0, y) x1 = max(x1, x) y1 = max(y1, y) return (x0, y0, x1, y1)
[ "def", "get_bound", "(", "pts", ")", ":", "(", "x0", ",", "y0", ",", "x1", ",", "y1", ")", "=", "(", "INF", ",", "INF", ",", "(", "-", "INF", ")", ",", "(", "-", "INF", ")", ")", "for", "(", "x", ",", "y", ")", "in", "pts", ":", "x0", ...
compute a minimal rectangle that covers all the points .
train
true
18,607
@pytest.hookimpl(hookwrapper=True) def pytest_sessionfinish(exitstatus): outcome = (yield) outcome.get_result() cache_dir = os.path.join(os.path.abspath(os.path.dirname(__file__)), '..', '.cache') try: os.mkdir(cache_dir) except FileExistsError: pass status_file = os.path.join(cache_dir, 'pytest_status') with open(status_file, 'w', encoding='ascii') as f: f.write(str(exitstatus))
[ "@", "pytest", ".", "hookimpl", "(", "hookwrapper", "=", "True", ")", "def", "pytest_sessionfinish", "(", "exitstatus", ")", ":", "outcome", "=", "(", "yield", ")", "outcome", ".", "get_result", "(", ")", "cache_dir", "=", "os", ".", "path", ".", "join",...
create a file to tell run_pytest .
train
false
18,609
def simplelist(inline): return paramfinder.findall(inline)
[ "def", "simplelist", "(", "inline", ")", ":", "return", "paramfinder", ".", "findall", "(", "inline", ")" ]
parse a string to a list .
train
false
18,611
def getNewRepository(): return ExportRepository()
[ "def", "getNewRepository", "(", ")", ":", "return", "ExportRepository", "(", ")" ]
get new repository .
train
false
18,612
@default_selem @pad_for_eccentric_selems def opening(image, selem=None, out=None): eroded = erosion(image, selem) out = dilation(eroded, selem, out=out, shift_x=True, shift_y=True) return out
[ "@", "default_selem", "@", "pad_for_eccentric_selems", "def", "opening", "(", "image", ",", "selem", "=", "None", ",", "out", "=", "None", ")", ":", "eroded", "=", "erosion", "(", "image", ",", "selem", ")", "out", "=", "dilation", "(", "eroded", ",", ...
return greyscale morphological opening of an image .
train
false