id_within_dataset
int64
1
55.5k
snippet
stringlengths
19
14.2k
tokens
listlengths
6
1.63k
nl
stringlengths
6
352
split_within_dataset
stringclasses
1 value
is_duplicated
bool
2 classes
39,724
def instance_group_members_get(context, group_uuid): return IMPL.instance_group_members_get(context, group_uuid)
[ "def", "instance_group_members_get", "(", "context", ",", "group_uuid", ")", ":", "return", "IMPL", ".", "instance_group_members_get", "(", "context", ",", "group_uuid", ")" ]
get the members from the group .
train
false
39,725
def vb_destroy_machine(name=None, timeout=10000): vbox = vb_get_box() log.info('Destroying machine %s', name) machine = vbox.findMachine(name) files = machine.unregister(2) progress = machine.deleteConfig(files) progress.waitForCompletion(timeout) log.info('Finished destroying machine %s', name)
[ "def", "vb_destroy_machine", "(", "name", "=", "None", ",", "timeout", "=", "10000", ")", ":", "vbox", "=", "vb_get_box", "(", ")", "log", ".", "info", "(", "'Destroying machine %s'", ",", "name", ")", "machine", "=", "vbox", ".", "findMachine", "(", "na...
attempts to get rid of a machine and all its files from the hypervisor .
train
true
39,726
def redirect_if_blocked(course_key, access_point='enrollment', **kwargs): if settings.FEATURES.get('EMBARGO'): is_blocked = (not check_course_access(course_key, **kwargs)) if is_blocked: if (access_point == 'courseware'): if (not RestrictedCourse.is_disabled_access_check(course_key)): return message_url_path(course_key, access_point) else: return message_url_path(course_key, access_point)
[ "def", "redirect_if_blocked", "(", "course_key", ",", "access_point", "=", "'enrollment'", ",", "**", "kwargs", ")", ":", "if", "settings", ".", "FEATURES", ".", "get", "(", "'EMBARGO'", ")", ":", "is_blocked", "=", "(", "not", "check_course_access", "(", "c...
redirect if the user does not have access to the course .
train
false
39,727
def create_gist(description, files): return gh.create_gist(description, files)
[ "def", "create_gist", "(", "description", ",", "files", ")", ":", "return", "gh", ".", "create_gist", "(", "description", ",", "files", ")" ]
create an anonymous public gist .
train
false
39,728
@register.tag(name='eval') def do_eval(parser, token): nodelist = parser.parse(('endeval',)) class EvalNode(template.Node, ): def render(self, context): return template.Template(nodelist.render(context)).render(template.Context(context)) parser.delete_first_token() return EvalNode()
[ "@", "register", ".", "tag", "(", "name", "=", "'eval'", ")", "def", "do_eval", "(", "parser", ",", "token", ")", ":", "nodelist", "=", "parser", ".", "parse", "(", "(", "'endeval'", ",", ")", ")", "class", "EvalNode", "(", "template", ".", "Node", ...
usage: {% eval %}1 + 1{% endeval %} .
train
true
39,732
def messageMethod(name, parameters): def dispatchMethod(self, *arguments): self.dispatcher(name, dict(zip(parameters, arguments))) dispatchMethod.__name__ = name return dispatchMethod
[ "def", "messageMethod", "(", "name", ",", "parameters", ")", ":", "def", "dispatchMethod", "(", "self", ",", "*", "arguments", ")", ":", "self", ".", "dispatcher", "(", "name", ",", "dict", "(", "zip", "(", "parameters", ",", "arguments", ")", ")", ")"...
creates method for dispatching messages .
train
false
39,733
def _decode_stdoutdata(stdoutdata): if (not isinstance(stdoutdata, bytes)): return stdoutdata encoding = getattr(sys.__stdout__, 'encoding', locale.getpreferredencoding()) if (encoding is None): return stdoutdata.decode() return stdoutdata.decode(encoding)
[ "def", "_decode_stdoutdata", "(", "stdoutdata", ")", ":", "if", "(", "not", "isinstance", "(", "stdoutdata", ",", "bytes", ")", ")", ":", "return", "stdoutdata", "encoding", "=", "getattr", "(", "sys", ".", "__stdout__", ",", "'encoding'", ",", "locale", "...
convert data read from stdout/stderr to unicode .
train
false
39,735
def is_none_string(val): if (not isinstance(val, six.string_types)): return False return (val.lower() == 'none')
[ "def", "is_none_string", "(", "val", ")", ":", "if", "(", "not", "isinstance", "(", "val", ",", "six", ".", "string_types", ")", ")", ":", "return", "False", "return", "(", "val", ".", "lower", "(", ")", "==", "'none'", ")" ]
check if a string represents a none value .
train
false
39,736
def vote(request, comment_id, vote): rating = {'up': 1, 'down': (-1)}.get(vote, False) if (not rating): raise Http404, 'Invalid vote' if (not request.user.is_authenticated()): raise Http404, _('Anonymous users cannot vote') try: comment = Comment.objects.get(pk=comment_id) except Comment.DoesNotExist: raise Http404, _('Invalid comment ID') if (comment.user.id == request.user.id): raise Http404, _('No voting for yourself') KarmaScore.objects.vote(request.user.id, comment_id, rating) comment = Comment.objects.get(pk=comment_id) return render_to_response('comments/karma_vote_accepted.html', {'comment': comment}, context_instance=RequestContext(request))
[ "def", "vote", "(", "request", ",", "comment_id", ",", "vote", ")", ":", "rating", "=", "{", "'up'", ":", "1", ",", "'down'", ":", "(", "-", "1", ")", "}", ".", "get", "(", "vote", ",", "False", ")", "if", "(", "not", "rating", ")", ":", "rai...
rate a comment templates: karma_vote_accepted context: comment comments .
train
false
39,737
def build_suite(app_module): suite = unittest.TestSuite() if hasattr(app_module, 'suite'): suite.addTest(app_module.suite()) else: suite.addTest(unittest.defaultTestLoader.loadTestsFromModule(app_module)) try: suite.addTest(doctest.DocTestSuite(app_module, checker=doctestOutputChecker, runner=DocTestRunner)) except ValueError: pass test_module = get_tests(app_module) if test_module: if hasattr(test_module, 'suite'): suite.addTest(test_module.suite()) else: suite.addTest(unittest.defaultTestLoader.loadTestsFromModule(test_module)) try: suite.addTest(doctest.DocTestSuite(test_module, checker=doctestOutputChecker, runner=DocTestRunner)) except ValueError: pass return suite
[ "def", "build_suite", "(", "app_module", ")", ":", "suite", "=", "unittest", ".", "TestSuite", "(", ")", "if", "hasattr", "(", "app_module", ",", "'suite'", ")", ":", "suite", ".", "addTest", "(", "app_module", ".", "suite", "(", ")", ")", "else", ":",...
create a complete django test suite for the provided application module .
train
false
39,738
def typedvalue(value): try: return int(value) except ValueError: pass try: return float(value) except ValueError: pass return value
[ "def", "typedvalue", "(", "value", ")", ":", "try", ":", "return", "int", "(", "value", ")", "except", "ValueError", ":", "pass", "try", ":", "return", "float", "(", "value", ")", "except", "ValueError", ":", "pass", "return", "value" ]
convert value to number whenever possible .
train
false
39,740
def _EmitLineUnformatted(state): prev_lineno = None while state.next_token: previous_token = state.next_token.previous_token previous_lineno = previous_token.lineno if previous_token.is_multiline_string: previous_lineno += previous_token.value.count(u'\n') if previous_token.is_continuation: newline = False else: newline = ((prev_lineno is not None) and (state.next_token.lineno > previous_lineno)) prev_lineno = state.next_token.lineno state.AddTokenToState(newline=newline, dry_run=False)
[ "def", "_EmitLineUnformatted", "(", "state", ")", ":", "prev_lineno", "=", "None", "while", "state", ".", "next_token", ":", "previous_token", "=", "state", ".", "next_token", ".", "previous_token", "previous_lineno", "=", "previous_token", ".", "lineno", "if", ...
emit the line without formatting .
train
false
39,741
def clean_dependency_relationships(trans, metadata_dict, tool_shed_repository, tool_shed_url): for rrda in tool_shed_repository.required_repositories: rd = rrda.repository_dependency r = rd.repository if can_eliminate_repository_dependency(metadata_dict, tool_shed_url, r.name, r.owner): message = 'Repository dependency %s by owner %s is not required by repository %s, owner %s, ' message += 'removing from list of repository dependencies.' log.debug((message % (r.name, r.owner, tool_shed_repository.name, tool_shed_repository.owner))) trans.install_model.context.delete(rrda) trans.install_model.context.flush() for td in tool_shed_repository.tool_dependencies: if can_eliminate_tool_dependency(metadata_dict, td.name, td.type, td.version): message = 'Tool dependency %s, version %s is not required by repository %s, owner %s, ' message += 'removing from list of tool dependencies.' log.debug((message % (td.name, td.version, tool_shed_repository.name, tool_shed_repository.owner))) trans.install_model.context.delete(td) trans.install_model.context.flush()
[ "def", "clean_dependency_relationships", "(", "trans", ",", "metadata_dict", ",", "tool_shed_repository", ",", "tool_shed_url", ")", ":", "for", "rrda", "in", "tool_shed_repository", ".", "required_repositories", ":", "rd", "=", "rrda", ".", "repository_dependency", "...
repositories of type tool_dependency_definition allow for defining a package dependency at some point in the change log and then removing the dependency later in the change log .
train
false
39,742
def post_takedown_notice_to_external_site(title, request_type, date_sent, date_received, source, action_taken, public_description, kind, original_url, infringing_urls, submitter_attributes, sender_name, sender_kind, sender_country): notice_json = {'authentication_token': g.secrets['lumendatabase_org_api_key'], 'notice': {'title': title, 'type': request_type, 'date_sent': date_sent.strftime('%Y-%m-%d'), 'date_received': date_received.strftime('%Y-%m-%d'), 'source': source, 'jurisdiction_list': 'US, CA', 'action_taken': action_taken, 'works_attributes': [{'description': public_description, 'kind': kind, 'copyrighted_urls_attributes': [{'url': original_url}], 'infringing_urls_attributes': infringing_urls}], 'entity_notice_roles_attributes': [{'name': 'recipient', 'entity_attributes': submitter_attributes}, {'name': 'sender', 'entity_attributes': {'name': sender_name, 'kind': sender_kind, 'address_line_1': '', 'city': '', 'state': '', 'zip': '', 'country_code': sender_country}}]}} timer = g.stats.get_timer('lumendatabase.takedown_create') timer.start() response = requests.post(('%snotices' % g.live_config['lumendatabase_org_api_base_url']), headers={'Content-type': 'application/json', 'Accept': 'application/json'}, data=json.dumps(notice_json)) timer.stop() return response.headers['location']
[ "def", "post_takedown_notice_to_external_site", "(", "title", ",", "request_type", ",", "date_sent", ",", "date_received", ",", "source", ",", "action_taken", ",", "public_description", ",", "kind", ",", "original_url", ",", "infringing_urls", ",", "submitter_attributes...
this method publicly posts a copy of the takedown notice to URL posting notices to lumen is free .
train
false
39,743
def agent_build_create(context, values): return IMPL.agent_build_create(context, values)
[ "def", "agent_build_create", "(", "context", ",", "values", ")", ":", "return", "IMPL", ".", "agent_build_create", "(", "context", ",", "values", ")" ]
create a new agent build entry .
train
false
39,744
def upload_image(context, image, instance, **kwargs): LOG.debug((_('Uploading image %s to the Glance image server') % image), instance=instance) read_file_handle = read_write_util.VMwareHTTPReadFile(kwargs.get('host'), kwargs.get('data_center_name'), kwargs.get('datastore_name'), kwargs.get('cookies'), kwargs.get('file_path')) file_size = read_file_handle.get_size() (image_service, image_id) = glance.get_remote_image_service(context, image) image_metadata = {'disk_format': 'vmdk', 'is_public': 'false', 'name': kwargs.get('snapshot_name'), 'status': 'active', 'container_format': 'bare', 'size': file_size, 'properties': {'vmware_adaptertype': kwargs.get('adapter_type'), 'vmware_ostype': kwargs.get('os_type'), 'vmware_image_version': kwargs.get('image_version'), 'owner_id': instance['project_id']}} start_transfer(context, read_file_handle, file_size, image_service=image_service, image_id=image_id, image_meta=image_metadata) LOG.debug((_('Uploaded image %s to the Glance image server') % image), instance=instance)
[ "def", "upload_image", "(", "context", ",", "image", ",", "instance", ",", "**", "kwargs", ")", ":", "LOG", ".", "debug", "(", "(", "_", "(", "'Uploading image %s to the Glance image server'", ")", "%", "image", ")", ",", "instance", "=", "instance", ")", ...
upload the snapshotted vm disk file to glance image server .
train
false
39,745
def create_repository_admin_role(app, repository): sa_session = app.model.context.current name = get_repository_admin_role_name(str(repository.name), str(repository.user.username)) description = 'A user or group member with this role can administer this repository.' role = app.model.Role(name=name, description=description, type=app.model.Role.types.SYSTEM) sa_session.add(role) sa_session.flush() app.model.UserRoleAssociation(repository.user, role) rra = app.model.RepositoryRoleAssociation(repository, role) sa_session.add(rra) sa_session.flush() return role
[ "def", "create_repository_admin_role", "(", "app", ",", "repository", ")", ":", "sa_session", "=", "app", ".", "model", ".", "context", ".", "current", "name", "=", "get_repository_admin_role_name", "(", "str", "(", "repository", ".", "name", ")", ",", "str", ...
create a new role with name-spaced name based on the repository name and its owners public user name .
train
false
39,746
@contextmanager def mock_signal_receiver(signal, wraps=None, **kwargs): if (wraps is None): def wraps(*args, **kwargs): return None receiver = Mock(wraps=wraps) signal.connect(receiver, **kwargs) (yield receiver) signal.disconnect(receiver)
[ "@", "contextmanager", "def", "mock_signal_receiver", "(", "signal", ",", "wraps", "=", "None", ",", "**", "kwargs", ")", ":", "if", "(", "wraps", "is", "None", ")", ":", "def", "wraps", "(", "*", "args", ",", "**", "kwargs", ")", ":", "return", "Non...
temporarily attaches a receiver to the provided signal within the scope of the context manager .
train
false
39,748
def get_or_compute_grads(loss_or_grads, params): if any(((not isinstance(p, theano.compile.SharedVariable)) for p in params)): raise ValueError('params must contain shared variables only. If it contains arbitrary parameter expressions, then lasagne.utils.collect_shared_vars() may help you.') if isinstance(loss_or_grads, list): if (not (len(loss_or_grads) == len(params))): raise ValueError(('Got %d gradient expressions for %d parameters' % (len(loss_or_grads), len(params)))) return loss_or_grads else: return theano.grad(loss_or_grads, params)
[ "def", "get_or_compute_grads", "(", "loss_or_grads", ",", "params", ")", ":", "if", "any", "(", "(", "(", "not", "isinstance", "(", "p", ",", "theano", ".", "compile", ".", "SharedVariable", ")", ")", "for", "p", "in", "params", ")", ")", ":", "raise",...
helper function returning a list of gradients parameters loss_or_grads : symbolic expression or list of expressions a scalar loss expression .
train
false
39,749
def variant_name(variant): if (variant is None): return '<default>' return variant
[ "def", "variant_name", "(", "variant", ")", ":", "if", "(", "variant", "is", "None", ")", ":", "return", "'<default>'", "return", "variant" ]
return a human-readable string representation of variant .
train
false
39,750
def undo_jid(jid, config='root'): (pre_snapshot, post_snapshot) = _get_jid_snapshots(jid, config=config) return undo(config, num_pre=pre_snapshot, num_post=post_snapshot)
[ "def", "undo_jid", "(", "jid", ",", "config", "=", "'root'", ")", ":", "(", "pre_snapshot", ",", "post_snapshot", ")", "=", "_get_jid_snapshots", "(", "jid", ",", "config", "=", "config", ")", "return", "undo", "(", "config", ",", "num_pre", "=", "pre_sn...
undo the changes applied by a salt job jid the job id to lookup config configuration name .
train
true
39,751
def getNewRepository(): return ExportRepository()
[ "def", "getNewRepository", "(", ")", ":", "return", "ExportRepository", "(", ")" ]
get new repository .
train
false
39,752
def _solve_bf(lap_sparse, B, return_full_prob=False): lap_sparse = lap_sparse.tocsc() solver = sparse.linalg.factorized(lap_sparse.astype(np.double)) X = np.array([solver(np.array((- B[i]).todense()).ravel()) for i in range(len(B))]) if (not return_full_prob): X = np.argmax(X, axis=0) return X
[ "def", "_solve_bf", "(", "lap_sparse", ",", "B", ",", "return_full_prob", "=", "False", ")", ":", "lap_sparse", "=", "lap_sparse", ".", "tocsc", "(", ")", "solver", "=", "sparse", ".", "linalg", ".", "factorized", "(", "lap_sparse", ".", "astype", "(", "...
solves lap_sparse x_i = b_i for each phase i .
train
false
39,753
def search_entries(): view_entries(raw_input('Search query: '))
[ "def", "search_entries", "(", ")", ":", "view_entries", "(", "raw_input", "(", "'Search query: '", ")", ")" ]
search entries .
train
false
39,754
@not_implemented_for('undirected') @not_implemented_for('multigraph') def is_reachable(G, s, t): def two_neighborhood(G, v): 'Returns the set of nodes at distance at most two from `v`.\n\n `G` must be a graph and `v` a node in that graph.\n\n The returned set includes the nodes at distance zero (that is,\n the node `v` itself), the nodes at distance one (that is, the\n out-neighbors of `v`), and the nodes at distance two.\n\n ' return {x for x in G if ((x == v) or (x in G[v]) or any((is_path(G, [v, z, x]) for z in G)))} def is_closed(G, nodes): 'Decides whether the given set of nodes is closed.\n\n A set *S* of nodes is *closed* if for each node *u* in the graph\n not in *S* and for each node *v* in *S*, there is an edge from\n *u* to *v*.\n\n ' return all(((v in G[u]) for u in (set(G) - nodes) for v in nodes)) neighborhoods = [two_neighborhood(G, v) for v in G] return all(((not (is_closed(G, S) and (s in S) and (t not in S))) for S in neighborhoods))
[ "@", "not_implemented_for", "(", "'undirected'", ")", "@", "not_implemented_for", "(", "'multigraph'", ")", "def", "is_reachable", "(", "G", ",", "s", ",", "t", ")", ":", "def", "two_neighborhood", "(", "G", ",", "v", ")", ":", "return", "{", "x", "for",...
decides whether there is a path from s to t in the tournament .
train
false
39,755
def make_or_verify_dir(directory, mode=493, uid=0, strict=False): try: os.makedirs(directory, mode) except OSError as exception: if (exception.errno == errno.EEXIST): if (strict and (not check_permissions(directory, mode, uid))): raise errors.Error(('%s exists, but it should be owned by user %d withpermissions %s' % (directory, uid, oct(mode)))) else: raise
[ "def", "make_or_verify_dir", "(", "directory", ",", "mode", "=", "493", ",", "uid", "=", "0", ",", "strict", "=", "False", ")", ":", "try", ":", "os", ".", "makedirs", "(", "directory", ",", "mode", ")", "except", "OSError", "as", "exception", ":", "...
make sure directory exists with proper permissions .
train
false
39,756
def _relative_timestamp(dt): delta = (datetime.utcnow() - dt) diff = ((delta.microseconds + ((delta.seconds + ((delta.days * 24) * 3600)) * 1000000.0)) / 1000000.0) if (diff < 45): return '{} second{}'.format(int(diff), ('' if (int(diff) == 1) else 's')) elif (diff < 90): return 'a minute' elif (diff < 2700): return '{} minutes'.format(int(max((diff / 60), 2))) elif (diff < 5400): return 'an hour' elif (diff < 79200): return '{} hours'.format(int(max((diff / 3600), 2))) elif (diff < 129600): return 'a day' elif (diff < 2592000): return '{} days'.format(int(max((diff / 86400), 2))) else: return None
[ "def", "_relative_timestamp", "(", "dt", ")", ":", "delta", "=", "(", "datetime", ".", "utcnow", "(", ")", "-", "dt", ")", "diff", "=", "(", "(", "delta", ".", "microseconds", "+", "(", "(", "delta", ".", "seconds", "+", "(", "(", "delta", ".", "...
format a human readable relative time for timestamps up to 30 days old .
train
false
39,757
def ssl_get_cert_from_request(request): certkey = 'SSL_CLIENT_S_DN' cert = request.META.get(certkey, '') if (not cert): cert = request.META.get(('HTTP_' + certkey), '') if (not cert): try: cert = request._req.subprocess_env.get(certkey, '') except Exception: return '' return cert
[ "def", "ssl_get_cert_from_request", "(", "request", ")", ":", "certkey", "=", "'SSL_CLIENT_S_DN'", "cert", "=", "request", ".", "META", ".", "get", "(", "certkey", ",", "''", ")", "if", "(", "not", "cert", ")", ":", "cert", "=", "request", ".", "META", ...
extract user information from certificate .
train
false
39,759
def escape_html(t): return cgi.escape(t)
[ "def", "escape_html", "(", "t", ")", ":", "return", "cgi", ".", "escape", "(", "t", ")" ]
escape & .
train
false
39,760
def compute_rank_inverse(inv): eig = inv['noise_cov']['eig'] if (not inv['noise_cov']['diag']): rank = np.sum((eig > 0)) else: ncomp = make_projector(inv['projs'], inv['noise_cov']['names'])[1] rank = (inv['noise_cov']['dim'] - ncomp) return rank
[ "def", "compute_rank_inverse", "(", "inv", ")", ":", "eig", "=", "inv", "[", "'noise_cov'", "]", "[", "'eig'", "]", "if", "(", "not", "inv", "[", "'noise_cov'", "]", "[", "'diag'", "]", ")", ":", "rank", "=", "np", ".", "sum", "(", "(", "eig", ">...
compute the rank of a linear inverse operator .
train
false
39,761
def test_no_serie_config(): chart = Line() chart.add('1', s1) chart.add('2', s2) q = chart.render_pyquery() assert (len(q('.serie-0 .line')) == 1) assert (len(q('.serie-1 .line')) == 1) assert (len(q('.serie-0 .dot')) == 5) assert (len(q('.serie-1 .dot')) == 6)
[ "def", "test_no_serie_config", "(", ")", ":", "chart", "=", "Line", "(", ")", "chart", ".", "add", "(", "'1'", ",", "s1", ")", "chart", ".", "add", "(", "'2'", ",", "s2", ")", "q", "=", "chart", ".", "render_pyquery", "(", ")", "assert", "(", "le...
test per serie no configuration .
train
false
39,763
def extract_flavor(instance, prefix=''): flavor = objects.Flavor() sys_meta = utils.instance_sys_meta(instance) if (not sys_meta): return None for key in system_metadata_flavor_props.keys(): type_key = ('%sinstance_type_%s' % (prefix, key)) setattr(flavor, key, sys_meta[type_key]) extra_specs = [(k, v) for (k, v) in sys_meta.items() if k.startswith(('%sinstance_type_extra_' % prefix))] if extra_specs: flavor.extra_specs = {} for (key, value) in extra_specs: extra_key = key[len(('%sinstance_type_extra_' % prefix)):] flavor.extra_specs[extra_key] = value return flavor
[ "def", "extract_flavor", "(", "instance", ",", "prefix", "=", "''", ")", ":", "flavor", "=", "objects", ".", "Flavor", "(", ")", "sys_meta", "=", "utils", ".", "instance_sys_meta", "(", "instance", ")", "if", "(", "not", "sys_meta", ")", ":", "return", ...
create a flavor object from instances system_metadata information .
train
false
39,765
def bootStraps(dat, n=1): dat = numpy.asarray(dat) if (len(dat.shape) == 1): dat = numpy.array([dat]) nTrials = dat.shape[1] resamples = numpy.zeros((dat.shape + (n,)), dat.dtype) rand = numpy.random.rand for stimulusN in range(dat.shape[0]): thisStim = dat[stimulusN, :] for sampleN in range(n): indices = numpy.floor((nTrials * rand(nTrials))).astype('i') resamples[stimulusN, :, sampleN] = numpy.take(thisStim, indices) return resamples
[ "def", "bootStraps", "(", "dat", ",", "n", "=", "1", ")", ":", "dat", "=", "numpy", ".", "asarray", "(", "dat", ")", "if", "(", "len", "(", "dat", ".", "shape", ")", "==", "1", ")", ":", "dat", "=", "numpy", ".", "array", "(", "[", "dat", "...
create a list of n bootstrapped resamples of the data slow implementation usage: out = bootstraps where: dat an nxm or 1xn array n number of bootstrapped resamples to create out - dim[0]=conditions - dim[1]=trials - dim[2]=resamples .
train
false
39,766
def decrementAny(tup): res = [] for (i, x) in enumerate(tup): if (x > 0): res.append(tuple(((list(tup[:i]) + [(x - 1)]) + list(tup[(i + 1):])))) return res
[ "def", "decrementAny", "(", "tup", ")", ":", "res", "=", "[", "]", "for", "(", "i", ",", "x", ")", "in", "enumerate", "(", "tup", ")", ":", "if", "(", "x", ">", "0", ")", ":", "res", ".", "append", "(", "tuple", "(", "(", "(", "list", "(", ...
the closest tuples to tup: decrementing by 1 along any dimension .
train
false
39,767
def _kraft_burrows_nousek(N, B, CL): try: import scipy HAS_SCIPY = True except ImportError: HAS_SCIPY = False try: import mpmath HAS_MPMATH = True except ImportError: HAS_MPMATH = False if (HAS_SCIPY and (N <= 100)): try: return _scipy_kraft_burrows_nousek(N, B, CL) except OverflowError: if (not HAS_MPMATH): raise ValueError(u'Need mpmath package for input numbers this large.') if HAS_MPMATH: return _mpmath_kraft_burrows_nousek(N, B, CL) raise ImportError(u'Either scipy or mpmath are required.')
[ "def", "_kraft_burrows_nousek", "(", "N", ",", "B", ",", "CL", ")", ":", "try", ":", "import", "scipy", "HAS_SCIPY", "=", "True", "except", "ImportError", ":", "HAS_SCIPY", "=", "False", "try", ":", "import", "mpmath", "HAS_MPMATH", "=", "True", "except", ...
upper limit on a poisson count rate the implementation is based on kraft .
train
false
39,768
def proxytype(): return 'junos'
[ "def", "proxytype", "(", ")", ":", "return", "'junos'" ]
returns the name of this proxy .
train
false
39,770
def remove_protocol_and_port_from_tool_shed_url(tool_shed_url): tool_shed = remove_protocol_from_tool_shed_url(tool_shed_url) tool_shed = remove_port_from_tool_shed_url(tool_shed) return tool_shed
[ "def", "remove_protocol_and_port_from_tool_shed_url", "(", "tool_shed_url", ")", ":", "tool_shed", "=", "remove_protocol_from_tool_shed_url", "(", "tool_shed_url", ")", "tool_shed", "=", "remove_port_from_tool_shed_url", "(", "tool_shed", ")", "return", "tool_shed" ]
return a partial tool shed url .
train
false
39,771
def mellin_transform(f, x, s, **hints): return MellinTransform(f, x, s).doit(**hints)
[ "def", "mellin_transform", "(", "f", ",", "x", ",", "s", ",", "**", "hints", ")", ":", "return", "MellinTransform", "(", "f", ",", "x", ",", "s", ")", ".", "doit", "(", "**", "hints", ")" ]
compute the mellin transform f(s) of f(x) .
train
false
39,772
def _format_creation(stamp): return ('%s.%06d' % (stamp.strftime(_BASE_CREATION_HEADER_FORMAT), stamp.microsecond))
[ "def", "_format_creation", "(", "stamp", ")", ":", "return", "(", "'%s.%06d'", "%", "(", "stamp", ".", "strftime", "(", "_BASE_CREATION_HEADER_FORMAT", ")", ",", "stamp", ".", "microsecond", ")", ")" ]
format an upload creation timestamp with milliseconds .
train
false
39,775
def copula_bv_clayton(u, v, theta): if (not (theta > 0)): raise ValueError('theta needs to be strictly positive') return np.power(((np.power(u, (- theta)) + np.power(v, (- theta))) - 1), (- theta))
[ "def", "copula_bv_clayton", "(", "u", ",", "v", ",", "theta", ")", ":", "if", "(", "not", "(", "theta", ">", "0", ")", ")", ":", "raise", "ValueError", "(", "'theta needs to be strictly positive'", ")", "return", "np", ".", "power", "(", "(", "(", "np"...
clayton or cook .
train
false
39,777
def is_valid_partition(part): regex = re.compile('/dev/sd[a-z][1-9][0-9]?$') try: return (regex.match(part) is not None) except TypeError: return False
[ "def", "is_valid_partition", "(", "part", ")", ":", "regex", "=", "re", ".", "compile", "(", "'/dev/sd[a-z][1-9][0-9]?$'", ")", "try", ":", "return", "(", "regex", ".", "match", "(", "part", ")", "is", "not", "None", ")", "except", "TypeError", ":", "ret...
checks that part matches the following regular expression: /dev/sd[a-z][1-9][0-9]?$ .
train
false
39,778
def wallclock_for_operation(results, operation): operation_results = itertools.ifilter((lambda r: ((r['metric']['type'] == 'wallclock') and (r['operation']['type'] == operation))), results) values = [r['value'] for r in operation_results] return mean(values)
[ "def", "wallclock_for_operation", "(", "results", ",", "operation", ")", ":", "operation_results", "=", "itertools", ".", "ifilter", "(", "(", "lambda", "r", ":", "(", "(", "r", "[", "'metric'", "]", "[", "'type'", "]", "==", "'wallclock'", ")", "and", "...
calculate the wallclock time for a process running in a particular scenario .
train
false
39,781
def initialize_gatherer(gatherer=None): global _gatherer if (gatherer is not None): LOG.debug(_('using provided stats gatherer %r'), gatherer) _gatherer = gatherer if (_gatherer is None): LOG.debug(_('making a new stats gatherer')) mgr = extension.ExtensionManager(namespace='ceilometer.poll.compute', invoke_on_load=True) _gatherer = DeletedInstanceStatsGatherer(mgr) return _gatherer
[ "def", "initialize_gatherer", "(", "gatherer", "=", "None", ")", ":", "global", "_gatherer", "if", "(", "gatherer", "is", "not", "None", ")", ":", "LOG", ".", "debug", "(", "_", "(", "'using provided stats gatherer %r'", ")", ",", "gatherer", ")", "_gatherer...
set the callable used to gather stats for the instance .
train
false
39,782
def _encode_name(backend, attributes): subject = backend._lib.X509_NAME_new() for attribute in attributes: name_entry = _encode_name_entry(backend, attribute) res = backend._lib.X509_NAME_add_entry(subject, name_entry, (-1), 0) backend.openssl_assert((res == 1)) return subject
[ "def", "_encode_name", "(", "backend", ",", "attributes", ")", ":", "subject", "=", "backend", ".", "_lib", ".", "X509_NAME_new", "(", ")", "for", "attribute", "in", "attributes", ":", "name_entry", "=", "_encode_name_entry", "(", "backend", ",", "attribute", ...
the x509_name created will not be gcd .
train
false
39,783
def qsize(queue): try: return queue.qsize() except NotImplementedError: return (-1)
[ "def", "qsize", "(", "queue", ")", ":", "try", ":", "return", "queue", ".", "qsize", "(", ")", "except", "NotImplementedError", ":", "return", "(", "-", "1", ")" ]
return the queue size where available; -1 where not .
train
false
39,785
def countnans(X, weights=None, axis=None, dtype=None, keepdims=False): if (not sp.issparse(X)): X = np.asanyarray(X) isnan = np.isnan(X) if ((weights is not None) and (weights.shape == X.shape)): isnan = (isnan * weights) counts = isnan.sum(axis=axis, dtype=dtype, keepdims=keepdims) if ((weights is not None) and (weights.shape != X.shape)): counts = (counts * weights) else: if (any(((attr is not None) for attr in [axis, dtype])) or (keepdims is not False)): raise ValueError('Arguments axis, dtype and keepdimsare not yet supported on sparse data!') counts = _count_nans_per_row_sparse(X, weights) return counts
[ "def", "countnans", "(", "X", ",", "weights", "=", "None", ",", "axis", "=", "None", ",", "dtype", "=", "None", ",", "keepdims", "=", "False", ")", ":", "if", "(", "not", "sp", ".", "issparse", "(", "X", ")", ")", ":", "X", "=", "np", ".", "a...
count the undefined elements in arr along given axis .
train
false
39,786
def reread(user=None, conf_file=None, bin_env=None): ret = __salt__['cmd.run_all'](_ctl_cmd('reread', None, conf_file, bin_env), runas=user, python_shell=False) return _get_return(ret)
[ "def", "reread", "(", "user", "=", "None", ",", "conf_file", "=", "None", ",", "bin_env", "=", "None", ")", ":", "ret", "=", "__salt__", "[", "'cmd.run_all'", "]", "(", "_ctl_cmd", "(", "'reread'", ",", "None", ",", "conf_file", ",", "bin_env", ")", ...
reload the daemons configuration files user user to run supervisorctl as conf_file path to supervisord config file bin_env path to supervisorctl bin or path to virtualenv with supervisor installed cli example: .
train
true
39,787
@contextmanager def copy_file_to_temp(source): temp_filename = get_temp_filename() with copy_file(source, temp_filename): (yield temp_filename)
[ "@", "contextmanager", "def", "copy_file_to_temp", "(", "source", ")", ":", "temp_filename", "=", "get_temp_filename", "(", ")", "with", "copy_file", "(", "source", ",", "temp_filename", ")", ":", "(", "yield", "temp_filename", ")" ]
context manager that copies the source file to a temporary destination .
train
false
39,788
def dup_prem(f, g, K):
    # Polynomial pseudo-remainder of f by g in K[x] (dense representation).
    df = dup_degree(f)
    dg = dup_degree(g)
    (r, dr) = (f, df)
    if (not g):
        raise ZeroDivisionError('polynomial division')
    elif (df < dg):
        # Divisor degree exceeds dividend degree: f is its own remainder.
        return r
    # N counts the multiplications by lc(g) still owed to the result.
    N = ((df - dg) + 1)
    lc_g = dup_LC(g, K)
    while True:
        lc_r = dup_LC(r, K)
        (j, N) = ((dr - dg), (N - 1))
        # Cancel the leading term of r: lc(g)*r - lc(r)*x**j * g.
        R = dup_mul_ground(r, lc_g, K)
        G = dup_mul_term(g, lc_r, j, K)
        r = dup_sub(R, G, K)
        (_dr, dr) = (dr, dup_degree(r))
        if (dr < dg):
            break
        elif (not (dr < _dr)):
            # Degree did not drop: inexact arithmetic in the ground domain.
            raise PolynomialDivisionFailed(f, g, K)
    # Apply the remaining power of lc(g) so the pseudo-remainder is exact.
    return dup_mul_ground(r, (lc_g ** N), K)
[ "def", "dup_prem", "(", "f", ",", "g", ",", "K", ")", ":", "df", "=", "dup_degree", "(", "f", ")", "dg", "=", "dup_degree", "(", "g", ")", "(", "r", ",", "dr", ")", "=", "(", "f", ",", "df", ")", "if", "(", "not", "g", ")", ":", "raise", ...
polynomial pseudo-remainder in k[x] .
train
false
39,790
def extract_documentation_with_markers(content, docstyle_definition):
    # Generator yielding every documentation comment found in ``content``,
    # located via the (begin, each-line, end) marker triples of the docstyle.
    markers = docstyle_definition.markers
    # Group the marker triples by their begin-marker for O(1) lookup later.
    marker_dict = {}
    for marker_set in markers:
        if (marker_set[0] not in marker_dict):
            marker_dict[marker_set[0]] = [marker_set]
        else:
            marker_dict[marker_set[0]].append(marker_set)
    # Single regex matching any of the begin markers.
    begin_regex = _compile_multi_match_regex((marker_set[0] for marker_set in markers))
    line = 0
    column = 0
    # Scan forward; the helper advances (line, column) past each match.
    while (line < len(content)):
        (line, column, doc) = _extract_doc_comment_from_line(content, line, column, begin_regex, marker_dict, docstyle_definition)
        if doc:
            (yield doc)
[ "def", "extract_documentation_with_markers", "(", "content", ",", "docstyle_definition", ")", ":", "markers", "=", "docstyle_definition", ".", "markers", "marker_dict", "=", "{", "}", "for", "marker_set", "in", "markers", ":", "if", "(", "marker_set", "[", "0", ...
extracts all documentation texts inside the given source-code-string .
train
false
39,791
def reference(object, callback=None, **annotations):
    """Build a weak reference to *object* and attach *annotations* to it."""
    factory = callable_reference if callable(object) else annotatable_weakref
    weak = factory(object, callback)
    for name in annotations:
        setattr(weak, name, annotations[name])
    return weak
[ "def", "reference", "(", "object", ",", "callback", "=", "None", ",", "**", "annotations", ")", ":", "if", "callable", "(", "object", ")", ":", "weak", "=", "callable_reference", "(", "object", ",", "callback", ")", "else", ":", "weak", "=", "annotatable...
return an annotated weak ref .
train
false
39,792
def exploit_all():
    """Placeholder: attack-plugin instantiation is not implemented; always False."""
    implemented = False
    return implemented
[ "def", "exploit_all", "(", ")", ":", "return", "False" ]
this function creates an instance of every attack plugin .
train
false
39,794
def config_option_list(context, data_dict):
    """Authorization check: nobody is allowed to list editable config options."""
    return dict(success=False)
[ "def", "config_option_list", "(", "context", ",", "data_dict", ")", ":", "return", "{", "'success'", ":", "False", "}" ]
return a list of runtime-editable config options keys that can be updated with :py:func:~ckan .
train
false
39,795
def _pep8_violations(report_file): with open(report_file) as f: violations_list = f.readlines() num_lines = len(violations_list) return (num_lines, violations_list)
[ "def", "_pep8_violations", "(", "report_file", ")", ":", "with", "open", "(", "report_file", ")", "as", "f", ":", "violations_list", "=", "f", ".", "readlines", "(", ")", "num_lines", "=", "len", "(", "violations_list", ")", "return", "(", "num_lines", ","...
returns a tuple of for all pep8 violations in the given report_file .
train
false
39,796
def uncomment(path, regex, char='#', backup='.bak'):
    """Uncomment every line of *path* matching *regex* (delegates to comment_line)."""
    return comment_line(path=path, regex=regex, char=char, backup=backup, cmnt=False)
[ "def", "uncomment", "(", "path", ",", "regex", ",", "char", "=", "'#'", ",", "backup", "=", "'.bak'", ")", ":", "return", "comment_line", "(", "path", "=", "path", ",", "regex", "=", "regex", ",", "char", "=", "char", ",", "cmnt", "=", "False", ","...
attempt to uncomment all lines in filename matching regex .
train
true
39,797
def get_static_file_url(asset):
    """Resolve *asset* through the static-files storage and return its URL."""
    url = staticfiles_storage.url(asset)
    return url
[ "def", "get_static_file_url", "(", "asset", ")", ":", "return", "staticfiles_storage", ".", "url", "(", "asset", ")" ]
returns url of the themed asset if asset is not themed than returns the default asset url .
train
false
39,798
def chars_to_dictionary(string):
    """Map each distinct character of *string* to its rank in sorted order."""
    return {ch: rank for rank, ch in enumerate(sorted(set(string)))}
[ "def", "chars_to_dictionary", "(", "string", ")", ":", "chars", "=", "set", "(", "string", ")", "char_idx", "=", "{", "c", ":", "i", "for", "(", "i", ",", "c", ")", "in", "enumerate", "(", "sorted", "(", "chars", ")", ")", "}", "return", "char_idx"...
creates a dictionary char:integer for each unique character .
train
false
39,799
def _FindStmtParent(node):
    """Walk upward from *node* to the nearest enclosing statement node."""
    current = node
    while pytree_utils.NodeName(current) not in _STATEMENT_NODES:
        current = current.parent
    return current
[ "def", "_FindStmtParent", "(", "node", ")", ":", "if", "(", "pytree_utils", ".", "NodeName", "(", "node", ")", "in", "_STATEMENT_NODES", ")", ":", "return", "node", "else", ":", "return", "_FindStmtParent", "(", "node", ".", "parent", ")" ]
find the nearest parent of node that is a statement node .
train
false
39,800
def copy_file_without_comments(source, destination):
    """Copy *source* to *destination*, dropping blank lines and '#'-comments."""
    with open(source, 'r') as infile, open(destination, 'w') as outfile:
        for line in infile:
            if line.isspace() or line.lstrip().startswith('#'):
                continue
            outfile.write(line)
[ "def", "copy_file_without_comments", "(", "source", ",", "destination", ")", ":", "with", "open", "(", "source", ",", "'r'", ")", "as", "infile", ":", "with", "open", "(", "destination", ",", "'w'", ")", "as", "outfile", ":", "for", "line", "in", "infile...
copies source to destination .
train
false
39,801
def get_current_view():
    """Return the active administrative view stored on flask's ``g`` (or None)."""
    view = getattr(g, '_admin_view', None)
    return view
[ "def", "get_current_view", "(", ")", ":", "return", "getattr", "(", "g", ",", "'_admin_view'", ",", "None", ")" ]
get current administrative view .
train
false
39,802
def return_none(exc):
    """Ignore *exc* and always return None (error-handler callback)."""
    pass
[ "def", "return_none", "(", "exc", ")", ":", "return", "None" ]
returns none .
train
false
39,803
def get_enabled():
    """List services flagged 'YES' (enabled) in the service configuration."""
    flag = 'YES'
    return _get_svc_list(flag)
[ "def", "get_enabled", "(", ")", ":", "return", "_get_svc_list", "(", "'YES'", ")" ]
return the enabled services cli example: .
train
false
39,804
def track_thread_created_event(request, course, thread, followed):
    """Emit the analytics event recording that *thread* was just created."""
    event_name = _EVENT_NAME_TEMPLATE.format(obj_type='thread', action_name='created')
    payload = dict(
        commentable_id=thread.commentable_id,
        group_id=thread.get('group_id'),
        thread_type=thread.thread_type,
        title=thread.title,
        anonymous=thread.anonymous,
        anonymous_to_peers=thread.anonymous_to_peers,
        options={'followed': followed},
    )
    track_created_event(request, event_name, course, thread, payload)
[ "def", "track_thread_created_event", "(", "request", ",", "course", ",", "thread", ",", "followed", ")", ":", "event_name", "=", "_EVENT_NAME_TEMPLATE", ".", "format", "(", "obj_type", "=", "'thread'", ",", "action_name", "=", "'created'", ")", "event_data", "="...
send analytics event for a newly created thread .
train
false
39,805
def get_all_threads(exploration_id, has_suggestion):
    """Return the exploration's threads whose ``has_suggestion`` flag matches.

    Args:
        exploration_id: id of the exploration whose threads are fetched.
        has_suggestion: bool; only threads with this flag value are kept.

    Returns:
        list of thread objects.
    """
    # Idiom fix: replaced manual filter-and-append loop with a comprehension.
    return [thread for thread in get_threads(exploration_id)
            if thread.has_suggestion == has_suggestion]
[ "def", "get_all_threads", "(", "exploration_id", ",", "has_suggestion", ")", ":", "threads", "=", "get_threads", "(", "exploration_id", ")", "all_threads", "=", "[", "]", "for", "thread", "in", "threads", ":", "if", "(", "thread", ".", "has_suggestion", "==", ...
fetches all threads that correspond to the given exploration id .
train
false
39,807
def normalize_dict(dict_):
    """Collapse single-item non-string iterable values to their sole element."""
    normalized = {}
    for key, value in dict_.items():
        if not isinstance(value, str) and len(value) == 1:
            normalized[key] = value[0]
        else:
            normalized[key] = value
    return normalized
[ "def", "normalize_dict", "(", "dict_", ")", ":", "return", "dict", "(", "[", "(", "k", ",", "(", "v", "[", "0", "]", "if", "(", "(", "not", "isinstance", "(", "v", ",", "str", ")", ")", "and", "(", "len", "(", "v", ")", "==", "1", ")", ")",...
replaces all values that are single-item iterables with the value of its index 0 .
train
true
39,808
def IndexDefinitionsToKeys(indexes):
    """Convert an IndexDefinitions object into a set of index keys."""
    if indexes is None or not indexes.indexes:
        return set()
    return {IndexToKey(index) for index in indexes.indexes}
[ "def", "IndexDefinitionsToKeys", "(", "indexes", ")", ":", "keyset", "=", "set", "(", ")", "if", "(", "indexes", "is", "not", "None", ")", ":", "if", "indexes", ".", "indexes", ":", "for", "index", "in", "indexes", ".", "indexes", ":", "keyset", ".", ...
convert indexdefinitions to set of keys .
train
false
39,809
def get_object_transient_sysmeta(key):
    """Prefix *key* with the object transient-sysmeta header prefix."""
    return '{0}{1}'.format(OBJECT_TRANSIENT_SYSMETA_PREFIX, key)
[ "def", "get_object_transient_sysmeta", "(", "key", ")", ":", "return", "(", "'%s%s'", "%", "(", "OBJECT_TRANSIENT_SYSMETA_PREFIX", ",", "key", ")", ")" ]
returns the object transient system metadata header for key .
train
false
39,810
def speakerDiarizationEvaluateScript(folderName, LDAs):
    # Evaluate speaker diarization on every WAV file in folderName, once
    # per LDA dimensionality in LDAs; purity results are printed (Python 2).
    types = ('*.wav',)
    wavFilesList = []
    for files in types:
        wavFilesList.extend(glob.glob(os.path.join(folderName, files)))
    wavFilesList = sorted(wavFilesList)
    # N[i] is the ground-truth speaker count of wavFilesList[i], or -1
    # when no companion .segments annotation file exists.
    N = []
    for wavFile in wavFilesList:
        gtFile = wavFile.replace('.wav', '.segments')
        if os.path.isfile(gtFile):
            [segStart, segEnd, segLabels] = readSegmentGT(gtFile)
            N.append(len(list(set(segLabels))))
        else:
            N.append((-1))
    for l in LDAs:
        print 'LDA = {0:d}'.format(l)
        for (i, wavFile) in enumerate(wavFilesList):
            # NOTE(review): smoothing/step parameters are fixed; PLOT off.
            speakerDiarization(wavFile, N[i], 2.0, 0.2, 0.05, l, PLOT=False)
        print
[ "def", "speakerDiarizationEvaluateScript", "(", "folderName", ",", "LDAs", ")", ":", "types", "=", "(", "'*.wav'", ",", ")", "wavFilesList", "=", "[", "]", "for", "files", "in", "types", ":", "wavFilesList", ".", "extend", "(", "glob", ".", "glob", "(", ...
this function prints the cluster purity and speaker purity for each wav file stored in a provided directory arguments: - foldername: the full path of the folder where the wav and segment files are stored - ldas: a list of lda dimensions .
train
false
39,812
def ancestors(variable_list, blockers=None):
    """Return all variables upstream of *variable_list* via a depth-first search.

    Traversal does not expand past any variable listed in *blockers*.
    """
    def expand(var):
        if var.owner and (not blockers or var not in blockers):
            return reversed(var.owner.inputs)
    return stack_search(deque(variable_list), expand, 'dfs')
[ "def", "ancestors", "(", "variable_list", ",", "blockers", "=", "None", ")", ":", "def", "expand", "(", "r", ")", ":", "if", "(", "r", ".", "owner", "and", "(", "(", "not", "blockers", ")", "or", "(", "r", "not", "in", "blockers", ")", ")", ")", ...
return the variables that contribute to those in variable_list .
train
false
39,813
@should_profile_core
def start_core_profiling(w3af_core):
    """Periodically dump profiling data for *w3af_core* in a background thread."""
    dump = partial(dump_data, w3af_core)
    dump_data_every_thread(dump, DELAY_MINUTES, SAVE_THREAD_PTR)
[ "@", "should_profile_core", "def", "start_core_profiling", "(", "w3af_core", ")", ":", "dd_partial", "=", "partial", "(", "dump_data", ",", "w3af_core", ")", "dump_data_every_thread", "(", "dd_partial", ",", "DELAY_MINUTES", ",", "SAVE_THREAD_PTR", ")" ]
if the environment variable w3af_profiling is set to 1 .
train
false
39,814
def install_git(path):
    """Write an executable pylama pre-commit hook into the git hooks dir *path*."""
    hook = op.join(path, 'pre-commit')
    with open(hook, 'w') as fd:
        fd.write("#!/usr/bin/env python\nimport sys\nfrom pylama.hook import git_hook\n\nif __name__ == '__main__':\n    sys.exit(git_hook())\n")
    # 484 decimal == 0o744: owner rwx, group/other read.
    chmod(hook, 484)
[ "def", "install_git", "(", "path", ")", ":", "hook", "=", "op", ".", "join", "(", "path", ",", "'pre-commit'", ")", "with", "open", "(", "hook", ",", "'w'", ")", "as", "fd", ":", "fd", ".", "write", "(", "\"#!/usr/bin/env python\\nimport sys\\nfrom pylama....
install hook in git repository .
train
true
39,815
def _parse_vmconfig(config, instances):
    # Normalize a vm_present state config.  ``instances`` maps property
    # names to their key field; those properties' per-instance mappings are
    # flattened into lists of dicts tagged with that key field.  Other
    # properties are copied verbatim.  Returns None on unparsable input.
    vmconfig = None
    if isinstance(config, salt.utils.odict.OrderedDict):
        vmconfig = OrderedDict()
        for prop in config.keys():
            if (prop not in instances):
                vmconfig[prop] = config[prop]
            else:
                if (not isinstance(config[prop], salt.utils.odict.OrderedDict)):
                    # Malformed instance block: skipped silently.
                    continue
                vmconfig[prop] = []
                for instance in config[prop]:
                    instance_config = config[prop][instance]
                    # Record the instance identifier under its key field.
                    instance_config[instances[prop]] = instance
                    vmconfig[prop].append(instance_config)
    else:
        log.error('smartos.vm_present::parse_vmconfig - failed to parse')
    return vmconfig
[ "def", "_parse_vmconfig", "(", "config", ",", "instances", ")", ":", "vmconfig", "=", "None", "if", "isinstance", "(", "config", ",", "salt", ".", "utils", ".", "odict", ".", "OrderedDict", ")", ":", "vmconfig", "=", "OrderedDict", "(", ")", "for", "prop...
parse vm_present vm config .
train
true
39,817
@task
@timed
def clean_mongo():
    """Drop the mongo test databases via the repo's cleanup script."""
    command = 'mongo {host}:{port} {repo_root}/scripts/delete-mongo-test-dbs.js'.format(
        host=MONGO_HOST, port=MONGO_PORT_NUM, repo_root=Env.REPO_ROOT)
    sh(command)
[ "@", "task", "@", "timed", "def", "clean_mongo", "(", ")", ":", "sh", "(", "'mongo {host}:{port} {repo_root}/scripts/delete-mongo-test-dbs.js'", ".", "format", "(", "host", "=", "MONGO_HOST", ",", "port", "=", "MONGO_PORT_NUM", ",", "repo_root", "=", "Env", ".", ...
clean mongo test databases .
train
false
39,818
def gen_arg_type_qual(fn):
    """Generate empty kernel_arg_type_qual metadata for each argument of *fn*."""
    mod = fn.module
    fnty = fn.type.pointee
    qualifiers = [lc.MetaDataString.get(mod, '') for _ in fnty.args]
    label = lc.MetaDataString.get(mod, 'kernel_arg_type_qual')
    return lc.MetaData.get(mod, [label] + qualifiers)
[ "def", "gen_arg_type_qual", "(", "fn", ")", ":", "mod", "=", "fn", ".", "module", "fnty", "=", "fn", ".", "type", ".", "pointee", "consts", "=", "[", "lc", ".", "MetaDataString", ".", "get", "(", "mod", ",", "''", ")", "for", "_", "in", "fnty", "...
generate kernel_arg_type_qual metadata .
train
false
39,819
def _call_with_retry(max_attempts):
    # Decorator factory: retry the wrapped RPC call up to ``max_attempts``
    # times on oslo MessagingException, re-raising only the final failure.
    def wrapper(f):
        def func_wrapper(*args, **kwargs):
            action = ('%(func)s' % {'func': getattr(f, '__name__', f)})
            for attempt in range(1, (max_attempts + 1)):
                try:
                    return f(*args, **kwargs)
                except oslo_messaging.MessagingException:
                    # reraise=False: swallow the exception unless this was
                    # the last attempt (toggled via ctxt.reraise below).
                    with excutils.save_and_reraise_exception(reraise=False) as ctxt:
                        LOG.warning(_LW('Failed to execute %(action)s. %(attempt)d out of %(max_attempts)d'), {'attempt': attempt, 'max_attempts': max_attempts, 'action': action})
                        if (attempt == max_attempts):
                            ctxt.reraise = True
        return func_wrapper
    return wrapper
[ "def", "_call_with_retry", "(", "max_attempts", ")", ":", "def", "wrapper", "(", "f", ")", ":", "def", "func_wrapper", "(", "*", "args", ",", "**", "kwargs", ")", ":", "action", "=", "(", "'%(func)s'", "%", "{", "'func'", ":", "getattr", "(", "f", ",...
a wrapper to retry a function using rpc call in case of messagingexception .
train
false
39,820
@webauth.SecurityCheck
@csrf.ensure_csrf_cookie
def Homepage(request):
    # Render the AdminUI index page and mirror the user's canary-mode
    # preference into a cookie for the JS frontend.
    renderers_js_files = set(['statistics.js', 'usage.js'])
    # Server-process start time; exposed so clients can detect restarts.
    create_time = psutil.Process(os.getpid()).create_time()
    context = {'heading': config_lib.CONFIG['AdminUI.heading'], 'report_url': config_lib.CONFIG['AdminUI.report_url'], 'help_url': config_lib.CONFIG['AdminUI.help_url'], 'use_precompiled_js': config_lib.CONFIG['AdminUI.use_precompiled_js'], 'renderers_js': renderers_js_files, 'timestamp': create_time}
    response = shortcuts.render_to_response('base.html', context, context_instance=template.RequestContext(request))
    # Normalize GET params and mint a short-lived (60s) ACL token.
    request.REQ = request.GET.dict()
    request.token = BuildToken(request, 60)
    user_record = aff4.FACTORY.Create(aff4.ROOT_URN.Add('users').Add(request.user), aff4_type=aff4_users.GRRUser, mode='r', token=request.token)
    canary_mode = user_record.Get(user_record.Schema.GUI_SETTINGS).canary_mode
    # Set or clear the cookie to match the stored preference.
    if canary_mode:
        response.set_cookie('canary_mode', 'true')
    else:
        response.delete_cookie('canary_mode')
    return response
[ "@", "webauth", ".", "SecurityCheck", "@", "csrf", ".", "ensure_csrf_cookie", "def", "Homepage", "(", "request", ")", ":", "renderers_js_files", "=", "set", "(", "[", "'statistics.js'", ",", "'usage.js'", "]", ")", "create_time", "=", "psutil", ".", "Process",...
basic handler to render the index page .
train
false
39,822
def get_ptb_words_vocabulary():
    """Fetch the Penn Treebank word vocabulary."""
    vocabulary = _retrieve_word_vocabulary()
    return vocabulary
[ "def", "get_ptb_words_vocabulary", "(", ")", ":", "return", "_retrieve_word_vocabulary", "(", ")" ]
gets the penn tree bank word vocabulary .
train
false
39,824
def parseXMLString(st):
    """Parse *st* as XML: case-sensitive, preserving the original tag case."""
    return parseString(st, preserveCase=1, caseInsensitive=0)
[ "def", "parseXMLString", "(", "st", ")", ":", "return", "parseString", "(", "st", ",", "caseInsensitive", "=", "0", ",", "preserveCase", "=", "1", ")" ]
parse an xml readable object .
train
false
39,825
def _convert_children(node): if (not isinstance(node, (list, dict))): return if isinstance(node, list): for child in node: _convert_children(child) return for key in node.keys(): val = node[key] if ((not isinstance(val, dict)) or (val.get('__tag__') != 'code')): _convert_children(val) continue val = eval(val['python']) node[key] = val continue
[ "def", "_convert_children", "(", "node", ")", ":", "if", "(", "not", "isinstance", "(", "node", ",", "(", "list", ",", "dict", ")", ")", ")", ":", "return", "if", "isinstance", "(", "node", ",", "list", ")", ":", "for", "child", "in", "node", ":", ...
recursively convert to functions all "code strings" below the node .
train
false
39,826
def quota_class_get_default(context):
    """Retrieve all default quotas via the backing implementation."""
    defaults = IMPL.quota_class_get_default(context)
    return defaults
[ "def", "quota_class_get_default", "(", "context", ")", ":", "return", "IMPL", ".", "quota_class_get_default", "(", "context", ")" ]
retrieve all default quotas .
train
false
39,828
def _read_simple(fid, tag, shape, rlims, dtype):
    """Read a simple fixed-dtype tag payload of ``tag.size`` bytes from *fid*."""
    return _fromstring_rows(fid, tag.size, rlims=rlims, shape=shape, dtype=dtype)
[ "def", "_read_simple", "(", "fid", ",", "tag", ",", "shape", ",", "rlims", ",", "dtype", ")", ":", "return", "_fromstring_rows", "(", "fid", ",", "tag", ".", "size", ",", "dtype", "=", "dtype", ",", "shape", "=", "shape", ",", "rlims", "=", "rlims", ...
read simple datatypes from tag .
train
false
39,829
def find_repeats(arr):
    """Return the repeated values in *arr* and their counts (masked-aware)."""
    compressed = np.asarray(ma.compressed(arr), dtype=np.float64)
    try:
        shares_memory = np.may_share_memory(compressed, arr)
    except AttributeError:
        # Older numpy without may_share_memory: assume no aliasing.
        shares_memory = False
    if shares_memory:
        compressed = compressed.copy()
    return _find_repeats(compressed)
[ "def", "find_repeats", "(", "arr", ")", ":", "compr", "=", "np", ".", "asarray", "(", "ma", ".", "compressed", "(", "arr", ")", ",", "dtype", "=", "np", ".", "float64", ")", "try", ":", "need_copy", "=", "np", ".", "may_share_memory", "(", "compr", ...
find repeats and repeat counts .
train
false
39,831
def InetPtoN(protocol, addr_string):
    # Python 2 fallback for socket.inet_pton: convert an IPv4/IPv6 address
    # string to its packed binary form; raises socket.error on bad input.
    if (protocol == socket.AF_INET):
        return socket.inet_aton(addr_string)
    if (protocol != socket.AF_INET6):
        raise socket.error('Unsupported protocol')
    if (not addr_string):
        raise socket.error('Empty address string')
    if BAD_SINGLE_COLON.match(addr_string):
        raise socket.error('Start or ends with single colon')
    if (addr_string == '::'):
        # All-zero address: 16 zero bytes (32 hex zero digits).
        return ('0' * 32).decode('hex_codec')
    # Rewrite any trailing dotted-quad IPv4 part as hex groups.
    addr_string = _RemoveV4Ending(addr_string)
    addr_string = _StripLeadingOrTrailingDoubleColons(addr_string)
    # Expand '::' and short groups to exactly 32 hex digits.
    addr_string = _ZeroPad(addr_string)
    try:
        return addr_string.decode('hex_codec')
    except TypeError:
        raise socket.error(('Error decoding: %s' % addr_string))
[ "def", "InetPtoN", "(", "protocol", ",", "addr_string", ")", ":", "if", "(", "protocol", "==", "socket", ".", "AF_INET", ")", ":", "return", "socket", ".", "inet_aton", "(", "addr_string", ")", "if", "(", "protocol", "!=", "socket", ".", "AF_INET6", ")",...
convert ipv6 string to packed bytes .
train
false
39,833
def check_readonly(view):
    """View decorator raising ReadOnlyException while read-only flags are active."""
    def guarded(request, *args, **kwargs):
        if not flag_is_active(request, 'kumaediting'):
            raise ReadOnlyException('kumaediting')
        if flag_is_active(request, 'kumabanned'):
            raise ReadOnlyException('kumabanned')
        return view(request, *args, **kwargs)
    return guarded
[ "def", "check_readonly", "(", "view", ")", ":", "def", "_check_readonly", "(", "request", ",", "*", "args", ",", "**", "kwargs", ")", ":", "if", "(", "not", "flag_is_active", "(", "request", ",", "'kumaediting'", ")", ")", ":", "raise", "ReadOnlyException"...
decorator to enable readonly mode .
train
false
39,834
def test_lex_strings():
    """Strings tokenize to HyString objects (line continuations are folded)."""
    simple = tokenize('"foo"')
    assert (simple == [HyString('foo')])
    continued = tokenize('\n"a\\\nbc"\n')
    assert (continued == [HyString('abc')])
[ "def", "test_lex_strings", "(", ")", ":", "objs", "=", "tokenize", "(", "'\"foo\"'", ")", "assert", "(", "objs", "==", "[", "HyString", "(", "'foo'", ")", "]", ")", "objs", "=", "tokenize", "(", "'\\n\"a\\\\\\nbc\"\\n'", ")", "assert", "(", "objs", "==",...
make sure that strings are valid expressions .
train
false
39,835
def change_node_state(deployer, desired_configuration):
    # Drive *deployer* toward *desired_configuration* by running three
    # discover/calculate/apply convergence iterations; returns a Deferred.
    state_persister = InMemoryStatePersister()
    def converge():
        # Start from an empty local node state and discover actual state.
        d = deployer.discover_state(DeploymentState(nodes={NodeState(hostname=deployer.hostname, uuid=deployer.node_uuid, applications=[], manifestations={}, paths={}, devices={})}), persistent_state=state_persister.get_state())
        def got_changes(local_state):
            # Fold the discovered local changes into a fresh cluster state,
            # then compute what is needed to reach the desired config.
            changes = local_state.shared_state_changes()
            cluster_state = DeploymentState()
            for change in changes:
                cluster_state = change.update_cluster_state(cluster_state)
            return deployer.calculate_changes(desired_configuration, cluster_state, local_state)
        d.addCallback(got_changes)
        d.addCallback((lambda change: run_state_change(change, deployer=deployer, state_persister=state_persister)))
        return d
    # Some operations require several passes to converge, hence three runs.
    result = converge()
    result.addCallback((lambda _: converge()))
    result.addCallback((lambda _: converge()))
    return result
[ "def", "change_node_state", "(", "deployer", ",", "desired_configuration", ")", ":", "state_persister", "=", "InMemoryStatePersister", "(", ")", "def", "converge", "(", ")", ":", "d", "=", "deployer", ".", "discover_state", "(", "DeploymentState", "(", "nodes", ...
change the local state to match the given desired state .
train
false
39,836
@require_admin_context
def purge_deleted_rows(context, age_in_days):
    # Permanently delete soft-deleted rows older than *age_in_days* from
    # every table that carries a 'deleted' column.  Raises
    # InvalidParameterValue for a non-integer age.
    try:
        age_in_days = int(age_in_days)
    except ValueError:
        msg = (_('Invalid value for age, %(age)s') % {'age': age_in_days})
        LOG.exception(msg)
        raise exception.InvalidParameterValue(msg)
    engine = get_engine()
    session = get_session()
    metadata = MetaData()
    metadata.reflect(engine)
    # Reversed sorted order purges child tables before their parents
    # (foreign-key safety).
    for table in reversed(metadata.sorted_tables):
        if ('deleted' not in table.columns.keys()):
            continue
        LOG.info(_LI('Purging deleted rows older than age=%(age)d days from table=%(table)s'), {'age': age_in_days, 'table': table})
        deleted_age = (timeutils.utcnow() - dt.timedelta(days=age_in_days))
        try:
            with session.begin():
                if (six.text_type(table) == 'quality_of_service_specs'):
                    # Delete child spec rows first so the self-referencing
                    # specs_id FK does not block the bulk delete below.
                    session.query(models.QualityOfServiceSpecs).filter(and_(models.QualityOfServiceSpecs.specs_id.isnot(None), (models.QualityOfServiceSpecs.deleted == 1), (models.QualityOfServiceSpecs.deleted_at < deleted_age))).delete()
                result = session.execute(table.delete().where((table.c.deleted_at < deleted_age)))
        except db_exc.DBReferenceError as ex:
            LOG.error(_LE('DBError detected when purging from %(tablename)s: %(error)s.'), {'tablename': table, 'error': six.text_type(ex)})
            raise
        rows_purged = result.rowcount
        if (rows_purged != 0):
            LOG.info(_LI('Deleted %(row)d rows from table=%(table)s'), {'row': rows_purged, 'table': table})
[ "@", "require_admin_context", "def", "purge_deleted_rows", "(", "context", ",", "age_in_days", ")", ":", "try", ":", "age_in_days", "=", "int", "(", "age_in_days", ")", "except", "ValueError", ":", "msg", "=", "(", "_", "(", "'Invalid value for age, %(age)s'", "...
purge deleted rows older than given age from cinder tables raises invalidparametervalue if age_in_days is incorrect .
train
false
39,837
def from_wire(wire, keyring=None, request_mac='', xfr=False, origin=None, tsig_ctx=None, multi=False, first=True, question_only=False, one_rr_per_rrset=False, ignore_trailing=False):
    """Parse a DNS wire-format message into a Message object."""
    message = Message(id=0)
    message.keyring = keyring
    message.request_mac = request_mac
    message.xfr = xfr
    message.origin = origin
    message.tsig_ctx = tsig_ctx
    message.multi = multi
    message.first = first
    _WireReader(wire, message, question_only, one_rr_per_rrset, ignore_trailing).read()
    return message
[ "def", "from_wire", "(", "wire", ",", "keyring", "=", "None", ",", "request_mac", "=", "''", ",", "xfr", "=", "False", ",", "origin", "=", "None", ",", "tsig_ctx", "=", "None", ",", "multi", "=", "False", ",", "first", "=", "True", ",", "question_onl...
convert a dns wire format message into a message object .
train
true
39,838
def get_action_manager():
    """Instantiate the action-manager class named by
    ``ACTSTREAM_SETTINGS['MANAGER']`` (a dotted path; defaults to
    ``actstream.managers.ActionManager``).

    Raises:
        ImportError: when the dotted path cannot be imported.
    """
    mod = SETTINGS.get('MANAGER', 'actstream.managers.ActionManager')
    module_path, _, class_name = mod.rpartition('.')
    try:
        module = __import__(module_path, {}, {}, [class_name])
        return getattr(module, class_name)()
    except ImportError:
        # Fixed: original message read "[MANAGER]setting." (missing space).
        raise ImportError(
            'Cannot import %s try fixing ACTSTREAM_SETTINGS[MANAGER] '
            'setting.' % mod)
[ "def", "get_action_manager", "(", ")", ":", "mod", "=", "SETTINGS", ".", "get", "(", "'MANAGER'", ",", "'actstream.managers.ActionManager'", ")", "mod_path", "=", "mod", ".", "split", "(", "'.'", ")", "try", ":", "return", "getattr", "(", "__import__", "(", ...
returns the class of the action manager to use from actstream_settings[manager] .
train
true
39,839
def bic(llf, nobs, df_modelwc):
    """Bayesian information criterion (Schwarz criterion).

    llf: log-likelihood value; nobs: number of observations;
    df_modelwc: number of parameters including the constant.
    Returns ``-2*llf + log(nobs)*df_modelwc``.
    """
    penalty = np.log(nobs) * df_modelwc
    return penalty + (-2.0) * llf
[ "def", "bic", "(", "llf", ",", "nobs", ",", "df_modelwc", ")", ":", "return", "(", "(", "(", "-", "2.0", ")", "*", "llf", ")", "+", "(", "np", ".", "log", "(", "nobs", ")", "*", "df_modelwc", ")", ")" ]
bayesian information criterion or schwarz criterion parameters llf : float value of the loglikelihood nobs : int number of observations df_modelwc : int number of parameters including constant returns bic : float information criterion references URL .
train
false
39,840
def in_words(integer, in_million=True):
    # Spell out *integer* in words.  Uses the Indian locale ('en_IN') when
    # in_million is False, otherwise the active frappe language.
    locale = (u'en_IN' if (not in_million) else frappe.local.lang)
    integer = int(integer)
    try:
        ret = num2words(integer, lang=locale)
    except NotImplementedError:
        # Locale unsupported by num2words: fall back to plain English.
        ret = num2words(integer, lang=u'en')
    # Normalize hyphenated words (e.g. 'twenty-one') to spaces.
    return ret.replace(u'-', u' ')
[ "def", "in_words", "(", "integer", ",", "in_million", "=", "True", ")", ":", "locale", "=", "(", "u'en_IN'", "if", "(", "not", "in_million", ")", "else", "frappe", ".", "local", ".", "lang", ")", "integer", "=", "int", "(", "integer", ")", "try", ":"...
returns string in words for the given integer .
train
false
39,843
@not_implemented_for('directed')
@not_implemented_for('multigraph')
def ra_index_soundarajan_hopcroft(G, ebunch=None, community='community'):
    # Community-aware resource-allocation index: for each node pair (u, v),
    # sum 1/deg(w) over common neighbours w that share the pair's community;
    # the score is 0 when u and v are in different communities.
    def predict(u, v):
        Cu = _community(G, u, community)
        Cv = _community(G, v, community)
        if (Cu != Cv):
            return 0
        cnbors = nx.common_neighbors(G, u, v)
        return sum(((1 / G.degree(w)) for w in cnbors if (_community(G, w, community) == Cu)))
    return _apply_prediction(G, predict, ebunch)
[ "@", "not_implemented_for", "(", "'directed'", ")", "@", "not_implemented_for", "(", "'multigraph'", ")", "def", "ra_index_soundarajan_hopcroft", "(", "G", ",", "ebunch", "=", "None", ",", "community", "=", "'community'", ")", ":", "def", "predict", "(", "u", ...
compute the resource allocation index of all node pairs in ebunch using community information .
train
false
39,844
def isBlockLevel(tag):
    """Return a truthy match object when *tag* names a block-level HTML element."""
    match = BLOCK_LEVEL_ELEMENTS.match(tag)
    return match
[ "def", "isBlockLevel", "(", "tag", ")", ":", "return", "BLOCK_LEVEL_ELEMENTS", ".", "match", "(", "tag", ")" ]
check if the tag is a block level html tag .
train
false
39,845
def parse_media_json(json_object):
    """Map quality names ('1080p', '420p', 'audio_<fmt>') to recording URLs,
    skipping web/webm formats."""
    recordings = {}
    for rec in json_object['recordings']:
        file_format = re.search('.*\\/(?P<format>.*)', rec['mime_type']).group('format')
        mime = rec['mime_type']
        display = rec['display_mime_type']
        if mime == 'vnd.voc/mp4-web' or display == 'video/webm':
            continue
        if mime == 'vnd.voc/h264-hd':
            name = '1080p'
        elif mime == 'vnd.voc/h264-lq':
            name = '420p'
        elif re.match('audio', display):
            name = 'audio_%s' % file_format
        elif rec['hd'] == 'True':
            name = '1080p'
        else:
            name = '420p'
        recordings[name] = rec['recording_url']
    return recordings
[ "def", "parse_media_json", "(", "json_object", ")", ":", "recordings", "=", "{", "}", "for", "recording", "in", "json_object", "[", "'recordings'", "]", ":", "match", "=", "re", ".", "search", "(", "'.*\\\\/(?P<format>.*)'", ",", "recording", "[", "'mime_type'...
expose available file formats .
train
false
39,846
def match_model_string(s):
    """Parse an 'app_label.model_name:pk'-style string via REDIRECT_TO_RE.

    Returns (app_label, model_name, pk) or None when it does not match.
    """
    match = REDIRECT_TO_RE.match(s)
    if match is None:
        return None
    parts = match.groupdict()
    return (parts[u'app_label'], parts[u'model_name'], int(parts[u'pk']))
[ "def", "match_model_string", "(", "s", ")", ":", "match", "=", "REDIRECT_TO_RE", ".", "match", "(", "s", ")", "if", "(", "not", "match", ")", ":", "return", "None", "matches", "=", "match", ".", "groupdict", "(", ")", "return", "(", "matches", "[", "...
try to parse a string in format "app_label .
train
false
39,847
def add_discussion(page, menu_index=0):
    """Insert a new discussion component via the component menu."""
    selector = 'button>span.large-discussion-icon'
    page.wait_for_component_menu()
    click_css(page, selector, menu_index)
[ "def", "add_discussion", "(", "page", ",", "menu_index", "=", "0", ")", ":", "page", ".", "wait_for_component_menu", "(", ")", "click_css", "(", "page", ",", "'button>span.large-discussion-icon'", ",", "menu_index", ")" ]
add a new instance of the discussion category .
train
false
39,848
def key_description(character):
    """Describe *character*: control chars become 'Ctrl+X', others use repr()."""
    code = ord(character)
    if code >= 32:
        return repr(character)
    return 'Ctrl+{:c}'.format(ord('@') + code)
[ "def", "key_description", "(", "character", ")", ":", "ascii_code", "=", "ord", "(", "character", ")", "if", "(", "ascii_code", "<", "32", ")", ":", "return", "'Ctrl+{:c}'", ".", "format", "(", "(", "ord", "(", "'@'", ")", "+", "ascii_code", ")", ")", ...
generate a readable description for a key .
train
false
39,849
def parse_content_range_header(value, on_update=None):
    # Parse an HTTP Content-Range header ('<units> <start>-<stop>/<length>')
    # into a ContentRange object; returns None for malformed values.
    if (value is None):
        return None
    try:
        (units, rangedef) = (value or '').strip().split(None, 1)
    except ValueError:
        return None
    if ('/' not in rangedef):
        return None
    (rng, length) = rangedef.split('/', 1)
    if (length == '*'):
        # '*' length means the total size is unknown.
        length = None
    elif length.isdigit():
        length = int(length)
    else:
        return None
    if (rng == '*'):
        # Unsatisfied range: no start/stop positions.
        return ContentRange(units, None, None, length, on_update=on_update)
    elif ('-' not in rng):
        return None
    (start, stop) = rng.split('-', 1)
    try:
        start = int(start)
        # Header stop is inclusive; internal representation is exclusive.
        stop = (int(stop) + 1)
    except ValueError:
        return None
    if is_byte_range_valid(start, stop, length):
        return ContentRange(units, start, stop, length, on_update=on_update)
[ "def", "parse_content_range_header", "(", "value", ",", "on_update", "=", "None", ")", ":", "if", "(", "value", "is", "None", ")", ":", "return", "None", "try", ":", "(", "units", ",", "rangedef", ")", "=", "(", "value", "or", "''", ")", ".", "strip"...
parses a range header into a :class:~werkzeug .
train
true
39,852
def pre_save_handler(sender, instance, **kwargs):
    """On first save stamp creation_date; always refresh modification_date."""
    now = timezone.now()
    if instance.id is None:
        instance.creation_date = now
    instance.modification_date = now
[ "def", "pre_save_handler", "(", "sender", ",", "instance", ",", "**", "kwargs", ")", ":", "now", "=", "timezone", ".", "now", "(", ")", "if", "(", "instance", ".", "id", "is", "None", ")", ":", "instance", ".", "creation_date", "=", "now", "instance", ...
intercept attempts to save and sort the tag field alphabetically .
train
false
39,853
def get_static_tab_contents(request, course, tab):
    # Render and return the HTML contents of a course static tab; on any
    # rendering failure an error-message template is returned instead.
    loc = course.id.make_usage_key(tab.type, tab.url_slug)
    field_data_cache = FieldDataCache.cache_for_descriptor_descendents(course.id, request.user, modulestore().get_item(loc), depth=0)
    tab_module = get_module(request.user, request, loc, field_data_cache, static_asset_path=course.static_asset_path, course=course)
    logging.debug('course_module = %s', tab_module)
    html = ''
    if (tab_module is not None):
        try:
            html = tab_module.render(STUDENT_VIEW).content
        except Exception:
            # Degrade gracefully: show a generic error page and log details.
            html = render_to_string('courseware/error-message.html', None)
            log.exception(u'Error rendering course=%s, tab=%s', course, tab['url_slug'])
    return html
[ "def", "get_static_tab_contents", "(", "request", ",", "course", ",", "tab", ")", ":", "loc", "=", "course", ".", "id", ".", "make_usage_key", "(", "tab", ".", "type", ",", "tab", ".", "url_slug", ")", "field_data_cache", "=", "FieldDataCache", ".", "cache...
returns the contents for the given static tab .
train
false