id_within_dataset
int64
1
55.5k
snippet
stringlengths
19
14.2k
tokens
listlengths
6
1.63k
nl
stringlengths
6
352
split_within_dataset
stringclasses
1 value
is_duplicated
bool
2 classes
8,230
def check_directory_tree(base_path, file_check, exclusions=set(), pattern='*.py'): if (not base_path): return for (root, dirs, files) in walk(base_path): check_files(glob(join(root, pattern)), file_check, exclusions)
[ "def", "check_directory_tree", "(", "base_path", ",", "file_check", ",", "exclusions", "=", "set", "(", ")", ",", "pattern", "=", "'*.py'", ")", ":", "if", "(", "not", "base_path", ")", ":", "return", "for", "(", "root", ",", "dirs", ",", "files", ")",...
checks all files in the directory tree with the file_check function provided .
train
false
8,231
def _is_unique(url): return (Service.objects.filter(base_url=url).count() == 0)
[ "def", "_is_unique", "(", "url", ")", ":", "return", "(", "Service", ".", "objects", ".", "filter", "(", "base_url", "=", "url", ")", ".", "count", "(", ")", "==", "0", ")" ]
determine if a service is already registered based on matching url .
train
false
8,232
def send_event_publish(email, event_name, link): message_settings = MessageSettings.query.filter_by(action=NEXT_EVENT).first() if ((not message_settings) or (message_settings.mail_status == 1)): send_email(to=email, action=NEXT_EVENT, subject=MAILS[EVENT_PUBLISH]['subject'].format(event_name=event_name), html=MAILS[EVENT_PUBLISH]['message'].format(email=str(email), event_name=str(event_name), link=link))
[ "def", "send_event_publish", "(", "email", ",", "event_name", ",", "link", ")", ":", "message_settings", "=", "MessageSettings", ".", "query", ".", "filter_by", "(", "action", "=", "NEXT_EVENT", ")", ".", "first", "(", ")", "if", "(", "(", "not", "message_...
send email on publishing event .
train
false
8,233
def choose_result_int(*inputs): bitwidth = choose_result_bitwidth(*inputs) signed = any((tp.signed for tp in inputs)) return types.Integer.from_bitwidth(bitwidth, signed)
[ "def", "choose_result_int", "(", "*", "inputs", ")", ":", "bitwidth", "=", "choose_result_bitwidth", "(", "*", "inputs", ")", "signed", "=", "any", "(", "(", "tp", ".", "signed", "for", "tp", "in", "inputs", ")", ")", "return", "types", ".", "Integer", ...
choose the integer result type for an operation on integer inputs .
train
false
8,234
def p_expr_group(p): p[0] = ('GROUP', p[2])
[ "def", "p_expr_group", "(", "p", ")", ":", "p", "[", "0", "]", "=", "(", "'GROUP'", ",", "p", "[", "2", "]", ")" ]
expr : lparen expr rparen .
train
false
8,237
def get_hmm(): return 'hmmm...'
[ "def", "get_hmm", "(", ")", ":", "return", "'hmmm...'" ]
get a thought .
train
false
8,238
def org_parents(organisation_id, path=[]): if (not organisation_id): return path db = current.db s3db = current.s3db otable = s3db.org_organisation btable = s3db.org_organisation.with_alias('org_branch_organisation') ltable = s3db.org_organisation_branch query = (btable.id == organisation_id) join = (((((ltable.deleted != True) & (btable.deleted != True)) & (otable.deleted != True)) & (btable.id == ltable.branch_id)) & (otable.id == ltable.organisation_id)) row = db((query & join)).select(otable.id, limitby=(0, 1)).first() if (row is not None): organisation_id = row.id path.insert(0, organisation_id) return org_parents(organisation_id, path) else: return path
[ "def", "org_parents", "(", "organisation_id", ",", "path", "=", "[", "]", ")", ":", "if", "(", "not", "organisation_id", ")", ":", "return", "path", "db", "=", "current", ".", "db", "s3db", "=", "current", ".", "s3db", "otable", "=", "s3db", ".", "or...
lookup the parent organisations of a branch organisation .
train
false
8,239
def get_cvxopt_sparse_intf(): import cvxpy.interface.cvxopt_interface.sparse_matrix_interface as smi return smi.SparseMatrixInterface()
[ "def", "get_cvxopt_sparse_intf", "(", ")", ":", "import", "cvxpy", ".", "interface", ".", "cvxopt_interface", ".", "sparse_matrix_interface", "as", "smi", "return", "smi", ".", "SparseMatrixInterface", "(", ")" ]
dynamic import of cvxopt sparse interface .
train
false
8,240
def is_reduced(exp): return _contains(exp, Reduced)
[ "def", "is_reduced", "(", "exp", ")", ":", "return", "_contains", "(", "exp", ",", "Reduced", ")" ]
does exp contain a reduced node .
train
false
8,241
def _compute_tauk(n, k, maxit=5): a = ((n % 2) - 0.5) c = (((((4.0 * floor((n / 2.0))) - (4.0 * k)) + 3.0) * pi) / (((4.0 * floor((n / 2.0))) + (2.0 * a)) + 2.0)) f = (lambda x: ((x - sin(x)) - c)) df = (lambda x: (1.0 - cos(x))) xi = (0.5 * pi) for i in range(maxit): xi = (xi - (f(xi) / df(xi))) return xi
[ "def", "_compute_tauk", "(", "n", ",", "k", ",", "maxit", "=", "5", ")", ":", "a", "=", "(", "(", "n", "%", "2", ")", "-", "0.5", ")", "c", "=", "(", "(", "(", "(", "(", "4.0", "*", "floor", "(", "(", "n", "/", "2.0", ")", ")", ")", "...
helper function for tricomi initial guesses for details .
train
false
8,242
def unset_quota_volume(name, path): cmd = 'volume quota {0}'.format(name) if path: cmd += ' remove {0}'.format(path) if (not _gluster(cmd)): return False return True
[ "def", "unset_quota_volume", "(", "name", ",", "path", ")", ":", "cmd", "=", "'volume quota {0}'", ".", "format", "(", "name", ")", "if", "path", ":", "cmd", "+=", "' remove {0}'", ".", "format", "(", "path", ")", "if", "(", "not", "_gluster", "(", "cm...
unset quota to glusterfs volume .
train
true
8,243
def test_represent_phasegate(): circuit = (PhaseGate(0) * Qubit('01')) answer = represent(circuit, nqubits=2) assert (Matrix([0, I, 0, 0]) == answer)
[ "def", "test_represent_phasegate", "(", ")", ":", "circuit", "=", "(", "PhaseGate", "(", "0", ")", "*", "Qubit", "(", "'01'", ")", ")", "answer", "=", "represent", "(", "circuit", ",", "nqubits", "=", "2", ")", "assert", "(", "Matrix", "(", "[", "0",...
test the representation of the s gate .
train
false
8,244
def _get_rvss(minibatch_RVs, local_RVs, observed_RVs, minibatch_tensors, total_size): if (minibatch_RVs is not None): _value_error(isinstance(minibatch_RVs, list), 'minibatch_RVs must be a list.') _value_error(((local_RVs is None) and (observed_RVs is None)), ('When minibatch_RVs is given, local_RVs and ' + 'observed_RVs must be None.')) s = (floatX(total_size) / minibatch_tensors[0].shape[0]) local_RVs = OrderedDict() observed_RVs = OrderedDict([(v, s) for v in minibatch_RVs]) else: _value_error((isinstance(local_RVs, OrderedDict) and isinstance(observed_RVs, OrderedDict)), 'local_RVs and observed_RVs must be OrderedDict.') return (local_RVs, observed_RVs)
[ "def", "_get_rvss", "(", "minibatch_RVs", ",", "local_RVs", ",", "observed_RVs", ",", "minibatch_tensors", ",", "total_size", ")", ":", "if", "(", "minibatch_RVs", "is", "not", "None", ")", ":", "_value_error", "(", "isinstance", "(", "minibatch_RVs", ",", "li...
returns local_rvs and observed_rvs .
train
false
8,245
def unbox_usecase(x): res = 0 for v in x: res += v return res
[ "def", "unbox_usecase", "(", "x", ")", ":", "res", "=", "0", "for", "v", "in", "x", ":", "res", "+=", "v", "return", "res" ]
expect a list of numbers .
train
false
8,247
def readURLFileLike(urlFileLike): encoding = urlFileLike.info().get('Content-Encoding') if (encoding in ('gzip', 'x-gzip', 'deflate')): content = urlFileLike.read() if (encoding == 'deflate'): data = StringIO.StringIO(zlib.decompress(content)) else: data = gzip.GzipFile('', 'rb', 9, StringIO.StringIO(content)) result = data.read() else: result = urlFileLike.read() urlFileLike.close() return result
[ "def", "readURLFileLike", "(", "urlFileLike", ")", ":", "encoding", "=", "urlFileLike", ".", "info", "(", ")", ".", "get", "(", "'Content-Encoding'", ")", "if", "(", "encoding", "in", "(", "'gzip'", ",", "'x-gzip'", ",", "'deflate'", ")", ")", ":", "cont...
return the contents of the file like objects as string .
train
false
8,248
@profiler.trace def roles_for_user(request, user, project=None, domain=None): manager = keystoneclient(request, admin=True).roles if (VERSIONS.active < 3): return manager.roles_for_user(user, project) else: return manager.list(user=user, domain=domain, project=project)
[ "@", "profiler", ".", "trace", "def", "roles_for_user", "(", "request", ",", "user", ",", "project", "=", "None", ",", "domain", "=", "None", ")", ":", "manager", "=", "keystoneclient", "(", "request", ",", "admin", "=", "True", ")", ".", "roles", "if"...
returns a list of user roles scoped to a project or domain .
train
true
8,249
def get_distance_metres(aLocation1, aLocation2): dlat = (aLocation2.lat - aLocation1.lat) dlong = (aLocation2.lon - aLocation1.lon) return (math.sqrt(((dlat * dlat) + (dlong * dlong))) * 111319.5)
[ "def", "get_distance_metres", "(", "aLocation1", ",", "aLocation2", ")", ":", "dlat", "=", "(", "aLocation2", ".", "lat", "-", "aLocation1", ".", "lat", ")", "dlong", "=", "(", "aLocation2", ".", "lon", "-", "aLocation1", ".", "lon", ")", "return", "(", ...
returns the ground distance in metres between two locationglobal objects .
train
true
8,250
def list_(channel=None): pecl_channel_pat = re.compile('^([^ ]+)[ ]+([^ ]+)[ ]+([^ ]+)') pecls = {} command = 'list' if channel: command = '{0} -c {1}'.format(command, _cmd_quote(channel)) lines = _pecl(command).splitlines() lines = (l for l in lines if pecl_channel_pat.match(l)) for line in lines: match = pecl_channel_pat.match(line) if match: pecls[match.group(1)] = [match.group(2), match.group(3)] return pecls
[ "def", "list_", "(", "channel", "=", "None", ")", ":", "pecl_channel_pat", "=", "re", ".", "compile", "(", "'^([^ ]+)[ ]+([^ ]+)[ ]+([^ ]+)'", ")", "pecls", "=", "{", "}", "command", "=", "'list'", "if", "channel", ":", "command", "=", "'{0} -c {1}'", ".", ...
list installed perl modules .
train
true
8,252
def serial_listen_addr(migrate_data): listen_addr = None if migrate_data.obj_attr_is_set('serial_listen_addr'): listen_addr = migrate_data.serial_listen_addr return listen_addr
[ "def", "serial_listen_addr", "(", "migrate_data", ")", ":", "listen_addr", "=", "None", "if", "migrate_data", ".", "obj_attr_is_set", "(", "'serial_listen_addr'", ")", ":", "listen_addr", "=", "migrate_data", ".", "serial_listen_addr", "return", "listen_addr" ]
returns listen address serial from a libvirtlivemigratedata .
train
false
8,253
def sync_queues(saltenv='base'): return salt.utils.extmods.sync(__opts__, 'queues', saltenv=saltenv)[0]
[ "def", "sync_queues", "(", "saltenv", "=", "'base'", ")", ":", "return", "salt", ".", "utils", ".", "extmods", ".", "sync", "(", "__opts__", ",", "'queues'", ",", "saltenv", "=", "saltenv", ")", "[", "0", "]" ]
sync queue modules from salt://_queues to the master saltenv : base the fileserver environment from which to sync .
train
false
8,255
def test_date_labels(): datey = DateLine(truncate_label=1000) datey.add('dates', [(date(2013, 1, 2), 300), (date(2013, 1, 12), 412), (date(2013, 2, 2), 823), (date(2013, 2, 22), 672)]) datey.x_labels = [date(2013, 1, 1), date(2013, 2, 1), date(2013, 3, 1)] q = datey.render_pyquery() assert (list(map((lambda t: t.split(' ')[0]), q('.axis.x text').map(texts))) == ['2013-01-01', '2013-02-01', '2013-03-01'])
[ "def", "test_date_labels", "(", ")", ":", "datey", "=", "DateLine", "(", "truncate_label", "=", "1000", ")", "datey", ".", "add", "(", "'dates'", ",", "[", "(", "date", "(", "2013", ",", "1", ",", "2", ")", ",", "300", ")", ",", "(", "date", "(",...
test dateline with xrange .
train
false
8,256
def build_recursive_traversal_spec(client_factory): visit_folders_select_spec = build_selection_spec(client_factory, 'visitFolders') dc_to_hf = build_traversal_spec(client_factory, 'dc_to_hf', 'Datacenter', 'hostFolder', False, [visit_folders_select_spec]) dc_to_vmf = build_traversal_spec(client_factory, 'dc_to_vmf', 'Datacenter', 'vmFolder', False, [visit_folders_select_spec]) h_to_vm = build_traversal_spec(client_factory, 'h_to_vm', 'HostSystem', 'vm', False, [visit_folders_select_spec]) cr_to_h = build_traversal_spec(client_factory, 'cr_to_h', 'ComputeResource', 'host', False, []) cr_to_ds = build_traversal_spec(client_factory, 'cr_to_ds', 'ComputeResource', 'datastore', False, []) rp_to_rp_select_spec = build_selection_spec(client_factory, 'rp_to_rp') rp_to_vm_select_spec = build_selection_spec(client_factory, 'rp_to_vm') cr_to_rp = build_traversal_spec(client_factory, 'cr_to_rp', 'ComputeResource', 'resourcePool', False, [rp_to_rp_select_spec, rp_to_vm_select_spec]) rp_to_rp = build_traversal_spec(client_factory, 'rp_to_rp', 'ResourcePool', 'resourcePool', False, [rp_to_rp_select_spec, rp_to_vm_select_spec]) rp_to_vm = build_traversal_spec(client_factory, 'rp_to_vm', 'ResourcePool', 'vm', False, [rp_to_rp_select_spec, rp_to_vm_select_spec]) traversal_spec = build_traversal_spec(client_factory, 'visitFolders', 'Folder', 'childEntity', False, [visit_folders_select_spec, dc_to_hf, dc_to_vmf, cr_to_ds, cr_to_h, cr_to_rp, rp_to_rp, h_to_vm, rp_to_vm]) return traversal_spec
[ "def", "build_recursive_traversal_spec", "(", "client_factory", ")", ":", "visit_folders_select_spec", "=", "build_selection_spec", "(", "client_factory", ",", "'visitFolders'", ")", "dc_to_hf", "=", "build_traversal_spec", "(", "client_factory", ",", "'dc_to_hf'", ",", "...
builds the recursive traversal spec to traverse the object managed object hierarchy .
train
false
8,257
@world.absorb def css_find(css, wait_time=GLOBAL_WAIT_FOR_TIMEOUT): wait_for_present(css_selector=css, timeout=wait_time) return world.browser.find_by_css(css)
[ "@", "world", ".", "absorb", "def", "css_find", "(", "css", ",", "wait_time", "=", "GLOBAL_WAIT_FOR_TIMEOUT", ")", ":", "wait_for_present", "(", "css_selector", "=", "css", ",", "timeout", "=", "wait_time", ")", "return", "world", ".", "browser", ".", "find_...
wait for the element(s) as defined by css locator to be present .
train
false
8,259
def getCounter(technique): return kb.counters.get(technique, 0)
[ "def", "getCounter", "(", "technique", ")", ":", "return", "kb", ".", "counters", ".", "get", "(", "technique", ",", "0", ")" ]
returns query counter for a given technique .
train
false
8,261
def sample_user_agent(): class DummyConfig(object, ): 'Shim for computing a sample user agent.' def __init__(self): self.authenticator = 'XXX' self.installer = 'YYY' self.user_agent = None return determine_user_agent(DummyConfig())
[ "def", "sample_user_agent", "(", ")", ":", "class", "DummyConfig", "(", "object", ",", ")", ":", "def", "__init__", "(", "self", ")", ":", "self", ".", "authenticator", "=", "'XXX'", "self", ".", "installer", "=", "'YYY'", "self", ".", "user_agent", "=",...
document what this certbots user agent string will be like .
train
false
8,262
def reserve_hosts(host_filter_data, username=None): hosts = models.Host.query_objects(host_filter_data) reservations.create(hosts_to_reserve=[h.hostname for h in hosts], username=username)
[ "def", "reserve_hosts", "(", "host_filter_data", ",", "username", "=", "None", ")", ":", "hosts", "=", "models", ".", "Host", ".", "query_objects", "(", "host_filter_data", ")", "reservations", ".", "create", "(", "hosts_to_reserve", "=", "[", "h", ".", "hos...
reserve some hosts .
train
false
8,264
def pointer_add(builder, ptr, offset, return_type=None): intptr = builder.ptrtoint(ptr, intp_t) if isinstance(offset, utils.INT_TYPES): offset = intp_t(offset) intptr = builder.add(intptr, offset) return builder.inttoptr(intptr, (return_type or ptr.type))
[ "def", "pointer_add", "(", "builder", ",", "ptr", ",", "offset", ",", "return_type", "=", "None", ")", ":", "intptr", "=", "builder", ".", "ptrtoint", "(", "ptr", ",", "intp_t", ")", "if", "isinstance", "(", "offset", ",", "utils", ".", "INT_TYPES", ")...
add an integral *offset* to pointer *ptr* .
train
false
8,265
def test_vm_gc(): x = theano.tensor.vector() p = RunOnce()(x) mode = theano.Mode(linker=theano.gof.vm.VM_Linker(lazy=True)) f = theano.function([theano.In(x, mutable=True)], [(p + 1), (p + 2)], mode=mode) f([1, 2, 3]) p = RunOnce()(x) pp = (p + p) f = theano.function([x], [(pp + pp)], mode=mode) f([1, 2, 3])
[ "def", "test_vm_gc", "(", ")", ":", "x", "=", "theano", ".", "tensor", ".", "vector", "(", ")", "p", "=", "RunOnce", "(", ")", "(", "x", ")", "mode", "=", "theano", ".", "Mode", "(", "linker", "=", "theano", ".", "gof", ".", "vm", ".", "VM_Link...
this already caused a bug in the trunk of theano .
train
false
8,266
def _bootstrap_debian(name, **kwargs): version = kwargs.get('version', False) if (not version): if (__grains__['os'].lower() == 'debian'): version = __grains__['osrelease'] else: version = 'stable' release_blacklist = ['hamm', 'slink', 'potato', 'woody', 'sarge', 'etch', 'lenny', 'squeeze', 'wheezy'] if (version in release_blacklist): raise CommandExecutionError('Unsupported Debian version "{0}". Only "stable" or "jessie" and newer are supported'.format(version)) dst = _make_container_root(name) cmd = 'debootstrap --arch=amd64 {0} {1}'.format(version, dst) ret = __salt__['cmd.run_all'](cmd, python_shell=False) if (ret['retcode'] != 0): _build_failed(dst, name) return ret
[ "def", "_bootstrap_debian", "(", "name", ",", "**", "kwargs", ")", ":", "version", "=", "kwargs", ".", "get", "(", "'version'", ",", "False", ")", "if", "(", "not", "version", ")", ":", "if", "(", "__grains__", "[", "'os'", "]", ".", "lower", "(", ...
bootstrap a debian linux container .
train
true
8,268
def changeRememberedMaster(newPassword): global MasterPassword if (newPassword == u''): MasterPassword = None else: MasterPassword = pwEncode(newPassword)
[ "def", "changeRememberedMaster", "(", "newPassword", ")", ":", "global", "MasterPassword", "if", "(", "newPassword", "==", "u''", ")", ":", "MasterPassword", "=", "None", "else", ":", "MasterPassword", "=", "pwEncode", "(", "newPassword", ")" ]
module function to change the remembered master password .
train
false
8,269
def seq_concat_seq(a, b): prefer = type(max([a, b], key=len)) if (not isinstance(a, prefer)): a = prefer(a) if (not isinstance(b, prefer)): b = prefer(b) return (a + b)
[ "def", "seq_concat_seq", "(", "a", ",", "b", ")", ":", "prefer", "=", "type", "(", "max", "(", "[", "a", ",", "b", "]", ",", "key", "=", "len", ")", ")", "if", "(", "not", "isinstance", "(", "a", ",", "prefer", ")", ")", ":", "a", "=", "pre...
concatenate two sequences: a + b .
train
false
8,270
def _gcd_interpolate(h, x, ring): (f, i) = (ring.zero, 0) if (ring.ngens == 1): while h: g = (h % x) if (g > (x // 2)): g -= x h = ((h - g) // x) if g: f[(i,)] = g i += 1 else: while h: g = h.trunc_ground(x) h = (h - g).quo_ground(x) if g: for (monom, coeff) in g.iterterms(): f[((i,) + monom)] = coeff i += 1 if (f.LC < 0): return (- f) else: return f
[ "def", "_gcd_interpolate", "(", "h", ",", "x", ",", "ring", ")", ":", "(", "f", ",", "i", ")", "=", "(", "ring", ".", "zero", ",", "0", ")", "if", "(", "ring", ".", "ngens", "==", "1", ")", ":", "while", "h", ":", "g", "=", "(", "h", "%",...
interpolate polynomial gcd from integer gcd .
train
false
8,271
def WriteXmlIfChanged(content, path, encoding='utf-8', pretty=False, win32=False): xml_string = XmlToString(content, encoding, pretty) if (win32 and (os.linesep != '\r\n')): xml_string = xml_string.replace('\n', '\r\n') try: xml_string = xml_string.encode(encoding) except Exception: xml_string = unicode(xml_string, 'latin-1').encode(encoding) try: f = open(path, 'r') existing = f.read() f.close() except: existing = None if (existing != xml_string): f = open(path, 'w') f.write(xml_string) f.close()
[ "def", "WriteXmlIfChanged", "(", "content", ",", "path", ",", "encoding", "=", "'utf-8'", ",", "pretty", "=", "False", ",", "win32", "=", "False", ")", ":", "xml_string", "=", "XmlToString", "(", "content", ",", "encoding", ",", "pretty", ")", "if", "(",...
writes the xml content to disk .
train
false
8,272
def _noop(object): return object
[ "def", "_noop", "(", "object", ")", ":", "return", "object" ]
return the passed object unmodified .
train
false
8,273
def seguid(seq): import hashlib import base64 m = hashlib.sha1() try: seq = str(seq) except AttributeError: pass m.update(_as_bytes(seq.upper())) try: return base64.encodebytes(m.digest()).decode().replace('\n', '').rstrip('=') except AttributeError: pass return base64.b64encode(m.digest()).rstrip('=')
[ "def", "seguid", "(", "seq", ")", ":", "import", "hashlib", "import", "base64", "m", "=", "hashlib", ".", "sha1", "(", ")", "try", ":", "seq", "=", "str", "(", "seq", ")", "except", "AttributeError", ":", "pass", "m", ".", "update", "(", "_as_bytes",...
returns the seguid for a sequence .
train
false
8,274
def split_on_newlines(s): res = [] for x in s.split('\r\n'): for y in x.split('\r'): res.extend(y.split('\n')) return res
[ "def", "split_on_newlines", "(", "s", ")", ":", "res", "=", "[", "]", "for", "x", "in", "s", ".", "split", "(", "'\\r\\n'", ")", ":", "for", "y", "in", "x", ".", "split", "(", "'\\r'", ")", ":", "res", ".", "extend", "(", "y", ".", "split", "...
splits s on all of the three newline sequences: " " .
train
false
8,276
def duplicate(image): return image.copy()
[ "def", "duplicate", "(", "image", ")", ":", "return", "image", ".", "copy", "(", ")" ]
copy a channel .
train
false
8,277
def set_log_methods(cls, logger): cls.__logger = logger for attr in ('debug', 'info', 'warning', 'error', 'critical', 'exception'): setattr(cls, attr, getattr(logger, attr))
[ "def", "set_log_methods", "(", "cls", ",", "logger", ")", ":", "cls", ".", "__logger", "=", "logger", "for", "attr", "in", "(", "'debug'", ",", "'info'", ",", "'warning'", ",", "'error'", ",", "'critical'", ",", "'exception'", ")", ":", "setattr", "(", ...
bind standard loggers methods as methods on the class .
train
false
8,278
@handle_response_format @treeio_login_required def ticket_delete(request, ticket_id, response_format='html'): ticket = get_object_or_404(Ticket, pk=ticket_id) if (not request.user.profile.has_permission(ticket, mode='w')): return user_denied(request, message="You don't have access to this Ticket") if request.POST: if ('delete' in request.POST): if ('trash' in request.POST): ticket.trash = True ticket.save() else: ticket.delete() return HttpResponseRedirect(reverse('services_index')) elif ('cancel' in request.POST): return HttpResponseRedirect(reverse('services_ticket_view', args=[ticket.id])) context = _get_default_context(request) context.update({'ticket': ticket}) return render_to_response('services/ticket_delete', context, context_instance=RequestContext(request), response_format=response_format)
[ "@", "handle_response_format", "@", "treeio_login_required", "def", "ticket_delete", "(", "request", ",", "ticket_id", ",", "response_format", "=", "'html'", ")", ":", "ticket", "=", "get_object_or_404", "(", "Ticket", ",", "pk", "=", "ticket_id", ")", "if", "("...
ticket delete .
train
false
8,279
def analyze_module(pycore, pymodule, should_analyze, search_subscopes, followed_calls): _analyze_node(pycore, pymodule, should_analyze, search_subscopes, followed_calls)
[ "def", "analyze_module", "(", "pycore", ",", "pymodule", ",", "should_analyze", ",", "search_subscopes", ",", "followed_calls", ")", ":", "_analyze_node", "(", "pycore", ",", "pymodule", ",", "should_analyze", ",", "search_subscopes", ",", "followed_calls", ")" ]
perform static object analysis on a python file in the project note that this might be really time consuming .
train
true
8,280
def autosummary_toc_visit_latex(self, node): pass
[ "def", "autosummary_toc_visit_latex", "(", "self", ",", "node", ")", ":", "pass" ]
show autosummary toctree in latex .
train
false
8,281
def resource_query(name): def make_responder(query_func): def responder(queries): return app.response_class(json_generator(query_func(queries), root='results', expand=is_expand()), mimetype='application/json') responder.__name__ = 'query_{0}'.format(name) return responder return make_responder
[ "def", "resource_query", "(", "name", ")", ":", "def", "make_responder", "(", "query_func", ")", ":", "def", "responder", "(", "queries", ")", ":", "return", "app", ".", "response_class", "(", "json_generator", "(", "query_func", "(", "queries", ")", ",", ...
decorates a function to handle restful http queries for resources .
train
false
8,282
def _parse_mongodb_databases_kv(line, out, prefix=None, force_type=None, value_name=None): try: (key, value) = line.split(' =', 1) value = value[1:] except ValueError: sys.stderr.write(('WARNING: unknown keyword %r\r\n' % line)) return if (key == '$err'): key = 'errmsg' if (prefix is not None): key = ('%s_%s' % (prefix, key)) if (force_type is not None): value = force_type(value) else: value = _MONGODB_DATABASES_TYPES.get(key, (lambda x: x))(value) if isinstance(out, dict): assert (key not in out) out[key] = value elif isinstance(out, list): out.append({'name': key, value_name: value})
[ "def", "_parse_mongodb_databases_kv", "(", "line", ",", "out", ",", "prefix", "=", "None", ",", "force_type", "=", "None", ",", "value_name", "=", "None", ")", ":", "try", ":", "(", "key", ",", "value", ")", "=", "line", ".", "split", "(", "' ='", ",...
parse key = value lines from mongodb-databases output .
train
false
8,283
@register.function @jinja2.contextfunction def collection_widgets(context, collection, condensed=False): c = dict(context.items()) if collection: c.update({'condensed': condensed, 'c': collection}) template = get_env().get_template('bandwagon/collection_widgets.html') return jinja2.Markup(template.render(c))
[ "@", "register", ".", "function", "@", "jinja2", ".", "contextfunction", "def", "collection_widgets", "(", "context", ",", "collection", ",", "condensed", "=", "False", ")", ":", "c", "=", "dict", "(", "context", ".", "items", "(", ")", ")", "if", "colle...
displays collection widgets .
train
false
8,284
def _interpret_emr_step_stderr(fs, matches): for match in matches: path = match['path'] error = _parse_task_stderr(_cat_log(fs, path)) if error: error['path'] = path return dict(errors=[dict(task_error=error)]) return {}
[ "def", "_interpret_emr_step_stderr", "(", "fs", ",", "matches", ")", ":", "for", "match", "in", "matches", ":", "path", "=", "match", "[", "'path'", "]", "error", "=", "_parse_task_stderr", "(", "_cat_log", "(", "fs", ",", "path", ")", ")", "if", "error"...
extract information from step stderr (see :py:func:~mrjob .
train
false
8,285
def _complete_path(path=None): if (not path): return _listdir('.') (dirname, rest) = os.path.split(path) tmp = (dirname if dirname else '.') res = [p for p in _listdir(tmp) if p.startswith(rest)] if ((len(res) > 1) or (not os.path.exists(path))): return res if os.path.isdir(path): return [p for p in _listdir(path)] return [(path + ' ')]
[ "def", "_complete_path", "(", "path", "=", "None", ")", ":", "if", "(", "not", "path", ")", ":", "return", "_listdir", "(", "'.'", ")", "(", "dirname", ",", "rest", ")", "=", "os", ".", "path", ".", "split", "(", "path", ")", "tmp", "=", "(", "...
perform completion of filesystem path .
train
true
8,286
def bin(number): tmp = [BIN_HEX_DICT[hstr] for hstr in hex(number)[2:]] return BIN_ZSTRIP.sub('0b', ''.join(tmp))
[ "def", "bin", "(", "number", ")", ":", "tmp", "=", "[", "BIN_HEX_DICT", "[", "hstr", "]", "for", "hstr", "in", "hex", "(", "number", ")", "[", "2", ":", "]", "]", "return", "BIN_ZSTRIP", ".", "sub", "(", "'0b'", ",", "''", ".", "join", "(", "tm...
returns a dictionary with items in the given list grouped by the given key .
train
false
8,288
def repanel_cov(groups, sigmas): if (groups.ndim == 1): groups = groups[:, None] (nobs, nre) = groups.shape omega = (sigmas[(-1)] * np.eye(nobs)) for igr in range(nre): group = groups[:, igr:(igr + 1)] groupuniq = np.unique(group) dummygr = (sigmas[igr] * (group == groupuniq).astype(float)) omega += np.dot(dummygr, dummygr.T) (ev, evec) = np.linalg.eigh(omega) omegainv = np.dot(evec, ((1 / ev) * evec).T) omegainvhalf = (evec / np.sqrt(ev)) return (omega, omegainv, omegainvhalf)
[ "def", "repanel_cov", "(", "groups", ",", "sigmas", ")", ":", "if", "(", "groups", ".", "ndim", "==", "1", ")", ":", "groups", "=", "groups", "[", ":", ",", "None", "]", "(", "nobs", ",", "nre", ")", "=", "groups", ".", "shape", "omega", "=", "...
calculate error covariance matrix for random effects model parameters groups : array .
train
false
8,289
def no_vtk(): global _vtk_version return (_vtk_version is None)
[ "def", "no_vtk", "(", ")", ":", "global", "_vtk_version", "return", "(", "_vtk_version", "is", "None", ")" ]
checks if vtk is installed and the python wrapper is functional .
train
false
8,291
def log_events(klass): old_event = klass.event @functools.wraps(old_event) def new_event(self, e, *args, **kwargs): 'Wrapper for event() which logs events.' log.misc.debug('Event in {}: {}'.format(utils.qualname(klass), qenum_key(QEvent, e.type()))) return old_event(self, e, *args, **kwargs) klass.event = new_event return klass
[ "def", "log_events", "(", "klass", ")", ":", "old_event", "=", "klass", ".", "event", "@", "functools", ".", "wraps", "(", "old_event", ")", "def", "new_event", "(", "self", ",", "e", ",", "*", "args", ",", "**", "kwargs", ")", ":", "log", ".", "mi...
class decorator to log qt events .
train
false
8,292
def test_majority_label_iris(): clf1 = LogisticRegression(random_state=123) clf2 = RandomForestClassifier(random_state=123) clf3 = GaussianNB() eclf = VotingClassifier(estimators=[('lr', clf1), ('rf', clf2), ('gnb', clf3)], voting='hard') scores = cross_val_score(eclf, X, y, cv=5, scoring='accuracy') assert_almost_equal(scores.mean(), 0.95, decimal=2)
[ "def", "test_majority_label_iris", "(", ")", ":", "clf1", "=", "LogisticRegression", "(", "random_state", "=", "123", ")", "clf2", "=", "RandomForestClassifier", "(", "random_state", "=", "123", ")", "clf3", "=", "GaussianNB", "(", ")", "eclf", "=", "VotingCla...
check classification by majority label on dataset iris .
train
false
8,293
def _re_compile(regex): return re.compile(regex, (re.I | re.UNICODE))
[ "def", "_re_compile", "(", "regex", ")", ":", "return", "re", ".", "compile", "(", "regex", ",", "(", "re", ".", "I", "|", "re", ".", "UNICODE", ")", ")" ]
compile a string to regex .
train
false
8,294
def get_package_name(filepath): pkg_name = [] while (('site-packages' in filepath) or ('dist-packages' in filepath)): (filepath, p2) = os.path.split(filepath) if (p2 in ('site-packages', 'dist-packages')): break pkg_name.append(p2) if (not pkg_name): return 'pandas' pkg_name.reverse() if pkg_name[0].endswith('.egg'): pkg_name.pop(0) return '.'.join(pkg_name)
[ "def", "get_package_name", "(", "filepath", ")", ":", "pkg_name", "=", "[", "]", "while", "(", "(", "'site-packages'", "in", "filepath", ")", "or", "(", "'dist-packages'", "in", "filepath", ")", ")", ":", "(", "filepath", ",", "p2", ")", "=", "os", "."...
get "name" from a package with a workaround when its not defined .
train
false
8,295
def _range_string_to_set(range_str): if ('..' in range_str): (range_start, range_end) = range_str.split('..') range_start = int(range_start, 16) range_end = int(range_end, 16) return set(range(range_start, (range_end + 1))) else: return {int(range_str, 16)}
[ "def", "_range_string_to_set", "(", "range_str", ")", ":", "if", "(", "'..'", "in", "range_str", ")", ":", "(", "range_start", ",", "range_end", ")", "=", "range_str", ".", "split", "(", "'..'", ")", "range_start", "=", "int", "(", "range_start", ",", "1...
convert a range encoding in a string to a set .
train
false
8,296
def parse_stats(conf, json): 'Ignore stats if coming from non-leading mesos master' elected_result = lookup_stat('master/elected', json, conf) if (elected_result == 1): for (name, key) in get_stats_string(conf['version']).iteritems(): result = lookup_stat(name, json, conf) dispatch_stat(result, name, key, conf) else: log_verbose(conf['verboseLogging'], 'This mesos master node is not elected leader so not writing data.') return None
[ "def", "parse_stats", "(", "conf", ",", "json", ")", ":", "elected_result", "=", "lookup_stat", "(", "'master/elected'", ",", "json", ",", "conf", ")", "if", "(", "elected_result", "==", "1", ")", ":", "for", "(", "name", ",", "key", ")", "in", "get_st...
parse stats response from mesos slave .
train
false
8,297
def member_list(context, data_dict=None): model = context['model'] group = model.Group.get(_get_or_bust(data_dict, 'id')) if (not group): raise NotFound obj_type = data_dict.get('object_type', None) capacity = data_dict.get('capacity', None) _check_access('group_show', context, data_dict) q = model.Session.query(model.Member).filter((model.Member.group_id == group.id)).filter((model.Member.state == 'active')) if obj_type: q = q.filter((model.Member.table_name == obj_type)) if capacity: q = q.filter((model.Member.capacity == capacity)) trans = authz.roles_trans() def translated_capacity(capacity): try: return trans[capacity] except KeyError: return capacity return [(m.table_id, m.table_name, translated_capacity(m.capacity)) for m in q.all()]
[ "def", "member_list", "(", "context", ",", "data_dict", "=", "None", ")", ":", "model", "=", "context", "[", "'model'", "]", "group", "=", "model", ".", "Group", ".", "get", "(", "_get_or_bust", "(", "data_dict", ",", "'id'", ")", ")", "if", "(", "no...
return the members of a group .
train
false
8,298
def _split_series_episode(title): series_title = '' episode_or_year = '' if (title[(-1):] == '}'): begin_eps = title.rfind('{') if (begin_eps == (-1)): return ('', '') series_title = title[:begin_eps].rstrip() episode_or_year = title[begin_eps:].strip() if (episode_or_year[:12] == '{SUSPENDED}}'): return ('', '') elif (title[0:1] == '"'): second_quot = (title[1:].find('"') + 2) if (second_quot != 1): episode_or_year = title[second_quot:].lstrip() first_char = episode_or_year[0:1] if (not first_char): return ('', '') if (first_char != '('): series_title = title[:second_quot] return (series_title, episode_or_year)
[ "def", "_split_series_episode", "(", "title", ")", ":", "series_title", "=", "''", "episode_or_year", "=", "''", "if", "(", "title", "[", "(", "-", "1", ")", ":", "]", "==", "'}'", ")", ":", "begin_eps", "=", "title", ".", "rfind", "(", "'{'", ")", ...
return the series and the episode titles; if this is not a series episode .
train
false
8,300
def setup_package(): util.positional_parameters_enforcement = 'EXCEPTION'
[ "def", "setup_package", "(", ")", ":", "util", ".", "positional_parameters_enforcement", "=", "'EXCEPTION'" ]
run on testing package .
train
false
8,301
def get_map(options, data): try: map_f = open(options.map_fname, 'U').readlines() except (TypeError, IOError): raise MissingFileError('Mapping file required for this analysis') data['map'] = parse_mapping_file(map_f) return data['map']
[ "def", "get_map", "(", "options", ",", "data", ")", ":", "try", ":", "map_f", "=", "open", "(", "options", ".", "map_fname", ",", "'U'", ")", ".", "readlines", "(", ")", "except", "(", "TypeError", ",", "IOError", ")", ":", "raise", "MissingFileError",...
opens and returns mapping data .
train
false
8,302
def load_module_from_name(dotted_name, path=None, use_sys=1): return load_module_from_modpath(dotted_name.split('.'), path, use_sys)
[ "def", "load_module_from_name", "(", "dotted_name", ",", "path", "=", "None", ",", "use_sys", "=", "1", ")", ":", "return", "load_module_from_modpath", "(", "dotted_name", ".", "split", "(", "'.'", ")", ",", "path", ",", "use_sys", ")" ]
load a python module from its name .
train
true
8,304
def pass_context(f): def new_func(*args, **kwargs): return f(get_current_context(), *args, **kwargs) return update_wrapper(new_func, f)
[ "def", "pass_context", "(", "f", ")", ":", "def", "new_func", "(", "*", "args", ",", "**", "kwargs", ")", ":", "return", "f", "(", "get_current_context", "(", ")", ",", "*", "args", ",", "**", "kwargs", ")", "return", "update_wrapper", "(", "new_func",...
marks a callback as wanting to receive the current context object as first argument .
train
true
8,305
def exception_to_nan(func): def wrap(*a, **kw): try: return func(*a, **kw) except Exception: return np.nan return wrap
[ "def", "exception_to_nan", "(", "func", ")", ":", "def", "wrap", "(", "*", "a", ",", "**", "kw", ")", ":", "try", ":", "return", "func", "(", "*", "a", ",", "**", "kw", ")", "except", "Exception", ":", "return", "np", ".", "nan", "return", "wrap"...
decorate function to return nan if it raises an exception .
train
false
8,306
def ntop(address): af = (((len(address) == 4) and socket.AF_INET) or socket.AF_INET6) return socket.inet_ntop(af, address)
[ "def", "ntop", "(", "address", ")", ":", "af", "=", "(", "(", "(", "len", "(", "address", ")", "==", "4", ")", "and", "socket", ".", "AF_INET", ")", "or", "socket", ".", "AF_INET6", ")", "return", "socket", ".", "inet_ntop", "(", "af", ",", "addr...
convert address to its string representation .
train
false
8,307
def _convert_paths(paths): new_paths = [] for path in paths: new_path = _convert_2to3(path) new_paths.append(new_path) return new_paths
[ "def", "_convert_paths", "(", "paths", ")", ":", "new_paths", "=", "[", "]", "for", "path", "in", "paths", ":", "new_path", "=", "_convert_2to3", "(", "path", ")", "new_paths", ".", "append", "(", "new_path", ")", "return", "new_paths" ]
convert the given files .
train
false
8,308
def _rgb_to_hex(rgbs): (rgbs, n_dim) = _check_color_dim(rgbs) return np.array([('#%02x%02x%02x' % tuple((255 * rgb[:3]).astype(np.uint8))) for rgb in rgbs], '|U7')
[ "def", "_rgb_to_hex", "(", "rgbs", ")", ":", "(", "rgbs", ",", "n_dim", ")", "=", "_check_color_dim", "(", "rgbs", ")", "return", "np", ".", "array", "(", "[", "(", "'#%02x%02x%02x'", "%", "tuple", "(", "(", "255", "*", "rgb", "[", ":", "3", "]", ...
convert rgb to hex triplet .
train
true
8,309
def argmax(seq, func): return max(seq, key=func)
[ "def", "argmax", "(", "seq", ",", "func", ")", ":", "return", "max", "(", "seq", ",", "key", "=", "func", ")" ]
returns the indices of the maximum along an axis .
train
false
8,310
def oo_31_rpm_rename_conversion(rpms, openshift_version=None): if (not isinstance(rpms, list)): raise errors.AnsibleFilterError('failed expects to filter on a list') if ((openshift_version is not None) and (not isinstance(openshift_version, string_types))): raise errors.AnsibleFilterError('failed expects openshift_version to be a string') rpms_31 = [] for rpm in rpms: if ('atomic' not in rpm): rpm = rpm.replace('openshift', 'atomic-openshift') if openshift_version: rpm = (rpm + openshift_version) rpms_31.append(rpm) return rpms_31
[ "def", "oo_31_rpm_rename_conversion", "(", "rpms", ",", "openshift_version", "=", "None", ")", ":", "if", "(", "not", "isinstance", "(", "rpms", ",", "list", ")", ")", ":", "raise", "errors", ".", "AnsibleFilterError", "(", "'failed expects to filter on a list'", ...
filters a list of 3 .
train
false
8,311
def normal_two_sided_bounds(probability, mu=0, sigma=1): tail_probability = ((1 - probability) / 2) upper_bound = normal_lower_bound(tail_probability, mu, sigma) lower_bound = normal_upper_bound(tail_probability, mu, sigma) return (lower_bound, upper_bound)
[ "def", "normal_two_sided_bounds", "(", "probability", ",", "mu", "=", "0", ",", "sigma", "=", "1", ")", ":", "tail_probability", "=", "(", "(", "1", "-", "probability", ")", "/", "2", ")", "upper_bound", "=", "normal_lower_bound", "(", "tail_probability", ...
returns the symmetric bounds that contain the specified probability .
train
false
8,313
def _nova_to_osvif_ip(ip): floating_ips = [fip['address'] for fip in ip.get('floating_ips', [])] return objects.fixed_ip.FixedIP(address=ip['address'], floating_ips=floating_ips)
[ "def", "_nova_to_osvif_ip", "(", "ip", ")", ":", "floating_ips", "=", "[", "fip", "[", "'address'", "]", "for", "fip", "in", "ip", ".", "get", "(", "'floating_ips'", ",", "[", "]", ")", "]", "return", "objects", ".", "fixed_ip", ".", "FixedIP", "(", ...
convert nova ip object into os_vif object .
train
false
8,315
def test_get_init_2(): with make_tempfile(join(TMP_TEST_DIR, '__init__.pyw')): assert mp.get_init(TMP_TEST_DIR)
[ "def", "test_get_init_2", "(", ")", ":", "with", "make_tempfile", "(", "join", "(", "TMP_TEST_DIR", ",", "'__init__.pyw'", ")", ")", ":", "assert", "mp", ".", "get_init", "(", "TMP_TEST_DIR", ")" ]
see if get_init can find __init__ .
train
false
8,317
def _SetConnection(connection): __InitConnection() _thread_local.connection_stack[(-1)] = connection
[ "def", "_SetConnection", "(", "connection", ")", ":", "__InitConnection", "(", ")", "_thread_local", ".", "connection_stack", "[", "(", "-", "1", ")", "]", "=", "connection" ]
sets the datastore connection local to the thread .
train
false
8,318
def find_outliers(X, threshold=3.0, max_iter=2): from scipy.stats import zscore my_mask = np.zeros(len(X), dtype=np.bool) for _ in range(max_iter): X = np.ma.masked_array(X, my_mask) this_z = np.abs(zscore(X)) local_bad = (this_z > threshold) my_mask = np.max([my_mask, local_bad], 0) if (not np.any(local_bad)): break bad_idx = np.where(my_mask)[0] return bad_idx
[ "def", "find_outliers", "(", "X", ",", "threshold", "=", "3.0", ",", "max_iter", "=", "2", ")", ":", "from", "scipy", ".", "stats", "import", "zscore", "my_mask", "=", "np", ".", "zeros", "(", "len", "(", "X", ")", ",", "dtype", "=", "np", ".", "...
find outliers based on iterated z-scoring .
train
false
8,319
def get_entities(table, schema, db, first_key, last_key): return db.range_query(table, schema, first_key, last_key, _MAX_ENTITIES)
[ "def", "get_entities", "(", "table", ",", "schema", ",", "db", ",", "first_key", ",", "last_key", ")", ":", "return", "db", ".", "range_query", "(", "table", ",", "schema", ",", "first_key", ",", "last_key", ",", "_MAX_ENTITIES", ")" ]
gets entities from a table .
train
false
8,320
def list_stack(profile=None): ret = {} h_client = _auth(profile) for stack in h_client.stacks.list(): links = {} for link in stack.links: links[link['rel']] = link['href'] ret[stack.stack_name] = {'status': stack.stack_status, 'id': stack.id, 'name': stack.stack_name, 'creation': stack.creation_time, 'owner': stack.stack_owner, 'reason': stack.stack_status_reason, 'links': links} return ret
[ "def", "list_stack", "(", "profile", "=", "None", ")", ":", "ret", "=", "{", "}", "h_client", "=", "_auth", "(", "profile", ")", "for", "stack", "in", "h_client", ".", "stacks", ".", "list", "(", ")", ":", "links", "=", "{", "}", "for", "link", "...
return a list of available stack profile profile to use cli example: .
train
true
8,321
def index_delete(index, hosts=None, profile=None): es = _get_instance(hosts, profile) try: if (not index_exists(index=index)): return True else: result = es.indices.delete(index=index) if result.get('acknowledged', False): return True except elasticsearch.exceptions.NotFoundError: return None return None
[ "def", "index_delete", "(", "index", ",", "hosts", "=", "None", ",", "profile", "=", "None", ")", ":", "es", "=", "_get_instance", "(", "hosts", ",", "profile", ")", "try", ":", "if", "(", "not", "index_exists", "(", "index", "=", "index", ")", ")", ...
delete an index cli example:: salt myminion elasticsearch .
train
false
8,322
def escape_unicode(string): returnValue = StringIO() for character in string: code = ord(character) if (code > 127): hexCode = hex(code) returnValue.write(((('%' + hexCode[2:4]) + '%') + hexCode[4:6])) else: returnValue.write(character) return returnValue.getvalue()
[ "def", "escape_unicode", "(", "string", ")", ":", "returnValue", "=", "StringIO", "(", ")", "for", "character", "in", "string", ":", "code", "=", "ord", "(", "character", ")", "if", "(", "code", ">", "127", ")", ":", "hexCode", "=", "hex", "(", "code...
converts a unicode string into us-ascii .
train
false
8,325
def test_bookmark_completion(qtmodeltester, bookmarks): model = miscmodels.BookmarkCompletionModel() qtmodeltester.data_display_may_return_none = True qtmodeltester.check(model) _check_completions(model, {'Bookmarks': [('https://github.com', 'GitHub', ''), ('https://python.org', 'Welcome to Python.org', ''), ('http://qutebrowser.org', 'qutebrowser | qutebrowser', '')]})
[ "def", "test_bookmark_completion", "(", "qtmodeltester", ",", "bookmarks", ")", ":", "model", "=", "miscmodels", ".", "BookmarkCompletionModel", "(", ")", "qtmodeltester", ".", "data_display_may_return_none", "=", "True", "qtmodeltester", ".", "check", "(", "model", ...
test the results of bookmark completion .
train
false
8,327
def case_i(string): return ''.join([(((('[' + c.upper()) + c.lower()) + ']') if c.isalpha() else c) for c in re.escape(string)])
[ "def", "case_i", "(", "string", ")", ":", "return", "''", ".", "join", "(", "[", "(", "(", "(", "(", "'['", "+", "c", ".", "upper", "(", ")", ")", "+", "c", ".", "lower", "(", ")", ")", "+", "']'", ")", "if", "c", ".", "isalpha", "(", ")"...
returns case insensitive regex .
train
false
8,328
def make_path_recipe(r): return ('/recipes/' + r)
[ "def", "make_path_recipe", "(", "r", ")", ":", "return", "(", "'/recipes/'", "+", "r", ")" ]
converts a recipe id into a beaker path .
train
false
8,330
@requires_segment_info def file_name(pl, segment_info, display_no_file=False, no_file_text=u'[No file]'): name = buffer_name(segment_info) if (not name): if display_no_file: return [{u'contents': no_file_text, u'highlight_groups': [u'file_name_no_file', u'file_name']}] else: return None return os.path.basename(name).decode(segment_info[u'encoding'], u'powerline_vim_strtrans_error')
[ "@", "requires_segment_info", "def", "file_name", "(", "pl", ",", "segment_info", ",", "display_no_file", "=", "False", ",", "no_file_text", "=", "u'[No file]'", ")", ":", "name", "=", "buffer_name", "(", "segment_info", ")", "if", "(", "not", "name", ")", "...
return file name .
train
false
8,331
def abs(a): return _abs(a)
[ "def", "abs", "(", "a", ")", ":", "return", "_abs", "(", "a", ")" ]
apply abs to each element of the matrix mat .
train
false
8,334
def _append_package_paths(manifest_, paths, pkg_dir): exports = manifest_.get_export('python', 'path') if exports: for export in exports: if (':' in export): export = export.split(':') else: export = [export] for e in export: paths.append(e.replace('${prefix}', pkg_dir)) else: dirs = [os.path.join(pkg_dir, d) for d in ['src', 'lib']] paths.extend([d for d in dirs if os.path.isdir(d)])
[ "def", "_append_package_paths", "(", "manifest_", ",", "paths", ",", "pkg_dir", ")", ":", "exports", "=", "manifest_", ".", "get_export", "(", "'python'", ",", "'path'", ")", "if", "exports", ":", "for", "export", "in", "exports", ":", "if", "(", "':'", ...
added paths for package to paths .
train
false
8,336
@pytest.mark.usefixtures('break_getuser') @pytest.mark.skipif(sys.platform.startswith('win'), reason='no os.getuid on windows') def test_get_user_uid_not_found(): from _pytest.tmpdir import get_user assert (get_user() is None)
[ "@", "pytest", ".", "mark", ".", "usefixtures", "(", "'break_getuser'", ")", "@", "pytest", ".", "mark", ".", "skipif", "(", "sys", ".", "platform", ".", "startswith", "(", "'win'", ")", ",", "reason", "=", "'no os.getuid on windows'", ")", "def", "test_ge...
test that get_user() function works even if the current processs user id does not correspond to a valid user (e .
train
false
8,338
def create_size_estimators(): def line_count(filename): with open(filename, u'rb') as fh: return sum((1 for line in fh)) return {u'linecount': (lambda srcs: sum((line_count(src) for src in srcs))), u'filecount': (lambda srcs: len(srcs)), u'filesize': (lambda srcs: sum((os.path.getsize(src) for src in srcs))), u'nosize': (lambda srcs: 0), u'random': (lambda srcs: random.randint(0, 10000))}
[ "def", "create_size_estimators", "(", ")", ":", "def", "line_count", "(", "filename", ")", ":", "with", "open", "(", "filename", ",", "u'rb'", ")", "as", "fh", ":", "return", "sum", "(", "(", "1", "for", "line", "in", "fh", ")", ")", "return", "{", ...
create a dict of name to a function that returns an estimated size for a given target .
train
false
8,339
def HeaderPrints(message): m = HeaderPrintMTADetails(message) u = HeaderPrintMUADetails(message, mta=m)[:20] g = HeaderPrintGenericDetails(message)[:50] mua = (u[1] if u else None) if (mua and mua.startswith('Mozilla ')): mua = mua.split()[(-1)] return {'sender': md5_hex('\n'.join(((m + u) + g))), 'tools': md5_hex('\n'.join((u + g))), 'mua': mua}
[ "def", "HeaderPrints", "(", "message", ")", ":", "m", "=", "HeaderPrintMTADetails", "(", "message", ")", "u", "=", "HeaderPrintMUADetails", "(", "message", ",", "mta", "=", "m", ")", "[", ":", "20", "]", "g", "=", "HeaderPrintGenericDetails", "(", "message...
generate fingerprints from message headers which identifies the mua .
train
false
8,340
def clusterstatus(request): return render('clusterstatus.html', request, Cluster(request.jt))
[ "def", "clusterstatus", "(", "request", ")", ":", "return", "render", "(", "'clusterstatus.html'", ",", "request", ",", "Cluster", "(", "request", ".", "jt", ")", ")" ]
we get here from /clusterstatus .
train
false
8,341
def RunDecompiler(d, dx, decompiler, session=None): if (decompiler != None): androconf.debug('Decompiler ...') decompiler = decompiler.lower() if (decompiler == 'dex2jad'): d.set_decompiler(DecompilerDex2Jad(d, androconf.CONF['PATH_DEX2JAR'], androconf.CONF['BIN_DEX2JAR'], androconf.CONF['PATH_JAD'], androconf.CONF['BIN_JAD'], androconf.CONF['TMP_DIRECTORY'])) elif (decompiler == 'dex2fernflower'): d.set_decompiler(DecompilerDex2Fernflower(d, androconf.CONF['PATH_DEX2JAR'], androconf.CONF['BIN_DEX2JAR'], androconf.CONF['PATH_FERNFLOWER'], androconf.CONF['BIN_FERNFLOWER'], androconf.CONF['OPTIONS_FERNFLOWER'], androconf.CONF['TMP_DIRECTORY'])) elif (decompiler == 'ded'): d.set_decompiler(DecompilerDed(d, androconf.CONF['PATH_DED'], androconf.CONF['BIN_DED'], androconf.CONF['TMP_DIRECTORY'])) else: d.set_decompiler(DecompilerDAD(d, dx))
[ "def", "RunDecompiler", "(", "d", ",", "dx", ",", "decompiler", ",", "session", "=", "None", ")", ":", "if", "(", "decompiler", "!=", "None", ")", ":", "androconf", ".", "debug", "(", "'Decompiler ...'", ")", "decompiler", "=", "decompiler", ".", "lower"...
run the decompiler on a specific analysis .
train
false
8,342
def modprobe(mod): return quietRun(['modprobe', mod])
[ "def", "modprobe", "(", "mod", ")", ":", "return", "quietRun", "(", "[", "'modprobe'", ",", "mod", "]", ")" ]
return output of modprobe mod: module string .
train
false
8,343
def test_horse(): horse = data.horse() assert_equal(horse.ndim, 2) assert_equal(horse.dtype, np.dtype('bool'))
[ "def", "test_horse", "(", ")", ":", "horse", "=", "data", ".", "horse", "(", ")", "assert_equal", "(", "horse", ".", "ndim", ",", "2", ")", "assert_equal", "(", "horse", ".", "dtype", ",", "np", ".", "dtype", "(", "'bool'", ")", ")" ]
test that "horse" image can be loaded .
train
false
8,345
def provides(interface): interface_name = interface.__name__ def invariant(value): if interface.providedBy(value): return (True, '') else: return (False, "{value!r} doesn't provide {interface}".format(value=value, interface=interface_name)) invariant.__name__ = 'provides_{}_invariant'.format(interface_name) return invariant
[ "def", "provides", "(", "interface", ")", ":", "interface_name", "=", "interface", ".", "__name__", "def", "invariant", "(", "value", ")", ":", "if", "interface", ".", "providedBy", "(", "value", ")", ":", "return", "(", "True", ",", "''", ")", "else", ...
create an invariant that asserts that the given value provides the given interface .
train
false
8,346
def _import_module(importer, module_name, package): if (module_name in sys.modules): return sys.modules[module_name] loader = importer.find_module(module_name) if (loader is None): return None module = loader.load_module(module_name) local_name = module_name.partition((package.__name__ + '.'))[2] module_components = local_name.split('.') parent = six.moves.reduce(getattr, module_components[:(-1)], package) setattr(parent, module_components[(-1)], module) return module
[ "def", "_import_module", "(", "importer", ",", "module_name", ",", "package", ")", ":", "if", "(", "module_name", "in", "sys", ".", "modules", ")", ":", "return", "sys", ".", "modules", "[", "module_name", "]", "loader", "=", "importer", ".", "find_module"...
import module .
train
false
8,347
def one_one_in_other(book_id_val_map, db, field, *args): deleted = tuple(((k,) for (k, v) in book_id_val_map.iteritems() if (v is None))) if deleted: db.executemany((u'DELETE FROM %s WHERE book=?' % field.metadata[u'table']), deleted) for book_id in deleted: field.table.book_col_map.pop(book_id[0], None) updated = {k: v for (k, v) in book_id_val_map.iteritems() if (v is not None)} if updated: db.executemany((u'INSERT OR REPLACE INTO %s(book,%s) VALUES (?,?)' % (field.metadata[u'table'], field.metadata[u'column'])), ((k, sqlite_datetime(v)) for (k, v) in updated.iteritems())) field.table.book_col_map.update(updated) return set(book_id_val_map)
[ "def", "one_one_in_other", "(", "book_id_val_map", ",", "db", ",", "field", ",", "*", "args", ")", ":", "deleted", "=", "tuple", "(", "(", "(", "k", ",", ")", "for", "(", "k", ",", "v", ")", "in", "book_id_val_map", ".", "iteritems", "(", ")", "if"...
set a one-one field in the non-books table .
train
false
8,348
def create_logout_url(slug): logout_url = utils.set_url_query_parameter('/logout', 'return_url', slug) return logout_url
[ "def", "create_logout_url", "(", "slug", ")", ":", "logout_url", "=", "utils", ".", "set_url_query_parameter", "(", "'/logout'", ",", "'return_url'", ",", "slug", ")", "return", "logout_url" ]
computes the logout url for this request and specified destination url .
train
false
8,349
def write_csv(results, headers, filename): with open(filename, 'w') as csvfile: writer = csv.DictWriter(csvfile, fieldnames=headers) writer.writeheader() writer.writerows(results)
[ "def", "write_csv", "(", "results", ",", "headers", ",", "filename", ")", ":", "with", "open", "(", "filename", ",", "'w'", ")", "as", "csvfile", ":", "writer", "=", "csv", ".", "DictWriter", "(", "csvfile", ",", "fieldnames", "=", "headers", ")", "wri...
write a set of results to a csv file .
train
false
8,350
def ensureDeferred(coro): from types import GeneratorType if (version_info >= (3, 4, 0)): from asyncio import iscoroutine if (iscoroutine(coro) or isinstance(coro, GeneratorType)): return _inlineCallbacks(None, coro, Deferred()) elif (version_info >= (3, 3, 0)): if isinstance(coro, GeneratorType): return _inlineCallbacks(None, coro, Deferred()) if (not isinstance(coro, Deferred)): raise ValueError(('%r is not a coroutine or a Deferred' % (coro,))) return coro
[ "def", "ensureDeferred", "(", "coro", ")", ":", "from", "types", "import", "GeneratorType", "if", "(", "version_info", ">=", "(", "3", ",", "4", ",", "0", ")", ")", ":", "from", "asyncio", "import", "iscoroutine", "if", "(", "iscoroutine", "(", "coro", ...
schedule the execution of a coroutine that awaits/yields from l{deferred}s .
train
false
8,352
def _trim_id(data): old_id = data['id'] del data['id'] return (old_id, data)
[ "def", "_trim_id", "(", "data", ")", ":", "old_id", "=", "data", "[", "'id'", "]", "del", "data", "[", "'id'", "]", "return", "(", "old_id", ",", "data", ")" ]
trims id from json .
train
false
8,353
def get_s3a_access_key(): return get_conf().get(_CNF_S3A_ACCESS_KEY)
[ "def", "get_s3a_access_key", "(", ")", ":", "return", "get_conf", "(", ")", ".", "get", "(", "_CNF_S3A_ACCESS_KEY", ")" ]
get s3a aws access key id URL .
train
false
8,355
def axapi_call(module, url, post=None): (rsp, info) = fetch_url(module, url, data=post) if ((not rsp) or (info['status'] >= 400)): module.fail_json(msg=('failed to connect (status code %s), error was %s' % (info['status'], info.get('msg', 'no error given')))) try: raw_data = rsp.read() data = json.loads(raw_data) except ValueError: if ('status="ok"' in raw_data.lower()): data = {'response': {'status': 'OK'}} else: data = {'response': {'status': 'fail', 'err': {'msg': raw_data}}} except: module.fail_json(msg='could not read the result from the host') finally: rsp.close() return data
[ "def", "axapi_call", "(", "module", ",", "url", ",", "post", "=", "None", ")", ":", "(", "rsp", ",", "info", ")", "=", "fetch_url", "(", "module", ",", "url", ",", "data", "=", "post", ")", "if", "(", "(", "not", "rsp", ")", "or", "(", "info", ...
returns a datastructure based on the result of the api call .
train
false
8,356
def _ensure_llvm(): import warnings import llvmlite regex = re.compile('(\\d+)\\.(\\d+).(\\d+)') m = regex.match(llvmlite.__version__) if m: ver = tuple(map(int, m.groups())) if (ver < _min_llvmlite_version): msg = ('Numba requires at least version %d.%d.%d of llvmlite.\nInstalled version is %s.\nPlease update llvmlite.' % (_min_llvmlite_version + (llvmlite.__version__,))) raise ImportError(msg) else: warnings.warn('llvmlite version format not recognized!') from llvmlite.binding import llvm_version_info, check_jit_execution if (llvm_version_info < _min_llvm_version): msg = ('Numba requires at least version %d.%d.%d of LLVM.\nInstalled llvmlite is built against version %d.%d.%d.\nPlease update llvmlite.' % (_min_llvm_version + llvm_version_info)) raise ImportError(msg) check_jit_execution()
[ "def", "_ensure_llvm", "(", ")", ":", "import", "warnings", "import", "llvmlite", "regex", "=", "re", ".", "compile", "(", "'(\\\\d+)\\\\.(\\\\d+).(\\\\d+)'", ")", "m", "=", "regex", ".", "match", "(", "llvmlite", ".", "__version__", ")", "if", "m", ":", "...
make sure llvmlite is operational .
train
false
8,357
def _xor_bytes(aggregating_bytearray, updating_bytes): for i in xrange(len(aggregating_bytearray)): aggregating_bytearray[i] ^= ord(updating_bytes[i]) return aggregating_bytearray
[ "def", "_xor_bytes", "(", "aggregating_bytearray", ",", "updating_bytes", ")", ":", "for", "i", "in", "xrange", "(", "len", "(", "aggregating_bytearray", ")", ")", ":", "aggregating_bytearray", "[", "i", "]", "^=", "ord", "(", "updating_bytes", "[", "i", "]"...
aggregate bytes into a bytearray using xor .
train
false