id_within_dataset
int64
1
55.5k
snippet
stringlengths
19
14.2k
tokens
listlengths
6
1.63k
nl
stringlengths
6
352
split_within_dataset
stringclasses
1 value
is_duplicated
bool
2 classes
19,021
def _tx_resource_for_name(name): if (name == 'core'): return 'django.core' else: return ('django.contrib-%s' % name)
[ "def", "_tx_resource_for_name", "(", "name", ")", ":", "if", "(", "name", "==", "'core'", ")", ":", "return", "'django.core'", "else", ":", "return", "(", "'django.contrib-%s'", "%", "name", ")" ]
return the transifex resource name .
train
false
19,023
def grains_refresh(): DETAILS['grains_cache'] = {} return grains()
[ "def", "grains_refresh", "(", ")", ":", "DETAILS", "[", "'grains_cache'", "]", "=", "{", "}", "return", "grains", "(", ")" ]
refresh the grains from the proxy device .
train
false
19,024
def keyevent2tuple(event): return (event.type(), event.key(), event.modifiers(), event.text(), event.isAutoRepeat(), event.count())
[ "def", "keyevent2tuple", "(", "event", ")", ":", "return", "(", "event", ".", "type", "(", ")", ",", "event", ".", "key", "(", ")", ",", "event", ".", "modifiers", "(", ")", ",", "event", ".", "text", "(", ")", ",", "event", ".", "isAutoRepeat", ...
convert qkeyevent instance into a tuple .
train
true
19,025
def list_known_cup_metrics(): return [metric.__name__ for metric in cup_metrics]
[ "def", "list_known_cup_metrics", "(", ")", ":", "return", "[", "metric", ".", "__name__", "for", "metric", "in", "cup_metrics", "]" ]
show the names of available metrics .
train
false
19,026
def formatEvent(event): try: if ('log_flattened' in event): return flatFormat(event) format = event.get('log_format', None) if (format is None): return u'' if isinstance(format, bytes): format = format.decode('utf-8') elif (not isinstance(format, unicode)): raise TypeError('Log format must be unicode or bytes, not {0!r}'.format(format)) return formatWithCall(format, event) except Exception as e: return formatUnformattableEvent(event, e)
[ "def", "formatEvent", "(", "event", ")", ":", "try", ":", "if", "(", "'log_flattened'", "in", "event", ")", ":", "return", "flatFormat", "(", "event", ")", "format", "=", "event", ".", "get", "(", "'log_format'", ",", "None", ")", "if", "(", "format", ...
formats an event as a l{unicode} .
train
false
19,027
def protolinks_simple(proto, url): if (proto in ('iframe', 'embed')): return ('<iframe src="%s" frameborder="0" allowfullscreen></iframe>' % url) elif (proto == 'qr'): return ('<img style="width:100px" src="http://chart.apis.google.com/chart?cht=qr&chs=100x100&chl=%s&choe=UTF-8&chld=H" alt="QR Code" title="QR Code" />' % url) return ((proto + ':') + url)
[ "def", "protolinks_simple", "(", "proto", ",", "url", ")", ":", "if", "(", "proto", "in", "(", "'iframe'", ",", "'embed'", ")", ")", ":", "return", "(", "'<iframe src=\"%s\" frameborder=\"0\" allowfullscreen></iframe>'", "%", "url", ")", "elif", "(", "proto", ...
it converts url to html-string using appropriate proto-prefix: uses for construction "proto:url" .
train
false
19,028
def get_sender_name(): sender_name = (frappe.db.get_single_value(u'SMS Settings', u'sms_sender_name') or u'ERPNXT') if ((len(sender_name) > 6) and (frappe.db.get_default(u'country') == u'India')): throw(u'As per TRAI rule, sender name must be exactly 6 characters.\n DCTB DCTB DCTB Kindly change sender name in Setup --> Global Defaults.\n DCTB DCTB DCTB Note: Hyphen, space, numeric digit, special characters are not allowed.') return sender_name
[ "def", "get_sender_name", "(", ")", ":", "sender_name", "=", "(", "frappe", ".", "db", ".", "get_single_value", "(", "u'SMS Settings'", ",", "u'sms_sender_name'", ")", "or", "u'ERPNXT'", ")", "if", "(", "(", "len", "(", "sender_name", ")", ">", "6", ")", ...
returns name as sms sender .
train
false
19,029
def get_file_path(file_name): f = frappe.db.sql(u'select file_url from `tabFile`\n DCTB DCTB where name=%s or file_name=%s', (file_name, file_name)) if f: file_name = f[0][0] file_path = file_name if (u'/' not in file_path): file_path = (u'/files/' + file_path) if file_path.startswith(u'/private/files/'): file_path = get_files_path(is_private=1, *file_path.split(u'/private/files/', 1)[1].split(u'/')) elif file_path.startswith(u'/files/'): file_path = get_files_path(*file_path.split(u'/files/', 1)[1].split(u'/')) else: frappe.throw(_(u'There is some problem with the file url: {0}').format(file_path)) return file_path
[ "def", "get_file_path", "(", "file_name", ")", ":", "f", "=", "frappe", ".", "db", ".", "sql", "(", "u'select file_url from `tabFile`\\n DCTB DCTB where name=%s or file_name=%s'", ",", "(", "file_name", ",", "file_name", ")", ")", "if", "f", ":", "file_name", "="...
get file path from augeas_vhost_path .
train
false
19,031
def service_detail(request, service_id): service = get_object_or_404(Service, pk=service_id) layer_list = service.layer_set.all() service_list = service.service_set.all() service_paginator = Paginator(service_list, 25) layer_paginator = Paginator(layer_list, 25) page = request.GET.get('page') try: layers = layer_paginator.page(page) except PageNotAnInteger: layers = layer_paginator.page(1) except EmptyPage: layers = layer_paginator.page(layer_paginator.num_pages) try: services = service_paginator.page(page) except PageNotAnInteger: services = service_paginator.page(1) except EmptyPage: services = service_paginator.page(service_paginator.num_pages) return render_to_response('services/service_detail.html', RequestContext(request, {'service': service, 'layers': layers, 'services': services, 'permissions_json': _perms_info_json(service)}))
[ "def", "service_detail", "(", "request", ",", "service_id", ")", ":", "service", "=", "get_object_or_404", "(", "Service", ",", "pk", "=", "service_id", ")", "layer_list", "=", "service", ".", "layer_set", ".", "all", "(", ")", "service_list", "=", "service"...
this view shows the details of a service .
train
false
19,037
def _deg_ord_idx(deg, order): return ((((deg * deg) + deg) + order) - 1)
[ "def", "_deg_ord_idx", "(", "deg", ",", "order", ")", ":", "return", "(", "(", "(", "(", "deg", "*", "deg", ")", "+", "deg", ")", "+", "order", ")", "-", "1", ")" ]
get the index into s_in or s_out given a degree and order .
train
false
19,038
def rstdim_to_latexdim(width_str): match = re.match('^(\\d*\\.?\\d*)\\s*(\\S*)$', width_str) if (not match): raise ValueError res = width_str (amount, unit) = match.groups()[:2] float(amount) if (unit in ('', 'px')): res = ('%s\\sphinxpxdimen' % amount) elif (unit == 'pt'): res = ('%sbp' % amount) elif (unit == '%'): res = ('%.3f\\linewidth' % (float(amount) / 100.0)) return res
[ "def", "rstdim_to_latexdim", "(", "width_str", ")", ":", "match", "=", "re", ".", "match", "(", "'^(\\\\d*\\\\.?\\\\d*)\\\\s*(\\\\S*)$'", ",", "width_str", ")", "if", "(", "not", "match", ")", ":", "raise", "ValueError", "res", "=", "width_str", "(", "amount",...
convert width_str with rst length to latex length .
train
false
19,040
def encode_cookie_value(data, quote=default_cookie_quote): if (data is None): return None if (not isinstance(data, bytes)): data = data.encode('utf-8') quoted = quote(data) return quoted
[ "def", "encode_cookie_value", "(", "data", ",", "quote", "=", "default_cookie_quote", ")", ":", "if", "(", "data", "is", "None", ")", ":", "return", "None", "if", "(", "not", "isinstance", "(", "data", ",", "bytes", ")", ")", ":", "data", "=", "data", ...
url-encode strings to make them safe for a cookie value .
train
true
19,041
def _create_titled_dataset(root, key, title, data, comp_kw=None): comp_kw = ({} if (comp_kw is None) else comp_kw) out = root.create_dataset(key, data=data, **comp_kw) out.attrs['TITLE'] = title return out
[ "def", "_create_titled_dataset", "(", "root", ",", "key", ",", "title", ",", "data", ",", "comp_kw", "=", "None", ")", ":", "comp_kw", "=", "(", "{", "}", "if", "(", "comp_kw", "is", "None", ")", "else", "comp_kw", ")", "out", "=", "root", ".", "cr...
helper to create a titled dataset in h5py .
train
false
19,043
def getElementNodeByKey(elementNode, key): if (key not in elementNode.attributes): return None word = str(elementNode.attributes[key]).strip() evaluatedLinkValue = getEvaluatedLinkValue(elementNode, word) if (evaluatedLinkValue.__class__.__name__ == 'ElementNode'): return evaluatedLinkValue print 'Warning, could not get ElementNode in getElementNodeByKey in evaluate for:' print key print evaluatedLinkValue print elementNode return None
[ "def", "getElementNodeByKey", "(", "elementNode", ",", "key", ")", ":", "if", "(", "key", "not", "in", "elementNode", ".", "attributes", ")", ":", "return", "None", "word", "=", "str", "(", "elementNode", ".", "attributes", "[", "key", "]", ")", ".", "...
get the xml element by key .
train
false
19,044
def is_downloadable(url): h = requests.head(url, allow_redirects=True) header = h.headers content_type = header.get('content-type') if ('text' in content_type.lower()): return False if ('html' in content_type.lower()): return False return True
[ "def", "is_downloadable", "(", "url", ")", ":", "h", "=", "requests", ".", "head", "(", "url", ",", "allow_redirects", "=", "True", ")", "header", "=", "h", ".", "headers", "content_type", "=", "header", ".", "get", "(", "'content-type'", ")", "if", "(...
does the url contain a downloadable resource .
train
false
19,045
@np.deprecate(message='scipy.stats.histogram2 is deprecated in scipy 0.16.0; use np.histogram2d instead') def histogram2(a, bins): n = np.searchsorted(np.sort(a), bins) n = np.concatenate([n, [len(a)]]) return (n[1:] - n[:(-1)])
[ "@", "np", ".", "deprecate", "(", "message", "=", "'scipy.stats.histogram2 is deprecated in scipy 0.16.0; use np.histogram2d instead'", ")", "def", "histogram2", "(", "a", ",", "bins", ")", ":", "n", "=", "np", ".", "searchsorted", "(", "np", ".", "sort", "(", "...
compute histogram using divisions in bins .
train
false
19,046
def _ReferenceFromSerialized(serialized): if (not isinstance(serialized, basestring)): raise TypeError(('serialized must be a string; received %r' % serialized)) elif isinstance(serialized, unicode): serialized = serialized.encode('utf8') return entity_pb.Reference(serialized)
[ "def", "_ReferenceFromSerialized", "(", "serialized", ")", ":", "if", "(", "not", "isinstance", "(", "serialized", ",", "basestring", ")", ")", ":", "raise", "TypeError", "(", "(", "'serialized must be a string; received %r'", "%", "serialized", ")", ")", "elif", ...
construct a reference from a serialized reference .
train
true
19,047
def notify_link_count(doctype, name): link_count = frappe.cache().get_value(u'_link_count') if (not link_count): link_count = {} if (not ((doctype, name) in link_count)): link_count[(doctype, name)] = 1 else: link_count[(doctype, name)] += 1 frappe.cache().set_value(u'_link_count', link_count)
[ "def", "notify_link_count", "(", "doctype", ",", "name", ")", ":", "link_count", "=", "frappe", ".", "cache", "(", ")", ".", "get_value", "(", "u'_link_count'", ")", "if", "(", "not", "link_count", ")", ":", "link_count", "=", "{", "}", "if", "(", "not...
updates link count for given document .
train
false
19,048
def request_path(request): url = request.get_full_url() parts = urlparse.urlsplit(url) path = escape_path(parts.path) if (not path.startswith('/')): path = ('/' + path) return path
[ "def", "request_path", "(", "request", ")", ":", "url", "=", "request", ".", "get_full_url", "(", ")", "parts", "=", "urlparse", ".", "urlsplit", "(", "url", ")", "path", "=", "escape_path", "(", "parts", ".", "path", ")", "if", "(", "not", "path", "...
return path component of request-uri .
train
true
19,049
def managed_script(registry, xml_parent, data): step_type = data.get('type', 'script').lower() if (step_type == 'script'): step = 'ScriptBuildStep' script_tag = 'buildStepId' elif (step_type == 'batch'): step = 'WinBatchBuildStep' script_tag = 'command' else: raise InvalidAttributeError('type', step_type, ['script', 'batch']) ms = XML.SubElement(xml_parent, ('org.jenkinsci.plugins.managedscripts.' + step)) try: script_id = data['script-id'] except KeyError: raise MissingAttributeError('script-id') XML.SubElement(ms, script_tag).text = script_id args = XML.SubElement(ms, 'buildStepArgs') for arg in data.get('args', []): XML.SubElement(args, 'string').text = arg
[ "def", "managed_script", "(", "registry", ",", "xml_parent", ",", "data", ")", ":", "step_type", "=", "data", ".", "get", "(", "'type'", ",", "'script'", ")", ".", "lower", "(", ")", "if", "(", "step_type", "==", "'script'", ")", ":", "step", "=", "'...
yaml: managed-script this step allows to reference and execute a centrally managed script within your build .
train
false
19,051
def get_fdmax(default=None): fdmax = resource.getrlimit(resource.RLIMIT_NOFILE)[1] if (fdmax == resource.RLIM_INFINITY): return default return fdmax
[ "def", "get_fdmax", "(", "default", "=", "None", ")", ":", "fdmax", "=", "resource", ".", "getrlimit", "(", "resource", ".", "RLIMIT_NOFILE", ")", "[", "1", "]", "if", "(", "fdmax", "==", "resource", ".", "RLIM_INFINITY", ")", ":", "return", "default", ...
returns the maximum number of open file descriptors on this system .
train
false
19,052
def _discussion_disabled_course_for(user): course_with_disabled_forums = CourseFactory.create() CourseEnrollmentFactory.create(user=user, course_id=course_with_disabled_forums.id) _remove_discussion_tab(course_with_disabled_forums, user.id) return course_with_disabled_forums
[ "def", "_discussion_disabled_course_for", "(", "user", ")", ":", "course_with_disabled_forums", "=", "CourseFactory", ".", "create", "(", ")", "CourseEnrollmentFactory", ".", "create", "(", "user", "=", "user", ",", "course_id", "=", "course_with_disabled_forums", "."...
create and return a course with discussions disabled .
train
false
19,053
@frappe.whitelist() def get_usage_info(): from frappe.email.queue import get_emails_sent_this_month limits = get_limits() if (not (limits and any([limits.users, limits.space, limits.emails, limits.expiry]))): return limits.space = ((limits.space or 0) * 1024.0) if (not limits.space_usage): limits.space_usage = {u'database_size': 26, u'files_size': 1, u'backup_size': 1, u'total': 28} usage_info = frappe._dict({u'limits': limits, u'enabled_users': len(get_enabled_system_users()), u'emails_sent': get_emails_sent_this_month(), u'space_usage': limits.space_usage[u'total']}) if limits.expiry: usage_info[u'expires_on'] = formatdate(limits.expiry) usage_info[u'days_to_expiry'] = (getdate(limits.expiry) - getdate()).days if limits.upgrade_url: usage_info[u'upgrade_url'] = get_upgrade_url(limits.upgrade_url) return usage_info
[ "@", "frappe", ".", "whitelist", "(", ")", "def", "get_usage_info", "(", ")", ":", "from", "frappe", ".", "email", ".", "queue", "import", "get_emails_sent_this_month", "limits", "=", "get_limits", "(", ")", "if", "(", "not", "(", "limits", "and", "any", ...
get data to show for usage info .
train
false
19,056
def split_source(source_code): eol_chars = get_eol_chars(source_code) if eol_chars: return source_code.split(eol_chars) else: return [source_code]
[ "def", "split_source", "(", "source_code", ")", ":", "eol_chars", "=", "get_eol_chars", "(", "source_code", ")", "if", "eol_chars", ":", "return", "source_code", ".", "split", "(", "eol_chars", ")", "else", ":", "return", "[", "source_code", "]" ]
split source code into lines .
train
true
19,058
def get_backend(): return os.environ.get('BACKEND_ID', None)
[ "def", "get_backend", "(", ")", ":", "return", "os", ".", "environ", ".", "get", "(", "'BACKEND_ID'", ",", "None", ")" ]
return the name of the current backend .
train
false
19,059
def sampled_dropout_average(mlp, inputs, num_masks, default_input_include_prob=0.5, input_include_probs=None, default_input_scale=2.0, input_scales=None, rng=(2013, 5, 17), per_example=False): if (input_include_probs is None): input_include_probs = {} if (input_scales is None): input_scales = {} if (not hasattr(rng, 'uniform')): rng = np.random.RandomState(rng) mlp._validate_layer_names(list(input_include_probs.keys())) mlp._validate_layer_names(list(input_scales.keys())) if per_example: outputs = [mlp.dropout_fprop(inputs, default_input_include_prob, input_include_probs, default_input_scale, input_scales) for _ in xrange(num_masks)] else: masks = [generate_dropout_mask(mlp, default_input_include_prob, input_include_probs, rng) for _ in xrange(num_masks)] outputs = [mlp.masked_fprop(inputs, mask, None, default_input_scale, input_scales) for mask in masks] return geometric_mean_prediction(outputs)
[ "def", "sampled_dropout_average", "(", "mlp", ",", "inputs", ",", "num_masks", ",", "default_input_include_prob", "=", "0.5", ",", "input_include_probs", "=", "None", ",", "default_input_scale", "=", "2.0", ",", "input_scales", "=", "None", ",", "rng", "=", "(",...
take the geometric mean over a number of randomly sampled dropout masks for an mlp with softmax outputs .
train
false
19,060
def move(src, dst): real_dst = dst if os.path.isdir(dst): if _samefile(src, dst): os.rename(src, dst) return real_dst = os.path.join(dst, _basename(src)) if os.path.exists(real_dst): raise Error, ("Destination path '%s' already exists" % real_dst) try: os.rename(src, real_dst) except OSError: if os.path.isdir(src): if _destinsrc(src, dst): raise Error, ("Cannot move a directory '%s' into itself '%s'." % (src, dst)) copytree(src, real_dst, symlinks=True) rmtree(src) else: copy2(src, real_dst) os.unlink(src)
[ "def", "move", "(", "src", ",", "dst", ")", ":", "real_dst", "=", "dst", "if", "os", ".", "path", ".", "isdir", "(", "dst", ")", ":", "if", "_samefile", "(", "src", ",", "dst", ")", ":", "os", ".", "rename", "(", "src", ",", "dst", ")", "retu...
move that ensures filesystem encoding of paths .
train
true
19,061
@requires_sklearn def test_ica_reset(): raw = read_raw_fif(raw_fname).crop(0.5, stop).load_data() picks = pick_types(raw.info, meg=True, stim=False, ecg=False, eog=False, exclude='bads')[:10] run_time_attrs = ('_pre_whitener', 'unmixing_matrix_', 'mixing_matrix_', 'n_components_', 'n_samples_', 'pca_components_', 'pca_explained_variance_', 'pca_mean_') with warnings.catch_warnings(record=True): ica = ICA(n_components=3, max_pca_components=3, n_pca_components=3, method='fastica', max_iter=1).fit(raw, picks=picks) assert_true(all((hasattr(ica, attr) for attr in run_time_attrs))) assert_not_equal(ica.labels_, None) ica._reset() assert_true((not any((hasattr(ica, attr) for attr in run_time_attrs)))) assert_not_equal(ica.labels_, None)
[ "@", "requires_sklearn", "def", "test_ica_reset", "(", ")", ":", "raw", "=", "read_raw_fif", "(", "raw_fname", ")", ".", "crop", "(", "0.5", ",", "stop", ")", ".", "load_data", "(", ")", "picks", "=", "pick_types", "(", "raw", ".", "info", ",", "meg", ...
test ica resetting .
train
false
19,062
def symptom_unique_key_repositories(): return (CONF.credential.key_repository == CONF.fernet_tokens.key_repository)
[ "def", "symptom_unique_key_repositories", "(", ")", ":", "return", "(", "CONF", ".", "credential", ".", "key_repository", "==", "CONF", ".", "fernet_tokens", ".", "key_repository", ")" ]
key repositories for encryption should be unique .
train
false
19,063
def dup_strip(f): if ((not f) or f[0]): return f i = 0 for cf in f: if cf: break else: i += 1 return f[i:]
[ "def", "dup_strip", "(", "f", ")", ":", "if", "(", "(", "not", "f", ")", "or", "f", "[", "0", "]", ")", ":", "return", "f", "i", "=", "0", "for", "cf", "in", "f", ":", "if", "cf", ":", "break", "else", ":", "i", "+=", "1", "return", "f", ...
remove leading zeros from f in k[x] .
train
false
19,064
def requireSocket(*args): err = None missing = [obj for obj in args if (isinstance(obj, str) and (not hasattr(socket, obj)))] if missing: err = ("don't have " + ', '.join((name for name in missing))) else: callargs = [(getattr(socket, obj) if isinstance(obj, str) else obj) for obj in args] try: s = socket.socket(*callargs) except OSError as e: err = str(e) else: s.close() return skipWithClientIf((err is not None), "can't create socket({0}): {1}".format(', '.join((str(o) for o in args)), err))
[ "def", "requireSocket", "(", "*", "args", ")", ":", "err", "=", "None", "missing", "=", "[", "obj", "for", "obj", "in", "args", "if", "(", "isinstance", "(", "obj", ",", "str", ")", "and", "(", "not", "hasattr", "(", "socket", ",", "obj", ")", ")...
skip decorated test if a socket cannot be created with given arguments .
train
false
19,066
def unregister_distributed_server(): if settings.CENTRAL_SERVER: raise CommandError("'Unregister' does not make sense for a central server. Aborting!") own_device = Device.get_own_device() (tmp, settings.DEBUG_ALLOW_DELETIONS) = (settings.DEBUG_ALLOW_DELETIONS, True) DeviceZone.objects.filter(device=own_device).delete() Zone.objects.all().delete() Device.objects.filter(devicemetadata__is_trusted=True).delete() settings.DEBUG_ALLOW_DELETIONS = tmp
[ "def", "unregister_distributed_server", "(", ")", ":", "if", "settings", ".", "CENTRAL_SERVER", ":", "raise", "CommandError", "(", "\"'Unregister' does not make sense for a central server. Aborting!\"", ")", "own_device", "=", "Device", ".", "get_own_device", "(", ")", "...
all local steps necessary for unregistering a server with a central server .
train
false
19,067
def NexusIterator(handle, seq_count=None): n = Nexus.Nexus(handle) if (not n.matrix): raise StopIteration assert (len(n.unaltered_taxlabels) == len(n.taxlabels)) if (seq_count and (seq_count != len(n.unaltered_taxlabels))): raise ValueError(('Found %i sequences, but seq_count=%i' % (len(n.unaltered_taxlabels), seq_count))) records = (SeqRecord(n.matrix[new_name], id=new_name, name=old_name, description='') for (old_name, new_name) in zip(n.unaltered_taxlabels, n.taxlabels)) (yield MultipleSeqAlignment(records, n.alphabet))
[ "def", "NexusIterator", "(", "handle", ",", "seq_count", "=", "None", ")", ":", "n", "=", "Nexus", ".", "Nexus", "(", "handle", ")", "if", "(", "not", "n", ".", "matrix", ")", ":", "raise", "StopIteration", "assert", "(", "len", "(", "n", ".", "una...
returns seqrecord objects from a nexus file .
train
false
19,068
def add_enrollment(student_id, course_id, is_active=True, mode='honor'): enrollment = {'created': datetime.datetime.now(), 'mode': mode, 'is_active': is_active, 'course': _get_fake_course_info(course_id), 'student': student_id} _ENROLLMENTS.append(enrollment) return enrollment
[ "def", "add_enrollment", "(", "student_id", ",", "course_id", ",", "is_active", "=", "True", ",", "mode", "=", "'honor'", ")", ":", "enrollment", "=", "{", "'created'", ":", "datetime", ".", "datetime", ".", "now", "(", ")", ",", "'mode'", ":", "mode", ...
enrolls a user in a course .
train
false
19,070
def convert_ldap_result(ldap_result): py_result = [] at_least_one_referral = False for (dn, attrs) in ldap_result: ldap_attrs = {} if (dn is None): at_least_one_referral = True continue for (kind, values) in attrs.items(): try: val2py = (enabled2py if (kind == 'enabled') else ldap2py) ldap_attrs[kind] = [val2py(x) for x in values] except UnicodeDecodeError: LOG.debug('Unable to decode value for attribute %s', kind) py_result.append((utf8_decode(dn), ldap_attrs)) if at_least_one_referral: LOG.debug('Referrals were returned and ignored. Enable referral chasing in keystone.conf via [ldap] chase_referrals') return py_result
[ "def", "convert_ldap_result", "(", "ldap_result", ")", ":", "py_result", "=", "[", "]", "at_least_one_referral", "=", "False", "for", "(", "dn", ",", "attrs", ")", "in", "ldap_result", ":", "ldap_attrs", "=", "{", "}", "if", "(", "dn", "is", "None", ")",...
convert ldap search result to python types used by openstack .
train
false
19,071
def switch_org(orgname, profile='grafana'): if isinstance(profile, string_types): profile = __salt__['config.option'](profile) org = get_org(orgname, profile) response = requests.post('{0}/api/user/using/{1}'.format(profile['grafana_url'], org['id']), auth=_get_auth(profile), headers=_get_headers(profile), timeout=profile.get('grafana_timeout', 3)) if (response.status_code >= 400): response.raise_for_status() return org
[ "def", "switch_org", "(", "orgname", ",", "profile", "=", "'grafana'", ")", ":", "if", "isinstance", "(", "profile", ",", "string_types", ")", ":", "profile", "=", "__salt__", "[", "'config.option'", "]", "(", "profile", ")", "org", "=", "get_org", "(", ...
switch the current organization .
train
true
19,072
def dup_zz_irreducible_p(f, K): lc = dup_LC(f, K) tc = dup_TC(f, K) e_fc = dup_content(f[1:], K) if e_fc: e_ff = factorint(int(e_fc)) for p in e_ff.keys(): if ((lc % p) and (tc % (p ** 2))): return True
[ "def", "dup_zz_irreducible_p", "(", "f", ",", "K", ")", ":", "lc", "=", "dup_LC", "(", "f", ",", "K", ")", "tc", "=", "dup_TC", "(", "f", ",", "K", ")", "e_fc", "=", "dup_content", "(", "f", "[", "1", ":", "]", ",", "K", ")", "if", "e_fc", ...
test irreducibility using eisensteins criterion .
train
false
19,073
def extrudeFiles(filenames): for filename in filenames: extrudeFile(filename)
[ "def", "extrudeFiles", "(", "filenames", ")", ":", "for", "filename", "in", "filenames", ":", "extrudeFile", "(", "filename", ")" ]
parse gcode files and send the commands to the extruder .
train
false
19,076
def validate_float(s): try: return float(s) except ValueError: raise ValueError((u'Could not convert "%s" to float' % s))
[ "def", "validate_float", "(", "s", ")", ":", "try", ":", "return", "float", "(", "s", ")", "except", "ValueError", ":", "raise", "ValueError", "(", "(", "u'Could not convert \"%s\" to float'", "%", "s", ")", ")" ]
convert s to float or raise .
train
false
19,077
def getProcessOutput(executable, args=(), env={}, path=None, reactor=None, errortoo=0): return _callProtocolWithDeferred((lambda d: _BackRelay(d, errortoo=errortoo)), executable, args, env, path, reactor)
[ "def", "getProcessOutput", "(", "executable", ",", "args", "=", "(", ")", ",", "env", "=", "{", "}", ",", "path", "=", "None", ",", "reactor", "=", "None", ",", "errortoo", "=", "0", ")", ":", "return", "_callProtocolWithDeferred", "(", "(", "lambda", ...
spawn a process and return its output as a deferred returning a l{bytes} .
train
false
19,079
def libvlc_audio_set_format(mp, format, rate, channels): f = (_Cfunctions.get('libvlc_audio_set_format', None) or _Cfunction('libvlc_audio_set_format', ((1,), (1,), (1,), (1,)), None, None, MediaPlayer, ctypes.c_char_p, ctypes.c_uint, ctypes.c_uint)) return f(mp, format, rate, channels)
[ "def", "libvlc_audio_set_format", "(", "mp", ",", "format", ",", "rate", ",", "channels", ")", ":", "f", "=", "(", "_Cfunctions", ".", "get", "(", "'libvlc_audio_set_format'", ",", "None", ")", "or", "_Cfunction", "(", "'libvlc_audio_set_format'", ",", "(", ...
set decoded audio format .
train
true
19,080
def convert_keys(d): special = {'started_before': 'StartTime<', 'started_after': 'StartTime>', 'started': 'StartTime', 'ended_before': 'EndTime<', 'ended_after': 'EndTime>', 'ended': 'EndTime', 'from_': 'From'} result = {} for (k, v) in iteritems(d): if (k in special): result[special[k]] = v else: result[convert_case(k)] = v return result
[ "def", "convert_keys", "(", "d", ")", ":", "special", "=", "{", "'started_before'", ":", "'StartTime<'", ",", "'started_after'", ":", "'StartTime>'", ",", "'started'", ":", "'StartTime'", ",", "'ended_before'", ":", "'EndTime<'", ",", "'ended_after'", ":", "'End...
return a dictionary with all keys converted from arguments .
train
false
19,081
def user_has_perm_thread(thread, profile): user_post = CommunicationNote.objects.filter(author=profile, thread=thread) user_cc = CommunicationThreadCC.objects.filter(user=profile, thread=thread) if (user_post.exists() or user_cc.exists()): return True if (thread.read_permission_developer and thread.check_obj_author(profile)): return True return check_acls_comm_obj(thread, profile)
[ "def", "user_has_perm_thread", "(", "thread", ",", "profile", ")", ":", "user_post", "=", "CommunicationNote", ".", "objects", ".", "filter", "(", "author", "=", "profile", ",", "thread", "=", "thread", ")", "user_cc", "=", "CommunicationThreadCC", ".", "objec...
check if the user has read/write permissions on the given thread .
train
false
19,084
def i(message): print_log(message)
[ "def", "i", "(", "message", ")", ":", "print_log", "(", "message", ")" ]
print a normal log message .
train
false
19,085
def _get_asn1_time(timestamp): string_timestamp = _ffi.cast('ASN1_STRING*', timestamp) if (_lib.ASN1_STRING_length(string_timestamp) == 0): return None elif (_lib.ASN1_STRING_type(string_timestamp) == _lib.V_ASN1_GENERALIZEDTIME): return _ffi.string(_lib.ASN1_STRING_data(string_timestamp)) else: generalized_timestamp = _ffi.new('ASN1_GENERALIZEDTIME**') _lib.ASN1_TIME_to_generalizedtime(timestamp, generalized_timestamp) if (generalized_timestamp[0] == _ffi.NULL): _untested_error('ASN1_TIME_to_generalizedtime') else: string_timestamp = _ffi.cast('ASN1_STRING*', generalized_timestamp[0]) string_data = _lib.ASN1_STRING_data(string_timestamp) string_result = _ffi.string(string_data) _lib.ASN1_GENERALIZEDTIME_free(generalized_timestamp[0]) return string_result
[ "def", "_get_asn1_time", "(", "timestamp", ")", ":", "string_timestamp", "=", "_ffi", ".", "cast", "(", "'ASN1_STRING*'", ",", "timestamp", ")", "if", "(", "_lib", ".", "ASN1_STRING_length", "(", "string_timestamp", ")", "==", "0", ")", ":", "return", "None"...
retrieve the time value of an asn1 time object .
train
true
19,086
def _calc_factlist(nn): if (nn >= len(_Factlist)): for ii in range(len(_Factlist), int((nn + 1))): _Factlist.append((_Factlist[(ii - 1)] * ii)) return _Factlist[:(int(nn) + 1)]
[ "def", "_calc_factlist", "(", "nn", ")", ":", "if", "(", "nn", ">=", "len", "(", "_Factlist", ")", ")", ":", "for", "ii", "in", "range", "(", "len", "(", "_Factlist", ")", ",", "int", "(", "(", "nn", "+", "1", ")", ")", ")", ":", "_Factlist", ...
function calculates a list of precomputed factorials in order to massively accelerate future calculations of the various coefficients .
train
false
19,087
def test_resize_photo_poorly(): somepic = get_image_path('mozilla.png') src = tempfile.NamedTemporaryFile(mode='r+w+b', suffix='.png', delete=False, dir=settings.TMP_PATH) shutil.copyfile(somepic, src.name) src_image = Image.open(src.name) assert (src_image.size == (339, 128)) resize_photo(src.name, src.name) src_image = Image.open(src.name) assert (src_image.size == (339, 128))
[ "def", "test_resize_photo_poorly", "(", ")", ":", "somepic", "=", "get_image_path", "(", "'mozilla.png'", ")", "src", "=", "tempfile", ".", "NamedTemporaryFile", "(", "mode", "=", "'r+w+b'", ",", "suffix", "=", "'.png'", ",", "delete", "=", "False", ",", "di...
if we attempt to set the src/dst .
train
false
19,088
def _create_test_index(): es = elasticsearch.Elasticsearch(connection_class=Urllib3HttpConnection, host=HOST, port=PORT, http_auth=HTTP_AUTH) if (not es.indices.exists(INDEX)): es.indices.create(INDEX)
[ "def", "_create_test_index", "(", ")", ":", "es", "=", "elasticsearch", ".", "Elasticsearch", "(", "connection_class", "=", "Urllib3HttpConnection", ",", "host", "=", "HOST", ",", "port", "=", "PORT", ",", "http_auth", "=", "HTTP_AUTH", ")", "if", "(", "not"...
create content index .
train
false
19,092
def set_hosts(sld, tld, hosts): opts = salt.utils.namecheap.get_opts('namecheap.domains.dns.setHosts') opts['SLD'] = sld opts['TLD'] = tld i = 1 for hostrecord in hosts: str_i = str(i) opts[('HostName' + str_i)] = hostrecord['hostname'] opts[('RecordType' + str_i)] = hostrecord['recordtype'] opts[('Address' + str_i)] = hostrecord['address'] if ('ttl' in hostrecord): opts[('TTL' + str_i)] = hostrecord['ttl'] if ('mxpref' in hostrecord): opts[('MXPref' + str_i)] = hostrecord['mxpref'] opts['EmailType'] = hostrecord['emailtype'] i += 1 response_xml = salt.utils.namecheap.post_request(opts) if (response_xml is None): return False dnsresult = response_xml.getElementsByTagName('DomainDNSSetHostsResult')[0] return salt.utils.namecheap.string_to_value(dnsresult.getAttribute('IsSuccess'))
[ "def", "set_hosts", "(", "sld", ",", "tld", ",", "hosts", ")", ":", "opts", "=", "salt", ".", "utils", ".", "namecheap", ".", "get_opts", "(", "'namecheap.domains.dns.setHosts'", ")", "opts", "[", "'SLD'", "]", "=", "sld", "opts", "[", "'TLD'", "]", "=...
sets dns host records settings for the requested domain .
train
true
19,093
def getProcessOutputAndValueWithInput(executable, args, input): d = defer.Deferred() p = _SubprocessProtocol(input, d) (executable, args) = encodeExecutableAndArgs(executable, args) reactor.spawnProcess(p, executable, ((executable,) + tuple(args))) return d
[ "def", "getProcessOutputAndValueWithInput", "(", "executable", ",", "args", ",", "input", ")", ":", "d", "=", "defer", ".", "Deferred", "(", ")", "p", "=", "_SubprocessProtocol", "(", "input", ",", "d", ")", "(", "executable", ",", "args", ")", "=", "enc...
similar to getprocessoutputandvalue .
train
false
19,094
def all_correlations_fast_no_scipy(y, X): X = np.asanyarray(X, float) y = np.asanyarray(y, float) xy = np.dot(X, y) y_ = y.mean() ys_ = y.std() x_ = X.mean(1) xs_ = X.std(1) n = float(len(y)) ys_ += 1e-05 xs_ += 1e-05 return ((((xy - ((x_ * y_) * n)) / n) / xs_) / ys_)
[ "def", "all_correlations_fast_no_scipy", "(", "y", ",", "X", ")", ":", "X", "=", "np", ".", "asanyarray", "(", "X", ",", "float", ")", "y", "=", "np", ".", "asanyarray", "(", "y", ",", "float", ")", "xy", "=", "np", ".", "dot", "(", "X", ",", "...
cs = all_correlations cs[i] = np .
train
false
19,095
def get_twill_glocals(): global global_dict, _local_dict_stack assert (global_dict is not None), 'must initialize global namespace first!' if (len(_local_dict_stack) == 0): new_local_dict() return (global_dict, _local_dict_stack[(-1)])
[ "def", "get_twill_glocals", "(", ")", ":", "global", "global_dict", ",", "_local_dict_stack", "assert", "(", "global_dict", "is", "not", "None", ")", ",", "'must initialize global namespace first!'", "if", "(", "len", "(", "_local_dict_stack", ")", "==", "0", ")",...
return global dict & current local dictionary .
train
false
19,097
def test_if_docker_app_can_be_deployed(dcos_api_session): dcos_api_session.marathon.deploy_test_app_and_check(*get_test_app_in_docker(ip_per_container=False))
[ "def", "test_if_docker_app_can_be_deployed", "(", "dcos_api_session", ")", ":", "dcos_api_session", ".", "marathon", ".", "deploy_test_app_and_check", "(", "*", "get_test_app_in_docker", "(", "ip_per_container", "=", "False", ")", ")" ]
marathon app inside docker deployment integration test .
train
false
19,098
def get_if(iff, cmd): sck = socket.socket() ifreq = ioctl(sck, cmd, struct.pack('16s16x', iff)) sck.close() return ifreq
[ "def", "get_if", "(", "iff", ",", "cmd", ")", ":", "sck", "=", "socket", ".", "socket", "(", ")", "ifreq", "=", "ioctl", "(", "sck", ",", "cmd", ",", "struct", ".", "pack", "(", "'16s16x'", ",", "iff", ")", ")", "sck", ".", "close", "(", ")", ...
ease siocgif* ioctl calls .
train
true
19,099
def is_cohort_exists(course_key, name): return CourseUserGroup.objects.filter(course_id=course_key, group_type=CourseUserGroup.COHORT, name=name).exists()
[ "def", "is_cohort_exists", "(", "course_key", ",", "name", ")", ":", "return", "CourseUserGroup", ".", "objects", ".", "filter", "(", "course_id", "=", "course_key", ",", "group_type", "=", "CourseUserGroup", ".", "COHORT", ",", "name", "=", "name", ")", "."...
check if a cohort already exists .
train
false
19,100
def zone_for_name(name, rdclass=dns.rdataclass.IN, tcp=False, resolver=None): if isinstance(name, (str, unicode)): name = dns.name.from_text(name, dns.name.root) if (resolver is None): resolver = get_default_resolver() if (not name.is_absolute()): raise NotAbsolute(name) while 1: try: answer = resolver.query(name, dns.rdatatype.SOA, rdclass, tcp) if (answer.rrset.name == name): return name except (dns.resolver.NXDOMAIN, dns.resolver.NoAnswer): pass try: name = name.parent() except dns.name.NoParent: raise NoRootSOA
[ "def", "zone_for_name", "(", "name", ",", "rdclass", "=", "dns", ".", "rdataclass", ".", "IN", ",", "tcp", "=", "False", ",", "resolver", "=", "None", ")", ":", "if", "isinstance", "(", "name", ",", "(", "str", ",", "unicode", ")", ")", ":", "name"...
find the name of the zone which contains the specified name .
train
true
19,101
def random_text(length, alph=(string.ascii_letters + string.digits)): return ''.join((random.choice(alph) for _ in range(length)))
[ "def", "random_text", "(", "length", ",", "alph", "=", "(", "string", ".", "ascii_letters", "+", "string", ".", "digits", ")", ")", ":", "return", "''", ".", "join", "(", "(", "random", ".", "choice", "(", "alph", ")", "for", "_", "in", "range", "(...
random text generator .
train
false
19,103
def onLoginAppShutDown(): INFO_MSG('onLoginAppShutDown()')
[ "def", "onLoginAppShutDown", "(", ")", ":", "INFO_MSG", "(", "'onLoginAppShutDown()'", ")" ]
kbengine method .
train
false
19,105
def threadfunc(callback): callback = Async(callback) try: while True: print '!' callback() time.sleep(1) except: print 'thread exiting'
[ "def", "threadfunc", "(", "callback", ")", ":", "callback", "=", "Async", "(", "callback", ")", "try", ":", "while", "True", ":", "print", "'!'", "callback", "(", ")", "time", ".", "sleep", "(", "1", ")", "except", ":", "print", "'thread exiting'" ]
this function will call the callback every second .
train
false
19,107
def vector_to_axis(line, point): line = line.normalized() np = point.norm() angle = line.angle(point) return (point - (line ** (np * numpy.cos(angle))))
[ "def", "vector_to_axis", "(", "line", ",", "point", ")", ":", "line", "=", "line", ".", "normalized", "(", ")", "np", "=", "point", ".", "norm", "(", ")", "angle", "=", "line", ".", "angle", "(", "point", ")", "return", "(", "point", "-", "(", "l...
returns the vector between a point and the closest point on a line .
train
false
19,108
def _to_snake_case(pascal_case): snake_case = re.sub('(^|[a-z])([A-Z])', (lambda match: '{0}_{1}'.format(match.group(1).lower(), match.group(2).lower())), pascal_case) return snake_case.lower().strip('_')
[ "def", "_to_snake_case", "(", "pascal_case", ")", ":", "snake_case", "=", "re", ".", "sub", "(", "'(^|[a-z])([A-Z])'", ",", "(", "lambda", "match", ":", "'{0}_{1}'", ".", "format", "(", "match", ".", "group", "(", "1", ")", ".", "lower", "(", ")", ",",...
convert a pascalcase string to its snake_case equivalent .
train
false
19,109
def index_trim_outlier(resid, k): sort_index = np.argsort(np.abs(resid)) trimmed_index = np.sort(sort_index[:(- k)]) outlier_index = np.sort(sort_index[(- k):]) return (trimmed_index, outlier_index)
[ "def", "index_trim_outlier", "(", "resid", ",", "k", ")", ":", "sort_index", "=", "np", ".", "argsort", "(", "np", ".", "abs", "(", "resid", ")", ")", "trimmed_index", "=", "np", ".", "sort", "(", "sort_index", "[", ":", "(", "-", "k", ")", "]", ...
returns indices to residual array with k outliers removed parameters resid : array_like .
train
false
19,110
def index2lpol(coeffs, index): n = max(index) ar = np.zeros(n) ar[index] = coeffs return ar
[ "def", "index2lpol", "(", "coeffs", ",", "index", ")", ":", "n", "=", "max", "(", "index", ")", "ar", "=", "np", ".", "zeros", "(", "n", ")", "ar", "[", "index", "]", "=", "coeffs", "return", "ar" ]
expand coefficients to lag poly parameters coeffs : array non-zero coefficients of lag polynomial index : array index of lagpolynomial with non-zero elements ar : array_like coefficients of lag polynomial returns ar : array_like coefficients of lag polynomial .
train
false
19,112
def _get_registered_option(key): return _registered_options.get(key)
[ "def", "_get_registered_option", "(", "key", ")", ":", "return", "_registered_options", ".", "get", "(", "key", ")" ]
retrieves the option metadata if key is a registered option .
train
false
19,115
def johnson(G, weight='weight'): if (not nx.is_weighted(G, weight=weight)): raise nx.NetworkXError('Graph is not weighted.') dist = {v: 0 for v in G} pred = {v: [None] for v in G} weight = _weight_function(G, weight) dist_bellman = _bellman_ford(G, list(G), weight, pred=pred, dist=dist) scale = (lambda u, v: (dist_bellman[u] - dist_bellman[v])) new_weight = (lambda u, v, d: (weight(u, v, d) + scale(u, v))) def dist_path(v): paths = {v: [v]} _dijkstra(G, v, new_weight, paths=paths) return paths return {v: dist_path(v) for v in G}
[ "def", "johnson", "(", "G", ",", "weight", "=", "'weight'", ")", ":", "if", "(", "not", "nx", ".", "is_weighted", "(", "G", ",", "weight", "=", "weight", ")", ")", ":", "raise", "nx", ".", "NetworkXError", "(", "'Graph is not weighted.'", ")", "dist", ...
uses johnsons algorithm to compute shortest paths .
train
false
19,116
def _make_container_root(name): path = _root(name) if os.path.exists(path): __context__['retcode'] = salt.defaults.exitcodes.SALT_BUILD_FAIL raise CommandExecutionError('Container {0} already exists'.format(name)) else: try: os.makedirs(path) return path except OSError as exc: raise CommandExecutionError('Unable to make container root directory {0}: {1}'.format(name, exc))
[ "def", "_make_container_root", "(", "name", ")", ":", "path", "=", "_root", "(", "name", ")", "if", "os", ".", "path", ".", "exists", "(", "path", ")", ":", "__context__", "[", "'retcode'", "]", "=", "salt", ".", "defaults", ".", "exitcodes", ".", "S...
make the container root directory .
train
true
19,117
def previous_event_indexer(all_dates, all_sids, event_dates, event_timestamps, event_sids): validate_event_metadata(event_dates, event_timestamps, event_sids) out = np.full((len(all_dates), len(all_sids)), (-1), dtype=np.int64) eff_dts = np.maximum(event_dates, event_timestamps) sid_ixs = all_sids.searchsorted(event_sids) dt_ixs = all_dates.searchsorted(eff_dts) last_written = {} for i in range((len(event_dates) - 1), (-1), (-1)): sid_ix = sid_ixs[i] dt_ix = dt_ixs[i] out[dt_ix:last_written.get(sid_ix, None), sid_ix] = i last_written[sid_ix] = dt_ix return out
[ "def", "previous_event_indexer", "(", "all_dates", ",", "all_sids", ",", "event_dates", ",", "event_timestamps", ",", "event_sids", ")", ":", "validate_event_metadata", "(", "event_dates", ",", "event_timestamps", ",", "event_sids", ")", "out", "=", "np", ".", "fu...
construct an index array that .
train
true
19,118
def create_logger(app): Logger = getLoggerClass() class DebugLogger(Logger, ): def getEffectiveLevel(x): if ((x.level == 0) and app.debug): return DEBUG return Logger.getEffectiveLevel(x) class DebugHandler(StreamHandler, ): def emit(x, record): (StreamHandler.emit(x, record) if app.debug else None) handler = DebugHandler() handler.setLevel(DEBUG) handler.setFormatter(Formatter(app.debug_log_format)) logger = getLogger(app.logger_name) del logger.handlers[:] logger.__class__ = DebugLogger logger.addHandler(handler) return logger
[ "def", "create_logger", "(", "app", ")", ":", "Logger", "=", "getLoggerClass", "(", ")", "class", "DebugLogger", "(", "Logger", ",", ")", ":", "def", "getEffectiveLevel", "(", "x", ")", ":", "if", "(", "(", "x", ".", "level", "==", "0", ")", "and", ...
create logger according to provided configuration .
train
true
19,119
def _colorformat(text): if (text[0:1] == '#'): col = text[1:] if (col in ANSI_COLOR_NAMES): return col elif (len(col) == 6): return col elif (len(col) == 3): return (((col[0] * 2) + (col[1] * 2)) + (col[2] * 2)) elif (text == ''): return text raise ValueError(('Wrong color format %r' % text))
[ "def", "_colorformat", "(", "text", ")", ":", "if", "(", "text", "[", "0", ":", "1", "]", "==", "'#'", ")", ":", "col", "=", "text", "[", "1", ":", "]", "if", "(", "col", "in", "ANSI_COLOR_NAMES", ")", ":", "return", "col", "elif", "(", "len", ...
parse/validate color format .
train
true
19,120
def _snapshot_service(service): _apply_service(service, SonosDevice.snapshot)
[ "def", "_snapshot_service", "(", "service", ")", ":", "_apply_service", "(", "service", ",", "SonosDevice", ".", "snapshot", ")" ]
take a snapshot .
train
false
19,121
def _scandir_generic(path=unicode('.')): for name in listdir(path): (yield GenericDirEntry(path, name))
[ "def", "_scandir_generic", "(", "path", "=", "unicode", "(", "'.'", ")", ")", ":", "for", "name", "in", "listdir", "(", "path", ")", ":", "(", "yield", "GenericDirEntry", "(", "path", ",", "name", ")", ")" ]
like os .
train
false
19,122
def seven_extract(nzo, sevenset, extensions, extraction_path, one_folder, delete): fail = 0 passwords = get_all_passwords(nzo) for password in passwords: if password: logging.debug('Trying 7zip with password "%s"', password) msg = (T('Trying 7zip with password "%s"') % unicoder(password)) nzo.fail_msg = msg nzo.set_unpack_info('Unpack', msg) (fail, msg) = seven_extract_core(sevenset, extensions, extraction_path, one_folder, delete, password) if (fail != 2): break nzo.fail_msg = '' if (fail == 2): logging.error(u'%s (%s)', T('Unpacking failed, archive requires a password'), os.path.split(sevenset)[1]) return (fail, msg)
[ "def", "seven_extract", "(", "nzo", ",", "sevenset", ",", "extensions", ",", "extraction_path", ",", "one_folder", ",", "delete", ")", ":", "fail", "=", "0", "passwords", "=", "get_all_passwords", "(", "nzo", ")", "for", "password", "in", "passwords", ":", ...
unpack single set sevenset to extraction_path .
train
false
19,123
def s_string(value, size=(-1), padding='\x00', encoding='ascii', fuzzable=True, max_len=0, name=None): s = primitives.string(value, size, padding, encoding, fuzzable, max_len, name) blocks.CURRENT.push(s)
[ "def", "s_string", "(", "value", ",", "size", "=", "(", "-", "1", ")", ",", "padding", "=", "'\\x00'", ",", "encoding", "=", "'ascii'", ",", "fuzzable", "=", "True", ",", "max_len", "=", "0", ",", "name", "=", "None", ")", ":", "s", "=", "primiti...
push a string onto the current block stack .
train
false
19,124
def create_filter(parameters): if parameters['filters']: cli_filters = parameters['filters'] real_filters = [] for (filter_type, filter_pattern) in cli_filters: real_filters.append((filter_type.lstrip('-'), filter_pattern)) source_location = parameters['src'] if source_location.startswith('s3://'): src_rootdir = _get_s3_root(source_location, parameters['dir_op']) else: src_rootdir = _get_local_root(parameters['src'], parameters['dir_op']) destination_location = parameters['dest'] if destination_location.startswith('s3://'): dst_rootdir = _get_s3_root(parameters['dest'], parameters['dir_op']) else: dst_rootdir = _get_local_root(parameters['dest'], parameters['dir_op']) return Filter(real_filters, src_rootdir, dst_rootdir) else: return Filter({}, None, None)
[ "def", "create_filter", "(", "parameters", ")", ":", "if", "parameters", "[", "'filters'", "]", ":", "cli_filters", "=", "parameters", "[", "'filters'", "]", "real_filters", "=", "[", "]", "for", "(", "filter_type", ",", "filter_pattern", ")", "in", "cli_fil...
create a filter function from a string parameter .
train
false
19,125
def parse_implicit_response(uri, state=None, scope=None): fragment = urlparse.urlparse(uri).fragment params = dict(urlparse.parse_qsl(fragment, keep_blank_values=True)) validate_token_parameters(params, scope) if (state and (params.get(u'state', None) != state)): raise ValueError('Mismatching or missing state in params.') return params
[ "def", "parse_implicit_response", "(", "uri", ",", "state", "=", "None", ",", "scope", "=", "None", ")", ":", "fragment", "=", "urlparse", ".", "urlparse", "(", "uri", ")", ".", "fragment", "params", "=", "dict", "(", "urlparse", ".", "parse_qsl", "(", ...
parse the implicit token response uri into a dict .
train
false
19,126
def get_var_endog(y, lags, trend='c', has_constant='skip'): nobs = len(y) Z = np.array([y[(t - lags):t][::(-1)].ravel() for t in range(lags, nobs)]) if (trend != 'nc'): Z = tsa.add_trend(Z, prepend=True, trend=trend, has_constant=has_constant) return Z
[ "def", "get_var_endog", "(", "y", ",", "lags", ",", "trend", "=", "'c'", ",", "has_constant", "=", "'skip'", ")", ":", "nobs", "=", "len", "(", "y", ")", "Z", "=", "np", ".", "array", "(", "[", "y", "[", "(", "t", "-", "lags", ")", ":", "t", ...
make predictor matrix for var(p) process z := .
train
false
19,127
def partialReleaseComplete(): a = TpPd(pd=6) b = MessageType(mesType=15) packet = (a / b) return packet
[ "def", "partialReleaseComplete", "(", ")", ":", "a", "=", "TpPd", "(", "pd", "=", "6", ")", "b", "=", "MessageType", "(", "mesType", "=", "15", ")", "packet", "=", "(", "a", "/", "b", ")", "return", "packet" ]
partial release complete section 9 .
train
true
19,128
def inline_singleton_lists(dsk, dependencies=None): if (dependencies is None): dependencies = {k: get_dependencies(dsk, task=v) for (k, v) in dsk.items()} dependents = reverse_dict(dependencies) keys = [k for (k, v) in dsk.items() if (istask(v) and v and (v[0] is list) and (len(dependents[k]) == 1))] dsk = inline(dsk, keys, inline_constants=False) for k in keys: del dsk[k] return dsk
[ "def", "inline_singleton_lists", "(", "dsk", ",", "dependencies", "=", "None", ")", ":", "if", "(", "dependencies", "is", "None", ")", ":", "dependencies", "=", "{", "k", ":", "get_dependencies", "(", "dsk", ",", "task", "=", "v", ")", "for", "(", "k",...
inline lists that are only used once .
train
false
19,129
def float_repr(value, precision_digits): return (('%%.%sf' % precision_digits) % value)
[ "def", "float_repr", "(", "value", ",", "precision_digits", ")", ":", "return", "(", "(", "'%%.%sf'", "%", "precision_digits", ")", "%", "value", ")" ]
returns a string representation of a float with the the given number of fractional digits .
train
false
19,130
def _isSubdomainOf(descendantName, ancestorName): descendantLabels = _nameToLabels(descendantName.lower()) ancestorLabels = _nameToLabels(ancestorName.lower()) return (descendantLabels[(- len(ancestorLabels)):] == ancestorLabels)
[ "def", "_isSubdomainOf", "(", "descendantName", ",", "ancestorName", ")", ":", "descendantLabels", "=", "_nameToLabels", "(", "descendantName", ".", "lower", "(", ")", ")", "ancestorLabels", "=", "_nameToLabels", "(", "ancestorName", ".", "lower", "(", ")", ")",...
test whether c{descendantname} is equal to or is a i{subdomain} of c{ancestorname} .
train
false
19,131
def course_and_time_based_filename_generator(course_id, base_name): return u'{course_prefix}_{base_name}_{timestamp_str}'.format(course_prefix=course_filename_prefix_generator(course_id), base_name=get_valid_filename(base_name), timestamp_str=datetime.now(UTC).strftime('%Y-%m-%d-%H%M%S'))
[ "def", "course_and_time_based_filename_generator", "(", "course_id", ",", "base_name", ")", ":", "return", "u'{course_prefix}_{base_name}_{timestamp_str}'", ".", "format", "(", "course_prefix", "=", "course_filename_prefix_generator", "(", "course_id", ")", ",", "base_name", ...
generates a filename based on the current time and the supplied filename .
train
false
19,132
def ASSIGNJS(**kargs): from gluon.serializers import json s = '' for (key, value) in kargs.items(): s += ('var %s = %s;\n' % (key, json(value))) return XML(s)
[ "def", "ASSIGNJS", "(", "**", "kargs", ")", ":", "from", "gluon", ".", "serializers", "import", "json", "s", "=", "''", "for", "(", "key", ",", "value", ")", "in", "kargs", ".", "items", "(", ")", ":", "s", "+=", "(", "'var %s = %s;\\n'", "%", "(",...
example: assignjs will return the following javascript variables assignations : var var1 = "1"; var var2 = "2"; args: **kargs: any keywords arguments and assigned values .
train
false
19,133
def clearcache(): global cache cache = {}
[ "def", "clearcache", "(", ")", ":", "global", "cache", "cache", "=", "{", "}" ]
clear cached items - for debugging use .
train
false
19,134
def _auto_create_specific_service_command_generator(run=utils.run): command_generator = _command_generators[get_name_of_init(run)] command_list = [c for c in COMMANDS if (c not in ['list', 'set_target'])] return _ServiceCommandGenerator(command_generator, command_list)
[ "def", "_auto_create_specific_service_command_generator", "(", "run", "=", "utils", ".", "run", ")", ":", "command_generator", "=", "_command_generators", "[", "get_name_of_init", "(", "run", ")", "]", "command_list", "=", "[", "c", "for", "c", "in", "COMMANDS", ...
create a class that will create partial functions that generate commands for the current init command .
train
false
19,135
def _check_cycles_alt_sym(perm): n = perm.size af = perm.array_form current_len = 0 total_len = 0 used = set() for i in range((n // 2)): if ((not (i in used)) and (i < ((n // 2) - total_len))): current_len = 1 used.add(i) j = i while (af[j] != i): current_len += 1 j = af[j] used.add(j) total_len += current_len if ((current_len > (n // 2)) and (current_len < (n - 2)) and isprime(current_len)): return True return False
[ "def", "_check_cycles_alt_sym", "(", "perm", ")", ":", "n", "=", "perm", ".", "size", "af", "=", "perm", ".", "array_form", "current_len", "=", "0", "total_len", "=", "0", "used", "=", "set", "(", ")", "for", "i", "in", "range", "(", "(", "n", "//"...
checks for cycles of prime length p with n/2 < p < n-2 .
train
false
19,136
def search_lxc_bridges(): bridges = __context__.get('lxc.bridges', None) if (not bridges): bridges = set() running_bridges = set() bridges.add(DEFAULT_BR) try: output = __salt__['cmd.run_all']('brctl show') for line in output['stdout'].splitlines()[1:]: if (not line.startswith(' ')): running_bridges.add(line.split()[0].strip()) except (SaltInvocationError, CommandExecutionError): pass for (ifc, ip) in six.iteritems(__grains__.get('ip_interfaces', {})): if (ifc in running_bridges): bridges.add(ifc) elif os.path.exists('/sys/devices/virtual/net/{0}/bridge'.format(ifc)): bridges.add(ifc) bridges = list(bridges) def sort_bridges(a): pref = 'z' if ('lxc' in a): pref = 'a' elif ('br0' == a): pref = 'c' return '{0}_{1}'.format(pref, a) bridges.sort(key=sort_bridges) __context__['lxc.bridges'] = bridges return bridges
[ "def", "search_lxc_bridges", "(", ")", ":", "bridges", "=", "__context__", ".", "get", "(", "'lxc.bridges'", ",", "None", ")", "if", "(", "not", "bridges", ")", ":", "bridges", "=", "set", "(", ")", "running_bridges", "=", "set", "(", ")", "bridges", "...
search which bridges are potentially available as lxc bridges cli example: .
train
true
19,137
def get_kind_key(prefix, key_path): path = [] path.append(key_path.element_list()[(-1)].type()) for e in key_path.element_list(): if e.has_name(): key_id = e.name() else: key_id = str(e.id()).zfill(ID_KEY_LENGTH) path.append('{0}{2}{1}'.format(e.type(), key_id, dbconstants.ID_SEPARATOR)) encoded_path = dbconstants.KIND_SEPARATOR.join(path) encoded_path += dbconstants.KIND_SEPARATOR return ((prefix + dbconstants.KEY_DELIMITER) + encoded_path)
[ "def", "get_kind_key", "(", "prefix", ",", "key_path", ")", ":", "path", "=", "[", "]", "path", ".", "append", "(", "key_path", ".", "element_list", "(", ")", "[", "(", "-", "1", ")", "]", ".", "type", "(", ")", ")", "for", "e", "in", "key_path",...
returns a key for the kind table .
train
false
19,138
def ansiformat(attr, text): result = [] if (attr[:1] == attr[(-1):] == '+'): result.append(codes['blink']) attr = attr[1:(-1)] if (attr[:1] == attr[(-1):] == '*'): result.append(codes['bold']) attr = attr[1:(-1)] if (attr[:1] == attr[(-1):] == '_'): result.append(codes['underline']) attr = attr[1:(-1)] result.append(codes[attr]) result.append(text) result.append(codes['reset']) return ''.join(result)
[ "def", "ansiformat", "(", "attr", ",", "text", ")", ":", "result", "=", "[", "]", "if", "(", "attr", "[", ":", "1", "]", "==", "attr", "[", "(", "-", "1", ")", ":", "]", "==", "'+'", ")", ":", "result", ".", "append", "(", "codes", "[", "'b...
format text with a color and/or some attributes:: color normal color *color* bold color _color_ underlined color +color+ blinking color .
train
true
19,139
def createNextMethod(methodName): methodName = fix_method_name(methodName) def methodNext(self, previous_request, previous_response): "Retrieves the next page of results.\n\nArgs:\n previous_request: The request for the previous page. (required)\n previous_response: The response from the request for the previous page. (required)\n\nReturns:\n A request object that you can call 'execute()' on to request the next\n page. Returns None if there are no more items in the collection.\n " if (('nextPageToken' not in previous_response) or (not previous_response['nextPageToken'])): return None request = copy.copy(previous_request) pageToken = previous_response['nextPageToken'] parsed = list(urlparse(request.uri)) q = parse_qsl(parsed[4]) newq = [(key, value) for (key, value) in q if (key != 'pageToken')] newq.append(('pageToken', pageToken)) parsed[4] = urlencode(newq) uri = urlunparse(parsed) request.uri = uri logger.info(('URL being requested: %s %s' % (methodName, uri))) return request return (methodName, methodNext)
[ "def", "createNextMethod", "(", "methodName", ")", ":", "methodName", "=", "fix_method_name", "(", "methodName", ")", "def", "methodNext", "(", "self", ",", "previous_request", ",", "previous_response", ")", ":", "if", "(", "(", "'nextPageToken'", "not", "in", ...
creates any _next methods for attaching to a resource .
train
false
19,140
def multi_constructor_pkl(loader, tag_suffix, node): global additional_environ if ((tag_suffix != '') and (tag_suffix != u'')): raise AssertionError((('Expected tag_suffix to be "" but it is "' + tag_suffix) + '": Put space between !pkl: and the filename.')) mapping = loader.construct_yaml_str(node) obj = serial.load(preprocess(mapping, additional_environ)) proxy = Proxy(callable=do_not_recurse, positionals=(), keywords={'value': obj}, yaml_src=yaml.serialize(node)) return proxy
[ "def", "multi_constructor_pkl", "(", "loader", ",", "tag_suffix", ",", "node", ")", ":", "global", "additional_environ", "if", "(", "(", "tag_suffix", "!=", "''", ")", "and", "(", "tag_suffix", "!=", "u''", ")", ")", ":", "raise", "AssertionError", "(", "(...
callback used by pyyaml when a "!pkl:" tag is encountered .
train
false
19,141
def simple_load_icon(module, index, as_data=False, size=ICON_SIZE): try: (large_icons, small_icons) = win32gui.ExtractIconEx(module, index, 10) except pywintypes.error as err: if (err.winerror != winerror.ERROR_FILE_NOT_FOUND): raise prints((u'File %r does not exist, cannot load icon' % module)) return icons = (large_icons + small_icons) try: if icons: must_use_qt() pixmap = copy_to_size(QtWin.fromHICON(icons[0]), size=size) if as_data: return pixmap_to_data(pixmap) return QIcon(pixmap) finally: tuple(map(win32gui.DestroyIcon, icons))
[ "def", "simple_load_icon", "(", "module", ",", "index", ",", "as_data", "=", "False", ",", "size", "=", "ICON_SIZE", ")", ":", "try", ":", "(", "large_icons", ",", "small_icons", ")", "=", "win32gui", ".", "ExtractIconEx", "(", "module", ",", "index", ",...
use the win32 api extracticon to load the icon .
train
false
19,142
def send_login_instructions(user): token = generate_login_token(user) login_link = url_for_security('token_login', token=token, _external=True) send_mail(config_value('EMAIL_SUBJECT_PASSWORDLESS'), user.email, 'login_instructions', user=user, login_link=login_link) login_instructions_sent.send(app._get_current_object(), user=user, login_token=token)
[ "def", "send_login_instructions", "(", "user", ")", ":", "token", "=", "generate_login_token", "(", "user", ")", "login_link", "=", "url_for_security", "(", "'token_login'", ",", "token", "=", "token", ",", "_external", "=", "True", ")", "send_mail", "(", "con...
sends the login instructions email for the specified user .
train
true
19,143
@check_simple_wiki_locale @mobile_template('products/{mobile/}product.html') def product_landing(request, template, slug): product = get_object_or_404(Product, slug=slug) if request.is_ajax(): topic_list = list() for t in Topic.objects.filter(product=product, visible=True): topic_list.append({'id': t.id, 'title': t.title}) return HttpResponse(json.dumps({'topics': topic_list}), content_type='application/json') if (slug == 'firefox'): latest_version = product_details.firefox_versions['LATEST_FIREFOX_VERSION'] else: versions = product.versions.filter(default=True) if versions: latest_version = versions[0].min_version else: latest_version = 0 return render(request, template, {'product': product, 'products': Product.objects.filter(visible=True), 'topics': topics_for(product=product, parent=None), 'search_params': {'product': slug}, 'latest_version': latest_version})
[ "@", "check_simple_wiki_locale", "@", "mobile_template", "(", "'products/{mobile/}product.html'", ")", "def", "product_landing", "(", "request", ",", "template", ",", "slug", ")", ":", "product", "=", "get_object_or_404", "(", "Product", ",", "slug", "=", "slug", ...
the product landing page .
train
false
19,144
def closest_feasible(individual): feasible_ind = numpy.array(individual) feasible_ind = numpy.maximum(MIN_BOUND, feasible_ind) feasible_ind = numpy.minimum(MAX_BOUND, feasible_ind) return feasible_ind
[ "def", "closest_feasible", "(", "individual", ")", ":", "feasible_ind", "=", "numpy", ".", "array", "(", "individual", ")", "feasible_ind", "=", "numpy", ".", "maximum", "(", "MIN_BOUND", ",", "feasible_ind", ")", "feasible_ind", "=", "numpy", ".", "minimum", ...
a function returning a valid individual from an invalid one .
train
false
19,145
@treeio_login_required @handle_response_format def event_delete(request, event_id, response_format='html'): event = get_object_or_404(Event, pk=event_id) if (not request.user.profile.has_permission(event, mode='w')): return user_denied(request, message="You don't have access to this Event") if request.POST: if ('delete' in request.POST): if ('trash' in request.POST): event.trash = True event.save() else: event.delete() return HttpResponseRedirect(reverse('events_index')) elif ('cancel' in request.POST): return HttpResponseRedirect(reverse('events_event_view', args=[event.id])) return render_to_response('events/event_delete', {'event': event}, context_instance=RequestContext(request), response_format=response_format)
[ "@", "treeio_login_required", "@", "handle_response_format", "def", "event_delete", "(", "request", ",", "event_id", ",", "response_format", "=", "'html'", ")", ":", "event", "=", "get_object_or_404", "(", "Event", ",", "pk", "=", "event_id", ")", "if", "(", "...
event delete .
train
false
19,147
@lazyobject def GetConsoleScreenBufferInfo(): gcsbi = ctypes.windll.kernel32.GetConsoleScreenBufferInfo gcsbi.errcheck = check_zero gcsbi.argtypes = (HANDLE, POINTER(CONSOLE_SCREEN_BUFFER_INFO)) gcsbi.restype = BOOL return gcsbi
[ "@", "lazyobject", "def", "GetConsoleScreenBufferInfo", "(", ")", ":", "gcsbi", "=", "ctypes", ".", "windll", ".", "kernel32", ".", "GetConsoleScreenBufferInfo", "gcsbi", ".", "errcheck", "=", "check_zero", "gcsbi", ".", "argtypes", "=", "(", "HANDLE", ",", "P...
returns the windows version of the get screen buffer .
train
false
19,148
def host_theme_path():
    """Return the absolute directory of the theme mapped to the current host.

    Walks ``settings.HOST_THEMES`` (host, theme-package) pairs.  The current
    site's domain is looked up lazily -- only once, and only if there is at
    least one pair to compare against.  Returns an empty string when no host
    matches or the matching theme package cannot be imported.
    """
    domain = None
    for host, theme in settings.HOST_THEMES:
        if domain is None:
            # Deferred DB hit: fetch the domain only when actually needed.
            domain = Site.objects.get(id=current_site_id()).domain
        if host.lower() != domain.lower():
            continue
        try:
            __import__(theme)
        except ImportError:
            # Unimportable theme package: keep scanning remaining pairs.
            pass
        else:
            module = sys.modules[theme]
            return os.path.dirname(os.path.abspath(module.__file__))
    return u''
[ "def", "host_theme_path", "(", ")", ":", "domain", "=", "None", "for", "(", "host", ",", "theme", ")", "in", "settings", ".", "HOST_THEMES", ":", "if", "(", "domain", "is", "None", ")", ":", "domain", "=", "Site", ".", "objects", ".", "get", "(", "...
returns the directory of the theme associated with the given host .
train
true
19,149
@do
def update_repo(package_directory, target_bucket, target_key, source_repo,
                packages, flocker_version, distribution):
    """Update the ``target_bucket``/``target_key`` yum repository with
    packages from ``source_repo``.

    Effect sequence (order matters):
      1. mirror the existing repo contents from S3 into *package_directory*;
      2. download the requested *packages* from *source_repo*;
      3. regenerate the repository metadata locally;
      4. upload the new packages plus metadata back to S3.
    """
    package_directory.createDirectory()

    # Only mirror files with the distribution's package extension (e.g. .rpm).
    extension = '.' + distribution.package_type().value
    yield Effect(DownloadS3KeyRecursively(
        source_bucket=target_bucket,
        source_prefix=target_key,
        target_path=package_directory,
        filter_extensions=(extension,)))

    fetched = yield Effect(DownloadPackagesFromRepository(
        source_repo=source_repo,
        target_path=package_directory,
        packages=packages,
        flocker_version=flocker_version,
        distribution=distribution))

    metadata = yield Effect(CreateRepo(
        repository_path=package_directory,
        distribution=distribution))

    # Upload both the freshly downloaded packages and the new metadata.
    yield Effect(UploadToS3Recursively(
        source_path=package_directory,
        target_bucket=target_bucket,
        target_key=target_key,
        files=fetched | metadata))
[ "@", "do", "def", "update_repo", "(", "package_directory", ",", "target_bucket", ",", "target_key", ",", "source_repo", ",", "packages", ",", "flocker_version", ",", "distribution", ")", ":", "package_directory", ".", "createDirectory", "(", ")", "package_type", "...
update target_bucket yum repository with packages from source_repo repository .
train
false
19,150
@dec.onlyif_unicode_paths
def test_unicode_cwd():
    """Check that IPython starts with non-ASCII characters in the cwd path.

    Creates a temporary working directory whose name contains a Euro sign,
    chdirs into it, and runs the application's startup/config steps there.
    The original cwd is always restored, and (fix) the temporary directory
    is now removed afterwards instead of being leaked on every run.
    """
    import shutil

    wd = tempfile.mkdtemp(suffix=u'\u20ac')
    old_wd = os.getcwd()
    os.chdir(wd)
    try:
        app = BaseIPythonApplication()
        # Mirrors the relevant part of app.initialize().
        app.init_profile_dir()
        app.init_config_files()
        app.load_config_file(suppress_errors=False)
    finally:
        os.chdir(old_wd)
        # Bug fix: mkdtemp directories were never cleaned up.
        shutil.rmtree(wd, ignore_errors=True)
[ "@", "dec", ".", "onlyif_unicode_paths", "def", "test_unicode_cwd", "(", ")", ":", "wd", "=", "tempfile", ".", "mkdtemp", "(", "suffix", "=", "u'\\u20ac'", ")", "old_wd", "=", "os", ".", "getcwd", "(", ")", "os", ".", "chdir", "(", "wd", ")", "try", ...
check that ipython starts with non-ascii characters in the path .
train
false
19,151
@pytest.fixture(scope='function')
def clean_system(request):
    """Fixture simulating a clean system: no user config, no cloned cookiecutters.

    Moves ``~/.cookiecutterrc``, ``~/.cookiecutters`` and
    ``~/.cookiecutter_replay`` aside (to ``*.backup`` siblings) for the
    duration of the test, and registers a finalizer that restores whatever
    was present before.
    """
    rc_path = os.path.expanduser('~/.cookiecutterrc')
    rc_backup = os.path.expanduser('~/.cookiecutterrc.backup')
    rc_found = os.path.exists(rc_path)
    if rc_found:
        shutil.copy(rc_path, rc_backup)
        os.remove(rc_path)

    cutters_dir = os.path.expanduser('~/.cookiecutters')
    cutters_backup = os.path.expanduser('~/.cookiecutters.backup')
    cutters_found = backup_dir(cutters_dir, cutters_backup)

    replay_dir = os.path.expanduser('~/.cookiecutter_replay')
    replay_backup = os.path.expanduser('~/.cookiecutter_replay.backup')
    replay_found = backup_dir(replay_dir, replay_backup)

    def restore_backup():
        # Put the user config back only if it existed before the test.
        if rc_found and os.path.exists(rc_backup):
            shutil.copy(rc_backup, rc_path)
            os.remove(rc_backup)
        restore_backup_dir(cutters_dir, cutters_backup, cutters_found)
        restore_backup_dir(replay_dir, replay_backup, replay_found)

    request.addfinalizer(restore_backup)
[ "@", "pytest", ".", "fixture", "(", "scope", "=", "'function'", ")", "def", "clean_system", "(", "request", ")", ":", "user_config_path", "=", "os", ".", "path", ".", "expanduser", "(", "'~/.cookiecutterrc'", ")", "user_config_path_backup", "=", "os", ".", "...
fixture that simulates a clean system with no config/cloned cookiecutters .
train
false