id_within_dataset
int64
1
55.5k
snippet
stringlengths
19
14.2k
tokens
listlengths
6
1.63k
nl
stringlengths
6
352
split_within_dataset
stringclasses
1 value
is_duplicated
bool
2 classes
40,118
def file_contains_pattern(file, pattern):
    """Return True if *file* contains the given egrep pattern."""
    if not os.path.isfile(file):
        raise NameError('file %s does not exist' % file)
    grep_cmd = 'egrep -q "%s" %s' % (pattern, file)
    outcome = utils.run(grep_cmd, ignore_status=True, verbose=False)
    # egrep exits 0 on a match.
    return outcome.exit_status == 0
[ "def", "file_contains_pattern", "(", "file", ",", "pattern", ")", ":", "if", "(", "not", "os", ".", "path", ".", "isfile", "(", "file", ")", ")", ":", "raise", "NameError", "(", "(", "'file %s does not exist'", "%", "file", ")", ")", "cmd_result", "=", ...
return true if file contains the specified egrep pattern .
train
false
40,119
def test_import_vispy_scene():
    """Importing vispy.scene should pull in exactly the expected submodules."""
    expected_extra = ['vispy.app', 'vispy.gloo', 'vispy.glsl',
                      'vispy.scene', 'vispy.color', 'vispy.io',
                      'vispy.geometry', 'vispy.visuals']
    observed = loaded_vispy_modules('vispy.scene', 2)
    assert_equal(observed, set(_min_modules + expected_extra))
[ "def", "test_import_vispy_scene", "(", ")", ":", "modnames", "=", "loaded_vispy_modules", "(", "'vispy.scene'", ",", "2", ")", "more_modules", "=", "[", "'vispy.app'", ",", "'vispy.gloo'", ",", "'vispy.glsl'", ",", "'vispy.scene'", ",", "'vispy.color'", ",", "'vis...
importing vispy .
train
false
40,120
def GetUserInfo(http_cookie, cookie_name=COOKIE_NAME):
    """Extract (email, nickname, isAdmin, valid_cookie) from an auth cookie.

    Verifies the cookie's sha hash against COOKIE_SECRET; on a missing
    email or signature mismatch returns ('', '', False, False).
    NOTE(review): the dataset description says "user protobuf", but the
    code returns a plain 4-tuple.
    """
    COOKIE_SECRET = ''
    try:
        COOKIE_SECRET = os.environ['COOKIE_SECRET']
    except Exception as e:
        logging.info(('WARNING: Cookie secret not set' + str(e)))
    cookie = Cookie.SimpleCookie(http_cookie)
    valid_cookie = True
    cookie_value = ''
    if (cookie_name in cookie):
        cookie_value = cookie[cookie_name].value
        # Undo URL-style escaping of the separator characters.
        cookie_value = cookie_value.replace('%3A', ':')
        cookie_value = cookie_value.replace('%40', '@')
        cookie_value = cookie_value.replace('%2C', ',')
    # Pad with empty strings so unpacking never fails on short values.
    (email, nickname, admin, hsh) = (cookie_value.split(':') + ['', '', '', ''])[:4]
    if (email == ''):
        nickname = ''
        admin = ''
        return ('', '', False, False)
    else:
        # Recompute the signature and compare against the cookie's hash.
        vhsh = sha.new((((email + nickname) + admin) + COOKIE_SECRET)).hexdigest()
        if (hsh != vhsh):
            logging.info((email + ' had invalid cookie, clearing it'))
            valid_cookie = False
            email = ''
            admin = ''
            nickname = ''
            return ('', '', False, False)
    # 'admin' is a comma-separated list of app ids this user administers.
    isAdmin = False
    admin_apps = admin.split(',')
    current_app = os.environ['APPLICATION_ID']
    if (current_app in admin_apps):
        isAdmin = True
    return (email, nickname, isAdmin, valid_cookie)
[ "def", "GetUserInfo", "(", "http_cookie", ",", "cookie_name", "=", "COOKIE_NAME", ")", ":", "COOKIE_SECRET", "=", "''", "try", ":", "COOKIE_SECRET", "=", "os", ".", "environ", "[", "'COOKIE_SECRET'", "]", "except", "Exception", "as", "e", ":", "logging", "."...
get a user protobuf for a specific user .
train
false
40,122
def getlimit(stream):
    """Run *stream* through a Limit pipeline; return the result as int when possible."""
    pipeline = Pipeline()
    pipeline.append(Limit())
    limit = pipeline(stream)
    try:
        return int(limit)
    except ValueError:
        # Non-numeric limit: hand back the raw pipeline result.
        return limit
[ "def", "getlimit", "(", "stream", ")", ":", "pipe", "=", "Pipeline", "(", ")", "pipe", ".", "append", "(", "Limit", "(", ")", ")", "result", "=", "pipe", "(", "stream", ")", "try", ":", "return", "int", "(", "result", ")", "except", "ValueError", "...
function that return the limit of a input sql .
train
false
40,124
def check_regexp(option, opt, value):
    """Validate an optparse regexp option value and return it compiled.

    An already-compiled pattern (anything exposing a ``pattern``
    attribute) is returned unchanged.

    Raises
    ------
    OptionValueError
        If *value* is not a valid regular expression.
    """
    if hasattr(value, 'pattern'):
        # Already a compiled regular expression object.
        return value
    try:
        return re.compile(value)
    except (ValueError, re.error):
        # BUG FIX: re.compile raises re.error (not ValueError) for bad
        # patterns, so the original except clause never fired and invalid
        # regexps escaped as an uncaught re.error.
        raise OptionValueError('option %s: invalid regexp value: %r' % (opt, value))
[ "def", "check_regexp", "(", "option", ",", "opt", ",", "value", ")", ":", "if", "hasattr", "(", "value", ",", "'pattern'", ")", ":", "return", "value", "try", ":", "return", "re", ".", "compile", "(", "value", ")", "except", "ValueError", ":", "raise",...
check a regexp value by trying to compile it return the compiled regexp .
train
false
40,125
def check_number_of_calls(object_with_method, method_name, maximum_calls, minimum_calls=1):
    """Assert that one method's call count stays within [minimum_calls, maximum_calls].

    Thin wrapper delegating to check_sum_of_calls with a single-element
    method list.
    """
    return check_sum_of_calls(
        object_with_method, [method_name], maximum_calls, minimum_calls)
[ "def", "check_number_of_calls", "(", "object_with_method", ",", "method_name", ",", "maximum_calls", ",", "minimum_calls", "=", "1", ")", ":", "return", "check_sum_of_calls", "(", "object_with_method", ",", "[", "method_name", "]", ",", "maximum_calls", ",", "minimu...
instruments the given method on the given object to verify the number of calls to the method is less than or equal to the expected maximum_calls and greater than or equal to the expected minimum_calls .
train
false
40,127
def _query(action=None, command=None, args=None, method='GET', header_dict=None, data=None):
    """Perform a signed query against the GoGrid REST API.

    NOTE(review): the accompanying description says "vultr", but the code
    clearly targets https://api.gogrid.com.
    Returns the decoded 'dict' portion of the salt http query result.
    """
    vm_ = get_configured_provider()
    apikey = config.get_cloud_config_value('apikey', vm_, __opts__, search_global=False)
    sharedsecret = config.get_cloud_config_value('sharedsecret', vm_, __opts__, search_global=False)
    path = 'https://api.gogrid.com/api/'
    if action:
        path += action
    if command:
        path += '/{0}'.format(command)
    log.debug('GoGrid URL: {0}'.format(path))
    if (not isinstance(args, dict)):
        args = {}
    # Request signature: md5(apikey + sharedsecret + unix epoch).
    epoch = str(int(time.time()))
    hashtext = ''.join((apikey, sharedsecret, epoch))
    args['sig'] = hashlib.md5(hashtext).hexdigest()
    args['format'] = 'json'
    args['v'] = '1.0'
    args['api_key'] = apikey
    if (header_dict is None):
        header_dict = {}
    if (method != 'POST'):
        header_dict['Accept'] = 'application/json'
    decode = True
    if (method == 'DELETE'):
        # Presumably DELETE responses carry no JSON body — do not decode.
        decode = False
    return_content = None
    result = salt.utils.http.query(path, method, params=args, data=data, header_dict=header_dict, decode=decode, decode_type='json', text=True, status=True, opts=__opts__)
    log.debug('GoGrid Response Status Code: {0}'.format(result['status']))
    return result['dict']
[ "def", "_query", "(", "action", "=", "None", ",", "command", "=", "None", ",", "args", "=", "None", ",", "method", "=", "'GET'", ",", "header_dict", "=", "None", ",", "data", "=", "None", ")", ":", "vm_", "=", "get_configured_provider", "(", ")", "ap...
perform a query directly against the gogrid rest api .
train
true
40,130
def _combine_triggers(data, remapping=None): new_trigger = np.zeros(data.shape[1]) if (data.astype(bool).sum(axis=0).max() > 1): logger.info(' Found multiple events at the same time sample. Cannot create trigger channel.') return if (remapping is None): remapping = (np.arange(data) + 1) for (d, event_id) in zip(data, remapping): idx = d.nonzero() if np.any(idx): new_trigger[idx] += event_id return new_trigger
[ "def", "_combine_triggers", "(", "data", ",", "remapping", "=", "None", ")", ":", "new_trigger", "=", "np", ".", "zeros", "(", "data", ".", "shape", "[", "1", "]", ")", "if", "(", "data", ".", "astype", "(", "bool", ")", ".", "sum", "(", "axis", ...
combine binary triggers .
train
false
40,132
def fix_location_header(request, response):
    """Rewrite a relative Location header into an absolute URI and return the response."""
    has_location = 'Location' in response
    if has_location and request.get_host():
        absolute = request.build_absolute_uri(response['Location'])
        response['Location'] = absolute
    return response
[ "def", "fix_location_header", "(", "request", ",", "response", ")", ":", "if", "(", "(", "'Location'", "in", "response", ")", "and", "request", ".", "get_host", "(", ")", ")", ":", "response", "[", "'Location'", "]", "=", "request", ".", "build_absolute_ur...
ensures that we always use an absolute uri in any location header in the response .
train
false
40,134
def cast_int_addr(n):
    """Cast an address to a Python int.

    Accepts plain ints/longs and, when cffi is installed, CFFI pointers
    (cast through ``size_t``).
    """
    if isinstance(n, (int, long)):
        return n
    try:
        import cffi
    except ImportError:
        cffi = None
    if cffi is not None:
        ffi = cffi.FFI()
        if isinstance(n, ffi.CData):
            return int(ffi.cast('size_t', n))
    raise ValueError('Cannot cast %r to int' % n)
[ "def", "cast_int_addr", "(", "n", ")", ":", "if", "isinstance", "(", "n", ",", "(", "int", ",", "long", ")", ")", ":", "return", "n", "try", ":", "import", "cffi", "except", "ImportError", ":", "pass", "else", ":", "ffi", "=", "cffi", ".", "FFI", ...
cast an address to a python int this could be a python integer or a cffi pointer .
train
false
40,135
def is_sentinel(obj):
    """Return True when *obj* is the stop-signal string taken off the queue."""
    is_plain_str = type(obj) is str
    return is_plain_str and obj == SENTINEL
[ "def", "is_sentinel", "(", "obj", ")", ":", "return", "(", "(", "type", "(", "obj", ")", "is", "str", ")", "and", "(", "obj", "==", "SENTINEL", ")", ")" ]
predicate to determine whether an item from the queue is the signal to stop .
train
false
40,136
@curry
def lossless_float_to_int(funcname, func, argname, arg):
    """Preprocessor that coerces integral floats to ints (with a warning).

    Non-floats pass through untouched; floats with a fractional part
    raise TypeError.
    """
    if not isinstance(arg, float):
        return arg
    coerced = int(arg)
    if arg != coerced:
        raise TypeError(arg)
    warnings.warn('{f} expected an int for argument {name!r}, but got float {arg}. Coercing to int.'.format(f=funcname, name=argname, arg=arg))
    return coerced
[ "@", "curry", "def", "lossless_float_to_int", "(", "funcname", ",", "func", ",", "argname", ",", "arg", ")", ":", "if", "(", "not", "isinstance", "(", "arg", ",", "float", ")", ")", ":", "return", "arg", "arg_as_int", "=", "int", "(", "arg", ")", "if...
a preprocessor that coerces integral floats to ints .
train
true
40,138
def str_to_bytes(size):
    """Convert a human-readable size string to bytes (1024-based units).

    'infinity' maps to -1; non-string inputs (AttributeError on .lower())
    are returned unchanged.
    @type size: str
    """
    units = ['', 'b', 'kb', 'mb', 'gb', 'tb', 'pb', 'eb', 'zb', 'yb']
    curr_size = size
    try:
        if (size.lower() != 'infinity'):
            try:
                # Bare number with no unit, e.g. "1 024" -> 1024.
                curr_item = size.strip().split(' ')
                curr_size = ''.join(curr_item)
                curr_size = int(curr_size)
            except ValueError:
                # Number plus unit, e.g. "1.5 gb": last token is the unit.
                curr_item = size.strip().split(' ')
                curr_unit = curr_item[(-1)].strip().lower()
                curr_item = curr_item[0:(-1)]
                curr_size = ''.join(curr_item)
                try:
                    curr_size = float(curr_size)
                except ValueError:
                    error = ('Unable to convert size ' + str(size))
                    raise MalformedYMLException(error)
                try:
                    # Multiply up by 1024 for each step above 'b'.
                    pos = units.index(curr_unit)
                    for x in range(pos, 1, (-1)):
                        curr_size *= 1024
                except ValueError:
                    error = ('Unable to convert size ' + str(size))
                    raise MalformedYMLException(error)
            except (UnboundLocalError, NameError):
                pass
        else:
            # 'infinity' is encoded as -1.
            curr_size = (-1)
    except AttributeError:
        # Non-string input (already numeric): return it as-is.
        pass
    return curr_size
[ "def", "str_to_bytes", "(", "size", ")", ":", "units", "=", "[", "''", ",", "'b'", ",", "'kb'", ",", "'mb'", ",", "'gb'", ",", "'tb'", ",", "'pb'", ",", "'eb'", ",", "'zb'", ",", "'yb'", "]", "curr_size", "=", "size", "try", ":", "if", "(", "si...
uses the bi convention: 1024 b = 1 kb since this method primarily has inputs of bytes for ram @type size: str .
train
false
40,140
def poly_map_domain(oldx, domain, window):
    """Map *oldx* from *domain* into *window* by an affine shift-and-scale."""
    domain = np.array(domain, dtype=np.float64)
    window = np.array(window, dtype=np.float64)
    span = domain[1] - domain[0]
    scale = (window[1] - window[0]) / span
    shift = ((window[0] * domain[1]) - (window[1] * domain[0])) / span
    return shift + (scale * oldx)
[ "def", "poly_map_domain", "(", "oldx", ",", "domain", ",", "window", ")", ":", "domain", "=", "np", ".", "array", "(", "domain", ",", "dtype", "=", "np", ".", "float64", ")", "window", "=", "np", ".", "array", "(", "window", ",", "dtype", "=", "np"...
map domain into window by shifting and scaling .
train
false
40,141
def remote_port_tcp(port):
    """Return the set of remote IP addresses this host is connected to on *port*."""
    return _remotes_on(port, 'remote_port')
[ "def", "remote_port_tcp", "(", "port", ")", ":", "ret", "=", "_remotes_on", "(", "port", ",", "'remote_port'", ")", "return", "ret" ]
return a set of ip addrs the current host is connected to on given port .
train
false
40,142
def _make_archive_copy(instance):
    """Snapshot a microsite row into the MicrositeHistory audit table."""
    MicrositeHistory(
        key=instance.key,
        site=instance.site,
        values=instance.values,
    ).save()
[ "def", "_make_archive_copy", "(", "instance", ")", ":", "archive_object", "=", "MicrositeHistory", "(", "key", "=", "instance", ".", "key", ",", "site", "=", "instance", ".", "site", ",", "values", "=", "instance", ".", "values", ")", "archive_object", ".", ...
helper method to make a copy of a microsite into the history table .
train
false
40,144
def _read_gcloud_config():
    """Read gcloud SDK defaults by shelling out to ``gcloud config list``."""
    raw_output = subprocess.check_output('gcloud config list', shell=True)
    text = raw_output.decode('utf-8')
    parsed = configparser.RawConfigParser(allow_no_value=True)
    parsed.readfp(io.StringIO(text))
    return _cfg_to_dot_path_dict(parsed)
[ "def", "_read_gcloud_config", "(", ")", ":", "gcloud_output", "=", "subprocess", ".", "check_output", "(", "'gcloud config list'", ",", "shell", "=", "True", ")", "gcloud_output_as_unicode", "=", "gcloud_output", ".", "decode", "(", "'utf-8'", ")", "cloud_cfg", "=...
read in gcloud sdk config defaults .
train
false
40,145
def response_cookie_rewrite(cookie_string):
    """Rewrite the domain attribute of a response cookie to my_host_name.

    :type cookie_string: str
    """
    replacement = 'domain=' + my_host_name_no_port
    return regex_cookie_rewriter.sub(replacement, cookie_string)
[ "def", "response_cookie_rewrite", "(", "cookie_string", ")", ":", "cookie_string", "=", "regex_cookie_rewriter", ".", "sub", "(", "(", "'domain='", "+", "my_host_name_no_port", ")", ",", "cookie_string", ")", "return", "cookie_string" ]
rewrite response cookie strings domain to my_host_name :type cookie_string: str .
train
false
40,146
def securitygroupid(vm_):
    """Return the list of security group ids configured for *vm_*.

    Combines ids given directly via 'securitygroupid' with ids resolved
    from names in 'securitygroupname' through an EC2
    DescribeSecurityGroups query; a set removes duplicates.
    """
    securitygroupid_set = set()
    securitygroupid_list = config.get_cloud_config_value('securitygroupid', vm_, __opts__, search_global=False)
    if securitygroupid_list:
        securitygroupid_set = securitygroupid_set.union(set(securitygroupid_list))
    securitygroupname_list = config.get_cloud_config_value('securitygroupname', vm_, __opts__, search_global=False)
    if securitygroupname_list:
        if (not isinstance(securitygroupname_list, list)):
            # A single name may be configured as a bare string.
            securitygroupname_list = [securitygroupname_list]
        params = {'Action': 'DescribeSecurityGroups'}
        for sg in aws.query(params, location=get_location(), provider=get_provider(), opts=__opts__, sigver='4'):
            if (sg['groupName'] in securitygroupname_list):
                log.debug('AWS SecurityGroup ID of {0} is {1}'.format(sg['groupName'], sg['groupId']))
                securitygroupid_set.add(sg['groupId'])
    return list(securitygroupid_set)
[ "def", "securitygroupid", "(", "vm_", ")", ":", "securitygroupid_set", "=", "set", "(", ")", "securitygroupid_list", "=", "config", ".", "get_cloud_config_value", "(", "'securitygroupid'", ",", "vm_", ",", "__opts__", ",", "search_global", "=", "False", ")", "if...
returns the securitygroupid .
train
true
40,147
def best_search_result(conda_target, conda_context=None, channels_override=None):
    """Find the best ``conda search`` hit for *conda_target*.

    Returns ``(hit, exact)``: the chosen package record (or None when
    there are no hits at all) and whether it matched the target exactly.
    """
    conda_context = _ensure_conda_context(conda_context)
    if (not channels_override):
        conda_context.ensure_channels_configured()
    search_cmd = [conda_context.conda_exec, 'search', '--full-name', '--json']
    if channels_override:
        # Restrict the search to only the caller-supplied channels.
        search_cmd.append('--override-channels')
        for channel in channels_override:
            search_cmd.extend(['--channel', channel])
    search_cmd.append(conda_target.package)
    res = commands.execute(search_cmd)
    hits = json.loads(res).get(conda_target.package, [])
    # Sort newest version first.
    hits = sorted(hits, key=(lambda hit: LooseVersion(hit['version'])), reverse=True)
    if (len(hits) == 0):
        return (None, None)
    # Default to the newest hit; prefer an exact match when one exists.
    best_result = (hits[0], False)
    for hit in hits:
        if is_search_hit_exact(conda_target, hit):
            best_result = (hit, True)
            break
    return best_result
[ "def", "best_search_result", "(", "conda_target", ",", "conda_context", "=", "None", ",", "channels_override", "=", "None", ")", ":", "conda_context", "=", "_ensure_conda_context", "(", "conda_context", ")", "if", "(", "not", "channels_override", ")", ":", "conda_...
find best "conda search" result for specified target .
train
false
40,148
@frappe.whitelist()
def make_default(name):
    """Set the Print Format *name* as the default for its DocType."""
    frappe.has_permission(u'Print Format', u'write')
    print_format = frappe.get_doc(u'Print Format', name)
    if ((frappe.conf.get(u'developer_mode') or 0) == 1):
        # In developer mode, write the default straight onto the DocType.
        doctype = frappe.get_doc(u'DocType', print_format.doc_type)
        doctype.default_print_format = name
        doctype.save()
    else:
        # Otherwise record it as a property-setter override.
        frappe.make_property_setter({u'doctype_or_field': u'DocType', u'doctype': print_format.doc_type, u'property': u'default_print_format', u'value': name})
    frappe.msgprint(frappe._(u'Done'))
[ "@", "frappe", ".", "whitelist", "(", ")", "def", "make_default", "(", "name", ")", ":", "frappe", ".", "has_permission", "(", "u'Print Format'", ",", "u'write'", ")", "print_format", "=", "frappe", ".", "get_doc", "(", "u'Print Format'", ",", "name", ")", ...
set print format as default .
train
false
40,149
def date_to_rfc1123(date):
    """Format a datetime as an RFC-1123 string; falsy dates yield None."""
    if not date:
        return None
    return datetime.strftime(date, RFC1123_DATE_FORMAT)
[ "def", "date_to_rfc1123", "(", "date", ")", ":", "return", "(", "datetime", ".", "strftime", "(", "date", ",", "RFC1123_DATE_FORMAT", ")", "if", "date", "else", "None", ")" ]
converts a datetime value to the corresponding rfc-1123 string .
train
false
40,150
def log_info(**kwargs):
    """Log generic debugging/informative data, typed by the caller's stack frame."""
    caller_frame = stack()[1]
    Message.log(message_type=_compute_message_type(caller_frame), **kwargs)
[ "def", "log_info", "(", "**", "kwargs", ")", ":", "Message", ".", "log", "(", "message_type", "=", "_compute_message_type", "(", "stack", "(", ")", "[", "1", "]", ")", ",", "**", "kwargs", ")" ]
prints any generic debugging/informative info that should appear in the log .
train
false
40,151
@with_device
def interactive(**kw):
    """Open an interactive shell on the current device."""
    sh = shell(**kw)
    return sh.interactive()
[ "@", "with_device", "def", "interactive", "(", "**", "kw", ")", ":", "return", "shell", "(", "**", "kw", ")", ".", "interactive", "(", ")" ]
set interactive mode to boolean b .
train
false
40,152
def l1(id, vdata):
    """Update summary *vdata* for every current L1 child of country *id*.

    For each L1 location under *id* (end_date is None), looks up the most
    recent aggregated 'Resilience' median and stores
    ``{r: resilience, n: name, l: 1, f: parent_id}`` keyed by location id.
    Used only by the initial map load.
    """
    gtable = db.gis_location
    query = (((gtable.parent == id) & (gtable.level == 'L1')) & (gtable.end_date == None))
    aitable = db.vulnerability_aggregated_indicator
    atable = db.vulnerability_aggregate
    # Resilience aggregates of agg_type 4, joined via their parameter id.
    rquery = (((aitable.name == 'Resilience') & (atable.parameter_id == aitable.parameter_id)) & (atable.agg_type == 4))
    rows = db(query).select(gtable.id, gtable.name)
    for row in rows:
        query = (rquery & (atable.location_id == row.id))
        # Most recent aggregate first.
        _row = db(query).select(atable.median, orderby=(~ atable.date)).first()
        resilience = 0
        if (_row and (_row.median is not None)):
            resilience = int(round(_row.median, 0))
        vdata[row.id] = dict(r=resilience, n=row.name, l=1, f=id)
    return
[ "def", "l1", "(", "id", ",", "vdata", ")", ":", "gtable", "=", "db", ".", "gis_location", "query", "=", "(", "(", "(", "gtable", ".", "parent", "==", "id", ")", "&", "(", "gtable", ".", "level", "==", "'L1'", ")", ")", "&", "(", "gtable", ".", ...
update summary vdata for all child l1s of the start country - used only by the initial map load .
train
false
40,153
def sech(arg):
    """Hyperbolic secant of *arg* (element-wise for arrays)."""
    cosh_value = numpy.cosh(arg)
    return 1 / cosh_value
[ "def", "sech", "(", "arg", ")", ":", "return", "(", "1", "/", "numpy", ".", "cosh", "(", "arg", ")", ")" ]
hyperbolic secant .
train
false
40,154
def entities_text(text):
    """Detect and print the entities found in *text* via the Language API."""
    client = language.Client()
    document = client.document_from_text(text)
    for entity in document.analyze_entities():
        print('=' * 20)
        print('{:<16}: {}'.format('name', entity.name))
        print('{:<16}: {}'.format('type', entity.entity_type))
        print('{:<16}: {}'.format('wikipedia_url', entity.wikipedia_url))
        print('{:<16}: {}'.format('metadata', entity.metadata))
        print('{:<16}: {}'.format('salience', entity.salience))
[ "def", "entities_text", "(", "text", ")", ":", "language_client", "=", "language", ".", "Client", "(", ")", "document", "=", "language_client", ".", "document_from_text", "(", "text", ")", "entities", "=", "document", ".", "analyze_entities", "(", ")", "for", ...
detects entities in the text .
train
false
40,155
def promote(operator, size):
    """Promote a scalar operator to *size* as a PROMOTE LinOp node."""
    operands = [operator]
    return lo.LinOp(lo.PROMOTE, size, operands, None)
[ "def", "promote", "(", "operator", ",", "size", ")", ":", "return", "lo", ".", "LinOp", "(", "lo", ".", "PROMOTE", ",", "size", ",", "[", "operator", "]", ",", "None", ")" ]
promotes a scalar operator to the given size .
train
false
40,156
def require_password_change(name):
    """Expire *name*'s password so it must be changed at next login."""
    update = __salt__['user.update']
    return update(name, expired=True)
[ "def", "require_password_change", "(", "name", ")", ":", "return", "__salt__", "[", "'user.update'", "]", "(", "name", ",", "expired", "=", "True", ")" ]
require the user to change their password the next time they log in .
train
false
40,157
def get_run_marketing_url(course_key, user):
    """Return the course run's marketing URL from the course catalog service."""
    run = get_course_run(course_key, user)
    return run.get('marketing_url')
[ "def", "get_run_marketing_url", "(", "course_key", ",", "user", ")", ":", "course_run", "=", "get_course_run", "(", "course_key", ",", "user", ")", "return", "course_run", ".", "get", "(", "'marketing_url'", ")" ]
get a course runs marketing url from the course catalog service .
train
false
40,158
def getLens(filename):
    """Return the sorted list of sequence lengths from a FASTA file (via screed)."""
    records = screed.open(filename)
    lengths = [len(record['sequence']) for record in records]
    return sorted(lengths)
[ "def", "getLens", "(", "filename", ")", ":", "lens", "=", "[", "]", "fa_instance", "=", "screed", ".", "open", "(", "filename", ")", "for", "record", "in", "fa_instance", ":", "lens", ".", "append", "(", "len", "(", "record", "[", "'sequence'", "]", ...
parses fasta file using screed to create a sorted list of contig lengths .
train
false
40,159
def _copy_lines_to_journal(in_, fields=None, n=None, skip=0, terminal_line=''):
    """Copy lines from *in_* (path or open file) into the systemd journal.

    Each line becomes one journal entry tagged with *fields* plus the
    TEST_JOURNAL_FIELDS markers (markers win on key clashes, matching the
    original update order).  Returns None.

    Parameters
    ----------
    in_ : str or file object
        Path to open (and close), or an already-open file left open.
    fields : dict or None
        Extra journal fields.  BUG FIX: the old signature used a mutable
        default (``fields={}``) that was then mutated via ``update`` —
        shared across calls and clobbering caller-supplied dicts.
    n : int or None
        Maximum number of lines to copy; None means until EOF/terminal.
    skip : int
        Leading lines to discard first.
    terminal_line : str or None
        Stop (without sending) when this exact line is read.
    """
    if isinstance(in_, str):
        fin = open(in_, 'r')
    else:
        fin = in_
    # Merge without mutating the caller's dict.
    merged_fields = dict(fields or {})
    merged_fields.update(TEST_JOURNAL_FIELDS)
    for i in xrange(skip):
        fin.readline()
    i = 0
    while ((n is None) or (i < n)):
        l = fin.readline()
        if ((terminal_line is not None) and (l == terminal_line)):
            break
        journal.send(MESSAGE=l.strip(), **merged_fields)
        i += 1
    if isinstance(in_, str):
        fin.close()
[ "def", "_copy_lines_to_journal", "(", "in_", ",", "fields", "=", "{", "}", ",", "n", "=", "None", ",", "skip", "=", "0", ",", "terminal_line", "=", "''", ")", ":", "if", "isinstance", "(", "in_", ",", "str", ")", ":", "fin", "=", "open", "(", "in...
copy lines from one file to systemd journal returns none .
train
false
40,162
def _unpack_batch_response(response, content):
    """Yield (headers, payload) pairs from a multipart batch HTTP response.

    Each sub-response's embedded status line is folded into its headers;
    JSON payloads are decoded, anything else is yielded as raw text.
    """
    parser = Parser()
    message = _generate_faux_mime_message(parser, response, content)
    if (not isinstance(message._payload, list)):
        raise ValueError('Bad response: not multi-part')
    for subrequest in message._payload:
        # First line of the part is the embedded HTTP status line; the
        # remainder holds the sub-response's own headers and body.
        (status_line, rest) = subrequest._payload.split('\n', 1)
        (_, status, _) = status_line.split(' ', 2)
        sub_message = parser.parsestr(rest)
        payload = sub_message._payload
        ctype = sub_message['Content-Type']
        msg_headers = dict(sub_message._headers)
        msg_headers['status'] = status
        headers = httplib2.Response(msg_headers)
        if (ctype and ctype.startswith('application/json')):
            payload = json.loads(payload)
        (yield (headers, payload))
[ "def", "_unpack_batch_response", "(", "response", ",", "content", ")", ":", "parser", "=", "Parser", "(", ")", "message", "=", "_generate_faux_mime_message", "(", "parser", ",", "response", ",", "content", ")", "if", "(", "not", "isinstance", "(", "message", ...
convert response .
train
false
40,163
def _grow_labels(seeds, extents, hemis, names, dist, vert, subject):
    """Grow one circular Label per seed; helper for parallelized grow_labels.

    Each label collects the vertices within *extent* (mm, per the comment
    string) of its seed on the given hemisphere's distance structure.
    """
    labels = []
    for (seed, extent, hemi, name) in zip(seeds, extents, hemis, names):
        # Vertices reachable within *extent* of the seed, with distances.
        (label_verts, label_dist) = _verts_within_dist(dist[hemi], seed, extent)
        if (len(seed) == 1):
            seed_repr = str(seed)
        else:
            seed_repr = ','.join(map(str, seed))
        comment = ('Circular label: seed=%s, extent=%0.1fmm' % (seed_repr, extent))
        label = Label(vertices=label_verts, pos=vert[hemi][label_verts], values=label_dist, hemi=hemi, comment=comment, name=str(name), subject=subject)
        labels.append(label)
    return labels
[ "def", "_grow_labels", "(", "seeds", ",", "extents", ",", "hemis", ",", "names", ",", "dist", ",", "vert", ",", "subject", ")", ":", "labels", "=", "[", "]", "for", "(", "seed", ",", "extent", ",", "hemi", ",", "name", ")", "in", "zip", "(", "see...
helper for parallelization of grow_labels .
train
false
40,164
def _is_from_logout(request): return getattr(request, 'is_from_logout', False)
[ "def", "_is_from_logout", "(", "request", ")", ":", "return", "getattr", "(", "request", ",", "'is_from_logout'", ",", "False", ")" ]
returns whether the request has come from logout action to see if is_from_logout attribute is present .
train
false
40,165
def gzipped(f):
    """Flask view decorator: gzip-compress the response when the client
    advertises gzip in Accept-Encoding."""
    @functools.wraps(f)
    def view_func(*args, **kwargs):
        @after_this_request
        def zipper(response):
            accept_encoding = request.headers.get('Accept-Encoding', '')
            if ('gzip' not in accept_encoding.lower()):
                # Client cannot handle gzip; pass through untouched.
                return response
            response.direct_passthrough = False
            # Skip non-2xx responses and anything already encoded.
            if ((response.status_code < 200) or (response.status_code >= 300) or ('Content-Encoding' in response.headers)):
                return response
            gzip_buffer = IO()
            gzip_file = gzip.GzipFile(mode='wb', fileobj=gzip_buffer)
            gzip_file.write(response.data)
            gzip_file.close()
            response.data = gzip_buffer.getvalue()
            response.headers['Content-Encoding'] = 'gzip'
            response.headers['Vary'] = 'Accept-Encoding'
            response.headers['Content-Length'] = len(response.data)
            return response
        return f(*args, **kwargs)
    return view_func
[ "def", "gzipped", "(", "f", ")", ":", "@", "functools", ".", "wraps", "(", "f", ")", "def", "view_func", "(", "*", "args", ",", "**", "kwargs", ")", ":", "@", "after_this_request", "def", "zipper", "(", "response", ")", ":", "accept_encoding", "=", "...
decorator to make a view compressed .
train
true
40,166
def sortfunc(x_obj, y_obj):
    """cmp-style comparator: orders by each item's second element, descending."""
    second = lambda item: item[1]
    return cmp(second(y_obj), second(x_obj))
[ "def", "sortfunc", "(", "x_obj", ",", "y_obj", ")", ":", "return", "cmp", "(", "y_obj", "[", "1", "]", ",", "x_obj", "[", "1", "]", ")" ]
a simple sort function to sort the values of a list using the second item of each item .
train
false
40,168
def _needs_update(cloud, module, router, network, internal_subnet_ids):
    """Decide whether the existing *router* differs from the module params.

    Returns True on the first detected difference (admin state, SNAT,
    gateway network, external fixed IPs, or internal interface subnets).
    """
    if (router['admin_state_up'] != module.params['admin_state_up']):
        return True
    if router['external_gateway_info']:
        # enable_snat defaults to True when the cloud omits it.
        if (router['external_gateway_info'].get('enable_snat', True) != module.params['enable_snat']):
            return True
    if network:
        if (not router['external_gateway_info']):
            return True
        elif (router['external_gateway_info']['network_id'] != network['id']):
            return True
    if module.params['external_fixed_ips']:
        # Every requested fixed IP must already exist on the gateway.
        for new_iface in module.params['external_fixed_ips']:
            subnet = cloud.get_subnet(new_iface['subnet'])
            exists = False
            for existing_iface in router['external_gateway_info']['external_fixed_ips']:
                if (existing_iface['subnet_id'] == subnet['id']):
                    if ('ip' in new_iface):
                        if (existing_iface['ip_address'] == new_iface['ip']):
                            # Subnet and IP both match.
                            exists = True
                            break
                    else:
                        # Subnet matches and no specific IP was requested.
                        exists = True
                        break
            if (not exists):
                return True
    if module.params['interfaces']:
        # Compare the requested internal subnets against the router's ports.
        existing_subnet_ids = []
        for port in cloud.list_router_interfaces(router, 'internal'):
            if ('fixed_ips' in port):
                for fixed_ip in port['fixed_ips']:
                    existing_subnet_ids.append(fixed_ip['subnet_id'])
        if (set(internal_subnet_ids) != set(existing_subnet_ids)):
            return True
    return False
[ "def", "_needs_update", "(", "cloud", ",", "module", ",", "router", ",", "network", ",", "internal_subnet_ids", ")", ":", "if", "(", "router", "[", "'admin_state_up'", "]", "!=", "module", ".", "params", "[", "'admin_state_up'", "]", ")", ":", "return", "T...
decide if the given router needs an update .
train
false
40,169
def get_vcpu_pin_set():
    """Parse the vcpu_pin_set config option into a set of CPU ids (or None)."""
    if not CONF.vcpu_pin_set:
        return None
    cpuset_ids = parse_cpu_spec(CONF.vcpu_pin_set)
    if not cpuset_ids:
        raise exception.Invalid(
            _('No CPUs available after parsing %r') % CONF.vcpu_pin_set)
    return cpuset_ids
[ "def", "get_vcpu_pin_set", "(", ")", ":", "if", "(", "not", "CONF", ".", "vcpu_pin_set", ")", ":", "return", "None", "cpuset_ids", "=", "parse_cpu_spec", "(", "CONF", ".", "vcpu_pin_set", ")", "if", "(", "not", "cpuset_ids", ")", ":", "raise", "exception",...
parse vcpu_pin_set config .
train
false
40,170
def get_arch_option(major, minor):
    """Return the closest matching 'compute_XY' option for a CC (major, minor) pair."""
    arch = config.FORCE_CUDA_CC or _find_arch((major, minor))
    return 'compute_%d%d' % arch
[ "def", "get_arch_option", "(", "major", ",", "minor", ")", ":", "if", "config", ".", "FORCE_CUDA_CC", ":", "arch", "=", "config", ".", "FORCE_CUDA_CC", "else", ":", "arch", "=", "_find_arch", "(", "(", "major", ",", "minor", ")", ")", "return", "(", "'...
matches with the closest architecture option .
train
false
40,171
def _patched_describe_cluster(emr_conn, *args, **kwargs):
    """Call describe_cluster with boto's Cluster class temporarily patched."""
    boto.emr.connection.Cluster = _PatchedCluster
    try:
        return emr_conn.describe_cluster(*args, **kwargs)
    finally:
        # Always restore the original class, even on failure.
        boto.emr.connection.Cluster = Cluster
[ "def", "_patched_describe_cluster", "(", "emr_conn", ",", "*", "args", ",", "**", "kwargs", ")", ":", "try", ":", "boto", ".", "emr", ".", "connection", ".", "Cluster", "=", "_PatchedCluster", "return", "emr_conn", ".", "describe_cluster", "(", "*", "args", ...
wrapper for :py:meth:boto .
train
false
40,173
def test_uniform_P():
    """P(X < l) and P(X > l + w) must both be zero for Uniform(l, l + w)."""
    lower = Symbol('l', real=True, finite=True)
    width = Symbol('w', positive=True, finite=True)
    X = Uniform('x', lower, lower + width)
    assert P(X < lower) == 0
    assert P(X > (lower + width)) == 0
[ "def", "test_uniform_P", "(", ")", ":", "l", "=", "Symbol", "(", "'l'", ",", "real", "=", "True", ",", "finite", "=", "True", ")", "w", "=", "Symbol", "(", "'w'", ",", "positive", "=", "True", ",", "finite", "=", "True", ")", "X", "=", "Uniform",...
this stopped working because singlecontinuouspspace .
train
false
40,174
def test_get_mutable_mark_dirty():
    """Reading a mutable field dirties it exactly once; later reads,
    sets, and saves leave the dirty-field count unchanged."""
    class MutableTester(XBlock, ):
        'Test class with mutable fields.'
        list_field = List(default=[])
    mutable_test = MutableTester(TestRuntime(services={'field-data': DictFieldData({})}), scope_ids=Mock(spec=ScopeIds))
    # Starts with no dirty fields.
    assert_equals(len(mutable_test._dirty_fields), 0)
    # First read of a mutable field marks it dirty (value may be mutated).
    _test_get = mutable_test.list_field
    assert_equals(len(mutable_test._dirty_fields), 1)
    # Explicit set does not add a second dirty entry.
    mutable_test.list_field = []
    assert_equals(len(mutable_test._dirty_fields), 1)
    # Save keeps the count stable.
    mutable_test.save()
    assert_equals(len(mutable_test._dirty_fields), 1)
    # A subsequent read does not grow the set either.
    _test_get = mutable_test.list_field
    assert_equals(len(mutable_test._dirty_fields), 1)
[ "def", "test_get_mutable_mark_dirty", "(", ")", ":", "class", "MutableTester", "(", "XBlock", ",", ")", ":", "list_field", "=", "List", "(", "default", "=", "[", "]", ")", "mutable_test", "=", "MutableTester", "(", "TestRuntime", "(", "services", "=", "{", ...
ensure that accessing a mutable field type does not mark it dirty if the field has never been set .
train
false
40,176
def filenameToModule(fn):
    """Import and return the module that lives at filename *fn*.

    Tries a normal dotted import first; when the module imported by name
    does not actually correspond to *fn* (name shadowing), it is dropped
    from sys.modules and re-imported directly from the file.
    """
    if (not os.path.exists(fn)):
        raise ValueError(("%r doesn't exist" % (fn,)))
    try:
        ret = reflect.namedAny(reflect.filenameToModuleName(fn))
    except (ValueError, AttributeError):
        # Not importable by dotted name; load straight from the file.
        return _importFromFile(fn)
    # Normalize .pyc/.pyo back to the .py source path for comparison.
    retFile = (os.path.splitext(ret.__file__)[0] + '.py')
    # os.path.samefile is missing on some platforms; use the fallback.
    same = getattr(os.path, 'samefile', samefile)
    if (os.path.isfile(fn) and (not same(fn, retFile))):
        del sys.modules[ret.__name__]
        ret = _importFromFile(fn)
    return ret
[ "def", "filenameToModule", "(", "fn", ")", ":", "if", "(", "not", "os", ".", "path", ".", "exists", "(", "fn", ")", ")", ":", "raise", "ValueError", "(", "(", "\"%r doesn't exist\"", "%", "(", "fn", ",", ")", ")", ")", "try", ":", "ret", "=", "re...
given a filename .
train
false
40,177
def remove_temp_dir(ignore_errors=False):
    """Delete the temp directory tree returned by get_temp_dir()."""
    target = get_temp_dir()
    shutil.rmtree(target, ignore_errors=ignore_errors)
[ "def", "remove_temp_dir", "(", "ignore_errors", "=", "False", ")", ":", "shutil", ".", "rmtree", "(", "get_temp_dir", "(", ")", ",", "ignore_errors", "=", "ignore_errors", ")" ]
remove the temp directory .
train
false
40,178
def stop_workers(worker_sockets, log_fh=None):
    """Notify and close every worker socket; log (not raise) on dead workers."""
    for idx, sock in enumerate(worker_sockets):
        try:
            sock.send('Server shutting down all clients')
        except error:
            # Worker is gone already — record it if we have a log handle.
            if log_fh:
                log_fh.write('Worker %s seems to be dead already. Check for runaways!\n' % idx)
        sock.close()
[ "def", "stop_workers", "(", "worker_sockets", ",", "log_fh", "=", "None", ")", ":", "for", "(", "i", ",", "worker", ")", "in", "enumerate", "(", "worker_sockets", ")", ":", "try", ":", "worker", ".", "send", "(", "'Server shutting down all clients'", ")", ...
stop all workers .
train
false
40,180
def rollback():
    """Roll back server configuration changes made during install.

    Rolls the connection back, then marks the transaction state clean.
    """
    connection._rollback()
    set_clean()
[ "def", "rollback", "(", ")", ":", "connection", ".", "_rollback", "(", ")", "set_clean", "(", ")" ]
rollback server configuration changes made during install .
train
false
40,181
def read_possible_plural_rules():
    """Build a dict of all available plural-rule modules.

    Scans the gluon.contrib.plural_rules package for modules whose name
    is a two-letter language code and records, per language, the tuple
    (lang, nplurals, get_plural_id, construct_plural_form), substituting
    module-level defaults for any attribute a rules module omits.
    Returns an empty dict if the package cannot be imported.
    """
    plurals = {}
    try:
        import gluon.contrib.plural_rules as package
        for (importer, modname, ispkg) in pkgutil.iter_modules(package.__path__):
            # Only two-letter module names are treated as language codes.
            if (len(modname) == 2):
                module = __import__(((package.__name__ + '.') + modname), fromlist=[modname])
                lang = modname
                # NOTE(review): pname is computed but never used.
                pname = (modname + '.py')
                nplurals = getattr(module, 'nplurals', DEFAULT_NPLURALS)
                get_plural_id = getattr(module, 'get_plural_id', DEFAULT_GET_PLURAL_ID)
                construct_plural_form = getattr(module, 'construct_plural_form', DEFAULT_CONSTRUCT_PLURAL_FORM)
                plurals[lang] = (lang, nplurals, get_plural_id, construct_plural_form)
    except ImportError:
        # sys.exc_info() avoids 'except ... as e' scoping differences
        # between Python 2 and 3.
        e = sys.exc_info()[1]
        logging.warn(('Unable to import plural rules: %s' % e))
    return plurals
[ "def", "read_possible_plural_rules", "(", ")", ":", "plurals", "=", "{", "}", "try", ":", "import", "gluon", ".", "contrib", ".", "plural_rules", "as", "package", "for", "(", "importer", ",", "modname", ",", "ispkg", ")", "in", "pkgutil", ".", "iter_module...
creates list of all possible plural rules files the result is cached in plural_rules dictionary to increase speed .
train
false
40,182
def kernel_modules(attrs=None, where=None):
    """Return kernel_modules table data via osquery.

    Only supported on Red Hat or Debian family systems; other families
    get a failure result dict.
    """
    if __grains__['os_family'] not in ('RedHat', 'Debian'):
        return {'result': False, 'comment': 'Only available on Red Hat or Debian based systems.'}
    return _osquery_cmd(table='kernel_modules', attrs=attrs, where=where)
[ "def", "kernel_modules", "(", "attrs", "=", "None", ",", "where", "=", "None", ")", ":", "if", "(", "__grains__", "[", "'os_family'", "]", "in", "[", "'RedHat'", ",", "'Debian'", "]", ")", ":", "return", "_osquery_cmd", "(", "table", "=", "'kernel_module...
return kernel_modules table information from osquery ( red hat and debian families only ) .
train
true
40,183
def extract_meta(total):
    """Summarize a TotalResult into a plain dict of run statistics.

    Produces per-category counts for features, scenarios and steps,
    plus the overall is_success flag.
    """
    feature_stats = {
        'total': total.features_ran,
        'success': total.features_passed,
        'failures': total.features_ran - total.features_passed,
    }
    scenario_stats = {
        'total': total.scenarios_ran,
        'success': total.scenarios_passed,
        'failures': total.scenarios_ran - total.scenarios_passed,
    }
    step_stats = {
        'total': total.steps,
        'success': total.steps_passed,
        'failures': total.steps_failed,
        'skipped': total.steps_skipped,
        'undefined': total.steps_undefined,
    }
    return {
        'features': feature_stats,
        'scenarios': scenario_stats,
        'steps': step_stats,
        'is_success': total.is_success,
    }
[ "def", "extract_meta", "(", "total", ")", ":", "return", "{", "'features'", ":", "{", "'total'", ":", "total", ".", "features_ran", ",", "'success'", ":", "total", ".", "features_passed", ",", "'failures'", ":", "(", "total", ".", "features_ran", "-", "tot...
extract metadata from the totalresult .
train
false
40,184
def _month_bounds(date):
    """Return (start_of_month, start_of_next_month) for the given date.

    The second element is exclusive: the first day of the month after
    *date*, with the year rolled over after December.
    """
    start = date.replace(day=1)
    if start.month < 12:
        end = start.replace(month=start.month + 1)
    else:
        # December rolls over into January of the next year.
        end = start.replace(year=start.year + 1, month=1)
    return (start, end)
[ "def", "_month_bounds", "(", "date", ")", ":", "first_day", "=", "date", ".", "replace", "(", "day", "=", "1", ")", "if", "(", "first_day", ".", "month", "==", "12", ")", ":", "last_day", "=", "first_day", ".", "replace", "(", "year", "=", "(", "fi...
helper: return the first and last days of the month for the given date .
train
false
40,187
def inflate_denoiser_output(centroid_seqs, singleton_seqs, denoiser_map, raw_seqs):
    """Expand denoised centroid/singleton sequences back to per-read records.

    Yields (original sequence id, sequence) pairs: each centroid
    sequence is emitted once per cluster member listed in the denoiser
    map, and each singleton once.  Flowgram ids are translated back to
    original read ids via the raw sequences.
    """
    id_lookup = parse_denoiser_mapping(denoiser_map)
    flowgram_to_seq_id_lookup = flowgram_id_to_seq_id_map(raw_seqs)
    for (id_, seq) in centroid_seqs:
        # Centroid labels look like '<id> | <cluster size>'.
        (id, cluster_size_str) = id_.split(' | ')
        cluster_member_ids = id_lookup[id]
        for c in cluster_member_ids:
            (yield (flowgram_to_seq_id_lookup[c], seq))
    for (id_, seq) in singleton_seqs:
        (yield (flowgram_to_seq_id_lookup[id_], seq))
    return
[ "def", "inflate_denoiser_output", "(", "centroid_seqs", ",", "singleton_seqs", ",", "denoiser_map", ",", "raw_seqs", ")", ":", "id_lookup", "=", "parse_denoiser_mapping", "(", "denoiser_map", ")", "flowgram_to_seq_id_lookup", "=", "flowgram_id_to_seq_id_map", "(", "raw_se...
expand denoiser fasta files based on denoiser map the inflation process works as follows: write each centroid sequence n times .
train
false
40,188
def SMA(ds, count, timeperiod=(- (2 ** 31))):
    """Simple Moving Average over a data series via TA-Lib.

    timeperiod defaults to TA-Lib's TA_INTEGER_DEFAULT sentinel
    (-2**31), letting the library apply its own default period.
    """
    return call_talib_with_ds(ds, count, talib.SMA, timeperiod)
[ "def", "SMA", "(", "ds", ",", "count", ",", "timeperiod", "=", "(", "-", "(", "2", "**", "31", ")", ")", ")", ":", "return", "call_talib_with_ds", "(", "ds", ",", "count", ",", "talib", ".", "SMA", ",", "timeperiod", ")" ]
simple moving average .
train
false
40,189
@with_open_mode('r')
@with_sizes('medium')
def read_small_chunks(f):
    """Benchmark: read the file 20 units at a time until EOF."""
    f.seek(0)
    while f.read(20):
        pass
[ "@", "with_open_mode", "(", "'r'", ")", "@", "with_sizes", "(", "'medium'", ")", "def", "read_small_chunks", "(", "f", ")", ":", "f", ".", "seek", "(", "0", ")", "while", "f", ".", "read", "(", "20", ")", ":", "pass" ]
read 20 units at a time .
train
false
40,190
def metric_to_Ricci_components(expr):
    """Return the Ricci tensor components of a metric in its coordinate basis.

    Contracts the Riemann tensor over its first and third indices
    (R_ij = sum_k R[k, i, k, j]) across the coordinate system's dimension.
    """
    riemann = metric_to_Riemann_components(expr)
    coord_sys = expr.atoms(CoordSystem).pop()
    indices = list(range(coord_sys.dim))
    ricci = [[Add(*[riemann[(k, i, k, j)] for k in indices]) for j in indices] for i in indices]
    return ImmutableDenseNDimArray(ricci)
[ "def", "metric_to_Ricci_components", "(", "expr", ")", ":", "riemann", "=", "metric_to_Riemann_components", "(", "expr", ")", "coord_sys", "=", "expr", ".", "atoms", "(", "CoordSystem", ")", ".", "pop", "(", ")", "indices", "=", "list", "(", "range", "(", ...
return the components of the ricci tensor expressed in a given basis .
train
false
40,191
def set_prod_state(prod_state, device=None):
    """Set the production state of a Zenoss device.

    Defaults to the current minion (its fqdn grain) when *device* is
    not given.  Returns an error string if the device cannot be found,
    otherwise the Zenoss DeviceRouter response.
    """
    if (not device):
        device = __salt__['grains.get']('fqdn')
    device_object = find_device(device)
    if (not device_object):
        return 'Unable to find a device in Zenoss for {0}'.format(device)
    log.info('Setting prodState to %d on %s device', prod_state, device)
    # The API needs the device uid plus its current hash for its
    # hashcheck concurrency guard.
    data = dict(uids=[device_object['uid']], prodState=prod_state, hashcheck=device_object['hash'])
    return _router_request('DeviceRouter', 'setProductionState', [data])
[ "def", "set_prod_state", "(", "prod_state", ",", "device", "=", "None", ")", ":", "if", "(", "not", "device", ")", ":", "device", "=", "__salt__", "[", "'grains.get'", "]", "(", "'fqdn'", ")", "device_object", "=", "find_device", "(", "device", ")", "if"...
a function to set the prod_state in zenoss .
train
true
40,194
def p_include(p):
    '''include : INCLUDE LITERAL'''
    # (PLY reads the grammar rule from the docstring above; keep it to
    # exactly the rule text.)
    # Resolve and load an included thrift file.  Search order: the
    # directory of the including file, then each configured include dir.
    thrift = thrift_stack[(-1)]
    if thrift.__thrift_file__ is None:
        # Relative include resolution needs a real file path, which a
        # file-like object does not provide.
        raise ThriftParserError('Unexpected include statement while loading from file-like object.')
    replace_include_dirs = [os.path.dirname(thrift.__thrift_file__)] + include_dirs_
    for include_dir in replace_include_dirs:
        path = os.path.join(include_dir, p[2])
        if os.path.exists(path):
            child = parse(path)
            # Attach the parsed module to the includer and record it in
            # the 'includes' metadata.
            setattr(thrift, child.__name__, child)
            _add_thrift_meta('includes', child)
            return
    raise ThriftParserError("Couldn't include thrift %s in any directories provided" % p[2])
[ "def", "p_include", "(", "p", ")", ":", "thrift", "=", "thrift_stack", "[", "(", "-", "1", ")", "]", "if", "(", "thrift", ".", "__thrift_file__", "is", "None", ")", ":", "raise", "ThriftParserError", "(", "'Unexcepted include statement while loadingfrom file lik...
include : include literal .
train
false
40,196
def test_make_grid_layout():
    """Test creation of a grid layout and its save/read round trip."""
    tempdir = _TempDir()
    tmp_name = 'bar'
    lout_name = 'test_ica'
    lout_orig = read_layout(kind=lout_name, path=lout_path)
    layout = make_grid_layout(_get_test_info())
    # Round trip: save to disk and read back, expecting identical data.
    layout.save(op.join(tempdir, (tmp_name + '.lout')))
    lout_new = read_layout(kind=tmp_name, path=tempdir)
    assert_array_equal(lout_new.kind, tmp_name)
    assert_array_equal(lout_orig.pos, lout_new.pos)
    assert_array_equal(lout_orig.names, lout_new.names)
    # With two columns: same y within a row, different x across columns.
    layout = make_grid_layout(_get_test_info(), n_col=2)
    assert_true((layout.pos[(0, 1)] == layout.pos[(1, 1)]))
    assert_true((layout.pos[(0, 0)] != layout.pos[(1, 0)]))
    assert_array_equal(layout.pos[0, 3:], layout.pos[1, 3:])
[ "def", "test_make_grid_layout", "(", ")", ":", "tempdir", "=", "_TempDir", "(", ")", "tmp_name", "=", "'bar'", "lout_name", "=", "'test_ica'", "lout_orig", "=", "read_layout", "(", "kind", "=", "lout_name", ",", "path", "=", "lout_path", ")", "layout", "=", ...
test creation of grid layout .
train
false
40,197
def _deep_annotate(element, annotations, exclude=None):
    """Deep copy *element*, applying *annotations* to each cloned node.

    Clauses whose proxy_set intersects *exclude* are cloned without the
    new annotations; nodes already carrying exactly *annotations* are
    reused as-is.  Returns None when *element* is None.
    """
    def clone(elem):
        if (exclude and hasattr(elem, 'proxy_set') and elem.proxy_set.intersection(exclude)):
            # Excluded: copy the node but leave its annotations alone.
            newelem = elem._clone()
        elif (annotations != elem._annotations):
            newelem = elem._annotate(annotations)
        else:
            # Already annotated as requested; reuse the node.
            newelem = elem
        # Recurse into child clauses with the same cloning policy.
        newelem._copy_internals(clone=clone)
        return newelem
    if (element is not None):
        element = clone(element)
    return element
[ "def", "_deep_annotate", "(", "element", ",", "annotations", ",", "exclude", "=", "None", ")", ":", "def", "clone", "(", "elem", ")", ":", "if", "(", "exclude", "and", "hasattr", "(", "elem", ",", "'proxy_set'", ")", "and", "elem", ".", "proxy_set", "....
deep copy the given clauseelement .
train
false
40,198
def split_lines(tokenlist):
    """Yield one token list per line, splitting items on embedded newlines.

    Items are (token, text) pairs or (token, text, mouse_handler)
    triples; a triple's handler is kept on every fragment produced from
    that item.  Empty fragments before a newline are dropped, but the
    final fragment of each item is always appended (possibly empty).
    """
    current = []
    for item in tokenlist:
        if len(item) == 2:
            token, text = item
            trailer = ()
        else:
            token, text, handler = item
            trailer = (handler,)
        fragments = text.split(u'\n')
        # Every fragment except the last ends a line.
        for fragment in fragments[:-1]:
            if fragment:
                current.append((token, fragment) + trailer)
            yield current
            current = []
        current.append((token, fragments[-1]) + trailer)
    yield current
[ "def", "split_lines", "(", "tokenlist", ")", ":", "line", "=", "[", "]", "for", "item", "in", "tokenlist", ":", "if", "(", "len", "(", "item", ")", "==", "2", ")", ":", "(", "token", ",", "string", ")", "=", "item", "parts", "=", "string", ".", ...
take a single list of tuples and yield one such list for each line .
train
true
40,199
def get_obj_offset(types, member_list):
    """Resolve a member access path to a (byte offset, final type) pair.

    *types* maps type names to (size, member_dict) where member_dict
    maps member names to (offset, (type_name, ...)).  *member_list* is
    [type, member, member, ...]; for array members the next path entry
    is the numeric element index.

    Note: reverses *member_list* in place, as the original did.
    Raises Exception for unknown types or members.
    """
    member_list.reverse()
    current_type = member_list.pop()
    offset = 0
    while len(member_list) > 0:
        if current_type == 'array':
            # Element type is recorded on the previously visited member;
            # the next path entry is the element index.
            current_type = member_dict[current_member][1][2][0]
            if current_type in builtin_types:
                current_type_size = builtin_size(current_type)
            else:
                current_type_size = obj_size(types, current_type)
            index = member_list.pop()
            offset += index * current_type_size
            continue
        elif current_type not in types:
            # (was dict.has_key(), which is Python 2 only)
            raise Exception('Invalid type ' + current_type)
        member_dict = types[current_type][1]
        current_member = member_list.pop()
        if current_member not in member_dict:
            # (was dict.has_key(), which is Python 2 only)
            raise Exception(('Invalid member %s in type %s' % (current_member, current_type)))
        offset += member_dict[current_member][0]
        current_type = member_dict[current_member][1][0]
    return (offset, current_type)
[ "def", "get_obj_offset", "(", "types", ",", "member_list", ")", ":", "member_list", ".", "reverse", "(", ")", "current_type", "=", "member_list", ".", "pop", "(", ")", "offset", "=", "0", "while", "(", "len", "(", "member_list", ")", ">", "0", ")", ":"...
returns the pair for a given list .
train
false
40,200
def test_ast_bad_type():
    """hy_compile must raise HyCompileError for a non-AST (plain string) input."""
    try:
        hy_compile(u'foo', u'__main__')
        # Reached only if compilation wrongly succeeded.
        assert (True is False)
    except HyCompileError:
        pass
[ "def", "test_ast_bad_type", "(", ")", ":", "try", ":", "hy_compile", "(", "u'foo'", ",", "u'__main__'", ")", "assert", "(", "True", "is", "False", ")", "except", "HyCompileError", ":", "pass" ]
make sure ast breakage can happen .
train
false
40,201
def _pairwise_broadcast(shape1, shape2):
    """Broadcast two shapes against each other, axis by axis.

    The shorter shape is left-padded with 1s to a common rank, then
    each axis pair is resolved via _broadcast_axis (which raises
    ValueError when broadcasting fails).
    """
    s1, s2 = tuple(shape1), tuple(shape2)
    rank = max(len(s1), len(s2))
    s1 = (1,) * (rank - len(s1)) + s1
    s2 = (1,) * (rank - len(s2)) + s2
    return tuple(_broadcast_axis(a, b) for a, b in zip(s1, s2))
[ "def", "_pairwise_broadcast", "(", "shape1", ",", "shape2", ")", ":", "(", "shape1", ",", "shape2", ")", "=", "map", "(", "tuple", ",", "[", "shape1", ",", "shape2", "]", ")", "while", "(", "len", "(", "shape1", ")", "<", "len", "(", "shape2", ")",...
left-pad the shorter shape with 1s and broadcast the two shapes axis by axis ; raises ValueError if broadcast fails .
train
false
40,202
def get_fontext_synonyms(fontext):
    """Return the tuple of file extensions synonymous with *fontext*.

    'ttf' and 'otf' form one family; 'afm' stands alone.  Raises
    KeyError for any other extension.
    """
    synonyms = {
        'ttf': ('ttf', 'otf'),
        'otf': ('ttf', 'otf'),
        'afm': ('afm',),
    }
    return synonyms[fontext]
[ "def", "get_fontext_synonyms", "(", "fontext", ")", ":", "return", "{", "'ttf'", ":", "(", "'ttf'", ",", "'otf'", ")", ",", "'otf'", ":", "(", "'ttf'", ",", "'otf'", ")", ",", "'afm'", ":", "(", "'afm'", ",", ")", "}", "[", "fontext", "]" ]
return a list of file extensions extensions that are synonyms for the given file extension *fileext* .
train
false
40,204
def _get_cache_dir():
    """Return the pillar_s3fs cache directory, creating it on first use."""
    cache_dir = os.path.join(__opts__['cachedir'], 'pillar_s3fs')
    if os.path.isdir(cache_dir):
        return cache_dir
    # First use: create the cache directory tree.
    log.debug('Initializing S3 Pillar Cache')
    os.makedirs(cache_dir)
    return cache_dir
[ "def", "_get_cache_dir", "(", ")", ":", "cache_dir", "=", "os", ".", "path", ".", "join", "(", "__opts__", "[", "'cachedir'", "]", ",", "'pillar_s3fs'", ")", "if", "(", "not", "os", ".", "path", ".", "isdir", "(", "cache_dir", ")", ")", ":", "log", ...
get pillar cache directory .
train
true
40,206
def create_struct_proxy(fe_type, kind='value'):
    """Return a StructProxy subclass specialized for *fe_type*.

    *kind* selects the base class: 'value' -> ValueStructProxy,
    'data' -> DataStructProxy.  Generated classes are memoized in
    _struct_proxy_cache keyed on (fe_type, kind), so repeated calls
    return the same class object.
    """
    cache_key = (fe_type, kind)
    res = _struct_proxy_cache.get(cache_key)
    if (res is None):
        base = {'value': ValueStructProxy, 'data': DataStructProxy}[kind]
        # e.g. 'ValueStructProxy_<fe_type>' -- purely for debuggability.
        clsname = ((base.__name__ + '_') + str(fe_type))
        bases = (base,)
        clsmembers = dict(_fe_type=fe_type)
        res = type(clsname, bases, clsmembers)
        _struct_proxy_cache[cache_key] = res
    return res
[ "def", "create_struct_proxy", "(", "fe_type", ",", "kind", "=", "'value'", ")", ":", "cache_key", "=", "(", "fe_type", ",", "kind", ")", "res", "=", "_struct_proxy_cache", ".", "get", "(", "cache_key", ")", "if", "(", "res", "is", "None", ")", ":", "ba...
returns a specialized structproxy subclass for the given fe_type .
train
false
40,207
def missing_whitespace(logical_line):
    """Yield (offset, 'E231 ...') where ',', ';' or ':' lacks trailing whitespace.

    Two idioms are deliberately exempt: the ':' inside a slice such as
    x[1:4] and a ',' immediately before ')' as in one-element tuples
    like (3,).
    """
    line = logical_line
    for index in range((len(line) - 1)):
        char = line[index]
        if ((char in ',;:') and (line[(index + 1)] not in WHITESPACE)):
            before = line[:index]
            # Slice colon: an unclosed '[' opened more recently than any '{'.
            if ((char == ':') and (before.count('[') > before.count(']')) and (before.rfind('{') < before.rfind('['))):
                continue
            # Allow a one-element tuple: (3,)
            if ((char == ',') and (line[(index + 1)] == ')')):
                continue
            (yield (index, ("E231 missing whitespace after '%s'" % char)))
[ "def", "missing_whitespace", "(", "logical_line", ")", ":", "line", "=", "logical_line", "for", "index", "in", "range", "(", "(", "len", "(", "line", ")", "-", "1", ")", ")", ":", "char", "=", "line", "[", "index", "]", "if", "(", "(", "char", "in"...
jcr: each comma .
train
true
40,208
@common_exceptions_400
def view_success(request):
    """Dummy test view that returns a plain 'success' HTTP response."""
    return HttpResponse('success')
[ "@", "common_exceptions_400", "def", "view_success", "(", "request", ")", ":", "return", "HttpResponse", "(", "'success'", ")" ]
a dummy view for testing that returns a simple http response .
train
false
40,209
def takes_arguments(function, *named_arguments):
    """Return the subset of *named_arguments* that *function* accepts."""
    requested = set(named_arguments)
    return requested & set(arguments(function))
[ "def", "takes_arguments", "(", "function", ",", "*", "named_arguments", ")", ":", "return", "set", "(", "named_arguments", ")", ".", "intersection", "(", "arguments", "(", "function", ")", ")" ]
returns the arguments that a function takes from a list of requested arguments .
train
false
40,210
@cache_permission
def can_translate(user, translation):
    """Check whether the user may translate the given translation.

    Delegates to can_edit() with the 'trans.save_translation' permission.
    """
    return can_edit(user, translation, 'trans.save_translation')
[ "@", "cache_permission", "def", "can_translate", "(", "user", ",", "translation", ")", ":", "return", "can_edit", "(", "user", ",", "translation", ",", "'trans.save_translation'", ")" ]
checks whether user can translate given translation .
train
false
40,212
def is_git_sha(text):
    """Return whether *text* is probably a git SHA.

    Accepts the full 40-character form and the common 7-character
    abbreviation, in upper or lower case hex.
    """
    if len(text) not in (40, 7):
        return False
    # Explicit hex-digit check: int(text, 16) would wrongly accept '0x'
    # prefixes, '+'/'-' signs, underscores and surrounding whitespace.
    return all(c in '0123456789abcdefABCDEF' for c in text)
[ "def", "is_git_sha", "(", "text", ")", ":", "if", "(", "len", "(", "text", ")", "in", "(", "40", ",", "7", ")", ")", ":", "try", ":", "int", "(", "text", ",", "16", ")", "return", "True", "except", "ValueError", ":", "pass", "return", "False" ]
return whether this is probably a git sha .
train
false
40,213
def getPathsByKeys(keys, xmlElement):
    """Collect the paths for every key, concatenated in key order."""
    collected = []
    for key in keys:
        collected.extend(getPathsByKey(key, xmlElement))
    return collected
[ "def", "getPathsByKeys", "(", "keys", ",", "xmlElement", ")", ":", "pathsByKeys", "=", "[", "]", "for", "key", "in", "keys", ":", "pathsByKeys", "+=", "getPathsByKey", "(", "key", ",", "xmlElement", ")", "return", "pathsByKeys" ]
get paths by keys .
train
false
40,214
def histogram(a, bins=10, range=None, weights=None, **kwargs):
    """Enhanced histogram, mirroring numpy.histogram.

    Unlike numpy.histogram, *bins* may also be a string selecting a
    bin-width estimator: 'blocks', 'knuth', 'scott' or 'freedman'.
    Weights are not supported together with the string estimators.
    """
    if isinstance(bins, six.string_types):
        a = np.asarray(a).ravel()
        if (weights is not None):
            raise NotImplementedError(u'weights are not yet supported for the enhanced histogram')
        if (range is not None):
            # Restrict the data to the requested range before estimating bins.
            a = a[((a >= range[0]) & (a <= range[1]))]
        if (bins == u'blocks'):
            bins = bayesian_blocks(a)
        elif (bins == u'knuth'):
            (da, bins) = knuth_bin_width(a, True)
        elif (bins == u'scott'):
            (da, bins) = scott_bin_width(a, True)
        elif (bins == u'freedman'):
            (da, bins) = freedman_bin_width(a, True)
        else:
            raise ValueError(u"unrecognized bin code: '{}'".format(bins))
    return np.histogram(a, bins=bins, range=range, weights=weights, **kwargs)
[ "def", "histogram", "(", "a", ",", "bins", "=", "10", ",", "range", "=", "None", ",", "weights", "=", "None", ",", "**", "kwargs", ")", ":", "if", "isinstance", "(", "bins", ",", "six", ".", "string_types", ")", ":", "a", "=", "np", ".", "asarray...
calculates an enhanced histogram of the given data ; bins may also name a bin-width estimator ( blocks , knuth , scott , freedman ) .
train
false
40,215
def is_effective_group(group_id_or_name):
    """Return True if *group_id_or_name* matches the process's effective group.

    Matches either the numeric effective GID (compared as strings, so
    int and str GIDs both work) or the group's name from the group
    database.
    """
    egid = os.getegid()
    # Fast path: numeric match, without hitting the group database.
    if str(group_id_or_name) == str(egid):
        return True
    return group_id_or_name == grp.getgrgid(egid).gr_name
[ "def", "is_effective_group", "(", "group_id_or_name", ")", ":", "egid", "=", "os", ".", "getegid", "(", ")", "if", "(", "str", "(", "group_id_or_name", ")", "==", "str", "(", "egid", ")", ")", ":", "return", "True", "effective_group_name", "=", "grp", "....
returns true if group_id_or_name is effective group .
train
false
40,222
def has_perm_or_owns_or_403(perm, owner_attr, obj_lookup, perm_obj_lookup, **kwargs):
    """Like permission_required_or_403, but owners are also granted access.

    Anonymous users always get 403.  Authenticated users pass when the
    access check grants ownership-based permission on the looked-up
    object, or when they hold *perm* directly.
    """
    def decorator(view_func):
        def _wrapped_view(request, *args, **kwargs):
            user = request.user
            if user.is_authenticated():
                # Resolve the target object and the permission object
                # from the view kwargs.
                obj = _resolve_lookup(obj_lookup, kwargs)
                perm_obj = _resolve_lookup(perm_obj_lookup, kwargs)
                granted = access.has_perm_or_owns(user, perm, obj, perm_obj, owner_attr)
                if (granted or user.has_perm(perm)):
                    return view_func(request, *args, **kwargs)
            return HttpResponseForbidden()
        return wraps(view_func)(_wrapped_view)
    return decorator
[ "def", "has_perm_or_owns_or_403", "(", "perm", ",", "owner_attr", ",", "obj_lookup", ",", "perm_obj_lookup", ",", "**", "kwargs", ")", ":", "def", "decorator", "(", "view_func", ")", ":", "def", "_wrapped_view", "(", "request", ",", "*", "args", ",", "**", ...
act like permission_required_or_403 but also grant permission to owners .
train
false
40,224
def read_int_list(start, end, addr_space):
    """Read the address range [start, end) and split it into integers."""
    return int_list(read_addr_range(start, end, addr_space), (end - start))
[ "def", "read_int_list", "(", "start", ",", "end", ",", "addr_space", ")", ":", "return", "int_list", "(", "read_addr_range", "(", "start", ",", "end", ",", "addr_space", ")", ",", "(", "end", "-", "start", ")", ")" ]
read a number of pages and split it into integers .
train
false
40,225
def jacobian1(f, v):
    """Jacobian of expression *f* with respect to *v* (Theano).

    Flattens f and maps gradient1 over each scalar element.
    """
    f = tt.flatten(f)
    idx = tt.arange(f.shape[0])
    def grad_i(i):
        # Gradient of the i-th scalar component of f w.r.t. v.
        return gradient1(f[i], v)
    return theano.map(grad_i, idx)[0]
[ "def", "jacobian1", "(", "f", ",", "v", ")", ":", "f", "=", "tt", ".", "flatten", "(", "f", ")", "idx", "=", "tt", ".", "arange", "(", "f", ".", "shape", "[", "0", "]", ")", "def", "grad_i", "(", "i", ")", ":", "return", "gradient1", "(", "...
jacobian of f wrt v .
train
false
40,226
def test_passing_ImageHDU():
    """WCS built from an HDU must match WCS built from that HDU's header."""
    path = get_pkg_data_filename(u'data/validate.fits')
    hdulist = fits.open(path)
    # PrimaryHDU vs. its header.
    wcs_hdu = wcs.WCS(hdulist[0])
    wcs_header = wcs.WCS(hdulist[0].header)
    assert wcs_hdu.wcs.compare(wcs_header.wcs)
    # ImageHDU vs. its header.
    wcs_hdu = wcs.WCS(hdulist[1])
    wcs_header = wcs.WCS(hdulist[1].header)
    assert wcs_hdu.wcs.compare(wcs_header.wcs)
    hdulist.close()
[ "def", "test_passing_ImageHDU", "(", ")", ":", "path", "=", "get_pkg_data_filename", "(", "u'data/validate.fits'", ")", "hdulist", "=", "fits", ".", "open", "(", "path", ")", "wcs_hdu", "=", "wcs", ".", "WCS", "(", "hdulist", "[", "0", "]", ")", "wcs_heade...
passing imagehdu or primaryhdu and comparing it with wcs initialized from header .
train
false
40,227
def _get_call_line(in_verbose=False):
    """Return the source line of the caller, or 'unknown'.

    Walks 2 frames up the stack normally, 4 when invoked through the
    verbose wrapper, so the reported line is the user's call site
    rather than internal machinery.
    """
    back = (2 if (not in_verbose) else 4)
    call_frame = inspect.getouterframes(inspect.currentframe())[back][0]
    context = inspect.getframeinfo(call_frame).code_context
    # code_context is None when source is unavailable (e.g. interactive).
    context = ('unknown' if (context is None) else context[0].strip())
    return context
[ "def", "_get_call_line", "(", "in_verbose", "=", "False", ")", ":", "back", "=", "(", "2", "if", "(", "not", "in_verbose", ")", "else", "4", ")", "call_frame", "=", "inspect", ".", "getouterframes", "(", "inspect", ".", "currentframe", "(", ")", ")", "...
get the call line from within a function .
train
false
40,228
def installed_from_source(version=VERSION):
    """Require a Redis server of *version* built and installed from source.

    Installs the distro family's build tools, creates the redis user
    and data directory, then downloads, compiles and copies the Redis
    binaries into /opt/redis-<version> owned by the redis user.
    """
    from fabtools.require import directory as require_directory
    from fabtools.require import file as require_file
    from fabtools.require import user as require_user
    from fabtools.require.deb import packages as require_deb_packages
    from fabtools.require.rpm import packages as require_rpm_packages
    family = distrib_family()
    # Build prerequisites differ per distro family.
    if (family == 'debian'):
        require_deb_packages(['build-essential'])
    elif (family == 'redhat'):
        require_rpm_packages(['gcc', 'make'])
    require_user('redis', home='/var/lib/redis', system=True)
    require_directory('/var/lib/redis', owner='redis', use_sudo=True)
    dest_dir = ('/opt/redis-%(version)s' % locals())
    require_directory(dest_dir, use_sudo=True, owner='redis')
    # Skip the build entirely when the server binary is already in place.
    if (not is_file(('%(dest_dir)s/redis-server' % locals()))):
        with cd('/tmp'):
            tarball = ('redis-%(version)s.tar.gz' % locals())
            url = (_download_url(version) + tarball)
            require_file(tarball, url=url)
            run(('tar xzf %(tarball)s' % locals()))
            with cd(('redis-%(version)s' % locals())):
                run('make')
                for filename in BINARIES:
                    run_as_root(('cp -pf src/%(filename)s %(dest_dir)s/' % locals()))
                    run_as_root(('chown redis: %(dest_dir)s/%(filename)s' % locals()))
[ "def", "installed_from_source", "(", "version", "=", "VERSION", ")", ":", "from", "fabtools", ".", "require", "import", "directory", "as", "require_directory", "from", "fabtools", ".", "require", "import", "file", "as", "require_file", "from", "fabtools", ".", "...
require redis to be built and installed from source .
train
false
40,229
def connection_ip_list(name, addresses=None, grant_by_default=False, server=_DEFAULT_SERVER):
    """Manage the IPGrant list controlling SMTP connection permissions.

    name
        State name (reported back in the result dict).
    addresses
        Mapping of addresses to configure; falsy clears the list.
    grant_by_default
        Whether unlisted addresses are granted access.
    server
        The SMTP server instance to configure.
    """
    ret = {'name': name, 'changes': {}, 'comment': str(), 'result': None}
    if (not addresses):
        addresses = dict()
    current_addresses = __salt__['win_smtp_server.get_connection_ip_list'](server=server)
    if (addresses == current_addresses):
        # Already in the desired state; nothing to do.
        ret['comment'] = 'IPGrant already contains the provided addresses.'
        ret['result'] = True
    elif __opts__['test']:
        # Dry run: report the pending change without applying it.
        ret['comment'] = 'IPGrant will be changed.'
        ret['changes'] = {'old': current_addresses, 'new': addresses}
    else:
        ret['comment'] = 'Set IPGrant to contain the provided addresses.'
        ret['changes'] = {'old': current_addresses, 'new': addresses}
        ret['result'] = __salt__['win_smtp_server.set_connection_ip_list'](addresses=addresses, grant_by_default=grant_by_default, server=server)
    return ret
[ "def", "connection_ip_list", "(", "name", ",", "addresses", "=", "None", ",", "grant_by_default", "=", "False", ",", "server", "=", "_DEFAULT_SERVER", ")", ":", "ret", "=", "{", "'name'", ":", "name", ",", "'changes'", ":", "{", "}", ",", "'comment'", ":...
manage ip list for smtp connections .
train
true
40,230
@cleanup
def test_violin_point_mass():
    """Violin plot should handle a point-mass distribution gracefully."""
    plt.violinplot(np.array([0, 0]))
[ "@", "cleanup", "def", "test_violin_point_mass", "(", ")", ":", "plt", ".", "violinplot", "(", "np", ".", "array", "(", "[", "0", ",", "0", "]", ")", ")" ]
violin plot should handle point mass pdf gracefully .
train
false
40,231
def _get_deprecated_option(key):
    """Return the deprecation metadata registered for *key*, or None.

    Missing keys in the module-level _deprecated_options registry simply
    mean the option is not deprecated; dict.get expresses that directly.
    """
    return _deprecated_options.get(key)
[ "def", "_get_deprecated_option", "(", "key", ")", ":", "try", ":", "d", "=", "_deprecated_options", "[", "key", "]", "except", "KeyError", ":", "return", "None", "else", ":", "return", "d" ]
retrieves the metadata for a deprecated option .
train
false
40,232
def html_safe(klass):
    """Class decorator that defines __html__() and HTML-escapes __str__().

    The class must define its own __str__() and must not already define
    __html__().  The original __str__() result is wrapped in mark_safe(),
    and __html__() simply defers to str().
    """
    class_dict = klass.__dict__
    if '__html__' in class_dict:
        raise ValueError("can't apply @html_safe to %s because it defines __html__()." % klass.__name__)
    if '__str__' not in class_dict:
        raise ValueError("can't apply @html_safe to %s because it doesn't define __str__()." % klass.__name__)
    original_str = klass.__str__
    klass.__str__ = (lambda self: mark_safe(original_str(self)))
    klass.__html__ = (lambda self: str(self))
    return klass
[ "def", "html_safe", "(", "klass", ")", ":", "if", "(", "'__html__'", "in", "klass", ".", "__dict__", ")", ":", "raise", "ValueError", "(", "(", "\"can't apply @html_safe to %s because it defines __html__().\"", "%", "klass", ".", "__name__", ")", ")", "if", "(",...
a decorator that defines the __html__ method .
train
false
40,233
@removals.remove(message='use get_ipv6_addr_by_EUI64 from oslo_utils.netutils', version='Newton', removal_version='Ocata')
def get_ipv6_addr_by_EUI64(cidr, mac):
    """Generate an IPv6 address from *cidr* and *mac* using EUI-64.

    Raises TypeError for IPv4 prefixes or malformed prefix/MAC input.
    Deprecated: use oslo_utils.netutils instead.
    """
    is_ipv4 = netutils.is_valid_ipv4(cidr)
    if is_ipv4:
        msg = 'Unable to generate IP address by EUI64 for IPv4 prefix'
        raise TypeError(msg)
    try:
        eui64 = int(netaddr.EUI(mac).eui64())
        prefix = netaddr.IPNetwork(cidr)
        # XOR with 1 << 57 flips the universal/local bit of the 64-bit
        # interface identifier (EUI-64 per RFC 4291).
        return netaddr.IPAddress(((prefix.first + eui64) ^ (1 << 57)))
    except (ValueError, netaddr.AddrFormatError):
        raise TypeError(('Bad prefix or mac format for generating IPv6 address by EUI-64: %(prefix)s, %(mac)s:' % {'prefix': cidr, 'mac': mac}))
    except TypeError:
        raise TypeError(('Bad prefix type for generate IPv6 address by EUI-64: %s' % cidr))
[ "@", "removals", ".", "remove", "(", "message", "=", "'use get_ipv6_addr_by_EUI64 from oslo_utils.netutils'", ",", "version", "=", "'Newton'", ",", "removal_version", "=", "'Ocata'", ")", "def", "get_ipv6_addr_by_EUI64", "(", "cidr", ",", "mac", ")", ":", "is_ipv4",...
generate a ipv6 addr by eui-64 with cidr and mac .
train
false
40,234
@pytest.mark.parametrize('model', CLASSES)
def test_column_width_sum(model):
    """Each model's column widths (percentages) must sum to exactly 100."""
    assert (sum(model.COLUMN_WIDTHS) == 100)
[ "@", "pytest", ".", "mark", ".", "parametrize", "(", "'model'", ",", "CLASSES", ")", "def", "test_column_width_sum", "(", "model", ")", ":", "assert", "(", "sum", "(", "model", ".", "COLUMN_WIDTHS", ")", "==", "100", ")" ]
test if the sum of the widths asserts to 100 .
train
false
40,235
def getIsIdentityTetragridOrNone(tetragrid):
    """Return True if the tetragrid is None or the identity tetragrid.

    A missing (None) matrix is treated as the identity transform.
    """
    # 'is None' instead of '== None' (PEP 8 E711): identity comparison
    # cannot be hijacked by a custom __eq__ on matrix-like objects.
    if tetragrid is None:
        return True
    return getIsIdentityTetragrid(tetragrid)
[ "def", "getIsIdentityTetragridOrNone", "(", "tetragrid", ")", ":", "if", "(", "tetragrid", "==", "None", ")", ":", "return", "True", "return", "getIsIdentityTetragrid", "(", "tetragrid", ")" ]
determine if the tetragrid is none or if it is the identity tetragrid .
train
false
40,236
def get_or_create_db_cart(cart_queryset=Cart.objects.all()):
    """View decorator: inject a persistent Cart as the view's second argument.

    Fetches (or creates) the cart for the request and, for anonymous
    users, stores the cart token in a cookie on the response.

    NOTE(review): the queryset default is evaluated once at import time
    (mutable-default pattern) -- presumably intentional, verify.
    """
    def get_cart(view):
        @wraps(view)
        def func(request, *args, **kwargs):
            cart = get_or_create_cart_from_request(request, cart_queryset)
            response = view(request, cart, *args, **kwargs)
            if (not request.user.is_authenticated()):
                # Anonymous carts are tracked via a cookie token.
                set_cart_cookie(cart, response)
            return response
        return func
    return get_cart
[ "def", "get_or_create_db_cart", "(", "cart_queryset", "=", "Cart", ".", "objects", ".", "all", "(", ")", ")", ":", "def", "get_cart", "(", "view", ")", ":", "@", "wraps", "(", "view", ")", "def", "func", "(", "request", ",", "*", "args", ",", "**", ...
get cart or create if necessary .
train
false
40,237
def fix_whitespace(line, offset, replacement):
    """Replace the whitespace around *offset* with *replacement*.

    Strips whitespace/continuation characters on both sides of the
    offset and joins the halves with *replacement*; lines whose right
    half starts a comment ('#') are returned unchanged.

    NOTE(review): the literal ' DCTB ' below appears to be a
    corpus-mangled tab character -- verify against the original source.
    """
    left = line[:offset].rstrip(u'\n\r DCTB \\')
    right = line[offset:].lstrip(u'\n\r DCTB \\')
    if right.startswith(u'#'):
        return line
    else:
        return ((left + replacement) + right)
[ "def", "fix_whitespace", "(", "line", ",", "offset", ",", "replacement", ")", ":", "left", "=", "line", "[", ":", "offset", "]", ".", "rstrip", "(", "u'\\n\\r DCTB \\\\'", ")", "right", "=", "line", "[", "offset", ":", "]", ".", "lstrip", "(", "u'\\n\...
replace whitespace at offset and return fixed line .
train
true
40,238
def notify_parse_error(subproject, translation, error, filename):
    """Send parse-error notifications for a subproject.

    Mails every user subscribed to merge failures for the project, all
    project owners, and the site ADMINS address, then dispatches the
    whole batch in a single send_mails call.
    """
    subscriptions = Profile.objects.subscribed_merge_failure(subproject.project)
    users = set()
    mails = []
    for subscription in subscriptions:
        mails.append(subscription.notify_parse_error(subproject, translation, error, filename))
        users.add(subscription.user_id)
    for owner in subproject.project.owners.all():
        mails.append(owner.profile.notify_parse_error(subproject, translation, error, filename))
    # Always notify the admins, falling back to the subproject when
    # there is no translation context.
    mails.append(get_notification_email(u'en', u'ADMINS', u'parse_error', (translation if (translation is not None) else subproject), {u'subproject': subproject, u'translation': translation, u'error': error, u'filename': filename}))
    send_mails(mails)
[ "def", "notify_parse_error", "(", "subproject", ",", "translation", ",", "error", ",", "filename", ")", ":", "subscriptions", "=", "Profile", ".", "objects", ".", "subscribed_merge_failure", "(", "subproject", ".", "project", ")", "users", "=", "set", "(", ")"...
notification on parse error .
train
false
40,239
def render_atom(children):
    """Render an atom node, wrapping it in parens when present.

    Three children means '(', inner, ')'; otherwise the single child
    is returned unchanged.
    """
    if (len(children) == 3):
        return LatexRendered(children[1].latex, parens=children[0].latex, tall=children[1].tall)
    else:
        return children[0]
[ "def", "render_atom", "(", "children", ")", ":", "if", "(", "len", "(", "children", ")", "==", "3", ")", ":", "return", "LatexRendered", "(", "children", "[", "1", "]", ".", "latex", ",", "parens", "=", "children", "[", "0", "]", ".", "latex", ",",...
properly handle parens .
train
false
40,240
def ManifestFromResFile(filename, names=None, languages=None):
    """Create and return a Manifest instance from a resource in a DLL/EXE.

    Walks the nested resource dict down to the first RT_MANIFEST leaf;
    raises InvalidManifestError when no matching resource exists.
    (Python 2 code: relies on dict.keys() returning a list.)
    """
    res = GetManifestResources(filename, names, languages)
    pth = []
    if (res and res[RT_MANIFEST]):
        while (isinstance(res, dict) and res.keys()):
            # Descend one level, remembering the key path for reporting.
            key = res.keys()[0]
            pth.append(str(key))
            res = res[key]
    if isinstance(res, dict):
        raise InvalidManifestError(("No matching manifest resource found in '%s'" % filename))
    manifest = Manifest()
    manifest.filename = ':'.join(([filename] + pth))
    manifest.parse_string(res, False)
    return manifest
[ "def", "ManifestFromResFile", "(", "filename", ",", "names", "=", "None", ",", "languages", "=", "None", ")", ":", "res", "=", "GetManifestResources", "(", "filename", ",", "names", ",", "languages", ")", "pth", "=", "[", "]", "if", "(", "res", "and", ...
create and return manifest instance from resource in dll/exe file .
train
true
40,241
def _generate_cmap(name, lutsize):
    """Generate the registered colormap *name* with *lutsize* entries.

    Dispatches on the spec format: segment data (has a 'red' key), an
    explicit color list ('listed'), or a list for from_list().
    """
    spec = datad[name]
    if (u'red' in spec):
        return colors.LinearSegmentedColormap(name, spec, lutsize)
    elif (u'listed' in spec):
        return colors.ListedColormap(spec[u'listed'], name)
    else:
        return colors.LinearSegmentedColormap.from_list(name, spec, lutsize)
[ "def", "_generate_cmap", "(", "name", ",", "lutsize", ")", ":", "spec", "=", "datad", "[", "name", "]", "if", "(", "u'red'", "in", "spec", ")", ":", "return", "colors", ".", "LinearSegmentedColormap", "(", "name", ",", "spec", ",", "lutsize", ")", "eli...
generates the requested cmap from its *name* .
train
false
40,243
def build_alert_hooks_from_path(patterns_path, warnfile):
    """Like build_alert_hooks, but reading patterns from *patterns_path*.

    If a sibling 'site_<name>_overrides' file exists next to the
    patterns file, it is passed along as the overrides file.  Both
    files are always closed.
    """
    (dirname, basename) = os.path.split(patterns_path)
    site_overrides_basename = (('site_' + basename) + '_overrides')
    site_overrides_path = os.path.join(dirname, site_overrides_basename)
    site_overrides_file = None
    patterns_file = open(patterns_path)
    try:
        if os.path.exists(site_overrides_path):
            site_overrides_file = open(site_overrides_path)
        try:
            return build_alert_hooks(patterns_file, warnfile, overrides_file=site_overrides_file)
        finally:
            if site_overrides_file:
                site_overrides_file.close()
    finally:
        patterns_file.close()
[ "def", "build_alert_hooks_from_path", "(", "patterns_path", ",", "warnfile", ")", ":", "(", "dirname", ",", "basename", ")", "=", "os", ".", "path", ".", "split", "(", "patterns_path", ")", "site_overrides_basename", "=", "(", "(", "'site_'", "+", "basename", ...
same as build_alert_hooks .
train
false
40,244
def returner(ret):
    """Send a summary of a job result as an SMS via the Twilio REST API.

    Returns True when the message was sent, False on a Twilio API error,
    and None when required options (sid/token/to/from) are missing.
    """
    opts = _get_options(ret)
    sid = opts.get('sid', None)
    token = opts.get('token', None)
    sender = opts.get('from', None)
    receiver = opts.get('to', None)
    if sid is None or token is None:
        log.error('Twilio sid/authentication token missing')
        return None
    if sender is None or receiver is None:
        log.error('Twilio to/from fields are missing')
        return None
    client = TwilioRestClient(sid, token)
    try:
        client.messages.create(
            body='Minion: {0}\nCmd: {1}\nSuccess: {2}\n\nJid: {3}'.format(
                ret['id'], ret['fun'], ret['success'], ret['jid']),
            to=receiver, from_=sender)
    except TwilioRestException as exc:
        # Point the operator at Twilio's error-code reference page.
        log.error('Twilio [https://www.twilio.com/docs/errors/{0}]'.format(exc.code))
        return False
    return True
[ "def", "returner", "(", "ret", ")", ":", "_options", "=", "_get_options", "(", "ret", ")", "sid", "=", "_options", ".", "get", "(", "'sid'", ",", "None", ")", "token", "=", "_options", ".", "get", "(", "'token'", ",", "None", ")", "sender", "=", "_...
return data to an influxdb data store .
train
true
40,245
@command(usage='echo arguments') def echo(args): import lixian_cli_parser print ' '.join(lixian_cli_parser.expand_command_line(args))
[ "@", "command", "(", "usage", "=", "'echo arguments'", ")", "def", "echo", "(", "args", ")", ":", "import", "lixian_cli_parser", "print", "' '", ".", "join", "(", "lixian_cli_parser", ".", "expand_command_line", "(", "args", ")", ")" ]
echo calls to a function .
train
false
40,246
def test_url():
    """cv.url must reject malformed values and accept well-formed URLs."""
    schema = vol.Schema(cv.url)
    rejected = ('invalid', None, 100, 'htp://ha.io', 'http//ha.io',
                'http://??,**', 'https://??,**')
    for value in rejected:
        with pytest.raises(vol.MultipleInvalid):
            schema(value)
    accepted = ('http://localhost', 'https://localhost/test/index.html',
                'http://home-assistant.io', 'http://home-assistant.io/test/',
                'https://community.home-assistant.io/')
    for value in accepted:
        assert schema(value)
[ "def", "test_url", "(", ")", ":", "schema", "=", "vol", ".", "Schema", "(", "cv", ".", "url", ")", "for", "value", "in", "(", "'invalid'", ",", "None", ",", "100", ",", "'htp://ha.io'", ",", "'http//ha.io'", ",", "'http://??,**'", ",", "'https://??,**'",...
test url .
train
false
40,247
def _get_score_from_csm(csm_scores, block, weight):
    """Return score values for *block* from the courseware student module.

    Yields a tuple of (raw_earned, raw_possible, <weighted_score(...) items>,
    attempted), or None (implicitly) when there is no score for the block
    or its total is unset. A score with ``correct`` unset counts as not
    attempted and earns 0.0.
    """
    score = csm_scores.get(block.location)
    if not (score and score.total is not None):
        # No usable score recorded for this block.
        return None
    attempted = score.correct is not None
    raw_earned = score.correct if attempted else 0.0
    raw_possible = score.total
    return ((raw_earned, raw_possible)
            + weighted_score(raw_earned, raw_possible, weight)
            + (attempted,))
[ "def", "_get_score_from_csm", "(", "csm_scores", ",", "block", ",", "weight", ")", ":", "score", "=", "csm_scores", ".", "get", "(", "block", ".", "location", ")", "has_valid_score", "=", "(", "score", "and", "(", "score", ".", "total", "is", "not", "Non...
returns the score values from the courseware student module .
train
false
40,248
def fatal(msg):
    """Print 'FATAL: <msg>' to stderr and terminate with exit status 1."""
    sys.stderr.write('FATAL: ')
    sys.stderr.write('%s\n' % msg)
    sys.exit(1)
[ "def", "fatal", "(", "msg", ")", ":", "sys", ".", "stderr", ".", "write", "(", "'FATAL: '", ")", "sys", ".", "stderr", ".", "write", "(", "msg", ")", "sys", ".", "stderr", ".", "write", "(", "'\\n'", ")", "sys", ".", "exit", "(", "1", ")" ]
a fatal error .
train
false
40,249
def SetCTypesForLibrary(libname, fn_table):
    """Load *libname* and declare ctypes signatures for its functions.

    Each ``fn_table`` entry is (function_name, argtypes, restype); the
    matching attribute on the loaded library gets those types assigned.
    Raises ErrorLibNotFound when the library cannot be located.
    Returns the loaded CDLL object.
    """
    libpath = ctypes.util.find_library(libname)
    if not libpath:
        raise ErrorLibNotFound('Library %s not found' % libname)
    lib = ctypes.cdll.LoadLibrary(libpath)
    for name, argtypes, restype in fn_table:
        fn = getattr(lib, name)
        fn.argtypes = argtypes
        fn.restype = restype
    return lib
[ "def", "SetCTypesForLibrary", "(", "libname", ",", "fn_table", ")", ":", "libpath", "=", "ctypes", ".", "util", ".", "find_library", "(", "libname", ")", "if", "(", "not", "libpath", ")", ":", "raise", "ErrorLibNotFound", "(", "(", "'Library %s not found'", ...
set function argument types and return types for an objc library .
train
true