id_within_dataset
int64
1
55.5k
snippet
stringlengths
19
14.2k
tokens
listlengths
6
1.63k
nl
stringlengths
6
352
split_within_dataset
stringclasses
1 value
is_duplicated
bool
2 classes
13,655
def set_event_transaction_type(action_type): get_cache('event_transaction')['type'] = action_type
[ "def", "set_event_transaction_type", "(", "action_type", ")", ":", "get_cache", "(", "'event_transaction'", ")", "[", "'type'", "]", "=", "action_type" ]
takes a string and stores it in the request cache as the user action type .
train
false
13,656
def systemInformationType9(): a = L2PseudoLength(l2pLength=1) b = TpPd(pd=6) c = MessageType(mesType=4) d = Si9RestOctets() packet = (((a / b) / c) / d) return packet
[ "def", "systemInformationType9", "(", ")", ":", "a", "=", "L2PseudoLength", "(", "l2pLength", "=", "1", ")", "b", "=", "TpPd", "(", "pd", "=", "6", ")", "c", "=", "MessageType", "(", "mesType", "=", "4", ")", "d", "=", "Si9RestOctets", "(", ")", "p...
system information type 9 section 9 .
train
true
13,657
def _access(*args, **kargs): return True
[ "def", "_access", "(", "*", "args", ",", "**", "kargs", ")", ":", "return", "True" ]
assume access to the path is allowed .
train
false
13,660
def str_count(arr, pat, flags=0): regex = re.compile(pat, flags=flags) f = (lambda x: len(regex.findall(x))) return _na_map(f, arr, dtype=int)
[ "def", "str_count", "(", "arr", ",", "pat", ",", "flags", "=", "0", ")", ":", "regex", "=", "re", ".", "compile", "(", "pat", ",", "flags", "=", "flags", ")", "f", "=", "(", "lambda", "x", ":", "len", "(", "regex", ".", "findall", "(", "x", "...
count occurrences of pattern in each string of the series/index .
train
true
13,661
def endpoint_address(address): return (address & _ENDPOINT_ADDR_MASK)
[ "def", "endpoint_address", "(", "address", ")", ":", "return", "(", "address", "&", "_ENDPOINT_ADDR_MASK", ")" ]
return the endpoint absolute address .
train
false
13,663
def diff_values(a, b, tolerance=0.0): if (isinstance(a, float) and isinstance(b, float)): if (np.isnan(a) and np.isnan(b)): return False return (not np.allclose(a, b, tolerance, 0.0)) else: return (a != b)
[ "def", "diff_values", "(", "a", ",", "b", ",", "tolerance", "=", "0.0", ")", ":", "if", "(", "isinstance", "(", "a", ",", "float", ")", "and", "isinstance", "(", "b", ",", "float", ")", ")", ":", "if", "(", "np", ".", "isnan", "(", "a", ")", ...
diff two scalar values .
train
false
13,664
def peak_signal_to_noise_ratio(true, pred): return ((10.0 * tf.log((1.0 / mean_squared_error(true, pred)))) / tf.log(10.0))
[ "def", "peak_signal_to_noise_ratio", "(", "true", ",", "pred", ")", ":", "return", "(", "(", "10.0", "*", "tf", ".", "log", "(", "(", "1.0", "/", "mean_squared_error", "(", "true", ",", "pred", ")", ")", ")", ")", "/", "tf", ".", "log", "(", "10.0"...
image quality metric based on maximal signal power vs .
train
true
13,665
def _construct_odict(load, node): import yaml omap = OrderedDict() (yield omap) if (not isinstance(node, yaml.SequenceNode)): raise yaml.constructor.ConstructorError('while constructing an ordered map', node.start_mark, 'expected a sequence, but found {}'.format(node.id), node.start_mark) for subnode in node.value: if (not isinstance(subnode, yaml.MappingNode)): raise yaml.constructor.ConstructorError('while constructing an ordered map', node.start_mark, 'expected a mapping of length 1, but found {}'.format(subnode.id), subnode.start_mark) if (len(subnode.value) != 1): raise yaml.constructor.ConstructorError('while constructing an ordered map', node.start_mark, 'expected a single mapping item, but found {} items'.format(len(subnode.value)), subnode.start_mark) (key_node, value_node) = subnode.value[0] key = load.construct_object(key_node) value = load.construct_object(value_node) omap[key] = value
[ "def", "_construct_odict", "(", "load", ",", "node", ")", ":", "import", "yaml", "omap", "=", "OrderedDict", "(", ")", "(", "yield", "omap", ")", "if", "(", "not", "isinstance", "(", "node", ",", "yaml", ".", "SequenceNode", ")", ")", ":", "raise", "...
construct ordereddict from !!omap in yaml safe load .
train
false
13,666
def cinder_except_format(logical_line): if logical_line.startswith('except:'): (yield (6, "CINDER N201: no 'except:' at least use 'except Exception:'"))
[ "def", "cinder_except_format", "(", "logical_line", ")", ":", "if", "logical_line", ".", "startswith", "(", "'except:'", ")", ":", "(", "yield", "(", "6", ",", "\"CINDER N201: no 'except:' at least use 'except Exception:'\"", ")", ")" ]
check for except: .
train
false
13,669
def AddSourceToRegistry(appName, msgDLL=None, eventLogType='Application', eventLogFlags=None): if (msgDLL is None): msgDLL = win32evtlog.__file__ hkey = win32api.RegCreateKey(win32con.HKEY_LOCAL_MACHINE, ('SYSTEM\\CurrentControlSet\\Services\\EventLog\\%s\\%s' % (eventLogType, appName))) win32api.RegSetValueEx(hkey, 'EventMessageFile', 0, win32con.REG_EXPAND_SZ, msgDLL) if (eventLogFlags is None): eventLogFlags = ((win32evtlog.EVENTLOG_ERROR_TYPE | win32evtlog.EVENTLOG_WARNING_TYPE) | win32evtlog.EVENTLOG_INFORMATION_TYPE) win32api.RegSetValueEx(hkey, 'TypesSupported', 0, win32con.REG_DWORD, eventLogFlags) win32api.RegCloseKey(hkey)
[ "def", "AddSourceToRegistry", "(", "appName", ",", "msgDLL", "=", "None", ",", "eventLogType", "=", "'Application'", ",", "eventLogFlags", "=", "None", ")", ":", "if", "(", "msgDLL", "is", "None", ")", ":", "msgDLL", "=", "win32evtlog", ".", "__file__", "h...
add a source of messages to the event log .
train
false
13,670
def check_libcloud_version(reqver=LIBCLOUD_MINIMAL_VERSION, why=None): if (not HAS_LIBCLOUD): return False if (not isinstance(reqver, (list, tuple))): raise RuntimeError("'reqver' needs to passed as a tuple or list, i.e., (0, 14, 0)") try: import libcloud except ImportError: raise ImportError('salt-cloud requires >= libcloud {0} which is not installed'.format('.'.join([str(num) for num in reqver]))) if (LIBCLOUD_VERSION_INFO >= reqver): return libcloud.__version__ errormsg = 'Your version of libcloud is {0}. '.format(libcloud.__version__) errormsg += 'salt-cloud requires >= libcloud {0}'.format('.'.join([str(num) for num in reqver])) if why: errormsg += ' for {0}'.format(why) errormsg += '. Please upgrade.' raise ImportError(errormsg)
[ "def", "check_libcloud_version", "(", "reqver", "=", "LIBCLOUD_MINIMAL_VERSION", ",", "why", "=", "None", ")", ":", "if", "(", "not", "HAS_LIBCLOUD", ")", ":", "return", "False", "if", "(", "not", "isinstance", "(", "reqver", ",", "(", "list", ",", "tuple"...
compare different libcloud versions .
train
true
13,671
def export_action_form_factory(formats): class _ExportActionForm(ActionForm, ): u'\n Action form with export format ChoiceField.\n ' file_format = forms.ChoiceField(label=_(u'Format'), choices=formats, required=False) _ExportActionForm.__name__ = str(u'ExportActionForm') return _ExportActionForm
[ "def", "export_action_form_factory", "(", "formats", ")", ":", "class", "_ExportActionForm", "(", "ActionForm", ",", ")", ":", "file_format", "=", "forms", ".", "ChoiceField", "(", "label", "=", "_", "(", "u'Format'", ")", ",", "choices", "=", "formats", ","...
returns an actionform subclass containing a choicefield populated with the given formats .
train
true
13,672
def _make_c_string_check(string): if isinstance(string, bytes): if ('\x00' in string): raise InvalidDocument('BSON keys / regex patterns must not contain a NUL character') try: _utf_8_decode(string, None, True) return (string + '\x00') except UnicodeError: raise InvalidStringData(('strings in documents must be valid UTF-8: %r' % string)) else: if ('\x00' in string): raise InvalidDocument('BSON keys / regex patterns must not contain a NUL character') return (_utf_8_encode(string)[0] + '\x00')
[ "def", "_make_c_string_check", "(", "string", ")", ":", "if", "isinstance", "(", "string", ",", "bytes", ")", ":", "if", "(", "'\\x00'", "in", "string", ")", ":", "raise", "InvalidDocument", "(", "'BSON keys / regex patterns must not contain a NUL character'", ")", ...
make a c string .
train
true
13,673
def test_install_from_file_index_hash_link(script, data): result = script.pip('install', '-i', data.index_url(), 'simple==1.0') egg_info_folder = ((script.site_packages / 'simple-1.0-py%s.egg-info') % pyversion) assert (egg_info_folder in result.files_created), str(result)
[ "def", "test_install_from_file_index_hash_link", "(", "script", ",", "data", ")", ":", "result", "=", "script", ".", "pip", "(", "'install'", ",", "'-i'", ",", "data", ".", "index_url", "(", ")", ",", "'simple==1.0'", ")", "egg_info_folder", "=", "(", "(", ...
test that a pkg can be installed from a file:// index using a link with a hash .
train
false
13,677
def ms_payload_2(payload): return {'1': 'shellcode/pyinject', '2': 'shellcode/multipyinject', '3': 'set/reverse_shell', '4': 'set/reverse_shell', '5': 'set/reverse_shell', '6': 'shellcode/alphanum'}.get(payload, 'ERROR')
[ "def", "ms_payload_2", "(", "payload", ")", ":", "return", "{", "'1'", ":", "'shellcode/pyinject'", ",", "'2'", ":", "'shellcode/multipyinject'", ",", "'3'", ":", "'set/reverse_shell'", ",", "'4'", ":", "'set/reverse_shell'", ",", "'5'", ":", "'set/reverse_shell'"...
receives the input given by the user from create_payloads .
train
false
13,678
def show_dry_run(ilo, action, **kwargs): logger.info('DRY-RUN MODE. No changes will be made.') logger.info('(CLOSED) indices may be shown that may not be acted on by action "{0}".'.format(action)) indices = sorted(ilo.indices) for idx in indices: index_closed = (ilo.index_info[idx]['state'] == 'close') logger.info('DRY-RUN: {0}: {1}{2} with arguments: {3}'.format(action, idx, (' (CLOSED)' if index_closed else ''), kwargs))
[ "def", "show_dry_run", "(", "ilo", ",", "action", ",", "**", "kwargs", ")", ":", "logger", ".", "info", "(", "'DRY-RUN MODE. No changes will be made.'", ")", "logger", ".", "info", "(", "'(CLOSED) indices may be shown that may not be acted on by action \"{0}\".'", ".", ...
log dry run output with the action which would have been executed .
train
false
13,679
@nx.utils.not_implemented_for('undirected') def is_arborescence(G): return (is_tree(G) and (max((d for (n, d) in G.in_degree())) <= 1))
[ "@", "nx", ".", "utils", ".", "not_implemented_for", "(", "'undirected'", ")", "def", "is_arborescence", "(", "G", ")", ":", "return", "(", "is_tree", "(", "G", ")", "and", "(", "max", "(", "(", "d", "for", "(", "n", ",", "d", ")", "in", "G", "."...
returns true if g is an arborescence .
train
false
13,681
def read_uic2tag(fh, byteorder, dtype, plane_count): assert ((dtype == '2I') and (byteorder == '<')) values = fh.read_array('<u4', (6 * plane_count)).reshape(plane_count, 6) return {'z_distance': (values[:, 0] / values[:, 1]), 'date_created': values[:, 2], 'time_created': values[:, 3], 'date_modified': values[:, 4], 'time_modified': values[:, 5]}
[ "def", "read_uic2tag", "(", "fh", ",", "byteorder", ",", "dtype", ",", "plane_count", ")", ":", "assert", "(", "(", "dtype", "==", "'2I'", ")", "and", "(", "byteorder", "==", "'<'", ")", ")", "values", "=", "fh", ".", "read_array", "(", "'<u4'", ",",...
read metamorph stk uic2tag from file and return as dictionary .
train
false
13,682
def list_hosts_by_cluster(kwargs=None, call=None): if (call != 'function'): raise SaltCloudSystemExit('The list_hosts_by_cluster function must be called with -f or --function.') ret = {} cluster_name = (kwargs.get('cluster') if (kwargs and ('cluster' in kwargs)) else None) cluster_properties = ['name'] cluster_list = salt.utils.vmware.get_mors_with_properties(_get_si(), vim.ClusterComputeResource, cluster_properties) for cluster in cluster_list: ret[cluster['name']] = [] for host in cluster['object'].host: if isinstance(host, vim.HostSystem): ret[cluster['name']].append(host.name) if (cluster_name and (cluster_name == cluster['name'])): return {'Hosts by Cluster': {cluster_name: ret[cluster_name]}} return {'Hosts by Cluster': ret}
[ "def", "list_hosts_by_cluster", "(", "kwargs", "=", "None", ",", "call", "=", "None", ")", ":", "if", "(", "call", "!=", "'function'", ")", ":", "raise", "SaltCloudSystemExit", "(", "'The list_hosts_by_cluster function must be called with -f or --function.'", ")", "re...
list hosts for each cluster; or hosts for a specified cluster in this vmware environment to list hosts for each cluster: cli example: .
train
true
13,683
def release_ownership_of_collection(committer_id, collection_id): _release_ownership_of_activity(committer_id, collection_id, feconf.ACTIVITY_TYPE_COLLECTION)
[ "def", "release_ownership_of_collection", "(", "committer_id", ",", "collection_id", ")", ":", "_release_ownership_of_activity", "(", "committer_id", ",", "collection_id", ",", "feconf", ".", "ACTIVITY_TYPE_COLLECTION", ")" ]
releases ownership of an collection to the community .
train
false
13,684
def GetUnsavedAndSpecifiedBufferData(including_filepath): buffers_data = {} for buffer_object in vim.buffers: buffer_filepath = GetBufferFilepath(buffer_object) if (not (BufferModified(buffer_object) or (buffer_filepath == including_filepath))): continue buffers_data[buffer_filepath] = {u'contents': (JoinLinesAsUnicode(buffer_object) + u'\n'), u'filetypes': FiletypesForBuffer(buffer_object)} return buffers_data
[ "def", "GetUnsavedAndSpecifiedBufferData", "(", "including_filepath", ")", ":", "buffers_data", "=", "{", "}", "for", "buffer_object", "in", "vim", ".", "buffers", ":", "buffer_filepath", "=", "GetBufferFilepath", "(", "buffer_object", ")", "if", "(", "not", "(", ...
build part of the request containing the contents and filetypes of all dirty buffers as well as the buffer with filepath |including_filepath| .
train
false
13,685
def room(request, slug, template='room.html'): context = {'room': get_object_or_404(ChatRoom, slug=slug)} return render(request, template, context)
[ "def", "room", "(", "request", ",", "slug", ",", "template", "=", "'room.html'", ")", ":", "context", "=", "{", "'room'", ":", "get_object_or_404", "(", "ChatRoom", ",", "slug", "=", "slug", ")", "}", "return", "render", "(", "request", ",", "template", ...
restful crud controller .
train
true
13,686
def asResponse(request): return _MemoryResponse('HTTP/1.1', request.code, request.code_message, request.responseHeaders, None, None, request._responseBody)
[ "def", "asResponse", "(", "request", ")", ":", "return", "_MemoryResponse", "(", "'HTTP/1.1'", ",", "request", ".", "code", ",", "request", ".", "code_message", ",", "request", ".", "responseHeaders", ",", "None", ",", "None", ",", "request", ".", "_response...
extract the response data stored on a request and create a real response object from it .
train
false
13,687
def get_territory_language_info(territory): territory = str(territory).upper() return get_global('territory_languages').get(territory, {}).copy()
[ "def", "get_territory_language_info", "(", "territory", ")", ":", "territory", "=", "str", "(", "territory", ")", ".", "upper", "(", ")", "return", "get_global", "(", "'territory_languages'", ")", ".", "get", "(", "territory", ",", "{", "}", ")", ".", "cop...
get a dictionary of language information for a territory .
train
false
13,688
def match_track(artist, title): criteria = {'artist': artist.lower().strip(), 'recording': title.lower().strip()} if (not any(criteria.values())): return try: res = musicbrainzngs.search_recordings(limit=config['musicbrainz']['searchlimit'].get(int), **criteria) except musicbrainzngs.MusicBrainzError as exc: raise MusicBrainzAPIError(exc, 'recording search', criteria, traceback.format_exc()) for recording in res['recording-list']: (yield track_info(recording))
[ "def", "match_track", "(", "artist", ",", "title", ")", ":", "criteria", "=", "{", "'artist'", ":", "artist", ".", "lower", "(", ")", ".", "strip", "(", ")", ",", "'recording'", ":", "title", ".", "lower", "(", ")", ".", "strip", "(", ")", "}", "...
searches for a single track and returns an iterable of trackinfo objects .
train
false
13,689
def _get_request_locale(request): locale = request.LANGUAGE_CODE if (not localedata.exists(locale)): locale = settings.LANGUAGE_CODE return locale
[ "def", "_get_request_locale", "(", "request", ")", ":", "locale", "=", "request", ".", "LANGUAGE_CODE", "if", "(", "not", "localedata", ".", "exists", "(", "locale", ")", ")", ":", "locale", "=", "settings", ".", "LANGUAGE_CODE", "return", "locale" ]
return locale from the request .
train
false
13,690
def schwefel(individual): N = len(individual) return (((418.9828872724339 * N) - sum(((x * sin(sqrt(abs(x)))) for x in individual))),)
[ "def", "schwefel", "(", "individual", ")", ":", "N", "=", "len", "(", "individual", ")", "return", "(", "(", "(", "418.9828872724339", "*", "N", ")", "-", "sum", "(", "(", "(", "x", "*", "sin", "(", "sqrt", "(", "abs", "(", "x", ")", ")", ")", ...
schwefel test objective function .
train
false
13,691
def _get_chain_list(cursor, varname): cursor.execute('SELECT DISTINCT chain FROM [{}]'.format(varname)) chains = [chain[0] for chain in cursor.fetchall()] chains.sort() return chains
[ "def", "_get_chain_list", "(", "cursor", ",", "varname", ")", ":", "cursor", ".", "execute", "(", "'SELECT DISTINCT chain FROM [{}]'", ".", "format", "(", "varname", ")", ")", "chains", "=", "[", "chain", "[", "0", "]", "for", "chain", "in", "cursor", ".",...
return a list of sorted chains for varname .
train
false
13,692
@memoized def getBox(box, pagesize): box = str(box).split() if (len(box) != 4): raise Exception('box not defined right way') (x, y, w, h) = [getSize(pos) for pos in box] return getCoords(x, y, w, h, pagesize)
[ "@", "memoized", "def", "getBox", "(", "box", ",", "pagesize", ")", ":", "box", "=", "str", "(", "box", ")", ".", "split", "(", ")", "if", "(", "len", "(", "box", ")", "!=", "4", ")", ":", "raise", "Exception", "(", "'box not defined right way'", "...
parse sizes by corners in the form: <x-left> <y-upper> <width> <height> the last to values with negative values are interpreted as offsets form the right and lower border .
train
true
13,693
@socketio.on('join', namespace='/jobs') def on_join_jobs(data): room = data['room'] join_room(room) flask.session['room'] = room
[ "@", "socketio", ".", "on", "(", "'join'", ",", "namespace", "=", "'/jobs'", ")", "def", "on_join_jobs", "(", "data", ")", ":", "room", "=", "data", "[", "'room'", "]", "join_room", "(", "room", ")", "flask", ".", "session", "[", "'room'", "]", "=", ...
somebody joined a room .
train
false
13,694
def set_log(level, filename='jumpserver.log'): log_file = os.path.join(LOG_DIR, filename) if (not os.path.isfile(log_file)): os.mknod(log_file) os.chmod(log_file, 511) log_level_total = {'debug': logging.DEBUG, 'info': logging.INFO, 'warning': logging.WARN, 'error': logging.ERROR, 'critical': logging.CRITICAL} logger_f = logging.getLogger('jumpserver') logger_f.setLevel(logging.DEBUG) fh = logging.FileHandler(log_file) fh.setLevel(log_level_total.get(level, logging.DEBUG)) formatter = logging.Formatter('%(asctime)s - %(filename)s - %(levelname)s - %(message)s') fh.setFormatter(formatter) logger_f.addHandler(fh) return logger_f
[ "def", "set_log", "(", "level", ",", "filename", "=", "'jumpserver.log'", ")", ":", "log_file", "=", "os", ".", "path", ".", "join", "(", "LOG_DIR", ",", "filename", ")", "if", "(", "not", "os", ".", "path", ".", "isfile", "(", "log_file", ")", ")", ...
return a log file object 根据提示设置log打印 .
train
false
13,695
def ms_payload_3(payload): return {'1': 'windows/shell_reverse_tcp', '2': 'windows/meterpreter/reverse_tcp', '3': 'windows/vncinject/reverse_tcp', '4': 'windows/x64/shell_reverse_tcp', '5': 'windows/x64/meterpreter/reverse_tcp', '6': 'windows/x64/shell_bind_tcp', '7': 'windows/meterpreter/reverse_https'}.get(payload, 'ERROR')
[ "def", "ms_payload_3", "(", "payload", ")", ":", "return", "{", "'1'", ":", "'windows/shell_reverse_tcp'", ",", "'2'", ":", "'windows/meterpreter/reverse_tcp'", ",", "'3'", ":", "'windows/vncinject/reverse_tcp'", ",", "'4'", ":", "'windows/x64/shell_reverse_tcp'", ",", ...
receives the input given by the user from create_payloads .
train
false
13,696
def migrate_instances_add_request_spec(context, max_count): marker = _get_marker_for_migrate_instances(context) attrs = ['system_metadata', 'flavor', 'pci_requests', 'numa_topology', 'availability_zone'] instances = objects.InstanceList.get_by_filters(context, filters={'deleted': False}, sort_key='created_at', sort_dir='asc', limit=max_count, marker=marker, expected_attrs=attrs) count_all = len(instances) count_hit = 0 for instance in instances: try: RequestSpec.get_by_instance_uuid(context, instance.uuid) except exception.RequestSpecNotFound: _create_minimal_request_spec(context, instance) count_hit += 1 if (count_all > 0): marker = instances[(-1)].uuid _set_or_delete_marker_for_migrate_instances(context, marker) return (count_all, count_hit)
[ "def", "migrate_instances_add_request_spec", "(", "context", ",", "max_count", ")", ":", "marker", "=", "_get_marker_for_migrate_instances", "(", "context", ")", "attrs", "=", "[", "'system_metadata'", ",", "'flavor'", ",", "'pci_requests'", ",", "'numa_topology'", ",...
creates and persists a requestspec per instance not yet having it .
train
false
13,697
def _can_access_descriptor_with_milestones(user, descriptor, course_key): if milestones_helpers.get_course_content_milestones(course_key, unicode(descriptor.location), 'requires', user.id): debug('Deny: user has not completed all milestones for content') return ACCESS_DENIED else: return ACCESS_GRANTED
[ "def", "_can_access_descriptor_with_milestones", "(", "user", ",", "descriptor", ",", "course_key", ")", ":", "if", "milestones_helpers", ".", "get_course_content_milestones", "(", "course_key", ",", "unicode", "(", "descriptor", ".", "location", ")", ",", "'requires'...
returns if the object is blocked by an unfulfilled milestone .
train
false
13,698
def read_pid_from_pidfile(pidfile_path): pid = None try: pidfile = open(pidfile_path, 'r') except IOError: pass else: line = pidfile.readline().strip() try: pid = int(line) except ValueError: pass pidfile.close() return pid
[ "def", "read_pid_from_pidfile", "(", "pidfile_path", ")", ":", "pid", "=", "None", "try", ":", "pidfile", "=", "open", "(", "pidfile_path", ",", "'r'", ")", "except", "IOError", ":", "pass", "else", ":", "line", "=", "pidfile", ".", "readline", "(", ")",...
read the pid recorded in the named pid file .
train
true
13,701
def render_response(body=None, status=None, headers=None): headers = (headers or []) headers.append(('Vary', 'X-Auth-Token')) if (body is None): body = '' status = (status or (204, 'No Content')) else: body = jsonutils.dumps(body, cls=utils.SmarterEncoder) headers.append(('Content-Type', 'application/json')) status = (status or (200, 'OK')) return webob.Response(body=body, status=('%s %s' % status), headerlist=headers)
[ "def", "render_response", "(", "body", "=", "None", ",", "status", "=", "None", ",", "headers", "=", "None", ")", ":", "headers", "=", "(", "headers", "or", "[", "]", ")", "headers", ".", "append", "(", "(", "'Vary'", ",", "'X-Auth-Token'", ")", ")",...
forms a wsgi response .
train
false
13,702
def _get_binary(value, bits): return ''.join([str(((value >> y) & 1)) for y in range((bits - 1), (-1), (-1))])
[ "def", "_get_binary", "(", "value", ",", "bits", ")", ":", "return", "''", ".", "join", "(", "[", "str", "(", "(", "(", "value", ">>", "y", ")", "&", "1", ")", ")", "for", "y", "in", "range", "(", "(", "bits", "-", "1", ")", ",", "(", "-", ...
provides the given value as a binary string .
train
false
13,703
def prepare_staging_area(sr_path, staging_path, vdi_uuids, seq_num=0): for vdi_uuid in vdi_uuids: source = os.path.join(sr_path, ('%s.vhd' % vdi_uuid)) link_name = os.path.join(staging_path, ('%d.vhd' % seq_num)) _link(source, link_name) seq_num += 1
[ "def", "prepare_staging_area", "(", "sr_path", ",", "staging_path", ",", "vdi_uuids", ",", "seq_num", "=", "0", ")", ":", "for", "vdi_uuid", "in", "vdi_uuids", ":", "source", "=", "os", ".", "path", ".", "join", "(", "sr_path", ",", "(", "'%s.vhd'", "%",...
hard-link vhds into staging area .
train
false
13,704
def submit_pyspark_job(dataproc, project, cluster_name, bucket_name, filename): job_details = {'projectId': project, 'job': {'placement': {'clusterName': cluster_name}, 'pysparkJob': {'mainPythonFileUri': 'gs://{}/{}'.format(bucket_name, filename)}}} result = dataproc.projects().regions().jobs().submit(projectId=project, region=REGION, body=job_details).execute() job_id = result['reference']['jobId'] print 'Submitted job ID {}'.format(job_id) return job_id
[ "def", "submit_pyspark_job", "(", "dataproc", ",", "project", ",", "cluster_name", ",", "bucket_name", ",", "filename", ")", ":", "job_details", "=", "{", "'projectId'", ":", "project", ",", "'job'", ":", "{", "'placement'", ":", "{", "'clusterName'", ":", "...
submits the pyspark job to the cluster .
train
false
13,705
def previous_history_or_previous_completion(event): event.current_buffer.auto_up()
[ "def", "previous_history_or_previous_completion", "(", "event", ")", ":", "event", ".", "current_buffer", ".", "auto_up", "(", ")" ]
control-p in vi edit mode on readline is history next .
train
false
13,706
def multi_source_dijkstra_path(G, sources, cutoff=None, weight='weight'): (length, path) = multi_source_dijkstra(G, sources, cutoff=cutoff, weight=weight) return path
[ "def", "multi_source_dijkstra_path", "(", "G", ",", "sources", ",", "cutoff", "=", "None", ",", "weight", "=", "'weight'", ")", ":", "(", "length", ",", "path", ")", "=", "multi_source_dijkstra", "(", "G", ",", "sources", ",", "cutoff", "=", "cutoff", ",...
find shortest weighted paths in g from a given set of source nodes .
train
false
13,709
def test_repr_latex(): somelocation = EarthLocation(lon=u'149:3:57.9', lat=u'-31:16:37.3') somelocation._repr_latex_() somelocation2 = EarthLocation(lon=([1.0, 2.0] * u.deg), lat=([(-1.0), 9.0] * u.deg)) somelocation2._repr_latex_()
[ "def", "test_repr_latex", "(", ")", ":", "somelocation", "=", "EarthLocation", "(", "lon", "=", "u'149:3:57.9'", ",", "lat", "=", "u'-31:16:37.3'", ")", "somelocation", ".", "_repr_latex_", "(", ")", "somelocation2", "=", "EarthLocation", "(", "lon", "=", "(",...
check the _repr_latex_ method .
train
false
13,710
def getOverhangRadians(elementNode): return math.radians(getOverhangAngle(elementNode))
[ "def", "getOverhangRadians", "(", "elementNode", ")", ":", "return", "math", ".", "radians", "(", "getOverhangAngle", "(", "elementNode", ")", ")" ]
get the overhang support angle in radians .
train
false
13,711
def sanitize_open(filename, open_mode): try: if (filename == u'-'): if (sys.platform == u'win32'): import msvcrt msvcrt.setmode(sys.stdout.fileno(), os.O_BINARY) return ((sys.stdout.buffer if hasattr(sys.stdout, u'buffer') else sys.stdout), filename) stream = open(encodeFilename(filename), open_mode) return (stream, filename) except (IOError, OSError) as err: if (err.errno in (errno.EACCES,)): raise alt_filename = sanitize_path(filename) if (alt_filename == filename): raise else: stream = open(encodeFilename(alt_filename), open_mode) return (stream, alt_filename)
[ "def", "sanitize_open", "(", "filename", ",", "open_mode", ")", ":", "try", ":", "if", "(", "filename", "==", "u'-'", ")", ":", "if", "(", "sys", ".", "platform", "==", "u'win32'", ")", ":", "import", "msvcrt", "msvcrt", ".", "setmode", "(", "sys", "...
try to open the given filename .
train
false
13,712
def add_ls_data(script): def notimplemented(script): sys.stderr.write(('WARNING: migration not implemented for script %(id)r\n' % script)) raise NotImplementedError return {'smb-ls': add_smb_ls_data, 'nfs-ls': add_nfs_ls_data, 'afp-ls': add_afp_ls_data, 'ftp-anon': add_ftp_anon_data}.get(script['id'], notimplemented)(script)
[ "def", "add_ls_data", "(", "script", ")", ":", "def", "notimplemented", "(", "script", ")", ":", "sys", ".", "stderr", ".", "write", "(", "(", "'WARNING: migration not implemented for script %(id)r\\n'", "%", "script", ")", ")", "raise", "NotImplementedError", "re...
this function calls the appropriate add_*_data() function to convert output from scripts that do not include a structured output to a structured output similar to the one provided by the "ls" nse module .
train
false
13,714
def _interpolate_multivariate(evalpoints, hpeval, ring, i, p, ground=False): hp = ring.zero if ground: domain = ring.domain.domain y = ring.domain.gens[i] else: domain = ring.domain y = ring.gens[i] for (a, hpa) in zip(evalpoints, hpeval): numer = ring.one denom = domain.one for b in evalpoints: if (b == a): continue numer *= (y - b) denom *= (a - b) denom = domain.invert(denom, p) coeff = numer.mul_ground(denom) hp += (hpa.set_ring(ring) * coeff) return hp.trunc_ground(p)
[ "def", "_interpolate_multivariate", "(", "evalpoints", ",", "hpeval", ",", "ring", ",", "i", ",", "p", ",", "ground", "=", "False", ")", ":", "hp", "=", "ring", ".", "zero", "if", "ground", ":", "domain", "=", "ring", ".", "domain", ".", "domain", "y...
reconstruct a polynomial h_p in mathbb{z}_p[x_0 .
train
false
13,715
def NewEnum(seq, cls=ListEnumerator, iid=pythoncom.IID_IEnumVARIANT, usePolicy=None, useDispatcher=None): ob = cls(seq, iid=iid) return wrap(ob, iid, usePolicy=usePolicy, useDispatcher=useDispatcher)
[ "def", "NewEnum", "(", "seq", ",", "cls", "=", "ListEnumerator", ",", "iid", "=", "pythoncom", ".", "IID_IEnumVARIANT", ",", "usePolicy", "=", "None", ",", "useDispatcher", "=", "None", ")", ":", "ob", "=", "cls", "(", "seq", ",", "iid", "=", "iid", ...
creates a new enumerator com server .
train
false
13,716
def _bem_inf_fields(rr, rmag, cosmag): diff = (rmag.T[np.newaxis, :, :] - rr[:, :, np.newaxis]) diff_norm = np.sum((diff * diff), axis=1) diff_norm *= np.sqrt(diff_norm) diff_norm[(diff_norm == 0)] = 1 x = np.array([((diff[:, 1] * cosmag[:, 2]) - (diff[:, 2] * cosmag[:, 1])), ((diff[:, 2] * cosmag[:, 0]) - (diff[:, 0] * cosmag[:, 2])), ((diff[:, 0] * cosmag[:, 1]) - (diff[:, 1] * cosmag[:, 0]))]) return np.rollaxis((x / diff_norm), 1)
[ "def", "_bem_inf_fields", "(", "rr", ",", "rmag", ",", "cosmag", ")", ":", "diff", "=", "(", "rmag", ".", "T", "[", "np", ".", "newaxis", ",", ":", ",", ":", "]", "-", "rr", "[", ":", ",", ":", ",", "np", ".", "newaxis", "]", ")", "diff_norm"...
compute infinite-medium magnetic field at one meg sensor .
train
false
13,717
@sync_performer def perform_create_repository(dispatcher, intent): package_type = intent.distribution.package_type() if (package_type == PackageTypes.RPM): check_call(['createrepo', '--quiet', intent.repository_path.path]) return _list_new_metadata(repository_path=intent.repository_path) elif (package_type == PackageTypes.DEB): packages_file = intent.repository_path.child('Packages') scan_packages(repository=intent.repository_path.path, packages_file=packages_file.path) intent.repository_path.child('Release').setContent('Origin: ClusterHQ\n') with intent.repository_path.child('Packages.gz').open('w') as raw_file: with GzipFile('Packages.gz', fileobj=raw_file) as gzip_file: gzip_file.write(packages_file.getContent()) return {'Packages.gz', 'Release'} else: raise NotImplementedError(('Unknown package type: %s' % (package_type,)))
[ "@", "sync_performer", "def", "perform_create_repository", "(", "dispatcher", ",", "intent", ")", ":", "package_type", "=", "intent", ".", "distribution", ".", "package_type", "(", ")", "if", "(", "package_type", "==", "PackageTypes", ".", "RPM", ")", ":", "ch...
see :class:createrepo .
train
false
13,719
@handle_response_format @treeio_login_required def transaction_view(request, transaction_id, response_format='html'): transaction = get_object_or_404(Transaction, pk=transaction_id) return render_to_response('finance/transaction_view', {'transaction': transaction}, context_instance=RequestContext(request), response_format=response_format)
[ "@", "handle_response_format", "@", "treeio_login_required", "def", "transaction_view", "(", "request", ",", "transaction_id", ",", "response_format", "=", "'html'", ")", ":", "transaction", "=", "get_object_or_404", "(", "Transaction", ",", "pk", "=", "transaction_id...
single transaction view page .
train
false
13,720
def zone(): return s3_rest_controller()
[ "def", "zone", "(", ")", ":", "return", "s3_rest_controller", "(", ")" ]
restful crud controller .
train
false
13,721
def _one_line_summary_from_text(text, length=78, escapes={'\n': '\\n', '\r': '\\r', ' DCTB ': '\\t'}): if (len(text) > length): head = text[:(length - 3)] else: head = text escaped = _escaped_text_from_text(head, escapes) if (len(text) > length): summary = (escaped[:(length - 3)] + '...') else: summary = escaped return summary
[ "def", "_one_line_summary_from_text", "(", "text", ",", "length", "=", "78", ",", "escapes", "=", "{", "'\\n'", ":", "'\\\\n'", ",", "'\\r'", ":", "'\\\\r'", ",", "' DCTB '", ":", "'\\\\t'", "}", ")", ":", "if", "(", "len", "(", "text", ")", ">", "le...
summarize the given text with one line of the given length .
train
false
13,723
def sigma_sub(p, q): return (((C_sub - delta(p, q)) - V(p)) - V(q))
[ "def", "sigma_sub", "(", "p", ",", "q", ")", ":", "return", "(", "(", "(", "C_sub", "-", "delta", "(", "p", ",", "q", ")", ")", "-", "V", "(", "p", ")", ")", "-", "V", "(", "q", ")", ")" ]
returns score of a substitution of p with q .
train
false
13,724
@frappe.whitelist() def getdoctype(doctype, with_parent=False, cached_timestamp=None): docs = [] parent_dt = None if with_parent: parent_dt = frappe.model.meta.get_parent_dt(doctype) if parent_dt: docs = get_meta_bundle(parent_dt) frappe.response[u'parent_dt'] = parent_dt if (not docs): docs = get_meta_bundle(doctype) frappe.response[u'user_permissions'] = get_user_permissions(docs) frappe.response[u'list_settings'] = get_list_settings((parent_dt or doctype)) if (cached_timestamp and (docs[0].modified == cached_timestamp)): return u'use_cache' frappe.response.docs.extend(docs)
[ "@", "frappe", ".", "whitelist", "(", ")", "def", "getdoctype", "(", "doctype", ",", "with_parent", "=", "False", ",", "cached_timestamp", "=", "None", ")", ":", "docs", "=", "[", "]", "parent_dt", "=", "None", "if", "with_parent", ":", "parent_dt", "=",...
load doctype .
train
false
13,725
def read_py_url(url, errors='replace', skip_encoding_cookie=True): from urllib.request import urlopen response = urlopen(url) buffer = io.BytesIO(response.read()) return source_to_unicode(buffer, errors, skip_encoding_cookie)
[ "def", "read_py_url", "(", "url", ",", "errors", "=", "'replace'", ",", "skip_encoding_cookie", "=", "True", ")", ":", "from", "urllib", ".", "request", "import", "urlopen", "response", "=", "urlopen", "(", "url", ")", "buffer", "=", "io", ".", "BytesIO", ...
read a python file from a url .
train
false
13,726
@world.absorb def wait_for_present(css_selector, timeout=GLOBAL_WAIT_FOR_TIMEOUT): wait_for(func=(lambda _: EC.presence_of_element_located((By.CSS_SELECTOR, css_selector))), timeout=timeout, timeout_msg='Timed out waiting for {} to be present.'.format(css_selector))
[ "@", "world", ".", "absorb", "def", "wait_for_present", "(", "css_selector", ",", "timeout", "=", "GLOBAL_WAIT_FOR_TIMEOUT", ")", ":", "wait_for", "(", "func", "=", "(", "lambda", "_", ":", "EC", ".", "presence_of_element_located", "(", "(", "By", ".", "CSS_...
wait for the element to be present in the dom .
train
false
13,727
@contextmanager def patch_search_fields(model, new_search_fields): old_search_fields = model.search_fields model.search_fields = new_search_fields (yield) model.search_fields = old_search_fields
[ "@", "contextmanager", "def", "patch_search_fields", "(", "model", ",", "new_search_fields", ")", ":", "old_search_fields", "=", "model", ".", "search_fields", "model", ".", "search_fields", "=", "new_search_fields", "(", "yield", ")", "model", ".", "search_fields",...
a context manager to allow testing of different search_fields configurations without permanently changing the models search_fields .
train
false
13,728
def getSwarmModelParams(modelID): cjDAO = ClientJobsDAO.get() (jobID, description) = cjDAO.modelsGetFields(modelID, ['jobId', 'genDescription']) (baseDescription,) = cjDAO.jobGetFields(jobID, ['genBaseDescription']) descriptionDirectory = tempfile.mkdtemp() try: baseDescriptionFilePath = os.path.join(descriptionDirectory, 'base.py') with open(baseDescriptionFilePath, mode='wb') as f: f.write(baseDescription) descriptionFilePath = os.path.join(descriptionDirectory, 'description.py') with open(descriptionFilePath, mode='wb') as f: f.write(description) expIface = opfhelpers.getExperimentDescriptionInterfaceFromModule(opfhelpers.loadExperimentDescriptionScriptFromDir(descriptionDirectory)) return json.dumps(dict(modelConfig=expIface.getModelDescription(), inferenceArgs=expIface.getModelControl().get('inferenceArgs', None))) finally: shutil.rmtree(descriptionDirectory, ignore_errors=True)
[ "def", "getSwarmModelParams", "(", "modelID", ")", ":", "cjDAO", "=", "ClientJobsDAO", ".", "get", "(", ")", "(", "jobID", ",", "description", ")", "=", "cjDAO", ".", "modelsGetFields", "(", "modelID", ",", "[", "'jobId'", ",", "'genDescription'", "]", ")"...
retrieve the engine-level model params from a swarm model args: modelid - engine-level model id of the swarm model returns: json-encoded string containing model params .
train
true
13,729
def read_values(base, key): try: handle = RegOpenKeyEx(base, key) except RegError: return None d = {} i = 0 while 1: try: (name, value, type) = RegEnumValue(handle, i) except RegError: break name = name.lower() d[convert_mbcs(name)] = convert_mbcs(value) i = (i + 1) return d
[ "def", "read_values", "(", "base", ",", "key", ")", ":", "try", ":", "handle", "=", "RegOpenKeyEx", "(", "base", ",", "key", ")", "except", "RegError", ":", "return", "None", "d", "=", "{", "}", "i", "=", "0", "while", "1", ":", "try", ":", "(", ...
return dict of registry keys and values .
train
false
13,732
def encode_7or8bit(msg): orig = msg.get_payload() if (orig is None): msg['Content-Transfer-Encoding'] = '7bit' return try: orig.encode('ascii') except UnicodeError: msg['Content-Transfer-Encoding'] = '8bit' else: msg['Content-Transfer-Encoding'] = '7bit'
[ "def", "encode_7or8bit", "(", "msg", ")", ":", "orig", "=", "msg", ".", "get_payload", "(", ")", "if", "(", "orig", "is", "None", ")", ":", "msg", "[", "'Content-Transfer-Encoding'", "]", "=", "'7bit'", "return", "try", ":", "orig", ".", "encode", "(",...
set the content-transfer-encoding header to 7bit or 8bit .
train
false
13,735
@inspect_command(default_timeout=60.0, args=[(u'type', text_t), (u'num', int), (u'max_depth', int)], signature=u'[object_type=Request] [num=200 [max_depth=10]]') def objgraph(state, num=200, max_depth=10, type=u'Request'): try: import objgraph as _objgraph except ImportError: raise ImportError(u'Requires the objgraph library') logger.info(u'Dumping graph for type %r', type) with tempfile.NamedTemporaryFile(prefix=u'cobjg', suffix=u'.png', delete=False) as fh: objects = _objgraph.by_type(type)[:num] _objgraph.show_backrefs(objects, max_depth=max_depth, highlight=(lambda v: (v in objects)), filename=fh.name) return {u'filename': fh.name}
[ "@", "inspect_command", "(", "default_timeout", "=", "60.0", ",", "args", "=", "[", "(", "u'type'", ",", "text_t", ")", ",", "(", "u'num'", ",", "int", ")", ",", "(", "u'max_depth'", ",", "int", ")", "]", ",", "signature", "=", "u'[object_type=Request] [...
create graph of uncollected objects .
train
false
13,736
@verbose def streamtofile_demo(limit=20): oauth = credsfromfile() client = Streamer(**oauth) client.register(TweetWriter(limit=limit, repeat=False)) client.statuses.sample()
[ "@", "verbose", "def", "streamtofile_demo", "(", "limit", "=", "20", ")", ":", "oauth", "=", "credsfromfile", "(", ")", "client", "=", "Streamer", "(", "**", "oauth", ")", "client", ".", "register", "(", "TweetWriter", "(", "limit", "=", "limit", ",", ...
write 20 tweets sampled from the public streaming api to a file .
train
false
13,737
def gdal_version(): return _version_info('RELEASE_NAME')
[ "def", "gdal_version", "(", ")", ":", "return", "_version_info", "(", "'RELEASE_NAME'", ")" ]
returns only the gdal version number information .
train
false
13,740
def oo_generate_secret(num_bytes): if (not isinstance(num_bytes, int)): raise errors.AnsibleFilterError('|failed expects num_bytes is int') secret = os.urandom(num_bytes) return secret.encode('base-64').strip()
[ "def", "oo_generate_secret", "(", "num_bytes", ")", ":", "if", "(", "not", "isinstance", "(", "num_bytes", ",", "int", ")", ")", ":", "raise", "errors", ".", "AnsibleFilterError", "(", "'|failed expects num_bytes is int'", ")", "secret", "=", "os", ".", "urand...
generate a session secret .
train
false
13,741
@csrf_exempt def sql_select(request): form = SQLSelectForm((request.POST or None)) if form.is_valid(): sql = form.cleaned_data[u'raw_sql'] params = form.cleaned_data[u'params'] cursor = form.cursor cursor.execute(sql, params) headers = [d[0] for d in cursor.description] result = cursor.fetchall() cursor.close() context = {u'result': result, u'sql': form.reformat_sql(), u'duration': form.cleaned_data[u'duration'], u'headers': headers, u'alias': form.cleaned_data[u'alias']} return render_to_response(u'debug_toolbar/panels/sql_select.html', context) return HttpResponseBadRequest(u'Form errors')
[ "@", "csrf_exempt", "def", "sql_select", "(", "request", ")", ":", "form", "=", "SQLSelectForm", "(", "(", "request", ".", "POST", "or", "None", ")", ")", "if", "form", ".", "is_valid", "(", ")", ":", "sql", "=", "form", ".", "cleaned_data", "[", "u'...
returns the output of the sql select statement .
train
false
13,744
def show_coalesce(devname): try: coalesce = ethtool.get_coalesce(devname) except IOError: log.error('Interrupt coalescing not supported on {0}'.format(devname)) return 'Not supported' ret = {} for (key, value) in coalesce.items(): ret[ethtool_coalesce_remap[key]] = coalesce[key] return ret
[ "def", "show_coalesce", "(", "devname", ")", ":", "try", ":", "coalesce", "=", "ethtool", ".", "get_coalesce", "(", "devname", ")", "except", "IOError", ":", "log", ".", "error", "(", "'Interrupt coalescing not supported on {0}'", ".", "format", "(", "devname", ...
queries the specified network device for coalescing information cli example: .
train
true
13,745
def determine_stable_version(version_list): versions = sort_versions(version_list) versions = [(version_obj, comparable) for (version_obj, comparable) in versions if (not comparable.is_prerelease)] if versions: (version_obj, comparable) = versions[0] return version_obj else: return None
[ "def", "determine_stable_version", "(", "version_list", ")", ":", "versions", "=", "sort_versions", "(", "version_list", ")", "versions", "=", "[", "(", "version_obj", ",", "comparable", ")", "for", "(", "version_obj", ",", "comparable", ")", "in", "versions", ...
determine a stable version for version list takes a list of version model instances and returns the version instance which can be considered the most recent stable one .
train
false
13,746
def spacewalk_report(name): cache_filename = os.path.join(CACHE_DIR, name) if ((not os.path.exists(cache_filename)) or ((time.time() - os.stat(cache_filename).st_mtime) > CACHE_AGE)): fh = open(cache_filename, 'w') p = subprocess.Popen([SW_REPORT, name], stdout=fh) p.wait() fh.close() lines = open(cache_filename, 'r').readlines() keys = lines[0].strip().split(',') keys = [('spacewalk_' + key) for key in keys] for line in lines[1:]: values = line.strip().split(',') if (len(keys) == len(values)): (yield dict(zip(keys, values)))
[ "def", "spacewalk_report", "(", "name", ")", ":", "cache_filename", "=", "os", ".", "path", ".", "join", "(", "CACHE_DIR", ",", "name", ")", "if", "(", "(", "not", "os", ".", "path", ".", "exists", "(", "cache_filename", ")", ")", "or", "(", "(", "...
yield a dictionary form of each csv output produced by the specified spacewalk-report .
train
false
13,747
def require_facebook_graph(request, *args, **kwargs): kwargs['raise_'] = True graph = get_facebook_graph(request, *args, **kwargs) if (not graph): raise OpenFacebookException('please authenticate') return graph
[ "def", "require_facebook_graph", "(", "request", ",", "*", "args", ",", "**", "kwargs", ")", ":", "kwargs", "[", "'raise_'", "]", "=", "True", "graph", "=", "get_facebook_graph", "(", "request", ",", "*", "args", ",", "**", "kwargs", ")", "if", "(", "n...
just like get_facebook graph .
train
false
13,748
def PipeConnection(incoming, outgoing): return Connection(Channel(PipeStream(incoming, outgoing)))
[ "def", "PipeConnection", "(", "incoming", ",", "outgoing", ")", ":", "return", "Connection", "(", "Channel", "(", "PipeStream", "(", "incoming", ",", "outgoing", ")", ")", ")" ]
shorthand for creating a conneciton over a pipe .
train
false
13,749
def _get_user_info_from_dict(cookie_dict, cookie_name=_COOKIE_NAME): cookie_secret = os.environ['COOKIE_SECRET'] cookie_value = cookie_dict.get(cookie_name, '') cookie_value = cookie_value.replace('%3A', ':') cookie_value = cookie_value.replace('%40', '@') cookie_value = cookie_value.replace('%2C', ',') (email, nickname, admin, hsh) = (cookie_value.split(':') + ['', '', '', ''])[:4] if (email == ''): nickname = '' admin = '' return ('', False, '') else: vhsh = sha.new((((email + nickname) + admin) + cookie_secret)).hexdigest() if (hsh != vhsh): logging.info('{0} has an invalid cookie, so ignoring it.'.format(email)) return ('', False, '') admin_apps = admin.split(',') current_app = os.environ['APPLICATION_ID'] is_admin = (current_app in admin_apps) return (email, is_admin, nickname)
[ "def", "_get_user_info_from_dict", "(", "cookie_dict", ",", "cookie_name", "=", "_COOKIE_NAME", ")", ":", "cookie_secret", "=", "os", ".", "environ", "[", "'COOKIE_SECRET'", "]", "cookie_value", "=", "cookie_dict", ".", "get", "(", "cookie_name", ",", "''", ")",...
gets the requestors user info from a cookie dictionary .
train
false
13,750
def _pick_runner_opts(runner_alias=None, cloud_role=None): return set((opt_name for (opt_name, conf) in _RUNNER_OPTS.items() if (((runner_alias is None) or (conf.get('runners') is None) or (runner_alias in conf['runners'])) and ((cloud_role is None) or (cloud_role == conf.get('cloud_role'))))))
[ "def", "_pick_runner_opts", "(", "runner_alias", "=", "None", ",", "cloud_role", "=", "None", ")", ":", "return", "set", "(", "(", "opt_name", "for", "(", "opt_name", ",", "conf", ")", "in", "_RUNNER_OPTS", ".", "items", "(", ")", "if", "(", "(", "(", ...
return a set of option names that work for the given runner and fullfill the given cloud roles .
train
false
13,751
def calinski_harabaz_score(X, labels): (X, labels) = check_X_y(X, labels) le = LabelEncoder() labels = le.fit_transform(labels) (n_samples, _) = X.shape n_labels = len(le.classes_) check_number_of_labels(n_labels, n_samples) (extra_disp, intra_disp) = (0.0, 0.0) mean = np.mean(X, axis=0) for k in range(n_labels): cluster_k = X[(labels == k)] mean_k = np.mean(cluster_k, axis=0) extra_disp += (len(cluster_k) * np.sum(((mean_k - mean) ** 2))) intra_disp += np.sum(((cluster_k - mean_k) ** 2)) return (1.0 if (intra_disp == 0.0) else ((extra_disp * (n_samples - n_labels)) / (intra_disp * (n_labels - 1.0))))
[ "def", "calinski_harabaz_score", "(", "X", ",", "labels", ")", ":", "(", "X", ",", "labels", ")", "=", "check_X_y", "(", "X", ",", "labels", ")", "le", "=", "LabelEncoder", "(", ")", "labels", "=", "le", ".", "fit_transform", "(", "labels", ")", "(",...
compute the calinski and harabaz score .
train
true
13,754
@register.assignment_tag(takes_context=True) def get_back_button(context): request = context.get('request', None) if (not request): raise Exception('Cannot get request from context') referrer = request.META.get('HTTP_REFERER', None) if (not referrer): return None try: url = parse.urlparse(referrer) except (ValueError, TypeError): return None if (request.get_host() != url.netloc): try: Site.objects.get(domain=url.netloc) except Site.DoesNotExist: return None try: match = resolve(url.path) except Resolver404: return None titles = {'search:search': _('Back to search results')} title = titles.get(match.view_name, None) if (title is None): return None return {'url': referrer, 'title': six.text_type(title), 'match': match}
[ "@", "register", ".", "assignment_tag", "(", "takes_context", "=", "True", ")", "def", "get_back_button", "(", "context", ")", ":", "request", "=", "context", ".", "get", "(", "'request'", ",", "None", ")", "if", "(", "not", "request", ")", ":", "raise",...
show back button .
train
false
13,756
def campaign_response(): return s3_rest_controller()
[ "def", "campaign_response", "(", ")", ":", "return", "s3_rest_controller", "(", ")" ]
restful crud controller .
train
false
13,757
def report_status(task, task_id, status): if (task in REPORT_TASKS): logging.debug('Sending task status to the AppScale Portal. Task: {0}, Status: {1}'.format(task, status)) url = '{0}{1}'.format(hermes_constants.PORTAL_URL, hermes_constants.PORTAL_STATUS_PATH) data = urllib.urlencode({JSONTags.TASK_ID: task_id, JSONTags.DEPLOYMENT_ID: get_deployment_id(), JSONTags.STATUS: status}) request = create_request(url=url, method='POST', body=data) for _ in range(REPORT_RETRIES): result = urlfetch(request) if result[JSONTags.SUCCESS]: delete_task_from_mem(task_id) return delete_task_from_mem(task_id)
[ "def", "report_status", "(", "task", ",", "task_id", ",", "status", ")", ":", "if", "(", "task", "in", "REPORT_TASKS", ")", ":", "logging", ".", "debug", "(", "'Sending task status to the AppScale Portal. Task: {0}, Status: {1}'", ".", "format", "(", "task", ",", ...
sends a status report for the given task to the appscale portal .
train
false
13,759
def get_location_path(location=DEFAULT_LOCATION, api_host_suffix=JOYENT_API_HOST_SUFFIX): return '{0}://{1}{2}'.format(_get_proto(), location, api_host_suffix)
[ "def", "get_location_path", "(", "location", "=", "DEFAULT_LOCATION", ",", "api_host_suffix", "=", "JOYENT_API_HOST_SUFFIX", ")", ":", "return", "'{0}://{1}{2}'", ".", "format", "(", "_get_proto", "(", ")", ",", "location", ",", "api_host_suffix", ")" ]
create url from location variable .
train
false
13,760
def ceil_intdiv(a, b): div = int_div(a, b) ret = (cast(neq((a % b), 0), div.dtype) + div) assert (ret.dtype == scal.upcast(div.owner.inputs[0], div.owner.inputs[1])) return ret
[ "def", "ceil_intdiv", "(", "a", ",", "b", ")", ":", "div", "=", "int_div", "(", "a", ",", "b", ")", "ret", "=", "(", "cast", "(", "neq", "(", "(", "a", "%", "b", ")", ",", "0", ")", ",", "div", ".", "dtype", ")", "+", "div", ")", "assert"...
safely compute ceil(float_division) .
train
false
13,761
def mkdirall(path): for ancestor in ancestry(path): if (not os.path.isdir(syspath(ancestor))): try: os.mkdir(syspath(ancestor)) except (OSError, IOError) as exc: raise FilesystemError(exc, 'create', (ancestor,), traceback.format_exc())
[ "def", "mkdirall", "(", "path", ")", ":", "for", "ancestor", "in", "ancestry", "(", "path", ")", ":", "if", "(", "not", "os", ".", "path", ".", "isdir", "(", "syspath", "(", "ancestor", ")", ")", ")", ":", "try", ":", "os", ".", "mkdir", "(", "...
make all the enclosing directories of path .
train
false
13,762
def request_user_is_system_admin(request): user_db = get_user_db_from_request(request=request) return user_is_system_admin(user_db=user_db)
[ "def", "request_user_is_system_admin", "(", "request", ")", ":", "user_db", "=", "get_user_db_from_request", "(", "request", "=", "request", ")", "return", "user_is_system_admin", "(", "user_db", "=", "user_db", ")" ]
check if the logged-in request user has system admin role .
train
false
13,763
@scope.define def idxs_prod(full_idxs, idxs_by_label, llik_by_label): assert (len(set(full_idxs)) == len(full_idxs)) full_idxs = list(full_idxs) rval = np.zeros(len(full_idxs)) pos_of_tid = dict(list(zip(full_idxs, list(range(len(full_idxs)))))) assert (set(idxs_by_label.keys()) == set(llik_by_label.keys())) for nid in idxs_by_label: idxs = idxs_by_label[nid] llik = llik_by_label[nid] assert np.all((np.asarray(idxs) > 1)) assert (len(set(idxs)) == len(idxs)) assert (len(idxs) == len(llik)) for (ii, ll) in zip(idxs, llik): rval[pos_of_tid[ii]] += ll return rval
[ "@", "scope", ".", "define", "def", "idxs_prod", "(", "full_idxs", ",", "idxs_by_label", ",", "llik_by_label", ")", ":", "assert", "(", "len", "(", "set", "(", "full_idxs", ")", ")", "==", "len", "(", "full_idxs", ")", ")", "full_idxs", "=", "list", "(...
add all of the log-likelihoods together by id .
train
false
13,764
def in6_isgladdr(str): return in6_isincluded(str, '2000::', 3)
[ "def", "in6_isgladdr", "(", "str", ")", ":", "return", "in6_isincluded", "(", "str", ",", "'2000::'", ",", "3", ")" ]
returns true if provided address in printable format belongs to _allocated_ global address space .
train
false
13,765
def _bincount_slow(x, weights=None, minlength=None): if (weights is not None): raise NotImplementedError() if (minlength is None): rlen = (np.max(x) + 1) else: rlen = max((np.max(x) + 1), minlength) rval = np.zeros(rlen, dtype='int') for xi in np.asarray(x).flatten(): rval[xi] += 1 return rval
[ "def", "_bincount_slow", "(", "x", ",", "weights", "=", "None", ",", "minlength", "=", "None", ")", ":", "if", "(", "weights", "is", "not", "None", ")", ":", "raise", "NotImplementedError", "(", ")", "if", "(", "minlength", "is", "None", ")", ":", "r...
backport of np .
train
false
13,766
def _node_value(G, node_attr): if (node_attr is None): value = (lambda u: u) elif (not hasattr(node_attr, '__call__')): value = (lambda u: G.node[u][node_attr]) else: value = node_attr return value
[ "def", "_node_value", "(", "G", ",", "node_attr", ")", ":", "if", "(", "node_attr", "is", "None", ")", ":", "value", "=", "(", "lambda", "u", ":", "u", ")", "elif", "(", "not", "hasattr", "(", "node_attr", ",", "'__call__'", ")", ")", ":", "value",...
returns a function that returns a value from g .
train
false
13,768
def fetch_listing(path, limit=1000, batch_size=100): session = requests.Session() session.headers.update({'User-Agent': 'reddit-test-data-generator/1.0'}) base_url = ('https://api.reddit.com' + path) after = None count = 0 while (count < limit): params = {'limit': batch_size, 'count': count} if after: params['after'] = after print '> {}-{}'.format(count, (count + batch_size)) response = session.get(base_url, params=params) response.raise_for_status() listing = get_requests_resp_json(response)['data'] for child in listing['children']: (yield child['data']) count += 1 after = listing['after'] if (not after): break time.sleep(2)
[ "def", "fetch_listing", "(", "path", ",", "limit", "=", "1000", ",", "batch_size", "=", "100", ")", ":", "session", "=", "requests", ".", "Session", "(", ")", "session", ".", "headers", ".", "update", "(", "{", "'User-Agent'", ":", "'reddit-test-data-gener...
fetch a reddit listing from reddit .
train
false
13,769
def strategy_smallest_last(G, colors): H = G.copy(with_data=False) result = deque() degrees = defaultdict(set) lbound = float('inf') for (node, d) in H.degree(): degrees[d].add(node) lbound = min(lbound, d) def find_min_degree(): return next((d for d in itertools.count(lbound) if (d in degrees))) for _ in G: min_degree = find_min_degree() u = degrees[min_degree].pop() if (not degrees[min_degree]): del degrees[min_degree] result.appendleft(u) for v in H[u]: degree = H.degree(v) degrees[degree].remove(v) if (not degrees[degree]): del degrees[degree] degrees[(degree - 1)].add(v) H.remove_node(u) lbound = (min_degree - 1) return result
[ "def", "strategy_smallest_last", "(", "G", ",", "colors", ")", ":", "H", "=", "G", ".", "copy", "(", "with_data", "=", "False", ")", "result", "=", "deque", "(", ")", "degrees", "=", "defaultdict", "(", "set", ")", "lbound", "=", "float", "(", "'inf'...
returns a deque of the nodes of g .
train
false
13,770
def hacked_init(self): engine = create_engine('sqlite:///:memory:', echo=False) storage.Base.metadata.create_all(engine) Session = sessionmaker(bind=engine) self.session = Session()
[ "def", "hacked_init", "(", "self", ")", ":", "engine", "=", "create_engine", "(", "'sqlite:///:memory:'", ",", "echo", "=", "False", ")", "storage", ".", "Base", ".", "metadata", ".", "create_all", "(", "engine", ")", "Session", "=", "sessionmaker", "(", "...
hack for testing .
train
false
13,772
def new(rsa_key): return PKCS115_SigScheme(rsa_key)
[ "def", "new", "(", "rsa_key", ")", ":", "return", "PKCS115_SigScheme", "(", "rsa_key", ")" ]
create a stateful counter block function suitable for ctr encryption modes .
train
false
13,773
def test_freeze_with_requirement_option(script): script.scratch_path.join('hint.txt').write((textwrap.dedent(" INITools==0.1\n NoExist==4.2 # A comment that ensures end of line comments work.\n simple==3.0; python_version > '1.0'\n ") + _freeze_req_opts)) result = script.pip_install_local('initools==0.2') result = script.pip_install_local('simple') result = script.pip('freeze', '--requirement', 'hint.txt', expect_stderr=True) expected = textwrap.dedent(' INITools==0.2\n simple==3.0\n ') expected += _freeze_req_opts expected += '## The following requirements were added by pip freeze:...' _check_output(result.stdout, expected) assert ('Requirement file [hint.txt] contains NoExist==4.2, but that package is not installed' in result.stderr)
[ "def", "test_freeze_with_requirement_option", "(", "script", ")", ":", "script", ".", "scratch_path", ".", "join", "(", "'hint.txt'", ")", ".", "write", "(", "(", "textwrap", ".", "dedent", "(", "\" INITools==0.1\\n NoExist==4.2 # A comment that ensures end...
test that new requirements are created correctly with --requirement hints .
train
false
13,774
def s_initialize(name): if blocks.REQUESTS.has_key(name): raise sex.SullyRuntimeError(('blocks.REQUESTS ALREADY EXISTS: %s' % name)) blocks.REQUESTS[name] = blocks.request(name) blocks.CURRENT = blocks.REQUESTS[name]
[ "def", "s_initialize", "(", "name", ")", ":", "if", "blocks", ".", "REQUESTS", ".", "has_key", "(", "name", ")", ":", "raise", "sex", ".", "SullyRuntimeError", "(", "(", "'blocks.REQUESTS ALREADY EXISTS: %s'", "%", "name", ")", ")", "blocks", ".", "REQUESTS"...
initialize a new block request .
train
false
13,775
def get_collection_ids_matching_query(query_string, cursor=None): returned_collection_ids = [] search_cursor = cursor for _ in range(MAX_ITERATIONS): remaining_to_fetch = (feconf.SEARCH_RESULTS_PAGE_SIZE - len(returned_collection_ids)) (collection_ids, search_cursor) = search_collections(query_string, remaining_to_fetch, cursor=search_cursor) invalid_collection_ids = [] for (ind, model) in enumerate(collection_models.CollectionSummaryModel.get_multi(collection_ids)): if (model is not None): returned_collection_ids.append(collection_ids[ind]) else: invalid_collection_ids.append(collection_ids[ind]) if ((len(returned_collection_ids) == feconf.SEARCH_RESULTS_PAGE_SIZE) or (search_cursor is None)): break else: logging.error(('Search index contains stale collection ids: %s' % ', '.join(invalid_collection_ids))) if ((len(returned_collection_ids) < feconf.SEARCH_RESULTS_PAGE_SIZE) and (search_cursor is not None)): logging.error(('Could not fulfill search request for query string %s; at least %s retries were needed.' % (query_string, MAX_ITERATIONS))) return (returned_collection_ids, search_cursor)
[ "def", "get_collection_ids_matching_query", "(", "query_string", ",", "cursor", "=", "None", ")", ":", "returned_collection_ids", "=", "[", "]", "search_cursor", "=", "cursor", "for", "_", "in", "range", "(", "MAX_ITERATIONS", ")", ":", "remaining_to_fetch", "=", ...
returns a list with all collection ids matching the given search query string .
train
false
13,776
def regularise_html(html): if (html is None): return html = re.sub('\n', ' ', html) matches = re.findall('(<[^>]*>|%[^%]\\([^)]*\\)\\w|[^<%]+|%)', html) for i in xrange(len(matches)): match = matches[i] if (match.startswith('<') or match.startswith('%')): continue matches[i] = re.sub('\\s{2,}', ' ', match) html = ''.join(matches) return html
[ "def", "regularise_html", "(", "html", ")", ":", "if", "(", "html", "is", "None", ")", ":", "return", "html", "=", "re", ".", "sub", "(", "'\\n'", ",", "' '", ",", "html", ")", "matches", "=", "re", ".", "findall", "(", "'(<[^>]*>|%[^%]\\\\([^)]*\\\\)\...
take badly formatted html with strings etc and make it beautiful generally remove surlus whitespace and kill this will break <code><pre> tags but they should not be being translated .
train
false
13,780
def update_translations(include_plugins=False): translations_folder = os.path.join(current_app.root_path, 'translations') source_file = os.path.join(translations_folder, 'messages.pot') subprocess.call(['pybabel', 'extract', '-F', 'babel.cfg', '-k', 'lazy_gettext', '-o', source_file, '.']) subprocess.call(['pybabel', 'update', '-i', source_file, '-d', translations_folder]) if include_plugins: for plugin in plugin_manager.all_plugins: update_plugin_translations(plugin)
[ "def", "update_translations", "(", "include_plugins", "=", "False", ")", ":", "translations_folder", "=", "os", ".", "path", ".", "join", "(", "current_app", ".", "root_path", ",", "'translations'", ")", "source_file", "=", "os", ".", "path", ".", "join", "(...
update translations from a source and target file for a given language .
train
false
13,782
def dbg_print_vars(*args): import inspect parent_locals = inspect.currentframe().f_back.f_locals maps = [] for arg in args: for (name, value) in parent_locals.items(): if (id(arg) == id(value)): maps.append((name, repr(value))) break print('\n'.join((((name + '=') + value) for (name, value) in maps)))
[ "def", "dbg_print_vars", "(", "*", "args", ")", ":", "import", "inspect", "parent_locals", "=", "inspect", ".", "currentframe", "(", ")", ".", "f_back", ".", "f_locals", "maps", "=", "[", "]", "for", "arg", "in", "args", ":", "for", "(", "name", ",", ...
prints name and repr of each arg on a separate line .
train
false
13,783
def _normalize_info(dev): for (sect, val) in dev.items(): if (len(val) == 1): dev[sect] = val[0] return dev
[ "def", "_normalize_info", "(", "dev", ")", ":", "for", "(", "sect", ",", "val", ")", "in", "dev", ".", "items", "(", ")", ":", "if", "(", "len", "(", "val", ")", "==", "1", ")", ":", "dev", "[", "sect", "]", "=", "val", "[", "0", "]", "retu...
replace list with only one element to the value of the element .
train
true
13,784
def mapping_create(index, doc_type, body, hosts=None, profile=None): es = _get_instance(hosts, profile) try: result = es.indices.put_mapping(index=index, doc_type=doc_type, body=body) return mapping_get(index, doc_type) except elasticsearch.exceptions.NotFoundError: return None return None
[ "def", "mapping_create", "(", "index", ",", "doc_type", ",", "body", ",", "hosts", "=", "None", ",", "profile", "=", "None", ")", ":", "es", "=", "_get_instance", "(", "hosts", ",", "profile", ")", "try", ":", "result", "=", "es", ".", "indices", "."...
create a mapping in a given index cli example:: salt myminion elasticsearch .
train
false
13,786
def GetNextId(): global COUNTER COUNTER += 1 return COUNTER
[ "def", "GetNextId", "(", ")", ":", "global", "COUNTER", "COUNTER", "+=", "1", "return", "COUNTER" ]
generate a unique id .
train
false
13,787
def _parens_around_char(label): return '({first}){rest}'.format(first=label[0], rest=label[1:])
[ "def", "_parens_around_char", "(", "label", ")", ":", "return", "'({first}){rest}'", ".", "format", "(", "first", "=", "label", "[", "0", "]", ",", "rest", "=", "label", "[", "1", ":", "]", ")" ]
place parens around first character of label .
train
false
13,788
def check_controller_csrf_prevention(controller): if getattr(controller, 'handles_csrf', False): return mutating_methods = {'POST', 'PUT', 'PATCH', 'DELETE'} for (name, func) in controller.__dict__.iteritems(): (method, sep, action) = name.partition('_') if (not action): continue if (method not in mutating_methods): continue if (not getattr(func, 'handles_csrf', False)): endpoint_name = ':'.join((controller.__name__, name)) msg = ('Handlers that might mutate data must be explicit about CSRF prevention: %s' % endpoint_name) raise CSRFPreventionException(msg)
[ "def", "check_controller_csrf_prevention", "(", "controller", ")", ":", "if", "getattr", "(", "controller", ",", "'handles_csrf'", ",", "False", ")", ":", "return", "mutating_methods", "=", "{", "'POST'", ",", "'PUT'", ",", "'PATCH'", ",", "'DELETE'", "}", "fo...
check that the a controller and its handlers are properly protected from csrf attacks .
train
false
13,790
def _intercept_dot(w, X, y): c = 0.0 if (w.size == (X.shape[1] + 1)): c = w[(-1)] w = w[:(-1)] z = (safe_sparse_dot(X, w) + c) yz = (y * z) return (w, c, yz)
[ "def", "_intercept_dot", "(", "w", ",", "X", ",", "y", ")", ":", "c", "=", "0.0", "if", "(", "w", ".", "size", "==", "(", "X", ".", "shape", "[", "1", "]", "+", "1", ")", ")", ":", "c", "=", "w", "[", "(", "-", "1", ")", "]", "w", "="...
computes y * np .
train
false