id_within_dataset: int64 (1 – 55.5k)
snippet: string (lengths 19 – 14.2k)
tokens: list (lengths 6 – 1.63k)
nl: string (lengths 6 – 352)
split_within_dataset: stringclasses (1 value)
is_duplicated: bool (2 classes)
15,733
def get_old_and_new_values(change_type, message):
    if (change_type in ['subject', 'name', 'estimated_finish', 'estimated_start']):
        old = message['change']['diff'][change_type]['from']
        new = message['change']['diff'][change_type]['to']
        return (old, new)
    try:
        old = message['change']['diff'][change_type]['from']
    except KeyError:
        old = None
    try:
        new = message['change']['diff'][change_type]['to']
    except KeyError:
        new = None
    return (old, new)
[ "def", "get_old_and_new_values", "(", "change_type", ",", "message", ")", ":", "if", "(", "change_type", "in", "[", "'subject'", ",", "'name'", ",", "'estimated_finish'", ",", "'estimated_start'", "]", ")", ":", "old", "=", "message", "[", "'change'", "]", "...
parses the payload and finds previous and current value of change_type .
train
false
15,735
def sigmoid_derivative(y): return (1.0 - (y * y))
[ "def", "sigmoid_derivative", "(", "y", ")", ":", "return", "(", "1.0", "-", "(", "y", "*", "y", ")", ")" ]
backward propagation activation function derivative .
train
false
15,738
def update_firmware_nfs_or_cifs(filename, share, host=None, admin_username=None, admin_password=None):
    if os.path.exists(filename):
        return _update_firmware('update -f {0} -l {1}'.format(filename, share), host=None, admin_username=None, admin_password=None)
    else:
        raise CommandExecutionError('Unable to find firmware file {0}'.format(filename))
[ "def", "update_firmware_nfs_or_cifs", "(", "filename", ",", "share", ",", "host", "=", "None", ",", "admin_username", "=", "None", ",", "admin_password", "=", "None", ")", ":", "if", "os", ".", "path", ".", "exists", "(", "filename", ")", ":", "return", ...
executes the following for cifs .
train
true
15,739
def no_type_check(arg):
    if isinstance(arg, type):
        arg_attrs = arg.__dict__.copy()
        for (attr, val) in arg.__dict__.items():
            if (val in arg.__bases__):
                arg_attrs.pop(attr)
        for obj in arg_attrs.values():
            if isinstance(obj, types.FunctionType):
                obj.__no_type_check__ = True
            if isinstance(obj, type):
                no_type_check(obj)
    try:
        arg.__no_type_check__ = True
    except TypeError:
        pass
    return arg
[ "def", "no_type_check", "(", "arg", ")", ":", "if", "isinstance", "(", "arg", ",", "type", ")", ":", "arg_attrs", "=", "arg", ".", "__dict__", ".", "copy", "(", ")", "for", "(", "attr", ",", "val", ")", "in", "arg", ".", "__dict__", ".", "items", ...
decorator to indicate that annotations are not type hints .
train
true
15,740
def google_fixed_width_font(style):
    font_family = ''
    if ('font-family' in style):
        font_family = style['font-family']
    if (('Courier New' == font_family) or ('Consolas' == font_family)):
        return True
    return False
[ "def", "google_fixed_width_font", "(", "style", ")", ":", "font_family", "=", "''", "if", "(", "'font-family'", "in", "style", ")", ":", "font_family", "=", "style", "[", "'font-family'", "]", "if", "(", "(", "'Courier New'", "==", "font_family", ")", "or", ...
check if the css of the current element defines a fixed width font .
train
true
15,741
def addObserverAndInit(name, cb): vehicle.add_attribute_listener(name, cb)
[ "def", "addObserverAndInit", "(", "name", ",", "cb", ")", ":", "vehicle", ".", "add_attribute_listener", "(", "name", ",", "cb", ")" ]
we go ahead and call our observer once at startup to get an initial value .
train
false
15,743
@attr('requires_hadoop')
def test_live_jobtracker():
    raise SkipTest
    minicluster = pseudo_hdfs4.shared_cluster()
    jt = minicluster.jt
    assert_true(jt.queues())
    assert_true(jt.cluster_status())
    assert_true(jt.all_task_trackers())
    assert_true(jt.active_trackers())
    assert_true(jt.blacklisted_trackers())
    assert_true(jt.running_jobs())
    assert_true(jt.completed_jobs())
    assert_true(jt.failed_jobs())
    assert_true(jt.all_jobs())
    assert_true(jt.get_current_time())
[ "@", "attr", "(", "'requires_hadoop'", ")", "def", "test_live_jobtracker", "(", ")", ":", "raise", "SkipTest", "minicluster", "=", "pseudo_hdfs4", ".", "shared_cluster", "(", ")", "jt", "=", "minicluster", ".", "jt", "assert_true", "(", "jt", ".", "queues", ...
checks that livejobtracker never raises exceptions for most of its calls .
train
false
15,744
def setup_tracing(trace_flags):
    global TRACE_METHOD
    global TRACE_API
    try:
        trace_flags = [flag.strip() for flag in trace_flags]
    except TypeError:
        trace_flags = []
    for invalid_flag in (set(trace_flags) - VALID_TRACE_FLAGS):
        LOG.warning(_LW('Invalid trace flag: %s'), invalid_flag)
    TRACE_METHOD = ('method' in trace_flags)
    TRACE_API = ('api' in trace_flags)
[ "def", "setup_tracing", "(", "trace_flags", ")", ":", "global", "TRACE_METHOD", "global", "TRACE_API", "try", ":", "trace_flags", "=", "[", "flag", ".", "strip", "(", ")", "for", "flag", "in", "trace_flags", "]", "except", "TypeError", ":", "trace_flags", "=...
set global variables for each trace flag .
train
false
15,746
def _request_eip(interface, vm_):
    params = {'Action': 'AllocateAddress'}
    params['Domain'] = interface.setdefault('domain', 'vpc')
    eips = aws.query(params, return_root=True, location=get_location(vm_), provider=get_provider(), opts=__opts__, sigver='4')
    for eip in eips:
        if ('allocationId' in eip):
            return eip['allocationId']
    return None
[ "def", "_request_eip", "(", "interface", ",", "vm_", ")", ":", "params", "=", "{", "'Action'", ":", "'AllocateAddress'", "}", "params", "[", "'Domain'", "]", "=", "interface", ".", "setdefault", "(", "'domain'", ",", "'vpc'", ")", "eips", "=", "aws", "."...
request and return elastic ip .
train
true
15,748
def test_lower(value): return unicode(value).islower()
[ "def", "test_lower", "(", "value", ")", ":", "return", "unicode", "(", "value", ")", ".", "islower", "(", ")" ]
return true if the variable is lowercased .
train
false
15,749
def imresize(im, sz):
    pil_im = Image.fromarray(uint8(im))
    return array(pil_im.resize(sz))
[ "def", "imresize", "(", "im", ",", "sz", ")", ":", "pil_im", "=", "Image", ".", "fromarray", "(", "uint8", "(", "im", ")", ")", "return", "array", "(", "pil_im", ".", "resize", "(", "sz", ")", ")" ]
resize an image by given output size and method .
train
false
15,751
def coerce_date_dict(date_dict):
    keys = ['year', 'month', 'day', 'hour', 'minute', 'second']
    retVal = {'year': 1, 'month': 1, 'day': 1, 'hour': 0, 'minute': 0, 'second': 0}
    modified = False
    for key in keys:
        try:
            retVal[key] = int(date_dict[key])
            modified = True
        except KeyError:
            break
    return ((modified and retVal) or {})
[ "def", "coerce_date_dict", "(", "date_dict", ")", ":", "keys", "=", "[", "'year'", ",", "'month'", ",", "'day'", ",", "'hour'", ",", "'minute'", ",", "'second'", "]", "retVal", "=", "{", "'year'", ":", "1", ",", "'month'", ":", "1", ",", "'day'", ":"...
given a dictionary it returns a tuple that represents a date .
train
true
15,752
def itermessages(conn, channel, queue, limit=1, timeout=None, callbacks=None, **kwargs): return drain_consumer(conn.Consumer(queues=[queue], channel=channel, **kwargs), limit=limit, timeout=timeout, callbacks=callbacks)
[ "def", "itermessages", "(", "conn", ",", "channel", ",", "queue", ",", "limit", "=", "1", ",", "timeout", "=", "None", ",", "callbacks", "=", "None", ",", "**", "kwargs", ")", ":", "return", "drain_consumer", "(", "conn", ".", "Consumer", "(", "queues"...
iterator over messages .
train
false
15,753
def _parse_block_device_mapping(bdm):
    ebs = bdm.pop('ebs', None)
    if ebs:
        ec2_id = ebs.pop('snapshot_id', None)
        if ec2_id:
            if ec2_id.startswith('snap-'):
                bdm['snapshot_id'] = ec2utils.ec2_snap_id_to_uuid(ec2_id)
            elif ec2_id.startswith('vol-'):
                bdm['volume_id'] = ec2utils.ec2_vol_id_to_uuid(ec2_id)
            ebs.setdefault('delete_on_termination', True)
        bdm.update(ebs)
    return bdm
[ "def", "_parse_block_device_mapping", "(", "bdm", ")", ":", "ebs", "=", "bdm", ".", "pop", "(", "'ebs'", ",", "None", ")", "if", "ebs", ":", "ec2_id", "=", "ebs", ".", "pop", "(", "'snapshot_id'", ",", "None", ")", "if", "ec2_id", ":", "if", "ec2_id"...
parse blockdevicemappingitemtype into flat hash blockdevicedmapping .
train
false
15,754
def invregexp(regex):
    invReGenerator = GroupEmitter(parser().parseString(regex)).make_generator()
    return invReGenerator()
[ "def", "invregexp", "(", "regex", ")", ":", "invReGenerator", "=", "GroupEmitter", "(", "parser", "(", ")", ".", "parseString", "(", "regex", ")", ")", ".", "make_generator", "(", ")", "return", "invReGenerator", "(", ")" ]
call this routine as a generator to return all the strings that match the input regular expression .
train
false
15,755
def _create_mock_json_request(user, data, method='POST'):
    factory = RequestFactory()
    request = factory.generic(method, '/', content_type='application/json', data=json.dumps(data))
    request.user = user
    request.session = {}
    return request
[ "def", "_create_mock_json_request", "(", "user", ",", "data", ",", "method", "=", "'POST'", ")", ":", "factory", "=", "RequestFactory", "(", ")", "request", "=", "factory", ".", "generic", "(", "method", ",", "'/'", ",", "content_type", "=", "'application/js...
returns a mock json request for the specified user .
train
false
15,756
def convert_xml_to_csv():
    try:
        phishtank_db_fd = file(XML_DB_FILE, 'r')
    except Exception as e:
        msg = 'Failed to open XML phishtank database: "%s", exception: "%s".'
        sys.exit((msg % (XML_DB_FILE, e)))
    try:
        output_csv_file = file(CSV_DB_FILE, 'w')
    except Exception as e:
        msg = 'Failed to open CSV phishtank database: "%s", exception: "%s".'
        sys.exit((msg % (CSV_DB_FILE, e)))
    pt_handler = PhishTankHandler(output_csv_file)
    parser = etree.HTMLParser(recover=True, target=pt_handler)
    print 'Starting the phishtank XML conversion.'
    try:
        etree.parse(phishtank_db_fd, parser)
    except Exception as e:
        msg = 'XML parsing error in phishtank DB, exception: "%s".'
        sys.exit((msg % e))
    print 'Finished XML conversion.'
[ "def", "convert_xml_to_csv", "(", ")", ":", "try", ":", "phishtank_db_fd", "=", "file", "(", "XML_DB_FILE", ",", "'r'", ")", "except", "Exception", "as", "e", ":", "msg", "=", "'Failed to open XML phishtank database: \"%s\", exception: \"%s\".'", "sys", ".", "exit",...
had to do this because xml parsing with lxml is slow and memory intensive .
train
false
15,757
def check_vlan_exists(eapi_conn, vlan_id):
    vlan_id = str(vlan_id)
    cmd = 'show vlan id {}'.format(vlan_id)
    try:
        response = eapi_conn.enable(cmd)
        check_vlan = pyeapi_result(response)['vlans']
        return check_vlan[vlan_id]['name']
    except (pyeapi.eapilib.CommandError, KeyError):
        pass
    return False
[ "def", "check_vlan_exists", "(", "eapi_conn", ",", "vlan_id", ")", ":", "vlan_id", "=", "str", "(", "vlan_id", ")", "cmd", "=", "'show vlan id {}'", ".", "format", "(", "vlan_id", ")", "try", ":", "response", "=", "eapi_conn", ".", "enable", "(", "cmd", ...
check if the given vlan exists return either vlan_name or false .
train
false
15,760
def apply_sync(func, args=(), kwds={}, callback=None):
    res = func(*args, **kwds)
    if (callback is not None):
        callback(res)
[ "def", "apply_sync", "(", "func", ",", "args", "=", "(", ")", ",", "kwds", "=", "{", "}", ",", "callback", "=", "None", ")", ":", "res", "=", "func", "(", "*", "args", ",", "**", "kwds", ")", "if", "(", "callback", "is", "not", "None", ")", "...
a naive synchronous version of apply_async .
train
false
15,764
def save_new_collection_from_yaml(committer_id, yaml_content, collection_id):
    collection = collection_domain.Collection.from_yaml(collection_id, yaml_content)
    commit_message = ("New collection created from YAML file with title '%s'." % collection.title)
    _create_collection(committer_id, collection, commit_message, [{'cmd': CMD_CREATE_NEW, 'title': collection.title, 'category': collection.category}])
    return collection
[ "def", "save_new_collection_from_yaml", "(", "committer_id", ",", "yaml_content", ",", "collection_id", ")", ":", "collection", "=", "collection_domain", ".", "Collection", ".", "from_yaml", "(", "collection_id", ",", "yaml_content", ")", "commit_message", "=", "(", ...
saves a new collection from a yaml content string .
train
false
15,765
def _fold_case(info, string):
    flags = info.flags
    if ((flags & _ALL_ENCODINGS) == 0):
        flags |= info.guess_encoding
    return _regex.fold_case(flags, string)
[ "def", "_fold_case", "(", "info", ",", "string", ")", ":", "flags", "=", "info", ".", "flags", "if", "(", "(", "flags", "&", "_ALL_ENCODINGS", ")", "==", "0", ")", ":", "flags", "|=", "info", ".", "guess_encoding", "return", "_regex", ".", "fold_case",...
folds the case of a string .
train
false
15,766
def _class_means(X, y):
    means = []
    classes = np.unique(y)
    for group in classes:
        Xg = X[(y == group), :]
        means.append(Xg.mean(0))
    return np.asarray(means)
[ "def", "_class_means", "(", "X", ",", "y", ")", ":", "means", "=", "[", "]", "classes", "=", "np", ".", "unique", "(", "y", ")", "for", "group", "in", "classes", ":", "Xg", "=", "X", "[", "(", "y", "==", "group", ")", ",", ":", "]", "means", ...
compute class means .
train
false
15,768
def _host_lease(fixedip):
    timestamp = timeutils.utcnow()
    seconds_since_epoch = calendar.timegm(timestamp.utctimetuple())
    return ('%d %s %s %s *' % ((seconds_since_epoch + CONF.dhcp_lease_time), fixedip.virtual_interface.address, fixedip.address, (fixedip.instance.hostname or '*')))
[ "def", "_host_lease", "(", "fixedip", ")", ":", "timestamp", "=", "timeutils", ".", "utcnow", "(", ")", "seconds_since_epoch", "=", "calendar", ".", "timegm", "(", "timestamp", ".", "utctimetuple", "(", ")", ")", "return", "(", "'%d %s %s %s *'", "%", "(", ...
return a host string for an address in leasefile format .
train
false
15,769
def _getLastMessageFormated(acc):
    m = _getLastMessage(acc)
    if (m is None):
        return 'None'
    t = datetime.datetime.fromtimestamp(m[0]).strftime('%Y-%m-%d %H:%M')
    return u'{}: {}'.format(t, m[1])
[ "def", "_getLastMessageFormated", "(", "acc", ")", ":", "m", "=", "_getLastMessage", "(", "acc", ")", "if", "(", "m", "is", "None", ")", ":", "return", "'None'", "t", "=", "datetime", ".", "datetime", ".", "fromtimestamp", "(", "m", "[", "0", "]", ")...
returns the last message as a formated string .
train
false
15,771
def _iterate_columns(X, columns=None):
    if (columns is None):
        columns = range(X.shape[1])
    if issparse(X):
        for i in columns:
            x = np.zeros(X.shape[0])
            (start_ptr, end_ptr) = (X.indptr[i], X.indptr[(i + 1)])
            x[X.indices[start_ptr:end_ptr]] = X.data[start_ptr:end_ptr]
            (yield x)
    else:
        for i in columns:
            (yield X[:, i])
[ "def", "_iterate_columns", "(", "X", ",", "columns", "=", "None", ")", ":", "if", "(", "columns", "is", "None", ")", ":", "columns", "=", "range", "(", "X", ".", "shape", "[", "1", "]", ")", "if", "issparse", "(", "X", ")", ":", "for", "i", "in...
iterate over columns of a matrix .
train
false
15,772
def neighbors(G, n): return G.neighbors(n)
[ "def", "neighbors", "(", "G", ",", "n", ")", ":", "return", "G", ".", "neighbors", "(", "n", ")" ]
return a list of nodes connected to node n .
train
false
15,773
def rand_int_id(start=0, end=2147483647): return random.randint(start, end)
[ "def", "rand_int_id", "(", "start", "=", "0", ",", "end", "=", "2147483647", ")", ":", "return", "random", ".", "randint", "(", "start", ",", "end", ")" ]
generate a random integer value .
train
false
15,774
def numpy_cupy_array_max_ulp(maxulp=1, dtype=None, name='xp', type_check=True, accept_error=False):
    def check_func(x, y):
        array.assert_array_max_ulp(x, y, maxulp, dtype)
    return _make_decorator(check_func, name, type_check, accept_error)
[ "def", "numpy_cupy_array_max_ulp", "(", "maxulp", "=", "1", ",", "dtype", "=", "None", ",", "name", "=", "'xp'", ",", "type_check", "=", "True", ",", "accept_error", "=", "False", ")", ":", "def", "check_func", "(", "x", ",", "y", ")", ":", "array", ...
decorator that checks results of numpy and cupy ones are equal w .
train
false
15,776
def test_ast_bad_defclass():
    cant_compile(u'(defclass)')
    cant_compile(u'(defclass a None)')
    cant_compile(u'(defclass a None None)')
[ "def", "test_ast_bad_defclass", "(", ")", ":", "cant_compile", "(", "u'(defclass)'", ")", "cant_compile", "(", "u'(defclass a None)'", ")", "cant_compile", "(", "u'(defclass a None None)'", ")" ]
make sure ast cant compile invalid defclass .
train
false
15,777
def select_files_from_tree(file_tree):
    selected = {}
    stack = [file_tree]
    while len(stack):
        file_node = stack.pop(0)
        target_files = [f for f in file_node['children'] if (f['kind'] == 'file')]
        if target_files:
            target_file = target_files[0]
            selected[target_file['extra']['hashes']['sha256']] = target_file
        target_folders = [f for f in file_node['children'] if (f['kind'] == 'folder')]
        if target_folders:
            stack.append(target_folders[0])
    return selected
[ "def", "select_files_from_tree", "(", "file_tree", ")", ":", "selected", "=", "{", "}", "stack", "=", "[", "file_tree", "]", "while", "len", "(", "stack", ")", ":", "file_node", "=", "stack", ".", "pop", "(", "0", ")", "target_files", "=", "[", "f", ...
select a file from every depth of a file_tree .
train
false
15,778
def compile_translations(include_plugins=False):
    translations_folder = os.path.join(current_app.root_path, 'translations')
    subprocess.call(['pybabel', 'compile', '-d', translations_folder])
    if include_plugins:
        for plugin in plugin_manager.all_plugins:
            compile_plugin_translations(plugin)
[ "def", "compile_translations", "(", "include_plugins", "=", "False", ")", ":", "translations_folder", "=", "os", ".", "path", ".", "join", "(", "current_app", ".", "root_path", ",", "'translations'", ")", "subprocess", ".", "call", "(", "[", "'pybabel'", ",", ...
compiles all translations .
train
false
15,779
def _pad_for_encryption(message, target_length):
    max_msglength = (target_length - 11)
    msglength = len(message)
    if (msglength > max_msglength):
        raise OverflowError(('%i bytes needed for message, but there is only space for %i' % (msglength, max_msglength)))
    padding = b('')
    padding_length = ((target_length - msglength) - 3)
    while (len(padding) < padding_length):
        needed_bytes = (padding_length - len(padding))
        new_padding = os.urandom((needed_bytes + 5))
        new_padding = new_padding.replace(b('\x00'), b(''))
        padding = (padding + new_padding[:needed_bytes])
    assert (len(padding) == padding_length)
    return b('').join([b('\x00\x02'), padding, b('\x00'), message])
[ "def", "_pad_for_encryption", "(", "message", ",", "target_length", ")", ":", "max_msglength", "=", "(", "target_length", "-", "11", ")", "msglength", "=", "len", "(", "message", ")", "if", "(", "msglength", ">", "max_msglength", ")", ":", "raise", "Overflow...
pads the message for encryption .
train
false
15,780
def check_instance_state(vm_state=None, task_state=(None,), must_have_launched=True):
    if ((vm_state is not None) and (not isinstance(vm_state, set))):
        vm_state = set(vm_state)
    if ((task_state is not None) and (not isinstance(task_state, set))):
        task_state = set(task_state)

    def outer(f):
        @six.wraps(f)
        def inner(self, context, instance, *args, **kw):
            if ((vm_state is not None) and (instance.vm_state not in vm_state)):
                raise exception.InstanceInvalidState(attr='vm_state', instance_uuid=instance.uuid, state=instance.vm_state, method=f.__name__)
            if ((task_state is not None) and (instance.task_state not in task_state)):
                raise exception.InstanceInvalidState(attr='task_state', instance_uuid=instance.uuid, state=instance.task_state, method=f.__name__)
            if (must_have_launched and (not instance.launched_at)):
                raise exception.InstanceInvalidState(attr='launched_at', instance_uuid=instance.uuid, state=instance.launched_at, method=f.__name__)
            return f(self, context, instance, *args, **kw)
        return inner
    return outer
[ "def", "check_instance_state", "(", "vm_state", "=", "None", ",", "task_state", "=", "(", "None", ",", ")", ",", "must_have_launched", "=", "True", ")", ":", "if", "(", "(", "vm_state", "is", "not", "None", ")", "and", "(", "not", "isinstance", "(", "v...
decorator to check vm and/or task state before entry to api functions .
train
false
15,781
def shortest_hops(gr, s):
    if (not gr.has_node(s)):
        raise Exception(('Node %s is not in graph' % s))
    else:
        dist = {}
        q = deque([s])
        nodes_explored = set([s])
        for n in gr.nodes():
            if (n == s):
                dist[n] = 0
            else:
                dist[n] = float('inf')
        while (len(q) != 0):
            node = q.popleft()
            for each in gr.neighbors(node):
                if (each not in nodes_explored):
                    nodes_explored.add(each)
                    q.append(each)
                    dist[each] = (dist[node] + 1)
        return dist
[ "def", "shortest_hops", "(", "gr", ",", "s", ")", ":", "if", "(", "not", "gr", ".", "has_node", "(", "s", ")", ")", ":", "raise", "Exception", "(", "(", "'Node %s is not in graph'", "%", "s", ")", ")", "else", ":", "dist", "=", "{", "}", "q", "="...
finds the shortest number of hops required to reach a node from s .
train
false
15,782
def _match_request(http_request, stored_request):
    if ((http_request.uri.host is not None) and (http_request.uri.host != stored_request.uri.host)):
        return False
    elif (http_request.uri.path != stored_request.uri.path):
        return False
    elif (http_request.method != stored_request.method):
        return False
    elif (('gsessionid' in http_request.uri.query) or ('gsessionid' in stored_request.uri.query)):
        if ('gsessionid' not in stored_request.uri.query):
            return False
        elif ('gsessionid' not in http_request.uri.query):
            return False
        elif (http_request.uri.query['gsessionid'] != stored_request.uri.query['gsessionid']):
            return False
    return True
[ "def", "_match_request", "(", "http_request", ",", "stored_request", ")", ":", "if", "(", "(", "http_request", ".", "uri", ".", "host", "is", "not", "None", ")", "and", "(", "http_request", ".", "uri", ".", "host", "!=", "stored_request", ".", "uri", "."...
determines whether a request is similar enough to a stored request to cause the stored response to be returned .
train
false
15,784
def youtube_video_transcript_name(youtube_text_api):
    utf8_parser = etree.XMLParser(encoding='utf-8')
    transcripts_param = {'type': 'list', 'v': youtube_text_api['params']['v']}
    lang = youtube_text_api['params']['lang']
    youtube_response = requests.get(('http://' + youtube_text_api['url']), params=transcripts_param)
    if ((youtube_response.status_code == 200) and youtube_response.text):
        youtube_data = etree.fromstring(youtube_response.content, parser=utf8_parser)
        for element in youtube_data:
            if ((element.tag == 'track') and (element.get('lang_code', '') == lang)):
                return element.get('name')
    return None
[ "def", "youtube_video_transcript_name", "(", "youtube_text_api", ")", ":", "utf8_parser", "=", "etree", ".", "XMLParser", "(", "encoding", "=", "'utf-8'", ")", "transcripts_param", "=", "{", "'type'", ":", "'list'", ",", "'v'", ":", "youtube_text_api", "[", "'pa...
get the transcript name from available transcripts of video with respect to language from youtube server .
train
false
15,785
def status_handler(options, log, arg): print 'Everything seems to be fine!'
[ "def", "status_handler", "(", "options", ",", "log", ",", "arg", ")", ":", "print", "'Everything seems to be fine!'" ]
query the status of something .
train
false
15,786
def any2sparse(vec, eps=1e-09):
    if isinstance(vec, np.ndarray):
        return dense2vec(vec, eps)
    if scipy.sparse.issparse(vec):
        return scipy2sparse(vec, eps)
    return [(int(fid), float(fw)) for (fid, fw) in vec if (np.abs(fw) > eps)]
[ "def", "any2sparse", "(", "vec", ",", "eps", "=", "1e-09", ")", ":", "if", "isinstance", "(", "vec", ",", "np", ".", "ndarray", ")", ":", "return", "dense2vec", "(", "vec", ",", "eps", ")", "if", "scipy", ".", "sparse", ".", "issparse", "(", "vec",...
convert a np/scipy vector into gensim document format .
train
false
15,787
def is_not_url_safe(name): return (len(list_url_unsafe_chars(name)) > 0)
[ "def", "is_not_url_safe", "(", "name", ")", ":", "return", "(", "len", "(", "list_url_unsafe_chars", "(", "name", ")", ")", ">", "0", ")" ]
check if a string contains any url reserved characters .
train
false
15,789
def kill_process(proc):
    p1_group = psutil.Process(proc.pid)
    child_pids = p1_group.get_children(recursive=True)
    for child_pid in child_pids:
        os.kill(child_pid.pid, signal.SIGKILL)
[ "def", "kill_process", "(", "proc", ")", ":", "p1_group", "=", "psutil", ".", "Process", "(", "proc", ".", "pid", ")", "child_pids", "=", "p1_group", ".", "get_children", "(", "recursive", "=", "True", ")", "for", "child_pid", "in", "child_pids", ":", "o...
kill the process with the given pid using the given signal .
train
false
15,790
def join_segmentations(s1, s2):
    if (s1.shape != s2.shape):
        raise ValueError(('Cannot join segmentations of different shape. ' + ('s1.shape: %s, s2.shape: %s' % (s1.shape, s2.shape))))
    s1 = relabel_sequential(s1)[0]
    s2 = relabel_sequential(s2)[0]
    j = (((s2.max() + 1) * s1) + s2)
    j = relabel_sequential(j)[0]
    return j
[ "def", "join_segmentations", "(", "s1", ",", "s2", ")", ":", "if", "(", "s1", ".", "shape", "!=", "s2", ".", "shape", ")", ":", "raise", "ValueError", "(", "(", "'Cannot join segmentations of different shape. '", "+", "(", "'s1.shape: %s, s2.shape: %s'", "%", ...
return the join of the two input segmentations .
train
false
15,791
def GetResourceAclSample():
    client = CreateClient()
    for resource in client.GetResources(limit=5).entry:
        acl_feed = client.GetResourceAcl(resource)
        for acl in acl_feed.entry:
            print acl.role.value, acl.scope.type, acl.scope.value
[ "def", "GetResourceAclSample", "(", ")", ":", "client", "=", "CreateClient", "(", ")", "for", "resource", "in", "client", ".", "GetResources", "(", "limit", "=", "5", ")", ".", "entry", ":", "acl_feed", "=", "client", ".", "GetResourceAcl", "(", "resource"...
get and display the acl for a resource .
train
false
15,794
def get_affected_packages(args):
    recipes_dir = args.recipes_dir
    hours = args.diff_hours
    cmd = ('cd \'%s\' && git log --diff-filter=ACMRTUXB --name-only --pretty="" --since="%s hours ago" | grep -E \'^recipes/.*/meta.yaml\' | sort | uniq' % (recipes_dir, hours))
    pkg_list = check_output(cmd, shell=True)
    ret = list()
    for pkg in pkg_list.strip().split('\n'):
        if (pkg and os.path.exists(os.path.join(recipes_dir, pkg))):
            ret.append((get_pkg_name(args, pkg), get_tests(args, pkg)))
    return ret
[ "def", "get_affected_packages", "(", "args", ")", ":", "recipes_dir", "=", "args", ".", "recipes_dir", "hours", "=", "args", ".", "diff_hours", "cmd", "=", "(", "'cd \\'%s\\' && git log --diff-filter=ACMRTUXB --name-only --pretty=\"\" --since=\"%s hours ago\" | grep -E \\'^reci...
return a list of all meta .
train
false
15,795
def host_is_ipv6(hostname):
    if ((not hostname) or (not isinstance(hostname, str))):
        return False
    if hostname.startswith('['):
        return True
    if (len(hostname.split(':')) > 2):
        return True
    return False
[ "def", "host_is_ipv6", "(", "hostname", ")", ":", "if", "(", "(", "not", "hostname", ")", "or", "(", "not", "isinstance", "(", "hostname", ",", "str", ")", ")", ")", ":", "return", "False", "if", "hostname", ".", "startswith", "(", "'['", ")", ":", ...
detect if the hostname is an ipv6 host .
train
false
15,796
def test_ast_bad_assoc():
    cant_compile(u'(assoc)')
    cant_compile(u'(assoc 1)')
    cant_compile(u'(assoc 1 2)')
    cant_compile(u'(assoc 1 2 3 4)')
[ "def", "test_ast_bad_assoc", "(", ")", ":", "cant_compile", "(", "u'(assoc)'", ")", "cant_compile", "(", "u'(assoc 1)'", ")", "cant_compile", "(", "u'(assoc 1 2)'", ")", "cant_compile", "(", "u'(assoc 1 2 3 4)'", ")" ]
make sure ast cant compile invalid assoc .
train
false
15,797
def UTF8StringToArray(instr): return [ord(c) for c in instr]
[ "def", "UTF8StringToArray", "(", "instr", ")", ":", "return", "[", "ord", "(", "c", ")", "for", "c", "in", "instr", "]" ]
converts utf-8 strings to codepoints array .
train
false
15,798
def render_to_string(template_name, context=None, context_instance=None, response_format='html'):
    if (context is None):
        context = {}
    if ((not response_format) or ('pdf' in response_format) or (response_format not in settings.HARDTREE_RESPONSE_FORMATS)):
        response_format = 'html'
    if (not (('.' + response_format) in template_name)):
        template_name += ('.' + response_format)
    template_name = ((response_format + '/') + template_name)
    context['response_format'] = response_format
    if context_instance:
        context['site_domain'] = RequestSite(context_instance['request']).domain
    context = _preprocess_context_html(context)
    rendered_string = loader.render_to_string(template_name, context, context_instance)
    return rendered_string
[ "def", "render_to_string", "(", "template_name", ",", "context", "=", "None", ",", "context_instance", "=", "None", ",", "response_format", "=", "'html'", ")", ":", "if", "(", "context", "is", "None", ")", ":", "context", "=", "{", "}", "if", "(", "(", ...
wrapper around django .
train
false
15,799
def append_if(array, item):
    if (item not in array):
        array.append(item)
        return True
    return False
[ "def", "append_if", "(", "array", ",", "item", ")", ":", "if", "(", "item", "not", "in", "array", ")", ":", "array", ".", "append", "(", "item", ")", "return", "True", "return", "False" ]
append an item to an array if its not already in it .
train
false
15,800
def invert_transform(trans): return Transform(trans['to'], trans['from'], linalg.inv(trans['trans']))
[ "def", "invert_transform", "(", "trans", ")", ":", "return", "Transform", "(", "trans", "[", "'to'", "]", ",", "trans", "[", "'from'", "]", ",", "linalg", ".", "inv", "(", "trans", "[", "'trans'", "]", ")", ")" ]
invert a transformation between coordinate systems .
train
false
15,801
def register_importer():
    def test(importer):
        return (importer.__class__.__name__ == ModuleImporterFromVariables.__name__)
    already_registered = any([True for i in sys.meta_path if test(i)])
    if (not already_registered):
        importer = ModuleImporterFromVariables(restrict_to=['SelfWrapper'])
        sys.meta_path.insert(0, importer)
    return (not already_registered)
[ "def", "register_importer", "(", ")", ":", "def", "test", "(", "importer", ")", ":", "return", "(", "importer", ".", "__class__", ".", "__name__", "==", "ModuleImporterFromVariables", ".", "__name__", ")", "already_registered", "=", "any", "(", "[", "True", ...
registers our fancy importer that can let us import from a module name .
train
true
15,802
def read_int8(fid): return _unpack_simple(fid, '>i1', np.int8)
[ "def", "read_int8", "(", "fid", ")", ":", "return", "_unpack_simple", "(", "fid", ",", "'>i1'", ",", "np", ".", "int8", ")" ]
read 8bit integer from bti file .
train
false
15,804
def run_all_pillar(pillar_name):
    data = _execute_pillar(pillar_name, run_all)
    return data
[ "def", "run_all_pillar", "(", "pillar_name", ")", ":", "data", "=", "_execute_pillar", "(", "pillar_name", ",", "run_all", ")", "return", "data" ]
run one or more nagios plugins from pillar data and get the result of cmd .
train
false
15,805
def is_zipfile(filename):
    result = False
    try:
        if hasattr(filename, 'read'):
            result = _check_zipfile(filename)
        else:
            with open(filename, 'rb') as fp:
                result = _check_zipfile(fp)
    except IOError:
        pass
    return result
[ "def", "is_zipfile", "(", "filename", ")", ":", "result", "=", "False", "try", ":", "if", "hasattr", "(", "filename", ",", "'read'", ")", ":", "result", "=", "_check_zipfile", "(", "filename", ")", "else", ":", "with", "open", "(", "filename", ",", "'r...
quickly see if file is a zip file by checking the magic number .
train
true
15,807
def games(year, week=None, home=None, away=None, kind='REG', started=False): return list(games_gen(year, week, home, away, kind, started))
[ "def", "games", "(", "year", ",", "week", "=", "None", ",", "home", "=", "None", ",", "away", "=", "None", ",", "kind", "=", "'REG'", ",", "started", "=", "False", ")", ":", "return", "list", "(", "games_gen", "(", "year", ",", "week", ",", "home...
games returns a list of all games matching the given criteria .
train
false
15,808
def validate_attributes(config):
    if (CONF_ATTRS not in config):
        config[CONF_ATTRS] = {}
    elif (not isinstance(config[CONF_ATTRS], dict)):
        _LOGGER.warning('Universal Media Player (%s) specified attributes not dict in config. They will be ignored.', config[CONF_NAME])
        config[CONF_ATTRS] = {}
    for (key, val) in config[CONF_ATTRS].items():
        attr = val.split('|', 1)
        if (len(attr) == 1):
            attr.append(None)
        config[CONF_ATTRS][key] = attr
[ "def", "validate_attributes", "(", "config", ")", ":", "if", "(", "CONF_ATTRS", "not", "in", "config", ")", ":", "config", "[", "CONF_ATTRS", "]", "=", "{", "}", "elif", "(", "not", "isinstance", "(", "config", "[", "CONF_ATTRS", "]", ",", "dict", ")",...
validate attributes .
train
false
15,812
def set_modified_on(f):
    from mkt.site.tasks import set_modified_on_object

    @functools.wraps(f)
    def wrapper(*args, **kw):
        objs = kw.pop('set_modified_on', None)
        result = f(*args, **kw)
        if (objs and result):
            extra_kwargs = (result if isinstance(result, dict) else {})
            for obj in objs:
                task_log.info(('Delaying setting modified on object: %s, %s' % (obj.__class__.__name__, obj.pk)))
                set_modified_on_object.apply_async(args=[obj._meta.app_label, obj._meta.model_name, obj.pk], kwargs=extra_kwargs, eta=(datetime.datetime.now() + datetime.timedelta(seconds=settings.NFS_LAG_DELAY)))
        return result
    return wrapper
[ "def", "set_modified_on", "(", "f", ")", ":", "from", "mkt", ".", "site", ".", "tasks", "import", "set_modified_on_object", "@", "functools", ".", "wraps", "(", "f", ")", "def", "wrapper", "(", "*", "args", ",", "**", "kw", ")", ":", "objs", "=", "kw...
will update the modified timestamp on the provided objects when the wrapped function exits sucessfully .
train
false
15,813
def py3k_bytes(b):
    try:
        return b.tobytes()
    except AttributeError:
        try:
            return ''.join((chr(x) for x in b))
        except TypeError:
            return bytes(b)
[ "def", "py3k_bytes", "(", "b", ")", ":", "try", ":", "return", "b", ".", "tobytes", "(", ")", "except", "AttributeError", ":", "try", ":", "return", "''", ".", "join", "(", "(", "chr", "(", "x", ")", "for", "x", "in", "b", ")", ")", "except", "...
emulate the py3k bytes() constructor .
train
false
15,814
def test_confusion_matrix_sample_weight():
    (y_true, y_pred, _) = make_prediction(binary=False)
    weights = ((([0.1] * 25) + ([0.2] * 25)) + ([0.3] * 25))
    cm = confusion_matrix(y_true, y_pred, sample_weight=weights)
    true_cm = (((0.1 * confusion_matrix(y_true[:25], y_pred[:25])) + (0.2 * confusion_matrix(y_true[25:50], y_pred[25:50]))) + (0.3 * confusion_matrix(y_true[50:], y_pred[50:])))
    assert_array_almost_equal(cm, true_cm)
    assert_raises(ValueError, confusion_matrix, y_true, y_pred, sample_weight=weights[:(-1)])
[ "def", "test_confusion_matrix_sample_weight", "(", ")", ":", "(", "y_true", ",", "y_pred", ",", "_", ")", "=", "make_prediction", "(", "binary", "=", "False", ")", "weights", "=", "(", "(", "(", "[", "0.1", "]", "*", "25", ")", "+", "(", "[", "0.2", ...
test confusion matrix - case with sample_weight .
train
false
15,815
def test_feature_description():
    string = u'\n # lang: en-us\n Feature: FEATURE NAME! #@@$%\u02c6&*)(*%$E#\n here comes\n the description\n of the scenario\n really!\n '

    class FakeFeature:
        description = 'the description\nof the scenario\n'

    description = core.FeatureDescription(FakeFeature, __file__, string, core.Language())
    assert_equals(description.file, core.fs.relpath(__file__))
    assert_not_equals(description.file, __file__)
    assert_equals(description.line, 3)
    assert_equals(description.description_at, (5, 6))
[ "def", "test_feature_description", "(", ")", ":", "string", "=", "u'\\n # lang: en-us\\n Feature: FEATURE NAME! #@@$%\\u02c6&*)(*%$E#\\n here comes\\n the description\\n of the scenario\\n really!\\n '", "class", "FakeFeature", ":", "description", "=", "'the descriptio...
feature description takes a feature .
train
false
15,817
def make_path_spec_relative(filename, spec_dir):
    if os.path.isabs(filename):
        return filename
    else:
        filename = os.path.abspath(filename)
        filename = os.path.relpath(filename, start=spec_dir)
        return filename
[ "def", "make_path_spec_relative", "(", "filename", ",", "spec_dir", ")", ":", "if", "os", ".", "path", ".", "isabs", "(", "filename", ")", ":", "return", "filename", "else", ":", "filename", "=", "os", ".", "path", ".", "abspath", "(", "filename", ")", ...
make the filename relative to the directory containing .
train
false
15,818
def hinge_loss(y_true, pred_decision, labels=None, sample_weight=None):
    check_consistent_length(y_true, pred_decision, sample_weight)
    pred_decision = check_array(pred_decision, ensure_2d=False)
    y_true = column_or_1d(y_true)
    y_true_unique = np.unique(y_true)
    if (y_true_unique.size > 2):
        if ((labels is None) and (pred_decision.ndim > 1) and (np.size(y_true_unique) != pred_decision.shape[1])):
            raise ValueError('Please include all labels in y_true or pass labels as third argument')
        if (labels is None):
            labels = y_true_unique
        le = LabelEncoder()
        le.fit(labels)
        y_true = le.transform(y_true)
        mask = np.ones_like(pred_decision, dtype=bool)
        mask[(np.arange(y_true.shape[0]), y_true)] = False
        margin = pred_decision[(~ mask)]
        margin -= np.max(pred_decision[mask].reshape(y_true.shape[0], (-1)), axis=1)
    else:
        pred_decision = column_or_1d(pred_decision)
        pred_decision = np.ravel(pred_decision)
        lbin = LabelBinarizer(neg_label=(-1))
        y_true = lbin.fit_transform(y_true)[:, 0]
        try:
            margin = (y_true * pred_decision)
        except TypeError:
            raise TypeError('pred_decision should be an array of floats.')
    losses = (1 - margin)
    losses[(losses <= 0)] = 0
    return np.average(losses, weights=sample_weight)
[ "def", "hinge_loss", "(", "y_true", ",", "pred_decision", ",", "labels", "=", "None", ",", "sample_weight", "=", "None", ")", ":", "check_consistent_length", "(", "y_true", ",", "pred_decision", ",", "sample_weight", ")", "pred_decision", "=", "check_array", "("...
hinge loss .
train
false
15,819
def url_slash_cleaner(url): return re.sub('(?<!:)/{2,}', '/', url)
[ "def", "url_slash_cleaner", "(", "url", ")", ":", "return", "re", ".", "sub", "(", "'(?<!:)/{2,}'", ",", "'/'", ",", "url", ")" ]
removes redundant /s from urls .
train
false
15,821
def set_prefs(prefs):
    prefs['ignored_resources'] = ['*.pyc', '*~', '.ropeproject', '.hg', '.svn', '_svn', '.git', '.tox', '.env', 'env', 'venv', 'node_modules', 'bower_components']
    prefs['save_objectdb'] = True
    prefs['compress_objectdb'] = False
    prefs['automatic_soa'] = True
    prefs['soa_followed_calls'] = 0
    prefs['perform_doa'] = True
    prefs['validate_objectdb'] = True
    prefs['max_history_items'] = 32
    prefs['save_history'] = True
    prefs['compress_history'] = False
    prefs['indent_size'] = 4
    prefs['extension_modules'] = []
    prefs['import_dynload_stdmods'] = True
    prefs['ignore_syntax_errors'] = False
    prefs['ignore_bad_imports'] = False
    prefs['prefer_module_from_imports'] = False
    prefs['split_imports'] = False
    prefs['sort_imports_alphabetically'] = False
[ "def", "set_prefs", "(", "prefs", ")", ":", "prefs", "[", "'ignored_resources'", "]", "=", "[", "'*.pyc'", ",", "'*~'", ",", "'.ropeproject'", ",", "'.hg'", ",", "'.svn'", ",", "'_svn'", ",", "'.git'", ",", "'.tox'", ",", "'.env'", ",", "'env'", ",", "...
this function is called before opening the project .
train
true
15,822
def class_renamed(old_name, new_class, message=None): return _defaultdeprecator.class_renamed(None, old_name, new_class, message)
[ "def", "class_renamed", "(", "old_name", ",", "new_class", ",", "message", "=", "None", ")", ":", "return", "_defaultdeprecator", ".", "class_renamed", "(", "None", ",", "old_name", ",", "new_class", ",", "message", ")" ]
automatically creates a class which fires a deprecationwarning when instantiated .
train
false
15,824
def _reindent(s, indent, reformat=True):
    s = textwrap.dedent(s)
    s = s.split('\n')
    s = [x.rstrip() for x in s]
    while (s and (not s[0])):
        s = s[1:]
    while (s and (not s[(-1)])):
        s = s[:(-1)]
    if reformat:
        s = '\n'.join(s)
        s = textwrap.wrap(s, initial_indent=indent, subsequent_indent=indent)
    else:
        s = [(indent + x) for x in s]
    return ('\n'.join(s) + '\n')
[ "def", "_reindent", "(", "s", ",", "indent", ",", "reformat", "=", "True", ")", ":", "s", "=", "textwrap", ".", "dedent", "(", "s", ")", "s", "=", "s", ".", "split", "(", "'\\n'", ")", "s", "=", "[", "x", ".", "rstrip", "(", ")", "for", "x", ...
remove the existing indentation from each line of a chunk of text .
train
true
15,827
@celery_app.task(name='website.notifications.tasks.send_users_email', max_retries=0)
def send_users_email(send_type):
    grouped_emails = get_users_emails(send_type)
    if (not grouped_emails):
        return
    for group in grouped_emails:
        user = User.load(group['user_id'])
        if (not user):
            log_exception()
            continue
        info = group['info']
        notification_ids = [message['_id'] for message in info]
        sorted_messages = group_by_node(info)
        if sorted_messages:
            mails.send_mail(to_addr=user.username, mimetype='html', mail=mails.DIGEST, name=user.fullname, message=sorted_messages, callback=remove_notifications(email_notification_ids=notification_ids))
[ "@", "celery_app", ".", "task", "(", "name", "=", "'website.notifications.tasks.send_users_email'", ",", "max_retries", "=", "0", ")", "def", "send_users_email", "(", "send_type", ")", ":", "grouped_emails", "=", "get_users_emails", "(", "send_type", ")", "if", "(...
find pending emails and amalgamates them into a single email .
train
false
15,828
def exhaust(_iter):
    i = None
    for i in _iter:
        pass
    return i
[ "def", "exhaust", "(", "_iter", ")", ":", "i", "=", "None", "for", "i", "in", "_iter", ":", "pass", "return", "i" ]
apply a branching rule repeatedly until it has no effect .
train
false
15,829
def Pad(ids, pad_id, length):
    assert (pad_id is not None)
    assert (length is not None)
    if (len(ids) < length):
        a = ([pad_id] * (length - len(ids)))
        return (ids + a)
    else:
        return ids[:length]
[ "def", "Pad", "(", "ids", ",", "pad_id", ",", "length", ")", ":", "assert", "(", "pad_id", "is", "not", "None", ")", "assert", "(", "length", "is", "not", "None", ")", "if", "(", "len", "(", "ids", ")", "<", "length", ")", ":", "a", "=", "(", ...
pad or trim list to len length .
train
false
15,831
def is_cloaked(path, names):
    fname = unicoder(os.path.split(path)[1]).lower()
    fname = os.path.splitext(fname)[0]
    for name in names:
        name = os.path.split(name.lower())[1]
        (name, ext) = os.path.splitext(unicoder(name))
        if ((ext == u'.rar') and fname.startswith(name) and ((len(fname) - len(name)) < 8) and (len(names) < 3) and (not RE_SUBS.search(fname))):
            logging.debug('File %s is probably encrypted due to RAR with same name inside this RAR', fname)
            return True
        elif ('password' in name):
            logging.debug('RAR %s is probably encrypted: "password" in filename %s', fname, name)
            return True
    return False
[ "def", "is_cloaked", "(", "path", ",", "names", ")", ":", "fname", "=", "unicoder", "(", "os", ".", "path", ".", "split", "(", "path", ")", "[", "1", "]", ")", ".", "lower", "(", ")", "fname", "=", "os", ".", "path", ".", "splitext", "(", "fnam...
return true if this is likely to be a cloaked encrypted post .
train
false
15,832
def get_python_args(fname, python_args, interact, debug, end_args):
    p_args = []
    if (python_args is not None):
        p_args += python_args.split()
    if interact:
        p_args.append('-i')
    if debug:
        p_args.extend(['-m', 'pdb'])
    if (fname is not None):
        if ((os.name == 'nt') and debug):
            p_args.append(osp.normpath(fname).replace(os.sep, '/'))
        else:
            p_args.append(fname)
    if end_args:
        p_args.extend(shell_split(end_args))
    return p_args
[ "def", "get_python_args", "(", "fname", ",", "python_args", ",", "interact", ",", "debug", ",", "end_args", ")", ":", "p_args", "=", "[", "]", "if", "(", "python_args", "is", "not", "None", ")", ":", "p_args", "+=", "python_args", ".", "split", "(", ")...
construct python interpreter arguments .
train
true
15,836
def _upload_media_queue(srv, obj):
    global UPLOAD_QUEUE
    if (srv[0] not in UPLOAD_PATHS):
        return
    for i in UPLOAD_PATHS[srv[0]]:
        path = getattr(obj, i)
        if (not path):
            continue
        UPLOAD_QUEUE.append({'srv': srv, 'id': obj.id, 'field': i})
    return
[ "def", "_upload_media_queue", "(", "srv", ",", "obj", ")", ":", "global", "UPLOAD_QUEUE", "if", "(", "srv", "[", "0", "]", "not", "in", "UPLOAD_PATHS", ")", ":", "return", "for", "i", "in", "UPLOAD_PATHS", "[", "srv", "[", "0", "]", "]", ":", "path",...
add media uploads to queue .
train
false
15,837
def action2button(action, autoraise=True, text_beside_icon=False, parent=None):
    if (parent is None):
        parent = action.parent()
    button = QToolButton(parent)
    button.setDefaultAction(action)
    button.setAutoRaise(autoraise)
    if text_beside_icon:
        button.setToolButtonStyle(Qt.ToolButtonTextBesideIcon)
    return button
[ "def", "action2button", "(", "action", ",", "autoraise", "=", "True", ",", "text_beside_icon", "=", "False", ",", "parent", "=", "None", ")", ":", "if", "(", "parent", "is", "None", ")", ":", "parent", "=", "action", ".", "parent", "(", ")", "button", ...
create a qtoolbutton directly from a qaction object .
train
true
15,838
def _resolve_fork_relationships(workflow):
    def helper(workflow, node, last_fork):
        if isinstance(node, Fork):
            join = None
            children = node.get_children()
            for child in children:
                join = (helper(workflow, child.get_full_node(), node) or join)
            link = Link(name='related', parent=node, child=join)
            link.save()
            node = join
        elif isinstance(node, Join):
            return node
        join = None
        children = node.get_children()
        for child in children:
            join = (helper(workflow, child.get_full_node(), last_fork) or join)
        return join
    helper(workflow, workflow.start.get_full_node(), None)
[ "def", "_resolve_fork_relationships", "(", "workflow", ")", ":", "def", "helper", "(", "workflow", ",", "node", ",", "last_fork", ")", ":", "if", "isinstance", "(", "node", ",", "Fork", ")", ":", "join", "=", "None", "children", "=", "node", ".", "get_ch...
requires proper workflow structure .
train
false
15,839
def SlugCheck(slug, allow=''):
    if (not (slug == CleanText(unicode(slug), banned=CleanText.NONDNS.replace(allow, '')).clean)):
        raise ValueError((_('Invalid URL slug: %s') % slug))
    return slug.lower()
[ "def", "SlugCheck", "(", "slug", ",", "allow", "=", "''", ")", ":", "if", "(", "not", "(", "slug", "==", "CleanText", "(", "unicode", "(", "slug", ")", ",", "banned", "=", "CleanText", ".", "NONDNS", ".", "replace", "(", "allow", ",", "''", ")", ...
verify that a string is a valid url slug .
train
false
15,840
def youtube_speed_dict(item):
    yt_ids = [item.youtube_id_0_75, item.youtube_id_1_0, item.youtube_id_1_25, item.youtube_id_1_5]
    yt_speeds = [0.75, 1.0, 1.25, 1.5]
    youtube_ids = {p[0]: p[1] for p in zip(yt_ids, yt_speeds) if p[0]}
    return youtube_ids
[ "def", "youtube_speed_dict", "(", "item", ")", ":", "yt_ids", "=", "[", "item", ".", "youtube_id_0_75", ",", "item", ".", "youtube_id_1_0", ",", "item", ".", "youtube_id_1_25", ",", "item", ".", "youtube_id_1_5", "]", "yt_speeds", "=", "[", "0.75", ",", "1...
returns {speed: youtube_ids .
train
false
15,844
def prepare_query_string(params):
    params = sorted(params.items(), key=(lambda x: x[0]))
    return (('?%s' % parse.urlencode(params)) if params else '')
[ "def", "prepare_query_string", "(", "params", ")", ":", "params", "=", "sorted", "(", "params", ".", "items", "(", ")", ",", "key", "=", "(", "lambda", "x", ":", "x", "[", "0", "]", ")", ")", "return", "(", "(", "'?%s'", "%", "parse", ".", "urlen...
convert dict params to query string .
train
false
15,845
def gimme_json_for_portfolio(request):
    "JSON includes:\n * The person's data.\n * other stuff"
    if (not request.user.is_authenticated()):
        return HttpResponseServerError("Oops, you're not logged in.")
    person = request.user.get_profile()
    citations = list(Citation.untrashed.filter(portfolio_entry__person=person))
    portfolio_entries_unserialized = PortfolioEntry.objects.filter(person=person, is_deleted=False)
    projects_unserialized = [p.project for p in portfolio_entries_unserialized]
    summaries = {}
    for c in citations:
        summaries[c.pk] = render_to_string('profile/portfolio/citation_summary.html', {'citation': c})
    five_minutes_ago = (datetime.datetime.utcnow() - datetime.timedelta(minutes=5))
    portfolio_entries = json.loads(serializers.serialize('json', portfolio_entries_unserialized))
    projects = json.loads(serializers.serialize('json', projects_unserialized))
    citations = json.loads(serializers.serialize('json', citations))
    portfolio_json = json.dumps({'citations': citations, 'portfolio_entries': portfolio_entries, 'projects': projects, 'summaries': summaries})
    return HttpResponse(portfolio_json, mimetype='application/json')
[ "def", "gimme_json_for_portfolio", "(", "request", ")", ":", "if", "(", "not", "request", ".", "user", ".", "is_authenticated", "(", ")", ")", ":", "return", "HttpResponseServerError", "(", "\"Oops, you're not logged in.\"", ")", "person", "=", "request", ".", "...
get json used to live-update the portfolio editor .
train
false
15,846
def systemd_running_state(name, path=None):
    try:
        ret = run_all(name, 'systemctl is-system-running', path=path, ignore_retcode=True)['stdout']
    except CommandExecutionError:
        ret = ''
    return ret
[ "def", "systemd_running_state", "(", "name", ",", "path", "=", "None", ")", ":", "try", ":", "ret", "=", "run_all", "(", "name", ",", "'systemctl is-system-running'", ",", "path", "=", "path", ",", "ignore_retcode", "=", "True", ")", "[", "'stdout'", "]", ...
get the operational state of a systemd based container path path to the container parent default: /var/lib/lxc .
train
true
15,847
def _numeric_arrays(arrays, kinds='buifc'):
    if (type(arrays) == ndarray):
        return (arrays.dtype.kind in kinds)
    for array_ in arrays:
        if (array_.dtype.kind not in kinds):
            return False
    return True
[ "def", "_numeric_arrays", "(", "arrays", ",", "kinds", "=", "'buifc'", ")", ":", "if", "(", "type", "(", "arrays", ")", "==", "ndarray", ")", ":", "return", "(", "arrays", ".", "dtype", ".", "kind", "in", "kinds", ")", "for", "array_", "in", "arrays"...
see if a list of arrays are all numeric .
train
false
15,850
def getDataDirectory(moduleName=None):
    if (not moduleName):
        caller = currentframe(1)
        moduleName = inspect.getmodule(caller).__name__
    return appdirs.user_data_dir(moduleName)
[ "def", "getDataDirectory", "(", "moduleName", "=", "None", ")", ":", "if", "(", "not", "moduleName", ")", ":", "caller", "=", "currentframe", "(", "1", ")", "moduleName", "=", "inspect", ".", "getmodule", "(", "caller", ")", ".", "__name__", "return", "a...
get a data directory for the caller function .
train
false
15,852
@pytest.mark.parametrize('elidemode, check', [(Qt.ElideRight, (lambda s: (s.endswith('\xe2\x80\xa6') or s.endswith('...')))), (Qt.ElideLeft, (lambda s: (s.startswith('\xe2\x80\xa6') or s.startswith('...')))), (Qt.ElideMiddle, (lambda s: (('\xe2\x80\xa6' in s) or ('...' in s)))), (Qt.ElideNone, (lambda s: (('\xe2\x80\xa6' not in s) and ('...' not in s))))])
def test_elided_text(fake_statusbar, qtbot, elidemode, check):
    label = TextBase(elidemode=elidemode)
    qtbot.add_widget(label)
    fake_statusbar.hbox.addWidget(label)
    long_string = ('Hello world! ' * 100)
    label.setText(long_string)
    label.show()
    assert check(label._elided_text)
[ "@", "pytest", ".", "mark", ".", "parametrize", "(", "'elidemode, check'", ",", "[", "(", "Qt", ".", "ElideRight", ",", "(", "lambda", "s", ":", "(", "s", ".", "endswith", "(", "'\\xe2\\x80\\xa6'", ")", "or", "s", ".", "endswith", "(", "'...'", ")", ...
ensure that a widget too small to hold the entire label text will elide .
train
false
15,854
def win_find_exe(filename, installsubdir=None, env='ProgramFiles'):
    for fn in [filename, (filename + '.exe')]:
        try:
            if (installsubdir is None):
                path = _where(fn)
            else:
                path = _where(fn, dirs=[os.path.join(os.environ[env], installsubdir)])
        except IOError:
            path = filename
        else:
            break
    return path
[ "def", "win_find_exe", "(", "filename", ",", "installsubdir", "=", "None", ",", "env", "=", "'ProgramFiles'", ")", ":", "for", "fn", "in", "[", "filename", ",", "(", "filename", "+", "'.exe'", ")", "]", ":", "try", ":", "if", "(", "installsubdir", "is"...
find executable in current dir .
train
true
15,855
def parse_criteria(criteria):
    left = criteria.left
    if (left.__class__.__name__ == 'AnnotatedColumn'):
        left_val = criteria.left.name
    else:
        raise NotImplementedError()
    right = criteria.right
    if (right.__class__.__name__ == 'BindParameter'):
        right_val = criteria.right.value
    elif (right.__class__.__name__ == 'False_'):
        right_val = False
    elif (right.__class__.__name__ == 'True_'):
        right_val = True
    else:
        raise NotImplementedError()
    return (left_val, right_val)
[ "def", "parse_criteria", "(", "criteria", ")", ":", "left", "=", "criteria", ".", "left", "if", "(", "left", ".", "__class__", ".", "__name__", "==", "'AnnotatedColumn'", ")", ":", "left_val", "=", "criteria", ".", "left", ".", "name", "else", ":", "rais...
builds a unique key for the filter operator .
train
false
15,856
def isPathAdded(edges, faces, loops, remainingEdgeTable, vertexes, z):
    if (len(remainingEdgeTable) < 1):
        return False
    pathIndexes = []
    remainingEdgeIndexKey = remainingEdgeTable.keys()[0]
    pathIndexes.append(remainingEdgeIndexKey)
    del remainingEdgeTable[remainingEdgeIndexKey]
    nextEdgeIndexAroundZ = getNextEdgeIndexAroundZ(edges[remainingEdgeIndexKey], faces, remainingEdgeTable)
    while (nextEdgeIndexAroundZ != (-1)):
        pathIndexes.append(nextEdgeIndexAroundZ)
        del remainingEdgeTable[nextEdgeIndexAroundZ]
        nextEdgeIndexAroundZ = getNextEdgeIndexAroundZ(edges[nextEdgeIndexAroundZ], faces, remainingEdgeTable)
    if (len(pathIndexes) < 3):
        print ('Dangling edges, will use intersecting circles to get import layer at height %s' % z)
        del loops[:]
        return False
    loops.append(getPath(edges, pathIndexes, vertexes, z))
    return True
[ "def", "isPathAdded", "(", "edges", ",", "faces", ",", "loops", ",", "remainingEdgeTable", ",", "vertexes", ",", "z", ")", ":", "if", "(", "len", "(", "remainingEdgeTable", ")", "<", "1", ")", ":", "return", "False", "pathIndexes", "=", "[", "]", "rema...
get the path indexes around a triangle mesh carve and add the path to the flat loops .
train
false
15,857
def test_adjust_gamma_one():
    image = np.random.uniform(0, 255, (8, 8))
    result = exposure.adjust_gamma(image, 1)
    assert_array_equal(result, image)
[ "def", "test_adjust_gamma_one", "(", ")", ":", "image", "=", "np", ".", "random", ".", "uniform", "(", "0", ",", "255", ",", "(", "8", ",", "8", ")", ")", "result", "=", "exposure", ".", "adjust_gamma", "(", "image", ",", "1", ")", "assert_array_equa...
same image should be returned for gamma equal to one .
train
false
15,860
def filter_match_kwargs(kwargs, children=False):
    kwargs = kwargs.copy()
    for key in ('pattern', 'start', 'end', 'parent', 'formatter', 'value'):
        if (key in kwargs):
            del kwargs[key]
    if children:
        for key in ('name',):
            if (key in kwargs):
                del kwargs[key]
    return kwargs
[ "def", "filter_match_kwargs", "(", "kwargs", ",", "children", "=", "False", ")", ":", "kwargs", "=", "kwargs", ".", "copy", "(", ")", "for", "key", "in", "(", "'pattern'", ",", "'start'", ",", "'end'", ",", "'parent'", ",", "'formatter'", ",", "'value'",...
filters out kwargs for match construction .
train
true
15,861
def preBuildPage(page, context, data):
    extra = {'CURRENT_PAGE': page}
    context.update(extra)
    return (context, data)
[ "def", "preBuildPage", "(", "page", ",", "context", ",", "data", ")", ":", "extra", "=", "{", "'CURRENT_PAGE'", ":", "page", "}", "context", ".", "update", "(", "extra", ")", "return", "(", "context", ",", "data", ")" ]
called prior to building a page .
train
false
15,862
def parse_target(target_expression):
    match = TARGET_REX.match(target_expression)
    if (not match):
        log.warning('Unable to parse target "{0}"'.format(target_expression))
        ret = {'engine': None, 'delimiter': None, 'pattern': target_expression}
    else:
        ret = match.groupdict()
    return ret
[ "def", "parse_target", "(", "target_expression", ")", ":", "match", "=", "TARGET_REX", ".", "match", "(", "target_expression", ")", "if", "(", "not", "match", ")", ":", "log", ".", "warning", "(", "'Unable to parse target \"{0}\"'", ".", "format", "(", "target...
parse target_expressing splitting it into engine .
train
true
15,863
@decorators.which('chef-client')
def client(whyrun=False, localmode=False, logfile=None, **kwargs):
    if (logfile is None):
        logfile = _default_logfile('chef-client')
    args = ['chef-client', '--no-color', '--once', '--logfile "{0}"'.format(logfile), '--format doc']
    if whyrun:
        args.append('--why-run')
    if localmode:
        args.append('--local-mode')
    return _exec_cmd(*args, **kwargs)
[ "@", "decorators", ".", "which", "(", "'chef-client'", ")", "def", "client", "(", "whyrun", "=", "False", ",", "localmode", "=", "False", ",", "logfile", "=", "None", ",", "**", "kwargs", ")", ":", "if", "(", "logfile", "is", "None", ")", ":", "logfi...
create a low-level service client by name using the default session .
train
true
15,865
@testing.requires_testing_data
def test_expand():
    stc = read_source_estimate(fname_stc, 'sample')
    assert_true(('sample' in repr(stc)))
    labels_lh = read_labels_from_annot('sample', 'aparc', 'lh', subjects_dir=subjects_dir)
    new_label = (labels_lh[0] + labels_lh[1])
    stc_limited = stc.in_label(new_label)
    stc_new = stc_limited.copy()
    stc_new.data.fill(0)
    for label in labels_lh[:2]:
        stc_new += stc.in_label(label).expand(stc_limited.vertices)
    assert_raises(TypeError, stc_new.expand, stc_limited.vertices[0])
    assert_raises(ValueError, stc_new.expand, [stc_limited.vertices[0]])
    assert_raises(ValueError, stc.__add__, stc.in_label(labels_lh[0]))
[ "@", "testing", ".", "requires_testing_data", "def", "test_expand", "(", ")", ":", "stc", "=", "read_source_estimate", "(", "fname_stc", ",", "'sample'", ")", "assert_true", "(", "(", "'sample'", "in", "repr", "(", "stc", ")", ")", ")", "labels_lh", "=", "...
test stc expansion .
train
false
15,866
def pool_add(pool_name, **kwargs): return ceph_cfg.pool_add(pool_name, **kwargs)
[ "def", "pool_add", "(", "pool_name", ",", "**", "kwargs", ")", ":", "return", "ceph_cfg", ".", "pool_add", "(", "pool_name", ",", "**", "kwargs", ")" ]
create a pool .
train
false
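The CLI example was truncated out of the description above; a plausible restoration for a Salt execution module of this shape (pool name and options are illustrative):

    pool_add('mypool', pg_num=128)
    # CLI equivalent (hypothetical arguments):
    #   salt '*' ceph.pool_add mypool pg_num=128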
15,868
def add_store(source, store, saltenv='base'): cert_file = __salt__['cp.cache_file'](source, saltenv) cmd = 'certutil.exe -addstore {0} {1}'.format(store, cert_file) return __salt__['cmd.run'](cmd)
[ "def", "add_store", "(", "source", ",", "store", ",", "saltenv", "=", "'base'", ")", ":", "cert_file", "=", "__salt__", "[", "'cp.cache_file'", "]", "(", "source", ",", "saltenv", ")", "cmd", "=", "'certutil.exe -addstore {0} {1}'", ".", "format", "(", "stor...
store a certificate in the given certificate store .
train
true
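An illustrative call, assuming a Windows minion with certutil.exe and a certificate served from the Salt fileserver (path and store name are made up):

    add_store('salt://certs/example.cer', 'TrustedPublisher')
    # CLI equivalent (hypothetical):
    #   salt '*' certutil.add_store salt://certs/example.cer TrustedPublisher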
15,870
def grader_from_conf(conf): if isinstance(conf, CourseGrader): return conf subgraders = [] for subgraderconf in conf: subgraderconf = subgraderconf.copy() weight = subgraderconf.pop('weight', 0) try: if ('min_count' in subgraderconf): subgrader_class = AssignmentFormatGrader else: raise ValueError('Configuration has no appropriate grader class.') bad_args = invalid_args(subgrader_class.__init__, subgraderconf) if (len(bad_args) > 0): log.warning('Invalid arguments for a subgrader: %s', bad_args) for key in bad_args: del subgraderconf[key] subgrader = subgrader_class(**subgraderconf) subgraders.append((subgrader, subgrader.category, weight)) except (TypeError, ValueError) as error: msg = ((('Unable to parse grader configuration:\n ' + str(subgraderconf)) + '\n Error was:\n ') + str(error)) raise ValueError(msg), None, sys.exc_info()[2] return WeightedSubsectionsGrader(subgraders)
[ "def", "grader_from_conf", "(", "conf", ")", ":", "if", "isinstance", "(", "conf", ",", "CourseGrader", ")", ":", "return", "conf", "subgraders", "=", "[", "]", "for", "subgraderconf", "in", "conf", ":", "subgraderconf", "=", "subgraderconf", ".", "copy", ...
this creates a coursegrader from a configuration .
train
false
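A conf of the shape the parser accepts, sketched under assumptions: 'min_count' selects AssignmentFormatGrader, 'weight' is popped for the weighted grader, and any key AssignmentFormatGrader.__init__ does not accept is logged as invalid and dropped (the extra keys below are illustrative):

    conf = [
        {'type': 'Homework',   'min_count': 12, 'drop_count': 2, 'weight': 0.4},
        {'type': 'Final Exam', 'min_count': 1,  'drop_count': 0, 'weight': 0.6},
    ]
    grader = grader_from_conf(conf)  # -> WeightedSubsectionsGrader over two subgraders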
15,871
def get_word_blacklist_regex(): return re.compile((('\\b(' + '|'.join(map(re.escape, settings.CC_WORD_BLACKLIST))) + ')\\b'))
[ "def", "get_word_blacklist_regex", "(", ")", ":", "return", "re", ".", "compile", "(", "(", "(", "'\\\\b('", "+", "'|'", ".", "join", "(", "map", "(", "re", ".", "escape", ",", "settings", ".", "CC_WORD_BLACKLIST", ")", ")", ")", "+", "')\\\\b'", ")", ...
make a regex that looks kind of like r'\b(word1|word2)\b' from the blacklisted words .
train
false
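With settings.CC_WORD_BLACKLIST = ['foo', 'bar'] the compiled pattern is r'\b(foo|bar)\b', so matches stop at word boundaries:

    regex = get_word_blacklist_regex()
    bool(regex.search('a foo here'))  # True
    bool(regex.search('food fight'))  # False -- \b blocks partial-word hits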
15,872
def hostgroup_exists(name=None, groupid=None, node=None, nodeids=None, **connection_args): conn_args = _login(**connection_args) zabbix_version = apiinfo_version(**connection_args) try: if conn_args: if (LooseVersion(zabbix_version) > LooseVersion('2.5')): if (not groupid): groupid = None if (not name): name = None ret = hostgroup_get(name, groupid, **connection_args) return bool(ret) else: params = {} method = 'hostgroup.exists' if groupid: params['groupid'] = groupid if name: params['name'] = name if (LooseVersion(zabbix_version) < LooseVersion('2.4')): if node: params['node'] = node if nodeids: params['nodeids'] = nodeids if ((not groupid) and (not name) and (not node) and (not nodeids)): return {'result': False, 'comment': 'Please submit groupid, name, node or nodeids parameter tocheck if at least one host group that matches the given filter criteria exists.'} ret = _query(method, params, conn_args['url'], conn_args['auth']) return ret['result'] else: raise KeyError except KeyError: return False
[ "def", "hostgroup_exists", "(", "name", "=", "None", ",", "groupid", "=", "None", ",", "node", "=", "None", ",", "nodeids", "=", "None", ",", "**", "connection_args", ")", ":", "conn_args", "=", "_login", "(", "**", "connection_args", ")", "zabbix_version"...
checks if at least one host group that matches the given filter criteria exists .
train
true
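An illustrative call (a sketch; real connection details normally come from minion configuration rather than inline keywords):

    hostgroup_exists(name='Linux servers')  # True when a matching group exists
    # On Zabbix older than 2.4 the snippet also honours the 'node' and
    # 'nodeids' filters via the deprecated hostgroup.exists API method.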
15,873
def regexp_extraction(string, _regexp, groups=1): regexp_search = re.search(string=str(string), pattern=str(_regexp)) if regexp_search: if (regexp_search.group(groups) != ''): return str(regexp_search.group(groups)) return None
[ "def", "regexp_extraction", "(", "string", ",", "_regexp", ",", "groups", "=", "1", ")", ":", "regexp_search", "=", "re", ".", "search", "(", "string", "=", "str", "(", "string", ")", ",", "pattern", "=", "str", "(", "_regexp", ")", ")", "if", "regex...
returns the capture group specified in the regexp .
train
false
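Worked examples (group 1 is the default; an unmatched pattern, or an empty capture, yields None):

    regexp_extraction('Episode 42', r'Episode (\d+)')    # '42'
    regexp_extraction('Episode 42', r'(\w+) (\d+)', 2)   # '42'
    regexp_extraction('no digits', r'(\d+)')             # None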
15,874
def enqueue_events_for_all_sites(): with frappe.init_site(): jobs_per_site = get_jobs() sites = get_sites() for site in sites: try: enqueue_events_for_site(site=site, queued_jobs=jobs_per_site[site]) except: print frappe.get_traceback()
[ "def", "enqueue_events_for_all_sites", "(", ")", ":", "with", "frappe", ".", "init_site", "(", ")", ":", "jobs_per_site", "=", "get_jobs", "(", ")", "sites", "=", "get_sites", "(", ")", "for", "site", "in", "sites", ":", "try", ":", "enqueue_events_for_site"...
loop through sites and enqueue events that are not already queued .
train
false
15,875
def contains_softmax(f): raise NotImplementedError('TODO: implement this function.')
[ "def", "contains_softmax", "(", "f", ")", ":", "raise", "NotImplementedError", "(", "'TODO: implement this function.'", ")" ]
f: a theano function . returns true if f contains a softmax op .
train
false
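One way to fill in the TODO, offered as an assumption rather than the reference answer: walk the compiled graph's apply nodes and match the softmax op by class name (name matching sidesteps the exact import path; Theano's optimizer may also substitute SoftmaxWithBias):

    def contains_softmax(f):
        # f.maker.fgraph.toposort() yields every apply node in the compiled graph
        return any(type(node.op).__name__ in ('Softmax', 'SoftmaxWithBias')
                   for node in f.maker.fgraph.toposort())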
15,876
@contextfunction def htsort(context, objects): if ((not objects) or ('request' not in context)): return objects request = context['request'] if (('sorting' not in request.GET) or (not hasattr(objects, 'order_by')) or (not hasattr(objects, 'model'))): return objects args = request.GET.getlist('sorting') fields = objects.model._meta.get_all_field_names() for arg in args: field_name = arg.lstrip('-') if (field_name in fields): field = objects.model._meta.get_field(field_name) if isinstance(field, models.ManyToManyField): agg_field = agg_arg = str(('sorting_%s' % field_name)) if (arg[0] == '-'): agg_arg = ('-' + agg_arg) kwargs = {agg_field: models.Count(field_name)} objects = objects.annotate(**kwargs).order_by(agg_arg) else: objects = objects.order_by(arg) return objects.distinct()
[ "@", "contextfunction", "def", "htsort", "(", "context", ",", "objects", ")", ":", "if", "(", "(", "not", "objects", ")", "or", "(", "'request'", "not", "in", "context", ")", ")", ":", "return", "objects", "request", "=", "context", "[", "'request'", "...
sort objects based on the request's 'sorting' GET parameters .
train
false
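Sketched usage, assuming a Django queryset rendered through a Jinja2 environment where this contextfunction is registered (URL and field names are illustrative):

    # GET /items/?sorting=-created&sorting=name
    #   {{ htsort(items) }}   orders the queryset by '-created', then 'name'
    # A ManyToManyField such as 'tags' is instead annotated with Count('tags')
    # and ordered by that aggregate, as the isinstance branch above shows.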
15,877
def _get_file_from_s3(metadata, saltenv, bucket_name, path, cached_file_path): (key, keyid, service_url, verify_ssl, kms_keyid, location, path_style, https_enable) = _get_s3_key() if os.path.isfile(cached_file_path): file_meta = _find_file_meta(metadata, bucket_name, saltenv, path) if file_meta: file_etag = file_meta['ETag'] if (file_etag.find('-') == (-1)): file_md5 = file_etag cached_md5 = salt.utils.get_hash(cached_file_path, 'md5') if (cached_md5 == file_md5): return else: cached_file_stat = os.stat(cached_file_path) cached_file_size = cached_file_stat.st_size cached_file_mtime = datetime.datetime.fromtimestamp(cached_file_stat.st_mtime) cached_file_lastmod = datetime.datetime.strptime(file_meta['LastModified'], '%Y-%m-%dT%H:%M:%S.%fZ') if ((cached_file_size == int(file_meta['Size'])) and (cached_file_mtime > cached_file_lastmod)): log.debug('cached file size equal to metadata size and cached file mtime later than metadata last modification time.') ret = __utils__['s3.query'](key=key, keyid=keyid, kms_keyid=keyid, method='HEAD', bucket=bucket_name, service_url=service_url, verify_ssl=verify_ssl, location=location, path=_quote(path), local_file=cached_file_path, full_headers=True, path_style=path_style, https_enable=https_enable) if (ret is not None): for (header_name, header_value) in ret['headers'].items(): name = header_name.strip() value = header_value.strip() if (str(name).lower() == 'last-modified'): s3_file_mtime = datetime.datetime.strptime(value, '%a, %d %b %Y %H:%M:%S %Z') elif (str(name).lower() == 'content-length'): s3_file_size = int(value) if ((cached_file_size == s3_file_size) and (cached_file_mtime > s3_file_mtime)): log.info('{0} - {1} : {2} skipped download since cached file size equal to and mtime after s3 values'.format(bucket_name, saltenv, path)) return __utils__['s3.query'](key=key, keyid=keyid, kms_keyid=keyid, bucket=bucket_name, service_url=service_url, verify_ssl=verify_ssl, location=location, path=_quote(path), local_file=cached_file_path, path_style=path_style, https_enable=https_enable)
[ "def", "_get_file_from_s3", "(", "metadata", ",", "saltenv", ",", "bucket_name", ",", "path", ",", "cached_file_path", ")", ":", "(", "key", ",", "keyid", ",", "service_url", ",", "verify_ssl", ",", "kms_keyid", ",", "location", ",", "path_style", ",", "http...
checks the local cache for the file .
train
true
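The freshness check at the heart of the snippet, in miniature (a sketch, not the module's API): a single-part S3 ETag is a plain MD5 digest, while multipart ETags contain '-' and force the size/mtime fallback and, if needed, the HEAD request:

    import hashlib

    def _cache_is_fresh(cached_path, etag):
        # Multipart ETags ('<md5>-<parts>') cannot be compared to a local MD5.
        if '-' in etag:
            return False
        with open(cached_path, 'rb') as fh:
            return hashlib.md5(fh.read()).hexdigest() == etag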