Dataset columns (with value/length ranges from the viewer summary):
  id_within_dataset     int64    values 1 to 55.5k
  snippet               string   19 to 14.2k characters
  tokens                list     6 to 1.63k tokens
  nl                    string   6 to 352 characters
  split_within_dataset  string   1 distinct value (train)
  is_duplicated         bool     2 classes (true/false)
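As a reading aid, here is a minimal sketch of how rows with this schema might be loaded and inspected in Python. The JSON Lines storage format and the file name rows.jsonl are assumptions for illustration only; the dataset's actual distribution format is not described above.

import json

# Hypothetical file name; adjust to wherever the rows are actually stored.
PATH = "rows.jsonl"

with open(PATH, encoding="utf-8") as fh:
    for line in fh:
        row = json.loads(line)
        # Each row holds an integer id, the raw code snippet, its token list,
        # a short natural-language description, the split name, and a duplicate flag.
        snippet_len = len(row["snippet"])
        n_tokens = len(row["tokens"])
        print(row["id_within_dataset"], n_tokens, snippet_len, row["nl"], row["is_duplicated"])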
3,767
def _wrap_with_after(action, responder): if ('resource' in get_argnames(action)): shim = action else: def shim(req, resp, resource): action(req, resp) @wraps(responder) def do_after(self, req, resp, **kwargs): responder(self, req, resp, **kwargs) shim(req, resp, self) return do_after
[ "def", "_wrap_with_after", "(", "action", ",", "responder", ")", ":", "if", "(", "'resource'", "in", "get_argnames", "(", "action", ")", ")", ":", "shim", "=", "action", "else", ":", "def", "shim", "(", "req", ",", "resp", ",", "resource", ")", ":", ...
execute the given action function after a responder method .
train
false
3,768
def DeconstructTimestampAssetId(id_prefix, asset_id, reverse_ts=True): assert IdPrefix.IsValid(id_prefix), id_prefix assert (asset_id[0] == id_prefix), asset_id byte_str = base64hex.B64HexDecode(asset_id[1:], padding=False) (timestamp,) = struct.unpack('>I', byte_str[:4]) if reverse_ts: timestamp = (((1L << 32) ...
[ "def", "DeconstructTimestampAssetId", "(", "id_prefix", ",", "asset_id", ",", "reverse_ts", "=", "True", ")", ":", "assert", "IdPrefix", ".", "IsValid", "(", "id_prefix", ")", ",", "id_prefix", "assert", "(", "asset_id", "[", "0", "]", "==", "id_prefix", ")"...
deconstructs an asset id that was previously constructed according to the rules of "constructtimestampassetid" .
train
false
3,769
def modify_atomic_group(id, **data): models.AtomicGroup.smart_get(id).update_object(data)
[ "def", "modify_atomic_group", "(", "id", ",", "**", "data", ")", ":", "models", ".", "AtomicGroup", ".", "smart_get", "(", "id", ")", ".", "update_object", "(", "data", ")" ]
modify atomic group .
train
false
3,770
def apply_query(query, entities, _key=None): if (not isinstance(query, Query)): raise datastore_errors.BadArgumentError(('query argument must be a datastore_query.Query (%r)' % (query,))) if (not isinstance(entities, list)): raise datastore_errors.BadArgumentError(('entities argument must be a list (%r)' % (entit...
[ "def", "apply_query", "(", "query", ",", "entities", ",", "_key", "=", "None", ")", ":", "if", "(", "not", "isinstance", "(", "query", ",", "Query", ")", ")", ":", "raise", "datastore_errors", ".", "BadArgumentError", "(", "(", "'query argument must be a dat...
performs the given query on a set of in-memory entities .
train
false
3,771
def _activities_union_all(*qlist): import ckan.model as model return model.Session.query(model.Activity).select_entity_from(union_all(*[q.subquery().select() for q in qlist])).distinct(model.Activity.timestamp)
[ "def", "_activities_union_all", "(", "*", "qlist", ")", ":", "import", "ckan", ".", "model", "as", "model", "return", "model", ".", "Session", ".", "query", "(", "model", ".", "Activity", ")", ".", "select_entity_from", "(", "union_all", "(", "*", "[", "...
return union of two or more queries sorted by timestamp .
train
false
3,772
def _encode_multipart(vars, content_type): f = StringIO() w = f.write CRLF = '\r\n' boundary = _get_multipart_boundary(content_type) if (not boundary): boundary = os.urandom(10).encode('hex') content_type += ('; boundary=%s' % boundary) for (name, value) in vars: w(('--%s' % boundary)) w(CRLF) assert (n...
[ "def", "_encode_multipart", "(", "vars", ",", "content_type", ")", ":", "f", "=", "StringIO", "(", ")", "w", "=", "f", ".", "write", "CRLF", "=", "'\\r\\n'", "boundary", "=", "_get_multipart_boundary", "(", "content_type", ")", "if", "(", "not", "boundary"...
build a multipart/form-data body with randomly generated boundary .
train
false
3,773
def partial_token_sort_ratio(s1, s2, force_ascii=True, full_process=True): return _token_sort(s1, s2, partial=True, force_ascii=force_ascii, full_process=full_process)
[ "def", "partial_token_sort_ratio", "(", "s1", ",", "s2", ",", "force_ascii", "=", "True", ",", "full_process", "=", "True", ")", ":", "return", "_token_sort", "(", "s1", ",", "s2", ",", "partial", "=", "True", ",", "force_ascii", "=", "force_ascii", ",", ...
return the ratio of the most similar substring as a number between 0 and 100 but sorting the token before comparing .
train
true
3,774
def _safe_svd(A, **kwargs): if kwargs.get('overwrite_a', False): raise ValueError('Cannot set overwrite_a=True with this function') try: return linalg.svd(A, **kwargs) except np.linalg.LinAlgError as exp: from .utils import warn if ('lapack_driver' in _get_args(linalg.svd)): warn(('SVD error (%s), attempt...
[ "def", "_safe_svd", "(", "A", ",", "**", "kwargs", ")", ":", "if", "kwargs", ".", "get", "(", "'overwrite_a'", ",", "False", ")", ":", "raise", "ValueError", "(", "'Cannot set overwrite_a=True with this function'", ")", "try", ":", "return", "linalg", ".", "...
wrapper to get around the svd did not converge error of death .
train
false
3,775
def PackDatetime(name, value, pbvalue): pbvalue.set_int64value(DatetimeToTimestamp(value))
[ "def", "PackDatetime", "(", "name", ",", "value", ",", "pbvalue", ")", ":", "pbvalue", ".", "set_int64value", "(", "DatetimeToTimestamp", "(", "value", ")", ")" ]
packs a datetime-typed property into a entity_pb .
train
false
3,777
def morphological_laplace(input, size=None, footprint=None, structure=None, output=None, mode='reflect', cval=0.0, origin=0): tmp1 = grey_dilation(input, size, footprint, structure, None, mode, cval, origin) if isinstance(output, numpy.ndarray): grey_erosion(input, size, footprint, structure, output, mode, cval, or...
[ "def", "morphological_laplace", "(", "input", ",", "size", "=", "None", ",", "footprint", "=", "None", ",", "structure", "=", "None", ",", "output", "=", "None", ",", "mode", "=", "'reflect'", ",", "cval", "=", "0.0", ",", "origin", "=", "0", ")", ":...
multi-dimensional morphological laplace .
train
false
3,778
def writeToMongo(): sys.stderr.write('Saving to db.messages.errors, will not check for duplicates!') from pymongo import Connection connection = Connection() db = connection['messages'] errorcodes = db['errors'] for errCode in messages.keys(): sys.stderr.write('Inserting code: {}\n'.format(errCode)) result = ...
[ "def", "writeToMongo", "(", ")", ":", "sys", ".", "stderr", ".", "write", "(", "'Saving to db.messages.errors, will not check for duplicates!'", ")", "from", "pymongo", "import", "Connection", "connection", "=", "Connection", "(", ")", "db", "=", "connection", "[", ...
pipe the messages array into mongodb .
train
false
3,780
def pip_install(req_file, constraints_file=None): cmd = bin_prefix('pip install --exists-action w --upgrade -r {} '.format(req_file)) if constraints_file: cmd += ' -c {}'.format(constraints_file) if WHEELHOUSE_PATH: cmd += ' --no-index --find-links={}'.format(WHEELHOUSE_PATH) return cmd
[ "def", "pip_install", "(", "req_file", ",", "constraints_file", "=", "None", ")", ":", "cmd", "=", "bin_prefix", "(", "'pip install --exists-action w --upgrade -r {} '", ".", "format", "(", "req_file", ")", ")", "if", "constraints_file", ":", "cmd", "+=", "' -c {}...
install the package using pip .
train
false
3,781
def sorted_by_field(issues, field='closed_at', reverse=False): return sorted(issues, key=(lambda i: i[field]), reverse=reverse)
[ "def", "sorted_by_field", "(", "issues", ",", "field", "=", "'closed_at'", ",", "reverse", "=", "False", ")", ":", "return", "sorted", "(", "issues", ",", "key", "=", "(", "lambda", "i", ":", "i", "[", "field", "]", ")", ",", "reverse", "=", "reverse...
return a list of issues sorted by closing date .
train
true
3,782
def SerializeEntries(entries): output = [] for (python_format, wire_format, type_descriptor) in entries: if ((wire_format is None) or (python_format and type_descriptor.IsDirty(python_format))): wire_format = type_descriptor.ConvertToWireFormat(python_format) output.extend(wire_format) return ''.join(output)
[ "def", "SerializeEntries", "(", "entries", ")", ":", "output", "=", "[", "]", "for", "(", "python_format", ",", "wire_format", ",", "type_descriptor", ")", "in", "entries", ":", "if", "(", "(", "wire_format", "is", "None", ")", "or", "(", "python_format", ...
serializes given triplets of python and wire values and a descriptor .
train
false
3,784
def run_CSS(input_path, out_path, output_CSS_statistics): if (not output_CSS_statistics): command_args = [('-i %s -o %s' % (input_path, out_path))] else: command_args = [('-i %s -o %s -s %s' % (input_path, out_path, output_CSS_statistics))] rsl = RExecutor(TmpDir=get_qiime_temp_dir()) app_result = rsl(command_a...
[ "def", "run_CSS", "(", "input_path", ",", "out_path", ",", "output_CSS_statistics", ")", ":", "if", "(", "not", "output_CSS_statistics", ")", ":", "command_args", "=", "[", "(", "'-i %s -o %s'", "%", "(", "input_path", ",", "out_path", ")", ")", "]", "else",...
run metagenomeseqs css algorithm through rscript .
train
false
3,785
def sparse_categorical_accuracy(y_true, y_pred): y_true = tf.cast(y_true, tf.float32) y_pred = tf.cast(tf.argmax(y_pred, (len(y_pred.get_shape()) - 1)), tf.float32) return tf.reduce_mean(tf.cast(tf.equal(y_true, y_pred), tf.float32))
[ "def", "sparse_categorical_accuracy", "(", "y_true", ",", "y_pred", ")", ":", "y_true", "=", "tf", ".", "cast", "(", "y_true", ",", "tf", ".", "float32", ")", "y_pred", "=", "tf", ".", "cast", "(", "tf", ".", "argmax", "(", "y_pred", ",", "(", "len",...
multi-class prediction accuracy .
train
false
3,786
def _create_instances_with_cached_ips(orig_func, *args, **kwargs): (instances, reservation_id) = orig_func(*args, **kwargs) fake_cache = _get_fake_cache() for instance in instances: instance['info_cache'].network_info = fake_cache db.instance_info_cache_update(args[1], instance['uuid'], {'network_info': fake_cac...
[ "def", "_create_instances_with_cached_ips", "(", "orig_func", ",", "*", "args", ",", "**", "kwargs", ")", ":", "(", "instances", ",", "reservation_id", ")", "=", "orig_func", "(", "*", "args", ",", "**", "kwargs", ")", "fake_cache", "=", "_get_fake_cache", "...
kludge the above kludge so that the database doesn't get out of sync with the actual instance .
train
false
3,787
def _matplotlib_list(interval_list): xlist = [] ylist = [] if len(interval_list): for intervals in interval_list: intervalx = intervals[0] intervaly = intervals[1] xlist.extend([intervalx.start, intervalx.start, intervalx.end, intervalx.end, None]) ylist.extend([intervaly.start, intervaly.end, interval...
[ "def", "_matplotlib_list", "(", "interval_list", ")", ":", "xlist", "=", "[", "]", "ylist", "=", "[", "]", "if", "len", "(", "interval_list", ")", ":", "for", "intervals", "in", "interval_list", ":", "intervalx", "=", "intervals", "[", "0", "]", "interva...
returns lists for matplotlib fill command from a list of bounding rectangular intervals .
train
false
3,788
def versions_from_parentdir(parentdir_prefix, root, verbose): dirname = os.path.basename(root) if (not dirname.startswith(parentdir_prefix)): if verbose: print(("guessing rootdir is '%s', but '%s' doesn't start with prefix '%s'" % (root, dirname, parentdir_prefix))) raise NotThisMethod("rootdir doesn't start w...
[ "def", "versions_from_parentdir", "(", "parentdir_prefix", ",", "root", ",", "verbose", ")", ":", "dirname", "=", "os", ".", "path", ".", "basename", "(", "root", ")", "if", "(", "not", "dirname", ".", "startswith", "(", "parentdir_prefix", ")", ")", ":", ...
try to determine the version from the parent directory name .
train
true
3,789
def truncated_cube_graph(create_using=None): description = ['adjacencylist', 'Truncated Cube Graph', 24, [[2, 3, 5], [12, 15], [4, 5], [7, 9], [6], [17, 19], [8, 9], [11, 13], [10], [18, 21], [12, 13], [15], [14], [22, 23], [16], [20, 24], [18, 19], [21], [20], [24], [22], [23], [24], []]] G = make_small_undirected_g...
[ "def", "truncated_cube_graph", "(", "create_using", "=", "None", ")", ":", "description", "=", "[", "'adjacencylist'", ",", "'Truncated Cube Graph'", ",", "24", ",", "[", "[", "2", ",", "3", ",", "5", "]", ",", "[", "12", ",", "15", "]", ",", "[", "4...
return the skeleton of the truncated cube .
train
false
3,791
def colorscale_to_scale(colorscale): scale_list = [] for item in colorscale: scale_list.append(item[0]) return scale_list
[ "def", "colorscale_to_scale", "(", "colorscale", ")", ":", "scale_list", "=", "[", "]", "for", "item", "in", "colorscale", ":", "scale_list", ".", "append", "(", "item", "[", "0", "]", ")", "return", "scale_list" ]
extracts the interpolation scale values from colorscale as a list .
train
false
3,792
def get_offset_name(offset): msg = 'get_offset_name(offset) is deprecated. Use offset.freqstr instead' warnings.warn(msg, FutureWarning, stacklevel=2) return offset.freqstr
[ "def", "get_offset_name", "(", "offset", ")", ":", "msg", "=", "'get_offset_name(offset) is deprecated. Use offset.freqstr instead'", "warnings", ".", "warn", "(", "msg", ",", "FutureWarning", ",", "stacklevel", "=", "2", ")", "return", "offset", ".", "freqstr" ]
return rule name associated with a dateoffset object examples get_offset_name(bmonthend(1)) --> eom .
train
false
3,793
def closure(source, pointerType, accumulator=None): if isinstance(source, Word): return reduce(union, map((lambda s, t=pointerType: tree(s, t)), source.getSenses())) _requireSource(source) if (accumulator is None): accumulator = [] if (source not in accumulator): accumulator.append(source) for target in sou...
[ "def", "closure", "(", "source", ",", "pointerType", ",", "accumulator", "=", "None", ")", ":", "if", "isinstance", "(", "source", ",", "Word", ")", ":", "return", "reduce", "(", "union", ",", "map", "(", "(", "lambda", "s", ",", "t", "=", "pointerTy...
return the transitive closure of source under the pointertype relationship .
train
false
3,794
def edge_dfs(G, source=None, orientation='original'): nodes = list(G.nbunch_iter(source)) if (not nodes): raise StopIteration kwds = {'data': False} if G.is_multigraph(): kwds['keys'] = True (out_edges, key, tailhead) = helper_funcs(G, orientation) visited_edges = set() visited_nodes = set() edges = {} for...
[ "def", "edge_dfs", "(", "G", ",", "source", "=", "None", ",", "orientation", "=", "'original'", ")", ":", "nodes", "=", "list", "(", "G", ".", "nbunch_iter", "(", "source", ")", ")", "if", "(", "not", "nodes", ")", ":", "raise", "StopIteration", "kwd...
a directed .
train
false
3,795
def dumb_property_dict(style): return dict([(x.strip(), y.strip()) for (x, y) in [z.split(':', 1) for z in style.split(';') if (':' in z)]])
[ "def", "dumb_property_dict", "(", "style", ")", ":", "return", "dict", "(", "[", "(", "x", ".", "strip", "(", ")", ",", "y", ".", "strip", "(", ")", ")", "for", "(", "x", ",", "y", ")", "in", "[", "z", ".", "split", "(", "':'", ",", "1", ")...
returns a hash of css attributes .
train
true
3,796
def rm(pattern, directory=False): def safe_remove(path): try: os.remove(path) except OSError as err: if (err.errno != errno.ENOENT): raise else: print ('rm %s' % path) def safe_rmtree(path): def onerror(fun, path, excinfo): exc = excinfo[1] if (exc.errno != errno.ENOENT): raise existe...
[ "def", "rm", "(", "pattern", ",", "directory", "=", "False", ")", ":", "def", "safe_remove", "(", "path", ")", ":", "try", ":", "os", ".", "remove", "(", "path", ")", "except", "OSError", "as", "err", ":", "if", "(", "err", ".", "errno", "!=", "e...
remove stopped containers in the docker-compose file .
train
false
3,797
def test_solve_polynomial_cv_1a(): assert (solve((sqrt(x) - 1), x) == [1]) assert (solve((sqrt(x) - 2), x) == [4]) assert (solve(((x ** Rational(1, 4)) - 2), x) == [16]) assert (solve(((x ** Rational(1, 3)) - 3), x) == [27]) assert (solve(((sqrt(x) + (x ** Rational(1, 3))) + (x ** Rational(1, 4))), x) == [0])
[ "def", "test_solve_polynomial_cv_1a", "(", ")", ":", "assert", "(", "solve", "(", "(", "sqrt", "(", "x", ")", "-", "1", ")", ",", "x", ")", "==", "[", "1", "]", ")", "assert", "(", "solve", "(", "(", "sqrt", "(", "x", ")", "-", "2", ")", ",",...
test for solving on equations that can be converted to a polynomial equation using the change of variable y -> x**rational .
train
false
3,799
def setup_module(module): import os import numpy as np import random _random_seed = os.environ.get('SKLEARN_SEED', None) if (_random_seed is None): _random_seed = (np.random.uniform() * ((2 ** 31) - 1)) _random_seed = int(_random_seed) print ('I: Seeding RNGs with %r' % _random_seed) np.random.seed(_random_se...
[ "def", "setup_module", "(", "module", ")", ":", "import", "os", "import", "numpy", "as", "np", "import", "random", "_random_seed", "=", "os", ".", "environ", ".", "get", "(", "'SKLEARN_SEED'", ",", "None", ")", "if", "(", "_random_seed", "is", "None", ")...
set up test fixtures .
train
false
3,800
def fileobj_name(f): if isinstance(f, string_types): return f elif isinstance(f, gzip.GzipFile): return fileobj_name(f.fileobj) elif hasattr(f, 'name'): return f.name elif hasattr(f, 'filename'): return f.filename elif hasattr(f, '__class__'): return str(f.__class__) else: return str(type(f))
[ "def", "fileobj_name", "(", "f", ")", ":", "if", "isinstance", "(", "f", ",", "string_types", ")", ":", "return", "f", "elif", "isinstance", "(", "f", ",", "gzip", ".", "GzipFile", ")", ":", "return", "fileobj_name", "(", "f", ".", "fileobj", ")", "e...
returns the name of file-like object f .
train
false
3,801
def finite_diff(expression, variable, increment=1): expression = expression.expand() expression2 = expression.subs(variable, (variable + increment)) expression2 = expression2.expand() return (expression2 - expression)
[ "def", "finite_diff", "(", "expression", ",", "variable", ",", "increment", "=", "1", ")", ":", "expression", "=", "expression", ".", "expand", "(", ")", "expression2", "=", "expression", ".", "subs", "(", "variable", ",", "(", "variable", "+", "increment"...
takes as input a polynomial expression and the variable used to construct it and returns the difference between functions value when the input is incremented to 1 and the original function value .
train
false
3,802
def walk_skip_hidden(top, onerror=None, followlinks=False): for (root, dirs, files) in os.walk(top, topdown=True, onerror=onerror, followlinks=followlinks): dirs[:] = [d for d in dirs if (not is_path_hidden(d))] files[:] = [f for f in files if (not is_path_hidden(f))] (yield (root, dirs, files))
[ "def", "walk_skip_hidden", "(", "top", ",", "onerror", "=", "None", ",", "followlinks", "=", "False", ")", ":", "for", "(", "root", ",", "dirs", ",", "files", ")", "in", "os", ".", "walk", "(", "top", ",", "topdown", "=", "True", ",", "onerror", "=...
a wrapper for os .
train
true
3,804
@pick_context_manager_writer def flavor_access_remove(context, flavor_id, project_id): instance_type_id = _flavor_get_id_from_flavor(context, flavor_id) count = _flavor_access_query(context).filter_by(instance_type_id=instance_type_id).filter_by(project_id=project_id).soft_delete(synchronize_session=False) if (count...
[ "@", "pick_context_manager_writer", "def", "flavor_access_remove", "(", "context", ",", "flavor_id", ",", "project_id", ")", ":", "instance_type_id", "=", "_flavor_get_id_from_flavor", "(", "context", ",", "flavor_id", ")", "count", "=", "_flavor_access_query", "(", "...
remove flavor access for project .
train
false
3,805
def touch(name, atime=None, mtime=None): name = os.path.expanduser(name) if (atime and atime.isdigit()): atime = int(atime) if (mtime and mtime.isdigit()): mtime = int(mtime) try: if (not os.path.exists(name)): with salt.utils.fopen(name, 'a') as fhw: fhw.write('') if ((not atime) and (not mtime)): ...
[ "def", "touch", "(", "name", ",", "atime", "=", "None", ",", "mtime", "=", "None", ")", ":", "name", "=", "os", ".", "path", ".", "expanduser", "(", "name", ")", "if", "(", "atime", "and", "atime", ".", "isdigit", "(", ")", ")", ":", "atime", "...
touch generated files that are older than their sources after an update .
train
true
3,806
def is_eui64_address(ip_address): ip = netaddr.IPAddress(ip_address) return ((ip.version == 6) and (not ((ip & 1099494850560) ^ 1099478073344)))
[ "def", "is_eui64_address", "(", "ip_address", ")", ":", "ip", "=", "netaddr", ".", "IPAddress", "(", "ip_address", ")", "return", "(", "(", "ip", ".", "version", "==", "6", ")", "and", "(", "not", "(", "(", "ip", "&", "1099494850560", ")", "^", "1099...
check if ip address is eui64 .
train
false
3,807
def _find_all_structured_arrays(handle): import h5py structured_arrays = [] def append_structured_arrays(name, obj): if (isinstance(obj, h5py.Dataset) and (obj.dtype.kind == u'V')): structured_arrays.append(name) handle.visititems(append_structured_arrays) return structured_arrays
[ "def", "_find_all_structured_arrays", "(", "handle", ")", ":", "import", "h5py", "structured_arrays", "=", "[", "]", "def", "append_structured_arrays", "(", "name", ",", "obj", ")", ":", "if", "(", "isinstance", "(", "obj", ",", "h5py", ".", "Dataset", ")", ...
find all structured arrays in an hdf5 file .
train
false
3,808
def name_to_batch(name, batch_size, num_steps): data = np.zeros(((batch_size * num_steps) + 1)) data_index = 0 for letter in (map(_letter_to_number, name) + [_EON]): data[data_index] = letter data_index += 1 x = data[:(batch_size * num_steps)].reshape((batch_size, num_steps)) y = data[1:((batch_size * num_step...
[ "def", "name_to_batch", "(", "name", ",", "batch_size", ",", "num_steps", ")", ":", "data", "=", "np", ".", "zeros", "(", "(", "(", "batch_size", "*", "num_steps", ")", "+", "1", ")", ")", "data_index", "=", "0", "for", "letter", "in", "(", "map", ...
takes a single name and fills a batch with it args: name: lowercase composed of 26 characters batch_size: int num_steps: int returns: x .
train
false
3,810
def sys_info(fname=None, overwrite=False): if ((fname is not None) and op.isfile(fname) and (not overwrite)): raise IOError('file exists, use overwrite=True to overwrite') out = '' try: from ..app import use_app, Canvas from ..app.backends import BACKEND_NAMES from ..gloo import gl from ..testing import ha...
[ "def", "sys_info", "(", "fname", "=", "None", ",", "overwrite", "=", "False", ")", ":", "if", "(", "(", "fname", "is", "not", "None", ")", "and", "op", ".", "isfile", "(", "fname", ")", "and", "(", "not", "overwrite", ")", ")", ":", "raise", "IOE...
return useful information about ipython and the system .
train
true
3,812
def process_token_or_pass(func): @functools.wraps(func) def wrapper(*args, **kwargs): encoded_token = request.args.get('token') if encoded_token: handler = TokenHandler.from_string(encoded_token) try: res = handler.to_response() except TokenHandlerNotFound as e: raise HTTPError(http.BAD_REQUEST, ...
[ "def", "process_token_or_pass", "(", "func", ")", ":", "@", "functools", ".", "wraps", "(", "func", ")", "def", "wrapper", "(", "*", "args", ",", "**", "kwargs", ")", ":", "encoded_token", "=", "request", ".", "args", ".", "get", "(", "'token'", ")", ...
parse encoded token and run attached handlers .
train
false
3,813
def fileobj_mode(f): if (hasattr(f, 'fileobj') and hasattr(f.fileobj, 'mode')): fileobj = f.fileobj elif hasattr(f, 'fileobj_mode'): return f.fileobj_mode elif (hasattr(f, 'fp') and hasattr(f.fp, 'mode')): fileobj = f.fp elif hasattr(f, 'mode'): fileobj = f else: return None return _fileobj_normalize_mo...
[ "def", "fileobj_mode", "(", "f", ")", ":", "if", "(", "hasattr", "(", "f", ",", "'fileobj'", ")", "and", "hasattr", "(", "f", ".", "fileobj", ",", "'mode'", ")", ")", ":", "fileobj", "=", "f", ".", "fileobj", "elif", "hasattr", "(", "f", ",", "'f...
returns the mode string of a file-like object if such a thing exists .
train
false
3,814
def getDefaultFetcher(): global _default_fetcher if (_default_fetcher is None): setDefaultFetcher(createHTTPFetcher()) return _default_fetcher
[ "def", "getDefaultFetcher", "(", ")", ":", "global", "_default_fetcher", "if", "(", "_default_fetcher", "is", "None", ")", ":", "setDefaultFetcher", "(", "createHTTPFetcher", "(", ")", ")", "return", "_default_fetcher" ]
return the default fetcher instance if no fetcher has been set .
train
false
3,815
def dyld_find(name, executable_path=None, env=None): name = ensure_utf8(name) executable_path = ensure_utf8(executable_path) for path in dyld_image_suffix_search(chain(dyld_override_search(name, env), dyld_executable_path_search(name, executable_path), dyld_default_search(name, env)), env): if os.path.isfile(path)...
[ "def", "dyld_find", "(", "name", ",", "executable_path", "=", "None", ",", "env", "=", "None", ")", ":", "name", "=", "ensure_utf8", "(", "name", ")", "executable_path", "=", "ensure_utf8", "(", "executable_path", ")", "for", "path", "in", "dyld_image_suffix...
find a library or framework using dyld semantics .
train
true
3,817
@library.global_function def stringify_groups(groups): return u','.join([group.name for group in groups])
[ "@", "library", ".", "global_function", "def", "stringify_groups", "(", "groups", ")", ":", "return", "u','", ".", "join", "(", "[", "group", ".", "name", "for", "group", "in", "groups", "]", ")" ]
change a list of group objects into a space-delimited string .
train
false
3,818
def argmax_list(seq, func): return argmin_list(seq, (lambda x: (- func(x))))
[ "def", "argmax_list", "(", "seq", ",", "func", ")", ":", "return", "argmin_list", "(", "seq", ",", "(", "lambda", "x", ":", "(", "-", "func", "(", "x", ")", ")", ")", ")" ]
return a list of elements of seq[i] with the highest func scores .
train
false
3,819
def CDLGRAVESTONEDOJI(barDs, count): return call_talib_with_ohlc(barDs, count, talib.CDLGRAVESTONEDOJI)
[ "def", "CDLGRAVESTONEDOJI", "(", "barDs", ",", "count", ")", ":", "return", "call_talib_with_ohlc", "(", "barDs", ",", "count", ",", "talib", ".", "CDLGRAVESTONEDOJI", ")" ]
gravestone doji .
train
false
3,821
@handle_response_format @treeio_login_required def settings_edit(request, response_format='html'): if (not request.user.profile.is_admin('treeio.projects')): return user_denied(request, message="You don't have administrator access to the Projects module") form = None if request.POST: if ('cancel' not in request....
[ "@", "handle_response_format", "@", "treeio_login_required", "def", "settings_edit", "(", "request", ",", "response_format", "=", "'html'", ")", ":", "if", "(", "not", "request", ".", "user", ".", "profile", ".", "is_admin", "(", "'treeio.projects'", ")", ")", ...
settings edit .
train
false
3,822
def get_namespaces(start=None, end=None): q = Namespace.all() if (start is not None): q.filter('__key__ >=', Namespace.key_for_namespace(start)) if (end is not None): q.filter('__key__ <', Namespace.key_for_namespace(end)) return [x.namespace_name for x in q.run()]
[ "def", "get_namespaces", "(", "start", "=", "None", ",", "end", "=", "None", ")", ":", "q", "=", "Namespace", ".", "all", "(", ")", "if", "(", "start", "is", "not", "None", ")", ":", "q", ".", "filter", "(", "'__key__ >='", ",", "Namespace", ".", ...
return all namespaces in the specified range .
train
false
3,825
def get_reader_session(): return context_manager.reader.get_sessionmaker()()
[ "def", "get_reader_session", "(", ")", ":", "return", "context_manager", ".", "reader", ".", "get_sessionmaker", "(", ")", "(", ")" ]
helper to get reader session .
train
false
3,826
def have_qstring(): return (not ((sys.version_info.major >= 3) or QT_VERSION_STR.startswith('5.')))
[ "def", "have_qstring", "(", ")", ":", "return", "(", "not", "(", "(", "sys", ".", "version_info", ".", "major", ">=", "3", ")", "or", "QT_VERSION_STR", ".", "startswith", "(", "'5.'", ")", ")", ")" ]
p3/qt5 get rid of qstring wrapper as py3 has native unicode str type .
train
false
3,827
def makeKickstartFloppy(): kickstart = 'ks.cfg' with open(kickstart, 'w') as f: f.write(KickstartText) preseed = 'ks.preseed' with open(preseed, 'w') as f: f.write(PreseedText) floppy = 'ksfloppy.img' run(('qemu-img create %s 1440k' % floppy)) run(('mkfs -t msdos ' + floppy)) run(('mcopy -i %s %s ::/' % (fl...
[ "def", "makeKickstartFloppy", "(", ")", ":", "kickstart", "=", "'ks.cfg'", "with", "open", "(", "kickstart", ",", "'w'", ")", "as", "f", ":", "f", ".", "write", "(", "KickstartText", ")", "preseed", "=", "'ks.preseed'", "with", "open", "(", "preseed", ",...
create and return kickstart floppy .
train
false
3,828
def read_config_file(option, opt, value, parser): try: new_settings = parser.get_config_file_settings(value) except ValueError as error: parser.error(error) parser.values.update(new_settings, parser)
[ "def", "read_config_file", "(", "option", ",", "opt", ",", "value", ",", "parser", ")", ":", "try", ":", "new_settings", "=", "parser", ".", "get_config_file_settings", "(", "value", ")", "except", "ValueError", "as", "error", ":", "parser", ".", "error", ...
reads all uppercase variables defined in the given module file .
train
false
3,829
def efetch(db, **keywords): cgi = 'https://eutils.ncbi.nlm.nih.gov/entrez/eutils/efetch.fcgi' variables = {'db': db} variables.update(keywords) post = False try: ids = variables['id'] except KeyError: pass else: if isinstance(ids, list): ids = ','.join(ids) variables['id'] = ids elif isinstance(ids...
[ "def", "efetch", "(", "db", ",", "**", "keywords", ")", ":", "cgi", "=", "'https://eutils.ncbi.nlm.nih.gov/entrez/eutils/efetch.fcgi'", "variables", "=", "{", "'db'", ":", "db", "}", "variables", ".", "update", "(", "keywords", ")", "post", "=", "False", "try"...
fetches entrez results which are returned as a handle .
train
false
3,830
def edge_detect(pin, event_callback, bounce): import Adafruit_BBIO.GPIO as GPIO GPIO.add_event_detect(pin, GPIO.BOTH, callback=event_callback, bouncetime=bounce)
[ "def", "edge_detect", "(", "pin", ",", "event_callback", ",", "bounce", ")", ":", "import", "Adafruit_BBIO", ".", "GPIO", "as", "GPIO", "GPIO", ".", "add_event_detect", "(", "pin", ",", "GPIO", ".", "BOTH", ",", "callback", "=", "event_callback", ",", "bou...
add detection for rising and falling events .
train
false
3,831
def dipy_version(): if no_dipy(): return None return dipy.__version__
[ "def", "dipy_version", "(", ")", ":", "if", "no_dipy", "(", ")", ":", "return", "None", "return", "dipy", ".", "__version__" ]
check dipy version .
train
false
3,833
def RemoveELBInstance(region, instance_id, node_type): balancers = GetLoadBalancers(region, node_types=[node_type]) assert balancers, ('No %s load balancer in region %s' % (node_type, region)) assert (len(balancers) == 1) b = balancers[0] balancer_instances = set([i.id for i in b.instances]) if (instance_id not i...
[ "def", "RemoveELBInstance", "(", "region", ",", "instance_id", ",", "node_type", ")", ":", "balancers", "=", "GetLoadBalancers", "(", "region", ",", "node_types", "=", "[", "node_type", "]", ")", "assert", "balancers", ",", "(", "'No %s load balancer in region %s'...
add an instance to the load balancer in region .
train
false
3,834
def memory_usage(): return _GetSystemStats().memory()
[ "def", "memory_usage", "(", ")", ":", "return", "_GetSystemStats", "(", ")", ".", "memory", "(", ")" ]
log memory usage before and after a method .
train
false
3,835
@pytest.mark.skipif('not HAS_BEAUTIFUL_SOUP') def test_read_html_unicode(): table_in = [u'<table>', u'<tr><td>&#x0394;</td></tr>', u'<tr><td>\u0394</td></tr>', u'</table>'] dat = Table.read(table_in, format='ascii.html') assert np.all((dat['col1'] == [u'\u0394', u'\u0394']))
[ "@", "pytest", ".", "mark", ".", "skipif", "(", "'not HAS_BEAUTIFUL_SOUP'", ")", "def", "test_read_html_unicode", "(", ")", ":", "table_in", "=", "[", "u'<table>'", ",", "u'<tr><td>&#x0394;</td></tr>'", ",", "u'<tr><td>\\u0394</td></tr>'", ",", "u'</table>'", "]", "...
test reading an html table with unicode values .
train
false
3,836
def CloseBuffersForFilename(filename): buffer_number = GetBufferNumberForFilename(filename, False) while (buffer_number != (-1)): vim.command(u'silent! bwipeout! {0}'.format(buffer_number)) new_buffer_number = GetBufferNumberForFilename(filename, False) if (buffer_number == new_buffer_number): raise RuntimeE...
[ "def", "CloseBuffersForFilename", "(", "filename", ")", ":", "buffer_number", "=", "GetBufferNumberForFilename", "(", "filename", ",", "False", ")", "while", "(", "buffer_number", "!=", "(", "-", "1", ")", ")", ":", "vim", ".", "command", "(", "u'silent! bwipe...
close all buffers for a specific file .
train
false
3,837
def nearest_unequal_elements(dts, dt): if (not dts.is_unique): raise ValueError('dts must be unique') if (not dts.is_monotonic_increasing): raise ValueError('dts must be sorted in increasing order') if (not len(dts)): return (None, None) sortpos = dts.searchsorted(dt, side='left') try: sortval = dts[sortpo...
[ "def", "nearest_unequal_elements", "(", "dts", ",", "dt", ")", ":", "if", "(", "not", "dts", ".", "is_unique", ")", ":", "raise", "ValueError", "(", "'dts must be unique'", ")", "if", "(", "not", "dts", ".", "is_monotonic_increasing", ")", ":", "raise", "V...
find values in dts closest but not equal to dt .
train
true
3,838
def transfer_create(context, values): return IMPL.transfer_create(context, values)
[ "def", "transfer_create", "(", "context", ",", "values", ")", ":", "return", "IMPL", ".", "transfer_create", "(", "context", ",", "values", ")" ]
create an entry in the transfers table .
train
false
3,839
def ExpectedFailure(reason, *exception_matchers): def decorator(test): @functools.wraps(test) def Wrapper(*args, **kwargs): try: test(*args, **kwargs) except Exception as test_exception: test_exception_message = ToUnicode(test_exception) try: for matcher in exception_matchers: assert_t...
[ "def", "ExpectedFailure", "(", "reason", ",", "*", "exception_matchers", ")", ":", "def", "decorator", "(", "test", ")", ":", "@", "functools", ".", "wraps", "(", "test", ")", "def", "Wrapper", "(", "*", "args", ",", "**", "kwargs", ")", ":", "try", ...
defines a decorator to be attached to tests .
train
false
3,840
def _mark_cookie_for_deletion(request): request.need_to_delete_cookie = True
[ "def", "_mark_cookie_for_deletion", "(", "request", ")", ":", "request", ".", "need_to_delete_cookie", "=", "True" ]
updates the given request object to designate that the session cookie should be deleted .
train
false
3,841
@snippet def topic_iam_policy(client, to_delete): TOPIC_NAME = ('topic_iam_policy-%d' % (_millis(),)) topic = client.topic(TOPIC_NAME) topic.create() to_delete.append(topic) policy = topic.get_iam_policy() assert (len(policy.viewers) == 0) assert (len(policy.editors) == 0) assert (len(policy.owners) == 0) ALL_...
[ "@", "snippet", "def", "topic_iam_policy", "(", "client", ",", "to_delete", ")", ":", "TOPIC_NAME", "=", "(", "'topic_iam_policy-%d'", "%", "(", "_millis", "(", ")", ",", ")", ")", "topic", "=", "client", ".", "topic", "(", "TOPIC_NAME", ")", "topic", "....
fetch / set a topics iam policy .
train
false
3,842
def acquire_lock(): pass
[ "def", "acquire_lock", "(", ")", ":", "pass" ]
acquiring the lock is a no-op since no threading is supported .
train
false
3,844
def unescape_all(url): if isinstance(url, bytes): func2use = _unescape_bytes keys2use = _bytes_keys else: func2use = _unescape_str keys2use = _str_keys clean_url = func2use(url) not_done = [(clean_url.count(key) > 0) for key in keys2use] if (True in not_done): return unescape_all(clean_url) else: retu...
[ "def", "unescape_all", "(", "url", ")", ":", "if", "isinstance", "(", "url", ",", "bytes", ")", ":", "func2use", "=", "_unescape_bytes", "keys2use", "=", "_bytes_keys", "else", ":", "func2use", "=", "_unescape_str", "keys2use", "=", "_str_keys", "clean_url", ...
recursively unescape a given url .
train
false
3,845
def visstd(a, s=0.1): return ((((a - a.mean()) / max(a.std(), 0.0001)) * s) + 0.5)
[ "def", "visstd", "(", "a", ",", "s", "=", "0.1", ")", ":", "return", "(", "(", "(", "(", "a", "-", "a", ".", "mean", "(", ")", ")", "/", "max", "(", "a", ".", "std", "(", ")", ",", "0.0001", ")", ")", "*", "s", ")", "+", "0.5", ")" ]
normalize the image range for visualization .
train
false
3,847
def product_upper_triangle(values, include_diagonal=False): return all_pairs_matching_predicate(values, (operator.le if include_diagonal else operator.lt))
[ "def", "product_upper_triangle", "(", "values", ",", "include_diagonal", "=", "False", ")", ":", "return", "all_pairs_matching_predicate", "(", "values", ",", "(", "operator", ".", "le", "if", "include_diagonal", "else", "operator", ".", "lt", ")", ")" ]
return an iterator over pairs .
train
false
3,849
def image_preprocessing(image_buffer, bbox, train, thread_id=0): if (bbox is None): raise ValueError('Please supply a bounding box.') image = decode_jpeg(image_buffer) height = FLAGS.image_size width = FLAGS.image_size if train: image = distort_image(image, height, width, bbox, thread_id) else: image = eval...
[ "def", "image_preprocessing", "(", "image_buffer", ",", "bbox", ",", "train", ",", "thread_id", "=", "0", ")", ":", "if", "(", "bbox", "is", "None", ")", ":", "raise", "ValueError", "(", "'Please supply a bounding box.'", ")", "image", "=", "decode_jpeg", "(...
decode and preprocess one image for evaluation or training .
train
false
3,851
def _int_arith_flags(rettype): if rettype.signed: return ['nsw'] else: return []
[ "def", "_int_arith_flags", "(", "rettype", ")", ":", "if", "rettype", ".", "signed", ":", "return", "[", "'nsw'", "]", "else", ":", "return", "[", "]" ]
return the modifier flags for integer arithmetic .
train
false
3,853
def make_csv_output(res, dt): import frappe from cStringIO import StringIO import csv f = StringIO() writer = csv.writer(f) for r in res: row = [] for v in r: if isinstance(v, basestring): v = v.encode(u'utf-8') row.append(v) writer.writerow(row) f.seek(0) frappe.response[u'result'] = unicode(f....
[ "def", "make_csv_output", "(", "res", ",", "dt", ")", ":", "import", "frappe", "from", "cStringIO", "import", "StringIO", "import", "csv", "f", "=", "StringIO", "(", ")", "writer", "=", "csv", ".", "writer", "(", "f", ")", "for", "r", "in", "res", ":...
send method response as downloadable csv file .
train
false
3,854
@pick_context_manager_writer def flavor_access_add(context, flavor_id, project_id): instance_type_id = _flavor_get_id_from_flavor(context, flavor_id) access_ref = models.InstanceTypeProjects() access_ref.update({'instance_type_id': instance_type_id, 'project_id': project_id}) try: access_ref.save(context.session)...
[ "@", "pick_context_manager_writer", "def", "flavor_access_add", "(", "context", ",", "flavor_id", ",", "project_id", ")", ":", "instance_type_id", "=", "_flavor_get_id_from_flavor", "(", "context", ",", "flavor_id", ")", "access_ref", "=", "models", ".", "InstanceType...
add flavor access for project .
train
false
3,855
def lastmodified(date_obj): web.header('Last-Modified', net.httpdate(date_obj))
[ "def", "lastmodified", "(", "date_obj", ")", ":", "web", ".", "header", "(", "'Last-Modified'", ",", "net", ".", "httpdate", "(", "date_obj", ")", ")" ]
outputs a last-modified header for datetime .
train
false
3,857
def re_render_content_for_management_command(message): assert Message.need_to_render_content(message.rendered_content, message.rendered_content_version, bugdown.version) rendered_content = render_markdown(message, message.content) message.rendered_content = rendered_content message.rendered_content_version = bugdow...
[ "def", "re_render_content_for_management_command", "(", "message", ")", ":", "assert", "Message", ".", "need_to_render_content", "(", "message", ".", "rendered_content", ",", "message", ".", "rendered_content_version", ",", "bugdown", ".", "version", ")", "rendered_cont...
please avoid using this function .
train
false
3,859
def trigrams(sequence, **kwargs): for item in ngrams(sequence, 3, **kwargs): (yield item)
[ "def", "trigrams", "(", "sequence", ",", "**", "kwargs", ")", ":", "for", "item", "in", "ngrams", "(", "sequence", ",", "3", ",", "**", "kwargs", ")", ":", "(", "yield", "item", ")" ]
return the trigrams generated from a sequence of items .
train
false
3,860
def heap_sort(unsorted): n = len(unsorted) for i in range(((n // 2) - 1), (-1), (-1)): heapify(unsorted, i, n) for i in range((n - 1), 0, (-1)): (unsorted[0], unsorted[i]) = (unsorted[i], unsorted[0]) heapify(unsorted, 0, i) return unsorted
[ "def", "heap_sort", "(", "unsorted", ")", ":", "n", "=", "len", "(", "unsorted", ")", "for", "i", "in", "range", "(", "(", "(", "n", "//", "2", ")", "-", "1", ")", ",", "(", "-", "1", ")", ",", "(", "-", "1", ")", ")", ":", "heapify", "("...
pure implementation of the heap sort algorithm in python .
train
false
3,861
def cmServicePrompt(): a = TpPd(pd=5) b = MessageType(mesType=37) c = PdAndSapi() packet = ((a / b) / c) return packet
[ "def", "cmServicePrompt", "(", ")", ":", "a", "=", "TpPd", "(", "pd", "=", "5", ")", "b", "=", "MessageType", "(", "mesType", "=", "37", ")", "c", "=", "PdAndSapi", "(", ")", "packet", "=", "(", "(", "a", "/", "b", ")", "/", "c", ")", "return...
cm service prompt section 9 .
train
true
3,862
def print_column_headers(results): print('Column Headers:') headers = results.get('columnHeaders') for header in headers: print((' DCTB %s name: = %s' % (header.get('columnType').title(), header.get('name')))) print((' DCTB Column Type = %s' % header.get('columnType'))) print((' DCTB Data Type = %s' % hea...
[ "def", "print_column_headers", "(", "results", ")", ":", "print", "(", "'Column Headers:'", ")", "headers", "=", "results", ".", "get", "(", "'columnHeaders'", ")", "for", "header", "in", "headers", ":", "print", "(", "(", "' DCTB %s name: = %s'", "%", "(",...
prints the information for each column .
train
false
3,863
def get_start_end(sequence, skiplist=('-', '?')): length = len(sequence) if (length == 0): return (None, None) end = (length - 1) while ((end >= 0) and (sequence[end] in skiplist)): end -= 1 start = 0 while ((start < length) and (sequence[start] in skiplist)): start += 1 if ((start == length) and (end == (...
[ "def", "get_start_end", "(", "sequence", ",", "skiplist", "=", "(", "'-'", ",", "'?'", ")", ")", ":", "length", "=", "len", "(", "sequence", ")", "if", "(", "length", "==", "0", ")", ":", "return", "(", "None", ",", "None", ")", "end", "=", "(", ...
return position of first and last character which is not in skiplist .
train
false
3,864
def osc_ostlist(directory, fs): ostlist = [] for ost in os.listdir(directory): if (fs in ost): fir = ost.find('-') sec = ost.find('-', (fir + 1)) thrd = ost.find('-', (sec + 1)) ost_name = ost[(fir + 1):sec] if (ost_name not in ostlist): ostlist.append(ost_name) (yield ost_name)
[ "def", "osc_ostlist", "(", "directory", ",", "fs", ")", ":", "ostlist", "=", "[", "]", "for", "ost", "in", "os", ".", "listdir", "(", "directory", ")", ":", "if", "(", "fs", "in", "ost", ")", ":", "fir", "=", "ost", ".", "find", "(", "'-'", ")"...
return ost names based on folder names in osc directory .
train
false
3,866
def pickleDumpDict(name, d): try: f = open((name + '.pickle'), 'w') pickle.dump(d, f) f.close() return True except Exception as e: print(('Error writing into', name, ':', str(e))) return False
[ "def", "pickleDumpDict", "(", "name", ",", "d", ")", ":", "try", ":", "f", "=", "open", "(", "(", "name", "+", "'.pickle'", ")", ",", "'w'", ")", "pickle", ".", "dump", "(", "d", ",", "f", ")", "f", ".", "close", "(", ")", "return", "True", "...
pickle-dump a variable into a file .
train
false
3,870
def getMinimumYByPath(path): minimumYByPath = path[0].y for point in path: minimumYByPath = min(minimumYByPath, point.y) return minimumYByPath
[ "def", "getMinimumYByPath", "(", "path", ")", ":", "minimumYByPath", "=", "path", "[", "0", "]", ".", "y", "for", "point", "in", "path", ":", "minimumYByPath", "=", "min", "(", "minimumYByPath", ",", "point", ".", "y", ")", "return", "minimumYByPath" ]
get path with overhangs removed or filled in .
train
false
3,871
def volunteer_award(): return s3_rest_controller()
[ "def", "volunteer_award", "(", ")", ":", "return", "s3_rest_controller", "(", ")" ]
used for returning options to the s3popuplink popup .
train
false
3,875
def get_device_by_name_or_pk(name): if re.match(DEVICE_BY_PK_RE, name): pk = name.strip('{}') device = Device.objects.get(pk=pk) else: device = Device.objects.get(name=name) return device
[ "def", "get_device_by_name_or_pk", "(", "name", ")", ":", "if", "re", ".", "match", "(", "DEVICE_BY_PK_RE", ",", "name", ")", ":", "pk", "=", "name", ".", "strip", "(", "'{}'", ")", "device", "=", "Device", ".", "objects", ".", "get", "(", "pk", "=",...
attempt to retrieve a device by either its name or primary key .
train
false
3,876
def export_module_json(doc, is_standard, module): if ((not frappe.flags.in_import) and getattr(frappe.get_conf(), u'developer_mode', 0) and is_standard): from frappe.modules.export_file import export_to_files from frappe.modules import get_module_path export_to_files(record_list=[[doc.doctype, doc.name]], record...
[ "def", "export_module_json", "(", "doc", ",", "is_standard", ",", "module", ")", ":", "if", "(", "(", "not", "frappe", ".", "flags", ".", "in_import", ")", "and", "getattr", "(", "frappe", ".", "get_conf", "(", ")", ",", "u'developer_mode'", ",", "0", ...
make a folder for the given doc and add its json file .
train
false
3,877
@task() @timeit def index_chunk_task(write_index, batch_id, rec_id, chunk): (cls_path, id_list) = chunk cls = from_class_path(cls_path) rec = None from kitsune.search.models import Record try: pin_this_thread() rec = Record.objects.get(pk=rec_id) rec.start_time = datetime.datetime.now() rec.message = (u'Re...
[ "@", "task", "(", ")", "@", "timeit", "def", "index_chunk_task", "(", "write_index", ",", "batch_id", ",", "rec_id", ",", "chunk", ")", ":", "(", "cls_path", ",", "id_list", ")", "=", "chunk", "cls", "=", "from_class_path", "(", "cls_path", ")", "rec", ...
index a chunk of things .
train
false
3,878
@env.catch_exceptions def enable_virtualenv(): path = env.var('g:pymode_virtualenv_path') path = os.path.abspath(path) enabled = env.var('g:pymode_virtualenv_enabled') if (path == enabled): env.message(('Virtualenv %s already enabled.' % path)) return env.stop() activate_this = os.path.join(os.path.join(path, ...
[ "@", "env", ".", "catch_exceptions", "def", "enable_virtualenv", "(", ")", ":", "path", "=", "env", ".", "var", "(", "'g:pymode_virtualenv_path'", ")", "path", "=", "os", ".", "path", ".", "abspath", "(", "path", ")", "enabled", "=", "env", ".", "var", ...
enable virtualenv for vim .
train
false
3,879
@receiver(thread_edited) @receiver(thread_deleted) @receiver(comment_edited) @receiver(comment_deleted) def post_edit_delete_handler(sender, **kwargs): post = kwargs['post'] handle_activity(kwargs['user'], post, long(post.user_id))
[ "@", "receiver", "(", "thread_edited", ")", "@", "receiver", "(", "thread_deleted", ")", "@", "receiver", "(", "comment_edited", ")", "@", "receiver", "(", "comment_deleted", ")", "def", "post_edit_delete_handler", "(", "sender", ",", "**", "kwargs", ")", ":",...
update the users last activity date upon editing or deleting a post .
train
false
3,880
def is_conemu_ansi(): return (is_windows() and (os.environ.get(u'ConEmuANSI', u'OFF') == u'ON'))
[ "def", "is_conemu_ansi", "(", ")", ":", "return", "(", "is_windows", "(", ")", "and", "(", "os", ".", "environ", ".", "get", "(", "u'ConEmuANSI'", ",", "u'OFF'", ")", "==", "u'ON'", ")", ")" ]
true when the conemu windows console is used .
train
false
3,881
def ck_browse(): table = s3db.doc_ckeditor set = db((table.id > 0)) rows = set.select(orderby=table.title) return dict(rows=rows, cknum=request.vars.CKEditorFuncNum)
[ "def", "ck_browse", "(", ")", ":", "table", "=", "s3db", ".", "doc_ckeditor", "set", "=", "db", "(", "(", "table", ".", "id", ">", "0", ")", ")", "rows", "=", "set", ".", "select", "(", "orderby", "=", "table", ".", "title", ")", "return", "dict"...
controller to handle uploads to ckeditor .
train
false
3,882
def pagerank_numpy(G, alpha=0.85, personalization=None, weight='weight', dangling=None): import numpy as np if (len(G) == 0): return {} M = google_matrix(G, alpha, personalization=personalization, weight=weight, dangling=dangling) (eigenvalues, eigenvectors) = np.linalg.eig(M.T) ind = np.argmax(eigenvalues) lar...
[ "def", "pagerank_numpy", "(", "G", ",", "alpha", "=", "0.85", ",", "personalization", "=", "None", ",", "weight", "=", "'weight'", ",", "dangling", "=", "None", ")", ":", "import", "numpy", "as", "np", "if", "(", "len", "(", "G", ")", "==", "0", ")...
return the pagerank of the nodes in the graph .
train
false
3,883
def _make_api_request_no_retry(http, http_request, redirections=_REDIRECTIONS): connection_type = None if getattr(http, 'connections', None): url_scheme = parse.urlsplit(http_request.url).scheme if (url_scheme and (url_scheme in http.connections)): connection_type = http.connections[url_scheme] new_debuglevel...
[ "def", "_make_api_request_no_retry", "(", "http", ",", "http_request", ",", "redirections", "=", "_REDIRECTIONS", ")", ":", "connection_type", "=", "None", "if", "getattr", "(", "http", ",", "'connections'", ",", "None", ")", ":", "url_scheme", "=", "parse", "...
send an http request via the given http instance .
train
false
3,884
def get_path_names(): return _SCHEME_KEYS
[ "def", "get_path_names", "(", ")", ":", "return", "_SCHEME_KEYS" ]
return a tuple containing the paths names .
train
false
3,885
def parse_qiime_config_file(qiime_config_file): result = {} for line in qiime_config_file: line = line.strip() if ((not line) or line.startswith('#')): continue fields = line.split() param_id = fields[0] param_value = (expandvars(' '.join(fields[1:])) or None) result[param_id] = param_value return res...
[ "def", "parse_qiime_config_file", "(", "qiime_config_file", ")", ":", "result", "=", "{", "}", "for", "line", "in", "qiime_config_file", ":", "line", "=", "line", ".", "strip", "(", ")", "if", "(", "(", "not", "line", ")", "or", "line", ".", "startswith"...
parse lines in a qiime_config file .
train
false
3,888
def dup_gcdex(f, g, K): (s, h) = dup_half_gcdex(f, g, K) F = dup_sub_mul(h, s, f, K) t = dup_quo(F, g, K) return (s, t, h)
[ "def", "dup_gcdex", "(", "f", ",", "g", ",", "K", ")", ":", "(", "s", ",", "h", ")", "=", "dup_half_gcdex", "(", "f", ",", "g", ",", "K", ")", "F", "=", "dup_sub_mul", "(", "h", ",", "s", ",", "f", ",", "K", ")", "t", "=", "dup_quo", "(",...
extended euclidean algorithm in f[x] .
train
false
3,890
def merge_inner(clsdict): samelist = False done = {} while (not samelist): samelist = True classlist = clsdict.keys() for classname in classlist: parts_name = classname.rsplit('$', 1) if (len(parts_name) > 1): (mainclass, innerclass) = parts_name innerclass = innerclass[:(-1)] mainclass += ';...
[ "def", "merge_inner", "(", "clsdict", ")", ":", "samelist", "=", "False", "done", "=", "{", "}", "while", "(", "not", "samelist", ")", ":", "samelist", "=", "True", "classlist", "=", "clsdict", ".", "keys", "(", ")", "for", "classname", "in", "classlis...
merge the inner class of a class: e .
train
true
3,891
def make_tensor(dim): raise NotImplementedError('TODO: implement this function.')
[ "def", "make_tensor", "(", "dim", ")", ":", "raise", "NotImplementedError", "(", "'TODO: implement this function.'", ")" ]
returns a new theano tensor with no broadcastable dimensions .
train
false
3,892
def get_signed_purchase_params(cart, callback_url=None, extra_data=None): return sign(get_purchase_params(cart, callback_url=callback_url, extra_data=extra_data))
[ "def", "get_signed_purchase_params", "(", "cart", ",", "callback_url", "=", "None", ",", "extra_data", "=", "None", ")", ":", "return", "sign", "(", "get_purchase_params", "(", "cart", ",", "callback_url", "=", "callback_url", ",", "extra_data", "=", "extra_data...
return the parameters to send to the current payment processor .
train
false
3,893
def getSliceDictionary(xmlElement): for metadataElement in xmlElement.getChildrenWithClassName('metadata'): for child in metadataElement.children: if (child.className.lower() == 'slice:layers'): return child.attributeDictionary return {}
[ "def", "getSliceDictionary", "(", "xmlElement", ")", ":", "for", "metadataElement", "in", "xmlElement", ".", "getChildrenWithClassName", "(", "'metadata'", ")", ":", "for", "child", "in", "metadataElement", ".", "children", ":", "if", "(", "child", ".", "classNa...
get the metadata slice attribute dictionary .
train
false
3,894
def set_default_app(app): global default_app default_app = app
[ "def", "set_default_app", "(", "app", ")", ":", "global", "default_app", "default_app", "=", "app" ]
set default app .
train
false
3,895
def after_nearest_workday(dt): return next_workday(nearest_workday(dt))
[ "def", "after_nearest_workday", "(", "dt", ")", ":", "return", "next_workday", "(", "nearest_workday", "(", "dt", ")", ")" ]
returns next workday after nearest workday needed for boxing day or multiple holidays in a series .
train
false
3,897
def add_time_units(time, unit, amount): args = {} if (unit == 'hour'): args['hours'] = amount elif (unit == 'day'): args['days'] = amount elif (unit == 'week'): args['days'] = (amount * 7) elif (unit == 'month'): args['months'] = amount elif (unit == 'quarter'): args['months'] = (amount * 3) elif (unit...
[ "def", "add_time_units", "(", "time", ",", "unit", ",", "amount", ")", ":", "args", "=", "{", "}", "if", "(", "unit", "==", "'hour'", ")", ":", "args", "[", "'hours'", "]", "=", "amount", "elif", "(", "unit", "==", "'day'", ")", ":", "args", "[",...
subtract amount number of units from datetime object time .
train
false