id_within_dataset
int64
1
55.5k
snippet
stringlengths
19
14.2k
tokens
listlengths
6
1.63k
nl
stringlengths
6
352
split_within_dataset
stringclasses
1 value
is_duplicated
bool
2 classes
16,975
def getTransformTetragrid(elementNode, prefix): tetragrid = getTetragridA(elementNode, prefix, None) tetragrid = getTetragridC(elementNode, prefix, tetragrid) tetragrid = getTetragridM(elementNode, prefix, tetragrid) tetragrid = getTetragridMatrix(elementNode, prefix, tetragrid) tetragrid = getTetragridR(elementNode, prefix, tetragrid) return tetragrid
[ "def", "getTransformTetragrid", "(", "elementNode", ",", "prefix", ")", ":", "tetragrid", "=", "getTetragridA", "(", "elementNode", ",", "prefix", ",", "None", ")", "tetragrid", "=", "getTetragridC", "(", "elementNode", ",", "prefix", ",", "tetragrid", ")", "t...
get the tetragrid from the elementnode .
train
false
16,976
def takes_kwargs(function): return bool((function.__code__.co_flags & 8))
[ "def", "takes_kwargs", "(", "function", ")", ":", "return", "bool", "(", "(", "function", ".", "__code__", ".", "co_flags", "&", "8", ")", ")" ]
returns true if the supplied function takes keyword arguments .
train
false
16,977
@pytest.fixture def trans_member(): return _require_user('trans_member', 'Transactional member')
[ "@", "pytest", ".", "fixture", "def", "trans_member", "(", ")", ":", "return", "_require_user", "(", "'trans_member'", ",", "'Transactional member'", ")" ]
require a member user .
train
false
16,978
def num_solutions(user): return Question.objects.filter(solution__creator=user).count()
[ "def", "num_solutions", "(", "user", ")", ":", "return", "Question", ".", "objects", ".", "filter", "(", "solution__creator", "=", "user", ")", ".", "count", "(", ")" ]
returns the number of solutions a user has .
train
false
16,979
def _make_grid_to_short_label(dataset): unique_values = [sorted(list(frozenset(column))) for column in dataset.y[:, :5].transpose()] category_index = dataset.label_name_to_index['category'] unique_categories = unique_values[category_index] category_to_name = dataset.label_to_value_funcs[category_index] if any(((category_to_name(category) == 'blank') for category in unique_categories)): for d in range(1, len(unique_values)): assert (unique_values[d][0] == (-1)), ('unique_values: %s' % str(unique_values)) unique_values[d] = unique_values[d][1:] return unique_values
[ "def", "_make_grid_to_short_label", "(", "dataset", ")", ":", "unique_values", "=", "[", "sorted", "(", "list", "(", "frozenset", "(", "column", ")", ")", ")", "for", "column", "in", "dataset", ".", "y", "[", ":", ",", ":", "5", "]", ".", "transpose", ...
returns an array x such that x[a][b] gives label index as bth unique value .
train
false
16,980
@get('/scan/<taskid>/data') def scan_data(taskid): json_data_message = list() json_errors_message = list() if (taskid not in DataStore.tasks): logger.warning(('[%s] Invalid task ID provided to scan_data()' % taskid)) return jsonize({'success': False, 'message': 'Invalid task ID'}) for (status, content_type, value) in DataStore.current_db.execute('SELECT status, content_type, value FROM data WHERE taskid = ? ORDER BY id ASC', (taskid,)): json_data_message.append({'status': status, 'type': content_type, 'value': dejsonize(value)}) for error in DataStore.current_db.execute('SELECT error FROM errors WHERE taskid = ? ORDER BY id ASC', (taskid,)): json_errors_message.append(error) logger.debug(('[%s] Retrieved scan data and error messages' % taskid)) return jsonize({'success': True, 'data': json_data_message, 'error': json_errors_message})
[ "@", "get", "(", "'/scan/<taskid>/data'", ")", "def", "scan_data", "(", "taskid", ")", ":", "json_data_message", "=", "list", "(", ")", "json_errors_message", "=", "list", "(", ")", "if", "(", "taskid", "not", "in", "DataStore", ".", "tasks", ")", ":", "...
retrieve the data of a scan .
train
false
16,981
def branch(tree): if (not isinstance(tree[0], (int, long))): branchsum = 0 for b in tree: branchsum += branch(b) else: print(tree) print('final branch with', tree, sum(tree)) if testxb: return sum(xb[tree]) else: return sum(tree) print('working on branch', tree, branchsum) return branchsum
[ "def", "branch", "(", "tree", ")", ":", "if", "(", "not", "isinstance", "(", "tree", "[", "0", "]", ",", "(", "int", ",", "long", ")", ")", ")", ":", "branchsum", "=", "0", "for", "b", "in", "tree", ":", "branchsum", "+=", "branch", "(", "b", ...
interface to git-branch(1)_ cwd the path to the git checkout name name of the branch on which to operate .
train
false
16,982
def profileSP(spClass, spDim, nRuns): inDim = [10000, 1, 1] colDim = [spDim, 1, 1] sp = spClass(inputDimensions=inDim, columnDimensions=colDim, potentialRadius=3, potentialPct=0.5, globalInhibition=False, localAreaDensity=(-1.0), numActiveColumnsPerInhArea=3, stimulusThreshold=1, synPermInactiveDec=0.01, synPermActiveInc=0.1, synPermConnected=0.1, minPctOverlapDutyCycle=0.1, dutyCyclePeriod=10, boostStrength=10.0, seed=42, spVerbosity=0) dataDim = inDim dataDim.append(nRuns) data = numpy.random.randint(0, 2, dataDim).astype('float32') for i in xrange(nRuns): d = data[:, :, :, i] activeArray = numpy.zeros(colDim) sp.compute(d, True, activeArray)
[ "def", "profileSP", "(", "spClass", ",", "spDim", ",", "nRuns", ")", ":", "inDim", "=", "[", "10000", ",", "1", ",", "1", "]", "colDim", "=", "[", "spDim", ",", "1", ",", "1", "]", "sp", "=", "spClass", "(", "inputDimensions", "=", "inDim", ",", ...
profiling performance of spatialpooler using the python cprofile module and ordered by cumulative time .
train
true
16,983
def libvlc_media_save_meta(p_md): f = (_Cfunctions.get('libvlc_media_save_meta', None) or _Cfunction('libvlc_media_save_meta', ((1,),), None, ctypes.c_int, Media)) return f(p_md)
[ "def", "libvlc_media_save_meta", "(", "p_md", ")", ":", "f", "=", "(", "_Cfunctions", ".", "get", "(", "'libvlc_media_save_meta'", ",", "None", ")", "or", "_Cfunction", "(", "'libvlc_media_save_meta'", ",", "(", "(", "1", ",", ")", ",", ")", ",", "None", ...
save the meta previously set .
train
true
16,984
def test_failure_junit_xml(test_name, message, stdout=None, class_name='Results', testcase_name='test_ran'): testsuite = ET.Element('testsuite') testsuite.set('tests', '1') testsuite.set('failures', '1') testsuite.set('time', '1') testsuite.set('errors', '0') testsuite.set('name', test_name) testcase = ET.SubElement(testsuite, 'testcase') testcase.set('name', testcase_name) testcase.set('status', 'run') testcase.set('time', '1') testcase.set('classname', class_name) failure = ET.SubElement(testcase, 'failure') failure.set('message', message) failure.set('type', '') if stdout: system_out = ET.SubElement(testsuite, 'system-out') system_out.text = cdata(filter_nonprintable_text(stdout)) return ET.tostring(testsuite, encoding='utf-8', method='xml')
[ "def", "test_failure_junit_xml", "(", "test_name", ",", "message", ",", "stdout", "=", "None", ",", "class_name", "=", "'Results'", ",", "testcase_name", "=", "'test_ran'", ")", ":", "testsuite", "=", "ET", ".", "Element", "(", "'testsuite'", ")", "testsuite",...
generate junit xml file for a unary test suite where the test failed .
train
false
16,987
def image_dim_ordering(): return _IMAGE_DIM_ORDERING
[ "def", "image_dim_ordering", "(", ")", ":", "return", "_IMAGE_DIM_ORDERING" ]
returns the default image dimension ordering convention .
train
false
16,988
def get_browser_controller(browser=None): browser = settings.browser(browser) if (browser is not None): if (browser == 'none'): controller = DummyWebBrowser() else: controller = webbrowser.get(browser) else: controller = webbrowser return controller
[ "def", "get_browser_controller", "(", "browser", "=", "None", ")", ":", "browser", "=", "settings", ".", "browser", "(", "browser", ")", "if", "(", "browser", "is", "not", "None", ")", ":", "if", "(", "browser", "==", "'none'", ")", ":", "controller", ...
return a browser controller .
train
true
16,990
def validate_positive_integer(option, value): val = validate_integer(option, value) if (val <= 0): raise ValueError(('The value of %s must be a positive integer' % (option,))) return val
[ "def", "validate_positive_integer", "(", "option", ",", "value", ")", ":", "val", "=", "validate_integer", "(", "option", ",", "value", ")", "if", "(", "val", "<=", "0", ")", ":", "raise", "ValueError", "(", "(", "'The value of %s must be a positive integer'", ...
validate that value is a positive integer .
train
true
16,991
def parse_max_server_instances(value): if (':' not in value): try: max_server_instances = int(value) except ValueError: raise argparse.ArgumentTypeError(('Invalid instance count: %r' % value)) else: if (not max_server_instances): raise argparse.ArgumentTypeError('Cannot specify zero instances for all servers') return max_server_instances else: server_to_max_instances = {} for server_instance_max in value.split(','): try: (server_name, max_instances) = server_instance_max.split(':') max_instances = int(max_instances) except ValueError: raise argparse.ArgumentTypeError(('Expected "server:max_instances": %r' % server_instance_max)) else: server_name = server_name.strip() if (server_name in server_to_max_instances): raise argparse.ArgumentTypeError(('Duplicate max instance value: %r' % server_name)) server_to_max_instances[server_name] = max_instances return server_to_max_instances
[ "def", "parse_max_server_instances", "(", "value", ")", ":", "if", "(", "':'", "not", "in", "value", ")", ":", "try", ":", "max_server_instances", "=", "int", "(", "value", ")", "except", "ValueError", ":", "raise", "argparse", ".", "ArgumentTypeError", "(",...
returns the parsed value for the --max_server_instances flag .
train
false
16,992
@pytest.mark.parametrize('fast_writer', [True, False]) def test_write_fill_masked_different(fast_writer): data = ascii.read(tab_to_fill) data = table.Table(data, masked=True) data['a'].mask = [True, False] data['c'].mask = [False, True] for test_def in test_def_masked_fill_value: check_write_table(test_def, data, fast_writer)
[ "@", "pytest", ".", "mark", ".", "parametrize", "(", "'fast_writer'", ",", "[", "True", ",", "False", "]", ")", "def", "test_write_fill_masked_different", "(", "fast_writer", ")", ":", "data", "=", "ascii", ".", "read", "(", "tab_to_fill", ")", "data", "="...
see discussion in #2255 .
train
false
16,994
def new(rsa_key): return PKCS115_SigScheme(rsa_key)
[ "def", "new", "(", "rsa_key", ")", ":", "return", "PKCS115_SigScheme", "(", "rsa_key", ")" ]
return a fresh instance of the hash object .
train
false
16,995
def test_env_read_docs(): def on_env_read_docs_1(app, env, docnames): pass app.connect('env-before-read-docs', on_env_read_docs_1) read_docnames = env.update(app.config, app.srcdir, app.doctreedir, app) assert ((len(read_docnames) > 2) and (read_docnames == sorted(read_docnames))) def on_env_read_docs_2(app, env, docnames): docnames.remove('images') app.connect('env-before-read-docs', on_env_read_docs_2) read_docnames = env.update(app.config, app.srcdir, app.doctreedir, app) assert (len(read_docnames) == 2)
[ "def", "test_env_read_docs", "(", ")", ":", "def", "on_env_read_docs_1", "(", "app", ",", "env", ",", "docnames", ")", ":", "pass", "app", ".", "connect", "(", "'env-before-read-docs'", ",", "on_env_read_docs_1", ")", "read_docnames", "=", "env", ".", "update"...
by default .
train
false
16,997
def vg_list(): cmd = 'vgs --all' vgroups = {} result = utils.run(cmd) lines = result.stdout.strip().splitlines() if (len(lines) > 1): columns = lines[0].split() lines = lines[1:] else: return vgroups for line in lines: details = line.split() details_dict = {} index = 0 for column in columns: if re.search('VG', column): vg_name = details[index] else: details_dict[column] = details[index] index += 1 vgroups[vg_name] = details_dict return vgroups
[ "def", "vg_list", "(", ")", ":", "cmd", "=", "'vgs --all'", "vgroups", "=", "{", "}", "result", "=", "utils", ".", "run", "(", "cmd", ")", "lines", "=", "result", ".", "stdout", ".", "strip", "(", ")", ".", "splitlines", "(", ")", "if", "(", "len...
list available volume groups .
train
false
16,998
def _iframe(src, width=650, height=365, content=None, link=None): html = ('<iframe width="%s" height="%s" src="%s" frameborder="0" allowfullscreen></iframe>' % (width, height, src)) if (not content): return html if link: content = ('<a href="%s">%s</a>' % (link, content)) return ('<figure>%s<figcaption>%s</figcaption></figure>' % (html, content))
[ "def", "_iframe", "(", "src", ",", "width", "=", "650", ",", "height", "=", "365", ",", "content", "=", "None", ",", "link", "=", "None", ")", ":", "html", "=", "(", "'<iframe width=\"%s\" height=\"%s\" src=\"%s\" frameborder=\"0\" allowfullscreen></iframe>'", "%"...
create an iframe html snippet .
train
false
16,999
def signal_program(program_name, sig=signal.SIGTERM, pid_files_dir=None): pid = get_pid_from_file(program_name, pid_files_dir) if pid: signal_pid(pid, sig)
[ "def", "signal_program", "(", "program_name", ",", "sig", "=", "signal", ".", "SIGTERM", ",", "pid_files_dir", "=", "None", ")", ":", "pid", "=", "get_pid_from_file", "(", "program_name", ",", "pid_files_dir", ")", "if", "pid", ":", "signal_pid", "(", "pid",...
sends a signal to the process listed in <program_name> .
train
false
17,000
def _similarity_score(obj1, obj2, block_cache=None): if (block_cache is None): block_cache = {} if (obj1.id not in block_cache): block_cache[obj1.id] = _count_blocks(obj1) if (obj2.id not in block_cache): block_cache[obj2.id] = _count_blocks(obj2) common_bytes = _common_bytes(block_cache[obj1.id], block_cache[obj2.id]) max_size = max(obj1.raw_length(), obj2.raw_length()) if (not max_size): return _MAX_SCORE return int(((float(common_bytes) * _MAX_SCORE) / max_size))
[ "def", "_similarity_score", "(", "obj1", ",", "obj2", ",", "block_cache", "=", "None", ")", ":", "if", "(", "block_cache", "is", "None", ")", ":", "block_cache", "=", "{", "}", "if", "(", "obj1", ".", "id", "not", "in", "block_cache", ")", ":", "bloc...
compute a similarity score for two objects .
train
false
17,002
def strip_punc(s, all=False): if all: return PUNCTUATION_REGEX.sub('', s.strip()) else: return s.strip().strip(string.punctuation)
[ "def", "strip_punc", "(", "s", ",", "all", "=", "False", ")", ":", "if", "all", ":", "return", "PUNCTUATION_REGEX", ".", "sub", "(", "''", ",", "s", ".", "strip", "(", ")", ")", "else", ":", "return", "s", ".", "strip", "(", ")", ".", "strip", ...
removes punctuation from a string .
train
false
17,003
def test_pprint_break(): output = pretty.pretty(Breaking()) expected = 'TG: Breaking(\n ):' nt.assert_equal(output, expected)
[ "def", "test_pprint_break", "(", ")", ":", "output", "=", "pretty", ".", "pretty", "(", "Breaking", "(", ")", ")", "expected", "=", "'TG: Breaking(\\n ):'", "nt", ".", "assert_equal", "(", "output", ",", "expected", ")" ]
test that p .
train
false
17,004
def HostPlacer(name, *args, **params): if (name in remoteHosts): return RemoteHost(name, server=remoteServer, *args, **params) else: return Host(name, *args, **params)
[ "def", "HostPlacer", "(", "name", ",", "*", "args", ",", "**", "params", ")", ":", "if", "(", "name", "in", "remoteHosts", ")", ":", "return", "RemoteHost", "(", "name", ",", "server", "=", "remoteServer", ",", "*", "args", ",", "**", "params", ")", ...
custom host() constructor which places hosts on servers .
train
false
17,006
def migrate_task(producer, body_, message, queues=None): info = message.delivery_info queues = ({} if (queues is None) else queues) republish(producer, message, exchange=queues.get(info[u'exchange']), routing_key=queues.get(info[u'routing_key']))
[ "def", "migrate_task", "(", "producer", ",", "body_", ",", "message", ",", "queues", "=", "None", ")", ":", "info", "=", "message", ".", "delivery_info", "queues", "=", "(", "{", "}", "if", "(", "queues", "is", "None", ")", "else", "queues", ")", "re...
migrate single task message .
train
false
17,007
def _authenticate_plain(credentials, sock_info): source = credentials.source username = credentials.username password = credentials.password payload = ('\x00%s\x00%s' % (username, password)).encode('utf-8') cmd = SON([('saslStart', 1), ('mechanism', 'PLAIN'), ('payload', Binary(payload)), ('autoAuthorize', 1)]) sock_info.command(source, cmd)
[ "def", "_authenticate_plain", "(", "credentials", ",", "sock_info", ")", ":", "source", "=", "credentials", ".", "source", "username", "=", "credentials", ".", "username", "password", "=", "credentials", ".", "password", "payload", "=", "(", "'\\x00%s\\x00%s'", ...
authenticate using sasl plain .
train
true
17,008
def fflayer(tparams, state_below, options, prefix='rconv', activ='lambda x: tensor.tanh(x)', **kwargs): return eval(activ)((tensor.dot(state_below, tparams[_p(prefix, 'W')]) + tparams[_p(prefix, 'b')]))
[ "def", "fflayer", "(", "tparams", ",", "state_below", ",", "options", ",", "prefix", "=", "'rconv'", ",", "activ", "=", "'lambda x: tensor.tanh(x)'", ",", "**", "kwargs", ")", ":", "return", "eval", "(", "activ", ")", "(", "(", "tensor", ".", "dot", "(",...
feedforward pass .
train
false
17,011
@adapt_rgb(hsv_value) def equalize_adapthist(image, kernel_size=None, clip_limit=0.01, nbins=256, **kwargs): image = img_as_uint(image) image = rescale_intensity(image, out_range=(0, (NR_OF_GREY - 1))) if kwargs: if (('ntiles_x' in kwargs) or ('ntiles_y' in kwargs)): msg = '`ntiles_*` have been deprecated in favor of `kernel_size`' raise ValueError(msg) if (kernel_size is None): kernel_size = ((image.shape[0] // 8), (image.shape[1] // 8)) elif isinstance(kernel_size, numbers.Number): kernel_size = ((kernel_size,) * image.ndim) elif (len(kernel_size) != image.ndim): ValueError('Incorrect value of `kernel_size`: {}'.format(kernel_size)) kernel_size = [int(k) for k in kernel_size] image = _clahe(image, kernel_size, (clip_limit * nbins), nbins) image = img_as_float(image) return rescale_intensity(image)
[ "@", "adapt_rgb", "(", "hsv_value", ")", "def", "equalize_adapthist", "(", "image", ",", "kernel_size", "=", "None", ",", "clip_limit", "=", "0.01", ",", "nbins", "=", "256", ",", "**", "kwargs", ")", ":", "image", "=", "img_as_uint", "(", "image", ")", ...
contrast limited adaptive histogram equalization .
train
false
17,012
def getLoopsUnified(importRadius, loopLists): allPoints = [] corners = getLoopsListsIntersections(loopLists) radiusSide = (0.01 * importRadius) intercircle.directLoopLists(True, loopLists) for loopListIndex in xrange(len(loopLists)): insetLoops = loopLists[loopListIndex] inBetweenInsetLoops = getInBetweenLoopsFromLoops(importRadius, insetLoops) otherLoops = euclidean.getConcatenatedList((loopLists[:loopListIndex] + loopLists[(loopListIndex + 1):])) corners += getInsetPointsByInsetLoops(insetLoops, False, otherLoops, radiusSide) allPoints += getInsetPointsByInsetLoops(inBetweenInsetLoops, False, otherLoops, radiusSide) allPoints += corners[:] return trianglemesh.getDescendingAreaLoops(allPoints, corners, importRadius)
[ "def", "getLoopsUnified", "(", "importRadius", ",", "loopLists", ")", ":", "allPoints", "=", "[", "]", "corners", "=", "getLoopsListsIntersections", "(", "loopLists", ")", "radiusSide", "=", "(", "0.01", "*", "importRadius", ")", "intercircle", ".", "directLoopL...
get joined loops sliced through shape .
train
false
17,013
def _consume_bytes(seq, num=1): bytes = [] for i in range(0, _B(num), 2): bytes.append(int(seq[i:(i + 2)], 16)) return (bytes, seq[_B(num):])
[ "def", "_consume_bytes", "(", "seq", ",", "num", "=", "1", ")", ":", "bytes", "=", "[", "]", "for", "i", "in", "range", "(", "0", ",", "_B", "(", "num", ")", ",", "2", ")", ":", "bytes", ".", "append", "(", "int", "(", "seq", "[", "i", ":",...
consumes bytes for num ints coverts to int .
train
false
17,014
def extract_docstring(filename, ignore_heading=False): if six.PY2: lines = open(filename).readlines() else: lines = open(filename, encoding='utf-8').readlines() start_row = 0 if lines[0].startswith('#!'): lines.pop(0) start_row = 1 docstring = '' first_par = '' line_iterator = iter(lines) tokens = tokenize.generate_tokens((lambda : next(line_iterator))) for (tok_type, tok_content, _, (erow, _), _) in tokens: tok_type = token.tok_name[tok_type] if (tok_type in ('NEWLINE', 'COMMENT', 'NL', 'INDENT', 'DEDENT')): continue elif (tok_type == 'STRING'): docstring = eval(tok_content) paragraphs = '\n'.join((line.rstrip() for line in docstring.split('\n'))).split('\n\n') if paragraphs: if ignore_heading: if (len(paragraphs) > 1): first_par = re.sub('\n', ' ', paragraphs[1]) first_par = ((first_par[:95] + '...') if (len(first_par) > 95) else first_par) else: raise ValueError("Docstring not found by gallery.\nPlease check the layout of your example file:\n {}\n and make sure it's correct".format(filename)) else: first_par = paragraphs[0] break return (docstring, first_par, ((erow + 1) + start_row))
[ "def", "extract_docstring", "(", "filename", ",", "ignore_heading", "=", "False", ")", ":", "if", "six", ".", "PY2", ":", "lines", "=", "open", "(", "filename", ")", ".", "readlines", "(", ")", "else", ":", "lines", "=", "open", "(", "filename", ",", ...
extract a module-level docstring .
train
false
17,015
def print_pagination_info(management_response): print(('Items per page = %s' % management_response.get('itemsPerPage'))) print(('Total Results = %s' % management_response.get('totalResults'))) print(('Start Index = %s' % management_response.get('startIndex'))) if management_response.get('previousLink'): print(('Previous Link = %s' % management_response.get('previousLink'))) if management_response.get('nextLink'): print(('Next Link = %s' % management_response.get('nextLink')))
[ "def", "print_pagination_info", "(", "management_response", ")", ":", "print", "(", "(", "'Items per page = %s'", "%", "management_response", ".", "get", "(", "'itemsPerPage'", ")", ")", ")", "print", "(", "(", "'Total Results = %s'", "%", "management_response", "....
prints common pagination details .
train
false
17,017
def dup_pexquo(f, g, K): (q, r) = dup_pdiv(f, g, K) if (not r): return q else: raise ExactQuotientFailed(f, g)
[ "def", "dup_pexquo", "(", "f", ",", "g", ",", "K", ")", ":", "(", "q", ",", "r", ")", "=", "dup_pdiv", "(", "f", ",", "g", ",", "K", ")", "if", "(", "not", "r", ")", ":", "return", "q", "else", ":", "raise", "ExactQuotientFailed", "(", "f", ...
polynomial pseudo-quotient in k[x] .
train
false
17,018
def safe(text): return _safe(text)
[ "def", "safe", "(", "text", ")", ":", "return", "_safe", "(", "text", ")" ]
define a new function that wraps f and return it .
train
false
17,021
def listRedundantModules(): mods = {} for (name, mod) in sys.modules.items(): if (not hasattr(mod, '__file__')): continue mfile = os.path.abspath(mod.__file__) if (mfile[(-1)] == 'c'): mfile = mfile[:(-1)] if (mfile in mods): print(('module at %s has 2 names: %s, %s' % (mfile, name, mods[mfile]))) else: mods[mfile] = name
[ "def", "listRedundantModules", "(", ")", ":", "mods", "=", "{", "}", "for", "(", "name", ",", "mod", ")", "in", "sys", ".", "modules", ".", "items", "(", ")", ":", "if", "(", "not", "hasattr", "(", "mod", ",", "'__file__'", ")", ")", ":", "contin...
list modules that have been imported more than once via different paths .
train
false
17,022
def _adapt_eventdict(eventDict, log_level=INFO, encoding='utf-8', prepend_level=True): ev = eventDict.copy() if ev['isError']: ev.setdefault('logLevel', ERROR) if ((ev.get('system') != 'scrapy') and (not ev['isError'])): return level = ev.get('logLevel') if (level < log_level): return spider = ev.get('spider') if spider: ev['system'] = spider.name message = ev.get('message') lvlname = level_names.get(level, 'NOLEVEL') if message: message = [unicode_to_str(x, encoding) for x in message] if prepend_level: message[0] = ('%s: %s' % (lvlname, message[0])) ev['message'] = message why = ev.get('why') if why: why = unicode_to_str(why, encoding) if prepend_level: why = ('%s: %s' % (lvlname, why)) ev['why'] = why return ev
[ "def", "_adapt_eventdict", "(", "eventDict", ",", "log_level", "=", "INFO", ",", "encoding", "=", "'utf-8'", ",", "prepend_level", "=", "True", ")", ":", "ev", "=", "eventDict", ".", "copy", "(", ")", "if", "ev", "[", "'isError'", "]", ":", "ev", ".", ...
adapt twisted log eventdict making it suitable for logging with a scrapy log observer .
train
false
17,023
def _get_installed_apps_entry(app_name): for installed_app in settings.INSTALLED_APPS: if ((installed_app == app_name) or installed_app.endswith(('.' + app_name))): return installed_app return None
[ "def", "_get_installed_apps_entry", "(", "app_name", ")", ":", "for", "installed_app", "in", "settings", ".", "INSTALLED_APPS", ":", "if", "(", "(", "installed_app", "==", "app_name", ")", "or", "installed_app", ".", "endswith", "(", "(", "'.'", "+", "app_name...
given an app name .
train
false
17,026
def construct_tmp_cg_snap_name(cg_name): return ('tmp-snap-' + six.text_type(cg_name))
[ "def", "construct_tmp_cg_snap_name", "(", "cg_name", ")", ":", "return", "(", "'tmp-snap-'", "+", "six", ".", "text_type", "(", "cg_name", ")", ")" ]
return cg snapshot name .
train
false
17,027
def patch_paths(): sys.path.insert(0, os.path.join(os.path.dirname(__file__), 'libs'))
[ "def", "patch_paths", "(", ")", ":", "sys", ".", "path", ".", "insert", "(", "0", ",", "os", ".", "path", ".", "join", "(", "os", ".", "path", ".", "dirname", "(", "__file__", ")", ",", "'libs'", ")", ")" ]
patch python sys .
train
false
17,028
@ensure_csrf_cookie @ensure_valid_course_key def syllabus(request, course_id): course_key = SlashSeparatedCourseKey.from_deprecated_string(course_id) course = get_course_with_access(request.user, 'load', course_key) staff_access = bool(has_access(request.user, 'staff', course)) return render_to_response('courseware/syllabus.html', {'course': course, 'staff_access': staff_access})
[ "@", "ensure_csrf_cookie", "@", "ensure_valid_course_key", "def", "syllabus", "(", "request", ",", "course_id", ")", ":", "course_key", "=", "SlashSeparatedCourseKey", ".", "from_deprecated_string", "(", "course_id", ")", "course", "=", "get_course_with_access", "(", ...
display the courses syllabus .
train
false
17,031
def test_validation_shuffle_split(): skip_if_no_sklearn() from pylearn2.cross_validation.subset_iterators import ValidationShuffleSplit n = 30 cv = ValidationShuffleSplit(n) for (train, valid, test) in cv: assert (np.unique(np.concatenate((train, valid, test))).size == n) assert (valid.size == (n * cv.test_size)) assert (test.size == (n * cv.test_size))
[ "def", "test_validation_shuffle_split", "(", ")", ":", "skip_if_no_sklearn", "(", ")", "from", "pylearn2", ".", "cross_validation", ".", "subset_iterators", "import", "ValidationShuffleSplit", "n", "=", "30", "cv", "=", "ValidationShuffleSplit", "(", "n", ")", "for"...
test validationshufflesplit .
train
false
17,032
def attractor153_graph(n, p, multiple=3, b=10): G = DiGraph() for k in range(1, (n + 1)): if (((k % multiple) == 0) and (k not in G)): k1 = k knext = powersum(k1, p, b) while (k1 != knext): G.add_edge(k1, knext) k1 = knext knext = powersum(k1, p, b) return G
[ "def", "attractor153_graph", "(", "n", ",", "p", ",", "multiple", "=", "3", ",", "b", "=", "10", ")", ":", "G", "=", "DiGraph", "(", ")", "for", "k", "in", "range", "(", "1", ",", "(", "n", "+", "1", ")", ")", ":", "if", "(", "(", "(", "k...
return digraph of iterations of powersum .
train
false
17,033
def unzfill(str): try: return str[:str.index('\x00')] except ValueError: return str
[ "def", "unzfill", "(", "str", ")", ":", "try", ":", "return", "str", "[", ":", "str", ".", "index", "(", "'\\x00'", ")", "]", "except", "ValueError", ":", "return", "str" ]
return a string without ascii nuls .
train
false
17,034
def run_in_thread(func, *args, **kwargs): from threading import Thread thread = Thread(target=func, args=args, kwargs=kwargs) thread.daemon = True thread.start() return thread
[ "def", "run_in_thread", "(", "func", ",", "*", "args", ",", "**", "kwargs", ")", ":", "from", "threading", "import", "Thread", "thread", "=", "Thread", "(", "target", "=", "func", ",", "args", "=", "args", ",", "kwargs", "=", "kwargs", ")", "thread", ...
run function in thread .
train
true
17,036
def env_present(name, value=None, user='root'): ret = {'changes': {}, 'comment': '', 'name': name, 'result': True} if __opts__['test']: status = _check_cron_env(user, name, value=value) ret['result'] = None if (status == 'absent'): ret['comment'] = 'Cron env {0} is set to be added'.format(name) elif (status == 'present'): ret['result'] = True ret['comment'] = 'Cron env {0} already present'.format(name) elif (status == 'update'): ret['comment'] = 'Cron env {0} is set to be updated'.format(name) return ret data = __salt__['cron.set_env'](user, name, value=value) if (data == 'present'): ret['comment'] = 'Cron env {0} already present'.format(name) return ret if (data == 'new'): ret['comment'] = "Cron env {0} added to {1}'s crontab".format(name, user) ret['changes'] = {user: name} return ret if (data == 'updated'): ret['comment'] = 'Cron env {0} updated'.format(name) ret['changes'] = {user: name} return ret ret['comment'] = 'Cron env {0} for user {1} failed to commit with error \n{2}'.format(name, user, data) ret['result'] = False return ret
[ "def", "env_present", "(", "name", ",", "value", "=", "None", ",", "user", "=", "'root'", ")", ":", "ret", "=", "{", "'changes'", ":", "{", "}", ",", "'comment'", ":", "''", ",", "'name'", ":", "name", ",", "'result'", ":", "True", "}", "if", "__...
verifies that the specified environment variable is present in the crontab for the specified user .
train
true
17,037
def test_vstack(): t1 = QTable(MIXIN_COLS) t2 = QTable(MIXIN_COLS) with pytest.raises(NotImplementedError): vstack([t1, t2])
[ "def", "test_vstack", "(", ")", ":", "t1", "=", "QTable", "(", "MIXIN_COLS", ")", "t2", "=", "QTable", "(", "MIXIN_COLS", ")", "with", "pytest", ".", "raises", "(", "NotImplementedError", ")", ":", "vstack", "(", "[", "t1", ",", "t2", "]", ")" ]
vstack tables with mixin cols .
train
false
17,039
def getDescriptionSpeed(lines): activateSpeedString = getSettingString(lines, 'speed', 'Activate Speed') if ((activateSpeedString == None) or (activateSpeedString == 'False')): return '' feedRateString = getSettingString(lines, 'speed', 'Feed Rate') flowRateString = getSettingString(lines, 'speed', 'Flow Rate') if (feedRateString == flowRateString): return ('_%sEL' % feedRateString.replace('.0', '')) return ('_%sE%sL' % (feedRateString.replace('.0', ''), flowRateString.replace('.0', '')))
[ "def", "getDescriptionSpeed", "(", "lines", ")", ":", "activateSpeedString", "=", "getSettingString", "(", "lines", ",", "'speed'", ",", "'Activate Speed'", ")", "if", "(", "(", "activateSpeedString", "==", "None", ")", "or", "(", "activateSpeedString", "==", "'...
get the description for speed .
train
false
17,040
def run_diff_quality(violations_type=None, prefix=None, reports=None, percentage_string=None, branch_string=None, dquality_dir=None): try: sh('{pythonpath_prefix} diff-quality --violations={type} {reports} {percentage_string} {compare_branch_string} --html-report {dquality_dir}/diff_quality_{type}.html '.format(type=violations_type, pythonpath_prefix=prefix, reports=reports, percentage_string=percentage_string, compare_branch_string=branch_string, dquality_dir=dquality_dir)) return True except BuildFailure as error_message: if is_percentage_failure(error_message): return False else: raise BuildFailure(error_message)
[ "def", "run_diff_quality", "(", "violations_type", "=", "None", ",", "prefix", "=", "None", ",", "reports", "=", "None", ",", "percentage_string", "=", "None", ",", "branch_string", "=", "None", ",", "dquality_dir", "=", "None", ")", ":", "try", ":", "sh",...
this executes the diff-quality commandline tool for the given violation type .
train
false
17,041
def _parse_qs(qs, keep_blank_values=0, strict_parsing=0, encoding='utf-8'): pairs = [s2 for s1 in qs.split('&') for s2 in s1.split(';')] d = {} for name_value in pairs: if ((not name_value) and (not strict_parsing)): continue nv = name_value.split('=', 1) if (len(nv) != 2): if strict_parsing: raise ValueError(('bad query field: %r' % (name_value,))) if keep_blank_values: nv.append('') else: continue if (len(nv[1]) or keep_blank_values): name = urllib.unquote(nv[0].replace('+', ' ')) name = name.decode(encoding, 'strict') value = urllib.unquote(nv[1].replace('+', ' ')) value = value.decode(encoding, 'strict') if (name in d): if (not isinstance(d[name], list)): d[name] = [d[name]] d[name].append(value) else: d[name] = value return d
[ "def", "_parse_qs", "(", "qs", ",", "keep_blank_values", "=", "0", ",", "strict_parsing", "=", "0", ",", "encoding", "=", "'utf-8'", ")", ":", "pairs", "=", "[", "s2", "for", "s1", "in", "qs", ".", "split", "(", "'&'", ")", "for", "s2", "in", "s1",...
parse a query given as a string argument .
train
false
17,042
def _extract_blob_key(blob): if isinstance(blob, str): return blob.decode('utf-8') elif isinstance(blob, BlobKey): return str(blob).decode('utf-8') elif (blob.__class__.__name__ == 'BlobInfo'): return str(blob.key()).decode('utf-8') return blob
[ "def", "_extract_blob_key", "(", "blob", ")", ":", "if", "isinstance", "(", "blob", ",", "str", ")", ":", "return", "blob", ".", "decode", "(", "'utf-8'", ")", "elif", "isinstance", "(", "blob", ",", "BlobKey", ")", ":", "return", "str", "(", "blob", ...
extract a unicode blob key from a str .
train
false
17,044
def compute_dependencies(tables): tables = list(tables) graph = {} def visit_foreign_key(fkey): if fkey.use_alter: return parent_table = fkey.column.table if (parent_table in tables): child_table = fkey.parent.table if (parent_table is not child_table): graph.setdefault(parent_table, []).append(child_table) for table in tables: visitors.traverse(table, {'schema_visitor': True}, {'foreign_key': visit_foreign_key}) graph.setdefault(table, []).extend(table._extra_dependencies) return graph
[ "def", "compute_dependencies", "(", "tables", ")", ":", "tables", "=", "list", "(", "tables", ")", "graph", "=", "{", "}", "def", "visit_foreign_key", "(", "fkey", ")", ":", "if", "fkey", ".", "use_alter", ":", "return", "parent_table", "=", "fkey", ".",...
construct a reverse dependency graph for the given tables .
train
false
17,045
def get_tab(win_id, target): if (target == usertypes.ClickTarget.tab): win_id = win_id bg_tab = False elif (target == usertypes.ClickTarget.tab_bg): win_id = win_id bg_tab = True elif (target == usertypes.ClickTarget.window): from qutebrowser.mainwindow import mainwindow window = mainwindow.MainWindow() window.show() win_id = window.win_id bg_tab = False else: raise ValueError('Invalid ClickTarget {}'.format(target)) tabbed_browser = objreg.get('tabbed-browser', scope='window', window=win_id) return tabbed_browser.tabopen(url=None, background=bg_tab)
[ "def", "get_tab", "(", "win_id", ",", "target", ")", ":", "if", "(", "target", "==", "usertypes", ".", "ClickTarget", ".", "tab", ")", ":", "win_id", "=", "win_id", "bg_tab", "=", "False", "elif", "(", "target", "==", "usertypes", ".", "ClickTarget", "...
get a tab widget for the given usertypes .
train
false
17,048
def example2_build_verticle_exit(x, y, **kwargs): if (kwargs['iteration'] == 0): return north_room = kwargs['room_dict'][(x, (y - 1))] south_room = kwargs['room_dict'][(x, (y + 1))] north = create_object(exits.Exit, key='south', aliases=['s'], location=north_room, destination=south_room) south = create_object(exits.Exit, key='north', aliases=['n'], location=south_room, destination=north_room) kwargs['caller'].msg(((('Connected: ' + north_room.key) + ' & ') + south_room.key))
[ "def", "example2_build_verticle_exit", "(", "x", ",", "y", ",", "**", "kwargs", ")", ":", "if", "(", "kwargs", "[", "'iteration'", "]", "==", "0", ")", ":", "return", "north_room", "=", "kwargs", "[", "'room_dict'", "]", "[", "(", "x", ",", "(", "y",...
creates two exits to and from the two rooms north and south .
train
false
17,050
def _parseHeader(h, v): if (h in ('passporturls', 'authentication-info', 'www-authenticate')): v = v.replace('Passport1.4', '').lstrip() fields = {} for fieldPair in v.split(','): try: (field, value) = fieldPair.split('=', 1) fields[field.lower()] = value except ValueError: fields[field.lower()] = '' return fields else: return v
[ "def", "_parseHeader", "(", "h", ",", "v", ")", ":", "if", "(", "h", "in", "(", "'passporturls'", ",", "'authentication-info'", ",", "'www-authenticate'", ")", ")", ":", "v", "=", "v", ".", "replace", "(", "'Passport1.4'", ",", "''", ")", ".", "lstrip"...
split a certin number of known header values with the format: field1=val .
train
false
17,051
@task def check_tag_exists(): version = get_sympy_version() tag = ('sympy-' + version) with cd('/home/vagrant/repos/sympy'): all_tags = run('git ls-remote --tags origin') return (tag in all_tags)
[ "@", "task", "def", "check_tag_exists", "(", ")", ":", "version", "=", "get_sympy_version", "(", ")", "tag", "=", "(", "'sympy-'", "+", "version", ")", "with", "cd", "(", "'/home/vagrant/repos/sympy'", ")", ":", "all_tags", "=", "run", "(", "'git ls-remote -...
check if the tag for this release has been uploaded yet .
train
false
17,052
@parse_data @set_database def get_topic_contents(kinds=None, topic_id=None, **kwargs): if topic_id: topic_node = Item.get((Item.id == topic_id), (Item.kind == 'Topic')) if (not kinds): kinds = ['Video', 'Audio', 'Exercise', 'Document'] return Item.select(Item).where(Item.kind.in_(kinds), Item.path.contains(topic_node.path))
[ "@", "parse_data", "@", "set_database", "def", "get_topic_contents", "(", "kinds", "=", "None", ",", "topic_id", "=", "None", ",", "**", "kwargs", ")", ":", "if", "topic_id", ":", "topic_node", "=", "Item", ".", "get", "(", "(", "Item", ".", "id", "=="...
convenience function for returning a set of content/leaf nodes contained within a topic .
train
false
17,053
def kill(coro): return KillEvent(coro)
[ "def", "kill", "(", "coro", ")", ":", "return", "KillEvent", "(", "coro", ")" ]
kill greenlet asynchronously .
train
false
17,054
def setEdgeMaximumMinimum(edge, vertexes): beginIndex = edge.vertexIndexes[0] endIndex = edge.vertexIndexes[1] if ((beginIndex >= len(vertexes)) or (endIndex >= len(vertexes))): print 'Warning, there are duplicate vertexes in setEdgeMaximumMinimum in triangle_mesh.' print 'Something might still be printed, but there is no guarantee that it will be the correct shape.' edge.zMaximum = (-987654321.0) edge.zMinimum = (-987654321.0) return beginZ = vertexes[beginIndex].z endZ = vertexes[endIndex].z edge.zMinimum = min(beginZ, endZ) edge.zMaximum = max(beginZ, endZ)
[ "def", "setEdgeMaximumMinimum", "(", "edge", ",", "vertexes", ")", ":", "beginIndex", "=", "edge", ".", "vertexIndexes", "[", "0", "]", "endIndex", "=", "edge", ".", "vertexIndexes", "[", "1", "]", "if", "(", "(", "beginIndex", ">=", "len", "(", "vertexe...
set the edge maximum and minimum .
train
false
17,055
def percent_encode(input_str, safe=SAFE_CHARS): if (not isinstance(input_str, string_types)): input_str = text_type(input_str) return quote(text_type(input_str).encode('utf-8'), safe=safe)
[ "def", "percent_encode", "(", "input_str", ",", "safe", "=", "SAFE_CHARS", ")", ":", "if", "(", "not", "isinstance", "(", "input_str", ",", "string_types", ")", ")", ":", "input_str", "=", "text_type", "(", "input_str", ")", "return", "quote", "(", "text_t...
urlencodes a string .
train
false
17,056
def error_body_response(error_code, message, __warn=True): if __warn: warnings.warn('wsgilib.error_body_response is deprecated; use the wsgi_application method on an HTTPException object instead', DeprecationWarning, 2) return ('<html>\n <head>\n <title>%(error_code)s</title>\n </head>\n <body>\n <h1>%(error_code)s</h1>\n %(message)s\n </body>\n</html>' % {'error_code': error_code, 'message': message})
[ "def", "error_body_response", "(", "error_code", ",", "message", ",", "__warn", "=", "True", ")", ":", "if", "__warn", ":", "warnings", ".", "warn", "(", "'wsgilib.error_body_response is deprecated; use the wsgi_application method on an HTTPException object instead'", ",", ...
returns a standard html response page for an http error .
train
false
17,057
def _convert_filetime_to_timestamp(filetime): hundreds_nano_seconds = struct.unpack('>Q', struct.pack('>LL', filetime.dwHighDateTime, filetime.dwLowDateTime))[0] seconds_since_1601 = (hundreds_nano_seconds / 10000000) return (seconds_since_1601 - 11644473600)
[ "def", "_convert_filetime_to_timestamp", "(", "filetime", ")", ":", "hundreds_nano_seconds", "=", "struct", ".", "unpack", "(", "'>Q'", ",", "struct", ".", "pack", "(", "'>LL'", ",", "filetime", ".", "dwHighDateTime", ",", "filetime", ".", "dwLowDateTime", ")", ...
windows returns times as 64-bit unsigned longs that are the number of hundreds of nanoseconds since jan 1 1601 .
train
true
17,059
def __collect_from_stream(stream, buffer, echo_stream): collected = [] try: while True: got = os.read(stream.fileno(), 1) if (not got): break collected.append(got) if echo_stream: echo_stream.write(got) echo_stream.flush() except OSError: pass if collected: buffer.append(''.join(collected)) return len(collected)
[ "def", "__collect_from_stream", "(", "stream", ",", "buffer", ",", "echo_stream", ")", ":", "collected", "=", "[", "]", "try", ":", "while", "True", ":", "got", "=", "os", ".", "read", "(", "stream", ".", "fileno", "(", ")", ",", "1", ")", "if", "(...
read all the input from a stream .
train
false
17,060
def build_results_list(pages, is_json): results = [] for (rank, doc) in enumerate(pages, pages.start_index()): if (doc['model'] == 'wiki_document'): summary = _build_es_excerpt(doc) if (not summary): summary = doc['document_summary'] result = {'title': doc['document_title'], 'type': 'document'} elif (doc['model'] == 'questions_question'): summary = _build_es_excerpt(doc) if (not summary): summary = bleach.clean(doc['question_content'], strip=True)[:500] result = {'title': doc['question_title'], 'type': 'question', 'last_updated': datetime.fromtimestamp(doc['updated']), 'is_solved': doc['question_is_solved'], 'num_answers': doc['question_num_answers'], 'num_votes': doc['question_num_votes'], 'num_votes_past_week': doc['question_num_votes_past_week']} elif (doc['model'] == 'forums_thread'): summary = _build_es_excerpt(doc, first_only=True) result = {'title': doc['post_title'], 'type': 'thread'} else: raise UnknownDocType(('%s is an unknown doctype' % doc['model'])) result['url'] = doc['url'] if (not is_json): result['object'] = doc result['search_summary'] = summary result['rank'] = rank result['score'] = doc.es_meta.score result['explanation'] = escape(format_explanation(doc.es_meta.explanation)) result['id'] = doc['id'] results.append(result) return results
[ "def", "build_results_list", "(", "pages", ",", "is_json", ")", ":", "results", "=", "[", "]", "for", "(", "rank", ",", "doc", ")", "in", "enumerate", "(", "pages", ",", "pages", ".", "start_index", "(", ")", ")", ":", "if", "(", "doc", "[", "'mode...
takes a paginated search and returns results list handles wiki documents .
train
false
17,061
def case_event_type(): return s3_rest_controller()
[ "def", "case_event_type", "(", ")", ":", "return", "s3_rest_controller", "(", ")" ]
case event types: restful crud controller .
train
false
17,063
def repl_view_delta(sublime_view): rv = manager.repl_view(sublime_view) if (not rv): return (None, (-1)) delta = (rv._output_end - sublime_view.sel()[0].begin()) return (rv, delta)
[ "def", "repl_view_delta", "(", "sublime_view", ")", ":", "rv", "=", "manager", ".", "repl_view", "(", "sublime_view", ")", "if", "(", "not", "rv", ")", ":", "return", "(", "None", ",", "(", "-", "1", ")", ")", "delta", "=", "(", "rv", ".", "_output...
return a repl_view and number of characters from current selection to then beggingin of user_input .
train
false
17,064
def _ConnectELB(region_name): return elb.connect_to_region(region_name)
[ "def", "_ConnectELB", "(", "region_name", ")", ":", "return", "elb", ".", "connect_to_region", "(", "region_name", ")" ]
connect to a given region for load balancer queries .
train
false
17,066
def get_prosite_entry(id, cgi='http://www.expasy.ch/cgi-bin/get-prosite-entry'): return _urlopen(('%s?%s' % (cgi, id)))
[ "def", "get_prosite_entry", "(", "id", ",", "cgi", "=", "'http://www.expasy.ch/cgi-bin/get-prosite-entry'", ")", ":", "return", "_urlopen", "(", "(", "'%s?%s'", "%", "(", "cgi", ",", "id", ")", ")", ")" ]
get_prosite_entry -> handle get a handle to a prosite entry at expasy in html format .
train
false
17,067
def call_megam(args): if isinstance(args, compat.string_types): raise TypeError('args should be a list of strings') if (_megam_bin is None): config_megam() cmd = ([_megam_bin] + args) p = subprocess.Popen(cmd, stdout=subprocess.PIPE) (stdout, stderr) = p.communicate() if (p.returncode != 0): print() print(stderr) raise OSError('megam command failed!') if isinstance(stdout, compat.string_types): return stdout else: return stdout.decode('utf-8')
[ "def", "call_megam", "(", "args", ")", ":", "if", "isinstance", "(", "args", ",", "compat", ".", "string_types", ")", ":", "raise", "TypeError", "(", "'args should be a list of strings'", ")", "if", "(", "_megam_bin", "is", "None", ")", ":", "config_megam", ...
call the megam binary with the given arguments .
train
false
17,068
def bernoulli_nll(x, y): assert isinstance(x, variable.Variable) assert isinstance(y, variable.Variable) return (sum.sum(softplus.softplus(y)) - sum.sum((x * y)))
[ "def", "bernoulli_nll", "(", "x", ",", "y", ")", ":", "assert", "isinstance", "(", "x", ",", "variable", ".", "Variable", ")", "assert", "isinstance", "(", "y", ",", "variable", ".", "Variable", ")", "return", "(", "sum", ".", "sum", "(", "softplus", ...
computes the negative log-likelihood of a bernoulli distribution .
train
false
17,069
def prepare_key(*args): key_quote = [] for key in args: if isinstance(key, six.integer_types): key = str(key) key_quote.append(quote(key)) return ':'.join(key_quote)
[ "def", "prepare_key", "(", "*", "args", ")", ":", "key_quote", "=", "[", "]", "for", "key", "in", "args", ":", "if", "isinstance", "(", "key", ",", "six", ".", "integer_types", ")", ":", "key", "=", "str", "(", "key", ")", "key_quote", ".", "append...
prepares names for rows and columns with correct separator .
train
false
17,070
def unit_vector(data, axis=None, out=None): if (out is None): data = numpy.array(data, dtype=numpy.float64, copy=True) if (data.ndim == 1): data /= math.sqrt(numpy.dot(data, data)) return data else: if (out is not data): out[:] = numpy.array(data, copy=False) data = out length = numpy.atleast_1d(numpy.sum((data * data), axis)) numpy.sqrt(length, length) if (axis is not None): length = numpy.expand_dims(length, axis) data /= length if (out is None): return data
[ "def", "unit_vector", "(", "data", ",", "axis", "=", "None", ",", "out", "=", "None", ")", ":", "if", "(", "out", "is", "None", ")", ":", "data", "=", "numpy", ".", "array", "(", "data", ",", "dtype", "=", "numpy", ".", "float64", ",", "copy", ...
return ndarray normalized by length .
train
true
17,072
def find_health_check(conn, wanted): for check in conn.get_list_health_checks().HealthChecks: config = check.HealthCheckConfig if ((config.get('IPAddress') == wanted.ip_addr) and (config.get('FullyQualifiedDomainName') == wanted.fqdn) and (config.get('Type') == wanted.hc_type) and (config.get('RequestInterval') == str(wanted.request_interval))): return check return None
[ "def", "find_health_check", "(", "conn", ",", "wanted", ")", ":", "for", "check", "in", "conn", ".", "get_list_health_checks", "(", ")", ".", "HealthChecks", ":", "config", "=", "check", ".", "HealthCheckConfig", "if", "(", "(", "config", ".", "get", "(", ...
searches for health checks that have the exact same set of immutable values .
train
false
17,075
def require_authorized_access_to_student_data(handler): if settings.CENTRAL_SERVER: return require_authorized_admin(handler) else: @require_login def require_authorized_access_to_student_data_wrapper_fn_distributed(request, *args, **kwargs): '\n Everything is allowed for admins on distributed server.\n For students, they can only access their own account.\n ' if getattr(request, 'is_admin', False): return handler(request, *args, **kwargs) else: user = get_user_from_request(request=request) if (request.session.get('facility_user') == user): return handler(request, *args, **kwargs) else: raise PermissionDenied(_('You requested information for a user that you are not authorized to view.')) return require_admin(handler) return require_authorized_access_to_student_data_wrapper_fn_distributed
[ "def", "require_authorized_access_to_student_data", "(", "handler", ")", ":", "if", "settings", ".", "CENTRAL_SERVER", ":", "return", "require_authorized_admin", "(", "handler", ")", "else", ":", "@", "require_login", "def", "require_authorized_access_to_student_data_wrappe...
warning: this is a crappy function with a crappy name .
train
false
17,076
def test_stackedblocks_with_params(): aes = [Autoencoder(100, 50, 'tanh', 'tanh'), Autoencoder(50, 10, 'tanh', 'tanh')] sb = StackedBlocks(aes) _params = set([p for l in sb._layers for p in l._params]) assert (sb._params == _params)
[ "def", "test_stackedblocks_with_params", "(", ")", ":", "aes", "=", "[", "Autoencoder", "(", "100", ",", "50", ",", "'tanh'", ",", "'tanh'", ")", ",", "Autoencoder", "(", "50", ",", "10", ",", "'tanh'", ",", "'tanh'", ")", "]", "sb", "=", "StackedBlock...
test stackedblocks when all layers have trainable params .
train
false
17,077
@pytest.mark.parametrize('key', ('NETAPP_STORAGE', 'GUARDED_ADDONS_PATH', 'MEDIA_ROOT', 'TMP_PATH', 'MEDIA_ROOT')) def test_base_paths_bytestring(key): assert isinstance(getattr(settings, key), str)
[ "@", "pytest", ".", "mark", ".", "parametrize", "(", "'key'", ",", "(", "'NETAPP_STORAGE'", ",", "'GUARDED_ADDONS_PATH'", ",", "'MEDIA_ROOT'", ",", "'TMP_PATH'", ",", "'MEDIA_ROOT'", ")", ")", "def", "test_base_paths_bytestring", "(", "key", ")", ":", "assert", ...
make sure all relevant base paths are bytestrings .
train
false
17,078
def mount(location, access='rw', root=None): if (root is None): root = os.path.join(tempfile.gettempdir(), 'guest', location.lstrip(os.sep).replace('/', '.')) log.debug('Using root {0}'.format(root)) if (not os.path.isdir(root)): try: os.makedirs(root) except OSError: pass while True: if os.listdir(root): hash_type = getattr(hashlib, __opts__.get('hash_type', 'md5')) rand = hash_type(os.urandom(32)).hexdigest() root = os.path.join(tempfile.gettempdir(), 'guest', (location.lstrip(os.sep).replace('/', '.') + rand)) log.debug('Establishing new root as {0}'.format(root)) else: break cmd = 'guestmount -i -a {0} --{1} {2}'.format(location, access, root) __salt__['cmd.run'](cmd, python_shell=False) return root
[ "def", "mount", "(", "location", ",", "access", "=", "'rw'", ",", "root", "=", "None", ")", ":", "if", "(", "root", "is", "None", ")", ":", "root", "=", "os", ".", "path", ".", "join", "(", "tempfile", ".", "gettempdir", "(", ")", ",", "'guest'",...
mount a partition example:: from fabtools .
train
true
17,080
@task def nodetype(typ): if (not env.hosts): env.hosts = ec2_utils.ListInstancesDNS(region='us-east-1', node_types=[typ], states=['running']) env.nodetype = typ
[ "@", "task", "def", "nodetype", "(", "typ", ")", ":", "if", "(", "not", "env", ".", "hosts", ")", ":", "env", ".", "hosts", "=", "ec2_utils", ".", "ListInstancesDNS", "(", "region", "=", "'us-east-1'", ",", "node_types", "=", "[", "typ", "]", ",", ...
specify node type: staging or prod .
train
false
17,082
def test_write_formats(): out = StringIO() ascii.write(dat, out, Writer=ascii.FixedWidth, formats={'Col1': '%-8.3f', 'Col2': '%-15s'}) assert_equal_splitlines(out.getvalue(), '| Col1 | Col2 | Col3 | Col4 |\n| 1.200 | "hello" | 1 | a |\n| 2.400 | \'s worlds | 2 | 2 |\n')
[ "def", "test_write_formats", "(", ")", ":", "out", "=", "StringIO", "(", ")", "ascii", ".", "write", "(", "dat", ",", "out", ",", "Writer", "=", "ascii", ".", "FixedWidth", ",", "formats", "=", "{", "'Col1'", ":", "'%-8.3f'", ",", "'Col2'", ":", "'%-...
write a table as a fixed width table with no delimiter .
train
false
17,084
def getMatchingPlugins(elementNode, namePathDictionary): matchingPlugins = [] namePathDictionaryCopy = namePathDictionary.copy() for key in elementNode.attributes: dotIndex = key.find('.') if (dotIndex > (-1)): keyUntilDot = key[:dotIndex] if (keyUntilDot in namePathDictionaryCopy): pluginModule = archive.getModuleWithPath(namePathDictionaryCopy[keyUntilDot]) del namePathDictionaryCopy[keyUntilDot] if (pluginModule != None): matchingPlugins.append(pluginModule) return matchingPlugins
[ "def", "getMatchingPlugins", "(", "elementNode", ",", "namePathDictionary", ")", ":", "matchingPlugins", "=", "[", "]", "namePathDictionaryCopy", "=", "namePathDictionary", ".", "copy", "(", ")", "for", "key", "in", "elementNode", ".", "attributes", ":", "dotIndex...
get the plugins whose names are in the attribute dictionary .
train
false
17,086
def _boundary_of_alternatives_indices(pattern): end_pos = None for match in re.finditer('\\)', pattern): if (not _position_is_bracketed(pattern, match.start())): end_pos = match.start() break start_pos = None for match in re.finditer('\\(', pattern[:end_pos]): if (not _position_is_bracketed(pattern, match.start())): start_pos = match.end() return (start_pos, end_pos)
[ "def", "_boundary_of_alternatives_indices", "(", "pattern", ")", ":", "end_pos", "=", "None", "for", "match", "in", "re", ".", "finditer", "(", "'\\\\)'", ",", "pattern", ")", ":", "if", "(", "not", "_position_is_bracketed", "(", "pattern", ",", "match", "."...
determines the location of a set of alternatives in a glob pattern .
train
false
17,087
def dockerpy_client(**kwargs): if ('version' not in kwargs): kwargs = kwargs.copy() kwargs['version'] = '1.15' return decorate_methods(TimeoutClient(**kwargs), partial(with_retry, should_retry=retry_if(_is_known_retryable), steps=get_default_retry_steps()))
[ "def", "dockerpy_client", "(", "**", "kwargs", ")", ":", "if", "(", "'version'", "not", "in", "kwargs", ")", ":", "kwargs", "=", "kwargs", ".", "copy", "(", ")", "kwargs", "[", "'version'", "]", "=", "'1.15'", "return", "decorate_methods", "(", "TimeoutC...
create a docker .
train
false
17,088
def validation_curve(estimator, X, y, param_name, param_range, groups=None, cv=None, scoring=None, n_jobs=1, pre_dispatch='all', verbose=0): (X, y, groups) = indexable(X, y, groups) cv = check_cv(cv, y, classifier=is_classifier(estimator)) scorer = check_scoring(estimator, scoring=scoring) parallel = Parallel(n_jobs=n_jobs, pre_dispatch=pre_dispatch, verbose=verbose) out = parallel((delayed(_fit_and_score)(estimator, X, y, scorer, train, test, verbose, parameters={param_name: v}, fit_params=None, return_train_score=True) for (train, test) in cv.split(X, y, groups) for v in param_range)) out = np.asarray(out) n_params = len(param_range) n_cv_folds = (out.shape[0] // n_params) out = out.reshape(n_cv_folds, n_params, 2).transpose((2, 1, 0)) return (out[0], out[1])
[ "def", "validation_curve", "(", "estimator", ",", "X", ",", "y", ",", "param_name", ",", "param_range", ",", "groups", "=", "None", ",", "cv", "=", "None", ",", "scoring", "=", "None", ",", "n_jobs", "=", "1", ",", "pre_dispatch", "=", "'all'", ",", ...
validation curve .
train
false
17,091
def write_entry(logger_name): logging_client = logging.Client() logger = logging_client.logger(logger_name) logger.log_text('Hello, world!') logger.log_text('Goodbye, world!', severity='ERROR') logger.log_struct({'name': 'King Arthur', 'quest': 'Find the Holy Grail', 'favorite_color': 'Blue'}) print 'Wrote logs to {}.'.format(logger.name)
[ "def", "write_entry", "(", "logger_name", ")", ":", "logging_client", "=", "logging", ".", "Client", "(", ")", "logger", "=", "logging_client", ".", "logger", "(", "logger_name", ")", "logger", ".", "log_text", "(", "'Hello, world!'", ")", "logger", ".", "lo...
writes log entries to the given logger .
train
false
17,092
def _dict_from_expr_no_gens(expr, opt): ((poly,), gens) = _parallel_dict_from_expr_no_gens((expr,), opt) return (poly, gens)
[ "def", "_dict_from_expr_no_gens", "(", "expr", ",", "opt", ")", ":", "(", "(", "poly", ",", ")", ",", "gens", ")", "=", "_parallel_dict_from_expr_no_gens", "(", "(", "expr", ",", ")", ",", "opt", ")", "return", "(", "poly", ",", "gens", ")" ]
transform an expression into a multinomial form and figure out generators .
train
false
17,093
def _bulk_state(saltfunc, lbn, workers, profile): ret = {'name': lbn, 'result': True, 'changes': {}, 'comment': ''} if (not isinstance(workers, list)): ret['result'] = False ret['comment'] = 'workers should be a list not a {0}'.format(type(workers)) return ret if __opts__['test']: ret['result'] = None return ret log.info('executing {0} to modjk workers {1}'.format(saltfunc, workers)) try: cmdret = __salt__[saltfunc](workers, lbn, profile=profile) except KeyError: ret['result'] = False ret['comment'] = 'unsupported function {0}'.format(saltfunc) return ret errors = [] for (worker, ok) in six.iteritems(cmdret): if (not ok): errors.append(worker) ret['changes'] = {'status': cmdret} if errors: ret['result'] = False ret['comment'] = '{0} failed on some workers'.format(saltfunc) return ret
[ "def", "_bulk_state", "(", "saltfunc", ",", "lbn", ",", "workers", ",", "profile", ")", ":", "ret", "=", "{", "'name'", ":", "lbn", ",", "'result'", ":", "True", ",", "'changes'", ":", "{", "}", ",", "'comment'", ":", "''", "}", "if", "(", "not", ...
generic function for bulk worker operation .
train
true
17,095
def _map_nodes(meta): services = Table('services', meta, autoload=True) c_nodes = Table('compute_nodes', meta, autoload=True) q = select([services.c.host, c_nodes.c.hypervisor_hostname], whereclause=and_((c_nodes.c.deleted == False), (services.c.deleted == False)), from_obj=c_nodes.join(services, (c_nodes.c.service_id == services.c.id))) nodemap = {} for (host, node) in q.execute(): nodes = nodemap.setdefault(host, []) nodes.append(node) return nodemap
[ "def", "_map_nodes", "(", "meta", ")", ":", "services", "=", "Table", "(", "'services'", ",", "meta", ",", "autoload", "=", "True", ")", "c_nodes", "=", "Table", "(", "'compute_nodes'", ",", "meta", ",", "autoload", "=", "True", ")", "q", "=", "select"...
map host to compute node(s) for the purpose of determining which hosts are single vs multi-node .
train
false
17,097
def mask_password_from_url(url): split = urlparse.urlsplit(url) if split.password: if (url.count(split.password) == 1): url = url.replace(split.password, '********') else: split = split._replace(netloc=split.netloc.replace(('%s:%s' % (split.username, split.password)), ('%s:********' % split.username))) url = urlparse.urlunsplit(split) return url
[ "def", "mask_password_from_url", "(", "url", ")", ":", "split", "=", "urlparse", ".", "urlsplit", "(", "url", ")", "if", "split", ".", "password", ":", "if", "(", "url", ".", "count", "(", "split", ".", "password", ")", "==", "1", ")", ":", "url", ...
masks out passwords from connection urls like the database connection in galaxy .
train
false
17,098
def load_content(file_path): (file_name, file_ext) = os.path.splitext(file_path) if (file_ext not in ALLOWED_EXTS): raise Exception(('Unsupported meta type %s, file %s. Allowed: %s' % (file_ext, file_path, ALLOWED_EXTS))) parser_func = PARSER_FUNCS.get(file_ext, None) with open(file_path, 'r') as fd: return (parser_func(fd) if parser_func else fd.read())
[ "def", "load_content", "(", "file_path", ")", ":", "(", "file_name", ",", "file_ext", ")", "=", "os", ".", "path", ".", "splitext", "(", "file_path", ")", "if", "(", "file_ext", "not", "in", "ALLOWED_EXTS", ")", ":", "raise", "Exception", "(", "(", "'U...
loads content from file_path if file_paths extension is one of allowed ones .
train
false
17,100
def rsplit(s, sep=None, maxsplit=(-1)): return s.rsplit(sep, maxsplit)
[ "def", "rsplit", "(", "s", ",", "sep", "=", "None", ",", "maxsplit", "=", "(", "-", "1", ")", ")", ":", "return", "s", ".", "rsplit", "(", "sep", ",", "maxsplit", ")" ]
rsplit -> list of strings return a list of the words in the string s .
train
false
17,101
def _document_lock(doc_id, username): locked_by = _document_lock_check(doc_id) if (locked_by == username): locked = False if locked_by: try: locked = (not (locked_by == username)) locked_by = User.objects.get(username=locked_by) except User.DoesNotExist: locked = False locked_by = None else: locked_by = username locked = False _document_lock_steal(doc_id, username) return (locked, locked_by)
[ "def", "_document_lock", "(", "doc_id", ",", "username", ")", ":", "locked_by", "=", "_document_lock_check", "(", "doc_id", ")", "if", "(", "locked_by", "==", "username", ")", ":", "locked", "=", "False", "if", "locked_by", ":", "try", ":", "locked", "=", ...
if there is no lock .
train
false
17,102
def _build_install_args(options): install_args = [] if options.user_install: if (sys.version_info < (2, 6)): log.warn('--user requires Python 2.6 or later') raise SystemExit(1) install_args.append('--user') return install_args
[ "def", "_build_install_args", "(", "options", ")", ":", "install_args", "=", "[", "]", "if", "options", ".", "user_install", ":", "if", "(", "sys", ".", "version_info", "<", "(", "2", ",", "6", ")", ")", ":", "log", ".", "warn", "(", "'--user requires ...
build the arguments to python setup .
train
true
17,103
def not_in_(a, b, msg=None): assert (a not in b), (msg or ('%r is in %r' % (a, b)))
[ "def", "not_in_", "(", "a", ",", "b", ",", "msg", "=", "None", ")", ":", "assert", "(", "a", "not", "in", "b", ")", ",", "(", "msg", "or", "(", "'%r is in %r'", "%", "(", "a", ",", "b", ")", ")", ")" ]
assert a in not b .
train
false
17,104
def completions_sorting_key(word): word = word.lower() (prio1, prio2) = (0, 0) if word.startswith('__'): prio1 = 2 elif word.startswith('_'): prio1 = 1 if word.endswith('='): prio1 = (-1) if word.startswith('%%'): if (not ('%' in word[2:])): word = word[2:] prio2 = 2 elif word.startswith('%'): if (not ('%' in word[1:])): word = word[1:] prio2 = 1 return (prio1, word, prio2)
[ "def", "completions_sorting_key", "(", "word", ")", ":", "word", "=", "word", ".", "lower", "(", ")", "(", "prio1", ",", "prio2", ")", "=", "(", "0", ",", "0", ")", "if", "word", ".", "startswith", "(", "'__'", ")", ":", "prio1", "=", "2", "elif"...
key for sorting completions this does several things: - lowercase all completions .
train
false
17,105
def objlocattr(accessing_obj, accessed_obj, *args, **kwargs): if hasattr(accessed_obj, 'obj'): accessed_obj = accessed_obj.obj if hasattr(accessed_obj, 'location'): return attr(accessed_obj.location, accessed_obj, *args, **kwargs)
[ "def", "objlocattr", "(", "accessing_obj", ",", "accessed_obj", ",", "*", "args", ",", "**", "kwargs", ")", ":", "if", "hasattr", "(", "accessed_obj", ",", "'obj'", ")", ":", "accessed_obj", "=", "accessed_obj", ".", "obj", "if", "hasattr", "(", "accessed_...
usage: locattr locattr locattr works like attr .
train
false
17,107
def safemembers(members, base):
    """Vet tar-archive *members* for safe extraction under *base*.

    Rejects entries whose paths escape *base*, symbolic or hard links
    pointing outside *base*, and FIFO/device/character special files.

    Args:
        members: iterable of ``tarfile.TarInfo`` entries to check.
        base: directory the archive will be extracted into; must itself
            resolve to somewhere inside ``settings.DATA_DIR``.

    Returns:
        *members*, unchanged, when every entry is safe.

    Raises:
        SuspiciousOperation: for the first unsafe entry found, or when
            *base* lies outside the data directory.
    """
    base = resolved(base)
    # The extraction target itself must live inside the course data dir.
    if not base.startswith(resolved(settings.DATA_DIR)):
        raise SuspiciousOperation('Attempted to import course outside of data dir')
    for finfo in members:
        if _is_bad_path(finfo.name, base):
            log.debug('File %r is blocked (illegal path)', finfo.name)
            raise SuspiciousOperation('Illegal path')
        elif finfo.issym() and _is_bad_link(finfo, base):
            # tarfile: issym() marks SYMBOLIC links.  The original code
            # logged/raised these as "Hard link" (labels were swapped).
            log.debug('File %r is blocked: Symlink to %r', finfo.name, finfo.linkname)
            raise SuspiciousOperation('Symlink')
        elif finfo.islnk() and _is_bad_link(finfo, base):
            # tarfile: islnk() marks HARD links.
            log.debug('File %r is blocked: Hard link to %r', finfo.name, finfo.linkname)
            raise SuspiciousOperation('Hard link')
        elif finfo.isdev():
            log.debug('File %r is blocked: FIFO, device or character file', finfo.name)
            raise SuspiciousOperation('Dev file')
    return members
[ "def", "safemembers", "(", "members", ",", "base", ")", ":", "base", "=", "resolved", "(", "base", ")", "if", "(", "not", "base", ".", "startswith", "(", "resolved", "(", "settings", ".", "DATA_DIR", ")", ")", ")", ":", "raise", "SuspiciousOperation", ...
check that all elements of a tar file are safe .
train
false
17,108
def setup_console():
    """Initialise the module-global ``console`` output handler from the
    parsed command-line ``args``, and install a SIGTERM handler.

    Side effects only: rebinds the global ``console``, may emit
    deprecation warnings, and changes process signal handling.
    """
    global console
    console = ConsoleOutput(sys.stdout, livestreamer)
    # When the stream itself goes to stdout, console messages must move
    # to stderr so they do not corrupt the stream data.
    if (args.stdout or (args.output == '-')):
        console.set_output(sys.stderr)
    # Apply the requested log level unless any quiet-style option is set.
    if (not any((getattr(args, attr) for attr in QUIET_OPTIONS))):
        console.set_level(args.loglevel)
    if args.quiet_player:
        console.logger.warning('The option --quiet-player is deprecated since version 1.4.3 as hiding player output is now the default.')
    if args.best_stream_default:
        console.logger.warning("The option --best-stream-default is deprecated since version 1.9.0, use '--default-stream best' instead.")
    console.json = args.json
    # Make SIGTERM behave like Ctrl-C (KeyboardInterrupt) for clean shutdown.
    signal.signal(signal.SIGTERM, signal.default_int_handler)
[ "def", "setup_console", "(", ")", ":", "global", "console", "console", "=", "ConsoleOutput", "(", "sys", ".", "stdout", ",", "livestreamer", ")", "if", "(", "args", ".", "stdout", "or", "(", "args", ".", "output", "==", "'-'", ")", ")", ":", "console",...
console setup .
train
false
17,109
def check_X_y(X, y, accept_sparse=False, dtype='numeric', order=None, copy=False, force_all_finite=True, ensure_2d=True, allow_nd=False, multi_output=False, ensure_min_samples=1, ensure_min_features=1, y_numeric=False, warn_on_dtype=False, estimator=None):
    """Input validation for standard estimators.

    Validates X and y together: X through ``check_array`` with all the
    given options, y either as a 2-D (multi-output) array or as a 1-D
    column, and finally checks that both have consistent lengths.

    Returns:
        (X, y): the validated (possibly converted/copied) arrays.

    Raises:
        ValueError: on non-finite values, wrong dimensionality, or
            inconsistent sample counts (raised by the helpers called).
    """
    X = check_array(X, accept_sparse, dtype, order, copy, force_all_finite, ensure_2d, allow_nd, ensure_min_samples, ensure_min_features, warn_on_dtype, estimator)
    if multi_output:
        # y may be 2-D (one column per output); sparse CSR is accepted.
        y = check_array(y, 'csr', force_all_finite=True, ensure_2d=False, dtype=None)
    else:
        # Collapse column vectors to 1-D (with a warning) and reject NaN/inf.
        y = column_or_1d(y, warn=True)
        _assert_all_finite(y)
    # Object-dtype targets are coerced to float when numeric y is required.
    if (y_numeric and (y.dtype.kind == 'O')):
        y = y.astype(np.float64)
    check_consistent_length(X, y)
    return (X, y)
[ "def", "check_X_y", "(", "X", ",", "y", ",", "accept_sparse", "=", "False", ",", "dtype", "=", "'numeric'", ",", "order", "=", "None", ",", "copy", "=", "False", ",", "force_all_finite", "=", "True", ",", "ensure_2d", "=", "True", ",", "allow_nd", "=",...
input validation for standard estimators .
train
false
17,110
def _get_type_id_options(name, configuration): if ('.' in name): (type_, sep, id_) = name.partition('.') options = configuration else: type_ = next(six.iterkeys(configuration)) id_ = name options = configuration[type_] return (type_, id_, options)
[ "def", "_get_type_id_options", "(", "name", ",", "configuration", ")", ":", "if", "(", "'.'", "in", "name", ")", ":", "(", "type_", ",", "sep", ",", "id_", ")", "=", "name", ".", "partition", "(", "'.'", ")", "options", "=", "configuration", "else", ...
returns the type .
train
true
17,111
def get_new_language_form(request, component):
    """Return the new-language form class for the requesting user.

    Superusers and project owners get the owner form (which can add
    languages directly); everyone else gets the plain request form.
    """
    user = request.user
    # `or` short-circuits, so the owners query is never run for superusers.
    if user.is_superuser or component.project.owners.filter(id=user.id).exists():
        return NewLanguageOwnerForm
    return NewLanguageForm
[ "def", "get_new_language_form", "(", "request", ",", "component", ")", ":", "if", "request", ".", "user", ".", "is_superuser", ":", "return", "NewLanguageOwnerForm", "if", "component", ".", "project", ".", "owners", ".", "filter", "(", "id", "=", "request", ...
returns new language form for user .
train
false
17,113
def getMostRecentDate(emails):
    """Return the most recent date of any email in *emails*.

    Returns None when the list is empty.
    """
    dates = sorted((getDate(e) for e in emails), reverse=True)
    return dates[0] if dates else None
[ "def", "getMostRecentDate", "(", "emails", ")", ":", "dates", "=", "[", "getDate", "(", "e", ")", "for", "e", "in", "emails", "]", "dates", ".", "sort", "(", "reverse", "=", "True", ")", "if", "dates", ":", "return", "dates", "[", "0", "]", "return...
returns the most recent date of any email in the list provided .
train
false