id_within_dataset
int64
1
55.5k
snippet
stringlengths
19
14.2k
tokens
listlengths
6
1.63k
nl
stringlengths
6
352
split_within_dataset
stringclasses
1 value
is_duplicated
bool
2 classes
261
def multMatVect(v, A, m1, B, m2): if (multMatVect.dot_modulo is None): A_sym = tensor.lmatrix('A') s_sym = tensor.ivector('s') m_sym = tensor.iscalar('m') A2_sym = tensor.lmatrix('A2') s2_sym = tensor.ivector('s2') m2_sym = tensor.iscalar('m2') o = DotModulo()(A_sym, s_sym, m_sym, A2_sym, s2_sym, m2_sym) multMatVect.dot_modulo = function([A_sym, s_sym, m_sym, A2_sym, s2_sym, m2_sym], o, profile=False) f = multMatVect.dot_modulo f.input_storage[0].storage[0] = A f.input_storage[1].storage[0] = v[:3] f.input_storage[2].storage[0] = m1 f.input_storage[3].storage[0] = B f.input_storage[4].storage[0] = v[3:] f.input_storage[5].storage[0] = m2 f.fn() r = f.output_storage[0].storage[0] return r
[ "def", "multMatVect", "(", "v", ",", "A", ",", "m1", ",", "B", ",", "m2", ")", ":", "if", "(", "multMatVect", ".", "dot_modulo", "is", "None", ")", ":", "A_sym", "=", "tensor", ".", "lmatrix", "(", "'A'", ")", "s_sym", "=", "tensor", ".", "ivecto...
multiply the first half of v by a with a modulo of m1 and the second half by b with a modulo of m2 .
train
false
262
def endit(): end = clock() total = (end - START) print('Completion time: {0} seconds.'.format(total))
[ "def", "endit", "(", ")", ":", "end", "=", "clock", "(", ")", "total", "=", "(", "end", "-", "START", ")", "print", "(", "'Completion time: {0} seconds.'", ".", "format", "(", "total", ")", ")" ]
times how long it took for this script to run .
train
false
263
def get_isnan(dtype): if isinstance(dtype, (types.Float, types.Complex)): return np.isnan else: @register_jitable def _trivial_isnan(x): return False return _trivial_isnan
[ "def", "get_isnan", "(", "dtype", ")", ":", "if", "isinstance", "(", "dtype", ",", "(", "types", ".", "Float", ",", "types", ".", "Complex", ")", ")", ":", "return", "np", ".", "isnan", "else", ":", "@", "register_jitable", "def", "_trivial_isnan", "("...
a generic isnan() function .
train
false
264
@validate('form') def valid_att_in_label(arch): return (not arch.xpath('//label[not(@for or @string)]'))
[ "@", "validate", "(", "'form'", ")", "def", "valid_att_in_label", "(", "arch", ")", ":", "return", "(", "not", "arch", ".", "xpath", "(", "'//label[not(@for or @string)]'", ")", ")" ]
label nodes must have a @for or a @string .
train
false
267
def burt_table(data, variables): values = [(var, value) for var in variables for value in var.values] table = numpy.zeros((len(values), len(values))) counts = [len(attr.values) for attr in variables] offsets = numpy.r_[(0, numpy.cumsum(counts))] for i in range(len(variables)): for j in range((i + 1)): var1 = variables[i] var2 = variables[j] cm = contingency.get_contingency(data, var2, var1) (start1, end1) = (offsets[i], (offsets[i] + counts[i])) (start2, end2) = (offsets[j], (offsets[j] + counts[j])) table[start1:end1, start2:end2] += cm if (i != j): table[start2:end2, start1:end1] += cm.T return (values, table)
[ "def", "burt_table", "(", "data", ",", "variables", ")", ":", "values", "=", "[", "(", "var", ",", "value", ")", "for", "var", "in", "variables", "for", "value", "in", "var", ".", "values", "]", "table", "=", "numpy", ".", "zeros", "(", "(", "len",...
construct a burt table for variables .
train
false
268
def Element(*args, **kw): v = html_parser.makeelement(*args, **kw) return v
[ "def", "Element", "(", "*", "args", ",", "**", "kw", ")", ":", "v", "=", "html_parser", ".", "makeelement", "(", "*", "args", ",", "**", "kw", ")", "return", "v" ]
create an atom element .
train
false
269
def assert_calculated_changes_for_deployer(case, deployer, node_state, node_config, nonmanifest_datasets, additional_node_states, additional_node_config, expected_changes, local_state, leases=Leases()): cluster_state = compute_cluster_state(node_state, additional_node_states, nonmanifest_datasets) cluster_configuration = Deployment(nodes=({node_config} | additional_node_config), leases=leases) changes = deployer.calculate_changes(cluster_configuration, cluster_state, local_state) case.assertEqual(expected_changes, changes)
[ "def", "assert_calculated_changes_for_deployer", "(", "case", ",", "deployer", ",", "node_state", ",", "node_config", ",", "nonmanifest_datasets", ",", "additional_node_states", ",", "additional_node_config", ",", "expected_changes", ",", "local_state", ",", "leases", "="...
assert that calculate_changes returns certain changes when it is invoked with the given state and configuration .
train
false
270
def pickle_dump(data, filename): fh = open(filename, 'w') try: pickle.dump(data, fh) finally: fh.close()
[ "def", "pickle_dump", "(", "data", ",", "filename", ")", ":", "fh", "=", "open", "(", "filename", ",", "'w'", ")", "try", ":", "pickle", ".", "dump", "(", "data", ",", "fh", ")", "finally", ":", "fh", ".", "close", "(", ")" ]
equivalent to pickle .
train
false
273
def _generate_output_dataframe(data_subset, defaults): cols = set(data_subset.columns) desired_cols = set(defaults) data_subset.drop((cols - desired_cols), axis=1, inplace=True) for col in (desired_cols - cols): data_subset[col] = defaults[col] return data_subset
[ "def", "_generate_output_dataframe", "(", "data_subset", ",", "defaults", ")", ":", "cols", "=", "set", "(", "data_subset", ".", "columns", ")", "desired_cols", "=", "set", "(", "defaults", ")", "data_subset", ".", "drop", "(", "(", "cols", "-", "desired_col...
generates an output dataframe from the given subset of user-provided data .
train
true
274
def make_choices(choices): return list(zip(choices, choices))
[ "def", "make_choices", "(", "choices", ")", ":", "return", "list", "(", "zip", "(", "choices", ",", "choices", ")", ")" ]
zips a list with itself for field choices .
train
false
276
def click_component_from_menu(category, component_type, is_advanced): if is_advanced: world.retry_on_exception(_click_advanced, ignored_exceptions=AssertionError) link = world.retry_on_exception((lambda : _find_matching_button(category, component_type)), ignored_exceptions=AssertionError) world.retry_on_exception((lambda : link.click()))
[ "def", "click_component_from_menu", "(", "category", ",", "component_type", ",", "is_advanced", ")", ":", "if", "is_advanced", ":", "world", ".", "retry_on_exception", "(", "_click_advanced", ",", "ignored_exceptions", "=", "AssertionError", ")", "link", "=", "world...
creates a component for a category with more than one template .
train
false
277
def processSVGElementtext(svgReader, xmlElement): if svgReader.yAxisPointingUpward: return fontFamily = getStyleValue('Gentium Basic Regular', 'font-family', xmlElement) fontSize = getRightStripAlphabetPercent(getStyleValue('12.0', 'font-size', xmlElement)) matrixSVG = getChainMatrixSVGIfNecessary(xmlElement, svgReader.yAxisPointingUpward) rotatedLoopLayer = svgReader.getRotatedLoopLayer() translate = euclidean.getComplexDefaultByDictionaryKeys(complex(), xmlElement.attributeDictionary, 'x', 'y') for textComplexLoop in getTextComplexLoops(fontFamily, fontSize, xmlElement.text, svgReader.yAxisPointingUpward): translatedLoop = [] for textComplexPoint in textComplexLoop: translatedLoop.append((textComplexPoint + translate)) rotatedLoopLayer.loops.append(matrixSVG.getTransformedPath(translatedLoop))
[ "def", "processSVGElementtext", "(", "svgReader", ",", "xmlElement", ")", ":", "if", "svgReader", ".", "yAxisPointingUpward", ":", "return", "fontFamily", "=", "getStyleValue", "(", "'Gentium Basic Regular'", ",", "'font-family'", ",", "xmlElement", ")", "fontSize", ...
process elementnode by svgreader .
train
false
278
def setClosedAttribute(elementNode, revolutions): closedBoolean = evaluate.getEvaluatedBoolean((revolutions <= 1), elementNode, 'closed') elementNode.attributes['closed'] = str(closedBoolean).lower()
[ "def", "setClosedAttribute", "(", "elementNode", ",", "revolutions", ")", ":", "closedBoolean", "=", "evaluate", ".", "getEvaluatedBoolean", "(", "(", "revolutions", "<=", "1", ")", ",", "elementNode", ",", "'closed'", ")", "elementNode", ".", "attributes", "[",...
set the closed attribute of the xmlelement .
train
false
279
def default_environment(): return dict(_VARS)
[ "def", "default_environment", "(", ")", ":", "return", "dict", "(", "_VARS", ")" ]
return copy of default pep 385 globals dictionary .
train
false
281
def storage_uri_for_key(key): if (not isinstance(key, boto.s3.key.Key)): raise InvalidUriError(('Requested key (%s) is not a subclass of boto.s3.key.Key' % str(type(key)))) prov_name = key.bucket.connection.provider.get_provider_name() uri_str = ('%s://%s/%s' % (prov_name, key.bucket.name, key.name)) return storage_uri(uri_str)
[ "def", "storage_uri_for_key", "(", "key", ")", ":", "if", "(", "not", "isinstance", "(", "key", ",", "boto", ".", "s3", ".", "key", ".", "Key", ")", ")", ":", "raise", "InvalidUriError", "(", "(", "'Requested key (%s) is not a subclass of boto.s3.key.Key'", "%...
returns a storageuri for the given key .
train
true
285
def root_mean_square_error(y_real, y_pred): (y_real, y_pred) = check_arrays(y_real, y_pred) return np.sqrt((np.sum(((y_pred - y_real) ** 2)) / y_real.shape[0]))
[ "def", "root_mean_square_error", "(", "y_real", ",", "y_pred", ")", ":", "(", "y_real", ",", "y_pred", ")", "=", "check_arrays", "(", "y_real", ",", "y_pred", ")", "return", "np", ".", "sqrt", "(", "(", "np", ".", "sum", "(", "(", "(", "y_pred", "-",...
it computes the root mean squared difference between predicted and actual ratings for users .
train
false
286
def factor_calculate(evaluator, types, operator): for typ in types: if (operator == '-'): if _is_number(typ): (yield create(evaluator, (- typ.obj))) elif (operator == 'not'): value = typ.py__bool__() if (value is None): return (yield create(evaluator, (not value))) else: (yield typ)
[ "def", "factor_calculate", "(", "evaluator", ",", "types", ",", "operator", ")", ":", "for", "typ", "in", "types", ":", "if", "(", "operator", "==", "'-'", ")", ":", "if", "_is_number", "(", "typ", ")", ":", "(", "yield", "create", "(", "evaluator", ...
calculates + .
train
false
288
def get_command(tool_xml): root = tool_xml.getroot() commands = root.findall('command') command = None if (len(commands) == 1): command = commands[0] return command
[ "def", "get_command", "(", "tool_xml", ")", ":", "root", "=", "tool_xml", ".", "getroot", "(", ")", "commands", "=", "root", ".", "findall", "(", "'command'", ")", "command", "=", "None", "if", "(", "len", "(", "commands", ")", "==", "1", ")", ":", ...
gets a zone by name .
train
false
289
def has_change_path_cmd(sql): return (u'set search_path' in sql.lower())
[ "def", "has_change_path_cmd", "(", "sql", ")", ":", "return", "(", "u'set search_path'", "in", "sql", ".", "lower", "(", ")", ")" ]
determines if the search_path should be refreshed by checking if the sql has set search_path .
train
false
290
def sanitize_file_name(name, substitute='_', as_unicode=False): if isinstance(name, unicode): name = name.encode(filesystem_encoding, 'ignore') _filename_sanitize = re.compile('[\\xae\\0\\\\|\\?\\*<":>\\+/]') one = _filename_sanitize.sub(substitute, name) one = re.sub('\\s', ' ', one).strip() (bname, ext) = os.path.splitext(one) one = re.sub('^\\.+$', '_', bname) if as_unicode: one = one.decode(filesystem_encoding) one = one.replace('..', substitute) one += ext if (one and (one[(-1)] in ('.', ' '))): one = (one[:(-1)] + '_') if one.startswith('.'): one = ('_' + one[1:]) return one
[ "def", "sanitize_file_name", "(", "name", ",", "substitute", "=", "'_'", ",", "as_unicode", "=", "False", ")", ":", "if", "isinstance", "(", "name", ",", "unicode", ")", ":", "name", "=", "name", ".", "encode", "(", "filesystem_encoding", ",", "'ignore'", ...
sanitize the filename name .
train
false
292
@register.inclusion_tag('addons/review_list_box.html') @jinja2.contextfunction def review_list_box(context, addon, reviews): c = dict(context.items()) c.update(addon=addon, reviews=reviews) return c
[ "@", "register", ".", "inclusion_tag", "(", "'addons/review_list_box.html'", ")", "@", "jinja2", ".", "contextfunction", "def", "review_list_box", "(", "context", ",", "addon", ",", "reviews", ")", ":", "c", "=", "dict", "(", "context", ".", "items", "(", ")...
details page: show a box with three add-on reviews .
train
false
293
def get_if_addr6(iff): for x in in6_getifaddr(): if ((x[2] == iff) and (x[1] == IPV6_ADDR_GLOBAL)): return x[0] return None
[ "def", "get_if_addr6", "(", "iff", ")", ":", "for", "x", "in", "in6_getifaddr", "(", ")", ":", "if", "(", "(", "x", "[", "2", "]", "==", "iff", ")", "and", "(", "x", "[", "1", "]", "==", "IPV6_ADDR_GLOBAL", ")", ")", ":", "return", "x", "[", ...
returns the main global unicast address associated with provided interface .
train
true
294
def check_integrity(models): messages = dict(error=[], warning=[]) for model in models: validators = [] for name in dir(model): if (not name.startswith('_check')): continue obj = getattr(model, name) if getattr(obj, 'validator_type', None): validators.append(obj) for func in validators: messages[func.validator_type].extend(func()) for msg in sorted(messages['error']): logger.error(('E-%d (%s): %s: %s' % msg)) for msg in sorted(messages['warning']): logger.warning(('W-%d (%s): %s: %s' % msg))
[ "def", "check_integrity", "(", "models", ")", ":", "messages", "=", "dict", "(", "error", "=", "[", "]", ",", "warning", "=", "[", "]", ")", "for", "model", "in", "models", ":", "validators", "=", "[", "]", "for", "name", "in", "dir", "(", "model",...
apply validation and integrity checks to a collection of bokeh models .
train
false
296
def get_user_impact_score(user_id): model = user_models.UserStatsModel.get(user_id, strict=False) if model: return model.impact_score else: return 0
[ "def", "get_user_impact_score", "(", "user_id", ")", ":", "model", "=", "user_models", ".", "UserStatsModel", ".", "get", "(", "user_id", ",", "strict", "=", "False", ")", "if", "model", ":", "return", "model", ".", "impact_score", "else", ":", "return", "...
gets the user impact score for the given user_id .
train
false
299
def split_line_endings(data): lines = NEWLINE_RE.split(data) if (not lines[(-1)]): lines = lines[:(-1)] return lines
[ "def", "split_line_endings", "(", "data", ")", ":", "lines", "=", "NEWLINE_RE", ".", "split", "(", "data", ")", "if", "(", "not", "lines", "[", "(", "-", "1", ")", "]", ")", ":", "lines", "=", "lines", "[", ":", "(", "-", "1", ")", "]", "return...
splits a string into lines while preserving all non-crlf characters .
train
false
300
def description_setter(registry, xml_parent, data): descriptionsetter = XML.SubElement(xml_parent, 'hudson.plugins.descriptionsetter.DescriptionSetterBuilder') XML.SubElement(descriptionsetter, 'regexp').text = data.get('regexp', '') if ('description' in data): XML.SubElement(descriptionsetter, 'description').text = data['description']
[ "def", "description_setter", "(", "registry", ",", "xml_parent", ",", "data", ")", ":", "descriptionsetter", "=", "XML", ".", "SubElement", "(", "xml_parent", ",", "'hudson.plugins.descriptionsetter.DescriptionSetterBuilder'", ")", "XML", ".", "SubElement", "(", "desc...
yaml: description-setter this plugin sets the description for each build .
train
false
301
def kit_export_csv(): output = '' for resourcename in ['kit', 'item', 'kit_item']: _table = ((module + '_') + resourcename) table = db[_table] query = auth.s3_accessible_query('read', table) if ('deleted' in table): query = (((table.deleted == False) | (table.deleted == None)) & query) output += (('TABLE ' + _table) + '\n') output += str(db(query).select()) output += '\n\n' import gluon.contenttype response.headers['Content-Type'] = gluon.contenttype.contenttype('.csv') filename = ('%s_kits.csv' % request.env.server_name) response.headers['Content-disposition'] = ('attachment; filename=%s' % filename) return output
[ "def", "kit_export_csv", "(", ")", ":", "output", "=", "''", "for", "resourcename", "in", "[", "'kit'", ",", "'item'", ",", "'kit_item'", "]", ":", "_table", "=", "(", "(", "module", "+", "'_'", ")", "+", "resourcename", ")", "table", "=", "db", "[",...
export kits in csv format concatenates: kits .
train
false
302
def register_detector(cls): detectorshub.register(cls()) return cls
[ "def", "register_detector", "(", "cls", ")", ":", "detectorshub", ".", "register", "(", "cls", "(", ")", ")", "return", "cls" ]
collector of all the reddit detectors .
train
false
303
def sensitive_variables(*variables): def decorator(func): @functools.wraps(func) def sensitive_variables_wrapper(*func_args, **func_kwargs): if variables: sensitive_variables_wrapper.sensitive_variables = variables else: sensitive_variables_wrapper.sensitive_variables = '__ALL__' return func(*func_args, **func_kwargs) return sensitive_variables_wrapper return decorator
[ "def", "sensitive_variables", "(", "*", "variables", ")", ":", "def", "decorator", "(", "func", ")", ":", "@", "functools", ".", "wraps", "(", "func", ")", "def", "sensitive_variables_wrapper", "(", "*", "func_args", ",", "**", "func_kwargs", ")", ":", "if...
indicates which variables used in the decorated function are sensitive .
train
false
306
def RANGE(value): if (u':' in value): (start, stop) = value.split(u':', 1) start = UINT(start) if stop.strip(): stop = UINT(stop) if (start >= stop): raise ValueError(u'End must be larger than start') else: stop = None else: start = UINT(value) stop = (start + 1) return slice(start, stop)
[ "def", "RANGE", "(", "value", ")", ":", "if", "(", "u':'", "in", "value", ")", ":", "(", "start", ",", "stop", ")", "=", "value", ".", "split", "(", "u':'", ",", "1", ")", "start", "=", "UINT", "(", "start", ")", "if", "stop", ".", "strip", "...
convert a single integer or range spec into a slice n should become slice n: should become slice n:m should become slice and m > n must hold .
train
false
308
def _os_bootstrap(): names = sys.builtin_module_names join = None if ('posix' in names): sep = '/' from posix import stat elif ('nt' in names): sep = '\\' from nt import stat elif ('dos' in names): sep = '\\' from dos import stat elif ('os2' in names): sep = '\\' from os2 import stat elif ('mac' in names): from mac import stat def join(a, b): if (a == ''): return b if (':' not in a): a = (':' + a) if (a[(-1):] != ':'): a = (a + ':') return (a + b) else: raise ImportError, 'no os specific module found' if (join is None): def join(a, b, sep=sep): if (a == ''): return b lastchar = a[(-1):] if ((lastchar == '/') or (lastchar == sep)): return (a + b) return ((a + sep) + b) global _os_stat _os_stat = stat global _os_path_join _os_path_join = join
[ "def", "_os_bootstrap", "(", ")", ":", "names", "=", "sys", ".", "builtin_module_names", "join", "=", "None", "if", "(", "'posix'", "in", "names", ")", ":", "sep", "=", "'/'", "from", "posix", "import", "stat", "elif", "(", "'nt'", "in", "names", ")", ...
set up os module replacement functions for use during import bootstrap .
train
false
312
def _map_fragment_list(flist, reflist): mapped = [] for f in flist: rank = [] for i in range(0, len(reflist)): rf = reflist[i] rms = (f - rf) rank.append((rms, rf)) rank.sort() fragment = rank[0][1] mapped.append(fragment) return mapped
[ "def", "_map_fragment_list", "(", "flist", ",", "reflist", ")", ":", "mapped", "=", "[", "]", "for", "f", "in", "flist", ":", "rank", "=", "[", "]", "for", "i", "in", "range", "(", "0", ",", "len", "(", "reflist", ")", ")", ":", "rf", "=", "ref...
map all frgaments in flist to the closest fragment in reflist .
train
false
313
def test_magic_rerun(): ip = get_ipython() ip.run_cell('a = 10', store_history=True) ip.run_cell('a += 1', store_history=True) nt.assert_equal(ip.user_ns['a'], 11) ip.run_cell('%rerun', store_history=True) nt.assert_equal(ip.user_ns['a'], 12)
[ "def", "test_magic_rerun", "(", ")", ":", "ip", "=", "get_ipython", "(", ")", "ip", ".", "run_cell", "(", "'a = 10'", ",", "store_history", "=", "True", ")", "ip", ".", "run_cell", "(", "'a += 1'", ",", "store_history", "=", "True", ")", "nt", ".", "as...
simple test for %rerun .
train
false
315
def _get_foreign_keys(t_images, t_image_members, t_image_properties, dialect): foreign_keys = [] if t_image_members.foreign_keys: img_members_fk_name = list(t_image_members.foreign_keys)[0].name if (dialect == 'mysql'): fk1 = migrate.ForeignKeyConstraint([t_image_members.c.image_id], [t_images.c.id], name=img_members_fk_name) else: fk1 = migrate.ForeignKeyConstraint([t_image_members.c.image_id], [t_images.c.id]) foreign_keys.append(fk1) if t_image_properties.foreign_keys: img_properties_fk_name = list(t_image_properties.foreign_keys)[0].name if (dialect == 'mysql'): fk2 = migrate.ForeignKeyConstraint([t_image_properties.c.image_id], [t_images.c.id], name=img_properties_fk_name) else: fk2 = migrate.ForeignKeyConstraint([t_image_properties.c.image_id], [t_images.c.id]) foreign_keys.append(fk2) return foreign_keys
[ "def", "_get_foreign_keys", "(", "t_images", ",", "t_image_members", ",", "t_image_properties", ",", "dialect", ")", ":", "foreign_keys", "=", "[", "]", "if", "t_image_members", ".", "foreign_keys", ":", "img_members_fk_name", "=", "list", "(", "t_image_members", ...
retrieve and return foreign keys for members/properties tables .
train
false
316
def bytes2int(bytes): if (not ((type(bytes) is types.ListType) or (type(bytes) is types.StringType))): raise TypeError('You must pass a string or a list') integer = 0 for byte in bytes: integer *= 256 if (type(byte) is types.StringType): byte = ord(byte) integer += byte return integer
[ "def", "bytes2int", "(", "bytes", ")", ":", "if", "(", "not", "(", "(", "type", "(", "bytes", ")", "is", "types", ".", "ListType", ")", "or", "(", "type", "(", "bytes", ")", "is", "types", ".", "StringType", ")", ")", ")", ":", "raise", "TypeErro...
converts a list of bytes or a string to an integer .
train
false
317
def CheckFreeSpace(): if (cfg.download_free() and (not sabnzbd.downloader.Downloader.do.paused)): if (misc.diskfree(cfg.download_dir.get_path()) < (cfg.download_free.get_float() / GIGI)): logging.warning(T('Too little diskspace forcing PAUSE')) Downloader.do.pause(save=False) emailer.diskfull()
[ "def", "CheckFreeSpace", "(", ")", ":", "if", "(", "cfg", ".", "download_free", "(", ")", "and", "(", "not", "sabnzbd", ".", "downloader", ".", "Downloader", ".", "do", ".", "paused", ")", ")", ":", "if", "(", "misc", ".", "diskfree", "(", "cfg", "...
check if enough disk space is free .
train
false
318
def find_unit(quantity): import sympy.physics.units as u rv = [] if isinstance(quantity, str): rv = [i for i in dir(u) if (quantity in i)] else: units = quantity.as_coeff_Mul()[1] for i in dir(u): try: if (units == eval(('u.' + i)).as_coeff_Mul()[1]): rv.append(str(i)) except Exception: pass return sorted(rv, key=len)
[ "def", "find_unit", "(", "quantity", ")", ":", "import", "sympy", ".", "physics", ".", "units", "as", "u", "rv", "=", "[", "]", "if", "isinstance", "(", "quantity", ",", "str", ")", ":", "rv", "=", "[", "i", "for", "i", "in", "dir", "(", "u", "...
finds the unit with the given url in the course tree and returns the unit .
train
false
319
def configure_registry_client(): global _CLIENT_KWARGS, _CLIENT_HOST, _CLIENT_PORT, _METADATA_ENCRYPTION_KEY try: (host, port) = (CONF.registry_host, CONF.registry_port) except cfg.ConfigFileValueError: msg = _('Configuration option was not valid') LOG.error(msg) raise exception.BadRegistryConnectionConfiguration(msg) except IndexError: msg = _('Could not find required configuration option') LOG.error(msg) raise exception.BadRegistryConnectionConfiguration(msg) _CLIENT_HOST = host _CLIENT_PORT = port _METADATA_ENCRYPTION_KEY = CONF.metadata_encryption_key _CLIENT_KWARGS = {'use_ssl': (CONF.registry_client_protocol.lower() == 'https'), 'key_file': CONF.registry_client_key_file, 'cert_file': CONF.registry_client_cert_file, 'ca_file': CONF.registry_client_ca_file, 'insecure': CONF.registry_client_insecure, 'timeout': CONF.registry_client_timeout}
[ "def", "configure_registry_client", "(", ")", ":", "global", "_CLIENT_KWARGS", ",", "_CLIENT_HOST", ",", "_CLIENT_PORT", ",", "_METADATA_ENCRYPTION_KEY", "try", ":", "(", "host", ",", "port", ")", "=", "(", "CONF", ".", "registry_host", ",", "CONF", ".", "regi...
sets up a registry client for use in registry lookups .
train
false
322
def init(mpstate): return SerialModule(mpstate)
[ "def", "init", "(", "mpstate", ")", ":", "return", "SerialModule", "(", "mpstate", ")" ]
initialise module .
train
false
323
@contextfunction def easy_invite_block(context, emails=None): if (emails is None): emails = [] request = context['request'] response_format = 'html' if ('response_format' in context): response_format = context['response_format'] return Markup(render_to_string('core/tags/easy_invite', {'emails': emails}, context_instance=RequestContext(request), response_format=response_format))
[ "@", "contextfunction", "def", "easy_invite_block", "(", "context", ",", "emails", "=", "None", ")", ":", "if", "(", "emails", "is", "None", ")", ":", "emails", "=", "[", "]", "request", "=", "context", "[", "'request'", "]", "response_format", "=", "'ht...
the humanized datetime of the last update to an object .
train
false
324
def lod_sort_by_key(_list, indexkey): _list.sort((lambda a, b: (a[indexkey] < b[indexkey]))) return _list
[ "def", "lod_sort_by_key", "(", "_list", ",", "indexkey", ")", ":", "_list", ".", "sort", "(", "(", "lambda", "a", ",", "b", ":", "(", "a", "[", "indexkey", "]", "<", "b", "[", "indexkey", "]", ")", ")", ")", "return", "_list" ]
sorts a list of dictionaries by a given key in the dictionaries note: this is a destructive operation .
train
false
325
def set_default_encoding_file(file): global default_encoding_file default_encoding_file = file
[ "def", "set_default_encoding_file", "(", "file", ")", ":", "global", "default_encoding_file", "default_encoding_file", "=", "file" ]
set file used to get codec information .
train
false
326
def addFacesByConvex(faces, indexedLoop): if (len(indexedLoop) < 3): return indexBegin = indexedLoop[0].index for indexedPointIndex in xrange(1, (len(indexedLoop) - 1)): indexCenter = indexedLoop[indexedPointIndex].index indexEnd = indexedLoop[((indexedPointIndex + 1) % len(indexedLoop))].index if ((indexBegin != indexCenter) and (indexCenter != indexEnd) and (indexEnd != indexBegin)): faceFromConvex = face.Face() faceFromConvex.index = len(faces) faceFromConvex.vertexIndexes.append(indexBegin) faceFromConvex.vertexIndexes.append(indexCenter) faceFromConvex.vertexIndexes.append(indexEnd) faces.append(faceFromConvex)
[ "def", "addFacesByConvex", "(", "faces", ",", "indexedLoop", ")", ":", "if", "(", "len", "(", "indexedLoop", ")", "<", "3", ")", ":", "return", "indexBegin", "=", "indexedLoop", "[", "0", "]", ".", "index", "for", "indexedPointIndex", "in", "xrange", "("...
add faces from a convex polygon .
train
false
327
def _bytes_to_json(value): if isinstance(value, bytes): value = base64.encodestring(value) return value
[ "def", "_bytes_to_json", "(", "value", ")", ":", "if", "isinstance", "(", "value", ",", "bytes", ")", ":", "value", "=", "base64", ".", "encodestring", "(", "value", ")", "return", "value" ]
coerce value to an json-compatible representation .
train
false
329
def _get_addons(request, addons, addon_id, action): items = [] a = MenuItem() a.selected = (not addon_id) (a.text, a.url) = (_('All My Add-ons'), reverse('devhub.feed_all')) if action: a.url += ('?action=' + action) items.append(a) for addon in addons: item = MenuItem() try: item.selected = (addon_id and (addon.id == int(addon_id))) except ValueError: pass url = reverse('devhub.feed', args=[addon.slug]) if action: url += ('?action=' + action) (item.text, item.url) = (addon.name, url) items.append(item) return items
[ "def", "_get_addons", "(", "request", ",", "addons", ",", "addon_id", ",", "action", ")", ":", "items", "=", "[", "]", "a", "=", "MenuItem", "(", ")", "a", ".", "selected", "=", "(", "not", "addon_id", ")", "(", "a", ".", "text", ",", "a", ".", ...
create a list of menuitems for the activity feed .
train
false
330
def wrap_clause_in_parens(sql): if sql.strip(): sql = u'({})'.format(sql) return sa.text(sql)
[ "def", "wrap_clause_in_parens", "(", "sql", ")", ":", "if", "sql", ".", "strip", "(", ")", ":", "sql", "=", "u'({})'", ".", "format", "(", "sql", ")", "return", "sa", ".", "text", "(", "sql", ")" ]
wrap where/having clause with parenthesis if necessary .
train
false
332
def get_effective_user(requesting_user, target_username): if (target_username == requesting_user.username): return requesting_user elif (target_username == ''): return AnonymousUser() elif can_view_courses_for_username(requesting_user, target_username): return User.objects.get(username=target_username) else: raise PermissionDenied()
[ "def", "get_effective_user", "(", "requesting_user", ",", "target_username", ")", ":", "if", "(", "target_username", "==", "requesting_user", ".", "username", ")", ":", "return", "requesting_user", "elif", "(", "target_username", "==", "''", ")", ":", "return", ...
get the user we want to view information on behalf of .
train
false
333
def request_from_dict(d, spider=None): cb = d['callback'] if (cb and spider): cb = _get_method(spider, cb) eb = d['errback'] if (eb and spider): eb = _get_method(spider, eb) return Request(url=d['url'].encode('ascii'), callback=cb, errback=eb, method=d['method'], headers=d['headers'], body=d['body'], cookies=d['cookies'], meta=d['meta'], encoding=d['_encoding'], priority=d['priority'], dont_filter=d['dont_filter'])
[ "def", "request_from_dict", "(", "d", ",", "spider", "=", "None", ")", ":", "cb", "=", "d", "[", "'callback'", "]", "if", "(", "cb", "and", "spider", ")", ":", "cb", "=", "_get_method", "(", "spider", ",", "cb", ")", "eb", "=", "d", "[", "'errbac...
create request object from a dict .
train
false
334
def mark_plot_labels(app, document): for (name, explicit) in document.nametypes.iteritems(): if (not explicit): continue labelid = document.nameids[name] if (labelid is None): continue node = document.ids[labelid] if (node.tagname in ('html_only', 'latex_only')): for n in node: if (n.tagname == 'figure'): sectname = name for c in n: if (c.tagname == 'caption'): sectname = c.astext() break node['ids'].remove(labelid) node['names'].remove(name) n['ids'].append(labelid) n['names'].append(name) document.settings.env.labels[name] = (document.settings.env.docname, labelid, sectname) break
[ "def", "mark_plot_labels", "(", "app", ",", "document", ")", ":", "for", "(", "name", ",", "explicit", ")", "in", "document", ".", "nametypes", ".", "iteritems", "(", ")", ":", "if", "(", "not", "explicit", ")", ":", "continue", "labelid", "=", "docume...
to make plots referenceable .
train
false
335
def setUp_test_db(): db.upgradeDatabase(db.DBConnection(), mainDB.InitialSchema) db.sanityCheckDatabase(db.DBConnection(), mainDB.MainSanityCheck) db.upgradeDatabase(db.DBConnection('cache.db'), cache_db.InitialSchema)
[ "def", "setUp_test_db", "(", ")", ":", "db", ".", "upgradeDatabase", "(", "db", ".", "DBConnection", "(", ")", ",", "mainDB", ".", "InitialSchema", ")", "db", ".", "sanityCheckDatabase", "(", "db", ".", "DBConnection", "(", ")", ",", "mainDB", ".", "Main...
upgrades the db to the latest version .
train
false
336
def get_nll(x, parzen, batch_size=10): inds = range(x.shape[0]) n_batches = int(numpy.ceil((float(len(inds)) / batch_size))) times = [] nlls = [] for i in range(n_batches): begin = time.time() nll = parzen(x[inds[i::n_batches]]) end = time.time() times.append((end - begin)) nlls.extend(nll) if ((i % 10) == 0): print i, numpy.mean(times), numpy.mean(nlls) return numpy.array(nlls)
[ "def", "get_nll", "(", "x", ",", "parzen", ",", "batch_size", "=", "10", ")", ":", "inds", "=", "range", "(", "x", ".", "shape", "[", "0", "]", ")", "n_batches", "=", "int", "(", "numpy", ".", "ceil", "(", "(", "float", "(", "len", "(", "inds",...
credit: yann n .
train
false
340
def add_kdb_reader(sub_signature, cls):
    """Register (or replace) the reader class used for a KeePass file
    with the given sub-signature."""
    _kdb_readers.update({sub_signature: cls})
[ "def", "add_kdb_reader", "(", "sub_signature", ",", "cls", ")", ":", "_kdb_readers", "[", "sub_signature", "]", "=", "cls" ]
add or overwrite the class used to process a keepass file .
train
false
341
def monitor_load_globals(sock, filename, ext):
    """Ask the remote monitor to load globals() from *filename* (format *ext*)."""
    settings = [filename, ext]
    return communicate(sock, '__load_globals__()', settings=settings)
[ "def", "monitor_load_globals", "(", "sock", ",", "filename", ",", "ext", ")", ":", "return", "communicate", "(", "sock", ",", "'__load_globals__()'", ",", "settings", "=", "[", "filename", ",", "ext", "]", ")" ]
load globals() from file .
train
false
342
def _get_loc():
    """Return the active file location cached in ``__context__``
    (implicitly None when the key is absent, as in the original)."""
    return __context__.get(LOC_KEY)
[ "def", "_get_loc", "(", ")", ":", "if", "(", "LOC_KEY", "in", "__context__", ")", ":", "return", "__context__", "[", "LOC_KEY", "]" ]
return the active file location .
train
false
344
def _update_stylesheet(obj):
    """Re-render and apply *obj*'s stylesheet after clearing the cache."""
    get_stylesheet.cache_clear()
    # A deleted C++ wrapper must not be touched.
    if sip.isdeleted(obj):
        return
    obj.setStyleSheet(get_stylesheet(obj.STYLESHEET))
[ "def", "_update_stylesheet", "(", "obj", ")", ":", "get_stylesheet", ".", "cache_clear", "(", ")", "if", "(", "not", "sip", ".", "isdeleted", "(", "obj", ")", ")", ":", "obj", ".", "setStyleSheet", "(", "get_stylesheet", "(", "obj", ".", "STYLESHEET", ")...
update the stylesheet for obj .
train
false
345
def make_range_iterator(typ):
    """Return the structure proxy representation of the given type *typ*."""
    proxy = cgutils.create_struct_proxy(typ)
    return proxy
[ "def", "make_range_iterator", "(", "typ", ")", ":", "return", "cgutils", ".", "create_struct_proxy", "(", "typ", ")" ]
return the structure representation of the given *typ* .
train
false
346
def time_difference(date1, date2):
    """Return the difference in seconds (float, microsecond precision)
    between two datetime objects: date1 - date2."""
    def _epoch_seconds(d):
        # timetuple() drops microseconds, so add them back explicitly.
        return mktime(d.timetuple()) + d.microsecond / 1000000.0
    return _epoch_seconds(date1) - _epoch_seconds(date2)
[ "def", "time_difference", "(", "date1", ",", "date2", ")", ":", "later", "=", "(", "mktime", "(", "date1", ".", "timetuple", "(", ")", ")", "+", "(", "date1", ".", "microsecond", "/", "1000000.0", ")", ")", "earlier", "=", "(", "mktime", "(", "date2"...
returns the time difference in seconds between the given two datetime objects .
train
false
347
def get_data_files():
    """Walk ``share_jupyter`` and build (relative-dir, [files]) data_files
    entries, with directory paths made relative to ``here``."""
    ntrim = len(here + os.path.sep)
    return [(d[ntrim:], [pjoin(d, f) for f in filenames])
            for d, dirs, filenames in os.walk(share_jupyter)]
[ "def", "get_data_files", "(", ")", ":", "data_files", "=", "[", "]", "ntrim", "=", "len", "(", "(", "here", "+", "os", ".", "path", ".", "sep", ")", ")", "for", "(", "d", ",", "dirs", ",", "filenames", ")", "in", "os", ".", "walk", "(", "share_...
walk the share/jupyter tree and collect its files as data_files entries with directory paths made relative to here .
train
false
348
def get_demography_template(stream, model, tp_dir=None):
    """Copy the demography template for *model* into *stream*.

    Templates are read from *tp_dir* when given, otherwise from the
    built-in template directory; the file is named ``<model>.par``.
    """
    base = builtin_tpl_dir if tp_dir is None else tp_dir
    filename = sep.join([base, model + '.par'])
    with open(filename, 'r') as template:
        for line in template:
            stream.write(line)
[ "def", "get_demography_template", "(", "stream", ",", "model", ",", "tp_dir", "=", "None", ")", ":", "if", "(", "tp_dir", "is", "None", ")", ":", "filename", "=", "sep", ".", "join", "(", "[", "builtin_tpl_dir", ",", "(", "model", "+", "'.par'", ")", ...
gets a demograpy template .
train
false
349
def new(rsa_key):
    """Create a PKCS#1 v1.5 signature scheme object bound to *rsa_key*."""
    scheme = PKCS115_SigScheme(rsa_key)
    return scheme
[ "def", "new", "(", "rsa_key", ")", ":", "return", "PKCS115_SigScheme", "(", "rsa_key", ")" ]
return a fresh pkcs#1 v1.5 signature scheme object bound to the given rsa key .
train
false
350
def decodeInventoryEntry_level2(document):
    """Decode an inventory entry (level 2) from a parsed spaCy document.

    Looks for a prepositional-object noun (the item), gathers its child
    tokens into the item text, then walks up through the preposition to the
    amount/unit head and accumulates any numeric children as the amount.

    Returns an ``Inventory`` built from the document text.
    """
    count = Inventory(str(document))
    for token in document:
        # BUG FIX: the original tested `token.dep_ == (u'pobj' or u'meta')`,
        # which collapses to a comparison against u'pobj' only (and likewise
        # the POS test collapsed to u'NOUN'). Membership tests restore the
        # evidently intended "any of these" semantics.
        # NOTE(review): 'NNS'/'NN' are fine-grained tags, normally found in
        # token.tag_ rather than token.pos_ — confirm against the pipeline.
        if (token.dep_ in (u'pobj', u'meta')) and (token.pos_ in (u'NOUN', u'NNS', u'NN')):
            item = ''
            # Item text: the token's children followed by the token itself.
            for i in token.children:
                item += ' ' + str(i)
            item += ' ' + str(token)
            count.item = item
            if token.head.dep_ != u'prep':
                # Not attached through a preposition: no amount to extract.
                break
            else:
                # e.g. "3 bags of rice": head.head is the amount/unit phrase.
                amountUnit = token.head.head
                count.unit = str(amountUnit)
                for inner in amountUnit.children:
                    if inner.pos_ == u'NUM':
                        count.amount += str(inner)
    return count
[ "def", "decodeInventoryEntry_level2", "(", "document", ")", ":", "count", "=", "Inventory", "(", "str", "(", "document", ")", ")", "for", "token", "in", "document", ":", "if", "(", "(", "token", ".", "dep_", "==", "(", "u'pobj'", "or", "u'meta'", ")", ...
entry level 2 .
train
false
351
def test_iba_sklearn_metrics():
    """Check that the index-balanced-accuracy wrapper composes with the
    standard sklearn metrics and reproduces the known reference scores."""
    (y_true, y_pred, _) = make_prediction(binary=True)
    expected = ((accuracy_score, 0.54756),
                (jaccard_similarity_score, 0.54756),
                (precision_score, 0.65025),
                (recall_score, 0.4161600000000001))
    for metric, reference in expected:
        wrapped = make_index_balanced_accuracy(alpha=0.5, squared=True)(metric)
        assert_equal(wrapped(y_true, y_pred), reference)
[ "def", "test_iba_sklearn_metrics", "(", ")", ":", "(", "y_true", ",", "y_pred", ",", "_", ")", "=", "make_prediction", "(", "binary", "=", "True", ")", "acc", "=", "make_index_balanced_accuracy", "(", "alpha", "=", "0.5", ",", "squared", "=", "True", ")", ...
test the compatibility of sklearn metrics within iba .
train
false
352
@conf.commands.register
def split_layers(lower, upper, __fval=None, **fval):
    """Split two previously bound layers, removing the binding in both
    directions (bottom-up and top-down)."""
    if __fval is not None:
        fval.update(__fval)
    for splitter in (split_bottom_up, split_top_down):
        splitter(lower, upper, **fval)
[ "@", "conf", ".", "commands", ".", "register", "def", "split_layers", "(", "lower", ",", "upper", ",", "__fval", "=", "None", ",", "**", "fval", ")", ":", "if", "(", "__fval", "is", "not", "None", ")", ":", "fval", ".", "update", "(", "__fval", ")"...
split 2 layers previously bound .
train
false
353
def check_classification_targets(y):
    """Ensure that target ``y`` is of a non-regression (classification) type.

    Raises ``ValueError`` for anything other than the known classification
    target kinds.
    """
    allowed = ('binary', 'multiclass', 'multiclass-multioutput',
               'multilabel-indicator', 'multilabel-sequences')
    y_type = type_of_target(y)
    if y_type not in allowed:
        raise ValueError('Unknown label type: %r' % y_type)
[ "def", "check_classification_targets", "(", "y", ")", ":", "y_type", "=", "type_of_target", "(", "y", ")", "if", "(", "y_type", "not", "in", "[", "'binary'", ",", "'multiclass'", ",", "'multiclass-multioutput'", ",", "'multilabel-indicator'", ",", "'multilabel-seq...
ensure that target y is of a non-regression type .
train
false
356
def optimal_mode(data):
    """Pick the most compact QR encoding mode for this chunk of data:
    numeric, alphanumeric, or 8-bit byte."""
    if data.isdigit():
        return MODE_NUMBER
    return MODE_ALPHA_NUM if RE_ALPHA_NUM.match(data) else MODE_8BIT_BYTE
[ "def", "optimal_mode", "(", "data", ")", ":", "if", "data", ".", "isdigit", "(", ")", ":", "return", "MODE_NUMBER", "if", "RE_ALPHA_NUM", ".", "match", "(", "data", ")", ":", "return", "MODE_ALPHA_NUM", "return", "MODE_8BIT_BYTE" ]
calculate the optimal mode for this chunk of data .
train
false
357
def get_rpc_server(target, endpoint):
    """Return a configured oslo.messaging RPC server serving *endpoint*
    on *target* over the module transport."""
    return oslo_messaging.get_rpc_server(
        TRANSPORT,
        target,
        [endpoint],
        executor='eventlet',
        serializer=RequestContextSerializer(JsonPayloadSerializer()),
        access_policy=dispatcher.DefaultRPCAccessPolicy,
    )
[ "def", "get_rpc_server", "(", "target", ",", "endpoint", ")", ":", "serializer", "=", "RequestContextSerializer", "(", "JsonPayloadSerializer", "(", ")", ")", "access_policy", "=", "dispatcher", ".", "DefaultRPCAccessPolicy", "return", "oslo_messaging", ".", "get_rpc_...
return a configured oslo_messaging rpc server .
train
false
359
def DateTime2literal(d, c):
    """Format datetime *d* as a quoted TIMESTAMP string literal."""
    formatted = format_TIMESTAMP(d)
    return string_literal(formatted, c)
[ "def", "DateTime2literal", "(", "d", ",", "c", ")", ":", "return", "string_literal", "(", "format_TIMESTAMP", "(", "d", ")", ",", "c", ")" ]
format a datetime object as a quoted sql timestamp string literal .
train
false
360
def test_broadcast_dims():
    """Exercise broadcasting with each dimension set to 1 in turn."""
    shapes = ((1, 2, 3), (2, 1, 3), (2, 3, 1))
    # Run the first checker over all shapes, then the second, preserving
    # the original call order.
    for checker in (test, test2):
        for shape in shapes:
            checker(shape)
[ "def", "test_broadcast_dims", "(", ")", ":", "test", "(", "(", "1", ",", "2", ",", "3", ")", ")", "test", "(", "(", "2", ",", "1", ",", "3", ")", ")", "test", "(", "(", "2", ",", "3", ",", "1", ")", ")", "test2", "(", "(", "1", ",", "2"...
test with some dimensions being 1 .
train
false
361
def strip_course_id(path):
    """Remove the fake course id (and everything after it) from *path*."""
    course_id = unicode(FAKE_COURSE_KEY)
    # partition()[0] equals split(course_id)[0]: everything before the
    # first occurrence, or the whole path when the id is absent.
    return path.partition(course_id)[0]
[ "def", "strip_course_id", "(", "path", ")", ":", "course_id", "=", "unicode", "(", "FAKE_COURSE_KEY", ")", "return", "path", ".", "split", "(", "course_id", ")", "[", "0", "]" ]
the utility function to help remove the fake course id from the url path .
train
false
362
def parse_environ_block(data):
    """Parse a C environ block ('key=value\\0...' entries) into a dict.

    Keys are uppercased on Windows, matching that platform's
    case-insensitive environment semantics.
    """
    env = {}
    pos = 0
    upcase_keys = WINDOWS
    while True:
        entry_end = data.find('\x00', pos)
        # Stop on the terminating empty entry or a missing terminator (-1).
        if entry_end <= pos:
            break
        eq = data.find('=', pos, entry_end)
        # Skip malformed entries: no '=' at all, or an empty key.
        if eq > pos:
            key = data[pos:eq]
            if upcase_keys:
                key = key.upper()
            env[key] = data[eq + 1:entry_end]
        pos = entry_end + 1
    return env
[ "def", "parse_environ_block", "(", "data", ")", ":", "ret", "=", "{", "}", "pos", "=", "0", "WINDOWS_", "=", "WINDOWS", "while", "True", ":", "next_pos", "=", "data", ".", "find", "(", "'\\x00'", ",", "pos", ")", "if", "(", "next_pos", "<=", "pos", ...
parse a c environ block of environment variables into a dictionary .
train
false
363
def dmp_eval_tail(f, A, u, K):
    """Evaluate a dense multivariate polynomial at its trailing variables.

    ``f`` is a polynomial in ``u + 1`` variables over domain ``K``;
    ``A`` supplies values ``a_j`` for the last ``len(A)`` variables
    (``x_j = a_j``). The result is a polynomial in the remaining
    variables (or a ground element when all variables are evaluated).
    """
    # No evaluation points: nothing to substitute.
    if (not A):
        return f
    # The zero polynomial stays zero, just in fewer variables.
    if dmp_zero_p(f, u):
        return dmp_zero((u - len(A)))
    e = _rec_eval_tail(f, 0, A, u, K)
    if (u == (len(A) - 1)):
        # Every variable was evaluated: e is already a ground element.
        return e
    else:
        # Normalize the reduced-arity result (strip leading zeros).
        return dmp_strip(e, (u - len(A)))
[ "def", "dmp_eval_tail", "(", "f", ",", "A", ",", "u", ",", "K", ")", ":", "if", "(", "not", "A", ")", ":", "return", "f", "if", "dmp_zero_p", "(", "f", ",", "u", ")", ":", "return", "dmp_zero", "(", "(", "u", "-", "len", "(", "A", ")", ")",...
evaluate a polynomial at x_j = a_j .
train
false
364
def dnn_gradweight(img, topgrad, kerns_shp, border_mode='valid', subsample=(1, 1), conv_mode='conv'):
    """GPU convolution gradient with respect to the weights, using cuDNN.

    Builds the symbolic graph computing dW for a convolution with the
    given geometry, from the forward input *img* and the output gradient
    *topgrad*; *kerns_shp* is the (symbolic) shape of the kernel tensor.
    """
    ctx = infer_context_name(img, topgrad)
    # cuDNN requires contiguous GPU arrays for both operands.
    img = gpu_contiguous(as_gpuarray_variable(img, ctx))
    topgrad = gpu_contiguous(as_gpuarray_variable(topgrad, ctx))
    kerns_shp = as_tensor_variable(kerns_shp)
    desc = gpu_dnn_conv_desc(border_mode=border_mode, subsample=subsample,
                             conv_mode=conv_mode)(kerns_shp)
    out = gpu_alloc_empty(ctx, dtype=img.dtype)(*kerns_shp)
    return gpu_dnn_conv_gradW()(img, topgrad, out, desc)
[ "def", "dnn_gradweight", "(", "img", ",", "topgrad", ",", "kerns_shp", ",", "border_mode", "=", "'valid'", ",", "subsample", "=", "(", "1", ",", "1", ")", ",", "conv_mode", "=", "'conv'", ")", ":", "ctx_name", "=", "infer_context_name", "(", "img", ",", ...
gpu convolution gradient with respect to weight using cudnn from nvidia .
train
false
369
def soft_unicode(s):
    """Coerce *s* to unicode unless it already is unicode."""
    if isinstance(s, unicode):
        return s
    return unicode(s)
[ "def", "soft_unicode", "(", "s", ")", ":", "if", "(", "not", "isinstance", "(", "s", ",", "unicode", ")", ")", ":", "s", "=", "unicode", "(", "s", ")", "return", "s" ]
make a string unicode if it isnt already .
train
false
371
@dispatch(Node, Mapping)
def _subs(o, d):
    """Substitute values from mapping *d* into every argument of the node
    and rebuild a node of the same type from the results."""
    replaced = [subs(arg, d) for arg in o._args]
    return type(o)(*replaced)
[ "@", "dispatch", "(", "Node", ",", "Mapping", ")", "def", "_subs", "(", "o", ",", "d", ")", ":", "newargs", "=", "(", "subs", "(", "arg", ",", "d", ")", "for", "arg", "in", "o", ".", "_args", ")", "return", "type", "(", "o", ")", "(", "*", ...
substitute values from the mapping into each argument of the node and rebuild a node of the same type .
train
false
372
def _clear_context():
    """Drop cached 'docker.*' variables from ``__context__``, keeping the
    handful of keys that must survive (client handles, version info)."""
    keep = ('docker.client', 'docker.exec_driver', 'dockerng._pull_status',
            'docker.docker_version', 'docker.docker_py_version')
    # Iterate a snapshot of the keys so popping while iterating is safe.
    for key in list(__context__):
        try:
            if key.startswith('docker.') and key not in keep:
                __context__.pop(key)
        except AttributeError:
            # Non-string keys have no startswith(); ignore them.
            pass
[ "def", "_clear_context", "(", ")", ":", "keep_context", "=", "(", "'docker.client'", ",", "'docker.exec_driver'", ",", "'dockerng._pull_status'", ",", "'docker.docker_version'", ",", "'docker.docker_py_version'", ")", "for", "key", "in", "list", "(", "__context__", ")...
clear variables stored in __context__ .
train
true
373
def appendimages(im1, im2):
    """Return a new image with *im1* and *im2* side by side, zero-padding
    the shorter one at the bottom so the row counts match."""
    row_gap = im1.shape[0] - im2.shape[0]
    if row_gap < 0:
        padding = zeros((-row_gap, im1.shape[1]))
        im1 = concatenate((im1, padding), axis=0)
    elif row_gap > 0:
        padding = zeros((row_gap, im2.shape[1]))
        im2 = concatenate((im2, padding), axis=0)
    return concatenate((im1, im2), axis=1)
[ "def", "appendimages", "(", "im1", ",", "im2", ")", ":", "rows1", "=", "im1", ".", "shape", "[", "0", "]", "rows2", "=", "im2", ".", "shape", "[", "0", "]", "if", "(", "rows1", "<", "rows2", ")", ":", "im1", "=", "concatenate", "(", "(", "im1",...
return a new image that appends the two images side-by-side .
train
false
376
def in_bounds(x, lb, ub):
    """Check that every component of *x* lies within [lb, ub]
    (elementwise, with broadcasting)."""
    within = (x >= lb) & (x <= ub)
    return np.all(within)
[ "def", "in_bounds", "(", "x", ",", "lb", ",", "ub", ")", ":", "return", "np", ".", "all", "(", "(", "(", "x", ">=", "lb", ")", "&", "(", "x", "<=", "ub", ")", ")", ")" ]
check if a point lies within bounds .
train
false
378
def _is_msie8or9():
    """True iff the requesting client identifies as MSIE with a version
    in [8.0, 10.0)."""
    ua = request.user_agent
    if ua is None or ua.browser != 'msie' or ua.version is None:
        return False
    version = tuple(int(part) for part in ua.version.split('.'))
    return (8, 0) <= version < (10, 0)
[ "def", "_is_msie8or9", "(", ")", ":", "if", "(", "(", "request", ".", "user_agent", "is", "None", ")", "or", "(", "request", ".", "user_agent", ".", "browser", "!=", "'msie'", ")", "or", "(", "request", ".", "user_agent", ".", "version", "is", "None", ...
returns true if and only if the user agent of the client making the request indicates that it is microsoft internet explorer 8 or 9 .
train
false
380
def is_protected_type(obj):
    """Determine if *obj* is an instance of a protected type — one that
    should be preserved as-is rather than coerced to a string."""
    protected = (types.NoneType, int, long, datetime.datetime,
                 datetime.date, datetime.time, float, Decimal)
    return isinstance(obj, protected)
[ "def", "is_protected_type", "(", "obj", ")", ":", "return", "isinstance", "(", "obj", ",", "(", "types", ".", "NoneType", ",", "int", ",", "long", ",", "datetime", ".", "datetime", ",", "datetime", ".", "date", ",", "datetime", ".", "time", ",", "float...
determine if the object instance is of a protected type .
train
true
381
def mkswap(device):
    """Format *device* as a swap partition, aborting if it is mounted."""
    if ismounted(device):
        abort('swap partition is mounted')
    else:
        run_as_root('mkswap %(device)s' % locals())
[ "def", "mkswap", "(", "device", ")", ":", "if", "(", "not", "ismounted", "(", "device", ")", ")", ":", "run_as_root", "(", "(", "'mkswap %(device)s'", "%", "locals", "(", ")", ")", ")", "else", ":", "abort", "(", "'swap partition is mounted'", ")" ]
format swap partition example:: from fabtools .
train
false
382
def sinh(x):
    """Apply the elementwise hyperbolic sine function to *x*."""
    op = Sinh()
    return op(x)
[ "def", "sinh", "(", "x", ")", ":", "return", "Sinh", "(", ")", "(", "x", ")" ]
evaluates the hyperbolic sine of an interval .
train
false
384
def get_sd_auth(val, sd_auth_pillar_name='serverdensity'):
    """Return the requested Server Density authentication value from pillar.

    Raises ``CommandExecutionError`` when the pillar or the key is missing.
    """
    sd_pillar = __pillar__.get(sd_auth_pillar_name)
    log.debug('Server Density Pillar: {0}'.format(sd_pillar))
    if not sd_pillar:
        log.error('Could not load {0} pillar'.format(sd_auth_pillar_name))
        raise CommandExecutionError('{0} pillar is required for authentication'.format(sd_auth_pillar_name))
    if val in sd_pillar:
        return sd_pillar[val]
    log.error('Could not find value {0} in pillar'.format(val))
    raise CommandExecutionError('{0} value was not found in pillar'.format(val))
[ "def", "get_sd_auth", "(", "val", ",", "sd_auth_pillar_name", "=", "'serverdensity'", ")", ":", "sd_pillar", "=", "__pillar__", ".", "get", "(", "sd_auth_pillar_name", ")", "log", ".", "debug", "(", "'Server Density Pillar: {0}'", ".", "format", "(", "sd_pillar", ...
returns requested server density authentication value from pillar .
train
true
385
def end_threads():
    """Signal every pooled thread to stop, then wait for all of them.

    Must be called once when the GUI shuts down. The original flagged and
    joined each thread in a single loop, which serialized the shutdown
    (thread N+1 wasn't told to stop until thread N had fully exited);
    setting all flags first lets the threads wind down concurrently. The
    final state — all flags set, all threads joined — is unchanged.
    """
    for t in _threadPool:
        t.my_thread_ended = True
    for t in _threadPool:
        t.join()
[ "def", "end_threads", "(", ")", ":", "for", "t", "in", "_threadPool", ":", "t", ".", "my_thread_ended", "=", "True", "t", ".", "join", "(", ")" ]
this function must be called once when the gui shuts down .
train
false
387
def _remove_dups(L): seen_before = set([]) L2 = [] for i in L: if (i not in seen_before): seen_before.add(i) L2.append(i) return L2
[ "def", "_remove_dups", "(", "L", ")", ":", "seen_before", "=", "set", "(", "[", "]", ")", "L2", "=", "[", "]", "for", "i", "in", "L", ":", "if", "(", "i", "not", "in", "seen_before", ")", ":", "seen_before", ".", "add", "(", "i", ")", "L2", "...
removes duplicates and preserves the original order of the elements .
train
false
388
def set_computer_name(name):
    """Set the computer name via the ``systemsetup`` utility and confirm
    the change took effect."""
    salt.utils.mac_utils.execute_return_success(
        'systemsetup -setcomputername "{0}"'.format(name))
    return salt.utils.mac_utils.confirm_updated(name, get_computer_name)
[ "def", "set_computer_name", "(", "name", ")", ":", "cmd", "=", "'systemsetup -setcomputername \"{0}\"'", ".", "format", "(", "name", ")", "salt", ".", "utils", ".", "mac_utils", ".", "execute_return_success", "(", "cmd", ")", "return", "salt", ".", "utils", "....
set the computer name via the macos systemsetup utility .
train
false
389
def eval_kfold(A, B, train, labels, shuffle=True, k=10, seed=1234, use_feats=False):
    """Pick the best logistic-regression C via k-fold cross validation.

    Features are built from the pair embeddings ``A`` and ``B`` as
    ``|A - B|`` and ``A * B`` (optionally concatenated with hand-crafted
    ``feats`` of the raw sentence pair). C is scanned over powers of two
    (1..256) and the value with the best mean F1 across folds is returned.

    NOTE(review): uses the pre-0.18 sklearn ``KFold(n, n_folds=...)`` API.
    """
    labels = np.array(labels)
    if use_feats:
        features = np.c_[(np.abs((A - B)), (A * B), feats(train[0], train[1]))]
    else:
        features = np.c_[(np.abs((A - B)), (A * B))]
    # Regularization strengths to scan: 2**0 .. 2**8.
    scan = [(2 ** t) for t in range(0, 9, 1)]
    npts = len(features)
    kf = KFold(npts, n_folds=k, shuffle=shuffle, random_state=seed)
    scores = []
    for s in scan:
        scanscores = []
        # NOTE(review): the loop below rebinds the *train* parameter with
        # fold indices; the original argument is no longer reachable here.
        for (train, test) in kf:
            X_train = features[train]
            y_train = labels[train]
            X_test = features[test]
            y_test = labels[test]
            clf = LogisticRegression(C=s)
            clf.fit(X_train, y_train)
            yhat = clf.predict(X_test)
            fscore = f1(y_test, yhat)
            scanscores.append(fscore)
            print (s, fscore)
        # Mean F1 over the k folds for this C.
        scores.append(np.mean(scanscores))
        print scores
    # Best C by mean F1.
    s_ind = np.argmax(scores)
    s = scan[s_ind]
    print scores
    print s
    return s
[ "def", "eval_kfold", "(", "A", ",", "B", ",", "train", ",", "labels", ",", "shuffle", "=", "True", ",", "k", "=", "10", ",", "seed", "=", "1234", ",", "use_feats", "=", "False", ")", ":", "labels", "=", "np", ".", "array", "(", "labels", ")", "...
perform k-fold cross validation .
train
false
390
def ix_(*args):
    """Construct an open mesh from multiple sequences (CuPy analogue of
    ``numpy.ix_``).

    Each 1-D input is reshaped so that it spans its own axis, allowing
    the returned arrays to broadcast against each other for
    cross-product indexing.
    """
    out = []
    nd = len(args)
    for (k, new) in enumerate(args):
        new = cupy.asarray(new)
        if (new.ndim != 1):
            raise ValueError('Cross index must be 1 dimensional')
        if (new.size == 0):
            # Empty input: force an integer dtype so it stays a valid index.
            new = new.astype(numpy.intp)
        if cupy.issubdtype(new.dtype, cupy.bool_):
            # Boolean masks become the indices of their True entries.
            (new,) = new.nonzero()
        # Place this sequence along axis k: shape (1, ..., size, ..., 1).
        new = new.reshape(((((1,) * k) + (new.size,)) + ((1,) * ((nd - k) - 1))))
        out.append(new)
    return tuple(out)
[ "def", "ix_", "(", "*", "args", ")", ":", "out", "=", "[", "]", "nd", "=", "len", "(", "args", ")", "for", "(", "k", ",", "new", ")", "in", "enumerate", "(", "args", ")", ":", "new", "=", "cupy", ".", "asarray", "(", "new", ")", "if", "(", ...
construct an open mesh from multiple sequences .
train
false
391
def filter_pathext(val):
    """Return the extension of a path (including the leading dot), or ''
    when there is none; a falsy *val* is treated as the empty path."""
    root, ext = os.path.splitext(val or u'')
    return ext
[ "def", "filter_pathext", "(", "val", ")", ":", "return", "os", ".", "path", ".", "splitext", "(", "(", "val", "or", "u''", ")", ")", "[", "1", "]" ]
extension of a path .
train
false
392
def _virt_call(domain, function, section, comment, **kwargs):
    """Helper to call a ``virt.<function>`` execution-module function on
    every libvirt domain matching the (glob) *domain* pattern.

    Successful calls are collected under ``changes[section]``; domains
    that raise a libvirt error go under ``changes['ignored']`` when no
    domain changed at all. Returns a standard salt state ``ret`` dict.
    """
    ret = {'name': domain, 'changes': {}, 'result': True, 'comment': ''}
    # Expand the glob against the domains known to the hypervisor.
    targeted_domains = fnmatch.filter(__salt__['virt.list_domains'](), domain)
    changed_domains = list()
    ignored_domains = list()
    # NOTE(review): the loop variable shadows the *domain* parameter;
    # ret['name'] above already captured the original pattern.
    for domain in targeted_domains:
        try:
            response = __salt__['virt.{0}'.format(function)](domain, **kwargs)
            # Some virt functions return a dict; report just the name.
            if isinstance(response, dict):
                response = response['name']
            changed_domains.append({'domain': domain, function: response})
        except libvirt.libvirtError as err:
            ignored_domains.append({'domain': domain, 'issue': str(err)})
    if (not changed_domains):
        ret['result'] = False
        ret['comment'] = 'No changes had happened'
        if ignored_domains:
            ret['changes'] = {'ignored': ignored_domains}
    else:
        ret['changes'] = {section: changed_domains}
        ret['comment'] = comment
    return ret
[ "def", "_virt_call", "(", "domain", ",", "function", ",", "section", ",", "comment", ",", "**", "kwargs", ")", ":", "ret", "=", "{", "'name'", ":", "domain", ",", "'changes'", ":", "{", "}", ",", "'result'", ":", "True", ",", "'comment'", ":", "''", ...
helper to call the virt functions .
train
false
396
def crt1(m):
    """First part of the Chinese Remainder Theorem: precompute the CRT
    parameters for the moduli *m* over the integer domain ZZ."""
    return gf_crt1(m, ZZ)
[ "def", "crt1", "(", "m", ")", ":", "return", "gf_crt1", "(", "m", ",", "ZZ", ")" ]
first part of chinese remainder theorem .
train
false
398
def group_type_get(context, id, inactive=False, expected_fields=None):
    """Return a dict describing the group type with the given id,
    delegating to the backing DB implementation."""
    result = IMPL.group_type_get(context, id, inactive, expected_fields)
    return result
[ "def", "group_type_get", "(", "context", ",", "id", ",", "inactive", "=", "False", ",", "expected_fields", "=", "None", ")", ":", "return", "IMPL", ".", "group_type_get", "(", "context", ",", "id", ",", "inactive", ",", "expected_fields", ")" ]
return a dict describing specific group_type .
train
false
399
def init(mpstate):
    """Plugin entry point: instantiate the serial module for *mpstate*."""
    module = SerialModule(mpstate)
    return module
[ "def", "init", "(", "mpstate", ")", ":", "return", "SerialModule", "(", "mpstate", ")" ]
call to load the plugins machinery .
train
false
400
def _build_variant_map():
    """Map each declared VARIANT to the ``messages.Field`` subclass that
    declares it, scanning every attribute of the messages module."""
    result = {}
    for name in dir(messages):
        value = getattr(messages, name)
        # Only Field subclasses participate; everything else is skipped.
        if not (isinstance(value, type) and issubclass(value, messages.Field)):
            continue
        for variant in getattr(value, 'VARIANTS', []):
            result[variant] = value
    return result
[ "def", "_build_variant_map", "(", ")", ":", "result", "=", "{", "}", "for", "name", "in", "dir", "(", "messages", ")", ":", "value", "=", "getattr", "(", "messages", ",", "name", ")", "if", "(", "isinstance", "(", "value", ",", "type", ")", "and", ...
map variants to fields .
train
false
401
def identity(x):
    """Identity function: return *x* unchanged."""
    return x
[ "def", "identity", "(", "x", ")", ":", "return", "x" ]
identity function .
train
false
402
def hours_time_string(hours):
    """Format a fractional hour count as 'HH:MM', rounding to the nearest
    whole minute."""
    total_minutes = int(round(hours * 60))
    h, m = divmod(total_minutes, 60)
    return '%02d:%02d' % (h, m)
[ "def", "hours_time_string", "(", "hours", ")", ":", "minutes", "=", "int", "(", "round", "(", "(", "hours", "*", "60", ")", ")", ")", "return", "(", "'%02d:%02d'", "%", "divmod", "(", "minutes", ",", "60", ")", ")" ]
convert a number of hours into a string with format %h:%m .
train
false
404
@require_POST
@login_required
def unwatch_question(request, question_id):
    """Stop notifying the current user about replies to, or the solving
    of, the given question, then redirect back to it."""
    question = get_object_or_404(Question, pk=question_id)
    for event in (QuestionReplyEvent, QuestionSolvedEvent):
        event.stop_notifying(request.user, question)
    return HttpResponseRedirect(question.get_absolute_url())
[ "@", "require_POST", "@", "login_required", "def", "unwatch_question", "(", "request", ",", "question_id", ")", ":", "question", "=", "get_object_or_404", "(", "Question", ",", "pk", "=", "question_id", ")", "QuestionReplyEvent", ".", "stop_notifying", "(", "reque...
stop watching a question .
train
false
406
def getNewRepository():
    """Create and return a fresh ExportRepository instance."""
    repository = ExportRepository()
    return repository
[ "def", "getNewRepository", "(", ")", ":", "return", "ExportRepository", "(", ")" ]
get new repository .
train
false
407
def test_pycuda_theano():
    """Smoke test mixing a raw PyCUDA kernel with Theano CudaNdarray objects."""
    from pycuda.compiler import SourceModule
    # Elementwise product kernel, one thread per element.
    mod = SourceModule('\n__global__ void multiply_them(float *dest, float *a, float *b)\n{\n const int i = threadIdx.x;\n dest[i] = a[i] * b[i];\n}\n')
    multiply_them = mod.get_function('multiply_them')
    a = numpy.random.randn(100).astype(numpy.float32)
    b = numpy.random.randn(100).astype(numpy.float32)
    # Wrap the host arrays as Theano GPU ndarrays; PyCUDA accepts them directly.
    ga = cuda_ndarray.CudaNdarray(a)
    gb = cuda_ndarray.CudaNdarray(b)
    dest = cuda_ndarray.CudaNdarray.zeros(a.shape)
    # NOTE(review): the block launches 400 threads but the arrays hold only
    # 100 elements, so threads with i >= 100 index past the buffers —
    # block=(100, 1, 1) looks like what was intended; confirm.
    multiply_them(dest, ga, gb, block=(400, 1, 1), grid=(1, 1))
    assert (numpy.asarray(dest) == (a * b)).all()
[ "def", "test_pycuda_theano", "(", ")", ":", "from", "pycuda", ".", "compiler", "import", "SourceModule", "mod", "=", "SourceModule", "(", "'\\n__global__ void multiply_them(float *dest, float *a, float *b)\\n{\\n const int i = threadIdx.x;\\n dest[i] = a[i] * b[i];\\n}\\n'", ")", ...
simple example with pycuda function and theano cudandarray object .
train
false
408
def follow_dependencies(subset, package_list):
    """Expand *subset* with everything it transitively depends on,
    according to the dependency graph of *package_list*; returns the
    closure as a sorted list."""
    graph = get_dependency_graph(package_list)
    closure = set(subset)
    # Fixed-point iteration: keep adding direct dependencies until the
    # set stops growing.
    while True:
        expanded = set(closure)
        for pkg in closure:
            expanded.update(graph[pkg])
        if expanded == closure:
            return sorted(closure)
        closure = expanded
[ "def", "follow_dependencies", "(", "subset", ",", "package_list", ")", ":", "dependency_graph", "=", "get_dependency_graph", "(", "package_list", ")", "curr_pkgs", "=", "None", "updated_pkgs", "=", "set", "(", "subset", ")", "while", "(", "curr_pkgs", "!=", "upd...
expand the given subset with every package it transitively depends on and return the sorted closure .
train
false
409
def patch_images(new_image_dir):
    """Rebind the module-level image-path variables to files in a custom
    directory.

    For each ``(varname, filename)`` pair in ``_image_details`` whose file
    exists under *new_image_dir*, the module global ``varname`` is pointed
    at the new path via ``globals().update``. Passing ``'default'`` leaves
    the built-in paths untouched. Raises ``IOError`` when the directory
    does not exist.
    """
    pathto = partial(os.path.join, new_image_dir)
    if (new_image_dir != 'default'):
        if (not os.path.isdir(new_image_dir)):
            raise IOError('Unable to find the user supplied directory {}'.format(new_image_dir))
        # Only override variables whose replacement file actually exists;
        # missing files silently keep their default.
        new_images = ((varname, pathto(filename)) for (varname, filename) in _image_details if os.path.exists(pathto(filename)))
        globals().update(new_images)
[ "def", "patch_images", "(", "new_image_dir", ")", ":", "pathto", "=", "partial", "(", "os", ".", "path", ".", "join", ",", "new_image_dir", ")", "if", "(", "new_image_dir", "!=", "'default'", ")", ":", "if", "(", "not", "os", ".", "path", ".", "isdir",...
loads custom images from the user supplied directory .
train
false