id_within_dataset int64 1 55.5k | snippet stringlengths 19 14.2k | tokens listlengths 6 1.63k | nl stringlengths 6 352 | split_within_dataset stringclasses 1
value | is_duplicated bool 2
classes |
|---|---|---|---|---|---|
45,689 | def multipart_upload_lister(bucket, key_marker='', upload_id_marker='', headers=None, encoding_type=None):
more_results = True
k = None
while more_results:
rs = bucket.get_all_multipart_uploads(key_marker=key_marker, upload_id_marker=upload_id_marker, headers=headers, encoding_type=encoding_type)
for k in rs:
(yield k)
key_marker = rs.next_key_marker
if (key_marker and (encoding_type == 'url')):
key_marker = unquote_str(key_marker)
upload_id_marker = rs.next_upload_id_marker
more_results = rs.is_truncated
| [
"def",
"multipart_upload_lister",
"(",
"bucket",
",",
"key_marker",
"=",
"''",
",",
"upload_id_marker",
"=",
"''",
",",
"headers",
"=",
"None",
",",
"encoding_type",
"=",
"None",
")",
":",
"more_results",
"=",
"True",
"k",
"=",
"None",
"while",
"more_results... | a generator function for listing multipart uploads in a bucket . | train | true |
45,690 | def volume_glance_metadata_delete_by_volume(context, volume_id):
return IMPL.volume_glance_metadata_delete_by_volume(context, volume_id)
| [
"def",
"volume_glance_metadata_delete_by_volume",
"(",
"context",
",",
"volume_id",
")",
":",
"return",
"IMPL",
".",
"volume_glance_metadata_delete_by_volume",
"(",
"context",
",",
"volume_id",
")"
] | delete the glance metadata for a volume . | train | false |
45,691 | def test_pandas(test_data):
pd_data = test_data.pd_data.copy()
ds = ChartDataSource.from_data(pd_data)
assert (len(ds.columns) == 2)
assert (len(ds.index) == 4)
ds = ChartDataSource(pd_data)
assert (len(ds.columns) == 2)
assert (len(ds.index) == 4)
assert pd_data.equals(test_data.pd_data)
| [
"def",
"test_pandas",
"(",
"test_data",
")",
":",
"pd_data",
"=",
"test_data",
".",
"pd_data",
".",
"copy",
"(",
")",
"ds",
"=",
"ChartDataSource",
".",
"from_data",
"(",
"pd_data",
")",
"assert",
"(",
"len",
"(",
"ds",
".",
"columns",
")",
"==",
"2",
... | test creating chart data source from existing dataframe . | train | false |
45,692 | def setIndexLink(template, indexFilename):
indexLinks = domhelpers.findElementsWithAttribute(template, 'class', 'index-link')
for link in indexLinks:
if (indexFilename is None):
link.parentNode.removeChild(link)
else:
link.nodeName = link.tagName = link.endTagName = 'a'
for attrName in link.attributes.keys():
link.removeAttribute(attrName)
link.setAttribute('href', indexFilename)
| [
"def",
"setIndexLink",
"(",
"template",
",",
"indexFilename",
")",
":",
"indexLinks",
"=",
"domhelpers",
".",
"findElementsWithAttribute",
"(",
"template",
",",
"'class'",
",",
"'index-link'",
")",
"for",
"link",
"in",
"indexLinks",
":",
"if",
"(",
"indexFilenam... | insert a link to an index document . | train | false |
45,694 | def fix_torrent(file_path):
f = open(file_path, 'rb')
bdata = f.read()
f.close()
fixed_data = bdecode(bdata)
if (fixed_data is not None):
fixed_data = bencode(fixed_data)
return fixed_data
| [
"def",
"fix_torrent",
"(",
"file_path",
")",
":",
"f",
"=",
"open",
"(",
"file_path",
",",
"'rb'",
")",
"bdata",
"=",
"f",
".",
"read",
"(",
")",
"f",
".",
"close",
"(",
")",
"fixed_data",
"=",
"bdecode",
"(",
"bdata",
")",
"if",
"(",
"fixed_data",... | reads and checks if a torrent file is valid and tries to overwrite the torrent file with a non-sloppy version . | train | false |
45,696 | def getJoinOfXIntersectionIndexes(xIntersectionIndexList):
xIntersections = []
solidTable = {}
solid = False
xIntersectionIndexList.sort()
for xIntersectionIndex in xIntersectionIndexList:
toggleHashtable(solidTable, xIntersectionIndex.index, '')
oldSolid = solid
solid = (len(solidTable) > 0)
if (oldSolid != solid):
xIntersections.append(xIntersectionIndex.x)
return xIntersections
| [
"def",
"getJoinOfXIntersectionIndexes",
"(",
"xIntersectionIndexList",
")",
":",
"xIntersections",
"=",
"[",
"]",
"solidTable",
"=",
"{",
"}",
"solid",
"=",
"False",
"xIntersectionIndexList",
".",
"sort",
"(",
")",
"for",
"xIntersectionIndex",
"in",
"xIntersectionIn... | get joined x intersections from surrounding layers . | train | false |
45,699 | def charade_cli():
from sys import argv
for path in argv[1:]:
print _description_of(path)
| [
"def",
"charade_cli",
"(",
")",
":",
"from",
"sys",
"import",
"argv",
"for",
"path",
"in",
"argv",
"[",
"1",
":",
"]",
":",
"print",
"_description_of",
"(",
"path",
")"
] | script which takes one or more file paths and reports on their detected encodings example:: % chardetect . | train | false |
45,700 | def test_renn_sample_wt_fit():
renn = RepeatedEditedNearestNeighbours(random_state=RND_SEED)
assert_raises(RuntimeError, renn.sample, X, Y)
| [
"def",
"test_renn_sample_wt_fit",
"(",
")",
":",
"renn",
"=",
"RepeatedEditedNearestNeighbours",
"(",
"random_state",
"=",
"RND_SEED",
")",
"assert_raises",
"(",
"RuntimeError",
",",
"renn",
".",
"sample",
",",
"X",
",",
"Y",
")"
] | test either if an error is raised when sample is called before fitting . | train | false |
45,702 | def test_hcae_yaml():
limited_epoch_train(os.path.join(pylearn2.__path__[0], 'scripts/autoencoder_example/hcae.yaml'))
| [
"def",
"test_hcae_yaml",
"(",
")",
":",
"limited_epoch_train",
"(",
"os",
".",
"path",
".",
"join",
"(",
"pylearn2",
".",
"__path__",
"[",
"0",
"]",
",",
"'scripts/autoencoder_example/hcae.yaml'",
")",
")"
] | train a higher order contractive autoencoder for a single epoch . | train | false |
45,703 | def getText(elem):
l = []
for element in elem.getiterator():
if element.text:
l.append(element.text)
if element.tail:
l.append(element.tail)
return ' '.join(l)
| [
"def",
"getText",
"(",
"elem",
")",
":",
"l",
"=",
"[",
"]",
"for",
"element",
"in",
"elem",
".",
"getiterator",
"(",
")",
":",
"if",
"element",
".",
"text",
":",
"l",
".",
"append",
"(",
"element",
".",
"text",
")",
"if",
"element",
".",
"tail",... | return the internal text for the given elementtree node . | train | false |
45,704 | def _get_buckets():
return (__opts__['s3.buckets'] if ('s3.buckets' in __opts__) else {})
| [
"def",
"_get_buckets",
"(",
")",
":",
"return",
"(",
"__opts__",
"[",
"'s3.buckets'",
"]",
"if",
"(",
"'s3.buckets'",
"in",
"__opts__",
")",
"else",
"{",
"}",
")"
] | return the configuration buckets . | train | false |
45,705 | def insert_versioning_documents(resource, documents):
resource_def = app.config['DOMAIN'][resource]
_id = resource_def['id_field']
if (resource_def['versioning'] is True):
if (not isinstance(documents, list)):
documents = [documents]
request_auth_value = None
auth = resource_def['authentication']
auth_field = resource_def['auth_field']
if (auth and auth_field):
request_auth_value = auth.get_request_auth_value()
version = app.config['VERSION']
versioned_documents = []
for (index, document) in enumerate(documents):
ver_doc = {}
fields = versioned_fields(resource_def)
for field in document:
if (field in fields):
ver_doc[field] = document[field]
ver_doc[versioned_id_field(resource_def)] = document[_id]
ver_doc[version] = document[version]
if request_auth_value:
ver_doc[auth_field] = request_auth_value
versioned_documents.append(ver_doc)
source = resource_def['datasource']['source']
versionable_resource_name = (source + app.config['VERSIONS'])
app.data.insert(versionable_resource_name, versioned_documents)
| [
"def",
"insert_versioning_documents",
"(",
"resource",
",",
"documents",
")",
":",
"resource_def",
"=",
"app",
".",
"config",
"[",
"'DOMAIN'",
"]",
"[",
"resource",
"]",
"_id",
"=",
"resource_def",
"[",
"'id_field'",
"]",
"if",
"(",
"resource_def",
"[",
"'ve... | insert versioning copy of document . | train | false |
45,706 | def guess_archive_type(name):
name = name.lower()
for ending in ('tar', 'tar.gz', 'tar.bz2', 'tar.xz', 'tgz', 'tbz2', 'txz', 'tar.lzma', 'tlz'):
if name.endswith(('.' + ending)):
return 'tar'
for ending in ('zip', 'rar'):
if name.endswith(('.' + ending)):
return ending
return None
| [
"def",
"guess_archive_type",
"(",
"name",
")",
":",
"name",
"=",
"name",
".",
"lower",
"(",
")",
"for",
"ending",
"in",
"(",
"'tar'",
",",
"'tar.gz'",
",",
"'tar.bz2'",
",",
"'tar.xz'",
",",
"'tgz'",
",",
"'tbz2'",
",",
"'txz'",
",",
"'tar.lzma'",
",",... | guess an archive type by its file extension . | train | true |
45,707 | def ipv4_addr(addr):
return __ip_addr(addr, socket.AF_INET)
| [
"def",
"ipv4_addr",
"(",
"addr",
")",
":",
"return",
"__ip_addr",
"(",
"addr",
",",
"socket",
".",
"AF_INET",
")"
] | returns true if the ipv4 address are valid . | train | false |
45,708 | def update_hash(hasher, obj):
hasher.update(str(type(obj)))
if isinstance(obj, (tuple, list)):
for e in obj:
update_hash(hasher, e)
elif isinstance(obj, dict):
for k in sorted(obj):
update_hash(hasher, k)
update_hash(hasher, obj[k])
else:
hasher.update(repr(obj))
| [
"def",
"update_hash",
"(",
"hasher",
",",
"obj",
")",
":",
"hasher",
".",
"update",
"(",
"str",
"(",
"type",
"(",
"obj",
")",
")",
")",
"if",
"isinstance",
"(",
"obj",
",",
"(",
"tuple",
",",
"list",
")",
")",
":",
"for",
"e",
"in",
"obj",
":",... | update a hashlib hasher with a nested object . | train | false |
45,710 | def load_publickey(type, buffer):
if isinstance(buffer, _text_type):
buffer = buffer.encode('ascii')
bio = _new_mem_buf(buffer)
if (type == FILETYPE_PEM):
evp_pkey = _lib.PEM_read_bio_PUBKEY(bio, _ffi.NULL, _ffi.NULL, _ffi.NULL)
elif (type == FILETYPE_ASN1):
evp_pkey = _lib.d2i_PUBKEY_bio(bio, _ffi.NULL)
else:
raise ValueError('type argument must be FILETYPE_PEM or FILETYPE_ASN1')
if (evp_pkey == _ffi.NULL):
_raise_current_error()
pkey = PKey.__new__(PKey)
pkey._pkey = _ffi.gc(evp_pkey, _lib.EVP_PKEY_free)
pkey._only_public = True
return pkey
| [
"def",
"load_publickey",
"(",
"type",
",",
"buffer",
")",
":",
"if",
"isinstance",
"(",
"buffer",
",",
"_text_type",
")",
":",
"buffer",
"=",
"buffer",
".",
"encode",
"(",
"'ascii'",
")",
"bio",
"=",
"_new_mem_buf",
"(",
"buffer",
")",
"if",
"(",
"type... | load a public key from a buffer . | train | true |
45,711 | def estimate_rank(data, tol='auto', return_singular=False, norm=True):
data = data.copy()
if (norm is True):
norms = _compute_row_norms(data)
data /= norms[:, np.newaxis]
s = linalg.svd(data, compute_uv=False, overwrite_a=True)
if isinstance(tol, string_types):
if (tol != 'auto'):
raise ValueError('tol must be "auto" or float')
eps = np.finfo(float).eps
tol = ((np.max(data.shape) * np.amax(s)) * eps)
tol = float(tol)
rank = np.sum((s > tol))
if (return_singular is True):
return (rank, s)
else:
return rank
| [
"def",
"estimate_rank",
"(",
"data",
",",
"tol",
"=",
"'auto'",
",",
"return_singular",
"=",
"False",
",",
"norm",
"=",
"True",
")",
":",
"data",
"=",
"data",
".",
"copy",
"(",
")",
"if",
"(",
"norm",
"is",
"True",
")",
":",
"norms",
"=",
"_compute... | estimate matrix rank to a specified relative precision using randomized methods . | train | false |
45,713 | @check_feature_enabled(feature_name='ENTRANCE_EXAMS')
def delete_entrance_exam(request, course_key):
return _delete_entrance_exam(request=request, course_key=course_key)
| [
"@",
"check_feature_enabled",
"(",
"feature_name",
"=",
"'ENTRANCE_EXAMS'",
")",
"def",
"delete_entrance_exam",
"(",
"request",
",",
"course_key",
")",
":",
"return",
"_delete_entrance_exam",
"(",
"request",
"=",
"request",
",",
"course_key",
"=",
"course_key",
")"
... | api method to delete an entrance exam . | train | false |
45,716 | def setglobalui(newui):
global globalui
globalui = newui
| [
"def",
"setglobalui",
"(",
"newui",
")",
":",
"global",
"globalui",
"globalui",
"=",
"newui"
] | set the global ui object to be used for logging . | train | false |
45,717 | def spline_filter(input, order=3, output=numpy.float64):
if ((order < 2) or (order > 5)):
raise RuntimeError('spline order not supported')
input = numpy.asarray(input)
if numpy.iscomplexobj(input):
raise TypeError('Complex type not supported')
(output, return_value) = _ni_support._get_output(output, input)
if ((order not in [0, 1]) and (input.ndim > 0)):
for axis in range(input.ndim):
spline_filter1d(input, order, axis, output=output)
input = output
else:
output[...] = input[...]
return return_value
| [
"def",
"spline_filter",
"(",
"input",
",",
"order",
"=",
"3",
",",
"output",
"=",
"numpy",
".",
"float64",
")",
":",
"if",
"(",
"(",
"order",
"<",
"2",
")",
"or",
"(",
"order",
">",
"5",
")",
")",
":",
"raise",
"RuntimeError",
"(",
"'spline order n... | multi-dimensional spline filter . | train | false |
45,721 | def do_float(value, default=0.0):
try:
return float(value)
except (TypeError, ValueError):
return default
| [
"def",
"do_float",
"(",
"value",
",",
"default",
"=",
"0.0",
")",
":",
"try",
":",
"return",
"float",
"(",
"value",
")",
"except",
"(",
"TypeError",
",",
"ValueError",
")",
":",
"return",
"default"
] | convert the value into a floating point number . | train | false |
45,722 | def equateSpherical(point, returnValue):
spherical = evaluate.getVector3ByFloatList(returnValue, point)
radius = spherical.x
elevationComplex = (euclidean.getWiddershinsUnitPolar(math.radians(spherical.z)) * radius)
azimuthComplex = (euclidean.getWiddershinsUnitPolar(math.radians(spherical.y)) * elevationComplex.real)
point.x = azimuthComplex.real
point.y = azimuthComplex.imag
point.z = elevationComplex.imag
| [
"def",
"equateSpherical",
"(",
"point",
",",
"returnValue",
")",
":",
"spherical",
"=",
"evaluate",
".",
"getVector3ByFloatList",
"(",
"returnValue",
",",
"point",
")",
"radius",
"=",
"spherical",
".",
"x",
"elevationComplex",
"=",
"(",
"euclidean",
".",
"getW... | get equation for spherical . | train | false |
45,723 | def _AllowBuilderStyleCalls(node):
def RecGetLeaves(node):
if isinstance(node, pytree.Leaf):
return [node]
children = []
for child in node.children:
children += RecGetLeaves(child)
return children
list_of_children = RecGetLeaves(node)
prev_child = None
for child in list_of_children:
if (child.value == '.'):
if (prev_child.lineno != child.lineno):
pytree_utils.SetNodeAnnotation(child, pytree_utils.Annotation.SPLIT_PENALTY, 0)
prev_child = child
| [
"def",
"_AllowBuilderStyleCalls",
"(",
"node",
")",
":",
"def",
"RecGetLeaves",
"(",
"node",
")",
":",
"if",
"isinstance",
"(",
"node",
",",
"pytree",
".",
"Leaf",
")",
":",
"return",
"[",
"node",
"]",
"children",
"=",
"[",
"]",
"for",
"child",
"in",
... | allow splitting before . | train | false |
45,724 | def test_sets():
objects = [set(), frozenset(), set([1]), frozenset([1]), set([1, 2]), frozenset([1, 2]), set([(-1), (-2), (-3)])]
expected = ['set()', 'frozenset()', '{1}', 'frozenset({1})', '{1, 2}', 'frozenset({1, 2})', '{-3, -2, -1}']
for (obj, expected_output) in zip(objects, expected):
got_output = pretty.pretty(obj)
(yield (nt.assert_equal, got_output, expected_output))
| [
"def",
"test_sets",
"(",
")",
":",
"objects",
"=",
"[",
"set",
"(",
")",
",",
"frozenset",
"(",
")",
",",
"set",
"(",
"[",
"1",
"]",
")",
",",
"frozenset",
"(",
"[",
"1",
"]",
")",
",",
"set",
"(",
"[",
"1",
",",
"2",
"]",
")",
",",
"froz... | test that set and frozenset use python 3 formatting . | train | false |
45,725 | def roots_chebyc(n, mu=False):
(x, w, m) = roots_chebyt(n, True)
x *= 2
w *= 2
m *= 2
if mu:
return (x, w, m)
else:
return (x, w)
| [
"def",
"roots_chebyc",
"(",
"n",
",",
"mu",
"=",
"False",
")",
":",
"(",
"x",
",",
"w",
",",
"m",
")",
"=",
"roots_chebyt",
"(",
"n",
",",
"True",
")",
"x",
"*=",
"2",
"w",
"*=",
"2",
"m",
"*=",
"2",
"if",
"mu",
":",
"return",
"(",
"x",
"... | gauss-chebyshev quadrature . | train | false |
45,726 | def get_cropped_centered_img_coords(options, tile_size, center_vector, crop, t):
(x, y) = get_tile_coords_from_tuple(options, t)
new_tile_x = ((x - crop[0]) - center_vector[0])
new_tile_y = ((y - crop[1]) - center_vector[1])
new_img_x = (new_tile_x * tile_size[0])
new_img_y = (new_tile_y * tile_size[1])
return (new_img_x, new_img_y)
| [
"def",
"get_cropped_centered_img_coords",
"(",
"options",
",",
"tile_size",
",",
"center_vector",
",",
"crop",
",",
"t",
")",
":",
"(",
"x",
",",
"y",
")",
"=",
"get_tile_coords_from_tuple",
"(",
"options",
",",
"t",
")",
"new_tile_x",
"=",
"(",
"(",
"x",
... | returns the new image coords used to paste tiles in the big image . | train | false |
45,728 | def _discover(hass, config, component_name, found_tellcore_devices):
if (not len(found_tellcore_devices)):
return
_LOGGER.info('Discovered %d new %s devices', len(found_tellcore_devices), component_name)
signal_repetitions = config[DOMAIN].get(ATTR_SIGNAL_REPETITIONS)
discovery.load_platform(hass, component_name, DOMAIN, {ATTR_DISCOVER_DEVICES: found_tellcore_devices, ATTR_DISCOVER_CONFIG: signal_repetitions}, config)
| [
"def",
"_discover",
"(",
"hass",
",",
"config",
",",
"component_name",
",",
"found_tellcore_devices",
")",
":",
"if",
"(",
"not",
"len",
"(",
"found_tellcore_devices",
")",
")",
":",
"return",
"_LOGGER",
".",
"info",
"(",
"'Discovered %d new %s devices'",
",",
... | setup and send the discovery event . | train | false |
45,730 | def valid_locale_fallback(desired_locale=None):
candidates_windows = [str(u'English'), str(u'C')]
candidates_posix = [str(u'en_US.UTF-8'), str(u'C')]
candidates = (candidates_windows if (sys.platform == u'win32') else candidates_posix)
if desired_locale:
candidates = list(candidates)
candidates.insert(0, desired_locale)
found_valid = False
for locale_n in candidates:
found_valid = is_valid_locale(locale_n)
if found_valid:
break
if (not found_valid):
msg = u'Could not find a valid fallback locale, tried: {0}'
utils.LOGGER.warn(msg.format(candidates))
elif (desired_locale and (desired_locale != locale_n)):
msg = u'Desired fallback locale {0} could not be set, using: {1}'
utils.LOGGER.warn(msg.format(desired_locale, locale_n))
return locale_n
| [
"def",
"valid_locale_fallback",
"(",
"desired_locale",
"=",
"None",
")",
":",
"candidates_windows",
"=",
"[",
"str",
"(",
"u'English'",
")",
",",
"str",
"(",
"u'C'",
")",
"]",
"candidates_posix",
"=",
"[",
"str",
"(",
"u'en_US.UTF-8'",
")",
",",
"str",
"("... | provide a default fallback_locale . | train | false |
45,732 | def execute_compile_sass(args):
for sys in args.system:
options = ''
options += ((' --theme-dirs ' + ' '.join(args.theme_dirs)) if args.theme_dirs else '')
options += ((' --themes ' + ' '.join(args.themes)) if args.themes else '')
options += (' --debug' if args.debug else '')
sh(django_cmd(sys, args.settings, 'compile_sass {system} {options}'.format(system=('cms' if (sys == 'studio') else sys), options=options)))
| [
"def",
"execute_compile_sass",
"(",
"args",
")",
":",
"for",
"sys",
"in",
"args",
".",
"system",
":",
"options",
"=",
"''",
"options",
"+=",
"(",
"(",
"' --theme-dirs '",
"+",
"' '",
".",
"join",
"(",
"args",
".",
"theme_dirs",
")",
")",
"if",
"args",
... | construct django management command compile_sass and execute it . | train | false |
45,733 | def reverse_lex_128(ustring):
newstr = u''
for ii in ustring:
ordinance = ord(ii)
new_byte = (127 - ordinance)
char = unichr(new_byte)
newstr += char
return newstr
| [
"def",
"reverse_lex_128",
"(",
"ustring",
")",
":",
"newstr",
"=",
"u''",
"for",
"ii",
"in",
"ustring",
":",
"ordinance",
"=",
"ord",
"(",
"ii",
")",
"new_byte",
"=",
"(",
"127",
"-",
"ordinance",
")",
"char",
"=",
"unichr",
"(",
"new_byte",
")",
"ne... | certain datastores are unable to store keys with unichars of 128 or more this function reflects on 127 and less . | train | false |
45,734 | def javascript_alert(url, js_msg, abort_on):
log.js.debug('alert: {}'.format(js_msg))
if config.get('ui', 'modal-js-dialog'):
raise CallSuper
if config.get('content', 'ignore-javascript-alert'):
return
msg = 'From <b>{}</b>:<br/>{}'.format(html.escape(url.toDisplayString()), html.escape(js_msg))
message.ask('Javascript alert', msg, mode=usertypes.PromptMode.alert, abort_on=abort_on)
| [
"def",
"javascript_alert",
"(",
"url",
",",
"js_msg",
",",
"abort_on",
")",
":",
"log",
".",
"js",
".",
"debug",
"(",
"'alert: {}'",
".",
"format",
"(",
"js_msg",
")",
")",
"if",
"config",
".",
"get",
"(",
"'ui'",
",",
"'modal-js-dialog'",
")",
":",
... | display a javascript alert . | train | false |
45,735 | def _get_contact_type(user):
contact_type_name = getattr(settings, 'HARDTREE_IDENTITIES_DEFAULT_TYPE', 'person')
contact_type = Object.filter_permitted(user, ContactType.objects).filter(name__iexact=contact_type_name)
try:
contact_type = contact_type[0]
except IndexError:
contact_type = None
return contact_type
| [
"def",
"_get_contact_type",
"(",
"user",
")",
":",
"contact_type_name",
"=",
"getattr",
"(",
"settings",
",",
"'HARDTREE_IDENTITIES_DEFAULT_TYPE'",
",",
"'person'",
")",
"contact_type",
"=",
"Object",
".",
"filter_permitted",
"(",
"user",
",",
"ContactType",
".",
... | returns default contact_type for integration . | train | false |
45,736 | def upload_local(uploaded_file, key, **kwargs):
filename = secure_filename(uploaded_file.filename)
file_relative_path = ((((('static/media/' + key) + '/') + generate_hash(key)) + '/') + filename)
file_path = ((app.config['BASE_DIR'] + '/') + file_relative_path)
dir_path = file_path.rsplit('/', 1)[0]
try:
rmtree(dir_path)
except OSError:
pass
if (not os.path.isdir(dir_path)):
os.makedirs(dir_path)
uploaded_file.save(file_path)
return ('/serve_' + file_relative_path)
| [
"def",
"upload_local",
"(",
"uploaded_file",
",",
"key",
",",
"**",
"kwargs",
")",
":",
"filename",
"=",
"secure_filename",
"(",
"uploaded_file",
".",
"filename",
")",
"file_relative_path",
"=",
"(",
"(",
"(",
"(",
"(",
"'static/media/'",
"+",
"key",
")",
... | uploads file locally . | train | false |
45,742 | def get_reader_class(reader_name):
reader_name = reader_name.lower()
if (reader_name in _reader_aliases):
reader_name = _reader_aliases[reader_name]
try:
module = __import__(reader_name, globals(), locals(), level=1)
except ImportError:
module = __import__(reader_name, globals(), locals(), level=0)
return module.Reader
| [
"def",
"get_reader_class",
"(",
"reader_name",
")",
":",
"reader_name",
"=",
"reader_name",
".",
"lower",
"(",
")",
"if",
"(",
"reader_name",
"in",
"_reader_aliases",
")",
":",
"reader_name",
"=",
"_reader_aliases",
"[",
"reader_name",
"]",
"try",
":",
"module... | return the reader class from the reader_name module . | train | false |
45,743 | def get_all_interfaces():
return _interface_configs()
| [
"def",
"get_all_interfaces",
"(",
")",
":",
"return",
"_interface_configs",
"(",
")"
] | return configs for all interfaces cli example: . | train | false |
45,744 | def _root_anderson_doc():
pass
| [
"def",
"_root_anderson_doc",
"(",
")",
":",
"pass"
] | options nit : int . | train | false |
45,745 | def replace_command(command, broken, matched):
new_cmds = get_close_matches(broken, matched, cutoff=0.1)
return [replace_argument(command.script, broken, new_cmd.strip()) for new_cmd in new_cmds]
| [
"def",
"replace_command",
"(",
"command",
",",
"broken",
",",
"matched",
")",
":",
"new_cmds",
"=",
"get_close_matches",
"(",
"broken",
",",
"matched",
",",
"cutoff",
"=",
"0.1",
")",
"return",
"[",
"replace_argument",
"(",
"command",
".",
"script",
",",
"... | helper for *_no_command rules . | train | true |
45,747 | def _process_data(this_round_fp, log_fh=None, error_profile=None):
cmd = ('%s -relscore_pairid %s %s.dat' % (get_flowgram_ali_exe(), error_profile, this_round_fp))
proc = Popen(cmd, shell=True, universal_newlines=True, stdout=PIPE, stderr=PIPE)
(stdout, stderr) = proc.communicate()
if (proc.returncode != 0):
host = gethostname()
if log_fh:
log_fh.write(('An error occured on %s at %f\n%s' % (host, time(), cmd)))
log_fh.write(stderr)
log_fh.close()
raise RuntimeError(('Worker process crashed. Aborting...!\n' + 'Note: You need to kill the other jobs yourself'))
if log_fh:
log_fh.write((this_round_fp + '... done!\n'))
return stdout
| [
"def",
"_process_data",
"(",
"this_round_fp",
",",
"log_fh",
"=",
"None",
",",
"error_profile",
"=",
"None",
")",
":",
"cmd",
"=",
"(",
"'%s -relscore_pairid %s %s.dat'",
"%",
"(",
"get_flowgram_ali_exe",
"(",
")",
",",
"error_profile",
",",
"this_round_fp",
")"... | compute alignment scores for flowgrams in this_round_fp . | train | false |
45,748 | def default_urlconf(request):
t = DEBUG_ENGINE.from_string(DEFAULT_URLCONF_TEMPLATE)
c = Context({'title': _('Welcome to Django'), 'heading': _('It worked!'), 'subheading': _('Congratulations on your first Django-powered page.'), 'instructions': _('Next, start your first app by running <code>python manage.py startapp [app_label]</code>.'), 'explanation': _("You're seeing this message because you have <code>DEBUG = True</code> in your Django settings file and you haven't configured any URLs. Get to work!")})
return HttpResponse(t.render(c), content_type='text/html')
| [
"def",
"default_urlconf",
"(",
"request",
")",
":",
"t",
"=",
"DEBUG_ENGINE",
".",
"from_string",
"(",
"DEFAULT_URLCONF_TEMPLATE",
")",
"c",
"=",
"Context",
"(",
"{",
"'title'",
":",
"_",
"(",
"'Welcome to Django'",
")",
",",
"'heading'",
":",
"_",
"(",
"'... | create an empty urlconf 404 error response . | train | false |
45,749 | def build_shed_app(simple_kwargs):
log.info('Tool shed database connection: %s', simple_kwargs['database_connection'])
simple_kwargs['__file__'] = 'tool_shed_wsgi.ini.sample'
simple_kwargs['global_conf'] = get_webapp_global_conf()
app = ToolshedUniverseApplication(**simple_kwargs)
database_contexts.tool_shed_context = app.model.context
log.info('Embedded Toolshed application started')
return app
| [
"def",
"build_shed_app",
"(",
"simple_kwargs",
")",
":",
"log",
".",
"info",
"(",
"'Tool shed database connection: %s'",
",",
"simple_kwargs",
"[",
"'database_connection'",
"]",
")",
"simple_kwargs",
"[",
"'__file__'",
"]",
"=",
"'tool_shed_wsgi.ini.sample'",
"simple_kw... | build a galaxy app object from a simple keyword arguments . | train | false |
45,750 | def accesskey(context, key):
if ('_accesskeys' not in context):
context.vars['_accesskeys'] = {}
if (key not in context.vars['_accesskeys']):
context.vars['_accesskeys'][key] = 1
return ('accesskey="%s"' % key)
return ''
| [
"def",
"accesskey",
"(",
"context",
",",
"key",
")",
":",
"if",
"(",
"'_accesskeys'",
"not",
"in",
"context",
")",
":",
"context",
".",
"vars",
"[",
"'_accesskeys'",
"]",
"=",
"{",
"}",
"if",
"(",
"key",
"not",
"in",
"context",
".",
"vars",
"[",
"'... | helper to output each access key only once . | train | false |
45,751 | def delete_doc_if_exists(doctype, name, force=0):
if db.exists(doctype, name):
delete_doc(doctype, name, force=force)
| [
"def",
"delete_doc_if_exists",
"(",
"doctype",
",",
"name",
",",
"force",
"=",
"0",
")",
":",
"if",
"db",
".",
"exists",
"(",
"doctype",
",",
"name",
")",
":",
"delete_doc",
"(",
"doctype",
",",
"name",
",",
"force",
"=",
"force",
")"
] | delete document if exists . | train | false |
45,753 | def connect_socket_file(path='/var/run/tor/control', password=None, chroot_path=None, controller=stem.control.Controller):
try:
control_socket = stem.socket.ControlSocketFile(path)
except stem.SocketError as exc:
print exc
return None
return _connect_auth(control_socket, password, True, chroot_path, controller)
| [
"def",
"connect_socket_file",
"(",
"path",
"=",
"'/var/run/tor/control'",
",",
"password",
"=",
"None",
",",
"chroot_path",
"=",
"None",
",",
"controller",
"=",
"stem",
".",
"control",
".",
"Controller",
")",
":",
"try",
":",
"control_socket",
"=",
"stem",
"... | convenience function for quickly getting a control connection . | train | false |
45,754 | def status(name, sig=None):
if sig:
return bool(__salt__['status.pid'](sig))
cmd = '{0} check {1}'.format(_cmd(), name)
return (not __salt__['cmd.retcode'](cmd))
| [
"def",
"status",
"(",
"name",
",",
"sig",
"=",
"None",
")",
":",
"if",
"sig",
":",
"return",
"bool",
"(",
"__salt__",
"[",
"'status.pid'",
"]",
"(",
"sig",
")",
")",
"cmd",
"=",
"'{0} check {1}'",
".",
"format",
"(",
"_cmd",
"(",
")",
",",
"name",
... | status of a vm . | train | true |
45,756 | def scrub_relative_urls(html):
try:
return re.sub(u'src[\\s]*=[\\s]*[\'"]files/([^\'"]*)[\'"]', u'src="/files/\\g<1>"', html)
except:
print u'Error', html
raise
| [
"def",
"scrub_relative_urls",
"(",
"html",
")",
":",
"try",
":",
"return",
"re",
".",
"sub",
"(",
"u'src[\\\\s]*=[\\\\s]*[\\'\"]files/([^\\'\"]*)[\\'\"]'",
",",
"u'src=\"/files/\\\\g<1>\"'",
",",
"html",
")",
"except",
":",
"print",
"u'Error'",
",",
"html",
"raise"
... | prepend a slash before a relative url . | train | false |
45,758 | def randomLetter():
return random.choice(string.ascii_letters)
| [
"def",
"randomLetter",
"(",
")",
":",
"return",
"random",
".",
"choice",
"(",
"string",
".",
"ascii_letters",
")"
] | returns a random ascii letter . | train | false |
45,759 | def _parse_dnamasq(filename):
fileopts = {}
if (not os.path.isfile(filename)):
raise CommandExecutionError("Error: No such file '{0}'".format(filename))
with salt.utils.fopen(filename, 'r') as fp_:
for line in fp_:
if (not line.strip()):
continue
if line.startswith('#'):
continue
if ('=' in line):
comps = line.split('=')
if (comps[0] in fileopts):
if isinstance(fileopts[comps[0]], str):
temp = fileopts[comps[0]]
fileopts[comps[0]] = [temp]
fileopts[comps[0]].append(comps[1].strip())
else:
fileopts[comps[0]] = comps[1].strip()
else:
if ('unparsed' not in fileopts):
fileopts['unparsed'] = []
fileopts['unparsed'].append(line)
return fileopts
| [
"def",
"_parse_dnamasq",
"(",
"filename",
")",
":",
"fileopts",
"=",
"{",
"}",
"if",
"(",
"not",
"os",
".",
"path",
".",
"isfile",
"(",
"filename",
")",
")",
":",
"raise",
"CommandExecutionError",
"(",
"\"Error: No such file '{0}'\"",
".",
"format",
"(",
"... | generic function for parsing dnsmasq files including includes . | train | false |
45,762 | def issue_completed(issue):
labels = issue.get('labels', [])
return any(((label['name'] == 'reso: completed') for label in labels))
| [
"def",
"issue_completed",
"(",
"issue",
")",
":",
"labels",
"=",
"issue",
".",
"get",
"(",
"'labels'",
",",
"[",
"]",
")",
"return",
"any",
"(",
"(",
"(",
"label",
"[",
"'name'",
"]",
"==",
"'reso: completed'",
")",
"for",
"label",
"in",
"labels",
")... | returns true iff this issue is has been resolved as completed . | train | false |
45,764 | def attach_video(func):
@functools.wraps(func)
def wrapper(self, *args, **kwgs):
self._need_attach_video = True
return func(self, *args, **kwgs)
return wrapper
| [
"def",
"attach_video",
"(",
"func",
")",
":",
"@",
"functools",
".",
"wraps",
"(",
"func",
")",
"def",
"wrapper",
"(",
"self",
",",
"*",
"args",
",",
"**",
"kwgs",
")",
":",
"self",
".",
"_need_attach_video",
"=",
"True",
"return",
"func",
"(",
"self... | notify test runner to attach test video in any case . | train | false |
45,765 | def _normalize_helper(number, replacements, remove_non_matches):
normalized_number = []
for char in number:
new_digit = replacements.get(char.upper(), None)
if (new_digit is not None):
normalized_number.append(new_digit)
elif (not remove_non_matches):
normalized_number.append(char)
return U_EMPTY_STRING.join(normalized_number)
| [
"def",
"_normalize_helper",
"(",
"number",
",",
"replacements",
",",
"remove_non_matches",
")",
":",
"normalized_number",
"=",
"[",
"]",
"for",
"char",
"in",
"number",
":",
"new_digit",
"=",
"replacements",
".",
"get",
"(",
"char",
".",
"upper",
"(",
")",
... | normalizes a string of characters representing a phone number by replacing all characters found in the accompanying map with the values therein . | train | true |
45,766 | def inventory_dispatcher():
DEBUG = True
class_mapper = {'cisco_ios_ssh': CiscoGatherInventory, 'arista_eos_ssh': AristaGatherInventory}
net_devices = NetworkDevice.objects.all()
for a_device in net_devices:
if ('ssh' in a_device.device_class):
if DEBUG:
print 'SSH inventory call: {} {}\n'.format(a_device.device_name, a_device.device_class)
ssh_connect = SSHConnection(a_device)
output = ssh_connect.send_command('show version\n')
inventory_obj = class_mapper[a_device.device_class](a_device, output)
inventory_obj.find_vendor()
inventory_obj.find_model()
inventory_obj.find_device_type()
inventory_obj.find_os_version()
inventory_obj.find_serial_number()
inventory_obj.find_uptime()
print 'Inventory gathering for device complete'
print_inventory(a_device)
elif ('onepk' in a_device.device_class):
if DEBUG:
print 'onePK inventory call: {} {}\n'.format(a_device.device_name, a_device.device_class)
pass
elif ('eapi' in a_device.device_class):
if DEBUG:
print 'eAPI inventory call: {} {}\n'.format(a_device.device_name, a_device.device_class)
pass
else:
pass
| [
"def",
"inventory_dispatcher",
"(",
")",
":",
"DEBUG",
"=",
"True",
"class_mapper",
"=",
"{",
"'cisco_ios_ssh'",
":",
"CiscoGatherInventory",
",",
"'arista_eos_ssh'",
":",
"AristaGatherInventory",
"}",
"net_devices",
"=",
"NetworkDevice",
".",
"objects",
".",
"all",... | dispatcher for calling ssh . | train | false |
def assertWrapperExceptionTypes(self, deferred, mainType, reasonTypes):
    """Assert that ``deferred`` fails with ``mainType`` and that the wrapped
    failure reasons match ``reasonTypes`` element for element (and in count).

    Returns a Deferred firing with the failure once it has been checked.
    """
    def checkReasons(err):
        # Each wrapped reason must be (a subclass of) the expected type.
        for (expected, wrapped) in zip(reasonTypes, err.reasons):
            wrapped.trap(expected)
        self.assertEqual(len(err.reasons), len(reasonTypes), ('len(%s) != len(%s)' % (err.reasons, reasonTypes)))
        return err
    checked = self.assertFailure(deferred, mainType)
    checked.addCallback(checkReasons)
    return checked
| [
"def",
"assertWrapperExceptionTypes",
"(",
"self",
",",
"deferred",
",",
"mainType",
",",
"reasonTypes",
")",
":",
"def",
"cbFailed",
"(",
"err",
")",
":",
"for",
"(",
"reason",
",",
"type",
")",
"in",
"zip",
"(",
"err",
".",
"reasons",
",",
"reasonTypes... | assert that the given l{deferred} fails with the exception given by c{maintype} and that the exceptions wrapped by the instance of c{maintype} it fails with match the list of exception types given by c{reasontypes} . | train | false |
def do_contact_person_info(lava):
    """Build a list of md.ContactPerson instances from configuration data.

    :param lava: list of attribute-value dicts (one per contact person),
        or None when no contacts are configured.
    :return: list of md.ContactPerson; contact_type defaults to 'technical'.
    """
    cps = []
    if (lava is None):
        return cps
    contact_person = md.ContactPerson
    for ava in lava:
        cper = md.ContactPerson()
        # Fill child elements declared on the ContactPerson schema class.
        for (key, classpec) in contact_person.c_children.values():
            try:
                value = ava[key]
                data = []
                if isinstance(classpec, list):
                    # List-valued child: accept a single string or a list of
                    # strings.  (basestring: this is Python 2 code.)
                    if isinstance(value, basestring):
                        data = [classpec[0](text=value)]
                    else:
                        for val in value:
                            data.append(classpec[0](text=val))
                else:
                    data = classpec(text=value)
                setattr(cper, key, data)
            except KeyError:
                # Keys absent from the configuration are simply skipped.
                pass
        # Copy plain XML attributes present in the configuration.
        for (prop, classpec, _) in contact_person.c_attributes.values():
            try:
                setattr(cper, prop, ava[prop])
            except KeyError:
                pass
        typ = getattr(cper, 'contact_type')
        if (not typ):
            setattr(cper, 'contact_type', 'technical')
        cps.append(cper)
    return cps
| [
"def",
"do_contact_person_info",
"(",
"lava",
")",
":",
"cps",
"=",
"[",
"]",
"if",
"(",
"lava",
"is",
"None",
")",
":",
"return",
"cps",
"contact_person",
"=",
"md",
".",
"ContactPerson",
"for",
"ava",
"in",
"lava",
":",
"cper",
"=",
"md",
".",
"Con... | creates a contactperson instance from configuration information . | train | false |
def trunc32(w):
    """Return the low 32 bits of ``w`` interpreted as a signed integer."""
    # Standard sign-extension trick: shift into unsigned range, mask,
    # then shift back so bit 31 becomes the sign.
    w = int((((w + 2147483648) & 4294967295) - 2147483648))
    assert (type(w) == int)
    return w
| [
"def",
"trunc32",
"(",
"w",
")",
":",
"w",
"=",
"int",
"(",
"(",
"(",
"w",
"&",
"2147483647",
")",
"|",
"(",
"-",
"(",
"w",
"&",
"2147483648",
")",
")",
")",
")",
"assert",
"(",
"type",
"(",
"w",
")",
"==",
"int",
")",
"return",
"w"
] | return the bottom 32 bits of w as a python int . | train | false |
def agent_service_deregister(consul_url=None, serviceid=None):
    """Remove a registered service from the local Consul agent.

    :param consul_url: Consul agent URL; resolved from config when omitted.
    :param serviceid: ID of the service to deregister (required).
    :return: dict with ``res`` (bool) and a human-readable ``message``.
    :raises SaltInvocationError: when ``serviceid`` is not supplied.
    """
    if not consul_url:
        consul_url = _get_config()
        if not consul_url:
            log.error('No Consul URL found.')
            return {'message': 'No Consul URL found.', 'res': False}
    if not serviceid:
        raise SaltInvocationError('Required argument "serviceid" is missing.')
    function = 'agent/service/deregister/{0}'.format(serviceid)
    res = _query(consul_url=consul_url, function=function, method='PUT', data={})
    if res['res']:
        return {'res': True, 'message': 'Service {0} removed from agent.'.format(serviceid)}
    return {'res': False, 'message': 'Unable to remove service {0}.'.format(serviceid)}
| [
"def",
"agent_service_deregister",
"(",
"consul_url",
"=",
"None",
",",
"serviceid",
"=",
"None",
")",
":",
"ret",
"=",
"{",
"}",
"data",
"=",
"{",
"}",
"if",
"(",
"not",
"consul_url",
")",
":",
"consul_url",
"=",
"_get_config",
"(",
")",
"if",
"(",
... | used to remove a service . | train | true |
def cpe2dict(cpe_str):
    """Parse a 'cpe:/' URI into its type/vendor/product/version fields.

    :param cpe_str: CPE string, e.g. 'cpe:/o:vendor:product:1.0'.
    :return: dict with keys 'type', 'vendor', 'product', 'version'
        (missing optional fields become empty strings).
    :raises ValueError: when the prefix is wrong or fewer than two
        components (type and vendor) are present.
    """
    if not cpe_str.startswith('cpe:/'):
        raise ValueError(('invalid cpe format (%s)\n' % cpe_str))
    # At most 4 fields; anything after the third ':' stays in the version.
    fields = cpe_str[5:].split(':', 3)
    if len(fields) < 2:
        raise ValueError(('invalid cpe format (%s)\n' % cpe_str))
    fields += [''] * (4 - len(fields))
    return {'type': fields[0], 'vendor': fields[1], 'product': fields[2], 'version': fields[3]}
| [
"def",
"cpe2dict",
"(",
"cpe_str",
")",
":",
"if",
"(",
"not",
"cpe_str",
".",
"startswith",
"(",
"'cpe:/'",
")",
")",
":",
"raise",
"ValueError",
"(",
"(",
"'invalid cpe format (%s)\\n'",
"%",
"cpe_str",
")",
")",
"cpe_body",
"=",
"cpe_str",
"[",
"5",
"... | helper function to parse cpes . | train | false |
def delete_cloudformation_stack(stack_id, aws_config):
    """Delete the given CloudFormation stack and wait for DELETE_COMPLETE."""
    delete_cmd = ['cloudformation', 'delete-stack', '--stack-name', stack_id]
    aws_output(delete_cmd, aws_config)
    return wait_for_stack_status(stack_id, 'DELETE_COMPLETE', aws_config)
| [
"def",
"delete_cloudformation_stack",
"(",
"stack_id",
",",
"aws_config",
")",
":",
"aws_output",
"(",
"[",
"'cloudformation'",
",",
"'delete-stack'",
",",
"'--stack-name'",
",",
"stack_id",
"]",
",",
"aws_config",
")",
"return",
"wait_for_stack_status",
"(",
"stack... | delete a cloudformation stack . | train | false |
def _get_request_audit_info(context, user_id=None):
    """Build a pycadf initiator Resource describing the incoming request.

    Pulls the remote address, user agent, and Keystone auth scope (user,
    project, domain) out of the request environment when available.

    :param context: request context that may carry an 'environment' dict.
    :param user_id: optional user id overriding the one in the auth context.
    :return: a pycadf ``Resource`` initiator.
    """
    remote_addr = http_user_agent = None
    project_id = domain_id = None
    if context and ('environment' in context) and context['environment']:
        environment = context['environment']
        remote_addr = environment.get('REMOTE_ADDR')
        http_user_agent = environment.get('HTTP_USER_AGENT')
        auth_ctx = environment.get('KEYSTONE_AUTH_CONTEXT', {})
        if not user_id:
            user_id = auth_ctx.get('user_id')
        project_id = auth_ctx.get('project_id')
        domain_id = auth_ctx.get('domain_id')
    host = pycadf.host.Host(address=remote_addr, agent=http_user_agent)
    initiator = resource.Resource(typeURI=taxonomy.ACCOUNT_USER, host=host)
    if user_id:
        initiator.user_id = user_id
        initiator.id = utils.resource_uuid(user_id)
    if project_id:
        initiator.project_id = project_id
    if domain_id:
        initiator.domain_id = domain_id
    return initiator
| [
"def",
"_get_request_audit_info",
"(",
"context",
",",
"user_id",
"=",
"None",
")",
":",
"remote_addr",
"=",
"None",
"http_user_agent",
"=",
"None",
"project_id",
"=",
"None",
"domain_id",
"=",
"None",
"if",
"(",
"context",
"and",
"(",
"'environment'",
"in",
... | collect audit information about the request used for cadf . | train | false |
def _compute_object_size(value):
    """Return the byte length of an unresolved (type_num, obj) pair for use
    with LRUSizeCache, unwrapping the extra header for delta types."""
    (type_num, raw) = value
    # Delta objects carry (base, chunks); only the chunks count toward size.
    chunks = raw[1] if (type_num in DELTA_TYPES) else raw
    return chunks_length(chunks)
| [
"def",
"_compute_object_size",
"(",
"value",
")",
":",
"(",
"num",
",",
"obj",
")",
"=",
"value",
"if",
"(",
"num",
"in",
"DELTA_TYPES",
")",
":",
"return",
"chunks_length",
"(",
"obj",
"[",
"1",
"]",
")",
"return",
"chunks_length",
"(",
"obj",
")"
] | compute the size of a unresolved object for use with lrusizecache . | train | false |
def vstack(operators, size):
    """Return a LinOp that vertically concatenates ``operators`` with the
    given output ``size``."""
    return lo.LinOp(lo.VSTACK, size, operators, None)
| [
"def",
"vstack",
"(",
"operators",
",",
"size",
")",
":",
"return",
"lo",
".",
"LinOp",
"(",
"lo",
".",
"VSTACK",
",",
"size",
",",
"operators",
",",
"None",
")"
] | concatenates operators vertically . | train | false |
def kinit(X, k):
    """Pick ``k`` initial cluster seeds from the rows of ``X`` in the
    k-means++ style: the first seed is random, each further seed is the
    point minimising the total residual squared distance.

    :param X: (n, d) array of points.
    :param k: number of seeds to choose.
    :return: (k, d) array of chosen seed points.
    """
    n = X.shape[0]
    'choose the 1st seed randomly, and store D(x)^2 in D[]'
    centers = [X[random.randint(n)]]
    # D[i] holds the squared distance of X[i] to its nearest chosen center.
    D = [(norm((x - centers[0])) ** 2) for x in X]
    for _ in range((k - 1)):
        bestDsum = bestIdx = (-1)
        for i in range(n):
            'Dsum = sum_{x in X} min(D(x)^2,||x-xi||^2)'
            # Total residual distance if X[i] were added as the next center.
            Dsum = reduce((lambda x, y: (x + y)), (min(D[j], (norm((X[j] - X[i])) ** 2)) for j in range(n)))
            if ((bestDsum < 0) or (Dsum < bestDsum)):
                (bestDsum, bestIdx) = (Dsum, i)
        centers.append(X[bestIdx])
        # Refresh nearest-center distances with the newly added center.
        D = [min(D[i], (norm((X[i] - X[bestIdx])) ** 2)) for i in range(n)]
    return array(centers)
| [
"def",
"kinit",
"(",
"X",
",",
"k",
")",
":",
"n",
"=",
"X",
".",
"shape",
"[",
"0",
"]",
"centers",
"=",
"[",
"X",
"[",
"random",
".",
"randint",
"(",
"n",
")",
"]",
"]",
"D",
"=",
"[",
"(",
"norm",
"(",
"(",
"x",
"-",
"centers",
"[",
... | init k seeds according to kmeans++ . | train | false |
def executeNextEvaluatorArguments(evaluator, evaluators, evaluatorIndex, nextEvaluator):
    """Apply ``evaluator.value`` as a function to ``nextEvaluator``'s
    arguments, store the result on ``nextEvaluator``, and remove the
    consumed evaluator from ``evaluators``.

    When ``evaluator.value`` is missing, prints a warning and leaves all
    state untouched.  Fix: the original used Python-2-only print statements;
    print() calls behave identically for a single argument on 2 and 3.
    """
    if (evaluator.value == None):
        print('Warning, executeNextEvaluatorArguments in evaluate can not get a evaluator.value for:')
        print(evaluatorIndex)
        print(evaluators)
        print(evaluator)
        return
    nextEvaluator.value = evaluator.value(*nextEvaluator.arguments)
    del evaluators[evaluatorIndex]
del evaluators[evaluatorIndex]
| [
"def",
"executeNextEvaluatorArguments",
"(",
"evaluator",
",",
"evaluators",
",",
"evaluatorIndex",
",",
"nextEvaluator",
")",
":",
"if",
"(",
"evaluator",
".",
"value",
"==",
"None",
")",
":",
"print",
"'Warning, executeNextEvaluatorArguments in evaluate can not get a ev... | execute the nextevaluator arguments . | train | false |
def gemset_list_all(runas=None):
    """Return a mapping of each installed ruby to its list of gemsets.

    :param runas: user to run the rvm command as.
    :return: dict {ruby_name: [gemset, ...]}; empty when rvm gives no output.
    """
    gemsets = {}
    current_ruby = None
    output = _rvm_do('default', ['rvm', 'gemset', 'list_all'], runas=runas)
    if not output:
        return gemsets
    ruby_header = re.compile('^gemsets for ([^ ]+)')
    gemset_entry = re.compile('^ ([^ ]+)')
    for line in output.splitlines():
        header = ruby_header.match(line)
        if header:
            # A new 'gemsets for <ruby>' section starts here.
            current_ruby = header.group(1)
            gemsets[current_ruby] = []
        entry = gemset_entry.match(line)
        if entry:
            gemsets[current_ruby].append(entry.group(1))
    return gemsets
| [
"def",
"gemset_list_all",
"(",
"runas",
"=",
"None",
")",
":",
"gemsets",
"=",
"{",
"}",
"current_ruby",
"=",
"None",
"output",
"=",
"_rvm_do",
"(",
"'default'",
",",
"[",
"'rvm'",
",",
"'gemset'",
",",
"'list_all'",
"]",
",",
"runas",
"=",
"runas",
")... | list all gemsets for all installed rubies . | train | true |
def check_output_and_error(*args, **kwargs):
    """Run ``check_output`` with stderr folded into stdout.

    On failure, raises CalledProcessErrorWithOutput so the combined
    stdout+stderr text appears when the exception is coerced to str.
    """
    kwargs['stderr'] = STDOUT
    try:
        result = check_output(*args, **kwargs)
    except CalledProcessError as err:
        raise CalledProcessErrorWithOutput(returncode=err.returncode, cmd=err.cmd, output=err.output)
    return result
| [
"def",
"check_output_and_error",
"(",
"*",
"args",
",",
"**",
"kwargs",
")",
":",
"kwargs",
"[",
"'stderr'",
"]",
"=",
"STDOUT",
"try",
":",
"return",
"check_output",
"(",
"*",
"args",
",",
"**",
"kwargs",
")",
"except",
"CalledProcessError",
"as",
"e",
... | like check_output but captures the stderr and raises an exception that incudes all the stdout and stderr output when coerced to str . | train | false |
def test_genelatex_no_wrap():
    """genelatex with wrap=False must not call kpsewhich and must emit the
    plain (non-breqn) document preamble."""
    def fail_if_called(filename):
        # kpsewhich lookups only happen when wrapping is requested.
        assert False, 'kpsewhich should not be called (called with {0})'.format(filename)
    with patch.object(latextools, 'kpsewhich', fail_if_called):
        generated = '\n'.join(latextools.genelatex('body text', False))
        nt.assert_equal(generated, '\\documentclass{article}\n\\usepackage{amsmath}\n\\usepackage{amsthm}\n\\usepackage{amssymb}\n\\usepackage{bm}\n\\pagestyle{empty}\n\\begin{document}\nbody text\n\\end{document}')
| [
"def",
"test_genelatex_no_wrap",
"(",
")",
":",
"def",
"mock_kpsewhich",
"(",
"filename",
")",
":",
"assert",
"False",
",",
"'kpsewhich should not be called (called with {0})'",
".",
"format",
"(",
"filename",
")",
"with",
"patch",
".",
"object",
"(",
"latextools",
... | test genelatex with wrap=false . | train | false |
def _getImports_pe(pth):
    """Return the set of DLL names the PE binary at ``pth`` depends on.

    Covers both the import table and DLLs referenced by forwarded export
    symbols (named as 'DLLNAME.symbol').
    """
    from ..lib import pefile
    pe = pefile.PE(pth, fast_load=True)
    # Only the import/export directories are needed; skip full parsing.
    pe.parse_data_directories(directories=[pefile.DIRECTORY_ENTRY['IMAGE_DIRECTORY_ENTRY_IMPORT'], pefile.DIRECTORY_ENTRY['IMAGE_DIRECTORY_ENTRY_EXPORT']], forwarded_exports_only=True, import_dllnames_only=True)
    dlls = set()
    for entry in getattr(pe, 'DIRECTORY_ENTRY_IMPORT', []):
        dlls.add(winutils.convert_dll_name_to_str(entry.dll))
    export_dir = getattr(pe, 'DIRECTORY_ENTRY_EXPORT', None)
    if export_dir:
        for sym in export_dir.symbols:
            if (sym.forwarder is not None):
                forwarder = winutils.convert_dll_name_to_str(sym.forwarder)
                (dll_name, _) = forwarder.split('.')
                dlls.add((dll_name + '.dll'))
    pe.close()
    return dlls
| [
"def",
"_getImports_pe",
"(",
"pth",
")",
":",
"from",
".",
".",
"lib",
"import",
"pefile",
"dlls",
"=",
"set",
"(",
")",
"pe",
"=",
"pefile",
".",
"PE",
"(",
"pth",
",",
"fast_load",
"=",
"True",
")",
"pe",
".",
"parse_data_directories",
"(",
"direc... | find the binary dependencies of pth . | train | false |
def make_most_abundant(seqs):
    """Return a chooser function that, given a list of seq ids, picks the id
    whose duplicate group (within ``seqs``) is largest."""
    seq_to_group = unique_id_map(seqs)
    groups = invert_dict(seq_to_group)
    def most_abundant(ids, seqs='ignored'):
        'Returns most abundant seq from ids'
        # Size of each id's duplicate group; argmax picks the biggest.
        group_sizes = [len(groups[seq_to_group[i]]) for i in ids]
        return ids[argmax(group_sizes)]
    return most_abundant
| [
"def",
"make_most_abundant",
"(",
"seqs",
")",
":",
"seq_to_group",
"=",
"unique_id_map",
"(",
"seqs",
")",
"groups",
"=",
"invert_dict",
"(",
"seq_to_group",
")",
"def",
"most_abundant",
"(",
"ids",
",",
"seqs",
"=",
"'ignored'",
")",
":",
"id_groups",
"=",... | makes function that chooses the most abundant seq from group . | train | false |
def codex_fresh(codex, module):
    """Return True when every grimoire in ``codex`` was updated within the
    last ``cache_valid_time`` seconds.

    :param codex: iterable of grimoire names.
    :param module: module object whose ``params`` carry 'cache_valid_time'.
    :return: False when caching is disabled (cache_valid_time falsy), a
        timestamp file is unreadable, or any grimoire is stale; else True.
    """
    if not module.params['cache_valid_time']:
        return False
    timedelta = datetime.timedelta(seconds=module.params['cache_valid_time'])
    for grimoire in codex:
        lastupdate_path = os.path.join(SORCERY_STATE_DIR, (grimoire + '.lastupdate'))
        try:
            mtime = os.stat(lastupdate_path).st_mtime
        except OSError:
            # Missing/unreadable timestamp file: treat the codex as stale.
            # (The original bare `except:` also swallowed unrelated errors
            # such as KeyboardInterrupt.)
            return False
        lastupdate_ts = datetime.datetime.fromtimestamp(mtime)
        if (lastupdate_ts + timedelta) < datetime.datetime.now():
            return False
    return True
| [
"def",
"codex_fresh",
"(",
"codex",
",",
"module",
")",
":",
"if",
"(",
"not",
"module",
".",
"params",
"[",
"'cache_valid_time'",
"]",
")",
":",
"return",
"False",
"timedelta",
"=",
"datetime",
".",
"timedelta",
"(",
"seconds",
"=",
"module",
".",
"para... | check if grimoire collection is fresh enough . | train | false |
def generate_confirmation_token(user):
    """Serialize a unique email-confirmation token for ``user``."""
    # Token payload: user id plus a digest of the current email address,
    # so changing the email invalidates outstanding tokens.
    payload = [str(user.id), md5(user.email)]
    return _security.confirm_serializer.dumps(payload)
| [
"def",
"generate_confirmation_token",
"(",
"user",
")",
":",
"data",
"=",
"[",
"str",
"(",
"user",
".",
"id",
")",
",",
"md5",
"(",
"user",
".",
"email",
")",
"]",
"return",
"_security",
".",
"confirm_serializer",
".",
"dumps",
"(",
"data",
")"
] | generates a unique confirmation token for the specified user . | train | true |
def parse_pdb_command(env, sig, signode):
    """Transform a pdb command signature into doc nodes.

    :return: the command name stripped of parentheses (used as the target).
    :raises ValueError: when the signature does not match.
    """
    match = pdbcmd_sig_re.match(sig)
    if match is None:
        raise ValueError
    (name, args) = match.groups()
    fullname = name.replace('(', '').replace(')', '')
    signode += addnodes.desc_name(name, name)
    if args:
        padded = ' ' + args
        signode += addnodes.desc_addname(padded, padded)
    return fullname
| [
"def",
"parse_pdb_command",
"(",
"env",
",",
"sig",
",",
"signode",
")",
":",
"m",
"=",
"pdbcmd_sig_re",
".",
"match",
"(",
"sig",
")",
"if",
"(",
"m",
"is",
"None",
")",
":",
"raise",
"ValueError",
"(",
"name",
",",
"args",
")",
"=",
"m",
".",
"... | transform a pdb command signature into rst nodes . | train | false |
def get_global_context(nav, config):
    """Return the template context shared by every page build.

    :param nav: site navigation object for the site being rendered.
    :param config: validated mkdocs config dict.
    :return: dict of values exposed to the theme templates.
    """
    extra_javascript = utils.create_media_urls(nav, config[u'extra_javascript'])
    extra_css = utils.create_media_urls(nav, config[u'extra_css'])
    # Honour SOURCE_DATE_EPOCH for reproducible builds; fall back to now.
    timestamp = int(os.environ.get(u'SOURCE_DATE_EPOCH', timegm(datetime.utcnow().utctimetuple())))
    return {u'nav': nav, u'base_url': nav.url_context.make_relative(u'/'), u'extra_css': extra_css, u'extra_javascript': extra_javascript, u'mkdocs_version': mkdocs.__version__, u'build_date_utc': datetime.utcfromtimestamp(timestamp), u'config': config, u'site_name': config[u'site_name'], u'site_url': config[u'site_url'], u'site_author': config[u'site_author'], u'homepage_url': nav.homepage.url, u'page_description': config[u'site_description'], u'favicon': config[u'site_favicon'], u'repo_url': config[u'repo_url'], u'repo_name': config[u'repo_name'], u'include_nav': config[u'include_nav'], u'include_next_prev': config[u'include_next_prev'], u'copyright': config[u'copyright'], u'google_analytics': config[u'google_analytics']}
| [
"def",
"get_global_context",
"(",
"nav",
",",
"config",
")",
":",
"extra_javascript",
"=",
"utils",
".",
"create_media_urls",
"(",
"nav",
",",
"config",
"[",
"u'extra_javascript'",
"]",
")",
"extra_css",
"=",
"utils",
".",
"create_media_urls",
"(",
"nav",
",",... | given the sitenavigation and config . | train | false |
def cont_inputs(f):
    """Return the continuous-typed input variables feeding the Theano
    variable (or graph) ``f``."""
    return typefilter(inputvars(f), continuous_types)
| [
"def",
"cont_inputs",
"(",
"f",
")",
":",
"return",
"typefilter",
"(",
"inputvars",
"(",
"f",
")",
",",
"continuous_types",
")"
] | get the continuous inputs into a theano variables parameters a : theano variable returns r : list of tensor variables that are continuous inputs . | train | false |
def ParseSuccessMsg(msg):
    """Match ``msg`` against the user_op_manager success-line pattern.

    :return: (user, device, op, class_name, method_name) tuple, or None when
        the line does not match or yields an unexpected group count.
    """
    match = re.match(kSuccessMsgRe, msg)
    if not match:
        return None
    try:
        (user, device, op, class_name, method_name) = match.groups()
    except Exception as e:
        # Wrong number of capture groups despite a match: log and bail out.
        logging.warning(('RE matched "%s", but extracted wrong numbers of items: %r' % (msg, e)))
        return None
    return (user, device, op, class_name, method_name)
| [
"def",
"ParseSuccessMsg",
"(",
"msg",
")",
":",
"parsed",
"=",
"re",
".",
"match",
"(",
"kSuccessMsgRe",
",",
"msg",
")",
"if",
"(",
"not",
"parsed",
")",
":",
"return",
"None",
"try",
":",
"(",
"user",
",",
"device",
",",
"op",
",",
"class_name",
... | attempt to parse the message for a user_op_manager success line and extract user . | train | false |
def relativeTo(base, relativee):
    """Express ``relativee`` relative to ``base`` and re-join it onto ``base``.

    :raises ValueError: when ``relativee`` is not inside ``base``.

    Fix: the original used a plain string-prefix test, so a sibling path such
    as '/a/bc' was wrongly accepted as a subpath of '/a/b'.  The check now
    requires either an exact match or a path-separator boundary.
    """
    basepath = os.path.abspath(base)
    relativee = os.path.abspath(relativee)
    if relativee == basepath or relativee.startswith(basepath + os.sep):
        relative = relativee[len(basepath):]
        if relative.startswith(os.sep):
            relative = relative[1:]
        return os.path.join(base, relative)
    raise ValueError(('%s is not a subpath of %s' % (relativee, basepath)))
| [
"def",
"relativeTo",
"(",
"base",
",",
"relativee",
")",
":",
"basepath",
"=",
"os",
".",
"path",
".",
"abspath",
"(",
"base",
")",
"relativee",
"=",
"os",
".",
"path",
".",
"abspath",
"(",
"relativee",
")",
"if",
"relativee",
".",
"startswith",
"(",
... | gets relativee relative to basepath . | train | true |
45,805 | def _is_png(filename):
return ('.png' in filename)
| [
"def",
"_is_png",
"(",
"filename",
")",
":",
"return",
"(",
"'.png'",
"in",
"filename",
")"
] | determine if a file contains a png format image . | train | false |
def add_instructor(course_key, requesting_user, new_instructor):
    """Grant ``new_instructor`` both the instructor and staff roles on the
    course; the staff addition is permission-checked via ``requesting_user``."""
    instructor_role = CourseInstructorRole(course_key)
    instructor_role.add_users(new_instructor)
    auth.add_users(requesting_user, CourseStaffRole(course_key), new_instructor)
| [
"def",
"add_instructor",
"(",
"course_key",
",",
"requesting_user",
",",
"new_instructor",
")",
":",
"CourseInstructorRole",
"(",
"course_key",
")",
".",
"add_users",
"(",
"new_instructor",
")",
"auth",
".",
"add_users",
"(",
"requesting_user",
",",
"CourseStaffRole... | adds given user as instructor and staff to the given course . | train | false |
def _firmware_update(firmwarefile='', host='', directory=''):
    """Fetch ``firmwarefile`` from the master into ``directory`` and push it
    to ``host`` through the DRAC module.

    NOTE(review): the [7:] slice appears to strip a 'salt://' URL prefix
    from the file name -- confirm against callers.
    """
    dest = os.path.join(directory, firmwarefile[7:])
    __salt__['cp.get_file'](firmwarefile, dest)
    creds = __pillar__['proxy']
    __salt__['dracr.update_firmware'](dest, host=host, admin_username=creds['admin_user'], admin_password=creds['admin_password'])
| [
"def",
"_firmware_update",
"(",
"firmwarefile",
"=",
"''",
",",
"host",
"=",
"''",
",",
"directory",
"=",
"''",
")",
":",
"dest",
"=",
"os",
".",
"path",
".",
"join",
"(",
"directory",
",",
"firmwarefile",
"[",
"7",
":",
"]",
")",
"__salt__",
"[",
... | update firmware for a single host . | train | true |
def DocToHelp(doc):
    """Reformat a __doc__ string as flag help text: blank out
    whitespace-only lines, drop the common indentation prefix, then re-flow
    single line breaks between non-space characters into spaces."""
    doc = doc.strip()
    # NOTE(review): ' DCTB ' looks like a corrupted tab escape ('\t') from an
    # encoding round-trip -- verify against the upstream source.
    whitespace_only_line = re.compile('^[ DCTB ]+$', re.M)
    doc = whitespace_only_line.sub('', doc)
    doc = CutCommonSpacePrefix(doc)
    # Fix: the original passed re.M as the positional *count* argument of
    # re.sub, silently capping the substitution at 8 replacements; pass it
    # as flags so every single newline is re-flowed.
    doc = re.sub('(?<=\\S)\n(?=\\S)', ' ', doc, flags=re.M)
    return doc
| [
"def",
"DocToHelp",
"(",
"doc",
")",
":",
"doc",
"=",
"doc",
".",
"strip",
"(",
")",
"whitespace_only_line",
"=",
"re",
".",
"compile",
"(",
"'^[ DCTB ]+$'",
",",
"re",
".",
"M",
")",
"doc",
"=",
"whitespace_only_line",
".",
"sub",
"(",
"''",
",",
"... | takes a __doc__ string and reformats it as help . | train | false |
def sm_backend_conf_create(context, values):
    """Create a new SM backend config entry (delegates to the DB backend)."""
    return IMPL.sm_backend_conf_create(context, values)
| [
"def",
"sm_backend_conf_create",
"(",
"context",
",",
"values",
")",
":",
"return",
"IMPL",
".",
"sm_backend_conf_create",
"(",
"context",
",",
"values",
")"
] | create a new sm backend config entry . | train | false |
def increment_odd(x):
    """Exercise stub: given a Theano vector ``x``, return the vector ``y``
    described in the accompanying course notes.

    Deliberately unimplemented -- the learner is meant to fill this in.
    """
    raise NotImplementedError('TODO: implement the function.')
| [
"def",
"increment_odd",
"(",
"x",
")",
":",
"raise",
"NotImplementedError",
"(",
"'TODO: implement the function.'",
")"
] | x: a theano vector returns: y: a theano vector equal to x . | train | false |
@task
def apiserver(ctx, port=8000, wait=True, autoreload=True, host='127.0.0.1', pty=True):
    """Run the API server (Django runserver), optionally detached.

    When ``wait`` is False the server is launched via subprocess.Popen and
    the Popen handle is returned instead of blocking on ctx.run.
    """
    env = os.environ.copy()
    cmd = 'DJANGO_SETTINGS_MODULE=api.base.settings {} manage.py runserver {}:{} --nothreading'.format(sys.executable, host, port)
    if not autoreload:
        cmd += ' --noreload'
    if settings.SECURE_MODE:
        # Swap in runsslserver and point it at the configured cert/key pair.
        cmd = cmd.replace('runserver', 'runsslserver')
        cmd += ' --certificate {} --key {}'.format(settings.OSF_SERVER_CERT, settings.OSF_SERVER_KEY)
    if not wait:
        from subprocess import Popen
        return Popen(cmd, shell=True, env=env)
    return ctx.run(cmd, echo=True, pty=pty)
| [
"@",
"task",
"def",
"apiserver",
"(",
"ctx",
",",
"port",
"=",
"8000",
",",
"wait",
"=",
"True",
",",
"autoreload",
"=",
"True",
",",
"host",
"=",
"'127.0.0.1'",
",",
"pty",
"=",
"True",
")",
":",
"env",
"=",
"os",
".",
"environ",
".",
"copy",
"(... | run the api server . | train | false |
def group_for_key(key):
    """Return a copy of ``key`` truncated to its root path element,
    identifying the entity group the key belongs to."""
    if not isinstance(key, entity_pb.Reference):
        key = entity_pb.Reference(key)
    root_key = entity_pb.Reference()
    root_key.CopyFrom(key)
    # Keep only the first path element: that element defines the group.
    root_key.path().clear_element()
    root_key.path().add_element().MergeFrom(key.path().element(0))
    return root_key
| [
"def",
"group_for_key",
"(",
"key",
")",
":",
"if",
"(",
"not",
"isinstance",
"(",
"key",
",",
"entity_pb",
".",
"Reference",
")",
")",
":",
"key",
"=",
"entity_pb",
".",
"Reference",
"(",
"key",
")",
"first_element",
"=",
"key",
".",
"path",
"(",
")... | extract the root path for a given key . | train | false |
def append(name, value, convert=False, delimiter=DEFAULT_TARGET_DELIM):
    """State: ensure ``value`` is present in the list stored in grain ``name``.

    :param name: grain name; ``delimiter`` occurrences are normalised to the
        default target delimiter for nested lookups.
    :param value: value to append when missing.
    :param convert: when True, a non-list existing grain is wrapped into a
        list before appending.
    :param delimiter: delimiter used in ``name`` for nested grains.
    :return: standard state return dict (name/changes/result/comment).
    """
    name = re.sub(delimiter, DEFAULT_TARGET_DELIM, name)
    ret = {'name': name, 'changes': {}, 'result': True, 'comment': ''}
    grain = __salt__['grains.get'](name, None)
    if grain:
        if isinstance(grain, list):
            if (value in grain):
                # Already present: nothing to do.
                ret['comment'] = 'Value {1} is already in the list for grain {0}'.format(name, value)
                return ret
            if __opts__['test']:
                # Dry run: report the pending change without applying it.
                ret['result'] = None
                ret['comment'] = 'Value {1} in grain {0} is set to be added'.format(name, value)
                ret['changes'] = {'added': value}
                return ret
            __salt__['grains.append'](name, value)
            ret['comment'] = 'Value {1} was added to grain {0}'.format(name, value)
            ret['changes'] = {'added': value}
        elif (convert is True):
            if __opts__['test']:
                ret['result'] = None
                ret['comment'] = 'Grain {0} is set to be converted to list and value {1} will be added'.format(name, value)
                ret['changes'] = {'added': value}
                return ret
            # Wrap the scalar grain into a list, then append the new value.
            grain = [grain]
            grain.append(value)
            __salt__['grains.setval'](name, grain)
            ret['comment'] = 'Value {1} was added to grain {0}'.format(name, value)
            ret['changes'] = {'added': value}
        else:
            ret['result'] = False
            ret['comment'] = 'Grain {0} is not a valid list'.format(name)
    else:
        # Also hit when the grain exists but is falsy (e.g. empty value).
        ret['result'] = False
        ret['comment'] = 'Grain {0} does not exist'.format(name)
    return ret
| [
"def",
"append",
"(",
"name",
",",
"value",
",",
"convert",
"=",
"False",
",",
"delimiter",
"=",
"DEFAULT_TARGET_DELIM",
")",
":",
"name",
"=",
"re",
".",
"sub",
"(",
"delimiter",
",",
"DEFAULT_TARGET_DELIM",
",",
"name",
")",
"ret",
"=",
"{",
"'name'",
... | append a to the list of the virtual ancestors . | train | true |
def validate_read_preference_mode(dummy, name):
    """Convert a read-preference mode name into its mode constant, raising
    ValueError with a clearer message for unknown names."""
    try:
        mode = read_pref_mode_from_name(name)
    except ValueError:
        raise ValueError(('%s is not a valid read preference' % (name,)))
    return mode
| [
"def",
"validate_read_preference_mode",
"(",
"dummy",
",",
"name",
")",
":",
"try",
":",
"return",
"read_pref_mode_from_name",
"(",
"name",
")",
"except",
"ValueError",
":",
"raise",
"ValueError",
"(",
"(",
"'%s is not a valid read preference'",
"%",
"(",
"name",
... | validate read preference mode for a mongoreplicasetclient . | train | false |
def encode_json_for_js(data, indent=None):
    """Serialize ``data`` to a JSON string escaped for safe embedding in
    HTML/JS via the JSONEncoderForHTML encoder."""
    return json.dumps(data, cls=JSONEncoderForHTML, indent=indent)
| [
"def",
"encode_json_for_js",
"(",
"data",
",",
"indent",
"=",
"None",
")",
":",
"return",
"json",
".",
"dumps",
"(",
"data",
",",
"indent",
"=",
"indent",
",",
"cls",
"=",
"JSONEncoderForHTML",
")"
] | converts data into a json string . | train | false |
def defined_at():
    """Walk up the call stack to the first frame outside this package and
    return a Frame record (line, package, module, filename) describing it.

    Only active when DEBUG is set; otherwise returns None implicitly.
    """
    if DEBUG:
        frame = inspect.currentframe()
        while frame:
            try:
                # Stop at the first frame belonging to another package.
                if (frame.f_globals['__package__'] != __package__):
                    break
            except KeyError:
                # Frames without __package__ end the walk.
                break
            frame = frame.f_back
        ret = Frame(frame.f_lineno, frame.f_globals.get('__package__'), frame.f_globals.get('__name__'), frame.f_code.co_filename)
        # Drop the frame reference promptly to break reference cycles.
        del frame
        return ret
| [
"def",
"defined_at",
"(",
")",
":",
"if",
"DEBUG",
":",
"frame",
"=",
"inspect",
".",
"currentframe",
"(",
")",
"while",
"frame",
":",
"try",
":",
"if",
"(",
"frame",
".",
"f_globals",
"[",
"'__package__'",
"]",
"!=",
"__package__",
")",
":",
"break",
... | get definition location of a pattern or a match . | train | true |
def describe_role(name, region=None, key=None, keyid=None, profile=None):
    """Fetch and normalise information about an IAM role.

    The assume-role policy document is URL-decoded and JSON-parsed, and
    empty 'Sid' entries are stripped from its statements.

    :return: role data, or False when the lookup fails.
    """
    conn = _get_conn(region=region, key=key, keyid=keyid, profile=profile)
    try:
        info = conn.get_role(name)
        if not info:
            return False
        role = info.get_role_response.get_role_result.role
        policy_doc = json.loads(_unquote(role.assume_role_policy_document))
        role['assume_role_policy_document'] = policy_doc
        for (policy_key, policy) in policy_doc.items():
            if policy_key != 'Statement':
                continue
            for statement in policy:
                # Empty Sid fields are noise; drop them.
                if ('Sid' in statement) and (not statement['Sid']):
                    del statement['Sid']
        return role
    except boto.exception.BotoServerError as e:
        log.debug(e)
        msg = 'Failed to get {0} information.'
        log.error(msg.format(name))
        return False
| [
"def",
"describe_role",
"(",
"name",
",",
"region",
"=",
"None",
",",
"key",
"=",
"None",
",",
"keyid",
"=",
"None",
",",
"profile",
"=",
"None",
")",
":",
"conn",
"=",
"_get_conn",
"(",
"region",
"=",
"region",
",",
"key",
"=",
"key",
",",
"keyid"... | get information for a role . | train | true |
def clone_vm(content, template, vm_name, si, datacenter_name, vm_folder, datastore_name, cluster_name, resource_pool, power_on):
    """Clone ``template`` into a new VM named ``vm_name`` and wait for the
    clone task to finish.

    Placement falls back sensibly: the datacenter's root VM folder when no
    folder is given, the template's first datastore when no datastore is
    given, and the cluster's root resource pool when no pool is given.
    Fix: the original used a Python-2-only print statement; the print()
    call behaves identically on Python 2 and 3.
    """
    datacenter = get_obj(content, [vim.Datacenter], datacenter_name)
    if vm_folder:
        destfolder = get_obj(content, [vim.Folder], vm_folder)
    else:
        destfolder = datacenter.vmFolder
    if datastore_name:
        datastore = get_obj(content, [vim.Datastore], datastore_name)
    else:
        datastore = get_obj(content, [vim.Datastore], template.datastore[0].info.name)
    cluster = get_obj(content, [vim.ClusterComputeResource], cluster_name)
    if resource_pool:
        resource_pool = get_obj(content, [vim.ResourcePool], resource_pool)
    else:
        resource_pool = cluster.resourcePool
    relospec = vim.vm.RelocateSpec()
    relospec.datastore = datastore
    relospec.pool = resource_pool
    clonespec = vim.vm.CloneSpec()
    clonespec.location = relospec
    clonespec.powerOn = power_on
    print('cloning VM...')
    task = template.Clone(folder=destfolder, name=vm_name, spec=clonespec)
    wait_for_task(task)
| [
"def",
"clone_vm",
"(",
"content",
",",
"template",
",",
"vm_name",
",",
"si",
",",
"datacenter_name",
",",
"vm_folder",
",",
"datastore_name",
",",
"cluster_name",
",",
"resource_pool",
",",
"power_on",
")",
":",
"datacenter",
"=",
"get_obj",
"(",
"content",
... | clone a vm from a template/vm . | train | false |
def getSynset(pos, offset):
    """Look up a synset by part of speech and database offset."""
    return _dictionaryFor(pos).getSynset(offset)
| [
"def",
"getSynset",
"(",
"pos",
",",
"offset",
")",
":",
"return",
"_dictionaryFor",
"(",
"pos",
")",
".",
"getSynset",
"(",
"offset",
")"
] | lookup a synset by its offset . | train | false |
def gateway_in_subnet_exists(client, subnet_id, allocation_id=None, check_mode=False):
    """List NAT gateways (available/pending) in ``subnet_id``.

    When ``allocation_id`` is given, only gateways holding an address with
    that allocation id are returned, and the second return value reports
    whether the allocation was seen at all.

    :return: (gateways, allocation_id_exists)
    """
    matched = []
    allocation_id_exists = False
    states = ['available', 'pending']
    (gws_retrieved, _, gws) = get_nat_gateways(client, subnet_id, states=states, check_mode=check_mode)
    if not gws_retrieved:
        return (matched, allocation_id_exists)
    for gw in gws:
        for address in gw['nat_gateway_addresses']:
            if not allocation_id:
                matched.append(gw)
            elif address.get('allocation_id') == allocation_id:
                allocation_id_exists = True
                matched.append(gw)
    return (matched, allocation_id_exists)
| [
"def",
"gateway_in_subnet_exists",
"(",
"client",
",",
"subnet_id",
",",
"allocation_id",
"=",
"None",
",",
"check_mode",
"=",
"False",
")",
":",
"allocation_id_exists",
"=",
"False",
"gateways",
"=",
"[",
"]",
"states",
"=",
"[",
"'available'",
",",
"'pending... | retrieve all nat gateways for a subnet . | train | false |
@utils.arg('class_name', metavar='<class>', help='Name of quota class to set the quotas for.')
@utils.arg('--monitors', metavar='<monitors>', type=int, default=None, help='New value for the "monitors" quota.')
@utils.arg('--snapshots', metavar='<snapshots>', type=int, default=None, help='New value for the "snapshots" quota.')
@utils.arg('--gigabytes', metavar='<gigabytes>', type=int, default=None, help='New value for the "gigabytes" quota.')
@utils.service_type('monitor')
def do_quota_class_update(cs, args):
    """CLI handler: update the quotas for a quota class from parsed args."""
    _quota_update(cs.quota_classes, args.class_name, args)
| [
"@",
"utils",
".",
"arg",
"(",
"'class_name'",
",",
"metavar",
"=",
"'<class>'",
",",
"help",
"=",
"'Name of quota class to set the quotas for.'",
")",
"@",
"utils",
".",
"arg",
"(",
"'--monitors'",
",",
"metavar",
"=",
"'<monitors>'",
",",
"type",
"=",
"int",... | update the quotas for a quota class . | train | false |
def cleanup_session(session):
    """Delete stale or malformed 'search_*' entries from session storage.

    An entry is removed when its value is not a dict or its 'ttl' lies in
    the past; non-search keys are never touched.
    """
    cutoff = int(time.time())
    stale = []
    for (key, value) in list(session.items()):
        if not key.startswith(u'search_'):
            continue
        if (not isinstance(value, dict)) or value[u'ttl'] < cutoff:
            stale.append(key)
    for key in stale:
        del session[key]
| [
"def",
"cleanup_session",
"(",
"session",
")",
":",
"now",
"=",
"int",
"(",
"time",
".",
"time",
"(",
")",
")",
"keys",
"=",
"list",
"(",
"session",
".",
"keys",
"(",
")",
")",
"for",
"key",
"in",
"keys",
":",
"if",
"(",
"not",
"key",
".",
"sta... | deletes old search results from session storage . | train | false |
@command('history clear')
def clear_history():
    """Erase the user's play history, persist it, and confirm on screen."""
    g.userhist['history'].songs = []
    history.save()
    g.message = 'History cleared'
    g.content = logo()
| [
"@",
"command",
"(",
"'history clear'",
")",
"def",
"clear_history",
"(",
")",
":",
"g",
".",
"userhist",
"[",
"'history'",
"]",
".",
"songs",
"=",
"[",
"]",
"history",
".",
"save",
"(",
")",
"g",
".",
"message",
"=",
"'History cleared'",
"g",
".",
"... | clears the users play history . | train | false |
def cut_graph(gr, imsize):
    """Solve max-flow on graph ``gr`` and return the binary segmentation
    labels reshaped to ``imsize`` (m, n).

    Nodes 0..m*n-1 are pixels; node m*n is the source, m*n+1 the sink.
    """
    (m, n) = imsize
    source = (m * n)
    sink = ((m * n) + 1)
    (flows, cuts) = maximum_flow(gr, source, sink)
    res = zeros((m * n))
    # Skip the last two entries (the source/sink labels).
    # NOTE(review): cuts.items()[:(-2)] is Python-2-only and relies on dict
    # ordering -- verify before porting to Python 3.
    for (pos, label) in cuts.items()[:(-2)]:
        res[pos] = label
    return res.reshape((m, n))
| [
"def",
"cut_graph",
"(",
"gr",
",",
"imsize",
")",
":",
"(",
"m",
",",
"n",
")",
"=",
"imsize",
"source",
"=",
"(",
"m",
"*",
"n",
")",
"sink",
"=",
"(",
"(",
"m",
"*",
"n",
")",
"+",
"1",
")",
"(",
"flows",
",",
"cuts",
")",
"=",
"maximu... | solve max flow of graph gr and return binary labels of the resulting segmentation . | train | false |
def CreateConfig(**kwds):
    """Build a datastore_rpc.Configuration from keyword options, for use in
    configuring datastore calls."""
    return datastore_rpc.Configuration(**kwds)
| [
"def",
"CreateConfig",
"(",
"**",
"kwds",
")",
":",
"return",
"datastore_rpc",
".",
"Configuration",
"(",
"**",
"kwds",
")"
] | create a configuration object for use in configuring datastore calls . | train | false |
def encode_rfc2231(s, charset=None, language=None):
    """Encode string ``s`` according to RFC 2231.

    When charset and/or language are given, the result takes the standard
    charset'language'value form.  (Python 2 code: uses urllib.quote.)
    """
    import urllib
    s = urllib.quote(s, safe='')
    if ((charset is None) and (language is None)):
        return s
    if (language is None):
        language = ''
    return ("%s'%s'%s" % (charset, language, s))
| [
"def",
"encode_rfc2231",
"(",
"s",
",",
"charset",
"=",
"None",
",",
"language",
"=",
"None",
")",
":",
"import",
"urllib",
"s",
"=",
"urllib",
".",
"quote",
"(",
"s",
",",
"safe",
"=",
"''",
")",
"if",
"(",
"(",
"charset",
"is",
"None",
")",
"an... | encode string according to rfc 2231 . | train | false |
def unparse_url(scheme, host, port, path=''):
    """Reassemble a URL string from components; hostport() decides whether
    the port needs to be spelled out for this scheme."""
    netloc = hostport(scheme, host, port)
    return ('%s://%s%s' % (scheme, netloc, path))
| [
"def",
"unparse_url",
"(",
"scheme",
",",
"host",
",",
"port",
",",
"path",
"=",
"''",
")",
":",
"return",
"(",
"'%s://%s%s'",
"%",
"(",
"scheme",
",",
"hostport",
"(",
"scheme",
",",
"host",
",",
"port",
")",
",",
"path",
")",
")"
] | returns a url string . | train | false |
@builtin(u'Lower-case text (ignore tags)', lower, apply_func_to_html_text)
def replace_lowercase_ignore_tags(match, number, file_name, metadata, dictionaries, data, functions, *args, **kwargs):
    """Search/replace builtin: lower-case the matched text while leaving any
    HTML tags inside the match untouched."""
    return apply_func_to_html_text(match, lower)
| [
"@",
"builtin",
"(",
"u'Lower-case text (ignore tags)'",
",",
"lower",
",",
"apply_func_to_html_text",
")",
"def",
"replace_lowercase_ignore_tags",
"(",
"match",
",",
"number",
",",
"file_name",
",",
"metadata",
",",
"dictionaries",
",",
"data",
",",
"functions",
",... | make matched text lower case . | train | false |
def IsNone(value):
    """True when ``value`` is None, an empty string, or the string 'none'
    in any letter case."""
    if value is None:
        return True
    return (value == '') or (value.lower() == 'none')
| [
"def",
"IsNone",
"(",
"value",
")",
":",
"return",
"(",
"(",
"value",
"is",
"None",
")",
"or",
"(",
"value",
"==",
"''",
")",
"or",
"(",
"value",
".",
"lower",
"(",
")",
"==",
"'none'",
")",
")"
] | return true if either none . | train | false |
def test_get_init_4():
    """get_init returns None when the test dir has no __init__ file."""
    nt.assert_is_none(mp.get_init(TMP_TEST_DIR))
| [
"def",
"test_get_init_4",
"(",
")",
":",
"nt",
".",
"assert_is_none",
"(",
"mp",
".",
"get_init",
"(",
"TMP_TEST_DIR",
")",
")"
] | get_init cant find __init__ in empty testdir . | train | false |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.