id_within_dataset (int64, 1 to 55.5k) | snippet (string, lengths 19 to 14.2k) | nl (string, lengths 6 to 352) | split_within_dataset (stringclasses, 1 value) | is_duplicated (bool, 2 classes)
|---|---|---|---|---|
12,843 | def svm_load_model(model_file_name):
model = libsvm.svm_load_model(model_file_name.encode())
if (not model):
print ("can't open model file %s" % model_file_name)
return None
model = toPyModel(model)
return model
| svm_load_model -> model ; load a libsvm model from model_file_name and return . | train | false |
12,844 | def split_strip(string, delimiter=u','):
if (not string):
return []
words = [w.strip() for w in string.split(delimiter)]
return [w for w in words if w]
| splits string on delimiter . | train | false |
12,845 | def fullmodname(path):
comparepath = os.path.normcase(path)
longest = ''
for dir in sys.path:
dir = os.path.normcase(dir)
if (comparepath.startswith(dir) and (comparepath[len(dir)] == os.sep)):
if (len(dir) > len(longest)):
longest = dir
if longest:
base = path[(len(longest) + 1):]
else:
base = path
base = base.replace(os.sep, '.')
if os.altsep:
base = base.replace(os.altsep, '.')
(filename, ext) = os.path.splitext(base)
return filename
| return a plausible module name for the path . | train | false |
12,846 | def test_non_existing_unknown_ext():
with pytest.raises(IOError):
data = Table.read(u'non-existing-file-with-unknown.ext')
| raise the correct error when attempting to read a non-existing file with an unknown extension . | train | false |
12,847 | def DNSServiceAddRecord(sdRef, flags=0, rrtype=_NO_DEFAULT, rdata=_NO_DEFAULT, ttl=0):
_NO_DEFAULT.check(rrtype)
_NO_DEFAULT.check(rdata)
(rdlen, rdata) = _string_to_length_and_void_p(rdata)
_global_lock.acquire()
try:
RecordRef = _DNSServiceAddRecord(sdRef, flags, rrtype, rdlen, rdata, ttl)
finally:
_global_lock.release()
sdRef._add_record_ref(RecordRef)
return RecordRef
| add a record to a registered service . | train | false |
12,848 | def check_strict_xfail(pyfuncitem):
evalxfail = pyfuncitem._evalxfail
if evalxfail.istrue():
strict_default = pyfuncitem.config.getini('xfail_strict')
is_strict_xfail = evalxfail.get('strict', strict_default)
if is_strict_xfail:
del pyfuncitem._evalxfail
explanation = evalxfail.getexplanation()
pytest.fail(('[XPASS(strict)] ' + explanation), pytrace=False)
| check xfail for the given passing test . | train | false |
12,849 | def register_local_role(name, role_fn):
set_implicit_options(role_fn)
_roles[name] = role_fn
| register an interpreted text role by its local or language-dependent name . | train | false |
12,850 | def intrinsics_multi_constructor(loader, tag_prefix, node):
tag = node.tag[1:]
prefix = 'Fn::'
if (tag == 'Ref'):
prefix = ''
cfntag = (prefix + tag)
if (tag == 'GetAtt'):
value = node.value.split('.', 1)
elif isinstance(node, ScalarNode):
value = loader.construct_scalar(node)
elif isinstance(node, SequenceNode):
value = loader.construct_sequence(node)
else:
value = loader.construct_mapping(node)
return {cfntag: value}
| yaml constructor to parse cloudformation intrinsics . | train | true |
12,851 | def nltkdemo18():
return [Template(Pos([(-1)])), Template(Pos([1])), Template(Pos([(-2)])), Template(Pos([2])), Template(Pos([(-2), (-1)])), Template(Pos([1, 2])), Template(Pos([(-3), (-2), (-1)])), Template(Pos([1, 2, 3])), Template(Pos([(-1)]), Pos([1])), Template(Word([(-1)])), Template(Word([1])), Template(Word([(-2)])), Template(Word([2])), Template(Word([(-2), (-1)])), Template(Word([1, 2])), Template(Word([(-3), (-2), (-1)])), Template(Word([1, 2, 3])), Template(Word([(-1)]), Word([1]))]
| return 18 templates . | train | false |
12,852 | def diagnose():
print('')
print('KA Lite diagnostics')
print('')
print('Calculating diagnostics...')
sys.stdout.flush()
print('')
diagnostics = []
diag = (lambda x, y: diagnostics.append((x, y)))
diag('KA Lite version', kalite.__version__)
diag('python', sys.version)
diag('platform', platform.platform())
(status_code, urls) = get_urls()
for addr in urls:
diag('server address', addr)
for addr in get_urls_proxy():
diag('server proxy', addr)
diag('server status', status.codes[status_code])
settings_imported = True
try:
from django.conf import settings
from django.template.defaultfilters import filesizeformat
except:
settings_imported = False
diag('Settings failure', traceback.format_exc())
if settings_imported:
diag('installed in', os.path.dirname(kalite.__file__))
diag('content root', settings.CONTENT_ROOT)
diag('content size', filesizeformat(get_size(settings.CONTENT_ROOT)))
diag('user database', settings.DATABASES['default']['NAME'])
try:
from securesync.models import Device
device = Device.get_own_device()
sync_sessions = device.client_sessions.all()
zone = device.get_zone()
diag('device name', str(device.name))
diag('device ID', str(device.id))
diag('device registered', str(device.is_registered()))
diag('synced', str((sync_sessions.latest('timestamp').timestamp if sync_sessions.exists() else 'Never')))
diag('sync result', (('OK' if (sync_sessions.latest('timestamp').errors == 0) else 'Error') if sync_sessions.exists() else '-'))
diag('zone ID', (str(zone.id) if zone else 'Unset'))
except:
diag('Device failure', traceback.format_exc())
for (k, v) in diagnostics:
values = str(v).split('\n')
values = '\n'.join(([values[0]] + map((lambda x: ((' ' * 22) + x)), values[1:])))
print((k.upper() + ': ').ljust(21), values)
| report diagnosis for any symptoms we find . | train | false |
12,853 | def to64(number):
if (not ((type(number) is types.LongType) or (type(number) is types.IntType))):
raise TypeError('You must pass a long or an int')
if (0 <= number <= 9):
return byte((number + 48))
if (10 <= number <= 35):
return byte((number + 55))
if (36 <= number <= 61):
return byte((number + 61))
if (number == 62):
return byte(45)
if (number == 63):
return byte(95)
raise ValueError(('Invalid Base64 value: %i' % number))
| converts a number in the range of 0 to 63 into a base 64 digit character . | train | false |
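Usage sketch for the to64 snippet above. The snippet is Python 2 code (types.LongType, long literals) and relies on a byte() helper that is not shown; byte = chr is assumed here purely for illustration:

```python
byte = chr  # assumed stand-in for the missing helper

to64(0)   # -> '0'  (code point 48)
to64(10)  # -> 'A'  (code point 65)
to64(36)  # -> 'a'  (code point 97)
to64(62)  # -> '-'
to64(63)  # -> '_'
```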
12,854 | @release.command()
def ghrelease():
version = get_version(1)
tag = ('v' + version)
with open(os.path.join(BASE, 'changelog.md')) as f:
cl_md = f.read()
subprocess.check_call(['github-release', 'release', '-u', GITHUB_USER, '-r', GITHUB_REPO, '--tag', tag, '--name', '{} {}'.format(GITHUB_REPO, version), '--description', cl_md])
tarball = os.path.join(BASE, 'dist', 'beets-{}.tar.gz'.format(version))
subprocess.check_call(['github-release', 'upload', '-u', GITHUB_USER, '-r', GITHUB_REPO, '--tag', tag, '--name', os.path.basename(tarball), '--file', tarball])
| create a github release using the github-release command-line tool . | train | false |
12,856 | def get_neg_detection_mode():
raise NotImplementedError('TODO: implement this function.')
| returns a theano mode that detects if any negative value occurs in the evaluation of a theano function . | train | false |
12,858 | def reverse_remove_duplicate_renditions(*args, **kwargs):
pass
| this is a no-op . | train | false |
12,860 | def _format_value(val, limit, level, len=len, repr=repr):
if (level <= 0):
return '...'
typ = type(val)
if (typ in EASY_TYPES):
if (typ is float):
rep = str(val)
elif (typ is long):
if (val >= (10L ** 99)):
return '...L'
elif (val <= (- (10L ** 98))):
return '-...L'
else:
rep = repr(val)
else:
rep = repr(val)
if ((typ is long) and (len(rep) > limit)):
n1 = ((limit - 3) // 2)
n2 = ((limit - 3) - n1)
rep = ((rep[:n1] + '...') + rep[(- n2):])
return rep
if (typ in META_TYPES):
return val.__name__
if (typ in STRING_TYPES):
n1 = ((limit - 3) // 2)
if (n1 < 1):
n1 = 1
n2 = ((limit - 3) - n1)
if (n2 < 1):
n2 = 1
if (len(val) > limit):
rep = repr((val[:n1] + val[(- n2):]))
else:
rep = repr(val)
if (len(rep) <= limit):
return rep
return ((rep[:n1] + '...') + rep[(- n2):])
if (typ is types.MethodType):
if (val.im_self is None):
fmt = '<unbound method %s of %s>'
else:
fmt = '<method %s of %s<>>'
if (val.im_class is not None):
return (fmt % (val.__name__, val.im_class.__name__))
else:
return (fmt % (val.__name__, '?'))
if (typ is types.FunctionType):
nam = val.__name__
if (nam == '<lambda>'):
return nam
else:
return ('<function %s>' % val.__name__)
if (typ is types.BuiltinFunctionType):
if (val.__self__ is not None):
return ('<built-in method %s of %s<>>' % (val.__name__, type(val.__self__).__name__))
else:
return ('<built-in function %s>' % val.__name__)
if (typ is types.ModuleType):
if hasattr(val, '__file__'):
return ('<module %s>' % val.__name__)
else:
return ('<built-in module %s>' % val.__name__)
if (typ is types.CodeType):
return ('<code object %s>' % val.co_name)
if isinstance(val, ProtocolBuffer.ProtocolMessage):
buf = [val.__class__.__name__, '<']
limit -= (len(buf[0]) + 2)
append = buf.append
first = True
dct = getattr(val, '__dict__', None)
if dct:
for (k, v) in sorted(dct.items()):
if (k.startswith('has_') or (not k.endswith('_'))):
continue
name = k[:(-1)]
has_method = getattr(val, ('has_' + name), None)
if (has_method is not None):
if ((type(has_method) is not types.MethodType) or (not has_method())):
continue
size_method = getattr(val, (name + '_size'), None)
if (size_method is not None):
if ((type(size_method) is not types.MethodType) or (not size_method())):
continue
if ((has_method is None) and (size_method is None)):
continue
if first:
first = False
else:
append(', ')
limit -= (len(name) + 2)
if (limit <= 0):
append('...')
break
append(name)
append('=')
rep = _format_value(v, limit, (level - 1))
limit -= len(rep)
append(rep)
append('>')
return ''.join(buf)
dct = getattr(val, '__dict__', None)
if (type(dct) is dict):
if (typ is INSTANCE_TYPE):
typ = val.__class__
typnam = typ.__name__
priv = (('_' + typnam) + '__')
buffer = [typnam, '<']
limit -= (len(buffer[0]) + 2)
if (len(dct) <= (limit // 4)):
names = sorted(dct)
else:
names = list(dct)
append = buffer.append
first = True
if issubclass(typ, BUILTIN_TYPES):
for builtin_typ in BUILTIN_TYPES:
if issubclass(typ, builtin_typ):
try:
val = builtin_typ(val)
assert (type(val) is builtin_typ)
except Exception:
break
else:
append(_format_value(val, limit, (level - 1)))
first = False
break
for nam in names:
if (not isinstance(nam, basestring)):
continue
if first:
first = False
else:
append(', ')
pnam = nam
if pnam.startswith(priv):
pnam = pnam[(len(priv) - 2):]
limit -= (len(pnam) + 2)
if (limit <= 0):
append('...')
break
append(pnam)
append('=')
rep = _format_value(dct[nam], limit, (level - 1))
limit -= len(rep)
append(rep)
append('>')
return ''.join(buffer)
how = CONTAINER_TYPES.get(typ)
if how:
(head, tail) = how
buffer = [head]
append = buffer.append
limit -= 2
series = val
isdict = (typ is dict)
if (isdict and (len(val) <= (limit // 4))):
series = sorted(val)
try:
for elem in series:
if (limit <= 0):
append('...')
break
rep = _format_value(elem, limit, (level - 1))
limit -= (len(rep) + 2)
append(rep)
if isdict:
rep = _format_value(val[elem], limit, (level - 1))
limit -= len(rep)
append(':')
append(rep)
append(', ')
if (buffer[(-1)] == ', '):
if ((tail == ')') and (len(val) == 1)):
buffer[(-1)] = ',)'
else:
buffer[(-1)] = tail
else:
append(tail)
return ''.join(buffer)
except (RuntimeError, KeyError):
return ((head + tail) + ' (Container modified during iteration)')
if issubclass(typ, BUILTIN_TYPES):
for builtin_typ in BUILTIN_TYPES:
if issubclass(typ, builtin_typ):
try:
val = builtin_typ(val)
assert (type(val) is builtin_typ)
except Exception:
break
else:
typnam = typ.__name__
limit -= (len(typnam) + 2)
return ('%s<%s>' % (typnam, _format_value(val, limit, (level - 1))))
if ((message is not None) and isinstance(val, message.Message)):
buffer = [typ.__name__, '<']
limit -= (len(buffer[0]) + 2)
append = buffer.append
first = True
fields = val.ListFields()
for (f, v) in fields:
if first:
first = False
else:
append(', ')
name = f.name
limit -= (len(name) + 2)
if (limit <= 0):
append('...')
break
append(name)
append('=')
if (f.label == f.LABEL_REPEATED):
limit -= 2
append('[')
first_sub = True
for item in v:
if first_sub:
first_sub = False
else:
limit -= 2
append(', ')
if (limit <= 0):
append('...')
break
rep = _format_value(item, limit, (level - 1))
limit -= len(rep)
append(rep)
append(']')
else:
rep = _format_value(v, limit, (level - 1))
limit -= len(rep)
append(rep)
append('>')
return ''.join(buffer)
return (typ.__name__ + '<>')
| formats a value as a compact string for display , truncated to a length limit and nesting level . | train | false |
12,861 | def classification_summary(y, t, label_num=None, beta=1.0, ignore_label=(-1)):
return ClassificationSummary(label_num, beta, ignore_label)(y, t)
| calculates precision , recall , f-beta score and support of a classification . | train | false |
12,862 | def denied(request):
return render(request, '403.html', {'request_path': request.path, 'title': _('Permission Denied')}, status=403)
| error handler rendering a 403 permission denied page . | train | false |
12,863 | def test_rollback(plugin, config, backup):
try:
plugin.rollback_checkpoints(1337)
except le_errors.Error as error:
logger.error('Plugin raised an exception during rollback:')
logger.exception(error)
return False
if _dirs_are_unequal(config, backup):
logger.error('Rollback failed for config `%s`', config)
return False
else:
logger.info('Rollback succeeded')
return True
| tests the rollback checkpoints function . | train | false |
12,864 | def get_user_location():
if (c.user and c.user.pref_use_global_defaults):
return ''
return get_request_location(request, c)
| determine country of origin for the current user ; this is provided via a call to geoip . | train | false |
12,865 | def listbucket(path_prefix, marker=None, prefix=None, max_keys=None, delimiter=None, retry_params=None, _account_id=None):
if prefix:
common.validate_bucket_path(path_prefix)
bucket = path_prefix
else:
(bucket, prefix) = common._process_path_prefix(path_prefix)
if (marker and marker.startswith(bucket)):
marker = marker[(len(bucket) + 1):]
api = storage_api._get_storage_api(retry_params=retry_params, account_id=_account_id)
options = {}
if marker:
options['marker'] = marker
if max_keys:
options['max-keys'] = max_keys
if prefix:
options['prefix'] = prefix
if delimiter:
options['delimiter'] = delimiter
return _Bucket(api, bucket, options)
| returns a gcsfilestat iterator over a bucket . | train | true |
12,866 | def test_service():
schema = vol.Schema(cv.service)
with pytest.raises(vol.MultipleInvalid):
schema('invalid_turn_on')
schema('homeassistant.turn_on')
| test service validation . | train | false |
12,867 | def generate_patterns(django_name, gwt_name):
pattern_list = defaults.patterns(django_name, ('^rpc/', 'views.handle_rpc'), ('^rpc_doc', 'views.rpc_documentation'))
debug_pattern_list = defaults.patterns('', ('^(?P<forward_addr>autotest.*)', 'autotest.frontend.afe.views.gwt_forward'), ('^client/(?P<path>.*)$', 'django.views.static.serve', {'document_root': os.path.join(os.path.dirname(__file__), '..', 'frontend', 'client', 'www')}), ('^$', 'django.views.generic.simple.redirect_to', {'url': ('client/autotest.%(name)s/%(name)s.html' % dict(name=gwt_name))}))
return (pattern_list, debug_pattern_list)
| generates the common url patterns for the given names . | train | false |
12,868 | def ltc():
s3.filter = (s3db.hms_hospital.facility_type == 31)
return hospital()
| filtered rest controller for sandy . | train | false |
12,869 | def _resort(kernel_labels, list_to_sort):
labels = [tko_rpc_utils.KernelString(label) for label in kernel_labels]
resorted_pairs = sorted(zip(labels, list_to_sort))
return [pair[1] for pair in resorted_pairs]
| resorts a list to match the sort order of the given kernel labels . | train | false |
12,870 | def avg_stdev(lst):
avg = (sum(lst) / len(lst))
sdsq = sum((((x - avg) ** 2) for x in lst))
stdev = ((sdsq / (len(lst) - 1)) ** 0.5)
return (avg, stdev)
| return average and standard deviation of the given list . | train | false |
12,871 | def build_mute_dict(dict_data=False):
t = Twitter(auth=authen())
next_cursor = (-1)
screen_name_list = []
name_list = []
while (next_cursor != 0):
list = t.mutes.users.list(screen_name=g['original_name'], cursor=next_cursor, skip_status=True, include_entities=False)
screen_name_list += [('@' + u['screen_name']) for u in list['users']]
name_list += [u['name'] for u in list['users']]
next_cursor = list['next_cursor']
if dict_data:
return dict(zip(screen_name_list, name_list))
else:
return screen_name_list
| build muting list . | train | false |
12,872 | def _safe_shutdown_socket(sock, how=socket.SHUT_RDWR):
try:
sock.shutdown(how)
except socket.error as exc:
if (exc.errno != errno.ENOTCONN):
raise
| shutdown a socket . | train | false |
12,873 | def guess_plateau(x, y):
if (len(x) != len(y)):
return 0
diffs = []
indexes = range(len(y))
for i in indexes:
if ((i + 1) not in indexes):
continue
diffs.append((y[(i + 1)] - y[i]))
diffs = np.array(diffs)
ymax = y[(-1)]
for i in indexes:
if ((y[i] > (ymax - diffs.std())) and (y[i] < (ymax + diffs.std()))):
ymax = y[i]
break
return ymax
| given two axes returns a guess of the plateau point . | train | false |
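Illustrative call for the guess_plateau snippet above (the snippet assumes numpy is imported as np):

```python
import numpy as np  # assumed import from the snippet's module

guess_plateau([0, 1, 2, 3, 4, 5], [1.0, 2.0, 3.0, 4.0, 4.0, 4.0])
# successive diffs are [1, 1, 1, 0, 0] (std ~ 0.49); the first y value
# within one std of the final value 4.0 is returned -> 4.0
```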
12,874 | def get_subtitles(video):
result_list = []
if (not video.subtitle_languages):
return result_list
for language in video.subtitle_languages:
if (hasattr(language, 'opensubtitles') and language.opensubtitles):
result_list.append(language.opensubtitles)
return sorted(result_list)
| return a sorted list of detected subtitles for the given video file . | train | false |
12,875 | def generate_thumbnail_download_link_vimeo(video_id_from_shortcode):
video_metadata = urllib.urlopen((('https://vimeo.com/api/v2/video/' + video_id_from_shortcode) + '.json'))
video_metadata_parsed = json.load(video_metadata)
video_thumbnail_large_location = video_metadata_parsed[0]['thumbnail_large']
return video_thumbnail_large_location
| thumbnail url generator for vimeo videos . | train | false |
12,876 | def avoid_wrapping(value):
return value.replace(u' ', u'\xa0')
| avoid text wrapping in the middle of a phrase by adding non-breaking spaces where there previously were normal spaces . | train | false |
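Usage sketch for the avoid_wrapping snippet above:

```python
avoid_wrapping(u'4 minutes')  # -> u'4\xa0minutes' (space becomes a no-break space)
```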
12,877 | @receiver(user_logged_out)
def log_successful_logout(sender, request, user, **kwargs):
if hasattr(request, 'user'):
if settings.FEATURES['SQUELCH_PII_IN_LOGS']:
AUDIT_LOG.info(u'Logout - user.id: {0}'.format(request.user.id))
else:
AUDIT_LOG.info(u'Logout - {0}'.format(request.user))
| handler to log when logouts have occurred successfully . | train | false |
12,878 | def no_vi_headers(physical_line, line_number, lines):
if ((line_number <= 5) or (line_number > (len(lines) - 5))):
if vi_header_re.match(physical_line):
return (0, "N314: Don't put vi configuration in source files")
| check for vi editor configuration in source files . | train | false |
12,879 | def boxcox_llf(lmb, data):
data = np.asarray(data)
N = data.shape[0]
if (N == 0):
return np.nan
y = boxcox(data, lmb)
y_mean = np.mean(y, axis=0)
llf = ((lmb - 1) * np.sum(np.log(data), axis=0))
llf -= ((N / 2.0) * np.log(np.sum((((y - y_mean) ** 2.0) / N), axis=0)))
return llf
| the boxcox log-likelihood function . | train | false |
12,880 | def levelize_path(path):
parts = tuple(filter(None, path.split('/')))
return [('/' + '/'.join(parts[:n])) for n in range(len(parts), 0, (-1))]
| splits given path to list of paths removing latest level in each step . | train | false |
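Usage sketch for the levelize_path snippet above:

```python
levelize_path('/srv/data/logs/')
# -> ['/srv/data/logs', '/srv/data', '/srv']
```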
12,881 | def http_response_to_document_iters(response, read_chunk_size=4096):
chunked = is_chunked(dict(response.getheaders()))
if (response.status == 200):
if chunked:
return iter([(0, None, None, response.getheaders(), response)])
content_length = int(response.getheader('Content-Length'))
return iter([(0, (content_length - 1), content_length, response.getheaders(), response)])
(content_type, params_list) = parse_content_type(response.getheader('Content-Type'))
if (content_type != 'multipart/byteranges'):
(start, end, length) = parse_content_range(response.getheader('Content-Range'))
return iter([(start, end, length, response.getheaders(), response)])
else:
params = dict(params_list)
return multipart_byteranges_to_document_iters(response, params['boundary'], read_chunk_size)
| takes a successful object-get http response and turns it into an iterator of 5-tuples . | train | false |
12,882 | def closeness_centrality(G, nodes, normalized=True):
closeness = {}
path_length = nx.single_source_shortest_path_length
top = set(nodes)
bottom = (set(G) - top)
n = float(len(top))
m = float(len(bottom))
for node in top:
sp = dict(path_length(G, node))
totsp = sum(sp.values())
if ((totsp > 0.0) and (len(G) > 1)):
closeness[node] = ((m + (2 * (n - 1))) / totsp)
if normalized:
s = ((len(sp) - 1.0) / (len(G) - 1))
closeness[node] *= s
else:
closeness[n] = 0.0
for node in bottom:
sp = dict(path_length(G, node))
totsp = sum(sp.values())
if ((totsp > 0.0) and (len(G) > 1)):
closeness[node] = ((n + (2 * (m - 1))) / totsp)
if normalized:
s = ((len(sp) - 1.0) / (len(G) - 1))
closeness[node] *= s
else:
closeness[n] = 0.0
return closeness
| compute closeness centrality for nodes . | train | false |
12,884 | def parse_qsl(qs, keep_blank_values=0, strict_parsing=0):
pairs = [s2 for s1 in qs.split('&') for s2 in s1.split(';')]
r = []
for name_value in pairs:
if ((not name_value) and (not strict_parsing)):
continue
nv = name_value.split('=', 1)
if (len(nv) != 2):
if strict_parsing:
raise ValueError, ('bad query field: %r' % (name_value,))
if keep_blank_values:
nv.append('')
else:
continue
if (len(nv[1]) or keep_blank_values):
name = unquote(nv[0].replace('+', ' '))
value = unquote(nv[1].replace('+', ' '))
r.append((name, value))
return r
| parse a query given as a string argument . | train | true |
12,885 | def _test_reference(raw, reref, ref_data, ref_from):
picks_eeg = pick_types(raw.info, meg=False, eeg=True, exclude='bads')
picks_other = pick_types(raw.info, meg=True, eeg=False, eog=True, stim=True, exclude='bads')
picks_ref = [raw.ch_names.index(ch) for ch in ref_from]
if isinstance(raw, Evoked):
_data = raw.data
_reref = reref.data
else:
_data = raw._data
_reref = reref._data
assert_array_equal(ref_data, _data[..., picks_ref, :].mean((-2)))
raw_eeg_data = _data[..., picks_eeg, :]
raw_other_data = _data[..., picks_other, :]
reref_eeg_data = _reref[..., picks_eeg, :]
reref_other_data = _reref[..., picks_other, :]
if isinstance(raw, BaseEpochs):
unref_eeg_data = (reref_eeg_data + ref_data[:, np.newaxis, :])
else:
unref_eeg_data = (reref_eeg_data + ref_data)
assert_allclose(raw_eeg_data, unref_eeg_data, 1e-06, atol=1e-15)
assert_allclose(raw_other_data, reref_other_data, 1e-06, atol=1e-15)
| test whether a reference has been correctly applied . | train | false |
12,888 | def latlon_round(latlon, spacing=1000):
g = latlon_to_grid(latlon)
g.easting = ((g.easting // spacing) * spacing)
g.northing = ((g.northing // spacing) * spacing)
return g.latlon()
| round down to the nearest grid corner . | train | true |
12,889 | def create_transport(host, connect_timeout, ssl=False):
if ssl:
return SSLTransport(host, connect_timeout, ssl)
else:
return TCPTransport(host, connect_timeout)
| given a few parameters from the connection constructor , select and create a suitable transport . | train | true |
12,890 | @register_canonicalize
@gof.local_optimizer([AdvancedIncSubtensor1])
def local_set_to_inc_subtensor(node):
if (isinstance(node.op, AdvancedIncSubtensor1) and node.op.set_instead_of_inc and node.inputs[1].owner and isinstance(node.inputs[1].owner.op, Elemwise) and isinstance(node.inputs[1].owner.op.scalar_op, scalar.Add)):
addn = node.inputs[1].owner
subn = None
other = None
if (addn.inputs[0].owner and isinstance(addn.inputs[0].owner.op, AdvancedSubtensor1)):
subn = addn.inputs[0].owner
other = addn.inputs[1]
elif (addn.inputs[1].owner and isinstance(addn.inputs[1].owner.op, AdvancedSubtensor1)):
subn = addn.inputs[1].owner
other = addn.inputs[0]
else:
return
if ((subn.inputs[1] != node.inputs[2]) or (subn.inputs[0] != node.inputs[0])):
return
ret = advanced_inc_subtensor1(node.inputs[0], other, node.inputs[2])
copy_stack_trace(node.outputs, ret)
return [ret]
| advancedincsubtensor1 -> advancedincsubtensor1 . | train | false |
12,891 | def align(str1, str2, epsilon=0):
if (np == None):
raise ImportError(u'You need numpy in order to use the align function')
assert (0.0 <= epsilon <= 1.0), u'Epsilon must be between 0.0 and 1.0.'
m = len(str1)
n = len(str2)
S = np.zeros(((m + 1), (n + 1)), dtype=float)
for i in range(1, (m + 1)):
for j in range(1, (n + 1)):
edit1 = (S[((i - 1), j)] + sigma_skip(str1[(i - 1)]))
edit2 = (S[(i, (j - 1))] + sigma_skip(str2[(j - 1)]))
edit3 = (S[((i - 1), (j - 1))] + sigma_sub(str1[(i - 1)], str2[(j - 1)]))
if (i > 1):
edit4 = (S[((i - 2), (j - 1))] + sigma_exp(str2[(j - 1)], str1[(i - 2):i]))
else:
edit4 = (- inf)
if (j > 1):
edit5 = (S[((i - 1), (j - 2))] + sigma_exp(str1[(i - 1)], str2[(j - 2):j]))
else:
edit5 = (- inf)
S[(i, j)] = max(edit1, edit2, edit3, edit4, edit5, 0)
T = ((1 - epsilon) * np.amax(S))
alignments = []
for i in range(1, (m + 1)):
for j in range(1, (n + 1)):
if (S[(i, j)] >= T):
alignments.append(_retrieve(i, j, 0, S, T, str1, str2, []))
return alignments
| aligns two strings , returning all alignments that score within epsilon of the best alignment . | train | false |
12,892 | def stubout_determine_is_pv_objectstore(stubs):
def f(*args):
return False
stubs.Set(vm_utils, '_determine_is_pv_objectstore', f)
| stubs the check so vms are assumed not to have pv kernels . | train | false |
12,895 | def getCraftSequence():
return 'chop preface outset mill multiply drill lift flow feed home lash fillet limit unpause alteration export'.split()
| get the extrusion craft sequence . | train | false |
12,896 | def format_action(action):
if ('id' in action):
del action['id']
if ('finish_time' in action):
del action['finish_time']
return action
| remove keys that aren't serialized . | train | false |
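Usage sketch for the format_action snippet above:

```python
format_action({'id': 7, 'action': 'reboot', 'finish_time': 1500000000})
# -> {'action': 'reboot'}  ('id' and 'finish_time' are removed in place)
```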
12,897 | def identity(x):
return x
| just returns input variables . | train | false |
12,899 | def _all_usage_keys(descriptors, aside_types):
usage_ids = set()
for descriptor in descriptors:
usage_ids.add(descriptor.scope_ids.usage_id)
for aside_type in aside_types:
usage_ids.add(AsideUsageKeyV1(descriptor.scope_ids.usage_id, aside_type))
usage_ids.add(AsideUsageKeyV2(descriptor.scope_ids.usage_id, aside_type))
return usage_ids
| return a set of all usage_ids for the descriptors and for all asides in aside_types for those descriptors . | train | false |
12,900 | def parse_env_file(env_file):
environment = {}
with open(env_file, 'r') as f:
for line in f:
if (line[0] == '#'):
continue
line = line.strip()
if (not line):
continue
parse_line = line.split('=', 1)
if (len(parse_line) == 2):
(k, v) = parse_line
environment[k] = v
else:
raise errors.DockerException('Invalid line in environment file {0}:\n{1}'.format(env_file, line))
return environment
| reads a line-separated environment file . | train | true |
12,901 | @parametrize('table', tables.mapped_classes)
def test_variable_names_2(table):
assert (getattr(tables, table.__name__) is table)
| we also want all of the tables exported . | train | false |
12,903 | def randu_array(nchars, size, dtype='O'):
retval = np.random.choice(RANDU_CHARS, size=(nchars * np.prod(size))).view((np.unicode_, nchars)).reshape(size)
if (dtype is None):
return retval
else:
return retval.astype(dtype)
| generate an array of unicode strings . | train | false |
12,904 | def verify_claim_token(user, token, pid):
if (not user.verify_claim_token(token=token, project_id=pid)):
if user.is_registered:
error_data = {'message_short': 'User has already been claimed.', 'message_long': 'Please <a href="/login/">log in</a> to continue.'}
raise HTTPError(400, data=error_data)
else:
return False
return True
| view helper that checks that a claim token for a given user and node id is valid . | train | false |
12,906 | def conjugate_row(row, K):
result = []
for r in row:
try:
result.append(r.conjugate())
except AttributeError:
result.append(r)
return result
| returns the conjugate of a row element-wise . | train | false |
12,908 | def assign_funcs(modname, service, module=None, get_conn_funcname='_get_conn', cache_id_funcname='_cache_id', exactly_one_funcname='_exactly_one'):
mod = sys.modules[modname]
setattr(mod, get_conn_funcname, get_connection_func(service, module=module))
setattr(mod, cache_id_funcname, cache_id_func(service))
if (exactly_one_funcname is not None):
setattr(mod, exactly_one_funcname, exactly_one)
| assign _get_conn and _cache_id functions to the named module . | train | true |
12,909 | def copy_missing_matrix(A, B, missing, missing_rows=False, missing_cols=False, is_diagonal=False, inplace=False, prefix=None):
if (prefix is None):
prefix = find_best_blas_type((A, B))[0]
copy = prefix_copy_missing_matrix_map[prefix]
if (not inplace):
B = np.copy(B, order='F')
try:
if (not A.is_f_contig()):
raise ValueError()
except:
A = np.asfortranarray(A)
copy(A, B, np.asfortranarray(missing), missing_rows, missing_cols, is_diagonal)
return B
| copy the rows or columns of a time-varying matrix where all non-missing values are in the upper left corner of the matrix . | train | false |
12,910 | def _load_uuid(hass, filename=UPDATER_UUID_FILE):
try:
with open(hass.config.path(filename)) as fptr:
jsonf = json.loads(fptr.read())
return uuid.UUID(jsonf['uuid'], version=4).hex
except (ValueError, AttributeError):
return None
except FileNotFoundError:
return _create_uuid(hass, filename)
| load uuid from a file or return none . | train | false |
12,911 | def fail_entry_with_error(entry, error):
log.error(error)
entry.fail(error)
| log error message at error level and fail the entry . | train | false |
12,912 | def get_messages_for_api_calls(request):
message_lists = []
for message in get_messages(request):
msg_txt = message.message
if (not (isinstance(msg_txt, SafeString) or isinstance(msg_txt, SafeUnicode))):
msg_txt = cgi.escape(unicode(msg_txt))
msg_type = message.tags
message_lists.append({msg_type: msg_txt})
return message_lists
| re-usable function that returns a list of messages to be used by api calls . | train | false |
12,913 | def _write_pyc_wrapper(*args):
try:
return _orig_write_pyc(*args)
except IOError as e:
if (e.errno == errno.EACCES):
return False
| wraps the internal _write_pyc method in py . | train | false |
12,914 | def parse_rarefaction_fname(name_string):
(root, ext) = os.path.splitext(name_string)
root_list = root.split('_')
iters = int(root_list.pop())
seqs_per_sam = int(root_list.pop())
base_name = '_'.join(root_list)
return (base_name, seqs_per_sam, iters, ext)
| returns base name , seqs per sample , iterations and extension parsed from the filename . | train | false |
12,915 | def test_enn_sk_estimator():
check_estimator(RepeatedEditedNearestNeighbours)
| test the sklearn estimator compatibility . | train | false |
12,916 | def default_device_names_for_instance(instance, root_device_name, *block_device_lists):
dev_list = [bdm.device_name for bdm in itertools.chain(*block_device_lists) if bdm.device_name]
if (root_device_name not in dev_list):
dev_list.append(root_device_name)
for bdm in itertools.chain(*block_device_lists):
dev = bdm.device_name
if (not dev):
dev = get_next_device_name(instance, dev_list, root_device_name)
bdm.device_name = dev
bdm.save()
dev_list.append(dev)
| generate missing device names for an instance . | train | false |
12,917 | def test_plot_topo_image_epochs():
import matplotlib.pyplot as plt
title = 'ERF images - MNE sample data'
epochs = _get_epochs()
epochs.load_data()
cmap = mne_analyze_colormap(format='matplotlib')
data_min = epochs._data.min()
fig = plot_topo_image_epochs(epochs, sigma=0.5, vmin=(-200), vmax=200, colorbar=True, title=title, cmap=cmap)
assert_equal(epochs._data.min(), data_min)
_fake_click(fig, fig.axes[2], (0.08, 0.64))
plt.close('all')
| test plotting of epochs image topography . | train | false |
12,918 | @pytest.mark.parametrize('sp_model,extra_inputs', [(CustomCarrier, []), (CustomPaymentProcessor, ['rounding_quantize']), (PseudoPaymentProcessor, ['bg_color', 'fg_color'])])
def test_service_provide_edit_view(rf, admin_user, sp_model, extra_inputs):
with override_settings(LANGUAGES=[('en', 'en')]):
base_inputs = ['csrfmiddlewaretoken', 'name__en', 'enabled', 'logo']
get_default_shop()
view = ServiceProviderEditView.as_view()
provider_name = 'some name'
service_provider = sp_model.objects.create(name=provider_name)
soup = get_bs_object_for_view(rf.get('/'), view, admin_user, object=service_provider)
provider_form = soup.find('form', attrs={'id': 'service_provider_form'})
rendered_fields = []
for input_field in provider_form.findAll('input'):
rendered_fields.append(input_field['name'])
assert (rendered_fields == (base_inputs + extra_inputs))
assert (provider_form.find('input', attrs={'name': 'name__en'})['value'] == provider_name)
| test that serviceprovidereditview works with existing serviceprovider subclasses ; to keep things simple , only english is used as a language . | train | false |
12,921 | def auth_pipeline_urls(auth_entry, redirect_url=None):
if (not third_party_auth.is_enabled()):
return {}
return {provider.provider_id: third_party_auth.pipeline.get_login_url(provider.provider_id, auth_entry, redirect_url=redirect_url) for provider in third_party_auth.provider.Registry.displayed_for_login()}
| retrieve urls for each enabled third-party auth provider . | train | false |
12,922 | def _beta_loss_to_float(beta_loss):
allowed_beta_loss = {'frobenius': 2, 'kullback-leibler': 1, 'itakura-saito': 0}
if (isinstance(beta_loss, str) and (beta_loss in allowed_beta_loss)):
beta_loss = allowed_beta_loss[beta_loss]
if (not isinstance(beta_loss, numbers.Number)):
raise ValueError(('Invalid beta_loss parameter: got %r instead of one of %r, or a float.' % (beta_loss, allowed_beta_loss.keys())))
return beta_loss
| convert string beta_loss to float . | train | false |
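Usage sketch for the _beta_loss_to_float snippet above (the snippet assumes the numbers module is imported):

```python
import numbers  # assumed import from the snippet's module

_beta_loss_to_float('frobenius')   # -> 2
_beta_loss_to_float(0.5)           # -> 0.5
_beta_loss_to_float('squared')     # raises ValueError
```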
12,924 | def resolve_task_cgroup_path(pid, controller):
if (controller not in get_all_controllers()):
raise error.TestError(("Doesn't support controller <%s>" % controller))
root_path = get_cgroup_mountpoint(controller)
proc_cgroup = ('/proc/%d/cgroup' % pid)
if (not os.path.isfile(proc_cgroup)):
raise NameError(('File %s does not exist\n Check whether cgroup installed in the system' % proc_cgroup))
try:
proc_file = open(proc_cgroup, 'r')
proc_cgroup_txt = proc_file.read()
finally:
proc_file.close()
mount_path = re.findall((':\\S*,*%s,*\\S*:(\\S*)\\n' % controller), proc_cgroup_txt)
return os.path.join(root_path, mount_path[0].strip('/'))
| resolving cgroup mount path of a particular task . | train | false |
12,925 | def is_running(proxyname):
return {'result': _is_proxy_running(proxyname)}
| test whether the named proxy is running . | train | false |
12,926 | def _sc_decode(soundcheck):
if isinstance(soundcheck, six.text_type):
soundcheck = soundcheck.encode('utf-8')
try:
soundcheck = codecs.decode(soundcheck.replace(' ', ''), 'hex')
soundcheck = struct.unpack('!iiiiiiiiii', soundcheck)
except (struct.error, TypeError, binascii.Error):
return (0.0, 0.0)
maxgain = max(soundcheck[:2])
if (maxgain > 0):
gain = (math.log10((maxgain / 1000.0)) * (-10))
else:
gain = 0.0
peak = (max(soundcheck[6:8]) / 32768.0)
return (round(gain, 2), round(peak, 6))
| convert a sound check string value to a tuple as used by replaygain . | train | true |
12,927 | def loadpkl(infile):
fmlogger.debug(u'Loading pkl: %s', infile)
if infile.endswith(u'pklz'):
pkl_file = gzip.open(infile, u'rb')
else:
pkl_file = open(infile, u'rb')
try:
unpkl = pickle.load(pkl_file)
except UnicodeDecodeError:
unpkl = pickle.load(pkl_file, fix_imports=True, encoding=u'utf-8')
return unpkl
| load a zipped or plain cpickled file . | train | false |
12,928 | def _package_activity_query(package_id):
import ckan.model as model
q = model.Session.query(model.Activity)
q = q.filter_by(object_id=package_id)
return q
| return an sqlalchemy query for all activities about package_id . | train | false |
12,929 | def jit(signature_or_function=None, locals={}, target='cpu', cache=False, **options):
if ('argtypes' in options):
raise DeprecationError(_msg_deprecated_signature_arg.format('argtypes'))
if ('restype' in options):
raise DeprecationError(_msg_deprecated_signature_arg.format('restype'))
if (signature_or_function is None):
pyfunc = None
sigs = None
elif isinstance(signature_or_function, list):
pyfunc = None
sigs = signature_or_function
elif sigutils.is_signature(signature_or_function):
pyfunc = None
sigs = [signature_or_function]
else:
pyfunc = signature_or_function
sigs = None
wrapper = _jit(sigs, locals=locals, target=target, cache=cache, targetoptions=options)
if (pyfunc is not None):
return wrapper(pyfunc)
else:
return wrapper
| jit compile a python function conforming to the hsa-python . | train | false |
12,931 | def _replace_arg(arg, tvars, args):
if (tvars is None):
tvars = []
if hasattr(arg, u'_subs_tree'):
return arg._subs_tree(tvars, args)
if isinstance(arg, TypeVar):
for (i, tvar) in enumerate(tvars):
if (arg == tvar):
return args[i]
return arg
| an internal helper function : replace arg if it is a type variable found in tvars with corresponding substitution from args or with corresponding substitution sub-tree if arg is a generic type . | train | false |
12,932 | def restore_system_resolver():
global _resolver
_resolver = None
socket.getaddrinfo = _original_getaddrinfo
socket.getnameinfo = _original_getnameinfo
socket.getfqdn = _original_getfqdn
socket.gethostbyname = _original_gethostbyname
socket.gethostbyname_ex = _original_gethostbyname_ex
socket.gethostbyaddr = _original_gethostbyaddr
| undo the effects of override_system_resolver() . | train | false |
12,935 | def datetime_to_W3CDTF(dt):
return datetime.datetime.strftime(dt, W3CDTF_FORMAT)
| convert from a datetime to a timestamp string . | train | false |
12,936 | def clean_indexes():
shutil.rmtree(data_dir('whoosh'))
create_index()
| cleans all indexes . | train | false |
12,937 | def post_create_comm_note(note):
thread = note.thread
obj = thread.obj
for developer in obj.authors.all():
thread.join_thread(developer)
try:
nonuser_mozilla_contacts = []
for email in obj.get_mozilla_contacts():
try:
moz_contact = UserProfile.objects.get(email=email)
thread.join_thread(moz_contact)
except UserProfile.DoesNotExist:
nonuser_mozilla_contacts.append((None, email))
utils_mail.email_recipients(nonuser_mozilla_contacts, note, extra_context={'nonuser_mozilla_contact': True})
except AttributeError:
pass
author = note.author
if author:
(cc, created_cc) = thread.join_thread(author)
utils_mail.send_mail_comm(note)
| stuff to do after creating note . | train | false |
12,938 | def checkbox(text=u'', tooltip=u'', checked=None):
return _checkbox(QtWidgets.QCheckBox, text, tooltip, checked)
| create a checkbox . | train | false |
12,940 | def claim_interface(device, interface):
device._ctx.managed_claim_interface(device, interface)
| [
"def",
"claim_interface",
"(",
"device",
",",
"interface",
")",
":",
"device",
".",
"_ctx",
".",
"managed_claim_interface",
"(",
"device",
",",
"interface",
")"
] | explicitly claim an interface . | train | false |
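This matches the helper PyUSB exposes as `usb.util.claim_interface`; a sketch with made-up vendor/product ids:

```python
import usb.core
import usb.util

dev = usb.core.find(idVendor=0x1234, idProduct=0x5678)  # hypothetical ids
if dev is not None:
    usb.util.claim_interface(dev, 0)    # claim interface 0 before doing I/O
    # ... perform transfers ...
    usb.util.release_interface(dev, 0)  # release it when done
```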
12,941 | def _merge_grad_data(data, method='rms'):
data = data.reshape(((len(data) // 2), 2, (-1)))
if (method == 'mean'):
data = np.mean(data, axis=1)
elif (method == 'rms'):
data = np.sqrt((np.sum((data ** 2), axis=1) / 2))
else:
raise ValueError(('method must be "rms" or "mean, got %s.' % method))
return data
| [
"def",
"_merge_grad_data",
"(",
"data",
",",
"method",
"=",
"'rms'",
")",
":",
"data",
"=",
"data",
".",
"reshape",
"(",
"(",
"(",
"len",
"(",
"data",
")",
"//",
"2",
")",
",",
"2",
",",
"(",
"-",
"1",
")",
")",
")",
"if",
"(",
"method",
"=="... | merge data from channel pairs using the rms or mean . | train | false |
12,942 | def get_inputs_and_params(func):
sig = signature(func)
inputs = []
params = []
for param in sig.parameters.values():
if (param.kind in (param.VAR_POSITIONAL, param.VAR_KEYWORD)):
raise ValueError(u'Signature must not have *args or **kwargs')
if (param.default == param.empty):
inputs.append(param)
else:
params.append(param)
return (inputs, params)
| [
"def",
"get_inputs_and_params",
"(",
"func",
")",
":",
"sig",
"=",
"signature",
"(",
"func",
")",
"inputs",
"=",
"[",
"]",
"params",
"=",
"[",
"]",
"for",
"param",
"in",
"sig",
".",
"parameters",
".",
"values",
"(",
")",
":",
"if",
"(",
"param",
".... | given a callable . | train | false |
12,943 | def mod_run_check(cmd_kwargs, onlyif, unless, creates):
cmd_kwargs = copy.deepcopy(cmd_kwargs)
cmd_kwargs['use_vt'] = False
if (onlyif is not None):
if isinstance(onlyif, string_types):
cmd = __salt__['cmd.retcode'](onlyif, ignore_retcode=True, python_shell=True, **cmd_kwargs)
log.debug('Last command return code: {0}'.format(cmd))
if (cmd != 0):
return {'comment': 'onlyif execution failed', 'skip_watch': True, 'result': True}
elif isinstance(onlyif, list):
for entry in onlyif:
cmd = __salt__['cmd.retcode'](entry, ignore_retcode=True, python_shell=True, **cmd_kwargs)
log.debug("Last command '{0}' return code: {1}".format(entry, cmd))
if (cmd != 0):
return {'comment': 'onlyif execution failed: {0}'.format(entry), 'skip_watch': True, 'result': True}
elif (not isinstance(onlyif, string_types)):
if (not onlyif):
log.debug('Command not run: onlyif did not evaluate to string_type')
return {'comment': 'onlyif execution failed', 'skip_watch': True, 'result': True}
if (unless is not None):
if isinstance(unless, string_types):
cmd = __salt__['cmd.retcode'](unless, ignore_retcode=True, python_shell=True, **cmd_kwargs)
log.debug('Last command return code: {0}'.format(cmd))
if (cmd == 0):
return {'comment': 'unless execution succeeded', 'skip_watch': True, 'result': True}
elif isinstance(unless, list):
cmd = []
for entry in unless:
cmd.append(__salt__['cmd.retcode'](entry, ignore_retcode=True, python_shell=True, **cmd_kwargs))
log.debug('Last command return code: {0}'.format(cmd))
if all([(c == 0) for c in cmd]):
return {'comment': 'unless execution succeeded', 'skip_watch': True, 'result': True}
elif (not isinstance(unless, string_types)):
if unless:
log.debug('Command not run: unless did not evaluate to string_type')
return {'comment': 'unless execution succeeded', 'skip_watch': True, 'result': True}
if (isinstance(creates, string_types) and os.path.exists(creates)):
return {'comment': '{0} exists'.format(creates), 'result': True}
elif (isinstance(creates, list) and all([os.path.exists(path) for path in creates])):
return {'comment': 'All files in creates exist', 'result': True}
return True
| [
"def",
"mod_run_check",
"(",
"cmd_kwargs",
",",
"onlyif",
",",
"unless",
",",
"creates",
")",
":",
"cmd_kwargs",
"=",
"copy",
".",
"deepcopy",
"(",
"cmd_kwargs",
")",
"cmd_kwargs",
"[",
"'use_vt'",
"]",
"=",
"False",
"if",
"(",
"onlyif",
"is",
"not",
"No... | execute the onlyif and unless logic . | train | true |
12,944 | def test_find_links_requirements_file_relative_path(script, data):
script.scratch_path.join('test-req.txt').write(textwrap.dedent(('\n --no-index\n --find-links=%s\n parent==0.1\n ' % data.packages)))
result = script.pip('install', '-r', (script.scratch_path / 'test-req.txt'), cwd=data.root)
egg_info_folder = ((script.site_packages / 'parent-0.1-py%s.egg-info') % pyversion)
initools_folder = (script.site_packages / 'parent')
assert (egg_info_folder in result.files_created), str(result)
assert (initools_folder in result.files_created), str(result)
| [
"def",
"test_find_links_requirements_file_relative_path",
"(",
"script",
",",
"data",
")",
":",
"script",
".",
"scratch_path",
".",
"join",
"(",
"'test-req.txt'",
")",
".",
"write",
"(",
"textwrap",
".",
"dedent",
"(",
"(",
"'\\n --no-index\\n --find-lin... | test find-links as a relative path to a reqs file . | train | false |
12,945 | def _find_utmp():
result = {}
for utmp in ('/var/run/utmp', '/run/utmp'):
try:
result[os.stat(utmp).st_mtime] = utmp
except Exception:
pass
return result[sorted(result).pop()]
| [
"def",
"_find_utmp",
"(",
")",
":",
"result",
"=",
"{",
"}",
"for",
"utmp",
"in",
"(",
"'/var/run/utmp'",
",",
"'/run/utmp'",
")",
":",
"try",
":",
"result",
"[",
"os",
".",
"stat",
"(",
"utmp",
")",
".",
"st_mtime",
"]",
"=",
"utmp",
"except",
"Ex... | figure out which utmp file to use when determining runlevel . | train | true |
12,946 | def getAddIndexedHeightGrid(heightGrid, minimumXY, step, top, vertexes):
indexedHeightGrid = []
for (rowIndex, row) in enumerate(heightGrid):
indexedRow = []
indexedHeightGrid.append(indexedRow)
rowOffset = ((step.imag * float(rowIndex)) + minimumXY.imag)
for (columnIndex, element) in enumerate(row):
columnOffset = ((step.real * float(columnIndex)) + minimumXY.real)
vector3index = Vector3Index(len(vertexes), columnOffset, rowOffset, (top * element))
indexedRow.append(vector3index)
vertexes.append(vector3index)
return indexedHeightGrid
| [
"def",
"getAddIndexedHeightGrid",
"(",
"heightGrid",
",",
"minimumXY",
",",
"step",
",",
"top",
",",
"vertexes",
")",
":",
"indexedHeightGrid",
"=",
"[",
"]",
"for",
"(",
"rowIndex",
",",
"row",
")",
"in",
"enumerate",
"(",
"heightGrid",
")",
":",
"indexed... | get and add an indexed heightgrid . | train | false |
12,947 | def strip_internal_keys(dirty):
clean = dirty.copy()
for k in dirty.keys():
if (isinstance(k, string_types) and k.startswith('_ansible_')):
del clean[k]
elif isinstance(dirty[k], dict):
clean[k] = strip_internal_keys(dirty[k])
return clean
| [
"def",
"strip_internal_keys",
"(",
"dirty",
")",
":",
"clean",
"=",
"dirty",
".",
"copy",
"(",
")",
"for",
"k",
"in",
"dirty",
".",
"keys",
"(",
")",
":",
"if",
"(",
"isinstance",
"(",
"k",
",",
"string_types",
")",
"and",
"k",
".",
"startswith",
"... | all keys stating with _ansible_ are internal . | train | false |
12,949 | def path_for_host(host, environ=None):
if (environ is None):
environ = os.environ
for kv_pair in environ['MOCK_SSH_ROOTS'].split(':'):
(this_host, this_path) = kv_pair.split('=')
if (this_host == host):
return os.path.abspath(this_path)
raise KeyError(('Host %s is not specified in $MOCK_SSH_ROOTS (%s)' % (host, environ['MOCK_SSH_ROOTS'])))
| [
"def",
"path_for_host",
"(",
"host",
",",
"environ",
"=",
"None",
")",
":",
"if",
"(",
"environ",
"is",
"None",
")",
":",
"environ",
"=",
"os",
".",
"environ",
"for",
"kv_pair",
"in",
"environ",
"[",
"'MOCK_SSH_ROOTS'",
"]",
".",
"split",
"(",
"':'",
... | get the filesystem path that the given host is being faked at . | train | false |
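A sketch with a fabricated `MOCK_SSH_ROOTS` mapping:

```python
environ = {'MOCK_SSH_ROOTS': 'master=/tmp/mock-master:node1=/tmp/mock-node1'}

path_for_host('node1', environ)   # -> os.path.abspath('/tmp/mock-node1')
path_for_host('absent', environ)  # raises KeyError: host not in $MOCK_SSH_ROOTS
```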
12,950 | def monomial_max(*monoms):
M = list(monoms[0])
for N in monoms[1:]:
for (i, n) in enumerate(N):
M[i] = max(M[i], n)
return tuple(M)
| [
"def",
"monomial_max",
"(",
"*",
"monoms",
")",
":",
"M",
"=",
"list",
"(",
"monoms",
"[",
"0",
"]",
")",
"for",
"N",
"in",
"monoms",
"[",
"1",
":",
"]",
":",
"for",
"(",
"i",
",",
"n",
")",
"in",
"enumerate",
"(",
"N",
")",
":",
"M",
"[",
... | returns maximal degree for each variable in a set of monomials . | train | false |
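A worked example on exponent tuples, assuming the function above is in scope (sympy ships the same helper in `sympy.polys.monomials`):

```python
# exponent vectors for x**3*y, x*y**4 and y**2
assert monomial_max((3, 1), (1, 4), (0, 2)) == (3, 4)
```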
12,951 | def default_populate_initial_workspace(initial_workspace, root_mask_term, execution_plan, dates, assets):
return initial_workspace
| [
"def",
"default_populate_initial_workspace",
"(",
"initial_workspace",
",",
"root_mask_term",
",",
"execution_plan",
",",
"dates",
",",
"assets",
")",
":",
"return",
"initial_workspace"
] | the default implementation for populate_initial_workspace . | train | false |
12,952 | def batches(items, number):
(div, mod) = divmod(len(items), number)
if (div > 1):
if mod:
div += 1
return batch_size(items, div)
elif (not div):
return ([[item] for item in items] + ([[]] * (number - mod)))
elif ((div == 1) and (not mod)):
return [[item] for item in items]
else:
return ([items[(i * 2):((i * 2) + 2)] for i in xrange(0, mod)] + [[item] for item in items[(mod * 2):]])
| [
"def",
"batches",
"(",
"items",
",",
"number",
")",
":",
"(",
"div",
",",
"mod",
")",
"=",
"divmod",
"(",
"len",
"(",
"items",
")",
",",
"number",
")",
"if",
"(",
"div",
">",
"1",
")",
":",
"if",
"mod",
":",
"div",
"+=",
"1",
"return",
"batch... | retrieves items in the given number of batches . | train | false |
12,953 | def collect_neutron_ports(bridges):
ports = []
for bridge in bridges:
ovs = ovs_lib.OVSBridge(bridge)
ports += [port.port_name for port in ovs.get_vif_ports()]
return ports
| [
"def",
"collect_neutron_ports",
"(",
"bridges",
")",
":",
"ports",
"=",
"[",
"]",
"for",
"bridge",
"in",
"bridges",
":",
"ovs",
"=",
"ovs_lib",
".",
"OVSBridge",
"(",
"bridge",
")",
"ports",
"+=",
"[",
"port",
".",
"port_name",
"for",
"port",
"in",
"ov... | collect ports created by neutron from ovs . | train | false |
12,954 | def get_num_profiles():
error_encountered = True
profiles = get_install_server_profiles()
if (profiles is not None):
if (len(profiles) < 1):
return 1
else:
return (len(profiles) + 1)
if error_encountered:
return 1
| [
"def",
"get_num_profiles",
"(",
")",
":",
"error_encountered",
"=",
"True",
"profiles",
"=",
"get_install_server_profiles",
"(",
")",
"if",
"(",
"profiles",
"is",
"not",
"None",
")",
":",
"if",
"(",
"len",
"(",
"profiles",
")",
"<",
"1",
")",
":",
"retur... | get the number of profiles . | train | false |
12,955 | def pload(fname, default=None):
try:
f = file(fname, 'r')
d = pickle.load(f)
except IOError:
d = default
else:
f.close()
return d
| [
"def",
"pload",
"(",
"fname",
",",
"default",
"=",
"None",
")",
":",
"try",
":",
"f",
"=",
"file",
"(",
"fname",
",",
"'r'",
")",
"d",
"=",
"pickle",
".",
"load",
"(",
"f",
")",
"except",
"IOError",
":",
"d",
"=",
"default",
"else",
":",
"f",
... | load a pickled object from a file . | train | false |
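The snippet is Python 2 (`file` builtin); a usage sketch under that assumption:

```python
settings = pload('settings.pkl', default={})  # falls back to {} when the file is absent
```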
12,956 | def _update_module_location(module, new_location):
if isinstance(module, XModuleDescriptor):
rekey_fields = []
else:
rekey_fields = ((module.get_explicitly_set_fields_by_scope(Scope.content).keys() + module.get_explicitly_set_fields_by_scope(Scope.settings).keys()) + module.get_explicitly_set_fields_by_scope(Scope.children).keys())
module.location = new_location
if (len(rekey_fields) > 0):
module.force_save_fields(rekey_fields)
| [
"def",
"_update_module_location",
"(",
"module",
",",
"new_location",
")",
":",
"if",
"isinstance",
"(",
"module",
",",
"XModuleDescriptor",
")",
":",
"rekey_fields",
"=",
"[",
"]",
"else",
":",
"rekey_fields",
"=",
"(",
"(",
"module",
".",
"get_explicitly_set... | update a modules location . | train | false |
12,957 | def vb_get_max_network_slots():
sysprops = vb_get_box().systemProperties
totals = [sysprops.getMaxNetworkAdapters(adapter_type) for adapter_type in [1, 2]]
return sum(totals)
| [
"def",
"vb_get_max_network_slots",
"(",
")",
":",
"sysprops",
"=",
"vb_get_box",
"(",
")",
".",
"systemProperties",
"totals",
"=",
"[",
"sysprops",
".",
"getMaxNetworkAdapters",
"(",
"adapter_type",
")",
"for",
"adapter_type",
"in",
"[",
"1",
",",
"2",
"]",
... | max number of network adapter slots any machine can have . | train | true |
12,958 | def goGoodSamaritan(prevValue, originalCharset):
if (kb.commonOutputs is None):
initCommonOutputs()
predictionSet = set()
commonValue = None
commonPattern = None
countCommonValue = 0
if (kb.partRun in kb.commonOutputs):
commonPartOutputs = kb.commonOutputs[kb.partRun]
commonPattern = commonFinderOnly(prevValue, commonPartOutputs)
if (commonPattern and (commonPattern == prevValue)):
commonPattern = None
for item in commonPartOutputs:
if item.startswith(prevValue):
commonValue = item
countCommonValue += 1
if (len(item) > len(prevValue)):
char = item[len(prevValue)]
predictionSet.add(char)
if (countCommonValue > 1):
commonValue = None
commonCharset = []
otherCharset = []
for ordChar in originalCharset:
if (chr(ordChar) not in predictionSet):
otherCharset.append(ordChar)
else:
commonCharset.append(ordChar)
commonCharset.sort()
return (commonValue, commonPattern, commonCharset, originalCharset)
else:
return (None, None, None, originalCharset)
| [
"def",
"goGoodSamaritan",
"(",
"prevValue",
",",
"originalCharset",
")",
":",
"if",
"(",
"kb",
".",
"commonOutputs",
"is",
"None",
")",
":",
"initCommonOutputs",
"(",
")",
"predictionSet",
"=",
"set",
"(",
")",
"commonValue",
"=",
"None",
"commonPattern",
"=... | function for retrieving parameters needed for common prediction feature . | train | false |
12,960 | def _ConvertedToAdditionalOption(tool, msvs_name, flag):
def _Translate(value, msbuild_settings):
if (value == 'true'):
tool_settings = _GetMSBuildToolSettings(msbuild_settings, tool)
if ('AdditionalOptions' in tool_settings):
new_flags = ('%s %s' % (tool_settings['AdditionalOptions'], flag))
else:
new_flags = flag
tool_settings['AdditionalOptions'] = new_flags
_msvs_validators[tool.msvs_name][msvs_name] = _boolean.ValidateMSVS
_msvs_to_msbuild_converters[tool.msvs_name][msvs_name] = _Translate
| [
"def",
"_ConvertedToAdditionalOption",
"(",
"tool",
",",
"msvs_name",
",",
"flag",
")",
":",
"def",
"_Translate",
"(",
"value",
",",
"msbuild_settings",
")",
":",
"if",
"(",
"value",
"==",
"'true'",
")",
":",
"tool_settings",
"=",
"_GetMSBuildToolSettings",
"(... | defines a setting thats handled via a command line option in msbuild . | train | false |
12,961 | def finish_subprocess(proc, cmdline, ok_exit_codes=None):
if (ok_exit_codes is None):
ok_exit_codes = [0]
(out, err) = proc.communicate()
ret = proc.returncode
if (ret not in ok_exit_codes):
raise Exception(("'%(cmdline)s' returned non-zero exit code: retcode=%(ret)i, out='%(out)s', stderr='%(err)s'" % locals()))
return (out, err)
| [
"def",
"finish_subprocess",
"(",
"proc",
",",
"cmdline",
",",
"ok_exit_codes",
"=",
"None",
")",
":",
"if",
"(",
"ok_exit_codes",
"is",
"None",
")",
":",
"ok_exit_codes",
"=",
"[",
"0",
"]",
"(",
"out",
",",
"err",
")",
"=",
"proc",
".",
"communicate",... | ensure that the process returned a zero exit code indicating success . | train | false |
12,962 | def dmp_ground_primitive(f, u, K):
if (not u):
return dup_primitive(f, K)
if dmp_zero_p(f, u):
return (K.zero, f)
cont = dmp_ground_content(f, u, K)
if K.is_one(cont):
return (cont, f)
else:
return (cont, dmp_quo_ground(f, cont, u, K))
| [
"def",
"dmp_ground_primitive",
"(",
"f",
",",
"u",
",",
"K",
")",
":",
"if",
"(",
"not",
"u",
")",
":",
"return",
"dup_primitive",
"(",
"f",
",",
"K",
")",
"if",
"dmp_zero_p",
"(",
"f",
",",
"u",
")",
":",
"return",
"(",
"K",
".",
"zero",
",",
... | compute content and the primitive form of f in k[x] . | train | false |
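A worked example in sympy's dense recursive representation, assuming the densetools helpers are importable; `u=1` means two variables, and `f` encodes `2*x*y + 6`:

```python
from sympy.polys.domains import ZZ

f = [[ZZ(2), ZZ(0)], [ZZ(6)]]   # 2*x*y + 6: outer coefficients in x, inner polys in y
cont, prim = dmp_ground_primitive(f, 1, ZZ)
assert cont == ZZ(2)
assert prim == [[ZZ(1), ZZ(0)], [ZZ(3)]]  # x*y + 3
```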
12,963 | def sync_biom_and_mf(pmf, bt):
mf_samples = set(pmf)
bt_samples = set(bt.ids())
if (mf_samples == bt_samples):
return (pmf, bt, set())
else:
shared_samples = mf_samples.intersection(bt_samples)
assert (len(shared_samples) != 0), 'sync_biom_and_mf: No shared samples, no point in continuing.'
nonshared_samples = (mf_samples.union(bt_samples) - shared_samples)
npmf = {k: v for (k, v) in pmf.items() if (k in shared_samples)}
def _f(sv, sid, smd):
return (sid in shared_samples)
nbt = bt.filter(_f, axis='sample')
return (npmf, nbt, nonshared_samples)
| [
"def",
"sync_biom_and_mf",
"(",
"pmf",
",",
"bt",
")",
":",
"mf_samples",
"=",
"set",
"(",
"pmf",
")",
"bt_samples",
"=",
"set",
"(",
"bt",
".",
"ids",
"(",
")",
")",
"if",
"(",
"mf_samples",
"==",
"bt_samples",
")",
":",
"return",
"(",
"pmf",
",",... | reduce mapping file dict and biom table to shared samples . | train | false |