id_within_dataset int64 1 55.5k | snippet stringlengths 19 14.2k | tokens listlengths 6 1.63k | nl stringlengths 6 352 | split_within_dataset stringclasses 1
value | is_duplicated bool 2
classes |
|---|---|---|---|---|---|
22,148 | def fill_oauth2_slug(apps, schema_editor):
OAuth2ProviderConfig = apps.get_model(u'third_party_auth', u'OAuth2ProviderConfig')
for config in OAuth2ProviderConfig.objects.all():
config.provider_slug = config.backend_name
config.save()
| [
"def",
"fill_oauth2_slug",
"(",
"apps",
",",
"schema_editor",
")",
":",
"OAuth2ProviderConfig",
"=",
"apps",
".",
"get_model",
"(",
"u'third_party_auth'",
",",
"u'OAuth2ProviderConfig'",
")",
"for",
"config",
"in",
"OAuth2ProviderConfig",
".",
"objects",
".",
"all",... | fill in the provider_slug to be the same as backend_name for backwards compatability . | train | false |
22,149 | def provider_fw_rule_create(context, rule):
return IMPL.provider_fw_rule_create(context, rule)
| [
"def",
"provider_fw_rule_create",
"(",
"context",
",",
"rule",
")",
":",
"return",
"IMPL",
".",
"provider_fw_rule_create",
"(",
"context",
",",
"rule",
")"
] | add a firewall rule at the provider level . | train | false |
22,152 | def CDLLONGLINE(barDs, count):
return call_talib_with_ohlc(barDs, count, talib.CDLLONGLINE)
| [
"def",
"CDLLONGLINE",
"(",
"barDs",
",",
"count",
")",
":",
"return",
"call_talib_with_ohlc",
"(",
"barDs",
",",
"count",
",",
"talib",
".",
"CDLLONGLINE",
")"
] | long line candle . | train | false |
22,156 | def jellyToSource(obj, file=None):
aot = jellyToAOT(obj)
if file:
file.write(getSource(aot).encode('utf-8'))
else:
return getSource(aot)
| [
"def",
"jellyToSource",
"(",
"obj",
",",
"file",
"=",
"None",
")",
":",
"aot",
"=",
"jellyToAOT",
"(",
"obj",
")",
"if",
"file",
":",
"file",
".",
"write",
"(",
"getSource",
"(",
"aot",
")",
".",
"encode",
"(",
"'utf-8'",
")",
")",
"else",
":",
"... | pass me an object and . | train | false |
22,158 | @public
def parallel_poly_from_expr(exprs, *gens, **args):
opt = options.build_options(gens, args)
return _parallel_poly_from_expr(exprs, opt)
| [
"@",
"public",
"def",
"parallel_poly_from_expr",
"(",
"exprs",
",",
"*",
"gens",
",",
"**",
"args",
")",
":",
"opt",
"=",
"options",
".",
"build_options",
"(",
"gens",
",",
"args",
")",
"return",
"_parallel_poly_from_expr",
"(",
"exprs",
",",
"opt",
")"
] | construct polynomials from expressions . | train | false |
22,160 | def buildOverlappedSequences(numSequences=2, seqLen=5, sharedElements=[3, 4], numOnBitsPerPattern=3, patternOverlap=0, seqOverlap=0, **kwargs):
numSharedElements = len(sharedElements)
numUniqueElements = (seqLen - numSharedElements)
numPatterns = (numSharedElements + (numUniqueElements * numSequences))
patterns = getSimplePatterns(numOnBitsPerPattern, numPatterns, patternOverlap)
numCols = len(patterns[0])
trainingSequences = []
uniquePatternIndices = range(numSharedElements, numPatterns)
for _ in xrange(numSequences):
sequence = []
sharedPatternIndices = range(numSharedElements)
for j in xrange(seqLen):
if (j in sharedElements):
patIdx = sharedPatternIndices.pop(0)
else:
patIdx = uniquePatternIndices.pop(0)
sequence.append(patterns[patIdx])
trainingSequences.append(sequence)
if (VERBOSITY >= 3):
print '\nTraining sequences'
printAllTrainingSequences(trainingSequences)
return (numCols, trainingSequences)
| [
"def",
"buildOverlappedSequences",
"(",
"numSequences",
"=",
"2",
",",
"seqLen",
"=",
"5",
",",
"sharedElements",
"=",
"[",
"3",
",",
"4",
"]",
",",
"numOnBitsPerPattern",
"=",
"3",
",",
"patternOverlap",
"=",
"0",
",",
"seqOverlap",
"=",
"0",
",",
"**",... | create training sequences that share some elements in the middle . | train | true |
22,161 | def add_flow_exception(exc):
global _flow_exceptions
if ((not isinstance(exc, type)) or (not issubclass(exc, Exception))):
raise TypeError(('Expected an Exception subclass, got %r' % (exc,)))
as_set = set(_flow_exceptions)
as_set.add(exc)
_flow_exceptions = tuple(as_set)
| [
"def",
"add_flow_exception",
"(",
"exc",
")",
":",
"global",
"_flow_exceptions",
"if",
"(",
"(",
"not",
"isinstance",
"(",
"exc",
",",
"type",
")",
")",
"or",
"(",
"not",
"issubclass",
"(",
"exc",
",",
"Exception",
")",
")",
")",
":",
"raise",
"TypeErr... | add an exception that should not be logged . | train | true |
22,164 | def xblock_view(request, course_id, usage_id, view_name):
if (not settings.FEATURES.get('ENABLE_XBLOCK_VIEW_ENDPOINT', False)):
log.warn("Attempt to use deactivated XBlock view endpoint - see FEATURES['ENABLE_XBLOCK_VIEW_ENDPOINT']")
raise Http404
if (not request.user.is_authenticated()):
raise PermissionDenied
try:
course_key = SlashSeparatedCourseKey.from_deprecated_string(course_id)
except InvalidKeyError:
raise Http404('Invalid location')
with modulestore().bulk_operations(course_key):
course = modulestore().get_course(course_key)
(instance, _) = get_module_by_usage_id(request, course_id, usage_id, course=course)
try:
fragment = instance.render(view_name, context=request.GET)
except NoSuchViewError:
log.exception('Attempt to render missing view on %s: %s', instance, view_name)
raise Http404
hashed_resources = OrderedDict()
for resource in fragment.resources:
hashed_resources[hash_resource(resource)] = resource
return JsonResponse({'html': fragment.content, 'resources': hashed_resources.items(), 'csrf_token': unicode(csrf(request)['csrf_token'])})
| [
"def",
"xblock_view",
"(",
"request",
",",
"course_id",
",",
"usage_id",
",",
"view_name",
")",
":",
"if",
"(",
"not",
"settings",
".",
"FEATURES",
".",
"get",
"(",
"'ENABLE_XBLOCK_VIEW_ENDPOINT'",
",",
"False",
")",
")",
":",
"log",
".",
"warn",
"(",
"\... | returns the rendered view of a given xblock . | train | false |
22,165 | def reverse_helper(regex, *args, **kwargs):
result = re.sub('\\(([^)]+)\\)', MatchChecker(args, kwargs), regex.pattern)
return result.replace('^', '').replace('$', '')
| [
"def",
"reverse_helper",
"(",
"regex",
",",
"*",
"args",
",",
"**",
"kwargs",
")",
":",
"result",
"=",
"re",
".",
"sub",
"(",
"'\\\\(([^)]+)\\\\)'",
",",
"MatchChecker",
"(",
"args",
",",
"kwargs",
")",
",",
"regex",
".",
"pattern",
")",
"return",
"res... | does a "reverse" lookup -- returns the url for the given args/kwargs . | train | false |
22,166 | def _dump_model(model, attrs=None):
for field in model._meta.fields:
print ('%s=%s,' % (field.name, str(getattr(model, field.name)))),
if (attrs is not None):
for attr in attrs:
print ('%s=%s,' % (attr, str(getattr(model, attr)))),
for field in model._meta.many_to_many:
vals = getattr(model, field.name)
print ('%s=%s (%i),' % (field.name, ', '.join(map(str, vals.all())), vals.count())),
print
| [
"def",
"_dump_model",
"(",
"model",
",",
"attrs",
"=",
"None",
")",
":",
"for",
"field",
"in",
"model",
".",
"_meta",
".",
"fields",
":",
"print",
"(",
"'%s=%s,'",
"%",
"(",
"field",
".",
"name",
",",
"str",
"(",
"getattr",
"(",
"model",
",",
"fiel... | dump the model fields for debugging . | train | false |
22,167 | def send_email(subject=None, recipients=[], html=''):
plain_txt_email = 'Please view in a mail client that supports HTML.'
if app.config.get('EMAILS_USE_SMTP'):
try:
with app.app_context():
msg = Message(subject, recipients=recipients)
msg.body = plain_txt_email
msg.html = html
mail.send(msg)
app.logger.debug('Emailed {} - {} '.format(recipients, subject))
except Exception as e:
m = 'Failed to send failure message with subject: {}\n{} {}'.format(subject, Exception, e)
app.logger.warn(m)
app.logger.warn(traceback.format_exc())
else:
try:
ses_region = app.config.get('SES_REGION', 'us-east-1')
ses = boto.ses.connect_to_region(ses_region)
except Exception as e:
m = 'Failed to connect to ses using boto. Check your boto credentials. {} {}'.format(Exception, e)
app.logger.warn(m)
app.logger.warn(traceback.format_exc())
return
for email in recipients:
try:
ses.send_email(app.config.get('MAIL_DEFAULT_SENDER'), subject, html, email, format='html')
app.logger.debug('Emailed {} - {} '.format(email, subject))
except Exception as e:
m = 'Failed to send failure message with subject: {}\n{} {}'.format(subject, Exception, e)
app.logger.warn(m)
app.logger.warn(traceback.format_exc())
| [
"def",
"send_email",
"(",
"subject",
"=",
"None",
",",
"recipients",
"=",
"[",
"]",
",",
"html",
"=",
"''",
")",
":",
"plain_txt_email",
"=",
"'Please view in a mail client that supports HTML.'",
"if",
"app",
".",
"config",
".",
"get",
"(",
"'EMAILS_USE_SMTP'",
... | send email using backend specified in email_backend . | train | false |
22,168 | def scourUnitlessLength(length, needsRendererWorkaround=False):
if (not isinstance(length, Decimal)):
length = getcontext().create_decimal(str(length))
if (int(length) == length):
length = getcontext().create_decimal(int(length))
nonsci = unicode(length).lower().replace('e+', 'e')
if (not needsRendererWorkaround):
if ((len(nonsci) > 2) and (nonsci[:2] == '0.')):
nonsci = nonsci[1:]
elif ((len(nonsci) > 3) and (nonsci[:3] == '-0.')):
nonsci = ('-' + nonsci[2:])
if (len(nonsci) > 3):
sci = unicode(length.normalize()).lower().replace('e+', 'e')
if (len(sci) < len(nonsci)):
return sci
else:
return nonsci
else:
return nonsci
| [
"def",
"scourUnitlessLength",
"(",
"length",
",",
"needsRendererWorkaround",
"=",
"False",
")",
":",
"if",
"(",
"not",
"isinstance",
"(",
"length",
",",
"Decimal",
")",
")",
":",
"length",
"=",
"getcontext",
"(",
")",
".",
"create_decimal",
"(",
"str",
"("... | scours the numeric part of a length only . | train | false |
22,171 | def _update_course_context(request, context, course, platform_name):
context['full_course_image_url'] = request.build_absolute_uri(course_image_url(course))
course_title_from_cert = context['certificate_data'].get('course_title', '')
accomplishment_copy_course_name = (course_title_from_cert if course_title_from_cert else course.display_name)
context['accomplishment_copy_course_name'] = accomplishment_copy_course_name
course_number = (course.display_coursenumber if course.display_coursenumber else course.number)
context['course_number'] = course_number
if context['organization_long_name']:
context['accomplishment_copy_course_description'] = _('a course of study offered by {partner_short_name}, an online learning initiative of {partner_long_name}.').format(partner_short_name=context['organization_short_name'], partner_long_name=context['organization_long_name'], platform_name=platform_name)
else:
context['accomplishment_copy_course_description'] = _('a course of study offered by {partner_short_name}.').format(partner_short_name=context['organization_short_name'], platform_name=platform_name)
| [
"def",
"_update_course_context",
"(",
"request",
",",
"context",
",",
"course",
",",
"platform_name",
")",
":",
"context",
"[",
"'full_course_image_url'",
"]",
"=",
"request",
".",
"build_absolute_uri",
"(",
"course_image_url",
"(",
"course",
")",
")",
"course_tit... | updates context dictionary with course info . | train | false |
22,172 | def func_args_as_dict(func, args, kwargs):
arg_names = list(OrderedDict.fromkeys(itertools.chain(inspect.getargspec(func)[0], kwargs.keys())))
return OrderedDict((list(six.moves.zip(arg_names, args)) + list(kwargs.items())))
| [
"def",
"func_args_as_dict",
"(",
"func",
",",
"args",
",",
"kwargs",
")",
":",
"arg_names",
"=",
"list",
"(",
"OrderedDict",
".",
"fromkeys",
"(",
"itertools",
".",
"chain",
"(",
"inspect",
".",
"getargspec",
"(",
"func",
")",
"[",
"0",
"]",
",",
"kwar... | return given functions positional and key value arguments as an ordered dictionary . | train | true |
22,174 | def _CheckSortLimit(limit):
return _CheckInteger(limit, 'limit', upper_bound=MAXIMUM_SORTED_DOCUMENTS)
| [
"def",
"_CheckSortLimit",
"(",
"limit",
")",
":",
"return",
"_CheckInteger",
"(",
"limit",
",",
"'limit'",
",",
"upper_bound",
"=",
"MAXIMUM_SORTED_DOCUMENTS",
")"
] | checks the limit on number of docs to score or sort is not too large . | train | false |
22,176 | def human_seconds(interval):
units = [(1, u'second'), (60, u'minute'), (60, u'hour'), (24, u'day'), (7, u'week'), (52, u'year'), (10, u'decade')]
for i in range((len(units) - 1)):
(increment, suffix) = units[i]
(next_increment, _) = units[(i + 1)]
interval /= float(increment)
if (interval < next_increment):
break
else:
(increment, suffix) = units[(-1)]
interval /= float(increment)
return (u'%3.1f %ss' % (interval, suffix))
| [
"def",
"human_seconds",
"(",
"interval",
")",
":",
"units",
"=",
"[",
"(",
"1",
",",
"u'second'",
")",
",",
"(",
"60",
",",
"u'minute'",
")",
",",
"(",
"60",
",",
"u'hour'",
")",
",",
"(",
"24",
",",
"u'day'",
")",
",",
"(",
"7",
",",
"u'week'"... | formats interval . | train | false |
22,178 | def edit_action(parent, *keys):
action = qtutils.add_action_with_status_tip(parent, cmds.LaunchEditor.name(), N_(u'Edit selected paths'), cmds.run(cmds.LaunchEditor), hotkeys.EDIT, *keys)
action.setIcon(icons.edit())
return action
| [
"def",
"edit_action",
"(",
"parent",
",",
"*",
"keys",
")",
":",
"action",
"=",
"qtutils",
".",
"add_action_with_status_tip",
"(",
"parent",
",",
"cmds",
".",
"LaunchEditor",
".",
"name",
"(",
")",
",",
"N_",
"(",
"u'Edit selected paths'",
")",
",",
"cmds"... | launch an editor -> qaction . | train | false |
22,179 | def _create_graph(action_context):
G = nx.DiGraph()
G.add_node(SYSTEM_SCOPE, value=KeyValueLookup(scope=SYSTEM_SCOPE))
system_keyvalue_context = {SYSTEM_SCOPE: KeyValueLookup(scope=SYSTEM_SCOPE)}
G.add_node(DATASTORE_PARENT_SCOPE, value=system_keyvalue_context)
G.add_node(ACTION_CONTEXT_KV_PREFIX, value=action_context)
return G
| [
"def",
"_create_graph",
"(",
"action_context",
")",
":",
"G",
"=",
"nx",
".",
"DiGraph",
"(",
")",
"G",
".",
"add_node",
"(",
"SYSTEM_SCOPE",
",",
"value",
"=",
"KeyValueLookup",
"(",
"scope",
"=",
"SYSTEM_SCOPE",
")",
")",
"system_keyvalue_context",
"=",
... | creates a generic directed graph for depencency tree and fills it with basic context variables . | train | false |
22,180 | def _subst_vars(path, local_vars):
def _replacer(matchobj):
name = matchobj.group(1)
if (name in local_vars):
return local_vars[name]
elif (name in os.environ):
return os.environ[name]
return matchobj.group(0)
return _VAR_REPL.sub(_replacer, path)
| [
"def",
"_subst_vars",
"(",
"path",
",",
"local_vars",
")",
":",
"def",
"_replacer",
"(",
"matchobj",
")",
":",
"name",
"=",
"matchobj",
".",
"group",
"(",
"1",
")",
"if",
"(",
"name",
"in",
"local_vars",
")",
":",
"return",
"local_vars",
"[",
"name",
... | in the string path . | train | true |
22,182 | def fake_mldata(columns_dict, dataname, matfile, ordering=None):
datasets = dict(columns_dict)
for name in datasets:
datasets[name] = datasets[name].T
if (ordering is None):
ordering = sorted(list(datasets.keys()))
datasets['mldata_descr_ordering'] = sp.empty((1, len(ordering)), dtype='object')
for (i, name) in enumerate(ordering):
datasets['mldata_descr_ordering'][(0, i)] = name
scipy.io.savemat(matfile, datasets, oned_as='column')
| [
"def",
"fake_mldata",
"(",
"columns_dict",
",",
"dataname",
",",
"matfile",
",",
"ordering",
"=",
"None",
")",
":",
"datasets",
"=",
"dict",
"(",
"columns_dict",
")",
"for",
"name",
"in",
"datasets",
":",
"datasets",
"[",
"name",
"]",
"=",
"datasets",
"[... | create a fake mldata data set . | train | false |
22,183 | def test_gcrs_cirs():
(ra, dec, _) = randomly_sample_sphere(200)
gcrs = GCRS(ra=ra, dec=dec, obstime=u'J2000')
gcrs6 = GCRS(ra=ra, dec=dec, obstime=u'J2006')
gcrs2 = gcrs.transform_to(CIRS).transform_to(gcrs)
gcrs6_2 = gcrs6.transform_to(CIRS).transform_to(gcrs)
assert_allclose(gcrs.ra, gcrs2.ra)
assert_allclose(gcrs.dec, gcrs2.dec)
assert (not allclose(gcrs.ra, gcrs6_2.ra))
assert (not allclose(gcrs.dec, gcrs6_2.dec))
gcrs3 = gcrs.transform_to(ITRS).transform_to(CIRS).transform_to(ITRS).transform_to(gcrs)
assert_allclose(gcrs.ra, gcrs3.ra)
assert_allclose(gcrs.dec, gcrs3.dec)
gcrs4 = gcrs.transform_to(ICRS).transform_to(CIRS).transform_to(ICRS).transform_to(gcrs)
assert_allclose(gcrs.ra, gcrs4.ra)
assert_allclose(gcrs.dec, gcrs4.dec)
| [
"def",
"test_gcrs_cirs",
"(",
")",
":",
"(",
"ra",
",",
"dec",
",",
"_",
")",
"=",
"randomly_sample_sphere",
"(",
"200",
")",
"gcrs",
"=",
"GCRS",
"(",
"ra",
"=",
"ra",
",",
"dec",
"=",
"dec",
",",
"obstime",
"=",
"u'J2000'",
")",
"gcrs6",
"=",
"... | check gcrs<->cirs transforms for round-tripping . | train | false |
22,184 | def dePemList(s, name):
bList = []
prefix = ('-----BEGIN %s-----' % name)
postfix = ('-----END %s-----' % name)
while 1:
start = s.find(prefix)
if (start == (-1)):
return bList
end = s.find(postfix, (start + len(prefix)))
if (end == (-1)):
raise SyntaxError('Missing PEM postfix')
s2 = s[(start + len(prefix)):end]
retBytes = a2b_base64(s2)
bList.append(retBytes)
s = s[(end + len(postfix)):]
| [
"def",
"dePemList",
"(",
"s",
",",
"name",
")",
":",
"bList",
"=",
"[",
"]",
"prefix",
"=",
"(",
"'-----BEGIN %s-----'",
"%",
"name",
")",
"postfix",
"=",
"(",
"'-----END %s-----'",
"%",
"name",
")",
"while",
"1",
":",
"start",
"=",
"s",
".",
"find",... | decode a sequence of pem blocks into a list of bytearrays . | train | false |
22,185 | def get_asserts_total_rate(name):
return float(reduce((lambda memo, obj: (memo + get_rate(('%sasserts_%s' % (NAME_PREFIX, obj))))), ['regular', 'warning', 'msg', 'user', 'rollovers'], 0))
| [
"def",
"get_asserts_total_rate",
"(",
"name",
")",
":",
"return",
"float",
"(",
"reduce",
"(",
"(",
"lambda",
"memo",
",",
"obj",
":",
"(",
"memo",
"+",
"get_rate",
"(",
"(",
"'%sasserts_%s'",
"%",
"(",
"NAME_PREFIX",
",",
"obj",
")",
")",
")",
")",
... | return the total number of asserts per second . | train | false |
22,188 | def p_init_declarator_1(t):
pass
| [
"def",
"p_init_declarator_1",
"(",
"t",
")",
":",
"pass"
] | init_declarator : declarator . | train | false |
22,189 | def exponential_decay_noise(xp, shape, dtype, hook, opt):
std = numpy.sqrt((hook.eta / numpy.power((1 + opt.t), 0.55)))
return xp.random.normal(0, std, shape).astype(dtype)
| [
"def",
"exponential_decay_noise",
"(",
"xp",
",",
"shape",
",",
"dtype",
",",
"hook",
",",
"opt",
")",
":",
"std",
"=",
"numpy",
".",
"sqrt",
"(",
"(",
"hook",
".",
"eta",
"/",
"numpy",
".",
"power",
"(",
"(",
"1",
"+",
"opt",
".",
"t",
")",
",... | time-dependent annealed gaussian noise function from the paper: adding gradient noise improves learning for very deep networks <URL . | train | false |
22,190 | def getSheetThickness(elementNode):
return getCascadeFloatWithoutSelf(3.0, elementNode, 'sheetThickness')
| [
"def",
"getSheetThickness",
"(",
"elementNode",
")",
":",
"return",
"getCascadeFloatWithoutSelf",
"(",
"3.0",
",",
"elementNode",
",",
"'sheetThickness'",
")"
] | get the sheet thickness . | train | false |
22,191 | def list_modules(desc=False):
cmd = u'Get-InstalledModule'
modules = _pshell(cmd)
if isinstance(modules, dict):
ret = []
if desc:
modules_ret = {}
modules_ret[modules[u'Name']] = copy.deepcopy(modules)
modules = modules_ret
return modules
ret.append(modules[u'Name'])
return ret
names = []
if desc:
names = {}
for module in modules:
if desc:
names[module[u'Name']] = module
continue
names.append(module[u'Name'])
return names
| [
"def",
"list_modules",
"(",
"desc",
"=",
"False",
")",
":",
"cmd",
"=",
"u'Get-InstalledModule'",
"modules",
"=",
"_pshell",
"(",
"cmd",
")",
"if",
"isinstance",
"(",
"modules",
",",
"dict",
")",
":",
"ret",
"=",
"[",
"]",
"if",
"desc",
":",
"modules_r... | list the modules loaded on the minion . | train | true |
22,192 | def _instance_in_resize_state(instance):
vm = instance.vm_state
task = instance.task_state
if (vm == vm_states.RESIZED):
return True
if ((vm in [vm_states.ACTIVE, vm_states.STOPPED]) and (task in [task_states.RESIZE_PREP, task_states.RESIZE_MIGRATING, task_states.RESIZE_MIGRATED, task_states.RESIZE_FINISH, task_states.REBUILDING])):
return True
return False
| [
"def",
"_instance_in_resize_state",
"(",
"instance",
")",
":",
"vm",
"=",
"instance",
".",
"vm_state",
"task",
"=",
"instance",
".",
"task_state",
"if",
"(",
"vm",
"==",
"vm_states",
".",
"RESIZED",
")",
":",
"return",
"True",
"if",
"(",
"(",
"vm",
"in",... | returns true if the instance is in one of the resizing states . | train | false |
22,193 | def unzip_archive(archive):
assert os.path.exists(archive), ('File not found - %s' % archive)
tmpdir = os.path.join(tempfile.gettempdir(), os.path.basename(archive))
assert (tmpdir != archive)
if os.path.exists(tmpdir):
pass
elif tarfile.is_tarfile(archive):
print 'Extracting tarfile ...'
with tarfile.open(archive) as tf:
tf.extractall(path=tmpdir)
elif zipfile.is_zipfile(archive):
print 'Extracting zipfile ...'
with zipfile.ZipFile(archive) as zf:
zf.extractall(path=tmpdir)
else:
raise ValueError(('Unknown file type for %s' % os.path.basename(archive)))
return tmpdir
| [
"def",
"unzip_archive",
"(",
"archive",
")",
":",
"assert",
"os",
".",
"path",
".",
"exists",
"(",
"archive",
")",
",",
"(",
"'File not found - %s'",
"%",
"archive",
")",
"tmpdir",
"=",
"os",
".",
"path",
".",
"join",
"(",
"tempfile",
".",
"gettempdir",
... | unzips an archive into a temporary directory returns a link to that directory arguments: archive -- the path to an archive file . | train | false |
22,197 | def dimension_div(a, b):
if ((a == datashape.var) or (b == datashape.var)):
return datashape.var
if isinstance(a, Fixed):
a = int(a)
if isinstance(b, Fixed):
b = int(b)
return int(ceil((a / b)))
| [
"def",
"dimension_div",
"(",
"a",
",",
"b",
")",
":",
"if",
"(",
"(",
"a",
"==",
"datashape",
".",
"var",
")",
"or",
"(",
"b",
"==",
"datashape",
".",
"var",
")",
")",
":",
"return",
"datashape",
".",
"var",
"if",
"isinstance",
"(",
"a",
",",
"... | how many times does b fit into a? . | train | false |
22,198 | def _group_tasks_by_name_and_status(task_dict):
group_status = {}
for task in task_dict:
if (task.task_family not in group_status):
group_status[task.task_family] = []
group_status[task.task_family].append(task)
return group_status
| [
"def",
"_group_tasks_by_name_and_status",
"(",
"task_dict",
")",
":",
"group_status",
"=",
"{",
"}",
"for",
"task",
"in",
"task_dict",
":",
"if",
"(",
"task",
".",
"task_family",
"not",
"in",
"group_status",
")",
":",
"group_status",
"[",
"task",
".",
"task_... | takes a dictionary with sets of tasks grouped by their status and returns a dictionary with dictionaries with an array of tasks grouped by their status and task name . | train | true |
22,199 | def p_atom_number(p):
p[0] = ast.Const(p[1])
| [
"def",
"p_atom_number",
"(",
"p",
")",
":",
"p",
"[",
"0",
"]",
"=",
"ast",
".",
"Const",
"(",
"p",
"[",
"1",
"]",
")"
] | atom : number | string . | train | false |
22,200 | def get_entity(expression):
if expression:
namespace = sys.modules.copy()
namespace.update(__main__.__dict__)
try:
return eval(expression, namespace)
except BaseException:
return None
| [
"def",
"get_entity",
"(",
"expression",
")",
":",
"if",
"expression",
":",
"namespace",
"=",
"sys",
".",
"modules",
".",
"copy",
"(",
")",
"namespace",
".",
"update",
"(",
"__main__",
".",
"__dict__",
")",
"try",
":",
"return",
"eval",
"(",
"expression",... | return the object corresponding to expression evaluated in a namespace spanning sys . | train | false |
22,201 | def _check_users(users):
messg = ''
valid = True
for (user, user_details) in six.iteritems(users):
if (not user_details):
valid = False
messg += 'Please provide details for username {user}.\n'.format(user=user)
continue
if (not (isinstance(user_details.get('level'), int) or (0 <= user_details.get('level') <= 15))):
messg += 'Level must be a integer between 0 and 15 for username {user}. Will assume 0.\n'.format(user=user)
return (valid, messg)
| [
"def",
"_check_users",
"(",
"users",
")",
":",
"messg",
"=",
"''",
"valid",
"=",
"True",
"for",
"(",
"user",
",",
"user_details",
")",
"in",
"six",
".",
"iteritems",
"(",
"users",
")",
":",
"if",
"(",
"not",
"user_details",
")",
":",
"valid",
"=",
... | checks if the input dictionary of users is valid . | train | true |
22,203 | def get_all_key_combinations(data, flattened_schema):
schema_prefixes = set([key[:(-1)] for key in flattened_schema])
combinations = set([()])
for key in sorted(data.keys(), key=flattened_order_key):
key_prefix = key[:(-1):2]
if (key_prefix not in schema_prefixes):
continue
if (tuple(tuple(key[:(-3)])) not in combinations):
continue
combinations.add(tuple(key[:(-1)]))
return combinations
| [
"def",
"get_all_key_combinations",
"(",
"data",
",",
"flattened_schema",
")",
":",
"schema_prefixes",
"=",
"set",
"(",
"[",
"key",
"[",
":",
"(",
"-",
"1",
")",
"]",
"for",
"key",
"in",
"flattened_schema",
"]",
")",
"combinations",
"=",
"set",
"(",
"[",
... | compare the schema against the given data and get all valid tuples that match the schema ignoring the last value in the tuple . | train | false |
22,204 | def check_envelope(result, func, cargs, offset=(-1)):
env = ptr_byref(cargs, offset)
return env
| [
"def",
"check_envelope",
"(",
"result",
",",
"func",
",",
"cargs",
",",
"offset",
"=",
"(",
"-",
"1",
")",
")",
":",
"env",
"=",
"ptr_byref",
"(",
"cargs",
",",
"offset",
")",
"return",
"env"
] | checks a function that returns an ogr envelope by reference . | train | false |
22,206 | def ne_(a, b, msg=None):
assert (a != b), (msg or ('%r == %r' % (a, b)))
| [
"def",
"ne_",
"(",
"a",
",",
"b",
",",
"msg",
"=",
"None",
")",
":",
"assert",
"(",
"a",
"!=",
"b",
")",
",",
"(",
"msg",
"or",
"(",
"'%r == %r'",
"%",
"(",
"a",
",",
"b",
")",
")",
")"
] | assert a != b . | train | false |
22,207 | def _bem_specify_els(bem, els, mults):
sol = np.zeros((len(els), bem['solution'].shape[1]))
scalp = bem['surfs'][0]
rrs = np.concatenate([apply_trans(bem['head_mri_t']['trans'], el['rmag']) for el in els], axis=0)
ws = np.concatenate([el['w'] for el in els])
(tri_weights, tri_idx) = _project_onto_surface(rrs, scalp)
tri_weights *= ws
weights = np.einsum('ij,jik->jk', tri_weights, bem['solution'][scalp['tris'][tri_idx]])
edges = np.concatenate([[0], np.cumsum([len(el['w']) for el in els])])
for (ii, (start, stop)) in enumerate(zip(edges[:(-1)], edges[1:])):
sol[ii] = weights[start:stop].sum(0)
sol *= mults
return sol
| [
"def",
"_bem_specify_els",
"(",
"bem",
",",
"els",
",",
"mults",
")",
":",
"sol",
"=",
"np",
".",
"zeros",
"(",
"(",
"len",
"(",
"els",
")",
",",
"bem",
"[",
"'solution'",
"]",
".",
"shape",
"[",
"1",
"]",
")",
")",
"scalp",
"=",
"bem",
"[",
... | set up for computing the solution at a set of eeg electrodes . | train | false |
22,208 | def random_name_generator(first, second, x):
names = []
for i in range(0, int(x)):
random_first = randint(0, (len(first) - 1))
random_last = randint(0, (len(second) - 1))
names.append('{0} {1}'.format(first[random_first], second[random_last]))
return set(names)
| [
"def",
"random_name_generator",
"(",
"first",
",",
"second",
",",
"x",
")",
":",
"names",
"=",
"[",
"]",
"for",
"i",
"in",
"range",
"(",
"0",
",",
"int",
"(",
"x",
")",
")",
":",
"random_first",
"=",
"randint",
"(",
"0",
",",
"(",
"len",
"(",
"... | generates random names . | train | false |
22,209 | def selected_item(list_widget, items):
widget_items = list_widget.selectedItems()
if (not widget_items):
return None
widget_item = widget_items[0]
row = list_widget.row(widget_item)
if (row < len(items)):
return items[row]
else:
return None
| [
"def",
"selected_item",
"(",
"list_widget",
",",
"items",
")",
":",
"widget_items",
"=",
"list_widget",
".",
"selectedItems",
"(",
")",
"if",
"(",
"not",
"widget_items",
")",
":",
"return",
"None",
"widget_item",
"=",
"widget_items",
"[",
"0",
"]",
"row",
... | returns the model item that corresponds to the selected qlistwidget row . | train | false |
22,210 | def _rgb_vector(color):
if isinstance(color, six.string_types):
color = color_dict[color]
return np.array(color[:3])
| [
"def",
"_rgb_vector",
"(",
"color",
")",
":",
"if",
"isinstance",
"(",
"color",
",",
"six",
".",
"string_types",
")",
":",
"color",
"=",
"color_dict",
"[",
"color",
"]",
"return",
"np",
".",
"array",
"(",
"color",
"[",
":",
"3",
"]",
")"
] | return rgb color as array . | train | false |
22,211 | @validate('form', 'graph', 'tree')
def valid_att_in_field(arch):
return (not arch.xpath('//field[not(@name)]'))
| [
"@",
"validate",
"(",
"'form'",
",",
"'graph'",
",",
"'tree'",
")",
"def",
"valid_att_in_field",
"(",
"arch",
")",
":",
"return",
"(",
"not",
"arch",
".",
"xpath",
"(",
"'//field[not(@name)]'",
")",
")"
] | field nodes must all have a @name . | train | false |
22,212 | def _get_zendesk_custom_field_context(request):
context = {}
course_id = request.POST.get('course_id')
if (not course_id):
return context
context['course_id'] = course_id
if (not request.user.is_authenticated()):
return context
enrollment = CourseEnrollment.get_enrollment(request.user, CourseKey.from_string(course_id))
if (enrollment and enrollment.is_active):
context['enrollment_mode'] = enrollment.mode
return context
| [
"def",
"_get_zendesk_custom_field_context",
"(",
"request",
")",
":",
"context",
"=",
"{",
"}",
"course_id",
"=",
"request",
".",
"POST",
".",
"get",
"(",
"'course_id'",
")",
"if",
"(",
"not",
"course_id",
")",
":",
"return",
"context",
"context",
"[",
"'c... | construct a dictionary of data that can be stored in zendesk custom fields . | train | false |
22,213 | def parse_setheader(s):
return _parse_hook(s)
| [
"def",
"parse_setheader",
"(",
"s",
")",
":",
"return",
"_parse_hook",
"(",
"s",
")"
] | returns a tuple . | train | false |
22,214 | def ntohs(bs):
return struct.unpack('!h', bs)[0]
| [
"def",
"ntohs",
"(",
"bs",
")",
":",
"return",
"struct",
".",
"unpack",
"(",
"'!h'",
",",
"bs",
")",
"[",
"0",
"]"
] | ntohs -> integer convert a 16-bit integer from network to host byte order . | train | false |
22,215 | def all_localhosts():
ips = ['127.0.0.1']
try:
info = socket.getaddrinfo('::1', None)
(af, socktype, proto, _canonname, _sa) = info[0]
s = socket.socket(af, socktype, proto)
s.close()
except socket.error:
return ips
try:
info = socket.getaddrinfo('localhost', None)
except:
return ips
ips = []
for item in info:
item = item[4][0]
if ((item not in ips) and (('::1' not in item) or sabnzbd.cfg.ipv6_hosting())):
ips.append(item)
return ips
| [
"def",
"all_localhosts",
"(",
")",
":",
"ips",
"=",
"[",
"'127.0.0.1'",
"]",
"try",
":",
"info",
"=",
"socket",
".",
"getaddrinfo",
"(",
"'::1'",
",",
"None",
")",
"(",
"af",
",",
"socktype",
",",
"proto",
",",
"_canonname",
",",
"_sa",
")",
"=",
"... | return all unique values of localhost in order of preference . | train | false |
22,216 | def no_type_check_decorator(decorator):
@functools.wraps(decorator)
def wrapped_decorator(*args, **kwds):
func = decorator(*args, **kwds)
func = no_type_check(func)
return func
return wrapped_decorator
| [
"def",
"no_type_check_decorator",
"(",
"decorator",
")",
":",
"@",
"functools",
".",
"wraps",
"(",
"decorator",
")",
"def",
"wrapped_decorator",
"(",
"*",
"args",
",",
"**",
"kwds",
")",
":",
"func",
"=",
"decorator",
"(",
"*",
"args",
",",
"**",
"kwds",... | decorator to give another decorator the @no_type_check effect . | train | true |
22,218 | def setup_test_show_dir():
if (not os.path.exists(SHOW_DIR)):
os.makedirs(SHOW_DIR)
| [
"def",
"setup_test_show_dir",
"(",
")",
":",
"if",
"(",
"not",
"os",
".",
"path",
".",
"exists",
"(",
"SHOW_DIR",
")",
")",
":",
"os",
".",
"makedirs",
"(",
"SHOW_DIR",
")"
] | create a test show directory . | train | false |
22,219 | def idzr_rsvd(m, n, matveca, matvec, k):
(U, V, S, ier) = _id.idzr_rsvd(m, n, matveca, matvec, k)
if ier:
raise _RETCODE_ERROR
return (U, V, S)
| [
"def",
"idzr_rsvd",
"(",
"m",
",",
"n",
",",
"matveca",
",",
"matvec",
",",
"k",
")",
":",
"(",
"U",
",",
"V",
",",
"S",
",",
"ier",
")",
"=",
"_id",
".",
"idzr_rsvd",
"(",
"m",
",",
"n",
",",
"matveca",
",",
"matvec",
",",
"k",
")",
"if",
... | compute svd of a complex matrix to a specified rank using random matrix-vector multiplication . | train | false |
def meijerint_indefinite(f, x):
    """Compute an indefinite integral of ``f`` by rewriting it as a G function.

    Tries integrating after shifting ``x`` by each candidate splitting point;
    returns the first antiderivative free of hyper/meijerg immediately,
    otherwise falls back to the simplest special-function result found
    (implicitly None on total failure).
    """
    from sympy import hyper, meijerg
    results = []
    # Try each splitting point (always including 0) as a shift of x.
    for a in sorted((_find_splitting_points(f, x) | {S(0)}), key=default_sort_key):
        res = _meijerint_indefinite_1(f.subs(x, (x + a)), x)
        if (not res):
            continue
        res = res.subs(x, (x - a))  # undo the shift
        if _has(res, hyper, meijerg):
            # Special-function answers are kept only as fallbacks.
            results.append(res)
        else:
            return res
    if f.has(HyperbolicFunction):
        # Hyperbolics often integrate after rewriting in terms of exp.
        _debug('Try rewriting hyperbolics in terms of exp.')
        rv = meijerint_indefinite(_rewrite_hyperbolics_as_exp(f), x)
        if rv:
            if (not (type(rv) is list)):
                return collect(factor_terms(rv), rv.atoms(exp))
            results.extend(rv)
    if results:
        return next(ordered(results))
| [
"def",
"meijerint_indefinite",
"(",
"f",
",",
"x",
")",
":",
"from",
"sympy",
"import",
"hyper",
",",
"meijerg",
"results",
"=",
"[",
"]",
"for",
"a",
"in",
"sorted",
"(",
"(",
"_find_splitting_points",
"(",
"f",
",",
"x",
")",
"|",
"{",
"S",
"(",
... | compute an indefinite integral of f by rewriting it as a g function . | train | false |
def _patch_file(path, content):
    """Back up *path* (via _rename_path) and overwrite it with *content*.

    Returns False, doing nothing, when the file already holds *content*;
    True after patching.
    """
    # Context managers guarantee the handles are closed even when the
    # read or write raises (the original leaked the handle on error).
    with open(path) as f:
        existing_content = f.read()
    if existing_content == content:
        log.warn('Already patched.')
        return False
    log.warn('Patching...')
    _rename_path(path)  # keep a backup copy of the original file
    with open(path, 'w') as f:
        f.write(content)
    return True
| [
"def",
"_patch_file",
"(",
"path",
",",
"content",
")",
":",
"f",
"=",
"open",
"(",
"path",
")",
"existing_content",
"=",
"f",
".",
"read",
"(",
")",
"f",
".",
"close",
"(",
")",
"if",
"(",
"existing_content",
"==",
"content",
")",
":",
"log",
".",... | will backup the file then patch it . | train | true |
def transformVector3sByMatrix(tetragrid, vector3s):
    """Transform every vector3 in place by the tetragrid matrix.

    A no-op when the tetragrid is None or the identity."""
    if getIsIdentityTetragridOrNone(tetragrid):
        return
    for point in vector3s:
        transformVector3Blindly(tetragrid, point)
| [
"def",
"transformVector3sByMatrix",
"(",
"tetragrid",
",",
"vector3s",
")",
":",
"if",
"getIsIdentityTetragridOrNone",
"(",
"tetragrid",
")",
":",
"return",
"for",
"vector3",
"in",
"vector3s",
":",
"transformVector3Blindly",
"(",
"tetragrid",
",",
"vector3",
")"
] | transform the vector3s by a matrix . | train | false |
def get_pants_configdir():
    """Return the pants global config directory, honoring $XDG_CONFIG_HOME
    and falling back to ~/.config."""
    config_home = os.environ.get(u'XDG_CONFIG_HOME') or u'~/.config'
    return os.path.expanduser(os.path.join(config_home, u'pants'))
| [
"def",
"get_pants_configdir",
"(",
")",
":",
"config_home",
"=",
"os",
".",
"environ",
".",
"get",
"(",
"u'XDG_CONFIG_HOME'",
")",
"if",
"(",
"not",
"config_home",
")",
":",
"config_home",
"=",
"u'~/.config'",
"return",
"os",
".",
"path",
".",
"expanduser",
... | return the pants global config directory . | train | false |
def _render_footer_html(request, show_openedx_logo, include_dependencies):
    """Render the footer template, picking RTL/LTR assets for the current
    language."""
    bidi = 'rtl' if translation.get_language_bidi() else 'ltr'
    context = {
        'hide_openedx_link': not show_openedx_logo,
        'footer_js_url': _footer_static_url(request, 'js/footer-edx.js'),
        'footer_css_urls': _footer_css_urls(request, settings.FOOTER_CSS['openedx'][bidi]),
        'bidi': bidi,
        'include_dependencies': include_dependencies,
    }
    return render_to_response('footer.html', context)
| [
"def",
"_render_footer_html",
"(",
"request",
",",
"show_openedx_logo",
",",
"include_dependencies",
")",
":",
"bidi",
"=",
"(",
"'rtl'",
"if",
"translation",
".",
"get_language_bidi",
"(",
")",
"else",
"'ltr'",
")",
"css_name",
"=",
"settings",
".",
"FOOTER_CSS... | render the footer as html . | train | false |
def addFacesByConvexLoops(faces, indexedLoops):
    """Add faces stitched between each consecutive pair of convex loops."""
    if len(indexedLoops) < 2:
        return
    # Stitch every adjacent pair except the final one.
    for index in xrange(len(indexedLoops) - 2):
        addFacesByConvexBottomTopLoop(faces, indexedLoops[index], indexedLoops[index + 1])
    bottom = indexedLoops[-2]
    top = indexedLoops[-1]
    if len(top) < 1:
        top = indexedLoops[0]  # an empty top loop wraps around to the first
    addFacesByConvexBottomTopLoop(faces, bottom, top)
| [
"def",
"addFacesByConvexLoops",
"(",
"faces",
",",
"indexedLoops",
")",
":",
"if",
"(",
"len",
"(",
"indexedLoops",
")",
"<",
"2",
")",
":",
"return",
"for",
"indexedLoopsIndex",
"in",
"xrange",
"(",
"(",
"len",
"(",
"indexedLoops",
")",
"-",
"2",
")",
... | add faces from loops . | train | false |
def huge_state():
    """Return a DeploymentState holding a node with very many containers."""
    node = NodeState(hostname=u'192.0.2.31', applications={})
    return _huge(DeploymentState(), node)
| [
"def",
"huge_state",
"(",
")",
":",
"return",
"_huge",
"(",
"DeploymentState",
"(",
")",
",",
"NodeState",
"(",
"hostname",
"=",
"u'192.0.2.31'",
",",
"applications",
"=",
"{",
"}",
")",
")"
] | return a state with many containers . | train | false |
def expand_path(path):
    """Expand '~', environment variables, and relative parts of *path* into
    an absolute path."""
    expanded = os.path.expanduser(path)
    expanded = os.path.expandvars(expanded)
    return os.path.abspath(expanded)
| [
"def",
"expand_path",
"(",
"path",
")",
":",
"return",
"os",
".",
"path",
".",
"abspath",
"(",
"os",
".",
"path",
".",
"expandvars",
"(",
"os",
".",
"path",
".",
"expanduser",
"(",
"path",
")",
")",
")"
] | expands directories and globs in given path . | train | true |
def get_external_ip():
    """Return this instance's external IP from the GCE metadata server, or
    'localhost' when the server cannot be reached."""
    try:
        resp = requests.get(
            METADATA_NETWORK_INTERFACE_URL,
            headers={'Metadata-Flavor': 'Google'},
            timeout=2,
        )
        return resp.text
    except requests.RequestException:
        logging.info('Metadata server could not be reached, assuming local.')
        return 'localhost'
| [
"def",
"get_external_ip",
"(",
")",
":",
"try",
":",
"r",
"=",
"requests",
".",
"get",
"(",
"METADATA_NETWORK_INTERFACE_URL",
",",
"headers",
"=",
"{",
"'Metadata-Flavor'",
":",
"'Google'",
"}",
",",
"timeout",
"=",
"2",
")",
"return",
"r",
".",
"text",
... | returns a deferred which will be called with the wan ip address retreived through upnp . | train | false |
def libvlc_audio_equalizer_get_band_frequency(u_index):
    """Return the frequency (Hz) of equalizer band *u_index* via libvlc,
    binding the C function lazily on first use."""
    f = _Cfunctions.get('libvlc_audio_equalizer_get_band_frequency', None)
    if f is None:
        f = _Cfunction('libvlc_audio_equalizer_get_band_frequency', ((1,),), None,
                       ctypes.c_float, ctypes.c_uint)
    return f(u_index)
| [
"def",
"libvlc_audio_equalizer_get_band_frequency",
"(",
"u_index",
")",
":",
"f",
"=",
"(",
"_Cfunctions",
".",
"get",
"(",
"'libvlc_audio_equalizer_get_band_frequency'",
",",
"None",
")",
"or",
"_Cfunction",
"(",
"'libvlc_audio_equalizer_get_band_frequency'",
",",
"(",
... | get a particular equalizer band frequency . | train | true |
def PosixShutdown():
    """Kill every child dev-server process, first with SIGTERM and then,
    for any still running, with SIGKILL; finally reap them to avoid zombies.
    """
    dev_process = GlobalProcess()
    children = dev_process.Children()
    # Pass 1 sends SIGTERM; pass 2 escalates to SIGKILL for survivors.
    for term_signal in (signal.SIGTERM, signal.SIGKILL):
        for child in children:
            if (child.process is None):
                continue
            if (child.process.returncode is not None):
                continue  # already exited
            pid = child.process.pid
            try:
                logging.debug('posix kill %d with signal %d', pid, term_signal)
                os.kill(pid, term_signal)
            except OSError as err:
                logging.error('Error encountered sending pid %d signal %d:%s\n', pid, term_signal, err)
                # NOTE(review): indentation was lost in this copy; the break
                # appears to abort signaling on error — confirm against upstream.
                break
        time.sleep(0.2)  # give children a moment to exit before escalating
    # Reap every terminated child so no zombies are left behind.
    for child in children:
        if (child.process is None):
            continue
        if (child.process.returncode is not None):
            continue
        try:
            child.process.wait()
        except OSError as e:
            # ECHILD means the child was already reaped elsewhere; ignore it.
            if (e.errno != errno.ECHILD):
                raise e
| [
"def",
"PosixShutdown",
"(",
")",
":",
"dev_process",
"=",
"GlobalProcess",
"(",
")",
"children",
"=",
"dev_process",
".",
"Children",
"(",
")",
"for",
"term_signal",
"in",
"(",
"signal",
".",
"SIGTERM",
",",
"signal",
".",
"SIGKILL",
")",
":",
"for",
"c... | kills a posix process with os . | train | false |
def _get_deleted_exploration_change_list(exploration_id):
    """Build the one-command change list that removes *exploration_id* from
    a collection."""
    delete_cmd = {
        'cmd': collection_domain.CMD_DELETE_COLLECTION_NODE,
        'exploration_id': exploration_id,
    }
    return [delete_cmd]
| [
"def",
"_get_deleted_exploration_change_list",
"(",
"exploration_id",
")",
":",
"return",
"[",
"{",
"'cmd'",
":",
"collection_domain",
".",
"CMD_DELETE_COLLECTION_NODE",
",",
"'exploration_id'",
":",
"exploration_id",
"}",
"]"
] | generates a change list for deleting an exploration from a collection . | train | false |
def allocation():
    """REST controller for budget_allocation (experimental)."""
    return s3_rest_controller()
| [
"def",
"allocation",
"(",
")",
":",
"return",
"s3_rest_controller",
"(",
")"
] | rest controller for budget_allocation @status: experimental . | train | false |
def renew_cert(config, domains, le_client, lineage):
    """Renew *lineage*'s certificate and record it as a new lineage version.

    Refuses to proceed when doing so would invalidate the lineage (server
    mismatch), defaults to the lineage's existing names, and skips storage
    updates during a dry run.
    """
    renewal_params = lineage.configuration['renewalparams']
    original_server = renewal_params.get('server', cli.flag_default('server'))
    _avoid_invalidating_lineage(config, lineage, original_server)
    if (not domains):
        domains = lineage.names()  # renew for the existing names by default
    (new_certr, new_chain, new_key, _) = le_client.obtain_certificate(domains)
    if config.dry_run:
        logger.debug('Dry run: skipping updating lineage at %s', os.path.dirname(lineage.cert))
    else:
        prior_version = lineage.latest_common_version()
        # Serialize the new certificate and chain to PEM before storing.
        new_cert = OpenSSL.crypto.dump_certificate(OpenSSL.crypto.FILETYPE_PEM, new_certr.body.wrapped)
        new_chain = crypto_util.dump_pyopenssl_chain(new_chain)
        lineage.save_successor(prior_version, new_cert, new_key.pem, new_chain, config)
        lineage.update_all_links_to(lineage.latest_common_version())
    # NOTE(review): indentation was lost in this copy; renew_hook appears to
    # run outside the else (hooks handle dry-run themselves) — confirm upstream.
    hooks.renew_hook(config, domains, lineage.live_dir)
| [
"def",
"renew_cert",
"(",
"config",
",",
"domains",
",",
"le_client",
",",
"lineage",
")",
":",
"renewal_params",
"=",
"lineage",
".",
"configuration",
"[",
"'renewalparams'",
"]",
"original_server",
"=",
"renewal_params",
".",
"get",
"(",
"'server'",
",",
"cl... | renew a certificate lineage . | train | false |
def tensordot(a, b, axes=2):
    """Generalized dot product of *a* and *b* over the given axes, delegated
    to the non-batched _tensordot_as_dot helper."""
    return _tensordot_as_dot(a, b, axes, dot=dot, batched=False)
| [
"def",
"tensordot",
"(",
"a",
",",
"b",
",",
"axes",
"=",
"2",
")",
":",
"return",
"_tensordot_as_dot",
"(",
"a",
",",
"b",
",",
"axes",
",",
"dot",
"=",
"dot",
",",
"batched",
"=",
"False",
")"
] | compute a generalized dot product over provided axes . | train | false |
def redirect_or_next(endpoint, **kwargs):
    """Redirect to the 'next' query parameter when present, else *endpoint*."""
    target = request.args.get('next') or endpoint
    return redirect(target, **kwargs)
| [
"def",
"redirect_or_next",
"(",
"endpoint",
",",
"**",
"kwargs",
")",
":",
"return",
"redirect",
"(",
"(",
"request",
".",
"args",
".",
"get",
"(",
"'next'",
")",
"or",
"endpoint",
")",
",",
"**",
"kwargs",
")"
] | redirects the user back to the page they were viewing or to a specified endpoint . | train | false |
def label_from_bin(buf):
    """Decode a packed MPLS label: return (label_value, bottom_of_stack_bit)."""
    raw = type_desc.Int3.to_user(six.binary_type(buf))
    return raw >> 4, raw & 1
| [
"def",
"label_from_bin",
"(",
"buf",
")",
":",
"mpls_label",
"=",
"type_desc",
".",
"Int3",
".",
"to_user",
"(",
"six",
".",
"binary_type",
"(",
"buf",
")",
")",
"return",
"(",
"(",
"mpls_label",
">>",
"4",
")",
",",
"(",
"mpls_label",
"&",
"1",
")",... | converts binary representation label to integer . | train | true |
def new_no_content_resp(uri, add_id=False):
    """Build a 204 No Content HTTPResponse for *uri*, optionally tagging it
    with a fresh consecutive id."""
    resp = HTTPResponse(NO_CONTENT, '', Headers(), uri, uri, msg='No Content')
    if add_id:
        resp.id = consecutive_number_generator.inc()
    return resp
| [
"def",
"new_no_content_resp",
"(",
"uri",
",",
"add_id",
"=",
"False",
")",
":",
"no_content_response",
"=",
"HTTPResponse",
"(",
"NO_CONTENT",
",",
"''",
",",
"Headers",
"(",
")",
",",
"uri",
",",
"uri",
",",
"msg",
"=",
"'No Content'",
")",
"if",
"add_... | return a new no_content httpresponse object . | train | false |
def pip_install_req_file(req_file):
    """pip-install the given requirements file quietly."""
    base_cmd = 'pip install -q --disable-pip-version-check --exists-action w'
    sh('{pip_cmd} -r {req_file}'.format(pip_cmd=base_cmd, req_file=req_file))
| [
"def",
"pip_install_req_file",
"(",
"req_file",
")",
":",
"pip_cmd",
"=",
"'pip install -q --disable-pip-version-check --exists-action w'",
"sh",
"(",
"'{pip_cmd} -r {req_file}'",
".",
"format",
"(",
"pip_cmd",
"=",
"pip_cmd",
",",
"req_file",
"=",
"req_file",
")",
")"
... | pip install the requirements file . | train | false |
@task
def bundle_certs(ctx, domain, cert_path):
    """Concatenate the Comodo/Namecheap chain files into {domain}.bundle.crt,
    leaf certificate first."""
    cert_files = [
        '{0}.crt'.format(domain),
        'COMODORSADomainValidationSecureServerCA.crt',
        'COMODORSAAddTrustCA.crt',
        'AddTrustExternalCARoot.crt',
    ]
    paths = (os.path.join(cert_path, name) for name in cert_files)
    ctx.run('cat {certs} > {domain}.bundle.crt'.format(certs=' '.join(paths), domain=domain))
| [
"@",
"task",
"def",
"bundle_certs",
"(",
"ctx",
",",
"domain",
",",
"cert_path",
")",
":",
"cert_files",
"=",
"[",
"'{0}.crt'",
".",
"format",
"(",
"domain",
")",
",",
"'COMODORSADomainValidationSecureServerCA.crt'",
",",
"'COMODORSAAddTrustCA.crt'",
",",
"'AddTru... | concatenate certificates from namecheap in the correct order . | train | false |
def fire(data, tag):
    """Fire an event on the local minion event bus; return False on error."""
    try:
        event = salt.utils.event.get_event(
            'minion',
            sock_dir=__opts__['sock_dir'],
            transport=__opts__['transport'],
            opts=__opts__,
            listen=False,
        )
        return event.fire_event(data, tag)
    except Exception:
        exc_type, exc_value, exc_traceback = sys.exc_info()
        log.debug(traceback.format_exception(exc_type, exc_value, exc_traceback))
        return False
| [
"def",
"fire",
"(",
"data",
",",
"tag",
")",
":",
"try",
":",
"event",
"=",
"salt",
".",
"utils",
".",
"event",
".",
"get_event",
"(",
"'minion'",
",",
"sock_dir",
"=",
"__opts__",
"[",
"'sock_dir'",
"]",
",",
"transport",
"=",
"__opts__",
"[",
"'tra... | fire an event on the local minion event bus . | train | true |
def show_floating_ip(kwargs=None, call=None):
    """Show the details of a floating IP (salt-cloud -f/--function only)."""
    if call != 'function':
        log.error('The show_floating_ip function must be called with -f or --function.')
        return False
    kwargs = kwargs or {}
    if 'floating_ip' not in kwargs:
        log.error('A floating IP is required.')
        return False
    floating_ip = kwargs['floating_ip']
    log.debug('Floating ip is {0}'.format(floating_ip))
    return query(method='floating_ips', command=floating_ip)
| [
"def",
"show_floating_ip",
"(",
"kwargs",
"=",
"None",
",",
"call",
"=",
"None",
")",
":",
"if",
"(",
"call",
"!=",
"'function'",
")",
":",
"log",
".",
"error",
"(",
"'The show_floating_ip function must be called with -f or --function.'",
")",
"return",
"False",
... | show the details of a floating ip . | train | true |
def encode_truncate(text, limit, encoding='utf8', return_encoded=True):
    """Encode *text* and truncate to at most *limit* bytes, dropping any
    partial trailing character.

    Returns bytes when *return_encoded* is true, else the truncated unicode
    string; None input yields None."""
    assert text is None or type(text) == six.text_type
    assert type(limit) in six.integer_types
    assert limit >= 0
    if text is None:
        return
    encoded = text.encode(encoding)
    if len(encoded) > limit:
        # Chop at the byte limit, then drop any broken trailing character.
        text = encoded[:limit].decode(encoding, 'ignore')
        encoded = text.encode(encoding)
    if return_encoded:
        return encoded
    return text
| [
"def",
"encode_truncate",
"(",
"text",
",",
"limit",
",",
"encoding",
"=",
"'utf8'",
",",
"return_encoded",
"=",
"True",
")",
":",
"assert",
"(",
"(",
"text",
"is",
"None",
")",
"or",
"(",
"type",
"(",
"text",
")",
"==",
"six",
".",
"text_type",
")",... | given a string . | train | false |
def test_collect_4():
    """Collect with respect to a symbolic power x**c."""
    a, b, c, x = symbols('a,b,c,x')
    assert collect(a*x**c + b*x**c, x**c) == x**c*(a + b)
    assert collect(a*x**(2*c) + b*x**(2*c), x**c) == x**(2*c)*(a + b)
| [
"def",
"test_collect_4",
"(",
")",
":",
"(",
"a",
",",
"b",
",",
"c",
",",
"x",
")",
"=",
"symbols",
"(",
"'a,b,c,x'",
")",
"assert",
"(",
"collect",
"(",
"(",
"(",
"a",
"*",
"(",
"x",
"**",
"c",
")",
")",
"+",
"(",
"b",
"*",
"(",
"x",
"*... | collect with respect to a power . | train | false |
def firmware_update(hosts=None, directory=''):
    """Submit a racadm firmware update for each (host, firmwarefile) pair.

    hosts      iterable of (host, firmwarefile) tuples; firmwarefile is a
               salt:// path and host the hostname passed to racadm
    directory  directory the firmware file is downloaded into

    Returns a state-style dict; 'result' is True only when every host
    succeeded, and 'changes' maps each host to its outcome.
    """
    # The original did `ret.changes = {}` (AttributeError on a dict) and keyed
    # every entry with the literal 'host', overwriting earlier results.
    ret = {'changes': {}, 'result': True}
    for host, firmwarefile in hosts or ():
        try:
            _firmware_update(firmwarefile, host, directory)
            ret['changes'][host] = {
                'comment': 'Firmware update submitted for {0}'.format(host),
                'success': True,
            }
        except CommandExecutionError as err:
            ret['result'] = False
            ret['changes'][host] = {
                'comment': 'FAILED to update firmware for {0}'.format(host),
                'success': False,
                'reason': str(err),
            }
    return ret
| [
"def",
"firmware_update",
"(",
"hosts",
"=",
"None",
",",
"directory",
"=",
"''",
")",
":",
"ret",
"=",
"{",
"}",
"ret",
".",
"changes",
"=",
"{",
"}",
"success",
"=",
"True",
"for",
"(",
"host",
",",
"firmwarefile",
")",
"in",
"hosts",
":",
"try",... | state to update the firmware on host using the racadm command firmwarefile filename starting with salt:// host string representing the hostname supplied to the racadm command directory directory name where firmwarefile will be downloaded . | train | true |
def medfilt(volume, kernel_size=None):
    """Perform an N-dimensional median filter on *volume*.

    kernel_size gives the (odd) window extent per axis; a scalar is
    broadcast to every axis, and the default is 3 everywhere."""
    volume = atleast_1d(volume)
    if kernel_size is None:
        kernel_size = [3] * volume.ndim
    kernel_size = asarray(kernel_size)
    if kernel_size.shape == ():
        kernel_size = np.repeat(kernel_size.item(), volume.ndim)
    for extent in kernel_size:
        if extent % 2 != 1:
            raise ValueError('Each element of kernel_size should be odd.')
    domain = ones(kernel_size)
    numels = product(kernel_size, axis=0)
    rank = numels // 2  # the median is the middle order statistic
    return sigtools._order_filterND(volume, domain, rank)
| [
"def",
"medfilt",
"(",
"volume",
",",
"kernel_size",
"=",
"None",
")",
":",
"volume",
"=",
"atleast_1d",
"(",
"volume",
")",
"if",
"(",
"kernel_size",
"is",
"None",
")",
":",
"kernel_size",
"=",
"(",
"[",
"3",
"]",
"*",
"volume",
".",
"ndim",
")",
... | perform a median filter on an n-dimensional array . | train | false |
22,257 | def _CopyFieldExpressionToProtocolBuffer(field_expression, pb):
pb.set_name(field_expression.name.encode('utf-8'))
pb.set_expression(field_expression.expression.encode('utf-8'))
| [
"def",
"_CopyFieldExpressionToProtocolBuffer",
"(",
"field_expression",
",",
"pb",
")",
":",
"pb",
".",
"set_name",
"(",
"field_expression",
".",
"name",
".",
"encode",
"(",
"'utf-8'",
")",
")",
"pb",
".",
"set_expression",
"(",
"field_expression",
".",
"express... | copies fieldexpression to a search_service_pb . | train | false |
def register_msgpack():
    """Register the msgpack serializer with the kombu registry.

    When msgpack is missing or older than 0.4.0, stub pack/unpack callables
    are registered instead that raise SerializerNotInstalled on use.
    """
    pack = unpack = None
    try:
        import msgpack
        if (msgpack.version >= (0, 4)):
            from msgpack import packb, unpackb

            # use_bin_type/encoding keep bytes and text distinct on the wire.
            def pack(s):
                return packb(s, use_bin_type=True)

            def unpack(s):
                return unpackb(s, encoding=u'utf-8')
        else:
            # Too old: registering a raising stub gives a clear error at use time.
            def version_mismatch(*args, **kwargs):
                raise SerializerNotInstalled(u'msgpack requires msgpack-python >= 0.4.0')
            pack = unpack = version_mismatch
    except (ImportError, ValueError):
        def not_available(*args, **kwargs):
            raise SerializerNotInstalled(u'No decoder installed for msgpack. Please install the msgpack-python library')
        pack = unpack = not_available
    registry.register(u'msgpack', pack, unpack, content_type=u'application/x-msgpack', content_encoding=u'binary')
| [
"def",
"register_msgpack",
"(",
")",
":",
"pack",
"=",
"unpack",
"=",
"None",
"try",
":",
"import",
"msgpack",
"if",
"(",
"msgpack",
".",
"version",
">=",
"(",
"0",
",",
"4",
")",
")",
":",
"from",
"msgpack",
"import",
"packb",
",",
"unpackb",
"def",... | register msgpack serializer . | train | false |
def decode_thumbnail(tup):
    """Decode an encoded thumbnail triple (w, h, base64-data) into
    (w, h, raw-bytes); None passes through."""
    if tup is None:
        return None
    width, height, data = tup[0], tup[1], tup[2]
    return (width, height, b64decode(data))
| [
"def",
"decode_thumbnail",
"(",
"tup",
")",
":",
"if",
"(",
"tup",
"is",
"None",
")",
":",
"return",
"None",
"return",
"(",
"tup",
"[",
"0",
"]",
",",
"tup",
"[",
"1",
"]",
",",
"b64decode",
"(",
"tup",
"[",
"2",
"]",
")",
")"
] | decode an encoded thumbnail into its 3 component parts . | train | false |
def codecName(encoding):
    """Return the Python codec name for *encoding*, or None when it is not
    a recognized string encoding (Python 2 string types only)."""
    if encoding is None or type(encoding) not in types.StringTypes:
        return None
    canonical = ascii_punctuation_re.sub('', encoding).lower()
    return encodings.get(canonical, None)
| [
"def",
"codecName",
"(",
"encoding",
")",
":",
"if",
"(",
"(",
"encoding",
"is",
"not",
"None",
")",
"and",
"(",
"type",
"(",
"encoding",
")",
"in",
"types",
".",
"StringTypes",
")",
")",
":",
"canonicalName",
"=",
"ascii_punctuation_re",
".",
"sub",
"... | return the python codec name corresponding to an encoding or none if the string doesnt correspond to a valid encoding . | train | false |
def test_get_filter():
    """Filter resolution: by name, class, instance, and callable, with
    kwargs allowed only for name-based lookup."""
    assert isinstance(get_filter('jsmin'), Filter)
    assert_raises(ValueError, get_filter, 'notafilteractually')

    class MyFilter(Filter):
        pass

    assert isinstance(get_filter(MyFilter), MyFilter)
    assert_raises(ValueError, get_filter, object())
    instance = MyFilter()
    assert get_filter(instance) is instance
    assert hasattr(get_filter(lambda: None), 'output')
    assert get_filter('sass', scss=True).use_scss == True
    # kwargs are rejected unless resolving by name
    assert_raises(AssertionError, get_filter, instance, 'test')
    assert_raises(AssertionError, get_filter, lambda: None, 'test')
| [
"def",
"test_get_filter",
"(",
")",
":",
"assert",
"isinstance",
"(",
"get_filter",
"(",
"'jsmin'",
")",
",",
"Filter",
")",
"assert_raises",
"(",
"ValueError",
",",
"get_filter",
",",
"'notafilteractually'",
")",
"class",
"MyFilter",
"(",
"Filter",
",",
")",
... | test filter resolving . | train | false |
def CreateInteractiveWindow(makeDoc=None, makeFrame=None):
    """Create the single interactive window, docked or MDI per preference,
    and focus its view."""
    assert edit is None, 'Creating second interactive window!'
    if LoadPreference('Docking', 0):
        CreateDockedInteractiveWindow()
    else:
        CreateMDIInteractiveWindow(makeDoc, makeFrame)
    assert edit is not None, 'Created interactive window, but did not set the global!'
    edit.currentView.SetFocus()
| [
"def",
"CreateInteractiveWindow",
"(",
"makeDoc",
"=",
"None",
",",
"makeFrame",
"=",
"None",
")",
":",
"assert",
"(",
"edit",
"is",
"None",
")",
",",
"'Creating second interactive window!'",
"bDocking",
"=",
"LoadPreference",
"(",
"'Docking'",
",",
"0",
")",
... | create a standard or docked interactive window unconditionally . | train | false |
def touch_init(parent_dir, child_dir):
    """Create an empty __init__.py in every directory from *parent_dir*
    (exclusive) down to *child_dir* (inclusive).

    Logs an error and does nothing when *child_dir* is not inside
    *parent_dir*.
    """
    # Compare on a path-component boundary so e.g. '/a/bc' is not treated
    # as a child of '/a/b' (a plain startswith check would accept it).
    boundary = parent_dir.rstrip(os.path.sep) + os.path.sep
    if child_dir != parent_dir and not child_dir.startswith(boundary):
        logging.error('%s is not a subdirectory of %s' % (child_dir, parent_dir))
        return
    parts = parent_dir.split(os.path.sep)
    for sub_dir in child_dir.split(os.path.sep)[len(parts):]:
        parts.append(sub_dir)
        init_py = os.path.join(os.path.sep.join(parts), '__init__.py')
        # 'a' mode creates the file if absent without clobbering content.
        with open(init_py, 'a'):
            pass
| [
"def",
"touch_init",
"(",
"parent_dir",
",",
"child_dir",
")",
":",
"if",
"(",
"not",
"child_dir",
".",
"startswith",
"(",
"parent_dir",
")",
")",
":",
"logging",
".",
"error",
"(",
"(",
"'%s is not a subdirectory of %s'",
"%",
"(",
"child_dir",
",",
"parent... | touch __init__ . | train | false |
def findline(lines, start):
    """Return the index of the first line starting with *start*, or -1.

    Uses str.startswith/enumerate instead of manual slicing and indexing.
    """
    for index, line in enumerate(lines):
        if line.startswith(start):
            return index
    return -1
| [
"def",
"findline",
"(",
"lines",
",",
"start",
")",
":",
"for",
"i",
"in",
"range",
"(",
"len",
"(",
"lines",
")",
")",
":",
"if",
"(",
"lines",
"[",
"i",
"]",
"[",
":",
"len",
"(",
"start",
")",
"]",
"==",
"start",
")",
":",
"return",
"i",
... | return line starting with given string or -1 . | train | false |
def modified_precision(references, hypothesis, n):
    """Calculate BLEU's modified n-gram precision as an unnormalized Fraction.

    Each hypothesis n-gram count is clipped by the maximum count of that
    n-gram over all references."""
    counts = Counter(ngrams(hypothesis, n)) if len(hypothesis) >= n else Counter()
    max_counts = {}
    for reference in references:
        if len(reference) >= n:
            reference_counts = Counter(ngrams(reference, n))
        else:
            reference_counts = Counter()
        for ngram in counts:
            max_counts[ngram] = max(max_counts.get(ngram, 0), reference_counts[ngram])
    clipped = {ngram: min(count, max_counts[ngram]) for ngram, count in counts.items()}
    numerator = sum(clipped.values())
    denominator = max(1, sum(counts.values()))
    # _normalize=False keeps numerator/denominator un-reduced for later weighting.
    return Fraction(numerator, denominator, _normalize=False)
| [
"def",
"modified_precision",
"(",
"references",
",",
"hypothesis",
",",
"n",
")",
":",
"counts",
"=",
"(",
"Counter",
"(",
"ngrams",
"(",
"hypothesis",
",",
"n",
")",
")",
"if",
"(",
"len",
"(",
"hypothesis",
")",
">=",
"n",
")",
"else",
"Counter",
"... | calculate modified ngram precision . | train | false |
def GetMapiTypeName(propType):
    """Return the PT_* symbolic name for a MAPI property type, or its hex
    string when unknown; the lookup table is built lazily from mapitags."""
    if not ptTable:
        # Populate the module-level cache on first use.
        for name, value in mapitags.__dict__.iteritems():
            if name.startswith('PT_'):
                ptTable[value] = name
    rawType = propType & ~mapitags.MV_FLAG  # strip the multi-value flag
    return ptTable.get(rawType, str(hex(rawType)))
| [
"def",
"GetMapiTypeName",
"(",
"propType",
")",
":",
"if",
"(",
"not",
"ptTable",
")",
":",
"for",
"(",
"name",
",",
"value",
")",
"in",
"mapitags",
".",
"__dict__",
".",
"iteritems",
"(",
")",
":",
"if",
"(",
"name",
"[",
":",
"3",
"]",
"==",
"'... | given a mapi type flag . | train | false |
def resetwarnings():
    """Reset warning behavior to testing defaults by clearing all filters
    in place."""
    del filters[:]
| [
"def",
"resetwarnings",
"(",
")",
":",
"filters",
"[",
":",
"]",
"=",
"[",
"]"
] | reset warning behavior to testing defaults . | train | false |
def update_user_email(user, new_email):
    """Validate *new_email* (uniqueness, then format) and persist it on
    *user*."""
    validate_email_unique(new_email)
    validate_email(new_email)
    user.email = new_email
    user.save()
| [
"def",
"update_user_email",
"(",
"user",
",",
"new_email",
")",
":",
"validate_email_unique",
"(",
"new_email",
")",
"validate_email",
"(",
"new_email",
")",
"user",
".",
"email",
"=",
"new_email",
"user",
".",
"save",
"(",
")"
] | adds/updates sailthru when a user email address is changed args: username: a string representation of user identifier old_email: original email address returns: none . | train | false |
def ListInstancesDNS(region, instances=None, node_types=None, states=None, names=None):
    """Return the public DNS names of instances matching the filter arguments.

    Filter arguments default to "no filter"; passing explicit lists works
    exactly as before.
    """
    # Avoid shared mutable default arguments ([] defaults persist across calls).
    node_types = [] if node_types is None else node_types
    states = [] if states is None else states
    names = [] if names is None else names
    matched = ListInstances(region, instances=instances, node_types=node_types,
                            states=states, names=names)
    return [inst.public_dns_name for inst in matched]
| [
"def",
"ListInstancesDNS",
"(",
"region",
",",
"instances",
"=",
"None",
",",
"node_types",
"=",
"[",
"]",
",",
"states",
"=",
"[",
"]",
",",
"names",
"=",
"[",
"]",
")",
":",
"return",
"[",
"i",
".",
"public_dns_name",
"for",
"i",
"in",
"ListInstanc... | return a list of dns names for instances matching the arguments . | train | false |
def enum_as_sequence(enum):
    """Return a list of the names of *enum*'s constants.

    Excludes dunder attributes and the Thrift _VALUES_TO_NAMES /
    _NAMES_TO_VALUES maps.  Returns a concrete list — the original
    ``filter(...)`` is a lazy iterator on Python 3, although the function
    is documented to return an array.
    """
    excluded = ('_VALUES_TO_NAMES', '_NAMES_TO_VALUES')
    return [name for name in dir(enum)
            if not name.startswith('__') and name not in excluded]
| [
"def",
"enum_as_sequence",
"(",
"enum",
")",
":",
"return",
"filter",
"(",
"(",
"lambda",
"x",
":",
"(",
"(",
"not",
"x",
".",
"startswith",
"(",
"'__'",
")",
")",
"and",
"(",
"x",
"not",
"in",
"[",
"'_VALUES_TO_NAMES'",
",",
"'_NAMES_TO_VALUES'",
"]",... | returns an array whose entries are the names of the enums constants . | train | false |
@pytest.mark.skipif(u'not HAS_SCIPY')
def test_poisson_conf_frequentist_confidence_gehrels_2sigma():
    """Check 2-sigma frequentist-confidence intervals against the values
    tabulated in Gehrels 1986."""
    nlh = np.array([
        (0, 2, 0, 3.783), (1, 2, 0.023, 5.683), (2, 2, 0.23, 7.348),
        (3, 2, 0.596, 8.902), (4, 2, 1.058, 10.39), (5, 2, 1.583, 11.82),
        (6, 2, 2.153, 13.22), (7, 2, 2.758, 14.59), (8, 2, 3.391, 15.94),
        (9, 2, 4.046, 17.27), (10, 2, 4.719, 18.58),
    ])
    computed = funcs.poisson_conf_interval(
        nlh[:, 0], sigma=2, interval=u'frequentist-confidence').T
    assert_allclose(computed, nlh[:, 2:], rtol=0.01)
| [
"@",
"pytest",
".",
"mark",
".",
"skipif",
"(",
"u'not HAS_SCIPY'",
")",
"def",
"test_poisson_conf_frequentist_confidence_gehrels_2sigma",
"(",
")",
":",
"nlh",
"=",
"np",
".",
"array",
"(",
"[",
"(",
"0",
",",
"2",
",",
"0",
",",
"3.783",
")",
",",
"(",... | test intervals against those published in gehrels 1986 note: i think theres a typo in gehrels 1986 . | train | false |
def format_device(dev):
    """Format a ring device dict for display, bracketing IPv6 addresses."""
    shown = dev.copy()  # never mutate the caller's dict
    for key in ('ip', 'replication_ip'):
        if ':' in shown[key]:
            shown[key] = '[%s]' % shown[key]
    template = ('d%(id)sr%(region)sz%(zone)s-%(ip)s:%(port)s'
                'R%(replication_ip)s:%(replication_port)s/%(device)s_"%(meta)s"')
    return template % shown
| [
"def",
"format_device",
"(",
"dev",
")",
":",
"copy_dev",
"=",
"dev",
".",
"copy",
"(",
")",
"for",
"key",
"in",
"(",
"'ip'",
",",
"'replication_ip'",
")",
":",
"if",
"(",
"':'",
"in",
"copy_dev",
"[",
"key",
"]",
")",
":",
"copy_dev",
"[",
"key",
... | format a device for display . | train | false |
22,279 | def _get_repo_options_env(env):
env_options = ''
if (env is None):
return env_options
if (not isinstance(env, dict)):
raise SaltInvocationError("'env' must be a Python dictionary")
for (key, value) in env.items():
if (key == 'OPTIONS'):
env_options += '{0}\n'.format(value)
return env_options
| [
"def",
"_get_repo_options_env",
"(",
"env",
")",
":",
"env_options",
"=",
"''",
"if",
"(",
"env",
"is",
"None",
")",
":",
"return",
"env_options",
"if",
"(",
"not",
"isinstance",
"(",
"env",
",",
"dict",
")",
")",
":",
"raise",
"SaltInvocationError",
"("... | get repo environment overrides dictionary to use in repo options process env a dictionary of variables to define the repository options example: . | train | true |
def close_on_error(read_meth):
    """Decorator: wrap a read method so the instance is closed (and the
    error re-raised) when an httplib.HTTPException escapes."""
    def new_read_meth(inst):
        try:
            return read_meth(inst)
        except httplib.HTTPException:
            inst.close()  # release the connection before propagating
            raise
    return new_read_meth
| [
"def",
"close_on_error",
"(",
"read_meth",
")",
":",
"def",
"new_read_meth",
"(",
"inst",
")",
":",
"try",
":",
"return",
"read_meth",
"(",
"inst",
")",
"except",
"httplib",
".",
"HTTPException",
":",
"inst",
".",
"close",
"(",
")",
"raise",
"return",
"n... | decorator function . | train | false |
def f64_as_int64(builder, val):
    """Bitcast a double SSA value into a 64-bit integer."""
    assert val.type == Type.double()
    return builder.bitcast(val, Type.int(64))
| [
"def",
"f64_as_int64",
"(",
"builder",
",",
"val",
")",
":",
"assert",
"(",
"val",
".",
"type",
"==",
"Type",
".",
"double",
"(",
")",
")",
"return",
"builder",
".",
"bitcast",
"(",
"val",
",",
"Type",
".",
"int",
"(",
"64",
")",
")"
] | bitcast a double into a 64-bit integer . | train | false |
def _wireNameToPythonIdentifier(key):
    """Map a wire argument name to a safe Python identifier: dashes become
    underscores and Python keywords are title-cased."""
    candidate = nativeString(key.replace('-', '_'))
    if candidate in PYTHON_KEYWORDS:
        return candidate.title()
    return candidate
| [
"def",
"_wireNameToPythonIdentifier",
"(",
"key",
")",
":",
"lkey",
"=",
"nativeString",
"(",
"key",
".",
"replace",
"(",
"'-'",
",",
"'_'",
")",
")",
"if",
"(",
"lkey",
"in",
"PYTHON_KEYWORDS",
")",
":",
"return",
"lkey",
".",
"title",
"(",
")",
"retu... | normalize an argument name from the wire for use with python code . | train | false |
22,283 | def upload_pyspark_file(project_id, bucket_name, filename, file):
print 'Uploading pyspark file to GCS'
client = storage.Client(project=project_id)
bucket = client.get_bucket(bucket_name)
blob = bucket.blob(filename)
blob.upload_from_file(file)
| [
"def",
"upload_pyspark_file",
"(",
"project_id",
",",
"bucket_name",
",",
"filename",
",",
"file",
")",
":",
"print",
"'Uploading pyspark file to GCS'",
"client",
"=",
"storage",
".",
"Client",
"(",
"project",
"=",
"project_id",
")",
"bucket",
"=",
"client",
"."... | uploads the pyspark file in this directory to the configured input bucket . | train | false |
def _to_unicode(var):
    """Best-effort conversion of a byte string to unicode (Python 2).

    Tries plain unicode(), then utf-8, latin-1, the configured system
    encoding, a chardet-detected encoding, and finally the system encoding
    with 'replace' so the call never raises.  Non-str input is returned
    unchanged.
    """
    if isinstance(var, str):
        try:
            var = unicode(var)
        except Exception:
            try:
                var = unicode(var, 'utf-8')
            except Exception:
                try:
                    var = unicode(var, 'latin-1')
                except Exception:
                    try:
                        var = unicode(var, sickbeard.SYS_ENCODING)
                    except Exception:
                        try:
                            # chardet's guess is the last real attempt
                            var = unicode(var, detect(var).get('encoding'))
                        except Exception:
                            # 'replace' guarantees success at the cost of U+FFFDs
                            var = unicode(var, sickbeard.SYS_ENCODING, 'replace')
    return var
| [
"def",
"_to_unicode",
"(",
"var",
")",
":",
"if",
"isinstance",
"(",
"var",
",",
"str",
")",
":",
"try",
":",
"var",
"=",
"unicode",
"(",
"var",
")",
"except",
"Exception",
":",
"try",
":",
"var",
"=",
"unicode",
"(",
"var",
",",
"'utf-8'",
")",
... | provides the unicode string for the given ascii bytes . | train | false |
def deform(image, deformer, resample=Image.BILINEAR):
    """Deform *image* using the mesh produced by deformer.getmesh()."""
    mesh = deformer.getmesh(image)
    return image.transform(image.size, Image.MESH, mesh, resample)
| [
"def",
"deform",
"(",
"image",
",",
"deformer",
",",
"resample",
"=",
"Image",
".",
"BILINEAR",
")",
":",
"return",
"image",
".",
"transform",
"(",
"image",
".",
"size",
",",
"Image",
".",
"MESH",
",",
"deformer",
".",
"getmesh",
"(",
"image",
")",
"... | deform the image . | train | false |
def parse_fields(fields_string):
    """Parse a fields string into a tuple; 'section(a,b)' groups become
    {'section': ('a', 'b')} dicts."""
    parsed = []
    for chunk in FIELD_DELIMITERS_RE.split(fields_string):
        if '(' not in chunk:
            parsed.append(chunk)
            continue
        section, subfields = chunk.split('(')
        # subfields still carries the closing ')'; strip it before splitting.
        parsed.append({section: tuple(subfields[:-1].split(','))})
    return tuple(parsed)
| [
"def",
"parse_fields",
"(",
"fields_string",
")",
":",
"fields",
"=",
"[",
"]",
"for",
"main_field",
"in",
"FIELD_DELIMITERS_RE",
".",
"split",
"(",
"fields_string",
")",
":",
"if",
"(",
"'('",
"not",
"in",
"main_field",
")",
":",
"fields",
".",
"append",
... | converts a fields string to a tuple . | train | false |
def posix_to_dt_str(posix):
    """Convert a POSIX timestamp to the GCS datetime string format
    (UTC, milliseconds fixed at .000Z); reverse of str_to_datetime."""
    dt_str = datetime.datetime.utcfromtimestamp(posix).strftime(_DT_FORMAT)
    return dt_str + '.000Z'
| [
"def",
"posix_to_dt_str",
"(",
"posix",
")",
":",
"dt",
"=",
"datetime",
".",
"datetime",
".",
"utcfromtimestamp",
"(",
"posix",
")",
"dt_str",
"=",
"dt",
".",
"strftime",
"(",
"_DT_FORMAT",
")",
"return",
"(",
"dt_str",
"+",
"'.000Z'",
")"
] | reverse of str_to_datetime . | train | true |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.