id_within_dataset int64 1 55.5k | snippet stringlengths 19 14.2k | tokens listlengths 6 1.63k | nl stringlengths 6 352 | split_within_dataset stringclasses 1
value | is_duplicated bool 2
classes |
|---|---|---|---|---|---|
43,484 | def notification_from_headers(channel, headers):
headers = _upper_header_keys(headers)
channel_id = headers[X_GOOG_CHANNEL_ID]
if (channel.id != channel_id):
raise errors.InvalidNotificationError(('Channel id mismatch: %s != %s' % (channel.id, channel_id)))
else:
message_number = int(headers[X_GOOG_MESSAGE_NUMBER])
state = headers[X_GOOG_RESOURCE_STATE]
resource_uri = headers[X_GOOG_RESOURCE_URI]
resource_id = headers[X_GOOG_RESOURCE_ID]
return Notification(message_number, state, resource_uri, resource_id)
| [
"def",
"notification_from_headers",
"(",
"channel",
",",
"headers",
")",
":",
"headers",
"=",
"_upper_header_keys",
"(",
"headers",
")",
"channel_id",
"=",
"headers",
"[",
"X_GOOG_CHANNEL_ID",
"]",
"if",
"(",
"channel",
".",
"id",
"!=",
"channel_id",
")",
":",... | parse a notification from the webhook request headers . | train | false |
43,485 | @task(aliases=['celery'])
def celery_worker(ctx, level='debug', hostname=None, beat=False):
os.environ['DJANGO_SETTINGS_MODULE'] = 'api.base.settings'
cmd = 'celery worker -A framework.celery_tasks -l {0}'.format(level)
if hostname:
cmd = (cmd + ' --hostname={}'.format(hostname))
if beat:
cmd = (cmd + ' --beat')
ctx.run(bin_prefix(cmd), pty=True)
| [
"@",
"task",
"(",
"aliases",
"=",
"[",
"'celery'",
"]",
")",
"def",
"celery_worker",
"(",
"ctx",
",",
"level",
"=",
"'debug'",
",",
"hostname",
"=",
"None",
",",
"beat",
"=",
"False",
")",
":",
"os",
".",
"environ",
"[",
"'DJANGO_SETTINGS_MODULE'",
"]"... | run the celery process . | train | false |
43,486 | def check_vcs_conflict():
try:
ok = True
for fn in _get_files(only_py=True):
with tokenize.open(fn) as f:
for line in f:
if any((line.startswith((c * 7)) for c in '<>=|')):
print 'Found conflict marker in {}'.format(fn)
ok = False
print ()
return ok
except Exception:
traceback.print_exc()
return None
| [
"def",
"check_vcs_conflict",
"(",
")",
":",
"try",
":",
"ok",
"=",
"True",
"for",
"fn",
"in",
"_get_files",
"(",
"only_py",
"=",
"True",
")",
":",
"with",
"tokenize",
".",
"open",
"(",
"fn",
")",
"as",
"f",
":",
"for",
"line",
"in",
"f",
":",
"if... | check vcs conflict markers . | train | false |
43,487 | def ip_string_to_num(s):
return reduce((lambda a, b: ((a << 8) | b)), map(int, s.split('.')))
| [
"def",
"ip_string_to_num",
"(",
"s",
")",
":",
"return",
"reduce",
"(",
"(",
"lambda",
"a",
",",
"b",
":",
"(",
"(",
"a",
"<<",
"8",
")",
"|",
"b",
")",
")",
",",
"map",
"(",
"int",
",",
"s",
".",
"split",
"(",
"'.'",
")",
")",
")"
] | convert dotted ipv4 address to integer . | train | false |
43,488 | def publish_course(course):
store = modulestore()
with store.branch_setting(ModuleStoreEnum.Branch.draft_preferred, course.id):
store.publish(course.location, ModuleStoreEnum.UserID.test)
| [
"def",
"publish_course",
"(",
"course",
")",
":",
"store",
"=",
"modulestore",
"(",
")",
"with",
"store",
".",
"branch_setting",
"(",
"ModuleStoreEnum",
".",
"Branch",
".",
"draft_preferred",
",",
"course",
".",
"id",
")",
":",
"store",
".",
"publish",
"("... | helper method to publish the course . | train | false |
43,489 | @register.tag
def get_obj_perms(parser, token):
bits = token.split_contents()
format = u'{% get_obj_perms user/group for obj as "context_var" %}'
if ((len(bits) != 6) or (bits[2] != u'for') or (bits[4] != u'as')):
raise template.TemplateSyntaxError((u'get_obj_perms tag should be in format: %s' % format))
for_whom = bits[1]
obj = bits[3]
context_var = bits[5]
if ((context_var[0] != context_var[(-1)]) or (context_var[0] not in (u'"', u"'"))):
raise template.TemplateSyntaxError(u"get_obj_perms tag's context_var argument should be in quotes")
context_var = context_var[1:(-1)]
return ObjectPermissionsNode(for_whom, obj, context_var)
| [
"@",
"register",
".",
"tag",
"def",
"get_obj_perms",
"(",
"parser",
",",
"token",
")",
":",
"bits",
"=",
"token",
".",
"split_contents",
"(",
")",
"format",
"=",
"u'{% get_obj_perms user/group for obj as \"context_var\" %}'",
"if",
"(",
"(",
"len",
"(",
"bits",
... | returns a list of permissions for a given user/group and obj . | train | false |
43,490 | def test_load_strings_defaults_to_english():
locale.getdefaultlocale = (lambda : ('en_US', 'UTF-8'))
strings.load_strings(helpers)
assert (strings._('wait_for_hs') == 'Waiting for HS to be ready:')
| [
"def",
"test_load_strings_defaults_to_english",
"(",
")",
":",
"locale",
".",
"getdefaultlocale",
"=",
"(",
"lambda",
":",
"(",
"'en_US'",
",",
"'UTF-8'",
")",
")",
"strings",
".",
"load_strings",
"(",
"helpers",
")",
"assert",
"(",
"strings",
".",
"_",
"(",... | load_strings() loads english by default . | train | false |
43,491 | @contextmanager
def xml_writer(response, fields, name=None, bom=False):
if hasattr(response, u'headers'):
response.headers['Content-Type'] = 'text/xml; charset=utf-8'
if name:
response.headers['Content-disposition'] = 'attachment; filename="{name}.xml"'.format(name=encode_rfc2231(name))
if bom:
response.write(UTF8_BOM)
response.write('<data>\n')
(yield XMLWriter(response, [f['id'] for f in fields]))
response.write('</data>\n')
| [
"@",
"contextmanager",
"def",
"xml_writer",
"(",
"response",
",",
"fields",
",",
"name",
"=",
"None",
",",
"bom",
"=",
"False",
")",
":",
"if",
"hasattr",
"(",
"response",
",",
"u'headers'",
")",
":",
"response",
".",
"headers",
"[",
"'Content-Type'",
"]... | context manager for writing utf-8 xml data to response . | train | false |
43,492 | def remove_users(caller, role, *users):
if (not ((len(users) == 1) and (caller == users[0]))):
_check_caller_authority(caller, role)
role.remove_users(*users)
| [
"def",
"remove_users",
"(",
"caller",
",",
"role",
",",
"*",
"users",
")",
":",
"if",
"(",
"not",
"(",
"(",
"len",
"(",
"users",
")",
"==",
"1",
")",
"and",
"(",
"caller",
"==",
"users",
"[",
"0",
"]",
")",
")",
")",
":",
"_check_caller_authority... | the caller requests removing the given users from the role . | train | false |
43,493 | def _is_statement_unnamed(statement):
return (statement in ('log', 'channel', 'junction', 'options'))
| [
"def",
"_is_statement_unnamed",
"(",
"statement",
")",
":",
"return",
"(",
"statement",
"in",
"(",
"'log'",
",",
"'channel'",
",",
"'junction'",
",",
"'options'",
")",
")"
] | returns true . | train | false |
43,494 | def add_vlan_binding(vlanid, vlanname, netid):
LOG.debug(_('add_vlan_binding() called'))
session = db.get_session()
try:
binding = session.query(network_models_v2.Vlan_Binding).filter_by(vlan_id=vlanid).one()
raise c_exc.NetworkVlanBindingAlreadyExists(vlan_id=vlanid, network_id=netid)
except exc.NoResultFound:
binding = network_models_v2.Vlan_Binding(vlanid, vlanname, netid)
session.add(binding)
session.flush()
return binding
| [
"def",
"add_vlan_binding",
"(",
"vlanid",
",",
"vlanname",
",",
"netid",
")",
":",
"LOG",
".",
"debug",
"(",
"_",
"(",
"'add_vlan_binding() called'",
")",
")",
"session",
"=",
"db",
".",
"get_session",
"(",
")",
"try",
":",
"binding",
"=",
"session",
"."... | adds a vlan to network association . | train | false |
43,496 | def load_data_str(rel_path):
full_path = ((path(__file__).abspath().dirname() / 'data') / rel_path)
with open(full_path) as data_file:
return data_file.read()
| [
"def",
"load_data_str",
"(",
"rel_path",
")",
":",
"full_path",
"=",
"(",
"(",
"path",
"(",
"__file__",
")",
".",
"abspath",
"(",
")",
".",
"dirname",
"(",
")",
"/",
"'data'",
")",
"/",
"rel_path",
")",
"with",
"open",
"(",
"full_path",
")",
"as",
... | load a file from the "data" directory as a string . | train | false |
43,497 | @contextlib.contextmanager
def redirect_fd(fd):
save = os.dup(fd)
(r, w) = os.pipe()
try:
os.dup2(w, fd)
(yield io.open(r, 'r'))
finally:
os.close(w)
os.dup2(save, fd)
os.close(save)
| [
"@",
"contextlib",
".",
"contextmanager",
"def",
"redirect_fd",
"(",
"fd",
")",
":",
"save",
"=",
"os",
".",
"dup",
"(",
"fd",
")",
"(",
"r",
",",
"w",
")",
"=",
"os",
".",
"pipe",
"(",
")",
"try",
":",
"os",
".",
"dup2",
"(",
"w",
",",
"fd",... | temporarily redirect *fd* to a pipes write end and return a file object wrapping the pipes read end . | train | false |
43,499 | def check_resource_update(rsrc, template_id, resource_data, engine_id, stack, msg_queue):
check_message = functools.partial(_check_for_message, msg_queue)
if (rsrc.action == resource.Resource.INIT):
rsrc.create_convergence(template_id, resource_data, engine_id, stack.time_remaining(), check_message)
else:
rsrc.update_convergence(template_id, resource_data, engine_id, stack.time_remaining(), stack, check_message)
| [
"def",
"check_resource_update",
"(",
"rsrc",
",",
"template_id",
",",
"resource_data",
",",
"engine_id",
",",
"stack",
",",
"msg_queue",
")",
":",
"check_message",
"=",
"functools",
".",
"partial",
"(",
"_check_for_message",
",",
"msg_queue",
")",
"if",
"(",
"... | create or update the resource if appropriate . | train | false |
43,500 | def xfail(reason=''):
__tracebackhide__ = True
raise XFailed(reason)
| [
"def",
"xfail",
"(",
"reason",
"=",
"''",
")",
":",
"__tracebackhide__",
"=",
"True",
"raise",
"XFailed",
"(",
"reason",
")"
] | xfail an executing test or setup functions with the given reason . | train | false |
43,502 | @cache_permission
def can_edit_subproject(user, project):
return check_permission(user, project, 'trans.change_subproject')
| [
"@",
"cache_permission",
"def",
"can_edit_subproject",
"(",
"user",
",",
"project",
")",
":",
"return",
"check_permission",
"(",
"user",
",",
"project",
",",
"'trans.change_subproject'",
")"
] | checks whether user can edit subprojects on given project . | train | false |
43,503 | def has_sub_tasks(task):
if istask(task):
return True
elif isinstance(task, list):
return any((has_sub_tasks(i) for i in task))
else:
return False
| [
"def",
"has_sub_tasks",
"(",
"task",
")",
":",
"if",
"istask",
"(",
"task",
")",
":",
"return",
"True",
"elif",
"isinstance",
"(",
"task",
",",
"list",
")",
":",
"return",
"any",
"(",
"(",
"has_sub_tasks",
"(",
"i",
")",
"for",
"i",
"in",
"task",
"... | returns true if the task has sub tasks . | train | false |
43,504 | def create_catalog_api_client(user, catalog_integration):
scopes = ['email', 'profile']
expires_in = settings.OAUTH_ID_TOKEN_EXPIRATION
jwt = JwtBuilder(user).build_token(scopes, expires_in)
return EdxRestApiClient(catalog_integration.internal_api_url, jwt=jwt)
| [
"def",
"create_catalog_api_client",
"(",
"user",
",",
"catalog_integration",
")",
":",
"scopes",
"=",
"[",
"'email'",
",",
"'profile'",
"]",
"expires_in",
"=",
"settings",
".",
"OAUTH_ID_TOKEN_EXPIRATION",
"jwt",
"=",
"JwtBuilder",
"(",
"user",
")",
".",
"build_... | returns an api client which can be used to make catalog api requests . | train | false |
43,505 | def __get_image(conn, vm_):
img = config.get_cloud_config_value('image', vm_, __opts__, default='debian-7', search_global=False)
return conn.ex_get_image(img)
| [
"def",
"__get_image",
"(",
"conn",
",",
"vm_",
")",
":",
"img",
"=",
"config",
".",
"get_cloud_config_value",
"(",
"'image'",
",",
"vm_",
",",
"__opts__",
",",
"default",
"=",
"'debian-7'",
",",
"search_global",
"=",
"False",
")",
"return",
"conn",
".",
... | the get_image for gce allows partial name matching and returns a libcloud object . | train | true |
43,506 | def init_params(options, preemb=None):
params = OrderedDict()
if (preemb == None):
params['Wemb'] = norm_weight(options['n_words'], options['dim_word'])
else:
params['Wemb'] = preemb
params = get_layer('ff')[0](options, params, prefix='ff_state', nin=options['dimctx'], nout=options['dim'])
params = get_layer(options['decoder'])[0](options, params, prefix='decoder', nin=options['dim_word'], dim=options['dim'])
if options['doutput']:
params = get_layer('ff')[0](options, params, prefix='ff_hid', nin=options['dim'], nout=options['dim_word'])
params = get_layer('ff')[0](options, params, prefix='ff_logit', nin=options['dim_word'], nout=options['n_words'])
else:
params = get_layer('ff')[0](options, params, prefix='ff_logit', nin=options['dim'], nout=options['n_words'])
return params
| [
"def",
"init_params",
"(",
"options",
",",
"preemb",
"=",
"None",
")",
":",
"params",
"=",
"OrderedDict",
"(",
")",
"if",
"(",
"preemb",
"==",
"None",
")",
":",
"params",
"[",
"'Wemb'",
"]",
"=",
"norm_weight",
"(",
"options",
"[",
"'n_words'",
"]",
... | initialize all parameters . | train | false |
43,507 | @task()
@timeit
def index_task(cls, id_list, **kw):
statsd.incr(('search.tasks.index_task.%s' % cls.get_mapping_type_name()))
try:
pin_this_thread()
qs = cls.get_model().objects.filter(pk__in=id_list).values_list('pk', flat=True)
for id_ in qs:
try:
cls.index(cls.extract_document(id_), id_=id_)
except UnindexMeBro:
cls.unindex(id_)
except Exception as exc:
retries = index_task.request.retries
if (retries >= MAX_RETRIES):
raise IndexingTaskError()
statsd.incr('search.tasks.index_task.retry', 1)
statsd.incr(('search.tasks.index_task.retry%d' % RETRY_TIMES[retries]), 1)
index_task.retry(exc=exc, max_retries=MAX_RETRIES, countdown=RETRY_TIMES[retries])
finally:
unpin_this_thread()
| [
"@",
"task",
"(",
")",
"@",
"timeit",
"def",
"index_task",
"(",
"cls",
",",
"id_list",
",",
"**",
"kw",
")",
":",
"statsd",
".",
"incr",
"(",
"(",
"'search.tasks.index_task.%s'",
"%",
"cls",
".",
"get_mapping_type_name",
"(",
")",
")",
")",
"try",
":",... | index documents specified by cls and ids . | train | false |
43,508 | def only_active_assets(reference_date_value, assets):
return [a for a in assets if was_active(reference_date_value, a)]
| [
"def",
"only_active_assets",
"(",
"reference_date_value",
",",
"assets",
")",
":",
"return",
"[",
"a",
"for",
"a",
"in",
"assets",
"if",
"was_active",
"(",
"reference_date_value",
",",
"a",
")",
"]"
] | filter an iterable of asset objects down to just assets that were alive at the time corresponding to reference_date_value . | train | false |
43,509 | def decov(h):
return DeCov()(h)
| [
"def",
"decov",
"(",
"h",
")",
":",
"return",
"DeCov",
"(",
")",
"(",
"h",
")"
] | computes the decov loss of h args: h : variable holding a matrix where the first dimension corresponds to the batches . | train | false |
43,510 | def truncate(content, length=100, suffix='...'):
if (len(content) <= length):
return content
else:
return (content[:length].rsplit(' ', 1)[0] + suffix)
| [
"def",
"truncate",
"(",
"content",
",",
"length",
"=",
"100",
",",
"suffix",
"=",
"'...'",
")",
":",
"if",
"(",
"len",
"(",
"content",
")",
"<=",
"length",
")",
":",
"return",
"content",
"else",
":",
"return",
"(",
"content",
"[",
":",
"length",
"]... | truncate datetime expression examples . | train | false |
43,512 | def select_device(device_id):
context = devices.get_context(device_id)
return context.device
| [
"def",
"select_device",
"(",
"device_id",
")",
":",
"context",
"=",
"devices",
".",
"get_context",
"(",
"device_id",
")",
"return",
"context",
".",
"device"
] | make the context associated with device *device_id* the current context . | train | false |
43,513 | def histogram_bins(timeseries):
series = scipy.array([x[1] for x in timeseries])
t = tail_avg(timeseries)
h = np.histogram(series, bins=15)
bins = h[1]
for (index, bin_size) in enumerate(h[0]):
if (bin_size <= 20):
if (index == 0):
if (t <= bins[0]):
return True
elif ((t >= bins[index]) and (t < bins[(index + 1)])):
return True
return False
| [
"def",
"histogram_bins",
"(",
"timeseries",
")",
":",
"series",
"=",
"scipy",
".",
"array",
"(",
"[",
"x",
"[",
"1",
"]",
"for",
"x",
"in",
"timeseries",
"]",
")",
"t",
"=",
"tail_avg",
"(",
"timeseries",
")",
"h",
"=",
"np",
".",
"histogram",
"(",... | a timeseries is anomalous if the average of the last three datapoints falls into a histogram bin with less than 20 other datapoints returns: the size of the bin which contains the tail_avg . | train | false |
43,514 | def set_cache_dir(cache_dir):
if ((cache_dir is not None) and (not op.exists(cache_dir))):
raise IOError(('Directory %s does not exist' % cache_dir))
set_config('MNE_CACHE_DIR', cache_dir, set_env=False)
| [
"def",
"set_cache_dir",
"(",
"cache_dir",
")",
":",
"if",
"(",
"(",
"cache_dir",
"is",
"not",
"None",
")",
"and",
"(",
"not",
"op",
".",
"exists",
"(",
"cache_dir",
")",
")",
")",
":",
"raise",
"IOError",
"(",
"(",
"'Directory %s does not exist'",
"%",
... | set the directory to be used for temporary file storage . | train | false |
43,515 | def _run_ems(objective_function, data, cond_idx, train, test):
d = objective_function(*(data[np.intersect1d(c, train)] for c in cond_idx))
d /= np.sqrt(np.sum((d ** 2), axis=0))[None, :]
return (np.sum((data[test[0]] * d), axis=0), d)
| [
"def",
"_run_ems",
"(",
"objective_function",
",",
"data",
",",
"cond_idx",
",",
"train",
",",
"test",
")",
":",
"d",
"=",
"objective_function",
"(",
"*",
"(",
"data",
"[",
"np",
".",
"intersect1d",
"(",
"c",
",",
"train",
")",
"]",
"for",
"c",
"in",... | run ems . | train | false |
43,516 | @pytest.fixture
def hass_recorder():
hass = get_test_home_assistant()
def setup_recorder(config):
'Setup with params.'
db_uri = 'sqlite://'
conf = {recorder.CONF_DB_URL: db_uri}
conf.update(config)
assert setup_component(hass, recorder.DOMAIN, {recorder.DOMAIN: conf})
hass.start()
hass.block_till_done()
recorder._verify_instance()
recorder._INSTANCE.block_till_done()
return hass
(yield setup_recorder)
hass.stop()
| [
"@",
"pytest",
".",
"fixture",
"def",
"hass_recorder",
"(",
")",
":",
"hass",
"=",
"get_test_home_assistant",
"(",
")",
"def",
"setup_recorder",
"(",
"config",
")",
":",
"db_uri",
"=",
"'sqlite://'",
"conf",
"=",
"{",
"recorder",
".",
"CONF_DB_URL",
":",
"... | hass fixture with in-memory recorder . | train | false |
43,519 | def _get_old_unhelpful():
old_formatted = {}
cursor = connection.cursor()
cursor.execute('SELECT doc_id, yes, no\n FROM\n (SELECT wiki_revision.document_id as doc_id,\n SUM(limitedvotes.helpful) as yes,\n SUM(NOT(limitedvotes.helpful)) as no\n FROM\n (SELECT * FROM wiki_helpfulvote\n WHERE created <= DATE_SUB(CURDATE(), INTERVAL 1 WEEK)\n AND created >= DATE_SUB(DATE_SUB(CURDATE(),\n INTERVAL 1 WEEK), INTERVAL 1 WEEK)\n ) as limitedvotes\n INNER JOIN wiki_revision ON\n limitedvotes.revision_id=wiki_revision.id\n INNER JOIN wiki_document ON\n wiki_document.id=wiki_revision.document_id\n WHERE wiki_document.locale="en-US"\n GROUP BY doc_id\n HAVING no > yes\n ) as calculated')
old_data = cursor.fetchall()
for data in old_data:
doc_id = data[0]
yes = float(data[1])
no = float(data[2])
total = (yes + no)
if (total == 0):
continue
old_formatted[doc_id] = {'total': total, 'percentage': (yes / total)}
return old_formatted
| [
"def",
"_get_old_unhelpful",
"(",
")",
":",
"old_formatted",
"=",
"{",
"}",
"cursor",
"=",
"connection",
".",
"cursor",
"(",
")",
"cursor",
".",
"execute",
"(",
"'SELECT doc_id, yes, no\\n FROM\\n (SELECT wiki_revision.document_id as doc_id,\\n ... | gets the data from 2 weeks ago and formats it as output so that we can get a percent change . | train | false |
43,523 | def reply_published_cb(sender, user, reply, trivial, **kwargs):
siteconfig = SiteConfiguration.objects.get_current()
if (siteconfig.get(u'mail_send_review_mail') and (not trivial)):
mail_reply(reply, user)
| [
"def",
"reply_published_cb",
"(",
"sender",
",",
"user",
",",
"reply",
",",
"trivial",
",",
"**",
"kwargs",
")",
":",
"siteconfig",
"=",
"SiteConfiguration",
".",
"objects",
".",
"get_current",
"(",
")",
"if",
"(",
"siteconfig",
".",
"get",
"(",
"u'mail_se... | send e-mail when a review reply is published . | train | false |
43,525 | @handle_response_format
@treeio_login_required
def transaction_edit(request, transaction_id, response_format='html'):
transaction = get_object_or_404(Transaction, pk=transaction_id)
if request.POST:
if ('cancel' not in request.POST):
form = TransactionForm(request.user.profile, None, None, request.POST, instance=transaction)
if form.is_valid():
transaction = form.save(commit=False)
convert(transaction, 'value')
return HttpResponseRedirect(reverse('finance_transaction_view', args=[transaction.id]))
else:
return HttpResponseRedirect(reverse('finance_transaction_view', args=[transaction.id]))
else:
form = TransactionForm(request.user.profile, None, None, instance=transaction)
return render_to_response('finance/transaction_edit', {'form': form, 'transaction': transaction}, context_instance=RequestContext(request), response_format=response_format)
| [
"@",
"handle_response_format",
"@",
"treeio_login_required",
"def",
"transaction_edit",
"(",
"request",
",",
"transaction_id",
",",
"response_format",
"=",
"'html'",
")",
":",
"transaction",
"=",
"get_object_or_404",
"(",
"Transaction",
",",
"pk",
"=",
"transaction_id... | transaction edit page . | train | false |
43,526 | def setup_editor(qtbot):
text = 'a = 1\nprint(a)\n\nx = 2'
editorStack = EditorStack(None, [])
editorStack.set_introspector(Mock())
editorStack.set_find_widget(Mock())
editorStack.set_io_actions(Mock(), Mock(), Mock(), Mock())
finfo = editorStack.new('foo.py', 'utf-8', text)
qtbot.addWidget(editorStack)
return (editorStack, finfo.editor)
| [
"def",
"setup_editor",
"(",
"qtbot",
")",
":",
"text",
"=",
"'a = 1\\nprint(a)\\n\\nx = 2'",
"editorStack",
"=",
"EditorStack",
"(",
"None",
",",
"[",
"]",
")",
"editorStack",
".",
"set_introspector",
"(",
"Mock",
"(",
")",
")",
"editorStack",
".",
"set_find_w... | set up editorstack with codeeditor containing some python code . | train | false |
43,527 | def clean_xml_string(s):
return u''.join((c for c in s if is_valid_xml_char_ordinal(ord(c))))
| [
"def",
"clean_xml_string",
"(",
"s",
")",
":",
"return",
"u''",
".",
"join",
"(",
"(",
"c",
"for",
"c",
"in",
"s",
"if",
"is_valid_xml_char_ordinal",
"(",
"ord",
"(",
"c",
")",
")",
")",
")"
] | cleans string from invalid xml chars solution was found there:: URL . | train | false |
43,528 | def send_item_json():
try:
item_id = request.args[0]
except:
raise HTTP(400, current.xml.json_message(False, 400, 'No value provided!'))
stable = s3db.org_site
istable = s3db.inv_send
ittable = s3db.inv_track_item
inv_ship_status = s3db.inv_ship_status
istable.date.represent = (lambda dt: dt[:10])
query = (((((ittable.req_item_id == item_id) & (istable.id == ittable.send_id)) & (istable.site_id == stable.id)) & ((istable.status == inv_ship_status['SENT']) | (istable.status == inv_ship_status['RECEIVED']))) & (ittable.deleted == False))
records = db(query).select(istable.id, istable.date, stable.name, ittable.quantity)
output = ('[%s,%s' % (json.dumps(dict(id=str(T('Sent')), quantity='#')), records.json()[1:]))
response.headers['Content-Type'] = 'application/json'
return output
| [
"def",
"send_item_json",
"(",
")",
":",
"try",
":",
"item_id",
"=",
"request",
".",
"args",
"[",
"0",
"]",
"except",
":",
"raise",
"HTTP",
"(",
"400",
",",
"current",
".",
"xml",
".",
"json_message",
"(",
"False",
",",
"400",
",",
"'No value provided!'... | used by s3 . | train | false |
43,529 | def ATR(barDs, count, timeperiod=(- (2 ** 31))):
return call_talib_with_hlc(barDs, count, talib.ATR, timeperiod)
| [
"def",
"ATR",
"(",
"barDs",
",",
"count",
",",
"timeperiod",
"=",
"(",
"-",
"(",
"2",
"**",
"31",
")",
")",
")",
":",
"return",
"call_talib_with_hlc",
"(",
"barDs",
",",
"count",
",",
"talib",
".",
"ATR",
",",
"timeperiod",
")"
] | average true range . | train | false |
43,530 | def list_transaction(hostname, username, password, label):
bigip_session = _build_session(username, password)
trans_id = __salt__['grains.get']('bigip_f5_trans:{label}'.format(label=label))
if trans_id:
try:
response = bigip_session.get((BIG_IP_URL_BASE.format(host=hostname) + '/transaction/{trans_id}/commands'.format(trans_id=trans_id)))
return _load_response(response)
except requests.exceptions.ConnectionError as e:
return _load_connection_error(hostname, e)
else:
return 'Error: the label for this transaction was not defined as a grain. Begin a new transaction using the bigip.start_transaction function'
| [
"def",
"list_transaction",
"(",
"hostname",
",",
"username",
",",
"password",
",",
"label",
")",
":",
"bigip_session",
"=",
"_build_session",
"(",
"username",
",",
"password",
")",
"trans_id",
"=",
"__salt__",
"[",
"'grains.get'",
"]",
"(",
"'bigip_f5_trans:{lab... | a function to connect to a bigip device and list an existing transaction . | train | true |
43,531 | def Normal(name, mean, std):
return rv(name, NormalDistribution, (mean, std))
| [
"def",
"Normal",
"(",
"name",
",",
"mean",
",",
"std",
")",
":",
"return",
"rv",
"(",
"name",
",",
"NormalDistribution",
",",
"(",
"mean",
",",
"std",
")",
")"
] | create a continuous random variable with a normal distribution . | train | false |
43,532 | def get_stack(context=1):
frame = sys._getframe(1)
framelist = []
while frame:
framelist.append(((frame,) + getframeinfo(frame, context)))
frame = frame.f_back
return framelist
| [
"def",
"get_stack",
"(",
"context",
"=",
"1",
")",
":",
"frame",
"=",
"sys",
".",
"_getframe",
"(",
"1",
")",
"framelist",
"=",
"[",
"]",
"while",
"frame",
":",
"framelist",
".",
"append",
"(",
"(",
"(",
"frame",
",",
")",
"+",
"getframeinfo",
"(",... | get a list of records for a frame and all higher frames . | train | false |
43,533 | def ensure_utf8(s):
if isinstance(s, unicode):
return s.encode('utf8')
return s
| [
"def",
"ensure_utf8",
"(",
"s",
")",
":",
"if",
"isinstance",
"(",
"s",
",",
"unicode",
")",
":",
"return",
"s",
".",
"encode",
"(",
"'utf8'",
")",
"return",
"s"
] | not all of pyobjc and python understand unicode paths very well yet . | train | false |
43,534 | def _ListSeparatorsInXMLFormat(separators, indent=''):
result = ''
separators = list(separators)
separators.sort()
for sep_char in separators:
result += ('%s<list_separator>%s</list_separator>\n' % (indent, repr(sep_char)))
return result
| [
"def",
"_ListSeparatorsInXMLFormat",
"(",
"separators",
",",
"indent",
"=",
"''",
")",
":",
"result",
"=",
"''",
"separators",
"=",
"list",
"(",
"separators",
")",
"separators",
".",
"sort",
"(",
")",
"for",
"sep_char",
"in",
"separators",
":",
"result",
"... | generates xml encoding of a list of list separators . | train | false |
43,535 | def onInterfaceAppShutDown():
INFO_MSG('onInterfaceAppShutDown()')
g_poller.stop()
| [
"def",
"onInterfaceAppShutDown",
"(",
")",
":",
"INFO_MSG",
"(",
"'onInterfaceAppShutDown()'",
")",
"g_poller",
".",
"stop",
"(",
")"
] | kbengine method . | train | false |
43,536 | def postorder_3color(graph, root):
color = dict()
order = []
def dfs_walk(node):
color[node] = 'grey'
for succ in graph.successors(node):
if (color.get(succ) == 'grey'):
print 'CYCLE: {0}-->{1}'.format(node, succ)
if (succ not in color):
dfs_walk(succ)
order.append(node)
color[node] = 'black'
dfs_walk(root)
return order
| [
"def",
"postorder_3color",
"(",
"graph",
",",
"root",
")",
":",
"color",
"=",
"dict",
"(",
")",
"order",
"=",
"[",
"]",
"def",
"dfs_walk",
"(",
"node",
")",
":",
"color",
"[",
"node",
"]",
"=",
"'grey'",
"for",
"succ",
"in",
"graph",
".",
"successo... | return a post-order ordering of nodes in the graph . | train | false |
43,538 | def VAR(x, B, const=0):
p = B.shape[0]
T = x.shape[0]
xhat = np.zeros(x.shape)
for t in range(p, T):
xhat[t, :] = (const + (x[(t - p):t, :, np.newaxis] * B).sum(axis=1).sum(axis=0))
return xhat
| [
"def",
"VAR",
"(",
"x",
",",
"B",
",",
"const",
"=",
"0",
")",
":",
"p",
"=",
"B",
".",
"shape",
"[",
"0",
"]",
"T",
"=",
"x",
".",
"shape",
"[",
"0",
"]",
"xhat",
"=",
"np",
".",
"zeros",
"(",
"x",
".",
"shape",
")",
"for",
"t",
"in",
... | multivariate linear filter parameters x: array columns are variables . | train | false |
43,539 | def upgrade_tools(name, reboot=False, call=None):
if (call != 'action'):
raise SaltCloudSystemExit('The upgrade_tools action must be called with -a or --action.')
vm_ref = salt.utils.vmware.get_mor_by_property(_get_si(), vim.VirtualMachine, name)
return _upg_tools_helper(vm_ref, reboot)
| [
"def",
"upgrade_tools",
"(",
"name",
",",
"reboot",
"=",
"False",
",",
"call",
"=",
"None",
")",
":",
"if",
"(",
"call",
"!=",
"'action'",
")",
":",
"raise",
"SaltCloudSystemExit",
"(",
"'The upgrade_tools action must be called with -a or --action.'",
")",
"vm_ref... | to upgrade vmware tools on a specified virtual machine . | train | true |
43,540 | def _find_flavor(cs, flavor):
try:
return utils.find_resource(cs.flavors, flavor, is_public=None)
except exceptions.NotFound:
return cs.flavors.find(ram=flavor)
| [
"def",
"_find_flavor",
"(",
"cs",
",",
"flavor",
")",
":",
"try",
":",
"return",
"utils",
".",
"find_resource",
"(",
"cs",
".",
"flavors",
",",
"flavor",
",",
"is_public",
"=",
"None",
")",
"except",
"exceptions",
".",
"NotFound",
":",
"return",
"cs",
... | get a flavor by name . | train | false |
43,541 | def strip_leading_tabs(docs):
lines = docs.splitlines()
start = 0
for line in lines:
if (line != u''):
break
start += 1
if start:
lines = lines[start:]
if (len(lines) > 1):
start_line = 1
ref_line = lines[start_line]
while (not ref_line):
start_line += 1
if (start_line > len(lines)):
break
ref_line = lines[start_line]
strip_left = (len(ref_line) - len(ref_line.lstrip()))
if strip_left:
docs = u'\n'.join(([lines[0]] + [l[strip_left:] for l in lines[1:]]))
return docs
| [
"def",
"strip_leading_tabs",
"(",
"docs",
")",
":",
"lines",
"=",
"docs",
".",
"splitlines",
"(",
")",
"start",
"=",
"0",
"for",
"line",
"in",
"lines",
":",
"if",
"(",
"line",
"!=",
"u''",
")",
":",
"break",
"start",
"+=",
"1",
"if",
"start",
":",
... | strip leading tabs from __doc__ text . | train | false |
43,542 | def process_po_folder(domain, folder, extra=''):
result = True
for fname in glob.glob(os.path.join(folder, '*.po')):
basename = os.path.split(fname)[1]
name = os.path.splitext(basename)[0]
mo_path = os.path.normpath(('%s/%s%s' % (MO_DIR, name, MO_LOCALE)))
mo_name = ('%s.mo' % domain)
if (not os.path.exists(mo_path)):
os.makedirs(mo_path)
mo_file = os.path.join(mo_path, mo_name)
print ('Compile %s' % mo_file)
(ret, output) = run(('%s %s -o "%s" "%s"' % (TOOL, extra, mo_file, fname)))
if (ret != 0):
print ('\nMissing %s. Please install this package first.' % TOOL)
exit(1)
if ('WARNING:' in output):
print output
result = False
return result
| [
"def",
"process_po_folder",
"(",
"domain",
",",
"folder",
",",
"extra",
"=",
"''",
")",
":",
"result",
"=",
"True",
"for",
"fname",
"in",
"glob",
".",
"glob",
"(",
"os",
".",
"path",
".",
"join",
"(",
"folder",
",",
"'*.po'",
")",
")",
":",
"basena... | process each po file in folder . | train | false |
43,543 | def _my_principal_branch(expr, period, full_pb=False):
from sympy import principal_branch
res = principal_branch(expr, period)
if (not full_pb):
res = res.replace(principal_branch, (lambda x, y: x))
return res
| [
"def",
"_my_principal_branch",
"(",
"expr",
",",
"period",
",",
"full_pb",
"=",
"False",
")",
":",
"from",
"sympy",
"import",
"principal_branch",
"res",
"=",
"principal_branch",
"(",
"expr",
",",
"period",
")",
"if",
"(",
"not",
"full_pb",
")",
":",
"res",... | bring expr nearer to its principal branch by removing superfluous factors . | train | false |
43,544 | def is_loopback_connection(request):
host_ip = socket.gethostbyname(socket.gethostname())
remote_ip = get_request_ip(request)
return (remote_ip in ['127.0.0.1', 'localhost', host_ip])
| [
"def",
"is_loopback_connection",
"(",
"request",
")",
":",
"host_ip",
"=",
"socket",
".",
"gethostbyname",
"(",
"socket",
".",
"gethostname",
"(",
")",
")",
"remote_ip",
"=",
"get_request_ip",
"(",
"request",
")",
"return",
"(",
"remote_ip",
"in",
"[",
"'127... | test whether the ip making the request is the same as the ip serving the request . | train | false |
43,545 | def maybe_convert_indices(indices, n):
if isinstance(indices, list):
indices = np.array(indices)
if (len(indices) == 0):
return np.empty(0, dtype=np.int_)
mask = (indices < 0)
if mask.any():
indices[mask] += n
mask = ((indices >= n) | (indices < 0))
if mask.any():
raise IndexError('indices are out-of-bounds')
return indices
| [
"def",
"maybe_convert_indices",
"(",
"indices",
",",
"n",
")",
":",
"if",
"isinstance",
"(",
"indices",
",",
"list",
")",
":",
"indices",
"=",
"np",
".",
"array",
"(",
"indices",
")",
"if",
"(",
"len",
"(",
"indices",
")",
"==",
"0",
")",
":",
"ret... | if we have negative indicies . | train | true |
43,550 | def test_make_table(table_types, mixin_cols):
t = table_types.Table(mixin_cols)
check_mixin_type(t, t['m'], mixin_cols['m'])
cols = list(mixin_cols.values())
t = table_types.Table(cols, names=('i', 'a', 'b', 'm'))
check_mixin_type(t, t['m'], mixin_cols['m'])
t = table_types.Table(cols)
check_mixin_type(t, t['col3'], mixin_cols['m'])
| [
"def",
"test_make_table",
"(",
"table_types",
",",
"mixin_cols",
")",
":",
"t",
"=",
"table_types",
".",
"Table",
"(",
"mixin_cols",
")",
"check_mixin_type",
"(",
"t",
",",
"t",
"[",
"'m'",
"]",
",",
"mixin_cols",
"[",
"'m'",
"]",
")",
"cols",
"=",
"li... | make a table with the columns in mixin_cols . | train | false |
43,554 | @contextfunction
def projects_time_slot_list(context, time_slots, no_dates=False):
request = context['request']
response_format = 'html'
if ('response_format' in context):
response_format = context['response_format']
return Markup(render_to_string('projects/tags/time_slot_list', {'time_slots': time_slots, 'no_dates': no_dates}, context_instance=RequestContext(request), response_format=response_format))
| [
"@",
"contextfunction",
"def",
"projects_time_slot_list",
"(",
"context",
",",
"time_slots",
",",
"no_dates",
"=",
"False",
")",
":",
"request",
"=",
"context",
"[",
"'request'",
"]",
"response_format",
"=",
"'html'",
"if",
"(",
"'response_format'",
"in",
"conte... | print a list of time slots . | train | false |
43,556 | def parse_reflog_line(line):
(begin, message) = line.split(' DCTB ', 1)
(old_sha, new_sha, rest) = begin.split(' ', 2)
(committer, timestamp_str, timezone_str) = rest.rsplit(' ', 2)
return Entry(old_sha, new_sha, committer, int(timestamp_str), parse_timezone(timezone_str)[0], message)
| [
"def",
"parse_reflog_line",
"(",
"line",
")",
":",
"(",
"begin",
",",
"message",
")",
"=",
"line",
".",
"split",
"(",
"' DCTB '",
",",
"1",
")",
"(",
"old_sha",
",",
"new_sha",
",",
"rest",
")",
"=",
"begin",
".",
"split",
"(",
"' '",
",",
"2",
"... | parse a reflog line . | train | false |
43,557 | def attribute_rule(allowed_attrs):
def fn(tag):
for (attr, val) in list(tag.attrs.items()):
rule = allowed_attrs.get(attr)
if rule:
if callable(rule):
new_val = rule(val)
if (new_val is None):
del tag[attr]
else:
tag[attr] = new_val
else:
pass
else:
del tag[attr]
return fn
| [
"def",
"attribute_rule",
"(",
"allowed_attrs",
")",
":",
"def",
"fn",
"(",
"tag",
")",
":",
"for",
"(",
"attr",
",",
"val",
")",
"in",
"list",
"(",
"tag",
".",
"attrs",
".",
"items",
"(",
")",
")",
":",
"rule",
"=",
"allowed_attrs",
".",
"get",
"... | generator for functions that can be used as entries in whitelister . | train | false |
43,558 | def getNewRepository():
return ExportRepository()
| [
"def",
"getNewRepository",
"(",
")",
":",
"return",
"ExportRepository",
"(",
")"
] | get new repository . | train | false |
43,559 | def emits_warning(*messages):
@decorator
def decorate(fn, *args, **kw):
with expect_warnings(assert_=False, *messages):
return fn(*args, **kw)
return decorate
| [
"def",
"emits_warning",
"(",
"*",
"messages",
")",
":",
"@",
"decorator",
"def",
"decorate",
"(",
"fn",
",",
"*",
"args",
",",
"**",
"kw",
")",
":",
"with",
"expect_warnings",
"(",
"assert_",
"=",
"False",
",",
"*",
"messages",
")",
":",
"return",
"f... | mark a test as emitting a warning . | train | false |
43,561 | def Save(root=None, formats=None, **options):
clf = options.pop('clf', True)
Config(**options)
if (formats is None):
formats = ['pdf', 'eps']
try:
formats.remove('plotly')
Plotly(clf=False)
except ValueError:
pass
if root:
for fmt in formats:
SaveFormat(root, fmt)
if clf:
Clf()
| [
"def",
"Save",
"(",
"root",
"=",
"None",
",",
"formats",
"=",
"None",
",",
"**",
"options",
")",
":",
"clf",
"=",
"options",
".",
"pop",
"(",
"'clf'",
",",
"True",
")",
"Config",
"(",
"**",
"options",
")",
"if",
"(",
"formats",
"is",
"None",
")",... | saves the plot in the given formats and clears the figure . | train | false |
43,565 | @frappe.whitelist()
def archive_restore_column(board_name, column_title, status):
doc = frappe.get_doc(u'Kanban Board', board_name)
for col in doc.columns:
if (column_title == col.column_name):
col.status = status
doc.save()
return doc.columns
| [
"@",
"frappe",
".",
"whitelist",
"(",
")",
"def",
"archive_restore_column",
"(",
"board_name",
",",
"column_title",
",",
"status",
")",
":",
"doc",
"=",
"frappe",
".",
"get_doc",
"(",
"u'Kanban Board'",
",",
"board_name",
")",
"for",
"col",
"in",
"doc",
".... | set columns status to status . | train | false |
43,567 | def is_ha_router(router):
try:
requested_router_type = router.extra_attributes.ha
except AttributeError:
requested_router_type = router.get('ha')
if validators.is_attr_set(requested_router_type):
return requested_router_type
return cfg.CONF.l3_ha
| [
"def",
"is_ha_router",
"(",
"router",
")",
":",
"try",
":",
"requested_router_type",
"=",
"router",
".",
"extra_attributes",
".",
"ha",
"except",
"AttributeError",
":",
"requested_router_type",
"=",
"router",
".",
"get",
"(",
"'ha'",
")",
"if",
"validators",
"... | return true if router to be handled is ha . | train | false |
43,568 | def install_cache(cache_name='cache', backend='sqlite', expire_after=None, allowable_codes=(200,), allowable_methods=('GET',), session_factory=CachedSession, **backend_options):
_patch_session_factory((lambda : session_factory(cache_name=cache_name, backend=backend, expire_after=expire_after, allowable_codes=allowable_codes, allowable_methods=allowable_methods, **backend_options)))
| [
"def",
"install_cache",
"(",
"cache_name",
"=",
"'cache'",
",",
"backend",
"=",
"'sqlite'",
",",
"expire_after",
"=",
"None",
",",
"allowable_codes",
"=",
"(",
"200",
",",
")",
",",
"allowable_methods",
"=",
"(",
"'GET'",
",",
")",
",",
"session_factory",
... | installs cache for all requests requests by monkey-patching session parameters are the same as in :class:cachedsession . | train | false |
43,570 | @docfiller
def uniform_filter1d(input, size, axis=(-1), output=None, mode='reflect', cval=0.0, origin=0):
input = numpy.asarray(input)
if numpy.iscomplexobj(input):
raise TypeError('Complex type not supported')
axis = _ni_support._check_axis(axis, input.ndim)
if (size < 1):
raise RuntimeError('incorrect filter size')
(output, return_value) = _ni_support._get_output(output, input)
if ((((size // 2) + origin) < 0) or (((size // 2) + origin) >= size)):
raise ValueError('invalid origin')
mode = _ni_support._extend_mode_to_code(mode)
_nd_image.uniform_filter1d(input, size, axis, output, mode, cval, origin)
return return_value
| [
"@",
"docfiller",
"def",
"uniform_filter1d",
"(",
"input",
",",
"size",
",",
"axis",
"=",
"(",
"-",
"1",
")",
",",
"output",
"=",
"None",
",",
"mode",
"=",
"'reflect'",
",",
"cval",
"=",
"0.0",
",",
"origin",
"=",
"0",
")",
":",
"input",
"=",
"nu... | calculate a one-dimensional uniform filter along the given axis . | train | false |
43,572 | def coarsen(reduction, x, axes, trim_excess=False):
for i in range(x.ndim):
if (i not in axes):
axes[i] = 1
if trim_excess:
ind = tuple(((slice(0, (- (d % axes[i]))) if (d % axes[i]) else slice(None, None)) for (i, d) in enumerate(x.shape)))
x = x[ind]
newshape = tuple(concat([((x.shape[i] / axes[i]), axes[i]) for i in range(x.ndim)]))
return reduction(x.reshape(newshape), axis=tuple(range(1, (x.ndim * 2), 2)))
| [
"def",
"coarsen",
"(",
"reduction",
",",
"x",
",",
"axes",
",",
"trim_excess",
"=",
"False",
")",
":",
"for",
"i",
"in",
"range",
"(",
"x",
".",
"ndim",
")",
":",
"if",
"(",
"i",
"not",
"in",
"axes",
")",
":",
"axes",
"[",
"i",
"]",
"=",
"1",... | coarsen array by applying reduction to fixed size neighborhoods parameters reduction: function function like np . | train | false |
43,573 | def _rows_differ(row, _row):
row_copy = copy.deepcopy(row)
_row_copy = copy.deepcopy(_row)
for panel in row_copy['panels']:
if ('id' in panel):
del panel['id']
for _panel in _row_copy['panels']:
if ('id' in _panel):
del _panel['id']
diff = DictDiffer(row_copy, _row_copy)
return (diff.changed() or diff.added() or diff.removed())
| [
"def",
"_rows_differ",
"(",
"row",
",",
"_row",
")",
":",
"row_copy",
"=",
"copy",
".",
"deepcopy",
"(",
"row",
")",
"_row_copy",
"=",
"copy",
".",
"deepcopy",
"(",
"_row",
")",
"for",
"panel",
"in",
"row_copy",
"[",
"'panels'",
"]",
":",
"if",
"(",
... | check if grafana dashboard row and _row differ . | train | true |
43,574 | def get_all(context, namespace_name, session, filters=None, marker=None, limit=None, sort_key='created_at', sort_dir='desc'):
namespace = namespace_api.get(context, namespace_name, session)
query = session.query(models.MetadefTag).filter_by(namespace_id=namespace['id'])
marker_tag = None
if (marker is not None):
marker_tag = _get(context, marker, session)
sort_keys = ['created_at', 'id']
(sort_keys.insert(0, sort_key) if (sort_key not in sort_keys) else sort_keys)
query = paginate_query(query=query, model=models.MetadefTag, limit=limit, sort_keys=sort_keys, marker=marker_tag, sort_dir=sort_dir)
metadef_tag = query.all()
metadef_tag_list = []
for tag in metadef_tag:
metadef_tag_list.append(tag.to_dict())
return metadef_tag_list
| [
"def",
"get_all",
"(",
"context",
",",
"namespace_name",
",",
"session",
",",
"filters",
"=",
"None",
",",
"marker",
"=",
"None",
",",
"limit",
"=",
"None",
",",
"sort_key",
"=",
"'created_at'",
",",
"sort_dir",
"=",
"'desc'",
")",
":",
"namespace",
"=",... | return a list of all available services . | train | false |
43,575 | def parse_rx_excludes(options, fatal):
excluded_patterns = []
for flag in options:
(option, parameter) = flag
if (option == '--exclude-rx'):
try:
excluded_patterns.append(re.compile(parameter))
except re.error as ex:
fatal(('invalid --exclude-rx pattern (%s): %s' % (parameter, ex)))
elif (option == '--exclude-rx-from'):
try:
f = open(resolve_parent(parameter))
except IOError as e:
raise fatal(("couldn't read %s" % parameter))
for pattern in f.readlines():
spattern = pattern.rstrip('\n')
if (not spattern):
continue
try:
excluded_patterns.append(re.compile(spattern))
except re.error as ex:
fatal(('invalid --exclude-rx pattern (%s): %s' % (spattern, ex)))
return excluded_patterns
| [
"def",
"parse_rx_excludes",
"(",
"options",
",",
"fatal",
")",
":",
"excluded_patterns",
"=",
"[",
"]",
"for",
"flag",
"in",
"options",
":",
"(",
"option",
",",
"parameter",
")",
"=",
"flag",
"if",
"(",
"option",
"==",
"'--exclude-rx'",
")",
":",
"try",
... | traverse the options and extract all rx excludes . | train | false |
43,576 | def bool_to_int(value):
if (value is True):
return '1'
elif (value is False):
return '0'
else:
return value
| [
"def",
"bool_to_int",
"(",
"value",
")",
":",
"if",
"(",
"value",
"is",
"True",
")",
":",
"return",
"'1'",
"elif",
"(",
"value",
"is",
"False",
")",
":",
"return",
"'0'",
"else",
":",
"return",
"value"
] | translates python booleans to rpc-safe integers . | train | false |
43,578 | def getCraftedText(fileName, text='', repository=None):
return getCraftedTextFromText(archive.getTextIfEmpty(fileName, text), repository)
| [
"def",
"getCraftedText",
"(",
"fileName",
",",
"text",
"=",
"''",
",",
"repository",
"=",
"None",
")",
":",
"return",
"getCraftedTextFromText",
"(",
"archive",
".",
"getTextIfEmpty",
"(",
"fileName",
",",
"text",
")",
",",
"repository",
")"
] | reversal a gcode linear move file or text . | train | false |
43,579 | @depends(HAS_ESX_CLI)
def esxcli_cmd(cmd_str, host=None, username=None, password=None, protocol=None, port=None, esxi_hosts=None):
ret = {}
if esxi_hosts:
if (not isinstance(esxi_hosts, list)):
raise CommandExecutionError("'esxi_hosts' must be a list.")
for esxi_host in esxi_hosts:
response = salt.utils.vmware.esxcli(host, username, password, cmd_str, protocol=protocol, port=port, esxi_host=esxi_host)
if (response['retcode'] != 0):
ret.update({esxi_host: {'Error': response.get('stdout')}})
else:
ret.update({esxi_host: response})
else:
response = salt.utils.vmware.esxcli(host, username, password, cmd_str, protocol=protocol, port=port)
if (response['retcode'] != 0):
ret.update({host: {'Error': response.get('stdout')}})
else:
ret.update({host: response})
return ret
| [
"@",
"depends",
"(",
"HAS_ESX_CLI",
")",
"def",
"esxcli_cmd",
"(",
"cmd_str",
",",
"host",
"=",
"None",
",",
"username",
"=",
"None",
",",
"password",
"=",
"None",
",",
"protocol",
"=",
"None",
",",
"port",
"=",
"None",
",",
"esxi_hosts",
"=",
"None",
... | run an esxcli command directly on the host or list of hosts . | train | true |
43,580 | def getComplexByMultiplierPrefix(multiplier, prefix, valueComplex, xmlElement):
if (multiplier == 0.0):
return valueComplex
oldMultipliedValueComplex = (valueComplex * multiplier)
complexByPrefix = getComplexByPrefix(prefix, oldMultipliedValueComplex, xmlElement)
if (complexByPrefix == oldMultipliedValueComplex):
return valueComplex
return (complexByPrefix / multiplier)
| [
"def",
"getComplexByMultiplierPrefix",
"(",
"multiplier",
",",
"prefix",
",",
"valueComplex",
",",
"xmlElement",
")",
":",
"if",
"(",
"multiplier",
"==",
"0.0",
")",
":",
"return",
"valueComplex",
"oldMultipliedValueComplex",
"=",
"(",
"valueComplex",
"*",
"multip... | get complex from multiplier . | train | false |
43,581 | def type_from_value(value):
return _type_mapping[type(value)]
| [
"def",
"type_from_value",
"(",
"value",
")",
":",
"return",
"_type_mapping",
"[",
"type",
"(",
"value",
")",
"]"
] | fetch type based on a primitive value . | train | false |
43,583 | @handle_response_format
@treeio_login_required
def account_edit(request, response_format='html'):
profile = request.user.profile
if request.POST:
form = AccountForm(request.POST, instance=profile)
if form.is_valid():
form.save()
return HttpResponseRedirect(reverse('account_view'))
else:
form = AccountForm(instance=profile)
return render_to_response('account/account_edit', {'profile': profile, 'form': Markup(form.as_ul())}, context_instance=RequestContext(request), response_format=response_format)
| [
"@",
"handle_response_format",
"@",
"treeio_login_required",
"def",
"account_edit",
"(",
"request",
",",
"response_format",
"=",
"'html'",
")",
":",
"profile",
"=",
"request",
".",
"user",
".",
"profile",
"if",
"request",
".",
"POST",
":",
"form",
"=",
"Accoun... | account edit page . | train | false |
43,584 | @register.inclusion_tag(u'reviews/review_reply_section.html', takes_context=True)
def reply_section(context, review, comment, context_type, context_id, reply_to_text=u''):
if (comment != u''):
if (type(comment) is ScreenshotComment):
context_id += u's'
elif (type(comment) is FileAttachmentComment):
context_id += u'f'
elif (type(comment) is GeneralComment):
context_id += u'g'
context_id += six.text_type(comment.id)
return {u'review': review, u'comment': comment, u'context_type': context_type, u'context_id': context_id, u'user': context.get(u'user', None), u'local_site_name': context.get(u'local_site_name'), u'reply_to_is_empty': (reply_to_text == u''), u'request': context[u'request']}
| [
"@",
"register",
".",
"inclusion_tag",
"(",
"u'reviews/review_reply_section.html'",
",",
"takes_context",
"=",
"True",
")",
"def",
"reply_section",
"(",
"context",
",",
"review",
",",
"comment",
",",
"context_type",
",",
"context_id",
",",
"reply_to_text",
"=",
"u... | renders a template for displaying a reply . | train | false |
43,586 | def EncodeUrl(base, params, escape_url, use_html_entities):
real_params = []
for (key, value) in params.iteritems():
if escape_url:
value = urllib.quote(value)
if value:
real_params.append(('%s=%s' % (key, value)))
if real_params:
url = ('%s?%s' % (base, '&'.join(real_params)))
else:
url = base
if use_html_entities:
url = cgi.escape(url, quote=True)
return url
| [
"def",
"EncodeUrl",
"(",
"base",
",",
"params",
",",
"escape_url",
",",
"use_html_entities",
")",
":",
"real_params",
"=",
"[",
"]",
"for",
"(",
"key",
",",
"value",
")",
"in",
"params",
".",
"iteritems",
"(",
")",
":",
"if",
"escape_url",
":",
"value"... | escape params . | train | false |
43,587 | def AddPost(content):
t = time.strftime('%c', time.localtime())
DB.append((t, content))
| [
"def",
"AddPost",
"(",
"content",
")",
":",
"t",
"=",
"time",
".",
"strftime",
"(",
"'%c'",
",",
"time",
".",
"localtime",
"(",
")",
")",
"DB",
".",
"append",
"(",
"(",
"t",
",",
"content",
")",
")"
] | add a new post to the database . | train | false |
43,588 | def random_dense_design_matrix_for_regression(rng, num_examples, dim, reg_min, reg_max):
X = rng.randn(num_examples, dim)
Y = rng.randint(reg_min, reg_max, (num_examples, 1))
return DenseDesignMatrix(X=X, y=Y)
| [
"def",
"random_dense_design_matrix_for_regression",
"(",
"rng",
",",
"num_examples",
",",
"dim",
",",
"reg_min",
",",
"reg_max",
")",
":",
"X",
"=",
"rng",
".",
"randn",
"(",
"num_examples",
",",
"dim",
")",
"Y",
"=",
"rng",
".",
"randint",
"(",
"reg_min",... | creates a random dense design matrix for regression . | train | false |
43,589 | def _enrollment_mode_display(enrollment_mode, verification_status, course_id):
course_mode_slugs = [mode.slug for mode in CourseMode.modes_for_course(course_id)]
if (enrollment_mode == CourseMode.VERIFIED):
if (verification_status in [VERIFY_STATUS_NEED_TO_VERIFY, VERIFY_STATUS_SUBMITTED, VERIFY_STATUS_APPROVED]):
display_mode = DISPLAY_VERIFIED
elif (DISPLAY_HONOR in course_mode_slugs):
display_mode = DISPLAY_HONOR
else:
display_mode = DISPLAY_AUDIT
elif (enrollment_mode in [CourseMode.PROFESSIONAL, CourseMode.NO_ID_PROFESSIONAL_MODE]):
display_mode = DISPLAY_PROFESSIONAL
else:
display_mode = enrollment_mode
return display_mode
| [
"def",
"_enrollment_mode_display",
"(",
"enrollment_mode",
",",
"verification_status",
",",
"course_id",
")",
":",
"course_mode_slugs",
"=",
"[",
"mode",
".",
"slug",
"for",
"mode",
"in",
"CourseMode",
".",
"modes_for_course",
"(",
"course_id",
")",
"]",
"if",
"... | checking enrollment mode and status and returns the display mode args: enrollment_mode : enrollment mode . | train | false |
43,590 | def quickstart_generator(war_path, sdk_root=None):
if (not sdk_root):
sdk_root = _SDK_ROOT
quickstart_xml_path = os.path.join(war_path, 'WEB-INF', 'quickstart-web.xml')
if os.path.exists(quickstart_xml_path):
os.remove(quickstart_xml_path)
(java_home, exec_suffix) = java_utils.JavaHomeAndSuffix()
java_command = (os.path.join(java_home, 'bin', 'java') + exec_suffix)
quickstartgenerator_jar = os.path.join(sdk_root, _QUICKSTART_JAR_PATH)
webdefaultxml = os.path.join(sdk_root, _JAVA_VMRUNTIME_PATH, 'etc', 'webdefault.xml')
command = [java_command, '-jar', quickstartgenerator_jar, war_path, webdefaultxml]
subprocess.check_call(command)
with open(quickstart_xml_path) as f:
return (f.read(), quickstart_xml_path)
| [
"def",
"quickstart_generator",
"(",
"war_path",
",",
"sdk_root",
"=",
"None",
")",
":",
"if",
"(",
"not",
"sdk_root",
")",
":",
"sdk_root",
"=",
"_SDK_ROOT",
"quickstart_xml_path",
"=",
"os",
".",
"path",
".",
"join",
"(",
"war_path",
",",
"'WEB-INF'",
","... | run the quickstart-web . | train | false |
43,591 | def checkDeprecatedOptions(args):
for _ in args:
if (_ in DEPRECATED_OPTIONS):
errMsg = ("switch/option '%s' is deprecated" % _)
if DEPRECATED_OPTIONS[_]:
errMsg += (' (hint: %s)' % DEPRECATED_OPTIONS[_])
raise SqlmapSyntaxException(errMsg)
| [
"def",
"checkDeprecatedOptions",
"(",
"args",
")",
":",
"for",
"_",
"in",
"args",
":",
"if",
"(",
"_",
"in",
"DEPRECATED_OPTIONS",
")",
":",
"errMsg",
"=",
"(",
"\"switch/option '%s' is deprecated\"",
"%",
"_",
")",
"if",
"DEPRECATED_OPTIONS",
"[",
"_",
"]",... | checks for deprecated options . | train | false |
43,592 | def getMinimumByVector3Path(path):
minimum = Vector3(9.876543219876543e+17, 9.876543219876543e+17, 9.876543219876543e+17)
for point in path:
minimum.minimize(point)
return minimum
| [
"def",
"getMinimumByVector3Path",
"(",
"path",
")",
":",
"minimum",
"=",
"Vector3",
"(",
"9.876543219876543e+17",
",",
"9.876543219876543e+17",
",",
"9.876543219876543e+17",
")",
"for",
"point",
"in",
"path",
":",
"minimum",
".",
"minimize",
"(",
"point",
")",
"... | get a vector3 with each component the minimum of the respective components of a vector3 path . | train | false |
43,593 | def ge(a, b):
return (a >= b)
| [
"def",
"ge",
"(",
"a",
",",
"b",
")",
":",
"return",
"(",
"a",
">=",
"b",
")"
] | same as a >= b . | train | false |
43,594 | def save_project(project, file_path):
json.dump(project, codecs.open(file_path, u'w', u'utf8'), indent=4)
| [
"def",
"save_project",
"(",
"project",
",",
"file_path",
")",
":",
"json",
".",
"dump",
"(",
"project",
",",
"codecs",
".",
"open",
"(",
"file_path",
",",
"u'w'",
",",
"u'utf8'",
")",
",",
"indent",
"=",
"4",
")"
] | saves given project as a json file keyword arguments: project -- the project to store filename -- the filename to store the project in . | train | false |
43,595 | def required_char_field_attrs(self, widget, *args, **kwargs):
attrs = super(fields.CharField, self).widget_attrs(widget, *args, **kwargs)
original_attrs = (charfield_widget_attrs(self, widget) or {})
attrs.update(original_attrs)
return attrs
| [
"def",
"required_char_field_attrs",
"(",
"self",
",",
"widget",
",",
"*",
"args",
",",
"**",
"kwargs",
")",
":",
"attrs",
"=",
"super",
"(",
"fields",
".",
"CharField",
",",
"self",
")",
".",
"widget_attrs",
"(",
"widget",
",",
"*",
"args",
",",
"**",
... | this function is for use on the charfield class . | train | false |
43,596 | def name_value_to_dict(content):
result = {}
for match in re.finditer('^([A-Za-z_][A-Za-z0-9_]*) *= *([^#]*)', content, re.MULTILINE):
result[match.group(1)] = match.group(2).strip()
return result
| [
"def",
"name_value_to_dict",
"(",
"content",
")",
":",
"result",
"=",
"{",
"}",
"for",
"match",
"in",
"re",
".",
"finditer",
"(",
"'^([A-Za-z_][A-Za-z0-9_]*) *= *([^#]*)'",
",",
"content",
",",
"re",
".",
"MULTILINE",
")",
":",
"result",
"[",
"match",
".",
... | converts a list of name=value pairs to a dictionary . | train | false |
43,597 | def _process_defaults(G, schemas):
for schema in schemas:
for (name, value) in six.iteritems(schema):
absent = (name not in G.node)
is_none = (G.node.get(name, {}).get('value') is None)
immutable = value.get('immutable', False)
if (absent or is_none or immutable):
_process(G, name, value.get('default'))
| [
"def",
"_process_defaults",
"(",
"G",
",",
"schemas",
")",
":",
"for",
"schema",
"in",
"schemas",
":",
"for",
"(",
"name",
",",
"value",
")",
"in",
"six",
".",
"iteritems",
"(",
"schema",
")",
":",
"absent",
"=",
"(",
"name",
"not",
"in",
"G",
".",... | process dependencies for parameters default values in the order schemas are defined . | train | false |
43,598 | @requires_segment_info
def modified_indicator(pl, segment_info, text=u'+'):
return (text if int(vim_getbufoption(segment_info, u'modified')) else None)
| [
"@",
"requires_segment_info",
"def",
"modified_indicator",
"(",
"pl",
",",
"segment_info",
",",
"text",
"=",
"u'+'",
")",
":",
"return",
"(",
"text",
"if",
"int",
"(",
"vim_getbufoption",
"(",
"segment_info",
",",
"u'modified'",
")",
")",
"else",
"None",
")"... | return a file modified indicator . | train | false |
43,599 | def connect_to_services(region=None):
global cloudservers, cloudfiles, cloud_loadbalancers, cloud_databases
global cloud_blockstorage, cloud_dns, cloud_networks, cloud_monitoring
global autoscale, images, queues, cloud_cdn
cloudservers = connect_to_cloudservers(region=region)
cloudfiles = connect_to_cloudfiles(region=region)
cloud_cdn = connect_to_cloud_cdn(region=region)
cloud_loadbalancers = connect_to_cloud_loadbalancers(region=region)
cloud_databases = connect_to_cloud_databases(region=region)
cloud_blockstorage = connect_to_cloud_blockstorage(region=region)
cloud_dns = connect_to_cloud_dns(region=region)
cloud_networks = connect_to_cloud_networks(region=region)
cloud_monitoring = connect_to_cloud_monitoring(region=region)
autoscale = connect_to_autoscale(region=region)
images = connect_to_images(region=region)
queues = connect_to_queues(region=region)
| [
"def",
"connect_to_services",
"(",
"region",
"=",
"None",
")",
":",
"global",
"cloudservers",
",",
"cloudfiles",
",",
"cloud_loadbalancers",
",",
"cloud_databases",
"global",
"cloud_blockstorage",
",",
"cloud_dns",
",",
"cloud_networks",
",",
"cloud_monitoring",
"glob... | establishes authenticated connections to the various cloud apis . | train | true |
43,600 | def disable_api_key(apiKey, region=None, key=None, keyid=None, profile=None):
try:
conn = _get_conn(region=region, key=key, keyid=keyid, profile=profile)
response = _api_key_patch_replace(conn, apiKey, '/enabled', 'False')
return {'apiKey': _convert_datetime_str(response)}
except ClientError as e:
return {'error': salt.utils.boto3.get_error(e)}
| [
"def",
"disable_api_key",
"(",
"apiKey",
",",
"region",
"=",
"None",
",",
"key",
"=",
"None",
",",
"keyid",
"=",
"None",
",",
"profile",
"=",
"None",
")",
":",
"try",
":",
"conn",
"=",
"_get_conn",
"(",
"region",
"=",
"region",
",",
"key",
"=",
"ke... | disable the given apikey . | train | false |
43,601 | def convertToPaths(dictionary):
if ((dictionary.__class__ == Vector3) or (dictionary.__class__.__name__ == 'Vector3Index')):
return
keys = getKeys(dictionary)
if (keys == None):
return
for key in keys:
value = dictionary[key]
if (value.__class__.__name__ == 'ElementNode'):
if (value.xmlObject != None):
dictionary[key] = getFloatListListsByPaths(value.xmlObject.getPaths())
else:
convertToPaths(dictionary[key])
| [
"def",
"convertToPaths",
"(",
"dictionary",
")",
":",
"if",
"(",
"(",
"dictionary",
".",
"__class__",
"==",
"Vector3",
")",
"or",
"(",
"dictionary",
".",
"__class__",
".",
"__name__",
"==",
"'Vector3Index'",
")",
")",
":",
"return",
"keys",
"=",
"getKeys",... | recursively convert any xmlelements to paths . | train | false |
43,603 | def _walk_js_scopes(scope, lpath=None):
if (lpath is None):
lpath = []
for subscope in scope:
if ((subscope.tag == 'variable') and (not subscope)):
continue
sublpath = (lpath + [subscope.get('name')])
(yield (subscope, sublpath))
for r in _walk_js_scopes(subscope, sublpath):
(yield r)
| [
"def",
"_walk_js_scopes",
"(",
"scope",
",",
"lpath",
"=",
"None",
")",
":",
"if",
"(",
"lpath",
"is",
"None",
")",
":",
"lpath",
"=",
"[",
"]",
"for",
"subscope",
"in",
"scope",
":",
"if",
"(",
"(",
"subscope",
".",
"tag",
"==",
"'variable'",
")",... | walk the subscopes of the given element . | train | false |
43,604 | def backends_data(user):
available = get_backends().keys()
values = {'associated': [], 'not_associated': available, 'backends': available}
if (hasattr(user, 'is_authenticated') and user.is_authenticated()):
associated = UserSocialAuth.get_social_auth_for_user(user)
not_associated = list((set(available) - set((assoc.provider for assoc in associated))))
values['associated'] = associated
values['not_associated'] = not_associated
return values
| [
"def",
"backends_data",
"(",
"user",
")",
":",
"available",
"=",
"get_backends",
"(",
")",
".",
"keys",
"(",
")",
"values",
"=",
"{",
"'associated'",
":",
"[",
"]",
",",
"'not_associated'",
":",
"available",
",",
"'backends'",
":",
"available",
"}",
"if"... | return backends data for given user . | train | false |
43,605 | def _GetFD(filename, file_pos, fd_cache):
if (filename in fd_cache):
(at, fd) = fd_cache[filename]
fd_cache[filename] = (time.time(), fd)
return fd
if (len(fd_cache) == MAX_OPEN_FILES):
lru = sorted(fd_cache.items(), key=itemgetter(1))[:int((0.1 * MAX_OPEN_FILES))]
for (key, (at, fd)) in lru:
fd.close()
del fd_cache[key]
if (filename.endswith('.gz') or filename.endswith('.gz.tmp')):
fd = gzip.open(filename, 'rb')
else:
fd = open(filename, 'r')
fd.seek(file_pos)
fd_cache[filename] = (time.time(), fd)
return fd
| [
"def",
"_GetFD",
"(",
"filename",
",",
"file_pos",
",",
"fd_cache",
")",
":",
"if",
"(",
"filename",
"in",
"fd_cache",
")",
":",
"(",
"at",
",",
"fd",
")",
"=",
"fd_cache",
"[",
"filename",
"]",
"fd_cache",
"[",
"filename",
"]",
"=",
"(",
"time",
"... | returns the file descriptor matching "filename" if present in the cache . | train | false |
43,608 | def setup_modifiers(node, field=None, context=None, in_tree_view=False):
modifiers = {}
if (field is not None):
transfer_field_to_modifiers(field, modifiers)
transfer_node_to_modifiers(node, modifiers, context=context, in_tree_view=in_tree_view)
transfer_modifiers_to_node(modifiers, node)
| [
"def",
"setup_modifiers",
"(",
"node",
",",
"field",
"=",
"None",
",",
"context",
"=",
"None",
",",
"in_tree_view",
"=",
"False",
")",
":",
"modifiers",
"=",
"{",
"}",
"if",
"(",
"field",
"is",
"not",
"None",
")",
":",
"transfer_field_to_modifiers",
"(",... | processes node attributes and field descriptors to generate the modifiers node attribute and set it on the provided node . | train | false |
43,609 | def branch2(tree):
if isinstance(tree, tuple):
(name, subtree) = tree
print(name, data2[name])
print('subtree', subtree)
if testxb:
branchsum = data2[name]
else:
branchsum = name
for b in subtree:
branchsum = (branchsum + branch2(b))
else:
leavessum = sum((data2[bi] for bi in tree))
print('final branch with', tree, ''.join(tree), leavessum)
if testxb:
return leavessum
else:
return ''.join(tree)
print('working on branch', tree, branchsum)
return branchsum
| [
"def",
"branch2",
"(",
"tree",
")",
":",
"if",
"isinstance",
"(",
"tree",
",",
"tuple",
")",
":",
"(",
"name",
",",
"subtree",
")",
"=",
"tree",
"print",
"(",
"name",
",",
"data2",
"[",
"name",
"]",
")",
"print",
"(",
"'subtree'",
",",
"subtree",
... | walking a tree bottom-up based on dictionary . | train | false |
43,612 | def isdescriptor(x):
for item in ('__get__', '__set__', '__delete__'):
if hasattr(safe_getattr(x, item, None), '__call__'):
return True
return False
| [
"def",
"isdescriptor",
"(",
"x",
")",
":",
"for",
"item",
"in",
"(",
"'__get__'",
",",
"'__set__'",
",",
"'__delete__'",
")",
":",
"if",
"hasattr",
"(",
"safe_getattr",
"(",
"x",
",",
"item",
",",
"None",
")",
",",
"'__call__'",
")",
":",
"return",
"... | check if the object is some kind of descriptor . | train | false |
43,614 | def create_agent_config_map(config):
try:
bridge_mappings = q_utils.parse_mappings(config.OVS.bridge_mappings)
except ValueError as e:
raise ValueError((_('Parsing bridge_mappings failed: %s.') % e))
kwargs = dict(integ_br=config.OVS.integration_bridge, tun_br=config.OVS.tunnel_bridge, local_ip=config.OVS.local_ip, bridge_mappings=bridge_mappings, root_helper=config.AGENT.root_helper, polling_interval=config.AGENT.polling_interval, enable_tunneling=config.OVS.enable_tunneling)
if (kwargs['enable_tunneling'] and (not kwargs['local_ip'])):
msg = _('Tunnelling cannot be enabled without a valid local_ip.')
raise ValueError(msg)
return kwargs
| [
"def",
"create_agent_config_map",
"(",
"config",
")",
":",
"try",
":",
"bridge_mappings",
"=",
"q_utils",
".",
"parse_mappings",
"(",
"config",
".",
"OVS",
".",
"bridge_mappings",
")",
"except",
"ValueError",
"as",
"e",
":",
"raise",
"ValueError",
"(",
"(",
... | create a map of agent config parameters . | train | false |
43,615 | def _existing_file_path_option(option_name, option_value):
file_path = FilePath(option_value)
if (not file_path.exists()):
raise UsageError(u"Problem with --{}. File does not exist: '{}'.".format(option_name, file_path.path))
return file_path
| [
"def",
"_existing_file_path_option",
"(",
"option_name",
",",
"option_value",
")",
":",
"file_path",
"=",
"FilePath",
"(",
"option_value",
")",
"if",
"(",
"not",
"file_path",
".",
"exists",
"(",
")",
")",
":",
"raise",
"UsageError",
"(",
"u\"Problem with --{}. F... | validate a command line option containing a filepath . | train | false |
@pytest.mark.skipif(u'not HAS_SCIPY')
def test_z_at_value_roundtrip():
    """Check that z_at_value inverts the Planck13 cosmology methods.

    Evaluates every public single-redshift method at z=0.5 and asserts
    that z_at_value recovers the input redshift; then does the same for
    the two-redshift distance methods with the far redshift fixed at 2.
    """
    import inspect
    redshift = 0.5
    # Methods that are not simple single-argument functions of redshift.
    excluded = (u'Ok', u'angular_diameter_distance_z1z2', u'clone', u'de_density_scale', u'w')
    for method_name, method in inspect.getmembers(core.Planck13, predicate=inspect.ismethod):
        if method_name.startswith(u'_'):
            continue
        if method_name in excluded:
            continue
        print(u'Round-trip testing {0}'.format(method_name))
        value = method(redshift)
        assert allclose(redshift, funcs.z_at_value(method, value, zmax=1.5), rtol=2e-08)
    # Two-redshift distance methods: fix the far redshift and invert over z1.
    far_z = 2.0
    two_point_funcs = [
        (lambda z1: core.Planck13._comoving_distance_z1z2(z1, far_z)),
        (lambda z1: core.Planck13._comoving_transverse_distance_z1z2(z1, far_z)),
        (lambda z1: core.Planck13.angular_diameter_distance_z1z2(z1, far_z)),
    ]
    for two_point in two_point_funcs:
        value = two_point(redshift)
        assert allclose(redshift, funcs.z_at_value(two_point, value, zmax=1.5), rtol=2e-08)
| [
"@",
"pytest",
".",
"mark",
".",
"skipif",
"(",
"u'not HAS_SCIPY'",
")",
"def",
"test_z_at_value_roundtrip",
"(",
")",
":",
"z",
"=",
"0.5",
"skip",
"=",
"(",
"u'Ok'",
",",
"u'angular_diameter_distance_z1z2'",
",",
"u'clone'",
",",
"u'de_density_scale'",
",",
... | calculate values from a known redshift . | train | false |
def get_container_data_volumes(container, volumes_option):
    """Find the container data volumes that are in volumes_option.

    Combines the explicitly requested volume specs with those declared by
    the container's image, and returns the subset that is backed by a
    *named* mount inside the container, with each spec's ``external``
    field filled in from the matching mount's name.

    :param container: container object exposing ``get(u'Mounts')`` and an
        ``image_config`` mapping.
    :param volumes_option: iterable of volume specs, or None.
    :returns: list of volume specs bound to their named mounts.
    """
    volumes_option = volumes_option or []
    # Index the container's current mounts by their in-container path.
    mounts_by_destination = {}
    for mount in (container.get(u'Mounts') or {}):
        mounts_by_destination[mount[u'Destination']] = mount
    image_declared = [
        VolumeSpec.parse(volume)
        for volume in (container.image_config[u'ContainerConfig'].get(u'Volumes') or {})
    ]
    result = []
    for spec in set(volumes_option + image_declared):
        if spec.external:
            # Already bound to an external volume; nothing to carry over.
            continue
        mount = mounts_by_destination.get(spec.internal)
        # Only mounts backed by a named volume can be carried over.
        if not (mount and mount.get(u'Name')):
            continue
        result.append(spec._replace(external=mount[u'Name']))
    return result
| [
"def",
"get_container_data_volumes",
"(",
"container",
",",
"volumes_option",
")",
":",
"volumes",
"=",
"[",
"]",
"volumes_option",
"=",
"(",
"volumes_option",
"or",
"[",
"]",
")",
"container_mounts",
"=",
"dict",
"(",
"(",
"(",
"mount",
"[",
"u'Destination'",... | find the container data volumes that are in volumes_option . | train | false |
def getPrime(N, randfunc=None):
    """Return a random N-bit probable prime.

    :param N: desired bit length of the prime.
    :param randfunc: optional byte-string source of randomness; when
        omitted, Crypto.Random is imported lazily and used instead.
    :returns: a random N-bit prime number (long/int).
    """
    if randfunc is None:
        _import_Random()
        randfunc = Random.new().read
    # Start from a random odd N-bit integer and step through odd
    # candidates until a probable prime is found.
    candidate = getRandomNBitInteger(N, randfunc) | 1
    while not isPrime(candidate, randfunc=randfunc):
        candidate += 2
    return candidate
| [
"def",
"getPrime",
"(",
"N",
",",
"randfunc",
"=",
"None",
")",
":",
"if",
"(",
"randfunc",
"is",
"None",
")",
":",
"_import_Random",
"(",
")",
"randfunc",
"=",
"Random",
".",
"new",
"(",
")",
".",
"read",
"number",
"=",
"(",
"getRandomNBitInteger",
"... | getPrime: return a random N-bit prime number . | train | false
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.