id_within_dataset
int64
1
55.5k
snippet
stringlengths
19
14.2k
tokens
listlengths
6
1.63k
nl
stringlengths
6
352
split_within_dataset
stringclasses
1 value
is_duplicated
bool
2 classes
2,282
def parse257(resp): if (resp[:3] != '257'): raise error_reply(resp) if (resp[3:5] != ' "'): return '' dirname = '' i = 5 n = len(resp) while (i < n): c = resp[i] i = (i + 1) if (c == '"'): if ((i >= n) or (resp[i] != '"')): break i = (i + 1) dirname = (dirname + c) return dirname
[ "def", "parse257", "(", "resp", ")", ":", "if", "(", "resp", "[", ":", "3", "]", "!=", "'257'", ")", ":", "raise", "error_reply", "(", "resp", ")", "if", "(", "resp", "[", "3", ":", "5", "]", "!=", "' \"'", ")", ":", "return", "''", "dirname", ...
parse the 257 response for a mkd or pwd request .
train
false
2,283
def test_argument_destructuring(): can_compile(u'(fn [[a b]] (print a b))') cant_compile(u'(fn [[]] 0)')
[ "def", "test_argument_destructuring", "(", ")", ":", "can_compile", "(", "u'(fn [[a b]] (print a b))'", ")", "cant_compile", "(", "u'(fn [[]] 0)'", ")" ]
ensure argument destructuring compilers .
train
false
2,284
def export_languages_json(): languages = frappe.db.get_all(u'Language', fields=[u'name', u'language_name']) languages = [{u'name': d.language_name, u'code': d.name} for d in languages] languages.sort((lambda a, b: (1 if (a[u'code'] > b[u'code']) else (-1)))) with open(frappe.get_app_path(u'frappe', u'geo', u'languages.json'), u'w') as f: f.write(frappe.as_json(languages))
[ "def", "export_languages_json", "(", ")", ":", "languages", "=", "frappe", ".", "db", ".", "get_all", "(", "u'Language'", ",", "fields", "=", "[", "u'name'", ",", "u'language_name'", "]", ")", "languages", "=", "[", "{", "u'name'", ":", "d", ".", "langua...
export list of all languages .
train
false
2,285
def getInsetSeparateLoopsFromAroundLoops(loops, radius, radiusAround, thresholdRatio=0.9): if (radius == 0.0): return loops isInset = (radius > 0) insetSeparateLoops = [] radius = abs(radius) radiusAround = max(abs(radiusAround), radius) points = getPointsFromLoops(loops, radiusAround, thresholdRatio) centers = getCentersFromPoints(points, (globalIntercircleMultiplier * radiusAround)) for center in centers: inset = getSimplifiedInsetFromClockwiseLoop(center, radius) if isLargeSameDirection(inset, center, radius): if (isInset == euclidean.getIsInFilledRegion(loops, inset[0])): if isInset: inset.reverse() insetSeparateLoops.append(inset) return insetSeparateLoops
[ "def", "getInsetSeparateLoopsFromAroundLoops", "(", "loops", ",", "radius", ",", "radiusAround", ",", "thresholdRatio", "=", "0.9", ")", ":", "if", "(", "radius", "==", "0.0", ")", ":", "return", "loops", "isInset", "=", "(", "radius", ">", "0", ")", "inse...
get the separate inset loops .
train
false
2,286
@profiler.trace def flavor_extra_delete(request, flavor_id, keys): flavor = novaclient(request).flavors.get(flavor_id) return flavor.unset_keys(keys)
[ "@", "profiler", ".", "trace", "def", "flavor_extra_delete", "(", "request", ",", "flavor_id", ",", "keys", ")", ":", "flavor", "=", "novaclient", "(", "request", ")", ".", "flavors", ".", "get", "(", "flavor_id", ")", "return", "flavor", ".", "unset_keys"...
unset the flavor extra spec keys .
train
false
2,288
@contextmanager def parallel_backend(backend, n_jobs=(-1), **backend_params): if isinstance(backend, _basestring): backend = BACKENDS[backend](**backend_params) old_backend_and_jobs = getattr(_backend, 'backend_and_jobs', None) try: _backend.backend_and_jobs = (backend, n_jobs) (yield (backend, n_jobs)) finally: if (old_backend_and_jobs is None): if (getattr(_backend, 'backend_and_jobs', None) is not None): del _backend.backend_and_jobs else: _backend.backend_and_jobs = old_backend_and_jobs
[ "@", "contextmanager", "def", "parallel_backend", "(", "backend", ",", "n_jobs", "=", "(", "-", "1", ")", ",", "**", "backend_params", ")", ":", "if", "isinstance", "(", "backend", ",", "_basestring", ")", ":", "backend", "=", "BACKENDS", "[", "backend", ...
change the default backend used by parallel inside a with block .
train
false
2,289
def export_key(keyids=None, secret=False, user=None, gnupghome=None): gpg = _create_gpg(user, gnupghome) if isinstance(keyids, six.string_types): keyids = keyids.split(',') return gpg.export_keys(keyids, secret)
[ "def", "export_key", "(", "keyids", "=", "None", ",", "secret", "=", "False", ",", "user", "=", "None", ",", "gnupghome", "=", "None", ")", ":", "gpg", "=", "_create_gpg", "(", "user", ",", "gnupghome", ")", "if", "isinstance", "(", "keyids", ",", "s...
export a key from the gpg keychain keyids the key id(s) of the key(s) to be exported .
train
true
2,290
def is_executable_file(path): fpath = os.path.realpath(path) if (not os.path.isfile(fpath)): return False mode = os.stat(fpath).st_mode if (sys.platform.startswith('sunos') and (os.getuid() == 0)): return bool((mode & ((stat.S_IXUSR | stat.S_IXGRP) | stat.S_IXOTH))) return os.access(fpath, os.X_OK)
[ "def", "is_executable_file", "(", "path", ")", ":", "fpath", "=", "os", ".", "path", ".", "realpath", "(", "path", ")", "if", "(", "not", "os", ".", "path", ".", "isfile", "(", "fpath", ")", ")", ":", "return", "False", "mode", "=", "os", ".", "s...
checks that path is an executable regular file .
train
true
2,291
def inverse_permutation(perm): return permute_row_elements(arange(perm.shape[(-1)], dtype=perm.dtype), perm, inverse=True)
[ "def", "inverse_permutation", "(", "perm", ")", ":", "return", "permute_row_elements", "(", "arange", "(", "perm", ".", "shape", "[", "(", "-", "1", ")", "]", ",", "dtype", "=", "perm", ".", "dtype", ")", ",", "perm", ",", "inverse", "=", "True", ")"...
computes the inverse of permutations .
train
false
2,292
def _parse_id(s): match = re.search('[a-f0-9]{8}(-[a-f0-9]{4}){3}-[a-f0-9]{12}', s) if match: return match.group()
[ "def", "_parse_id", "(", "s", ")", ":", "match", "=", "re", ".", "search", "(", "'[a-f0-9]{8}(-[a-f0-9]{4}){3}-[a-f0-9]{12}'", ",", "s", ")", "if", "match", ":", "return", "match", ".", "group", "(", ")" ]
search for a musicbrainz id in the given string and return it .
train
false
2,293
def confusion_matrix(y_true, y_pred, labels=None, sample_weight=None): (y_type, y_true, y_pred) = _check_targets(y_true, y_pred) if (y_type not in ('binary', 'multiclass')): raise ValueError(('%s is not supported' % y_type)) if (labels is None): labels = unique_labels(y_true, y_pred) else: labels = np.asarray(labels) if np.all([(l not in y_true) for l in labels]): raise ValueError('At least one label specified must be in y_true') if (sample_weight is None): sample_weight = np.ones(y_true.shape[0], dtype=np.int) else: sample_weight = np.asarray(sample_weight) check_consistent_length(sample_weight, y_true, y_pred) n_labels = labels.size label_to_ind = dict(((y, x) for (x, y) in enumerate(labels))) y_pred = np.array([label_to_ind.get(x, (n_labels + 1)) for x in y_pred]) y_true = np.array([label_to_ind.get(x, (n_labels + 1)) for x in y_true]) ind = np.logical_and((y_pred < n_labels), (y_true < n_labels)) y_pred = y_pred[ind] y_true = y_true[ind] sample_weight = sample_weight[ind] CM = coo_matrix((sample_weight, (y_true, y_pred)), shape=(n_labels, n_labels)).toarray() return CM
[ "def", "confusion_matrix", "(", "y_true", ",", "y_pred", ",", "labels", "=", "None", ",", "sample_weight", "=", "None", ")", ":", "(", "y_type", ",", "y_true", ",", "y_pred", ")", "=", "_check_targets", "(", "y_true", ",", "y_pred", ")", "if", "(", "y_...
compute confusion matrix args: res : evaluation results index : model index returns: confusion matrix .
train
false
2,294
def yields(value): return (isinstance(value, Future) or iscoroutine(value))
[ "def", "yields", "(", "value", ")", ":", "return", "(", "isinstance", "(", "value", ",", "Future", ")", "or", "iscoroutine", "(", "value", ")", ")" ]
returns true iff the value yields .
train
false
2,295
def dump_threads_on_sigquit(signum, frame): dump_traceback()
[ "def", "dump_threads_on_sigquit", "(", "signum", ",", "frame", ")", ":", "dump_traceback", "(", ")" ]
dump out the threads to stderr .
train
false
2,296
def _warn_software_properties(repo): log.warning("The 'python-software-properties' package is not installed. For more accurate support of PPA repositories, you should install this package.") log.warning('Best guess at ppa format: {0}'.format(repo))
[ "def", "_warn_software_properties", "(", "repo", ")", ":", "log", ".", "warning", "(", "\"The 'python-software-properties' package is not installed. For more accurate support of PPA repositories, you should install this package.\"", ")", "log", ".", "warning", "(", "'Best guess at pp...
warn of missing python-software-properties package .
train
false
2,297
@receiver(product_viewed) def receive_product_view(sender, product, user, request, response, **kwargs): return history.update(product, request, response)
[ "@", "receiver", "(", "product_viewed", ")", "def", "receive_product_view", "(", "sender", ",", "product", ",", "user", ",", "request", ",", "response", ",", "**", "kwargs", ")", ":", "return", "history", ".", "update", "(", "product", ",", "request", ",",...
receiver to handle viewing single product pages requires the request and response objects due to dependence on cookies .
train
false
2,299
def canonicalize_name(name): if ((not name) or (name == SEP)): return name elif (name[0] == SEP): return ('/' + '/'.join([x for x in name.split(SEP) if x])) else: return '/'.join([x for x in name.split(SEP) if x])
[ "def", "canonicalize_name", "(", "name", ")", ":", "if", "(", "(", "not", "name", ")", "or", "(", "name", "==", "SEP", ")", ")", ":", "return", "name", "elif", "(", "name", "[", "0", "]", "==", "SEP", ")", ":", "return", "(", "'/'", "+", "'/'",...
put name in canonical form .
train
false
2,300
def wire_HTTPConnection_to_WSGI(host, app): class HTTPConnectionDecorator(object, ): 'Decorator to mock the HTTPConecction class.\n\n Wraps the real HTTPConnection class so that when you instantiate\n the class you might instead get a fake instance.\n ' def __init__(self, wrapped): self.wrapped = wrapped def __call__(self, connection_host, *args, **kwargs): if (connection_host == host): return FakeHttplibConnection(app, host) else: return self.wrapped(connection_host, *args, **kwargs) oldHTTPConnection = http_client.HTTPConnection new_http_connection = HTTPConnectionDecorator(http_client.HTTPConnection) http_client.HTTPConnection = new_http_connection return oldHTTPConnection
[ "def", "wire_HTTPConnection_to_WSGI", "(", "host", ",", "app", ")", ":", "class", "HTTPConnectionDecorator", "(", "object", ",", ")", ":", "def", "__init__", "(", "self", ",", "wrapped", ")", ":", "self", ".", "wrapped", "=", "wrapped", "def", "__call__", ...
monkeypatches httpconnection so that if you try to connect to host .
train
false
2,301
def RenderExpoCdf(lam, low, high, n=101): xs = np.linspace(low, high, n) ps = (1 - np.exp(((- lam) * xs))) return (xs, ps)
[ "def", "RenderExpoCdf", "(", "lam", ",", "low", ",", "high", ",", "n", "=", "101", ")", ":", "xs", "=", "np", ".", "linspace", "(", "low", ",", "high", ",", "n", ")", "ps", "=", "(", "1", "-", "np", ".", "exp", "(", "(", "(", "-", "lam", ...
generates sequences of xs and ps for an exponential cdf .
train
false
2,302
def create_file(): if (request.vars and (not (request.vars.token == session.token))): redirect(URL('logout')) try: anchor = (('#' + request.vars.id) if request.vars.id else '') if request.vars.app: app = get_app(request.vars.app) path = abspath(request.vars.location) else: if request.vars.dir: request.vars.location += (request.vars.dir + '/') app = get_app(name=request.vars.location.split('/')[0]) path = apath(request.vars.location, r=request) filename = re.sub('[^\\w./-]+', '_', request.vars.filename) if (path[(-7):] == '/rules/'): if (len(filename) == 0): raise SyntaxError if (not (filename[(-3):] == '.py')): filename += '.py' lang = re.match('^plural_rules-(.*)\\.py$', filename).group(1) langinfo = read_possible_languages(apath(app, r=request))[lang] text = (dedent('\n #!/usr/bin/env python\n # -*- coding: utf-8 -*-\n # Plural-Forms for %(lang)s (%(langname)s)\n\n nplurals=2 # for example, English language has 2 forms:\n # 1 singular and 1 plural\n\n # Determine plural_id for number *n* as sequence of positive\n # integers: 0,1,...\n # NOTE! For singular form ALWAYS return plural_id = 0\n get_plural_id = lambda n: int(n != 1)\n\n # Construct and return plural form of *word* using\n # *plural_id* (which ALWAYS>0). This function will be executed\n # for words (or phrases) not found in plural_dict dictionary.\n # By default this function simply returns word in singular:\n construct_plural_form = lambda word, plural_id: word\n ')[1:] % dict(lang=langinfo[0], langname=langinfo[1])) elif (path[(-11):] == '/languages/'): if (len(filename) == 0): raise SyntaxError if (not (filename[(-3):] == '.py')): filename += '.py' path = os.path.join(apath(app, r=request), 'languages', filename) if (not os.path.exists(path)): safe_write(path, '') findT(apath(app, r=request), filename[:(-3)]) session.flash = T('language file "%(filename)s" created/updated', dict(filename=filename)) redirect((request.vars.sender + anchor)) elif (path[(-8):] == '/models/'): if (not (filename[(-3):] == '.py')): filename += '.py' if (len(filename) == 3): raise SyntaxError text = '# -*- coding: utf-8 -*-\n' elif (path[(-13):] == '/controllers/'): if (not (filename[(-3):] == '.py')): filename += '.py' if (len(filename) == 3): raise SyntaxError text = '# -*- coding: utf-8 -*-\n# %s\ndef index(): return dict(message="hello from %s")' text = (text % (T('try something like'), filename)) elif (path[(-7):] == '/views/'): if (request.vars.plugin and (not filename.startswith(('plugin_%s/' % request.vars.plugin)))): filename = ('plugin_%s/%s' % (request.vars.plugin, filename)) if (filename.find('.') < 0): filename += '.html' extension = filename.split('.')[(-1)].lower() if (len(filename) == 5): raise SyntaxError msg = T('This is the %(filename)s template', dict(filename=filename)) if (extension == 'html'): text = dedent(("\n {{extend 'layout.html'}}\n <h1>%s</h1>\n {{=BEAUTIFY(response._vars)}}" % msg))[1:] else: generic = os.path.join(path, ('generic.' + extension)) if os.path.exists(generic): text = read_file(generic) else: text = '' elif (path[(-9):] == '/modules/'): if (request.vars.plugin and (not filename.startswith(('plugin_%s/' % request.vars.plugin)))): filename = ('plugin_%s/%s' % (request.vars.plugin, filename)) if (not (filename[(-3):] == '.py')): filename += '.py' if (len(filename) == 3): raise SyntaxError text = dedent('\n #!/usr/bin/env python\n # -*- coding: utf-8 -*-\n from gluon import *\n')[1:] elif ((path[(-8):] == '/static/') or (path[(-9):] == '/private/')): if (request.vars.plugin and (not filename.startswith(('plugin_%s/' % request.vars.plugin)))): filename = ('plugin_%s/%s' % (request.vars.plugin, filename)) text = '' else: redirect((request.vars.sender + anchor)) full_filename = os.path.join(path, filename) dirpath = os.path.dirname(full_filename) if (not os.path.exists(dirpath)): os.makedirs(dirpath) if os.path.exists(full_filename): raise SyntaxError safe_write(full_filename, text) log_progress(app, 'CREATE', filename) if request.vars.dir: result = T('file "%(filename)s" created', dict(filename=full_filename[len(path):])) else: session.flash = T('file "%(filename)s" created', dict(filename=full_filename[len(path):])) vars = {} if request.vars.id: vars['id'] = request.vars.id if request.vars.app: vars['app'] = request.vars.app redirect(URL('edit', args=[os.path.join(request.vars.location, filename)], vars=vars)) except Exception as e: if (not isinstance(e, HTTP)): session.flash = T('cannot create file') if request.vars.dir: response.flash = result response.headers['web2py-component-content'] = 'append' response.headers['web2py-component-command'] = ('%s %s %s' % ("$.web2py.invalidate('#files_menu');", ("load_file('%s');" % URL('edit', args=[app, request.vars.dir, filename])), "$.web2py.enableElement($('#form form').find($.web2py.formInputClickSelector));")) return '' else: redirect((request.vars.sender + anchor))
[ "def", "create_file", "(", ")", ":", "if", "(", "request", ".", "vars", "and", "(", "not", "(", "request", ".", "vars", ".", "token", "==", "session", ".", "token", ")", ")", ")", ":", "redirect", "(", "URL", "(", "'logout'", ")", ")", "try", ":"...
create an empty file .
train
false
2,303
def run_in_dir(cmd, cwd, logstream=_RUN_DEFAULT_LOGSTREAM): old_dir = os.getcwd() try: os.chdir(cwd) __run_log(logstream, "running '%s' in '%s'", cmd, cwd) run(cmd, logstream=None) finally: os.chdir(old_dir)
[ "def", "run_in_dir", "(", "cmd", ",", "cwd", ",", "logstream", "=", "_RUN_DEFAULT_LOGSTREAM", ")", ":", "old_dir", "=", "os", ".", "getcwd", "(", ")", "try", ":", "os", ".", "chdir", "(", "cwd", ")", "__run_log", "(", "logstream", ",", "\"running '%s' in...
run the given command in the given working directory .
train
false
2,304
def test_itertools_islice_end(): from itertools import izip, islice it = izip([2, 3, 4], [4, 5, 6]) list(islice(it, 2)) for x in it: AreEqual(x, (4, 6))
[ "def", "test_itertools_islice_end", "(", ")", ":", "from", "itertools", "import", "izip", ",", "islice", "it", "=", "izip", "(", "[", "2", ",", "3", ",", "4", "]", ",", "[", "4", ",", "5", ",", "6", "]", ")", "list", "(", "islice", "(", "it", "...
islice shouldnt consume values after the limit specified by step .
train
false
2,305
def PrintPortfolio(pfl, with_returns=False): print 'Portfolio Title :', pfl.portfolio_title print 'Portfolio ID :', pfl.portfolio_id print ' Last updated :', pfl.updated.text d = pfl.portfolio_data print ' Currency :', d.currency_code if with_returns: print ' Gain % :', d.gain_percentage PrRtn(' Returns :', d) print ' Cost basis :', d.cost_basis print ' Days gain :', d.days_gain print ' Gain :', d.gain print ' Market value :', d.market_value print if pfl.positions: print ' <inlined positions>\n' PrintPositions(pfl.positions, with_returns) print ' </inlined positions>\n'
[ "def", "PrintPortfolio", "(", "pfl", ",", "with_returns", "=", "False", ")", ":", "print", "'Portfolio Title :'", ",", "pfl", ".", "portfolio_title", "print", "'Portfolio ID :'", ",", "pfl", ".", "portfolio_id", "print", "' Last updated :'", ",", "pfl", ...
print single portfolio .
train
false
2,306
@pytest.mark.usefixtures('table_type') def test_init_and_ref_from_multidim_ndarray(table_type): for copy in (False, True): nd = np.array([(1, [10, 20]), (3, [30, 40])], dtype=[(str('a'), 'i8'), (str('b'), 'i8', (2,))]) t = table_type(nd, copy=copy) assert (t.colnames == ['a', 'b']) assert (t['a'].shape == (2,)) assert (t['b'].shape == (2, 2)) t['a'][0] = (-200) t['b'][1][1] = (-100) if copy: assert (nd[str('a')][0] == 1) assert (nd[str('b')][1][1] == 40) else: assert (nd[str('a')][0] == (-200)) assert (nd[str('b')][1][1] == (-100))
[ "@", "pytest", ".", "mark", ".", "usefixtures", "(", "'table_type'", ")", "def", "test_init_and_ref_from_multidim_ndarray", "(", "table_type", ")", ":", "for", "copy", "in", "(", "False", ",", "True", ")", ":", "nd", "=", "np", ".", "array", "(", "[", "(...
test that initializing from an ndarray structured array with a multi-dim column works for both copy=false and true and that the referencing is as expected .
train
false
2,308
def _CheckAtom(atom): return _ValidateString(atom, 'atom', MAXIMUM_FIELD_ATOM_LENGTH, empty_ok=True)
[ "def", "_CheckAtom", "(", "atom", ")", ":", "return", "_ValidateString", "(", "atom", ",", "'atom'", ",", "MAXIMUM_FIELD_ATOM_LENGTH", ",", "empty_ok", "=", "True", ")" ]
checks the field atom is a valid string .
train
false
2,309
def make_property(info, prop, in_set): if in_set: return prop return prop.with_flags(case_flags=make_case_flags(info))
[ "def", "make_property", "(", "info", ",", "prop", ",", "in_set", ")", ":", "if", "in_set", ":", "return", "prop", "return", "prop", ".", "with_flags", "(", "case_flags", "=", "make_case_flags", "(", "info", ")", ")" ]
create the automagic property corresponding to a fact .
train
false
2,310
def send_email_for_expired_orders(email, event_name, invoice_id, order_url): send_email(to=email, action=MAIL_TO_EXPIRED_ORDERS, subject=MAILS[MAIL_TO_EXPIRED_ORDERS]['subject'].format(event_name=event_name), html=MAILS[MAIL_TO_EXPIRED_ORDERS]['message'].format(invoice_id=invoice_id, order_url=order_url))
[ "def", "send_email_for_expired_orders", "(", "email", ",", "event_name", ",", "invoice_id", ",", "order_url", ")", ":", "send_email", "(", "to", "=", "email", ",", "action", "=", "MAIL_TO_EXPIRED_ORDERS", ",", "subject", "=", "MAILS", "[", "MAIL_TO_EXPIRED_ORDERS"...
send email with order invoice link after purchase .
train
false
2,312
def resource_view_delete(context, data_dict): model = context['model'] id = _get_or_bust(data_dict, 'id') resource_view = model.ResourceView.get(id) if (not resource_view): raise NotFound context['resource_view'] = resource_view context['resource'] = model.Resource.get(resource_view.resource_id) _check_access('resource_view_delete', context, data_dict) resource_view.delete() model.repo.commit()
[ "def", "resource_view_delete", "(", "context", ",", "data_dict", ")", ":", "model", "=", "context", "[", "'model'", "]", "id", "=", "_get_or_bust", "(", "data_dict", ",", "'id'", ")", "resource_view", "=", "model", ".", "ResourceView", ".", "get", "(", "id...
delete a resource_view .
train
false
2,315
def test_read_config(): for config in config_fnames: cfg = _read_config(config) assert_true(all(((('unknown' not in block.lower()) and (block != '')) for block in cfg['user_blocks'])))
[ "def", "test_read_config", "(", ")", ":", "for", "config", "in", "config_fnames", ":", "cfg", "=", "_read_config", "(", "config", ")", "assert_true", "(", "all", "(", "(", "(", "(", "'unknown'", "not", "in", "block", ".", "lower", "(", ")", ")", "and",...
test read bti config file .
train
false
2,316
def slugize(slug): assert (name_pattern.match(slug) is not None) slug = slug.lower() for c in (' ', ',', '.', '_'): slug = slug.replace(c, '-') while ('--' in slug): slug = slug.replace('--', '-') slug = slug.strip('-') return slug
[ "def", "slugize", "(", "slug", ")", ":", "assert", "(", "name_pattern", ".", "match", "(", "slug", ")", "is", "not", "None", ")", "slug", "=", "slug", ".", "lower", "(", ")", "for", "c", "in", "(", "' '", ",", "','", ",", "'.'", ",", "'_'", ")"...
create a slug from a team name .
train
false
2,317
def _add_item(routing_key, body, message_id=None, delivery_mode=DELIVERY_DURABLE, headers=None, exchange=None, send_stats=True): if (not cfg.amqp_host): cfg.log.error(('Ignoring amqp message %r to %r' % (body, routing_key))) return if (not exchange): exchange = cfg.amqp_exchange chan = connection_manager.get_channel() msg = amqp.Message(body, timestamp=datetime.now(), delivery_mode=delivery_mode) if message_id: msg.properties['message_id'] = message_id if headers: msg.properties['application_headers'] = headers event_name = ('amqp.%s' % routing_key) try: chan.basic_publish(msg, exchange=exchange, routing_key=routing_key) except Exception as e: if send_stats: cfg.stats.event_count(event_name, 'enqueue_failed') if (e.errno == errno.EPIPE): connection_manager.get_channel(True) add_item(routing_key, body, message_id) else: raise else: if send_stats: cfg.stats.event_count(event_name, 'enqueue')
[ "def", "_add_item", "(", "routing_key", ",", "body", ",", "message_id", "=", "None", ",", "delivery_mode", "=", "DELIVERY_DURABLE", ",", "headers", "=", "None", ",", "exchange", "=", "None", ",", "send_stats", "=", "True", ")", ":", "if", "(", "not", "cf...
adds an item onto a queue .
train
false
2,318
def _check_region_for_parsing(number, default_region): if (not _is_valid_region_code(default_region)): if ((number is None) or (len(number) == 0)): return False match = _PLUS_CHARS_PATTERN.match(number) if (match is None): return False return True
[ "def", "_check_region_for_parsing", "(", "number", ",", "default_region", ")", ":", "if", "(", "not", "_is_valid_region_code", "(", "default_region", ")", ")", ":", "if", "(", "(", "number", "is", "None", ")", "or", "(", "len", "(", "number", ")", "==", ...
checks to see that the region code used is valid .
train
true
2,319
def send_validation(strategy, backend, code): if (not strategy.request.session.session_key): strategy.request.session.create() template = u'activation' if strategy.request.session.pop(u'password_reset', False): template = u'reset' url = u'{}?verification_code={}&id={}&type={}'.format(reverse(u'social:complete', args=(backend.name,)), code.code, strategy.request.session.session_key, template) send_notification_email(None, code.email, template, info=code.code, context={u'url': url})
[ "def", "send_validation", "(", "strategy", ",", "backend", ",", "code", ")", ":", "if", "(", "not", "strategy", ".", "request", ".", "session", ".", "session_key", ")", ":", "strategy", ".", "request", ".", "session", ".", "create", "(", ")", "template",...
sends verification email .
train
false
2,321
def read_os_detection(remote_read): try: linux1 = remote_read('/etc/passwd') linux2 = remote_read('/etc/mtab') linux3 = remote_read('/proc/sys/kernel/ostype') except: pass else: if (('/bin/' in linux1) or ('rw' in linux2) or ('linux' in linux3.lower())): om.out.debug('Identified remote OS as Linux, returning "linux".') return 'linux' try: win1 = remote_read('%SYSTEMROOT%\\win.ini') win2 = remote_read('C:\\windows\\win.ini') win3 = remote_read('C:\\win32\\win.ini') win4 = remote_read('C:\\win\\win.ini') except: pass else: if ('[fonts]' in (((win1 + win2) + win3) + win4)): om.out.debug('Identified remote OS as Windows, returning "windows".') return 'windows' raise OSDetectionException('Failed to get/identify the remote OS.')
[ "def", "read_os_detection", "(", "remote_read", ")", ":", "try", ":", "linux1", "=", "remote_read", "(", "'/etc/passwd'", ")", "linux2", "=", "remote_read", "(", "'/etc/mtab'", ")", "linux3", "=", "remote_read", "(", "'/proc/sys/kernel/ostype'", ")", "except", "...
uses the remote_read method to read remote files and determine what the remote os is .
train
false
2,324
def rewriter(condition, rewrite): def _rewriter(integral): (integrand, symbol) = integral if condition(*integral): rewritten = rewrite(*integral) if (rewritten != integrand): substep = integral_steps(rewritten, symbol) if ((not isinstance(substep, DontKnowRule)) and substep): return RewriteRule(rewritten, substep, integrand, symbol) return _rewriter
[ "def", "rewriter", "(", "condition", ",", "rewrite", ")", ":", "def", "_rewriter", "(", "integral", ")", ":", "(", "integrand", ",", "symbol", ")", "=", "integral", "if", "condition", "(", "*", "integral", ")", ":", "rewritten", "=", "rewrite", "(", "*...
create a template field function that rewrites the given field with the given rewriting rules .
train
false
2,326
@core_helper def is_url(*args, **kw): if (not args): return False try: url = urlparse.urlparse(args[0]) except ValueError: return False default_valid_schemes = ('http', 'https', 'ftp') valid_schemes = config.get('ckan.valid_url_schemes', '').lower().split() return (url.scheme in (valid_schemes or default_valid_schemes))
[ "@", "core_helper", "def", "is_url", "(", "*", "args", ",", "**", "kw", ")", ":", "if", "(", "not", "args", ")", ":", "return", "False", "try", ":", "url", "=", "urlparse", ".", "urlparse", "(", "args", "[", "0", "]", ")", "except", "ValueError", ...
return true if string is an http or ftp path .
train
false
2,327
def test_rename_mixin_columns(mixin_cols): t = QTable(mixin_cols) tc = t.copy() t.rename_column('m', 'mm') assert (t.colnames == ['i', 'a', 'b', 'mm']) if isinstance(t['mm'], table_helpers.ArrayWrapper): assert np.all((t['mm'].data == tc['m'].data)) elif isinstance(t['mm'], coordinates.SkyCoord): assert np.all((t['mm'].ra == tc['m'].ra)) assert np.all((t['mm'].dec == tc['m'].dec)) else: assert np.all((t['mm'] == tc['m']))
[ "def", "test_rename_mixin_columns", "(", "mixin_cols", ")", ":", "t", "=", "QTable", "(", "mixin_cols", ")", "tc", "=", "t", ".", "copy", "(", ")", "t", ".", "rename_column", "(", "'m'", ",", "'mm'", ")", "assert", "(", "t", ".", "colnames", "==", "[...
rename a mixin column .
train
false
2,330
def json_formatter(fstring): if (fstring == '||'): fstring = '[]' else: fstring = fstring.replace(' ', '') fstring = fstring.replace('|', '') fstring = fstring.replace('}{', '},{') fstring = fstring.replace("{u'", "{'") fstring = fstring.replace(":u'", ":'") fstring = fstring.replace(",u'", ",'") fstring = fstring.replace("'", '"') fstring = ('[%s]' % fstring) return fstring
[ "def", "json_formatter", "(", "fstring", ")", ":", "if", "(", "fstring", "==", "'||'", ")", ":", "fstring", "=", "'[]'", "else", ":", "fstring", "=", "fstring", ".", "replace", "(", "' '", ",", "''", ")", "fstring", "=", "fstring", ".", "replace", "(...
properly format the key-value import string to json .
train
false
2,332
def urlsafe_b64encode(s): return b64encode(s).translate(_urlsafe_encode_translation)
[ "def", "urlsafe_b64encode", "(", "s", ")", ":", "return", "b64encode", "(", "s", ")", ".", "translate", "(", "_urlsafe_encode_translation", ")" ]
encode a string using a url-safe base64 alphabet .
train
false
2,333
@login_required @view def importer(request, test_js=False): person = request.user.get_profile() data = get_personal_data(person) data['citation_form'] = mysite.profile.forms.ManuallyAddACitationForm(auto_id=False) data['test_js'] = (test_js or request.GET.get('test', None)) return (request, 'profile/importer.html', data)
[ "@", "login_required", "@", "view", "def", "importer", "(", "request", ",", "test_js", "=", "False", ")", ":", "person", "=", "request", ".", "user", ".", "get_profile", "(", ")", "data", "=", "get_personal_data", "(", "person", ")", "data", "[", "'citat...
get the logged-in users profile .
train
false
2,334
def _sub_clade(clade, term_names): term_clades = [clade.find_any(name) for name in term_names] sub_clade = clade.common_ancestor(term_clades) if (len(term_names) != sub_clade.count_terminals()): temp_clade = BaseTree.Clade() temp_clade.clades.extend(term_clades) for c in sub_clade.find_clades(terminal=False, order='preorder'): if (c == sub_clade.root): continue childs = (set(c.find_clades(terminal=True)) & set(term_clades)) if childs: for tc in temp_clade.find_clades(terminal=False, order='preorder'): tc_childs = set(tc.clades) tc_new_clades = (tc_childs - childs) if (childs.issubset(tc_childs) and tc_new_clades): tc.clades = list(tc_new_clades) child_clade = BaseTree.Clade() child_clade.clades.extend(list(childs)) tc.clades.append(child_clade) sub_clade = temp_clade return sub_clade
[ "def", "_sub_clade", "(", "clade", ",", "term_names", ")", ":", "term_clades", "=", "[", "clade", ".", "find_any", "(", "name", ")", "for", "name", "in", "term_names", "]", "sub_clade", "=", "clade", ".", "common_ancestor", "(", "term_clades", ")", "if", ...
extract a compatible subclade that only contains the given terminal names .
train
false
2,340
def dict_property(store, index): if hasattr(store, '__call__'): getter = (lambda self: store(self)[index]) setter = (lambda self, value: store(self).__setitem__(index, value)) elif isinstance(store, str): getter = (lambda self: self.__getattribute__(store)[index]) setter = (lambda self, value: self.__getattribute__(store).__setitem__(index, value)) else: getter = (lambda self: store[index]) setter = (lambda self, value: store.__setitem__(index, value)) return property(getter, setter)
[ "def", "dict_property", "(", "store", ",", "index", ")", ":", "if", "hasattr", "(", "store", ",", "'__call__'", ")", ":", "getter", "=", "(", "lambda", "self", ":", "store", "(", "self", ")", "[", "index", "]", ")", "setter", "=", "(", "lambda", "s...
helper to create class properties from a dictionary .
train
false
2,341
def get_host_id(kwargs=None, call=None): if (call == 'action'): raise SaltCloudSystemExit('The get_host_id function must be called with -f or --function.') if (kwargs is None): kwargs = {} name = kwargs.get('name', None) if (name is None): raise SaltCloudSystemExit('The get_host_id function requires a name.') try: ret = avail_locations()[name]['id'] except KeyError: raise SaltCloudSystemExit("The host '{0}' could not be found".format(name)) return ret
[ "def", "get_host_id", "(", "kwargs", "=", "None", ",", "call", "=", "None", ")", ":", "if", "(", "call", "==", "'action'", ")", ":", "raise", "SaltCloudSystemExit", "(", "'The get_host_id function must be called with -f or --function.'", ")", "if", "(", "kwargs", ...
returns a hosts id from the given host name .
train
true
2,342
def equal_args(*args, **kwargs): key = args if kwargs: key += (_kwargs_separator + tuple(sorted(kwargs.items()))) return key
[ "def", "equal_args", "(", "*", "args", ",", "**", "kwargs", ")", ":", "key", "=", "args", "if", "kwargs", ":", "key", "+=", "(", "_kwargs_separator", "+", "tuple", "(", "sorted", "(", "kwargs", ".", "items", "(", ")", ")", ")", ")", "return", "key"...
a memoized key factory that compares the equality of a stable sort of the parameters .
train
true
2,348
@receiver(pre_save, sender=UserProfile) def user_profile_pre_save_callback(sender, **kwargs): user_profile = kwargs['instance'] if (user_profile.requires_parental_consent() and user_profile.has_profile_image): user_profile.profile_image_uploaded_at = None user_profile._changed_fields = get_changed_fields_dict(user_profile, sender)
[ "@", "receiver", "(", "pre_save", ",", "sender", "=", "UserProfile", ")", "def", "user_profile_pre_save_callback", "(", "sender", ",", "**", "kwargs", ")", ":", "user_profile", "=", "kwargs", "[", "'instance'", "]", "if", "(", "user_profile", ".", "requires_pa...
ensure consistency of a user profile before saving it .
train
false
2,351
def lazy_validate(request_body_schema, resource_to_validate): schema_validator = validators.SchemaValidator(request_body_schema) schema_validator.validate(resource_to_validate)
[ "def", "lazy_validate", "(", "request_body_schema", ",", "resource_to_validate", ")", ":", "schema_validator", "=", "validators", ".", "SchemaValidator", "(", "request_body_schema", ")", "schema_validator", ".", "validate", "(", "resource_to_validate", ")" ]
a non-decorator way to validate a request .
train
false
2,352
def request_content(url, **kwargs): response = request_response(url, **kwargs) if (response is not None): return response.content
[ "def", "request_content", "(", "url", ",", "**", "kwargs", ")", ":", "response", "=", "request_response", "(", "url", ",", "**", "kwargs", ")", "if", "(", "response", "is", "not", "None", ")", ":", "return", "response", ".", "content" ]
wrapper for request_response .
train
false
2,353
def compute_intensity(image_r, image_g=None, image_b=None): if ((image_g is None) or (image_b is None)): if (not ((image_g is None) and (image_b is None))): raise ValueError(u'please specify either a single image or red, green, and blue images.') return image_r intensity = (((image_r + image_g) + image_b) / 3.0) return np.asarray(intensity, dtype=image_r.dtype)
[ "def", "compute_intensity", "(", "image_r", ",", "image_g", "=", "None", ",", "image_b", "=", "None", ")", ":", "if", "(", "(", "image_g", "is", "None", ")", "or", "(", "image_b", "is", "None", ")", ")", ":", "if", "(", "not", "(", "(", "image_g", ...
return a naive total intensity from the red .
train
false
2,354
def test_feature_fr_from_string2(): lang = Language('fr') feature = Feature.from_string(OUTLINED_FEATURE2, language=lang) assert_equals(feature.name, u'Faire plusieur choses en m\xeame temps') assert_equals(feature.description, u"De fa\xe7on \xe0 automatiser les tests\nEn tant que fain\xe9ant\nJ'utilise les plans de sc\xe9nario") (scenario,) = feature.scenarios assert_equals(scenario.name, 'Ajouter 2 nombres') assert_equals(scenario.outlines, [{u'input_1': u'20', u'input_2': u'30', u'bouton': u'add', u'output': u'50'}, {u'input_1': u'2', u'input_2': u'5', u'bouton': u'add', u'output': u'7'}, {u'input_1': u'0', u'input_2': u'40', u'bouton': u'add', u'output': u'40'}])
[ "def", "test_feature_fr_from_string2", "(", ")", ":", "lang", "=", "Language", "(", "'fr'", ")", "feature", "=", "Feature", ".", "from_string", "(", "OUTLINED_FEATURE2", ",", "language", "=", "lang", ")", "assert_equals", "(", "feature", ".", "name", ",", "u...
language: fr -> feature .
train
false
2,355
@ssl_required def aaq_step3(request, product_key, category_key): return aaq(request, product_key=product_key, category_key=category_key, step=1)
[ "@", "ssl_required", "def", "aaq_step3", "(", "request", ",", "product_key", ",", "category_key", ")", ":", "return", "aaq", "(", "request", ",", "product_key", "=", "product_key", ",", "category_key", "=", "category_key", ",", "step", "=", "1", ")" ]
step 3: the product and category is selected .
train
false
2,356
def get_permitted_actions(filter=None): if (filter is None): return RBACAgent.permitted_actions tmp_bunch = Bunch() [tmp_bunch.__dict__.__setitem__(k, v) for (k, v) in RBACAgent.permitted_actions.items() if k.startswith(filter)] return tmp_bunch
[ "def", "get_permitted_actions", "(", "filter", "=", "None", ")", ":", "if", "(", "filter", "is", "None", ")", ":", "return", "RBACAgent", ".", "permitted_actions", "tmp_bunch", "=", "Bunch", "(", ")", "[", "tmp_bunch", ".", "__dict__", ".", "__setitem__", ...
utility method to return a subset of rbacagents permitted actions .
train
false
2,357
def conv1d_mc1(input, filters, image_shape=None, filter_shape=None, border_mode='valid', subsample=(1,), filter_flip=True): if (image_shape is None): image_shape_mc1 = None else: image_shape_mc1 = (image_shape[0], image_shape[1], image_shape[2], 1) if (filter_shape is None): filter_shape_mc1 = None else: filter_shape_mc1 = (filter_shape[0], filter_shape[1], filter_shape[2], 1) if isinstance(border_mode, tuple): (border_mode,) = border_mode if isinstance(border_mode, int): border_mode = (border_mode, 0) input_mc1 = input.dimshuffle(0, 1, 2, 'x') filters_mc1 = filters.dimshuffle(0, 1, 2, 'x') conved = T.nnet.conv2d(input_mc1, filters_mc1, image_shape_mc1, filter_shape_mc1, subsample=(subsample[0], 1), border_mode=border_mode, filter_flip=filter_flip) return conved[:, :, :, 0]
[ "def", "conv1d_mc1", "(", "input", ",", "filters", ",", "image_shape", "=", "None", ",", "filter_shape", "=", "None", ",", "border_mode", "=", "'valid'", ",", "subsample", "=", "(", "1", ",", ")", ",", "filter_flip", "=", "True", ")", ":", "if", "(", ...
using conv2d with height == 1 .
train
false
2,358
def update_feedback_email_retries(user_id): model = feedback_models.UnsentFeedbackEmailModel.get(user_id) time_since_buffered = (datetime.datetime.utcnow() - model.created_on).seconds if (time_since_buffered > feconf.DEFAULT_FEEDBACK_MESSAGE_EMAIL_COUNTDOWN_SECS): model.retries += 1 model.put()
[ "def", "update_feedback_email_retries", "(", "user_id", ")", ":", "model", "=", "feedback_models", ".", "UnsentFeedbackEmailModel", ".", "get", "(", "user_id", ")", "time_since_buffered", "=", "(", "datetime", ".", "datetime", ".", "utcnow", "(", ")", "-", "mode...
if sufficient time has passed .
train
false
2,359
def time_xlwt(): start_time = clock() workbook = xlwt.Workbook() worksheet = workbook.add_sheet('Sheet1') for row in range((row_max // 2)): for col in range(col_max): worksheet.write((row * 2), col, ('Row: %d Col: %d' % (row, col))) for col in range(col_max): worksheet.write(((row * 2) + 1), col, (row + col)) workbook.save('xlwt.xls') elapsed = (clock() - start_time) print_elapsed_time('xlwt', elapsed)
[ "def", "time_xlwt", "(", ")", ":", "start_time", "=", "clock", "(", ")", "workbook", "=", "xlwt", ".", "Workbook", "(", ")", "worksheet", "=", "workbook", ".", "add_sheet", "(", "'Sheet1'", ")", "for", "row", "in", "range", "(", "(", "row_max", "//", ...
run xlwt in default mode .
train
false
2,360
def latlon_to_grid(latlon): from MAVProxy.modules.lib.ANUGA import redfearn (zone, easting, northing) = redfearn.redfearn(latlon[0], latlon[1]) if (latlon[0] < 0): hemisphere = 'S' else: hemisphere = 'N' return UTMGrid(zone, easting, northing, hemisphere=hemisphere)
[ "def", "latlon_to_grid", "(", "latlon", ")", ":", "from", "MAVProxy", ".", "modules", ".", "lib", ".", "ANUGA", "import", "redfearn", "(", "zone", ",", "easting", ",", "northing", ")", "=", "redfearn", ".", "redfearn", "(", "latlon", "[", "0", "]", ","...
convert to grid reference .
train
true
2,362
def rgb2hed(rgb): return separate_stains(rgb, hed_from_rgb)
[ "def", "rgb2hed", "(", "rgb", ")", ":", "return", "separate_stains", "(", "rgb", ",", "hed_from_rgb", ")" ]
rgb to haematoxylin-eosin-dab color space conversion .
train
false
2,364
def get_preferred_file_contents_encoding(): return (locale.getpreferredencoding() or u'utf-8')
[ "def", "get_preferred_file_contents_encoding", "(", ")", ":", "return", "(", "locale", ".", "getpreferredencoding", "(", ")", "or", "u'utf-8'", ")" ]
get encoding preferred for file contents .
train
false
2,365
def normalize_letters(one_letter_code): if (one_letter_code == '.'): return 'X' else: return one_letter_code.upper()
[ "def", "normalize_letters", "(", "one_letter_code", ")", ":", "if", "(", "one_letter_code", "==", "'.'", ")", ":", "return", "'X'", "else", ":", "return", "one_letter_code", ".", "upper", "(", ")" ]
convert raf one-letter amino acid codes into iupac standard codes .
train
false
2,366
def get_documents(cls, ids): fields = cls.get_mapping()['properties'].keys() ret = cls.search().filter(id__in=ids).values_dict(*fields)[:len(ids)] return cls.reshape(ret)
[ "def", "get_documents", "(", "cls", ",", "ids", ")", ":", "fields", "=", "cls", ".", "get_mapping", "(", ")", "[", "'properties'", "]", ".", "keys", "(", ")", "ret", "=", "cls", ".", "search", "(", ")", ".", "filter", "(", "id__in", "=", "ids", "...
returns a list of es documents with specified ids and doctype :arg cls: the mapping type class with a .
train
false
2,369
def choose_ncv(k): return max(((2 * k) + 1), 20)
[ "def", "choose_ncv", "(", "k", ")", ":", "return", "max", "(", "(", "(", "2", "*", "k", ")", "+", "1", ")", ",", "20", ")" ]
choose number of lanczos vectors based on target number of singular/eigen values and vectors to compute .
train
false
2,370
def pathstrip(path, n): pathlist = [path] while (os.path.dirname(pathlist[0]) != ''): pathlist[0:1] = os.path.split(pathlist[0]) return '/'.join(pathlist[n:])
[ "def", "pathstrip", "(", "path", ",", "n", ")", ":", "pathlist", "=", "[", "path", "]", "while", "(", "os", ".", "path", ".", "dirname", "(", "pathlist", "[", "0", "]", ")", "!=", "''", ")", ":", "pathlist", "[", "0", ":", "1", "]", "=", "os"...
strip n leading components from the given path .
train
true
2,371
def sys_decode(thing): if isinstance(thing, bytes_str): return thing.decode(ENCODING) return thing
[ "def", "sys_decode", "(", "thing", ")", ":", "if", "isinstance", "(", "thing", ",", "bytes_str", ")", ":", "return", "thing", ".", "decode", "(", "ENCODING", ")", "return", "thing" ]
return unicode .
train
false
2,372
def compute_chunk_last_header(lines, numlines, meta, last_header=None): if (last_header is None): last_header = [None, None] line = lines[0] for (i, (linenum, header_key)) in enumerate([(line[1], u'left_headers'), (line[4], u'right_headers')]): headers = meta[header_key] if headers: header = headers[(-1)] last_header[i] = {u'line': header[0], u'text': header[1].strip()} return last_header
[ "def", "compute_chunk_last_header", "(", "lines", ",", "numlines", ",", "meta", ",", "last_header", "=", "None", ")", ":", "if", "(", "last_header", "is", "None", ")", ":", "last_header", "=", "[", "None", ",", "None", "]", "line", "=", "lines", "[", "...
computes information for the displayed function/class headers .
train
false
2,373
def dummy_1d(x, varname=None): if (varname is None): labels = [('level_%d' % i) for i in range((x.max() + 1))] return ((x[:, None] == np.arange((x.max() + 1))).astype(int), labels) else: grouplabels = np.unique(x) labels = [(varname + ('_%s' % str(i))) for i in grouplabels] return ((x[:, None] == grouplabels).astype(int), labels)
[ "def", "dummy_1d", "(", "x", ",", "varname", "=", "None", ")", ":", "if", "(", "varname", "is", "None", ")", ":", "labels", "=", "[", "(", "'level_%d'", "%", "i", ")", "for", "i", "in", "range", "(", "(", "x", ".", "max", "(", ")", "+", "1", ...
dummy variable for id integer groups paramters x : ndarray .
train
false
2,375
def certificate_status_for_student(student, course_id): from course_modes.models import CourseMode try: generated_certificate = GeneratedCertificate.objects.get(user=student, course_id=course_id) cert_status = {'status': generated_certificate.status, 'mode': generated_certificate.mode, 'uuid': generated_certificate.verify_uuid} if generated_certificate.grade: cert_status['grade'] = generated_certificate.grade if (generated_certificate.mode == 'audit'): course_mode_slugs = [mode.slug for mode in CourseMode.modes_for_course(course_id)] if ('honor' not in course_mode_slugs): cert_status['status'] = CertificateStatuses.auditing return cert_status if (generated_certificate.status == CertificateStatuses.downloadable): cert_status['download_url'] = generated_certificate.download_url return cert_status except GeneratedCertificate.DoesNotExist: pass return {'status': CertificateStatuses.unavailable, 'mode': GeneratedCertificate.MODES.honor, 'uuid': None}
[ "def", "certificate_status_for_student", "(", "student", ",", "course_id", ")", ":", "from", "course_modes", ".", "models", "import", "CourseMode", "try", ":", "generated_certificate", "=", "GeneratedCertificate", ".", "objects", ".", "get", "(", "user", "=", "stu...
this returns a dictionary with a key for status .
train
false
2,377
def read_sheets_titles(xml_source): root = fromstring(xml_source) titles_root = root.find(QName('http://schemas.openxmlformats.org/officeDocument/2006/extended-properties', 'TitlesOfParts').text) vector = titles_root.find(QName(NAMESPACES['vt'], 'vector').text) (parts, names) = get_number_of_parts(xml_source) size = parts[names[0]] children = [c.text for c in vector.getchildren()] return children[:size]
[ "def", "read_sheets_titles", "(", "xml_source", ")", ":", "root", "=", "fromstring", "(", "xml_source", ")", "titles_root", "=", "root", ".", "find", "(", "QName", "(", "'http://schemas.openxmlformats.org/officeDocument/2006/extended-properties'", ",", "'TitlesOfParts'", ...
read titles for all sheets .
train
false
2,378
def CDLTRISTAR(barDs, count): return call_talib_with_ohlc(barDs, count, talib.CDLTRISTAR)
[ "def", "CDLTRISTAR", "(", "barDs", ",", "count", ")", ":", "return", "call_talib_with_ohlc", "(", "barDs", ",", "count", ",", "talib", ".", "CDLTRISTAR", ")" ]
tristar pattern .
train
false
2,381
def _fswalk_follow_symlinks(path): assert os.path.isdir(deunicodise(path)) walkdirs = set([path]) for (dirpath, dirnames, filenames) in _os_walk_unicode(path): real_dirpath = unicodise(os.path.realpath(deunicodise(dirpath))) for dirname in dirnames: current = os.path.join(dirpath, dirname) real_current = unicodise(os.path.realpath(deunicodise(current))) if os.path.islink(deunicodise(current)): if ((real_dirpath == real_current) or real_dirpath.startswith((real_current + os.path.sep))): warning(('Skipping recursively symlinked directory %s' % dirname)) else: walkdirs.add(current) for walkdir in walkdirs: for (dirpath, dirnames, filenames) in _os_walk_unicode(walkdir): (yield (dirpath, dirnames, filenames))
[ "def", "_fswalk_follow_symlinks", "(", "path", ")", ":", "assert", "os", ".", "path", ".", "isdir", "(", "deunicodise", "(", "path", ")", ")", "walkdirs", "=", "set", "(", "[", "path", "]", ")", "for", "(", "dirpath", ",", "dirnames", ",", "filenames",...
walk filesystem .
train
false
2,383
def _migrate_collection_to_latest_schema(versioned_collection): collection_schema_version = versioned_collection['schema_version'] if (not (1 <= collection_schema_version <= feconf.CURRENT_COLLECTION_SCHEMA_VERSION)): raise Exception(('Sorry, we can only process v1-v%d collection schemas at present.' % feconf.CURRENT_COLLECTION_SCHEMA_VERSION))
[ "def", "_migrate_collection_to_latest_schema", "(", "versioned_collection", ")", ":", "collection_schema_version", "=", "versioned_collection", "[", "'schema_version'", "]", "if", "(", "not", "(", "1", "<=", "collection_schema_version", "<=", "feconf", ".", "CURRENT_COLLE...
holds the responsibility of performing a step-by-step .
train
false
2,385
def test_list_slice(): a = HyList([1, 2, 3, 4]) sl1 = a[1:] sl5 = a[5:] assert (type(sl1) == HyList) assert (sl1 == HyList([2, 3, 4])) assert (type(sl5) == HyList) assert (sl5 == HyList([]))
[ "def", "test_list_slice", "(", ")", ":", "a", "=", "HyList", "(", "[", "1", ",", "2", ",", "3", ",", "4", "]", ")", "sl1", "=", "a", "[", "1", ":", "]", "sl5", "=", "a", "[", "5", ":", "]", "assert", "(", "type", "(", "sl1", ")", "==", ...
check that slicing a hylist produces a hylist .
train
false
2,386
def extract_deps(fname, legal_deps): deps = {} for line in open(fname).readlines(): if (line[:8] != '#include'): continue inc = _re_include.match(line).group(1) if (inc in legal_deps.keys()): deps[inc] = legal_deps[inc] return deps
[ "def", "extract_deps", "(", "fname", ",", "legal_deps", ")", ":", "deps", "=", "{", "}", "for", "line", "in", "open", "(", "fname", ")", ".", "readlines", "(", ")", ":", "if", "(", "line", "[", ":", "8", "]", "!=", "'#include'", ")", ":", "contin...
extract the headers this file includes .
train
false
2,387
def generateOnlyInterface(list, int): for n in list: if int.providedBy(n): (yield n)
[ "def", "generateOnlyInterface", "(", "list", ",", "int", ")", ":", "for", "n", "in", "list", ":", "if", "int", ".", "providedBy", "(", "n", ")", ":", "(", "yield", "n", ")" ]
filters items in a list by class .
train
false
2,388
def full_path_split(path): (rest, tail) = os.path.split(path) if ((not rest) or (rest == os.path.sep)): return (tail,) return (full_path_split(rest) + (tail,))
[ "def", "full_path_split", "(", "path", ")", ":", "(", "rest", ",", "tail", ")", "=", "os", ".", "path", ".", "split", "(", "path", ")", "if", "(", "(", "not", "rest", ")", "or", "(", "rest", "==", "os", ".", "path", ".", "sep", ")", ")", ":",...
function to do a full split on a path .
train
false
2,389
@not_implemented_for('multigraph') def core_number(G): if (G.number_of_selfloops() > 0): msg = 'Input graph has self loops which is not permitted; Consider using G.remove_edges_from(G.selfloop_edges()).' raise NetworkXError(msg) degrees = dict(G.degree()) nodes = sorted(degrees, key=degrees.get) bin_boundaries = [0] curr_degree = 0 for (i, v) in enumerate(nodes): if (degrees[v] > curr_degree): bin_boundaries.extend(([i] * (degrees[v] - curr_degree))) curr_degree = degrees[v] node_pos = {v: pos for (pos, v) in enumerate(nodes)} core = degrees nbrs = {v: list(nx.all_neighbors(G, v)) for v in G} for v in nodes: for u in nbrs[v]: if (core[u] > core[v]): nbrs[u].remove(v) pos = node_pos[u] bin_start = bin_boundaries[core[u]] node_pos[u] = bin_start node_pos[nodes[bin_start]] = pos (nodes[bin_start], nodes[pos]) = (nodes[pos], nodes[bin_start]) bin_boundaries[core[u]] += 1 core[u] -= 1 return core
[ "@", "not_implemented_for", "(", "'multigraph'", ")", "def", "core_number", "(", "G", ")", ":", "if", "(", "G", ".", "number_of_selfloops", "(", ")", ">", "0", ")", ":", "msg", "=", "'Input graph has self loops which is not permitted; Consider using G.remove_edges_fro...
return the core number for each vertex .
train
false
2,390
@cli.command() def edit(): MARKER = '# Everything below is ignored\n' message = click.edit(('\n\n' + MARKER)) if (message is not None): msg = message.split(MARKER, 1)[0].rstrip('\n') if (not msg): click.echo('Empty message!') else: click.echo(('Message:\n' + msg)) else: click.echo('You did not enter anything!')
[ "@", "cli", ".", "command", "(", ")", "def", "edit", "(", ")", ":", "MARKER", "=", "'# Everything below is ignored\\n'", "message", "=", "click", ".", "edit", "(", "(", "'\\n\\n'", "+", "MARKER", ")", ")", "if", "(", "message", "is", "not", "None", ")"...
open filename in a text editor .
train
false
2,391
def list_schemes(package): resources = pkg_resources.resource_listdir(package.__name__, '.') resources = list(filter(is_ows, resources)) return sorted(resources)
[ "def", "list_schemes", "(", "package", ")", ":", "resources", "=", "pkg_resources", ".", "resource_listdir", "(", "package", ".", "__name__", ",", "'.'", ")", "resources", "=", "list", "(", "filter", "(", "is_ows", ",", "resources", ")", ")", "return", "so...
return a list of example workflows .
train
false
2,392
def get_image_properties_table(meta): (get_image_properties_table,) = from_migration_import('007_add_owner', ['get_image_properties_table']) image_properties = get_image_properties_table(meta) return image_properties
[ "def", "get_image_properties_table", "(", "meta", ")", ":", "(", "get_image_properties_table", ",", ")", "=", "from_migration_import", "(", "'007_add_owner'", ",", "[", "'get_image_properties_table'", "]", ")", "image_properties", "=", "get_image_properties_table", "(", ...
no changes to the image properties table from 002 .
train
false
2,393
def chhome(name, home, **kwargs): kwargs = salt.utils.clean_kwargs(**kwargs) persist = kwargs.pop('persist', False) if kwargs: salt.utils.invalid_kwargs(kwargs) if persist: log.info("Ignoring unsupported 'persist' argument to user.chhome") pre_info = info(name) if (not pre_info): raise CommandExecutionError("User '{0}' does not exist".format(name)) if (home == pre_info['home']): return True _dscl(['/Users/{0}'.format(name), 'NFSHomeDirectory', pre_info['home'], home], ctype='change') time.sleep(1) return (info(name).get('home') == home)
[ "def", "chhome", "(", "name", ",", "home", ",", "**", "kwargs", ")", ":", "kwargs", "=", "salt", ".", "utils", ".", "clean_kwargs", "(", "**", "kwargs", ")", "persist", "=", "kwargs", ".", "pop", "(", "'persist'", ",", "False", ")", "if", "kwargs", ...
set a new home directory for an existing user name username to modify home new home directory to set persist : false set to true to prevent configuration files in the new home directory from being overwritten by the files from the skeleton directory .
train
true
2,394
def _update_configuration_context(context, configuration): config_key = configuration_helpers.get_value('domain_prefix') config = configuration.get('microsites', {}) if (config_key and config): context.update(config.get(config_key, {}))
[ "def", "_update_configuration_context", "(", "context", ",", "configuration", ")", ":", "config_key", "=", "configuration_helpers", ".", "get_value", "(", "'domain_prefix'", ")", "config", "=", "configuration", ".", "get", "(", "'microsites'", ",", "{", "}", ")", ...
site configuration will need to be able to override any hard coded content that was put into the context in the _update_certificate_context() call above .
train
false
2,395
def docstringLStrip(docstring): if (not docstring): return docstring docstring = string.replace(docstring, ' DCTB ', (' ' * 8)) lines = string.split(docstring, '\n') leading = 0 for l in xrange(1, len(lines)): line = lines[l] if string.strip(line): while 1: if (line[leading] == ' '): leading = (leading + 1) else: break if leading: break outlines = lines[0:1] for l in xrange(1, len(lines)): outlines.append(lines[l][leading:]) return string.join(outlines, '\n')
[ "def", "docstringLStrip", "(", "docstring", ")", ":", "if", "(", "not", "docstring", ")", ":", "return", "docstring", "docstring", "=", "string", ".", "replace", "(", "docstring", ",", "' DCTB '", ",", "(", "' '", "*", "8", ")", ")", "lines", "=", "str...
gets rid of unsightly lefthand docstring whitespace residue .
train
false
2,396
def get_vpc_info(vpc): return {'id': vpc.id, 'cidr_block': vpc.cidr_block, 'dhcp_options_id': vpc.dhcp_options_id, 'region': vpc.region.name, 'state': vpc.state}
[ "def", "get_vpc_info", "(", "vpc", ")", ":", "return", "{", "'id'", ":", "vpc", ".", "id", ",", "'cidr_block'", ":", "vpc", ".", "cidr_block", ",", "'dhcp_options_id'", ":", "vpc", ".", "dhcp_options_id", ",", "'region'", ":", "vpc", ".", "region", ".", ...
retrieves vpc information from an instance id and returns it as a dictionary .
train
false
2,398
def copy_stack_trace(from_var, to_var): tr = [] if (type(from_var) is list): for v in from_var: tr += getattr(v.tag, 'trace', []) else: tr = getattr(from_var.tag, 'trace', []) if (tr and isinstance(tr[0], tuple)): tr = [tr] if (type(to_var) is list): for v in to_var: v.tag.trace = (getattr(v.tag, 'trace', []) + tr) else: to_var.tag.trace = (getattr(to_var.tag, 'trace', []) + tr)
[ "def", "copy_stack_trace", "(", "from_var", ",", "to_var", ")", ":", "tr", "=", "[", "]", "if", "(", "type", "(", "from_var", ")", "is", "list", ")", ":", "for", "v", "in", "from_var", ":", "tr", "+=", "getattr", "(", "v", ".", "tag", ",", "'trac...
copies the stack trace from one or more tensor variables to one or more tensor variables .
train
false
2,401
def generate_x509_cert(user_id, project_id, bits=2048): subject = _user_cert_subject(user_id, project_id) with utils.tempdir() as tmpdir: keyfile = os.path.abspath(os.path.join(tmpdir, 'temp.key')) csrfile = os.path.abspath(os.path.join(tmpdir, 'temp.csr')) utils.execute('openssl', 'genrsa', '-out', keyfile, str(bits)) utils.execute('openssl', 'req', '-new', '-key', keyfile, '-out', csrfile, '-batch', '-subj', subject) with open(keyfile) as f: private_key = f.read() with open(csrfile) as f: csr = f.read() (serial, signed_csr) = sign_csr(csr, project_id) fname = os.path.join(ca_folder(project_id), ('newcerts/%s.pem' % serial)) cert = {'user_id': user_id, 'project_id': project_id, 'file_name': fname} db.certificate_create(context.get_admin_context(), cert) return (private_key, signed_csr)
[ "def", "generate_x509_cert", "(", "user_id", ",", "project_id", ",", "bits", "=", "2048", ")", ":", "subject", "=", "_user_cert_subject", "(", "user_id", ",", "project_id", ")", "with", "utils", ".", "tempdir", "(", ")", "as", "tmpdir", ":", "keyfile", "="...
generate and sign a cert for user in project .
train
false
2,402
def convert_to_prover9(input): if isinstance(input, list): result = [] for s in input: try: result.append(_convert_to_prover9(s.simplify())) except: print(('input %s cannot be converted to Prover9 input syntax' % input)) raise return result else: try: return _convert_to_prover9(input.simplify()) except: print(('input %s cannot be converted to Prover9 input syntax' % input)) raise
[ "def", "convert_to_prover9", "(", "input", ")", ":", "if", "isinstance", "(", "input", ",", "list", ")", ":", "result", "=", "[", "]", "for", "s", "in", "input", ":", "try", ":", "result", ".", "append", "(", "_convert_to_prover9", "(", "s", ".", "si...
convert a logic .
train
false
2,403
def adagrad_optimizer(learning_rate, epsilon, n_win=10): def optimizer(loss, param): updates = OrderedDict() if (param is not list): param = list(param) for param_ in param: i = theano.shared(floatX(np.array(0))) i_int = i.astype('int64') value = param_.get_value(borrow=True) accu = theano.shared(np.zeros((value.shape + (n_win,)), dtype=value.dtype)) grad = tt.grad(loss, param_) accu_new = tt.set_subtensor(accu[:, i_int], (grad ** 2)) i_new = tt.switch(((i + 1) < n_win), (i + 1), 0) updates[accu] = accu_new updates[i] = i_new accu_sum = accu_new.sum(axis=1) updates[param_] = (param_ - ((learning_rate * grad) / tt.sqrt((accu_sum + epsilon)))) return updates return optimizer
[ "def", "adagrad_optimizer", "(", "learning_rate", ",", "epsilon", ",", "n_win", "=", "10", ")", ":", "def", "optimizer", "(", "loss", ",", "param", ")", ":", "updates", "=", "OrderedDict", "(", ")", "if", "(", "param", "is", "not", "list", ")", ":", ...
returns a function that returns parameter updates .
train
false
2,404
def unicode_obj(obj): if isinstance(obj, dict): return unicode_dict(obj) elif isinstance(obj, (list, tuple)): return unicode_list(obj) elif isinstance(obj, six.string_types): return unicode_string(obj) elif isinstance(obj, (int, float)): return obj elif (obj is None): return obj else: try: return text(obj) except: return text(repr(obj))
[ "def", "unicode_obj", "(", "obj", ")", ":", "if", "isinstance", "(", "obj", ",", "dict", ")", ":", "return", "unicode_dict", "(", "obj", ")", "elif", "isinstance", "(", "obj", ",", "(", "list", ",", "tuple", ")", ")", ":", "return", "unicode_list", "...
make sure keys and values of dict/list/tuple is unicode .
train
true
2,405
def get_text_from_s3(s3_connection, path): (bucket_name, key_name) = _from_path(path) bucket = s3_connection.get_bucket(bucket_name) k = boto.s3.Key(bucket) k.key = key_name txt = k.get_contents_as_string() return txt
[ "def", "get_text_from_s3", "(", "s3_connection", ",", "path", ")", ":", "(", "bucket_name", ",", "key_name", ")", "=", "_from_path", "(", "path", ")", "bucket", "=", "s3_connection", ".", "get_bucket", "(", "bucket_name", ")", "k", "=", "boto", ".", "s3", ...
read a file from s3 and return it as text .
train
false
2,406
def list_firewall_rules(profile=None): conn = _auth(profile) return conn.list_firewall_rules()
[ "def", "list_firewall_rules", "(", "profile", "=", "None", ")", ":", "conn", "=", "_auth", "(", "profile", ")", "return", "conn", ".", "list_firewall_rules", "(", ")" ]
fetches a list of all firewall rules for a tenant cli example: .
train
false
2,408
def swap32(value): value = long(value) return (((((value & 255L) << 24) | ((value & 65280L) << 8)) | ((value & 16711680L) >> 8)) | ((value & 4278190080L) >> 24))
[ "def", "swap32", "(", "value", ")", ":", "value", "=", "long", "(", "value", ")", "return", "(", "(", "(", "(", "(", "value", "&", "255", "L", ")", "<<", "24", ")", "|", "(", "(", "value", "&", "65280", "L", ")", "<<", "8", ")", ")", "|", ...
swap byte between big and little endian of a 32 bits integer .
train
false
2,409
def _get_existing_regions(): existing_regions = [] possible_files = os.listdir(os.path.expanduser('~')) for f in possible_files: something = re.search('\\.bees\\.(.*)', f) (existing_regions.append(something.group(1)) if something else 'no') return existing_regions
[ "def", "_get_existing_regions", "(", ")", ":", "existing_regions", "=", "[", "]", "possible_files", "=", "os", ".", "listdir", "(", "os", ".", "path", ".", "expanduser", "(", "'~'", ")", ")", "for", "f", "in", "possible_files", ":", "something", "=", "re...
return a list of zone name strings from looking at existing region ~/ .
train
false
2,411
def group_type_specs_get(context, group_type_id): return IMPL.group_type_specs_get(context, group_type_id)
[ "def", "group_type_specs_get", "(", "context", ",", "group_type_id", ")", ":", "return", "IMPL", ".", "group_type_specs_get", "(", "context", ",", "group_type_id", ")" ]
get all group specs for a group type .
train
false
2,412
def safe_as_int(val, atol=0.001): mod = (np.asarray(val) % 1) if (mod.ndim == 0): if (mod > 0.5): mod = (1 - mod) else: mod[(mod > 0.5)] = (1 - mod[(mod > 0.5)]) try: np.testing.assert_allclose(mod, 0, atol=atol) except AssertionError: raise ValueError('Integer argument required but received {0}, check inputs.'.format(val)) return np.round(val).astype(np.int64)
[ "def", "safe_as_int", "(", "val", ",", "atol", "=", "0.001", ")", ":", "mod", "=", "(", "np", ".", "asarray", "(", "val", ")", "%", "1", ")", "if", "(", "mod", ".", "ndim", "==", "0", ")", ":", "if", "(", "mod", ">", "0.5", ")", ":", "mod",...
attempt to safely cast values to integer format .
train
false
2,413
def generate_docs(root_dir, session): services_doc_path = os.path.join(root_dir, 'reference', 'services') if (not os.path.exists(services_doc_path)): os.makedirs(services_doc_path) for service_name in session.get_available_services(): docs = ServiceDocumenter(service_name, session).document_service() service_doc_path = os.path.join(services_doc_path, (service_name + '.rst')) with open(service_doc_path, 'wb') as f: f.write(docs)
[ "def", "generate_docs", "(", "root_dir", ",", "session", ")", ":", "services_doc_path", "=", "os", ".", "path", ".", "join", "(", "root_dir", ",", "'reference'", ",", "'services'", ")", "if", "(", "not", "os", ".", "path", ".", "exists", "(", "services_d...
generates the reference documentation for botocore this will go through every available aws service and output restructured text files documenting each service .
train
false
2,414
def commit_statuses(**kwargs): base = {'updated_at': datetimes(timezones=['UTC']), 'state': text(), 'context': text(average_size=2), 'target_url': text(average_size=2)} base.update(**kwargs) return fixed_dictionaries(base)
[ "def", "commit_statuses", "(", "**", "kwargs", ")", ":", "base", "=", "{", "'updated_at'", ":", "datetimes", "(", "timezones", "=", "[", "'UTC'", "]", ")", ",", "'state'", ":", "text", "(", ")", ",", "'context'", ":", "text", "(", "average_size", "=", ...
create a strategy for github commit status dicts .
train
false
2,415
def featured_map(request, site): map_obj = resolve_object(request, Map, {'featuredurl': site}, permission='base.view_resourcebase', permission_msg=_PERMISSION_MSG_VIEW) return map_view(request, str(map_obj.id))
[ "def", "featured_map", "(", "request", ",", "site", ")", ":", "map_obj", "=", "resolve_object", "(", "request", ",", "Map", ",", "{", "'featuredurl'", ":", "site", "}", ",", "permission", "=", "'base.view_resourcebase'", ",", "permission_msg", "=", "_PERMISSIO...
the view that returns the map composer opened to the map with the given official site url .
train
false
2,416
@deprecated(u'2.1') def pieces(seq, num=2): start = 0 while 1: item = seq[start:(start + num)] if (not len(item)): break (yield item) start += num
[ "@", "deprecated", "(", "u'2.1'", ")", "def", "pieces", "(", "seq", ",", "num", "=", "2", ")", ":", "start", "=", "0", "while", "1", ":", "item", "=", "seq", "[", "start", ":", "(", "start", "+", "num", ")", "]", "if", "(", "not", "len", "(",...
break up the *seq* into *num* tuples .
train
false
2,417
@default_selem def dilation(image, selem=None, out=None, shift_x=False, shift_y=False): selem = np.array(selem) selem = _shift_selem(selem, shift_x, shift_y) selem = _invert_selem(selem) if (out is None): out = np.empty_like(image) ndi.grey_dilation(image, footprint=selem, output=out) return out
[ "@", "default_selem", "def", "dilation", "(", "image", ",", "selem", "=", "None", ",", "out", "=", "None", ",", "shift_x", "=", "False", ",", "shift_y", "=", "False", ")", ":", "selem", "=", "np", ".", "array", "(", "selem", ")", "selem", "=", "_sh...
return greyscale morphological dilation of an image .
train
false
2,418
def add_modulo(image1, image2): image1.load() image2.load() return image1._new(image1.im.chop_add_modulo(image2.im))
[ "def", "add_modulo", "(", "image1", ",", "image2", ")", ":", "image1", ".", "load", "(", ")", "image2", ".", "load", "(", ")", "return", "image1", ".", "_new", "(", "image1", ".", "im", ".", "chop_add_modulo", "(", "image2", ".", "im", ")", ")" ]
add two images .
train
false
2,420
def insert_many(seq, where, val): seq = list(seq) result = [] for i in range((len(where) + len(seq))): if (i in where): result.append(val) else: result.append(seq.pop(0)) return tuple(result)
[ "def", "insert_many", "(", "seq", ",", "where", ",", "val", ")", ":", "seq", "=", "list", "(", "seq", ")", "result", "=", "[", "]", "for", "i", "in", "range", "(", "(", "len", "(", "where", ")", "+", "len", "(", "seq", ")", ")", ")", ":", "...
insert multiple documents .
train
false
2,421
@lru_cache() def get_git_branch(): branch = _shell_command(['/usr/bin/git', 'symbolic-ref', '-q', 'HEAD']).strip() if (not branch): return None return '/'.join(branch.split('/')[2:])
[ "@", "lru_cache", "(", ")", "def", "get_git_branch", "(", ")", ":", "branch", "=", "_shell_command", "(", "[", "'/usr/bin/git'", ",", "'symbolic-ref'", ",", "'-q'", ",", "'HEAD'", "]", ")", ".", "strip", "(", ")", "if", "(", "not", "branch", ")", ":", ...
returns the current git branch .
train
false
2,422
def addLoopToPointTable(loop, pointTable): for point in loop: pointTable[point] = None
[ "def", "addLoopToPointTable", "(", "loop", ",", "pointTable", ")", ":", "for", "point", "in", "loop", ":", "pointTable", "[", "point", "]", "=", "None" ]
add the points in the loop to the point table .
train
false