id_within_dataset
int64
1
55.5k
snippet
stringlengths
19
14.2k
tokens
listlengths
6
1.63k
nl
stringlengths
6
352
split_within_dataset
stringclasses
1 value
is_duplicated
bool
2 classes
41,328
def _RunCommand(args): logging.info('$ %s', ' '.join(args)) p = subprocess.Popen(args) if (p.wait() != 0): sys.exit(p.returncode)
[ "def", "_RunCommand", "(", "args", ")", ":", "logging", ".", "info", "(", "'$ %s'", ",", "' '", ".", "join", "(", "args", ")", ")", "p", "=", "subprocess", ".", "Popen", "(", "args", ")", "if", "(", "p", ".", "wait", "(", ")", "!=", "0", ")", ...
runs specified args as a subprocess .
train
false
41,329
def getDateStr(format='%Y_%b_%d_%H%M'): now = time.strftime(format, time.localtime()) try: now_decoded = codecs.utf_8_decode(now)[0] except UnicodeDecodeError: now_decoded = time.strftime('%Y_%m_%d_%H%M', time.localtime()) return now_decoded
[ "def", "getDateStr", "(", "format", "=", "'%Y_%b_%d_%H%M'", ")", ":", "now", "=", "time", ".", "strftime", "(", "format", ",", "time", ".", "localtime", "(", ")", ")", "try", ":", "now_decoded", "=", "codecs", ".", "utf_8_decode", "(", "now", ")", "[",...
uses time .
train
false
41,330
def viewingMatrix(projection=None, model=None): if (projection is None): projection = GL.glGetDoublev(GL.GL_PROJECTION_MATRIX) if (model is None): model = GL.glGetDoublev(GL.GL_MODELVIEW_MATRIX) if ((projection is None) or (model is None)): context_log.warn('A NULL matrix was returned from glGetDoublev: proj=%s modelView=%s', projection, model) if projection: return projection if model: return model else: return numpy.identity(4, 'd') if numpy.allclose(projection, (-1.79769313e+308)): context_log.warn('Attempt to retrieve projection matrix when uninitialised %s, model=%s', projection, model) return model if numpy.allclose(model, (-1.79769313e+308)): context_log.warn('Attempt to retrieve model-view matrix when uninitialised %s, projection=%s', model, projection) return projection return numpy.dot(model, projection)
[ "def", "viewingMatrix", "(", "projection", "=", "None", ",", "model", "=", "None", ")", ":", "if", "(", "projection", "is", "None", ")", ":", "projection", "=", "GL", ".", "glGetDoublev", "(", "GL", ".", "GL_PROJECTION_MATRIX", ")", "if", "(", "model", ...
calculate the total viewing matrix from given data projection -- the projection matrix .
train
false
41,331
def get_data_spec(model_instance=None, model_class=None): model_class = (model_class or model_instance.__class__) return MODEL_DATA_SPECS[model_class]
[ "def", "get_data_spec", "(", "model_instance", "=", "None", ",", "model_class", "=", "None", ")", ":", "model_class", "=", "(", "model_class", "or", "model_instance", ".", "__class__", ")", "return", "MODEL_DATA_SPECS", "[", "model_class", "]" ]
returns the data specifications for the given network .
train
false
41,332
@task def pre_index(new_index, old_index, alias, index_name, settings): indexer = INDEXER_MAP[index_name] _print('Flagging the database to start the reindexation.', alias) Reindexing.flag_reindexing(new_index=new_index, old_index=old_index, alias=alias) time.sleep(5) _print('Creating the mapping for index {index}.'.format(index=new_index), alias) settings = {'settings': settings, 'mappings': indexer.get_mapping()} try: ES.indices.create(index=new_index, body=settings) except elasticsearch.ElasticsearchException as e: raise CommandError(('ERROR: New index [%s] already exists? %s' % (new_index, e))) ES.cluster.health(index=new_index, wait_for_status='green', wait_for_relocating_shards=0)
[ "@", "task", "def", "pre_index", "(", "new_index", ",", "old_index", ",", "alias", ",", "index_name", ",", "settings", ")", ":", "indexer", "=", "INDEXER_MAP", "[", "index_name", "]", "_print", "(", "'Flagging the database to start the reindexation.'", ",", "alias...
this sets up everything needed before indexing: * flags the database .
train
false
41,333
def _find_installed_apps_entry(module_label): modules = module_label.split('.') combinations = ['.'.join(modules[:(- count)]) for count in range(1, len(modules))] for app_name in combinations: entry = _get_installed_apps_entry(app_name) if entry: return (entry, app_name) raise AppNotFoundError(("Couldn't find an app to import %s from" % module_label))
[ "def", "_find_installed_apps_entry", "(", "module_label", ")", ":", "modules", "=", "module_label", ".", "split", "(", "'.'", ")", "combinations", "=", "[", "'.'", ".", "join", "(", "modules", "[", ":", "(", "-", "count", ")", "]", ")", "for", "count", ...
given a module label .
train
false
41,334
def GetClientLib(service_class_names, doc_format, language, output_path, hostname=None): discovery_files = GenDiscoveryDoc(service_class_names, doc_format, output_path, hostname=hostname) client_libs = [] for discovery_path in discovery_files: client_libs.append(GenClientLib(discovery_path, language, output_path)) return (discovery_files, client_libs)
[ "def", "GetClientLib", "(", "service_class_names", ",", "doc_format", ",", "language", ",", "output_path", ",", "hostname", "=", "None", ")", ":", "discovery_files", "=", "GenDiscoveryDoc", "(", "service_class_names", ",", "doc_format", ",", "output_path", ",", "h...
fetch discovery documents and client libraries from a cloud service .
train
false
41,335
def createExtraFillLoops(nestedRing, radius, radiusAround, shouldExtraLoopsBeAdded): for innerNestedRing in nestedRing.innerNestedRings: createFillForSurroundings(innerNestedRing.innerNestedRings, radius, radiusAround, shouldExtraLoopsBeAdded) allFillLoops = intercircle.getInsetSeparateLoopsFromAroundLoops(nestedRing.getLoopsToBeFilled(), radius, max((1.4 * radius), radiusAround)) if (len(allFillLoops) < 1): return if shouldExtraLoopsBeAdded: nestedRing.extraLoops += allFillLoops nestedRing.penultimateFillLoops = nestedRing.lastFillLoops nestedRing.lastFillLoops = allFillLoops
[ "def", "createExtraFillLoops", "(", "nestedRing", ",", "radius", ",", "radiusAround", ",", "shouldExtraLoopsBeAdded", ")", ":", "for", "innerNestedRing", "in", "nestedRing", ".", "innerNestedRings", ":", "createFillForSurroundings", "(", "innerNestedRing", ".", "innerNe...
create extra fill loops .
train
false
41,336
def pv_absent(name): ret = {'changes': {}, 'comment': '', 'name': name, 'result': True} if (not __salt__['lvm.pvdisplay'](name)): ret['comment'] = 'Physical Volume {0} does not exist'.format(name) elif __opts__['test']: ret['comment'] = 'Physical Volume {0} is set to be removed'.format(name) ret['result'] = None return ret else: changes = __salt__['lvm.pvremove'](name) if __salt__['lvm.pvdisplay'](name): ret['comment'] = 'Failed to remove Physical Volume {0}'.format(name) ret['result'] = False else: ret['comment'] = 'Removed Physical Volume {0}'.format(name) ret['changes']['removed'] = changes return ret
[ "def", "pv_absent", "(", "name", ")", ":", "ret", "=", "{", "'changes'", ":", "{", "}", ",", "'comment'", ":", "''", ",", "'name'", ":", "name", ",", "'result'", ":", "True", "}", "if", "(", "not", "__salt__", "[", "'lvm.pvdisplay'", "]", "(", "nam...
ensure that a physical device is not being used by lvm name the device name to initialize .
train
true
41,339
def set_dnsmasq_facts_if_unset(facts): if ('common' in facts): if ('use_dnsmasq' not in facts['common']): facts['common']['use_dnsmasq'] = bool(safe_get_bool(facts['common']['version_gte_3_2_or_1_2'])) if (('master' in facts) and ('dns_port' not in facts['master'])): if safe_get_bool(facts['common']['use_dnsmasq']): facts['master']['dns_port'] = 8053 else: facts['master']['dns_port'] = 53 return facts
[ "def", "set_dnsmasq_facts_if_unset", "(", "facts", ")", ":", "if", "(", "'common'", "in", "facts", ")", ":", "if", "(", "'use_dnsmasq'", "not", "in", "facts", "[", "'common'", "]", ")", ":", "facts", "[", "'common'", "]", "[", "'use_dnsmasq'", "]", "=", ...
set dnsmasq facts if not already present in facts args: facts existing facts returns: facts updated facts with values set if not previously set .
train
false
41,341
def figaspect(arg): isarray = (hasattr(arg, u'shape') and (not np.isscalar(arg))) figsize_min = np.array((4.0, 2.0)) figsize_max = np.array((16.0, 16.0)) if isarray: (nr, nc) = arg.shape[:2] arr_ratio = (float(nr) / nc) else: arr_ratio = float(arg) fig_height = rcParams[u'figure.figsize'][1] newsize = np.array(((fig_height / arr_ratio), fig_height)) newsize /= min(1.0, *(newsize / figsize_min)) newsize /= max(1.0, *(newsize / figsize_max)) newsize = np.clip(newsize, figsize_min, figsize_max) return newsize
[ "def", "figaspect", "(", "arg", ")", ":", "isarray", "=", "(", "hasattr", "(", "arg", ",", "u'shape'", ")", "and", "(", "not", "np", ".", "isscalar", "(", "arg", ")", ")", ")", "figsize_min", "=", "np", ".", "array", "(", "(", "4.0", ",", "2.0", ...
create a figure with specified aspect ratio .
train
false
41,342
def getDoubleAfterFirstLetter(word): return float(word[1:])
[ "def", "getDoubleAfterFirstLetter", "(", "word", ")", ":", "return", "float", "(", "word", "[", "1", ":", "]", ")" ]
get the double value of the word after the first letter .
train
false
41,343
@must_be_valid_project @must_have_permission(ADMIN) def node_registration_retraction_post(auth, node, **kwargs): if node.is_pending_retraction: raise HTTPError(http.BAD_REQUEST, data={'message_short': 'Invalid Request', 'message_long': 'This registration is already pending withdrawal'}) if (not node.is_registration): raise HTTPError(http.BAD_REQUEST, data={'message_short': 'Invalid Request', 'message_long': 'Withdrawal of non-registrations is not permitted.'}) if (node.root is not node): raise HTTPError(http.BAD_REQUEST, data={'message_short': 'Invalid Request', 'message_long': 'Withdrawal of non-parent registrations is not permitted.'}) data = request.get_json() try: node.retract_registration(auth.user, data.get('justification', None)) node.save() node.retraction.ask(node.get_active_contributors_recursive(unique_users=True)) except NodeStateError as err: raise HTTPError(http.FORBIDDEN, data=dict(message_long=err.message)) return {'redirectUrl': node.web_url_for('view_project')}
[ "@", "must_be_valid_project", "@", "must_have_permission", "(", "ADMIN", ")", "def", "node_registration_retraction_post", "(", "auth", ",", "node", ",", "**", "kwargs", ")", ":", "if", "node", ".", "is_pending_retraction", ":", "raise", "HTTPError", "(", "http", ...
handles retraction of public registrations .
train
false
41,344
def _devbase(dev): dev = os.path.realpath(os.path.expandvars(dev)) dev = os.path.basename(dev) return dev
[ "def", "_devbase", "(", "dev", ")", ":", "dev", "=", "os", ".", "path", ".", "realpath", "(", "os", ".", "path", ".", "expandvars", "(", "dev", ")", ")", "dev", "=", "os", ".", "path", ".", "basename", "(", "dev", ")", "return", "dev" ]
basename of just about any dev .
train
true
41,345
def aggregate_delete(context, aggregate_id): return IMPL.aggregate_delete(context, aggregate_id)
[ "def", "aggregate_delete", "(", "context", ",", "aggregate_id", ")", ":", "return", "IMPL", ".", "aggregate_delete", "(", "context", ",", "aggregate_id", ")" ]
delete an aggregate .
train
false
41,346
def get_host_ref(session, cluster=None): if (cluster is None): host_mor = session._call_method(vim_util, 'get_objects', 'HostSystem')[0].obj else: host_ret = session._call_method(vim_util, 'get_dynamic_property', cluster, 'ClusterComputeResource', 'host') if (host_ret is None): return if (not host_ret.ManagedObjectReference): return host_mor = host_ret.ManagedObjectReference[0] return host_mor
[ "def", "get_host_ref", "(", "session", ",", "cluster", "=", "None", ")", ":", "if", "(", "cluster", "is", "None", ")", ":", "host_mor", "=", "session", ".", "_call_method", "(", "vim_util", ",", "'get_objects'", ",", "'HostSystem'", ")", "[", "0", "]", ...
get reference to a host within the cluster specified .
train
false
41,347
def lstrip(s): return s.lstrip()
[ "def", "lstrip", "(", "s", ")", ":", "return", "s", ".", "lstrip", "(", ")" ]
lstrip(s) -> string return a copy of the string s with leading whitespace removed .
train
false
41,348
def read_logic(s, logic_parser=None, encoding=None): if (encoding is not None): s = s.decode(encoding) if (logic_parser is None): logic_parser = LogicParser() statements = [] for (linenum, line) in enumerate(s.splitlines()): line = line.strip() if (line.startswith(u'#') or (line == u'')): continue try: statements.append(logic_parser.parse(line)) except LogicalExpressionException: raise ValueError((u'Unable to parse line %s: %s' % (linenum, line))) return statements
[ "def", "read_logic", "(", "s", ",", "logic_parser", "=", "None", ",", "encoding", "=", "None", ")", ":", "if", "(", "encoding", "is", "not", "None", ")", ":", "s", "=", "s", ".", "decode", "(", "encoding", ")", "if", "(", "logic_parser", "is", "Non...
convert a file of first order formulas into a list of {expression}s .
train
false
41,350
def upload_csv_to_report_store(rows, csv_name, course_id, timestamp, config_name='GRADES_DOWNLOAD'): report_store = ReportStore.from_config(config_name) report_store.store_rows(course_id, u'{course_prefix}_{csv_name}_{timestamp_str}.csv'.format(course_prefix=course_filename_prefix_generator(course_id), csv_name=csv_name, timestamp_str=timestamp.strftime('%Y-%m-%d-%H%M')), rows) tracker.emit(REPORT_REQUESTED_EVENT_NAME, {'report_type': csv_name})
[ "def", "upload_csv_to_report_store", "(", "rows", ",", "csv_name", ",", "course_id", ",", "timestamp", ",", "config_name", "=", "'GRADES_DOWNLOAD'", ")", ":", "report_store", "=", "ReportStore", ".", "from_config", "(", "config_name", ")", "report_store", ".", "st...
upload data as a csv using reportstore .
train
false
41,351
def get_mod_func(callback): try: dot = callback.rindex(u'.') except ValueError: return (callback, u'') return (callback[:dot], callback[(dot + 1):])
[ "def", "get_mod_func", "(", "callback", ")", ":", "try", ":", "dot", "=", "callback", ".", "rindex", "(", "u'.'", ")", "except", "ValueError", ":", "return", "(", "callback", ",", "u''", ")", "return", "(", "callback", "[", ":", "dot", "]", ",", "cal...
converts django .
train
false
41,352
def log_buffer_age(): return logs_buffer().age()
[ "def", "log_buffer_age", "(", ")", ":", "return", "logs_buffer", "(", ")", ".", "age", "(", ")" ]
returns the number of seconds since the logs buffer was flushed .
train
false
41,353
def _websocket_mask_python(mask, data): mask_arr = array.array('B', mask) unmasked_arr = array.array('B', data) for i in xrange(len(data)): unmasked_arr[i] = (unmasked_arr[i] ^ mask_arr[(i % 4)]) if PY3: return unmasked_arr.tobytes() else: return unmasked_arr.tostring()
[ "def", "_websocket_mask_python", "(", "mask", ",", "data", ")", ":", "mask_arr", "=", "array", ".", "array", "(", "'B'", ",", "mask", ")", "unmasked_arr", "=", "array", ".", "array", "(", "'B'", ",", "data", ")", "for", "i", "in", "xrange", "(", "len...
websocket masking function .
train
true
41,354
@utils.arg('server', metavar='<server>', help=_('Name or UUID of the server to list actions for.'), start_version='2.0', end_version='2.20') @utils.arg('server', metavar='<server>', help=_('Name or UUID of the server to list actions for. Only UUID can be used to list actions on a deleted server.'), start_version='2.21') def do_instance_action_list(cs, args): if (cs.api_version < api_versions.APIVersion('2.21')): server = _find_server(cs, args.server) else: server = _find_server(cs, args.server, raise_if_notfound=False) actions = cs.instance_action.list(server) utils.print_list(actions, ['Action', 'Request_ID', 'Message', 'Start_Time'], sortby_index=3)
[ "@", "utils", ".", "arg", "(", "'server'", ",", "metavar", "=", "'<server>'", ",", "help", "=", "_", "(", "'Name or UUID of the server to list actions for.'", ")", ",", "start_version", "=", "'2.0'", ",", "end_version", "=", "'2.20'", ")", "@", "utils", ".", ...
list actions on a server .
train
false
41,355
def is_asian(char): return (ord(char) > IDEOGRAPHIC_SPACE)
[ "def", "is_asian", "(", "char", ")", ":", "return", "(", "ord", "(", "char", ")", ">", "IDEOGRAPHIC_SPACE", ")" ]
is the character asian? .
train
false
41,356
def cmd_stop(args, opts): jsonrpc_call(opts, 'crawler/engine', 'close_spider', args[0])
[ "def", "cmd_stop", "(", "args", ",", "opts", ")", ":", "jsonrpc_call", "(", "opts", ",", "'crawler/engine'", ",", "'close_spider'", ",", "args", "[", "0", "]", ")" ]
stop <spider> - stop a running spider .
train
false
41,357
def has_fit_parameter(estimator, parameter): return (parameter in signature(estimator.fit).parameters)
[ "def", "has_fit_parameter", "(", "estimator", ",", "parameter", ")", ":", "return", "(", "parameter", "in", "signature", "(", "estimator", ".", "fit", ")", ".", "parameters", ")" ]
checks whether the estimators fit method supports the given parameter .
train
false
41,360
def lighter(image1, image2): image1.load() image2.load() return image1._new(image1.im.chop_lighter(image2.im))
[ "def", "lighter", "(", "image1", ",", "image2", ")", ":", "image1", ".", "load", "(", ")", "image2", ".", "load", "(", ")", "return", "image1", ".", "_new", "(", "image1", ".", "im", ".", "chop_lighter", "(", "image2", ".", "im", ")", ")" ]
compares the two images .
train
false
41,363
def cancelAll(): for globalRepositoryDialogValue in settings.getGlobalRepositoryDialogValues(): globalRepositoryDialogValue.cancel()
[ "def", "cancelAll", "(", ")", ":", "for", "globalRepositoryDialogValue", "in", "settings", ".", "getGlobalRepositoryDialogValues", "(", ")", ":", "globalRepositoryDialogValue", ".", "cancel", "(", ")" ]
cancel all the dialogs .
train
false
41,364
@pytest.fixture def web_history(stubs, web_history_stub): web_history_stub.history_dict = collections.OrderedDict([('http://qutebrowser.org', history.Entry(datetime(2015, 9, 5).timestamp(), QUrl('http://qutebrowser.org'), 'qutebrowser | qutebrowser')), ('https://python.org', history.Entry(datetime(2016, 3, 8).timestamp(), QUrl('https://python.org'), 'Welcome to Python.org')), ('https://github.com', history.Entry(datetime(2016, 5, 1).timestamp(), QUrl('https://github.com'), 'GitHub'))]) return web_history_stub
[ "@", "pytest", ".", "fixture", "def", "web_history", "(", "stubs", ",", "web_history_stub", ")", ":", "web_history_stub", ".", "history_dict", "=", "collections", ".", "OrderedDict", "(", "[", "(", "'http://qutebrowser.org'", ",", "history", ".", "Entry", "(", ...
pre-populate the web-history stub with some history entries .
train
false
41,365
def _open_image(filename, path=None): if os.path.isfile(filename): return open(filename, 'rb') for p in (path or []): if (p and os.path.isabs(p)): fullpath = os.path.join(p, filename) if os.path.isfile(fullpath): return open(fullpath, 'rb') try: if p: fullpath = os.path.join(p, filename) else: fullpath = filename return file_open(fullpath) except IOError: pass raise IOError(('File %s cannot be found in image path' % filename))
[ "def", "_open_image", "(", "filename", ",", "path", "=", "None", ")", ":", "if", "os", ".", "path", ".", "isfile", "(", "filename", ")", ":", "return", "open", "(", "filename", ",", "'rb'", ")", "for", "p", "in", "(", "path", "or", "[", "]", ")",...
attempt to open a binary file and return the descriptor .
train
false
41,367
def get_metadata_path(path): with start_action(action_type=u'flocker:node:agents:gce:get_metadata_path', path=path) as action: timeout_sec = 3 r = requests.get((_METADATA_SERVER + path), headers=_METADATA_HEADERS, timeout=timeout_sec) if (r.status_code != 200): raise ValueError('Did not get success result from metadata server for path {}, instead got {}.'.format(path, r.status_code)) action.add_success_fields(response=r.text) return r.text
[ "def", "get_metadata_path", "(", "path", ")", ":", "with", "start_action", "(", "action_type", "=", "u'flocker:node:agents:gce:get_metadata_path'", ",", "path", "=", "path", ")", "as", "action", ":", "timeout_sec", "=", "3", "r", "=", "requests", ".", "get", "...
requests a metadata path from the metadata server available within gce .
train
false
41,368
@cache_page((60 * 15)) def opensearch_suggestions(request): content_type = 'application/x-suggestions+json' search_form = SimpleSearchForm(request.GET, auto_id=False) if (not search_form.is_valid()): return HttpResponseBadRequest(content_type=content_type) cleaned = search_form.cleaned_data language = locale_or_default((cleaned['language'] or request.LANGUAGE_CODE)) searcher = generate_simple_search(search_form, language, with_highlights=False) searcher = searcher.values_dict('document_title', 'question_title', 'url') results = searcher[:10] def urlize(r): return (u'%s://%s%s' % (('https' if request.is_secure() else 'http'), request.get_host(), r['url'][0])) def titleize(r): return r.get('document_title', r.get('question_title', [_('No title')]))[0] try: data = [cleaned['q'], [titleize(r) for r in results], [], [urlize(r) for r in results]] except ES_EXCEPTIONS: data = [] return HttpResponse(json.dumps(data), content_type=content_type)
[ "@", "cache_page", "(", "(", "60", "*", "15", ")", ")", "def", "opensearch_suggestions", "(", "request", ")", ":", "content_type", "=", "'application/x-suggestions+json'", "search_form", "=", "SimpleSearchForm", "(", "request", ".", "GET", ",", "auto_id", "=", ...
a simple search view that returns opensearch suggestions .
train
false
41,369
def p_command_for(p): p[0] = ('FOR', p[2], p[4], p[6], p[7])
[ "def", "p_command_for", "(", "p", ")", ":", "p", "[", "0", "]", "=", "(", "'FOR'", ",", "p", "[", "2", "]", ",", "p", "[", "4", "]", ",", "p", "[", "6", "]", ",", "p", "[", "7", "]", ")" ]
command : for id equals expr to expr optstep .
train
false
41,370
def _safe_update(d, other): for (k, v) in compat.iteritems(other): if (k in d): raise Exception(('Duplicate regressor: %s' % k)) d[k] = v
[ "def", "_safe_update", "(", "d", ",", "other", ")", ":", "for", "(", "k", ",", "v", ")", "in", "compat", ".", "iteritems", "(", "other", ")", ":", "if", "(", "k", "in", "d", ")", ":", "raise", "Exception", "(", "(", "'Duplicate regressor: %s'", "%"...
combine dictionaries with non-overlapping keys .
train
false
41,372
def fetchUnreadEmails(profile, since=None, markRead=False, limit=None): conn = imaplib.IMAP4_SSL('imap.gmail.com') conn.debug = 0 conn.login(profile['gmail_address'], profile['gmail_password']) conn.select(readonly=(not markRead)) msgs = [] (retcode, messages) = conn.search(None, '(UNSEEN)') if ((retcode == 'OK') and (messages != [''])): numUnread = len(messages[0].split(' ')) if (limit and (numUnread > limit)): return numUnread for num in messages[0].split(' '): (ret, data) = conn.fetch(num, '(RFC822)') msg = email.message_from_string(data[0][1]) if ((not since) or (getDate(msg) > since)): msgs.append(msg) conn.close() conn.logout() return msgs
[ "def", "fetchUnreadEmails", "(", "profile", ",", "since", "=", "None", ",", "markRead", "=", "False", ",", "limit", "=", "None", ")", ":", "conn", "=", "imaplib", ".", "IMAP4_SSL", "(", "'imap.gmail.com'", ")", "conn", ".", "debug", "=", "0", "conn", "...
fetches a list of unread email objects from a users gmail inbox .
train
false
41,373
def symptom_debug_mode_is_enabled(): return CONF.debug
[ "def", "symptom_debug_mode_is_enabled", "(", ")", ":", "return", "CONF", ".", "debug" ]
debug mode should be set to false .
train
false
41,375
def _assert_compatible(im1, im2): if (not (im1.dtype == im2.dtype)): raise ValueError('Input images must have the same dtype.') if (not (im1.shape == im2.shape)): raise ValueError('Input images must have the same dimensions.') return
[ "def", "_assert_compatible", "(", "im1", ",", "im2", ")", ":", "if", "(", "not", "(", "im1", ".", "dtype", "==", "im2", ".", "dtype", ")", ")", ":", "raise", "ValueError", "(", "'Input images must have the same dtype.'", ")", "if", "(", "not", "(", "im1"...
raise an error if the shape and dtype do not match .
train
false
41,376
def remote_addr_ip(request): return request.META.get('REMOTE_ADDR')
[ "def", "remote_addr_ip", "(", "request", ")", ":", "return", "request", ".", "META", ".", "get", "(", "'REMOTE_ADDR'", ")" ]
returns the ip address contained in the remote_addr header .
train
false
41,377
def multislice(lst, slices): if atomp(lst): return lst return [multislice(sublst, slices[1:]) for sublst in lst[slices[0]]]
[ "def", "multislice", "(", "lst", ",", "slices", ")", ":", "if", "atomp", "(", "lst", ")", ":", "return", "lst", "return", "[", "multislice", "(", "sublst", ",", "slices", "[", "1", ":", "]", ")", "for", "sublst", "in", "lst", "[", "slices", "[", ...
multi-dimensional slicing: slices is a list of slice objects .
train
false
41,378
def test_cache_nonexistent_metadata_file(config_stub, tmpdir): config_stub.data = {'storage': {'cache-size': 1024}, 'general': {'private-browsing': False}} disk_cache = cache.DiskCache(str(tmpdir)) cache_file = disk_cache.fileMetaData('nosuchfile') assert (not cache_file.isValid())
[ "def", "test_cache_nonexistent_metadata_file", "(", "config_stub", ",", "tmpdir", ")", ":", "config_stub", ".", "data", "=", "{", "'storage'", ":", "{", "'cache-size'", ":", "1024", "}", ",", "'general'", ":", "{", "'private-browsing'", ":", "False", "}", "}",...
test querying nonexistent meta data file from activated cache .
train
false
41,379
def countcalls(counts): def decorate(func): func_name = func.func_name counts[func_name] = 0 def call(*args, **kwds): counts[func_name] += 1 return func(*args, **kwds) call.func_name = func_name return call return decorate
[ "def", "countcalls", "(", "counts", ")", ":", "def", "decorate", "(", "func", ")", ":", "func_name", "=", "func", ".", "func_name", "counts", "[", "func_name", "]", "=", "0", "def", "call", "(", "*", "args", ",", "**", "kwds", ")", ":", "counts", "...
decorator to count calls to a function .
train
false
41,380
@task(soft_time_limit=ADMIN_EXPORT_TIMEOUT) def async_data_export(file_format, values_list, qs_model, filename): from django.contrib import admin admin_obj = admin.site._registry[qs_model] queryset = qs_model.objects.filter(id__in=values_list) resource_class = admin_obj.get_export_resource_class() data = resource_class().export(queryset) export_data = file_format.export_data(data) kwargs = {'aws_access_key_id': settings.AWS_ACCESS_KEY_ID, 'aws_secret_access_key': settings.AWS_SECRET_ACCESS_KEY, 'calling_format': OrdinaryCallingFormat()} conn = boto.connect_s3(**kwargs) bucket = conn.get_bucket(settings.MOZILLIANS_ADMIN_BUCKET) key = bucket.new_key(filename) key.set_contents_from_string(export_data)
[ "@", "task", "(", "soft_time_limit", "=", "ADMIN_EXPORT_TIMEOUT", ")", "def", "async_data_export", "(", "file_format", ",", "values_list", ",", "qs_model", ",", "filename", ")", ":", "from", "django", ".", "contrib", "import", "admin", "admin_obj", "=", "admin",...
task to export data from admin site and store it to s3 .
train
false
41,381
def _get_score_from_persisted_or_latest_block(persisted_block, block, weight): raw_earned = 0.0 attempted = False if persisted_block: raw_possible = persisted_block.raw_possible else: raw_possible = block.transformer_data[GradesTransformer].max_score if (raw_possible is None): weighted_scores = (None, None) else: weighted_scores = weighted_score(raw_earned, raw_possible, weight) return (((raw_earned, raw_possible) + weighted_scores) + (attempted,))
[ "def", "_get_score_from_persisted_or_latest_block", "(", "persisted_block", ",", "block", ",", "weight", ")", ":", "raw_earned", "=", "0.0", "attempted", "=", "False", "if", "persisted_block", ":", "raw_possible", "=", "persisted_block", ".", "raw_possible", "else", ...
returns the score values .
train
false
41,384
def override__dir__(f): if (sys.version_info[:2] < (3, 3)): @functools.wraps(f) def override__dir__wrapper(self): members = set() for cls in self.__class__.mro(): members.update(dir(cls)) members.update(six.iterkeys(self.__dict__)) members.update(f(self)) return sorted(members) else: @functools.wraps(f) def override__dir__wrapper(self): members = set(object.__dir__(self)) members.update(f(self)) return sorted(members) return override__dir__wrapper
[ "def", "override__dir__", "(", "f", ")", ":", "if", "(", "sys", ".", "version_info", "[", ":", "2", "]", "<", "(", "3", ",", "3", ")", ")", ":", "@", "functools", ".", "wraps", "(", "f", ")", "def", "override__dir__wrapper", "(", "self", ")", ":"...
when overriding a __dir__ method on an object .
train
false
41,385
def key_str(match): skey = get_key(__opts__) return skey.key_str(match)
[ "def", "key_str", "(", "match", ")", ":", "skey", "=", "get_key", "(", "__opts__", ")", "return", "skey", ".", "key_str", "(", "match", ")" ]
return information about the key .
train
false
41,386
def is_naive(value): return (value.utcoffset() is None)
[ "def", "is_naive", "(", "value", ")", ":", "return", "(", "value", ".", "utcoffset", "(", ")", "is", "None", ")" ]
determines if a given datetime .
train
false
41,389
def initialize_vocabulary(vocabulary_path): if gfile.Exists(vocabulary_path): rev_vocab = [] with gfile.GFile(vocabulary_path, mode='rb') as f: rev_vocab.extend(f.readlines()) rev_vocab = [tf.compat.as_bytes(line.strip()) for line in rev_vocab] vocab = dict([(x, y) for (y, x) in enumerate(rev_vocab)]) return (vocab, rev_vocab) else: raise ValueError('Vocabulary file %s not found.', vocabulary_path)
[ "def", "initialize_vocabulary", "(", "vocabulary_path", ")", ":", "if", "gfile", ".", "Exists", "(", "vocabulary_path", ")", ":", "rev_vocab", "=", "[", "]", "with", "gfile", ".", "GFile", "(", "vocabulary_path", ",", "mode", "=", "'rb'", ")", "as", "f", ...
initialize vocabulary from file .
train
true
41,392
@click.command('remove-domain') @click.argument('domain') @click.option('--site', prompt=True) def remove_domain(domain, site=None): from bench.config.site_config import remove_domain if (not site): print 'Please specify site' sys.exit(1) remove_domain(site, domain, bench_path='.')
[ "@", "click", ".", "command", "(", "'remove-domain'", ")", "@", "click", ".", "argument", "(", "'domain'", ")", "@", "click", ".", "option", "(", "'--site'", ",", "prompt", "=", "True", ")", "def", "remove_domain", "(", "domain", ",", "site", "=", "Non...
remove custom domain from a site .
train
false
41,394
def _default_checker(x, y): if (x[0] != y[0]): raise Exception('Output mismatch.', {'performlinker': x[0], 'clinker': y[0]})
[ "def", "_default_checker", "(", "x", ",", "y", ")", ":", "if", "(", "x", "[", "0", "]", "!=", "y", "[", "0", "]", ")", ":", "raise", "Exception", "(", "'Output mismatch.'", ",", "{", "'performlinker'", ":", "x", "[", "0", "]", ",", "'clinker'", "...
default checker for duallinker .
train
false
41,396
@library.global_function def field_with_attrs(bfield, **kwargs): if kwargs.get('label', None): bfield.label = kwargs['label'] bfield.field.widget.attrs.update(kwargs) return bfield
[ "@", "library", ".", "global_function", "def", "field_with_attrs", "(", "bfield", ",", "**", "kwargs", ")", ":", "if", "kwargs", ".", "get", "(", "'label'", ",", "None", ")", ":", "bfield", ".", "label", "=", "kwargs", "[", "'label'", "]", "bfield", "....
allows templates to dynamically add html attributes to bound fields from django forms .
train
false
41,397
def data_science_day_mapper(status_update): if ('data science' in status_update['text'].lower()): day_of_week = status_update['created_at'].weekday() (yield (day_of_week, 1))
[ "def", "data_science_day_mapper", "(", "status_update", ")", ":", "if", "(", "'data science'", "in", "status_update", "[", "'text'", "]", ".", "lower", "(", ")", ")", ":", "day_of_week", "=", "status_update", "[", "'created_at'", "]", ".", "weekday", "(", ")...
yields if status_update contains "data science" .
train
false
41,398
def notify_about_instance_usage(notifier, context, instance, event_suffix,
                                network_info=None, system_metadata=None,
                                extra_usage_info=None, fault=None):
    """Emit a 'compute.instance.<event_suffix>' usage notification.

    When *fault* is given, its serialised form is merged into the
    payload. Suffixes ending in 'error' are sent at error level,
    everything else at info level.
    """
    if (not extra_usage_info):
        extra_usage_info = {}
    usage_info = notifications.info_from_instance(context, instance, network_info, system_metadata, **extra_usage_info)
    if fault:
        # Merge exception details into the payload so consumers see the cause.
        fault_payload = exception_to_dict(fault)
        LOG.debug(fault_payload['message'], instance=instance)
        usage_info.update(fault_payload)
    if event_suffix.endswith('error'):
        method = notifier.error
    else:
        method = notifier.info
    method(context, ('compute.instance.%s' % event_suffix), usage_info)
[ "def", "notify_about_instance_usage", "(", "notifier", ",", "context", ",", "instance", ",", "event_suffix", ",", "network_info", "=", "None", ",", "system_metadata", "=", "None", ",", "extra_usage_info", "=", "None", ",", "fault", "=", "None", ")", ":", "if",...
send a notification about an instance .
train
false
41,399
def CutCommonSpacePrefix(text):
    """Strip the longest common leading-space prefix from *text*'s lines.

    The first line is excluded from the prefix computation when it does
    not itself start with whitespace (typical for docstring-style text
    whose first line sits flush with the quotes). Trailing blank lines
    are dropped. Returns the re-joined text, or '' for effectively empty
    input.

    Fix: the loop used Python 2-only xrange(); range() behaves
    identically here and also works on Python 3.
    """
    text_lines = text.splitlines()
    # Drop trailing blank lines; they would otherwise force an empty prefix.
    while text_lines and not text_lines[-1]:
        text_lines = text_lines[:-1]
    if not text_lines:
        return ''
    if text_lines[0] and text_lines[0][0].isspace():
        text_first_line = []
    else:
        # First line is empty or flush-left: keep it verbatim and exclude
        # it from the common-prefix computation.
        text_first_line = [text_lines.pop(0)]
    common_prefix = os.path.commonprefix([line for line in text_lines if line])
    # Only the *whitespace* part of the common prefix is removed.
    space_prefix_len = len(common_prefix) - len(common_prefix.lstrip())
    if space_prefix_len:
        for index in range(len(text_lines)):
            if text_lines[index]:
                text_lines[index] = text_lines[index][space_prefix_len:]
    return '\n'.join(text_first_line + text_lines)
[ "def", "CutCommonSpacePrefix", "(", "text", ")", ":", "text_lines", "=", "text", ".", "splitlines", "(", ")", "while", "(", "text_lines", "and", "(", "not", "text_lines", "[", "(", "-", "1", ")", "]", ")", ")", ":", "text_lines", "=", "text_lines", "["...
removes a common space prefix from the lines of a multiline text .
train
false
41,400
def unpack_patches(hg_unbundle10_obj, remaining):
    """Generate patch dicts from the data field of an hg unbundle10 chunk.

    Each patch is a 12-byte big-endian (start, end, blocklen) header
    followed by *blocklen* bytes of patch data. *remaining* is the byte
    count left in the chunk; a truncated stream raises Exception.
    """
    while (remaining >= 12):
        (start, end, blocklen) = struct.unpack('>lll', readexactly(hg_unbundle10_obj, 12))
        remaining -= 12
        if (blocklen > remaining):
            raise Exception('unexpected end of patch stream')
        block = readexactly(hg_unbundle10_obj, blocklen)
        remaining -= blocklen
        # string_escape keeps the emitted block printable (Python 2 codec).
        (yield {'start': start, 'end': end, 'blocklen': blocklen, 'block': block.encode('string_escape')})
    if (remaining > 0):
        log.error('Unexpected end of patch stream, %s remaining', remaining)
        raise Exception('unexpected end of patch stream')
[ "def", "unpack_patches", "(", "hg_unbundle10_obj", ",", "remaining", ")", ":", "while", "(", "remaining", ">=", "12", ")", ":", "(", "start", ",", "end", ",", "blocklen", ")", "=", "struct", ".", "unpack", "(", "'>lll'", ",", "readexactly", "(", "hg_unbu...
this method provides a generator of patches from the data field in a chunk .
train
false
41,402
def test_store():
    """Exercise the %store magic: save, restore, and delete a variable."""
    ip = get_ipython()
    ip.run_line_magic('load_ext', 'storemagic')
    # -z clears the store so earlier runs cannot leak state in.
    ip.run_line_magic('store', '-z')
    ip.user_ns['var'] = 42
    ip.run_line_magic('store', 'var')
    ip.user_ns['var'] = 39
    # -r refreshes variables from the store; should restore 42.
    ip.run_line_magic('store', '-r')
    nt.assert_equal(ip.user_ns['var'], 42)
    # After deleting from the store, -r must leave the variable untouched.
    ip.run_line_magic('store', '-d var')
    ip.user_ns['var'] = 39
    ip.run_line_magic('store', '-r')
    nt.assert_equal(ip.user_ns['var'], 39)
[ "def", "test_store", "(", ")", ":", "ip", "=", "get_ipython", "(", ")", "ip", ".", "run_line_magic", "(", "'load_ext'", ",", "'storemagic'", ")", "ip", ".", "run_line_magic", "(", "'store'", ",", "'-z'", ")", "ip", ".", "user_ns", "[", "'var'", "]", "=...
test %store .
train
false
41,403
def _urlunquote(byte_string, remap=None, preserve=None):
    """Percent-decode a URI byte string into unicode via UTF-8.

    :param byte_string: percent-encoded byte string (None passes through)
    :param remap: iterable of single chars to re-encode as %XX in the output
    :param preserve: when truthy, chars in *remap* are kept literal instead
        of re-encoded (swapped through placeholder control chars so they
        survive the decode)
    :return: decoded unicode string (uses the project's 'iriutf8' error
        handler, registered elsewhere)
    """
    if (byte_string is None):
        return byte_string
    byte_string = unquote_to_bytes(byte_string)
    if preserve:
        # Temporarily swap each remap char for an unused control char so the
        # remap loop below cannot touch it; swapped back after decoding.
        replacements = [u'\x1a', u'\x1c', u'\x1d', u'\x1e', u'\x1f']
        preserve_unmap = {}
        for char in remap:
            replacement = replacements.pop(0)
            preserve_unmap[replacement] = char
            byte_string = byte_string.replace(char.encode(u'ascii'), replacement.encode(u'ascii'))
    if remap:
        for char in remap:
            byte_string = byte_string.replace(char.encode(u'ascii'), (u'%%%02x' % ord(char)).encode(u'ascii'))
    output = byte_string.decode(u'utf-8', u'iriutf8')
    if preserve:
        for (replacement, original) in preserve_unmap.items():
            output = output.replace(replacement, original)
    return output
[ "def", "_urlunquote", "(", "byte_string", ",", "remap", "=", "None", ",", "preserve", "=", "None", ")", ":", "if", "(", "byte_string", "is", "None", ")", ":", "return", "byte_string", "byte_string", "=", "unquote_to_bytes", "(", "byte_string", ")", "if", "...
unquotes a uri portion from a byte string into unicode using utf-8 .
train
false
41,404
def _ensure_timestamp_field(dataset_expr, deltas, checkpoints):
    """Verify the expressions carry a timestamp field, adding one if absent.

    When the baseline lacks TS_FIELD_NAME, it is synthesised from the
    asof-date column (AD_FIELD_NAME) on the baseline, deltas and
    checkpoints; otherwise the existing field is validated as a datetime.
    Returns the (possibly transformed) (dataset_expr, deltas, checkpoints).
    """
    measure = dataset_expr.dshape.measure
    if (TS_FIELD_NAME not in measure.names):
        dataset_expr = bz.transform(dataset_expr, **{TS_FIELD_NAME: dataset_expr[AD_FIELD_NAME]})
        deltas = _ad_as_ts(deltas)
        checkpoints = _ad_as_ts(checkpoints)
    else:
        _check_datetime_field(TS_FIELD_NAME, measure)
    return (dataset_expr, deltas, checkpoints)
[ "def", "_ensure_timestamp_field", "(", "dataset_expr", ",", "deltas", ",", "checkpoints", ")", ":", "measure", "=", "dataset_expr", ".", "dshape", ".", "measure", "if", "(", "TS_FIELD_NAME", "not", "in", "measure", ".", "names", ")", ":", "dataset_expr", "=", ...
verify that the baseline and deltas expressions have a timestamp field .
train
true
41,406
@require_context
def group_type_get_all(context, inactive=False, filters=None, marker=None,
                       limit=None, sort_keys=None, sort_dirs=None,
                       offset=None, list_result=False):
    """Return all group types matching *filters*, paginated.

    Result shape: a list of dicts when *list_result* is true, otherwise a
    dict keyed by group-type name; an empty query yields [] or {}.
    NOTE(review): *inactive* is accepted but never referenced in this
    body -- presumably consumed elsewhere; confirm against callers.
    NOTE(review): a caller-supplied *filters* dict is mutated in place
    (gains a 'context' key).
    """
    session = get_session()
    with session.begin():
        filters = (filters or {})
        filters['context'] = context
        query = _generate_paginate_query(context, session, marker, limit, sort_keys, sort_dirs, filters, offset, models.GroupTypes)
        if (query is None):
            if list_result:
                return []
            return {}
        rows = query.all()
        if list_result:
            result = [_dict_with_group_specs_if_authorized(context, row) for row in rows]
            return result
        result = {row['name']: _dict_with_group_specs_if_authorized(context, row) for row in rows}
        return result
[ "@", "require_context", "def", "group_type_get_all", "(", "context", ",", "inactive", "=", "False", ",", "filters", "=", "None", ",", "marker", "=", "None", ",", "limit", "=", "None", ",", "sort_keys", "=", "None", ",", "sort_dirs", "=", "None", ",", "of...
returns a dict describing all group_types with name as key .
train
false
41,409
def _tgrep_node_label_use_action(_s, _l, tokens): assert (len(tokens) == 1) assert tokens[0].startswith(u'=') return tokens[0][1:]
[ "def", "_tgrep_node_label_use_action", "(", "_s", ",", "_l", ",", "tokens", ")", ":", "assert", "(", "len", "(", "tokens", ")", "==", "1", ")", "assert", "tokens", "[", "0", "]", ".", "startswith", "(", "u'='", ")", "return", "tokens", "[", "0", "]",...
returns the node label used to begin a tgrep_expr_labeled .
train
false
41,410
def getErrorString(errorcode):
    """Return the U12 driver's message text for *errorcode*.

    Fix: the original built a c_char_p from an immutable Python string
    and let the C function write into it -- undefined behaviour per the
    ctypes documentation. A mutable buffer from create_string_buffer()
    is the supported way to pass writable char* arguments.
    """
    errorString = ctypes.create_string_buffer(50)
    staticLib.GetErrorString(errorcode, errorString)
    return errorString.value
[ "def", "getErrorString", "(", "errorcode", ")", ":", "errorString", "=", "ctypes", ".", "c_char_p", "(", "(", "' '", "*", "50", ")", ")", "staticLib", ".", "GetErrorString", "(", "errorcode", ",", "errorString", ")", "return", "errorString", ".", "value" ]
name: u12 .
train
false
41,411
def get_text_feedback(context):
    """Return the value of the feedback text input on the current page.

    Executes JavaScript in the test browser to read the first element
    with TEXT_INPUT_CLASS; jQuery must be present on the page.
    """
    return context.browser.execute_script('\n return $(".{text_input_class}")[0].value;\n '.format(text_input_class=TEXT_INPUT_CLASS))
[ "def", "get_text_feedback", "(", "context", ")", ":", "return", "context", ".", "browser", ".", "execute_script", "(", "'\\n return $(\".{text_input_class}\")[0].value;\\n '", ".", "format", "(", "text_input_class", "=", "TEXT_INPUT_CLASS", ")", ")" ]
get the text feedback displayed after the feedback form is filled out .
train
false
41,412
def _get_service(service, profile): if (isinstance(profile, dict) and ('service' in profile)): return profile['service'] return service
[ "def", "_get_service", "(", "service", ",", "profile", ")", ":", "if", "(", "isinstance", "(", "profile", ",", "dict", ")", "and", "(", "'service'", "in", "profile", ")", ")", ":", "return", "profile", "[", "'service'", "]", "return", "service" ]
return a service .
train
false
41,413
def _consume(seq, num, func=None):
    """Split *num* bytes off the front of *seq*.

    Returns (consumed, rest); *consumed* is passed through *func* when
    one is given. *num* is normalised via the module's _B() helper.
    """
    count = _B(num)
    consumed, rest = seq[:count], seq[count:]
    if func:
        consumed = func(consumed)
    return (consumed, rest)
[ "def", "_consume", "(", "seq", ",", "num", ",", "func", "=", "None", ")", ":", "num", "=", "_B", "(", "num", ")", "c", "=", "seq", "[", ":", "num", "]", "r", "=", "seq", "[", "num", ":", "]", "if", "func", ":", "c", "=", "func", "(", "c",...
consume num bytes from the front of the sequence and return a tuple of (consumed, rest); func -- a function to call on the consumed part .
train
false
41,414
def get_freq_group(freq):
    """Return the frequency-group code of a freq string, offset, or code.

    Group codes are the thousands bucket of the integer frequency code,
    so related frequencies share a group.
    :raises ValueError: if *freq* is not a str, DateOffset or int.
    """
    if isinstance(freq, offsets.DateOffset):
        freq = freq.rule_code
    if isinstance(freq, compat.string_types):
        (base, mult) = get_freq_code(freq)
        freq = base
    elif isinstance(freq, int):
        pass
    else:
        raise ValueError('input must be str, offset or int')
    # Integer-divide into the thousands bucket shared by related codes.
    return ((freq // 1000) * 1000)
[ "def", "get_freq_group", "(", "freq", ")", ":", "if", "isinstance", "(", "freq", ",", "offsets", ".", "DateOffset", ")", ":", "freq", "=", "freq", ".", "rule_code", "if", "isinstance", "(", "freq", ",", "compat", ".", "string_types", ")", ":", "(", "ba...
return frequency code group of given frequency str or offset .
train
false
41,415
def set_config_defaults():
    """Override oslo.middleware CORS option defaults for this service.

    Registers the identity-specific request/expose headers and the HTTP
    methods the API accepts, so deployments get sensible CORS defaults
    without explicit configuration.
    """
    cfg.set_defaults(cors.CORS_OPTS,
                     allow_headers=['X-Auth-Token', 'X-Identity-Status', 'X-Roles', 'X-Service-Catalog', 'X-User-Id', 'X-Tenant-Id', 'X-OpenStack-Request-ID'],
                     expose_headers=['X-Auth-Token', 'X-Subject-Token', 'X-Service-Token', 'X-OpenStack-Request-ID'],
                     allow_methods=['GET', 'PUT', 'POST', 'DELETE', 'PATCH'])
[ "def", "set_config_defaults", "(", ")", ":", "cfg", ".", "set_defaults", "(", "cors", ".", "CORS_OPTS", ",", "allow_headers", "=", "[", "'X-Auth-Token'", ",", "'X-Identity-Status'", ",", "'X-Roles'", ",", "'X-Service-Catalog'", ",", "'X-User-Id'", ",", "'X-Tenant-...
this method updates all configuration default values .
train
false
41,416
def strtime(at=None, fmt=PERFECT_TIME_FORMAT):
    """Format *at* (default: current UTC time) using strftime *fmt*."""
    when = at or utcnow()
    return when.strftime(fmt)
[ "def", "strtime", "(", "at", "=", "None", ",", "fmt", "=", "PERFECT_TIME_FORMAT", ")", ":", "if", "(", "not", "at", ")", ":", "at", "=", "utcnow", "(", ")", "return", "at", ".", "strftime", "(", "fmt", ")" ]
returns formatted utcnow .
train
true
41,417
def rosen_der(x):
    """Return the gradient of the Rosenbrock function at point *x*.

    *x* is array-like; the result is an array of the same shape.
    """
    x = asarray(x)
    grad = numpy.zeros_like(x)
    inner = x[1:-1]
    left = x[:-2]
    right = x[2:]
    grad[1:-1] = (200 * (inner - left ** 2)
                  - 400 * inner * (right - inner ** 2)
                  - 2 * (1 - inner))
    grad[0] = -400 * x[0] * (x[1] - x[0] ** 2) - 2 * (1 - x[0])
    grad[-1] = 200 * (x[-1] - x[-2] ** 2)
    return grad
[ "def", "rosen_der", "(", "x", ")", ":", "x", "=", "asarray", "(", "x", ")", "xm", "=", "x", "[", "1", ":", "(", "-", "1", ")", "]", "xm_m1", "=", "x", "[", ":", "(", "-", "2", ")", "]", "xm_p1", "=", "x", "[", "2", ":", "]", "der", "=...
the derivative of the rosenbrock function .
train
true
41,418
def get_expire(name):
    """Return the expiry date (hardExpireDateGMT policy) of account *name*.

    Returns the literal string 'Value not set' when the policy is absent.
    """
    policies = _get_account_policy(name)
    if ('hardExpireDateGMT' in policies):
        return policies['hardExpireDateGMT']
    return 'Value not set'
[ "def", "get_expire", "(", "name", ")", ":", "policies", "=", "_get_account_policy", "(", "name", ")", "if", "(", "'hardExpireDateGMT'", "in", "policies", ")", ":", "return", "policies", "[", "'hardExpireDateGMT'", "]", "return", "'Value not set'" ]
gets the date on which the account expires .
train
false
41,419
def getattr_(obj, name, default_thunk):
    """getattr with a lazy default, like dict.setdefault for attributes.

    On a missing attribute, call *default_thunk*, store its result on
    *obj* under *name*, and return it.
    """
    try:
        value = getattr(obj, name)
    except AttributeError:
        value = default_thunk()
        setattr(obj, name, value)
    return value
[ "def", "getattr_", "(", "obj", ",", "name", ",", "default_thunk", ")", ":", "try", ":", "return", "getattr", "(", "obj", ",", "name", ")", "except", "AttributeError", ":", "default", "=", "default_thunk", "(", ")", "setattr", "(", "obj", ",", "name", "...
similar to .
train
false
41,420
def set_epsilon(e):
    """Set the module-global fuzz factor used in numeric expressions."""
    global _EPSILON
    _EPSILON = e
[ "def", "set_epsilon", "(", "e", ")", ":", "global", "_EPSILON", "_EPSILON", "=", "e" ]
sets the value of the fuzz factor used in numeric expressions .
train
false
41,421
def ensure_floating_forward(floating_ip, fixed_ip, device, network):
    """Ensure NAT forwarding rules exist for *floating_ip* -> *fixed_ip*.

    Pre-existing NAT rules mentioning the floating IP are removed first
    (logged as a warning when any were found), then the forward rules are
    added and applied. When *device* differs from the network bridge,
    matching ebtables rules are installed as well.
    """
    regex = ('.*\\s+%s(/32|\\s+|$)' % floating_ip)
    num_rules = iptables_manager.ipv4['nat'].remove_rules_regex(regex)
    if num_rules:
        msg = _('Removed %(num)d duplicate rules for floating ip %(float)s')
        LOG.warn((msg % {'num': num_rules, 'float': floating_ip}))
    for (chain, rule) in floating_forward_rules(floating_ip, fixed_ip, device):
        iptables_manager.ipv4['nat'].add_rule(chain, rule)
    iptables_manager.apply()
    if (device != network['bridge']):
        ensure_ebtables_rules(*floating_ebtables_rules(fixed_ip, network))
[ "def", "ensure_floating_forward", "(", "floating_ip", ",", "fixed_ip", ",", "device", ",", "network", ")", ":", "regex", "=", "(", "'.*\\\\s+%s(/32|\\\\s+|$)'", "%", "floating_ip", ")", "num_rules", "=", "iptables_manager", ".", "ipv4", "[", "'nat'", "]", ".", ...
ensure floating ip forwarding rule .
train
false
41,422
@image_comparison(baseline_images=[u'tight_layout5'])
def test_tight_layout5():
    """tight_layout with an image axes (imshow) matches the baseline."""
    fig = plt.figure()
    ax = plt.subplot(111)
    arr = np.arange(100).reshape((10, 10))
    ax.imshow(arr, interpolation=u'none')
    plt.tight_layout()
[ "@", "image_comparison", "(", "baseline_images", "=", "[", "u'tight_layout5'", "]", ")", "def", "test_tight_layout5", "(", ")", ":", "fig", "=", "plt", ".", "figure", "(", ")", "ax", "=", "plt", ".", "subplot", "(", "111", ")", "arr", "=", "np", ".", ...
test tight_layout for image .
train
false
41,423
@pytest.fixture
def tabbed_browser_stubs(stubs, win_registry):
    """Provide fake tabbed-browser objects registered for windows 0 and 1.

    Registers one stub per window in objreg, yields both, and
    deregisters them again on teardown.
    """
    win_registry.add_window(1)
    stubs = [stubs.TabbedBrowserStub(), stubs.TabbedBrowserStub()]
    objreg.register('tabbed-browser', stubs[0], scope='window', window=0)
    objreg.register('tabbed-browser', stubs[1], scope='window', window=1)
    (yield stubs)
    objreg.delete('tabbed-browser', scope='window', window=0)
    objreg.delete('tabbed-browser', scope='window', window=1)
[ "@", "pytest", ".", "fixture", "def", "tabbed_browser_stubs", "(", "stubs", ",", "win_registry", ")", ":", "win_registry", ".", "add_window", "(", "1", ")", "stubs", "=", "[", "stubs", ".", "TabbedBrowserStub", "(", ")", ",", "stubs", ".", "TabbedBrowserStub...
fixture providing a fake tabbed-browser object on win_id 0 and 1 .
train
false
41,425
def assert_no_dunder_name(code_obj, expr):
    """Raise NameError if *code_obj* references a dunder or unsafe name.

    Guards sandboxed expression evaluation: any entry in co_names that
    contains '__' or appears in _UNSAFE_ATTRIBUTES is rejected.
    """
    for referenced in code_obj.co_names:
        is_dunder = '__' in referenced
        if is_dunder or referenced in _UNSAFE_ATTRIBUTES:
            raise NameError('Access to forbidden name %r (%r)' % (referenced, expr))
[ "def", "assert_no_dunder_name", "(", "code_obj", ",", "expr", ")", ":", "for", "name", "in", "code_obj", ".", "co_names", ":", "if", "(", "(", "'__'", "in", "name", ")", "or", "(", "name", "in", "_UNSAFE_ATTRIBUTES", ")", ")", ":", "raise", "NameError", ...
assert_no_dunder_name -> none asserts that the code object does not refer to any "dunder name" .
train
false
41,426
def _active_mounts_darwin(ret):
    """Populate *ret* with active mounts parsed from `mount` on macOS.

    Each entry is keyed by mount point and records the device, fstype and
    mount opts (user/group names resolved). Returns the updated dict.
    """
    for line in __salt__['cmd.run_stdout']('mount').split('\n'):
        # Collapse whitespace runs so the fields split predictably.
        comps = re.sub('\\s+', ' ', line).split()
        # macOS mount lines end in '(fstype, opt, ...)'.
        parens = re.findall('\\((.*?)\\)', line, re.DOTALL)[0].split(', ')
        ret[comps[2]] = {'device': comps[0], 'fstype': parens[0], 'opts': _resolve_user_group_names(parens[1:])}
    return ret
[ "def", "_active_mounts_darwin", "(", "ret", ")", ":", "for", "line", "in", "__salt__", "[", "'cmd.run_stdout'", "]", "(", "'mount'", ")", ".", "split", "(", "'\\n'", ")", ":", "comps", "=", "re", ".", "sub", "(", "'\\\\s+'", ",", "' '", ",", "line", ...
list active mounts on mac os systems .
train
true
41,427
def derive_model_url(model_class, urlname_prefix, object, kind):
    """Guess a URL of the given *kind* for *object* under *model_class*.

    Tries urlnames '<prefix>.<kind>' (for 'detail', also '<prefix>.edit'),
    first with no kwargs and then with the object's pk, returning the
    first that reverses. Returns None when nothing matches, and also
    (implicit return) when *object* is neither an instance nor a subclass
    of *model_class*.
    """
    if (not (isinstance(object, model_class) or (inspect.isclass(object) and issubclass(object, model_class)))):
        return
    kind_to_urlnames = {u'detail': ((u'%s.detail' % urlname_prefix), (u'%s.edit' % urlname_prefix))}
    kwarg_sets = [{}]
    if getattr(object, u'pk', None):
        kwarg_sets.append({u'pk': object.pk})
    for urlname in kind_to_urlnames.get(kind, [(u'%s.%s' % (urlname_prefix, kind))]):
        for kwargs in kwarg_sets:
            try:
                return reverse(urlname, kwargs=kwargs)
            except NoReverseMatch:
                pass
    return None
[ "def", "derive_model_url", "(", "model_class", ",", "urlname_prefix", ",", "object", ",", "kind", ")", ":", "if", "(", "not", "(", "isinstance", "(", "object", ",", "model_class", ")", "or", "(", "inspect", ".", "isclass", "(", "object", ")", "and", "iss...
try to guess a model url for the given object and kind .
train
false
41,428
def _get_frequency_grid(frequency, assume_regular_frequency=False): frequency = np.asarray(frequency) if (frequency.ndim != 1): raise ValueError('frequency grid must be 1 dimensional') elif (len(frequency) == 1): return (frequency[0], frequency[0], 1) elif (not (assume_regular_frequency or _is_regular(frequency))): raise ValueError('frequency must be a regular grid') return (frequency[0], (frequency[1] - frequency[0]), len(frequency))
[ "def", "_get_frequency_grid", "(", "frequency", ",", "assume_regular_frequency", "=", "False", ")", ":", "frequency", "=", "np", ".", "asarray", "(", "frequency", ")", "if", "(", "frequency", ".", "ndim", "!=", "1", ")", ":", "raise", "ValueError", "(", "'...
utility to get grid parameters from a frequency array . parameters: frequency (array_like or quantity) -- input frequency grid; assume_regular_frequency (bool) -- if true, assume the grid is regularly spaced .
train
false
41,430
def cert_get_domains(cert):
    """Return the DNS names from *cert*'s SubjectAltName extension.

    Any failure (missing extension, parse error) is logged as a warning
    and yields an empty list instead of raising.
    """
    domains = []
    try:
        ext = cert.extensions.get_extension_for_oid(x509.OID_SUBJECT_ALTERNATIVE_NAME)
        entries = ext.value.get_values_for_type(x509.DNSName)
        for entry in entries:
            domains.append(entry)
    except Exception as e:
        app.logger.warning('Failed to get SubjectAltName: {0}'.format(e))
    return domains
[ "def", "cert_get_domains", "(", "cert", ")", ":", "domains", "=", "[", "]", "try", ":", "ext", "=", "cert", ".", "extensions", ".", "get_extension_for_oid", "(", "x509", ".", "OID_SUBJECT_ALTERNATIVE_NAME", ")", "entries", "=", "ext", ".", "value", ".", "g...
attempts to get an domains listed in a certificate .
train
false
41,431
def total_billed_ops_to_str(self):
    """Format this record's total billed ops for display in the Appstats UI."""
    return billed_ops_to_str(self.total_billed_ops_list())
[ "def", "total_billed_ops_to_str", "(", "self", ")", ":", "return", "billed_ops_to_str", "(", "self", ".", "total_billed_ops_list", "(", ")", ")" ]
formats a list of billedopprotos for display in the appstats ui .
train
false
41,432
def get_capture_loglevel():
    """Return the log level of the current capture context.

    Returns None when output is not currently being redirected (no
    'loglevel' set on the thread-local context).
    """
    return getattr(local_context, u'loglevel', None)
[ "def", "get_capture_loglevel", "(", ")", ":", "return", "getattr", "(", "local_context", ",", "u'loglevel'", ",", "None", ")" ]
if output is currently being redirected to a stream .
train
false
41,433
def write_external_link(links):
    """Serialise defined-name links for a single external workbook.

    Returns an <externalLink> element whose <externalBook> (relationship
    rId1) holds one <definedName> child per entry in *links*.
    """
    root = Element(('{%s}externalLink' % SHEET_MAIN_NS))
    book = SubElement(root, ('{%s}externalBook' % SHEET_MAIN_NS), {('{%s}id' % REL_NS): 'rId1'})
    external_ranges = SubElement(book, ('{%s}definedNames' % SHEET_MAIN_NS))
    for l in links:
        external_ranges.append(Element(('{%s}definedName' % SHEET_MAIN_NS), dict(l)))
    return root
[ "def", "write_external_link", "(", "links", ")", ":", "root", "=", "Element", "(", "(", "'{%s}externalLink'", "%", "SHEET_MAIN_NS", ")", ")", "book", "=", "SubElement", "(", "root", ",", "(", "'{%s}externalBook'", "%", "SHEET_MAIN_NS", ")", ",", "{", "(", ...
serialise links to ranges in a single external workbook .
train
false
41,434
def _limit_discount_amount_by_min_price(line, order_source):
    """Clamp a line's discount so the discounted total respects the shop
    product's minimum price (minimum_price * quantity)."""
    shop_product = line.product.get_shop_instance(order_source.shop)
    if shop_product.minimum_price:
        min_total = (shop_product.minimum_price.value * line.quantity)
        base_price = (line.base_unit_price.value * line.quantity)
        if ((base_price - line.discount_amount.value) < min_total):
            # Cap the discount at exactly the amount that leaves min_total.
            line.discount_amount = order_source.create_price((base_price - min_total))
[ "def", "_limit_discount_amount_by_min_price", "(", "line", ",", "order_source", ")", ":", "shop_product", "=", "line", ".", "product", ".", "get_shop_instance", "(", "order_source", ".", "shop", ")", "if", "shop_product", ".", "minimum_price", ":", "min_total", "=...
changes the order line discount amount if the discount amount exceeds the minimum total price set by the minimum_price constraint in shopproduct .
train
false
41,435
def xblock_type_display_name(xblock, default_display_name=None):
    """Return the localized display name for an xblock's type.

    *xblock* may be an XBlock instance or a category string. Structural
    categories map to Section/Subsection/Unit; a 'vertical' instance that
    is not a unit renders as 'Vertical'. Other categories fall back to
    the component class's default display_name, then
    *default_display_name*.
    """
    if hasattr(xblock, 'category'):
        category = xblock.category
        # A vertical nested inside another vertical is not a unit.
        if ((category == 'vertical') and (not is_unit(xblock))):
            return _('Vertical')
    else:
        category = xblock
    if (category == 'chapter'):
        return _('Section')
    elif (category == 'sequential'):
        return _('Subsection')
    elif (category == 'vertical'):
        return _('Unit')
    component_class = XBlock.load_class(category, select=settings.XBLOCK_SELECT_FUNCTION)
    if (hasattr(component_class, 'display_name') and component_class.display_name.default):
        return _(component_class.display_name.default)
    else:
        return default_display_name
[ "def", "xblock_type_display_name", "(", "xblock", ",", "default_display_name", "=", "None", ")", ":", "if", "hasattr", "(", "xblock", ",", "'category'", ")", ":", "category", "=", "xblock", ".", "category", "if", "(", "(", "category", "==", "'vertical'", ")"...
returns the display name for the specified type of xblock .
train
false
41,436
def copy_cache(records, env):
    """Recursively copy the field cache of *records* into environment *env*.

    Walks relational field values (BaseModel instances) via a worklist,
    converting each cached value for the target environment without
    re-validating it.
    """
    (todo, done) = (set(records), set())
    while todo:
        record = todo.pop()
        if (record not in done):
            done.add(record)
            target = record.with_env(env)
            for name in record._cache:
                field = record._fields[name]
                value = record[name]
                if isinstance(value, BaseModel):
                    # Queue related records so their caches get copied too.
                    todo.update(value)
                target._cache[name] = field.convert_to_cache(value, target, validate=False)
[ "def", "copy_cache", "(", "records", ",", "env", ")", ":", "(", "todo", ",", "done", ")", "=", "(", "set", "(", "records", ")", ",", "set", "(", ")", ")", "while", "todo", ":", "record", "=", "todo", ".", "pop", "(", ")", "if", "(", "record", ...
recursively copy the cache of records to the environment env .
train
false
41,437
def is_math_exp(str):
    """Heuristically check whether *str* looks like a math expression.

    True when the lower-cased characters are all drawn from hex digits,
    'x' and the operators +-*/%^, and at least one operator is present.
    """
    allowed = set('0123456789abcdefx+-*/%^')
    operators = set('+-*/%^')
    chars = set(str.lower())
    has_operator = (chars & operators) != set()
    only_allowed = (chars - allowed) == set()
    return has_operator and only_allowed
[ "def", "is_math_exp", "(", "str", ")", ":", "charset", "=", "set", "(", "'0123456789abcdefx+-*/%^'", ")", "opers", "=", "set", "(", "'+-*/%^'", ")", "exp", "=", "set", "(", "str", ".", "lower", "(", ")", ")", "return", "(", "(", "(", "exp", "&", "o...
check if a string is a math expression .
train
false
41,438
def skip_under_travis(fn=None):
    """Skip a test under Travis CI; usable bare or as a decorator.

    Decorator form (with *fn*): under Travis, returns a stand-in that
    skips when run; otherwise returns *fn* unchanged. Bare form (no
    *fn*): skips immediately when under Travis, else returns None.
    """
    if _travisTesting:
        (skip, msg) = (pytest.skip, 'Cannot be tested under Travis-CI')
        if (fn is not None):
            def _inner():
                skip(msg)
            # Preserve the wrapped test's name for reporting.
            _inner.__name__ = fn.__name__
            return _inner
        else:
            skip(msg)
    else:
        return fn
[ "def", "skip_under_travis", "(", "fn", "=", "None", ")", ":", "if", "_travisTesting", ":", "(", "skip", ",", "msg", ")", "=", "(", "pytest", ".", "skip", ",", "'Cannot be tested under Travis-CI'", ")", "if", "(", "fn", "is", "not", "None", ")", ":", "d...
skip if a test is executed under travis testing environment could also be used as a decorator or unparametrized in the code .
train
false
41,440
def moving_window(array, nrows):
    """Yield successive *nrows*-tall windows over a 2-D numpy array."""
    total = num_windows_of_length_M_on_buffers_of_length_N(nrows, len(array))
    for start in range(total):
        yield array[start:start + nrows]
[ "def", "moving_window", "(", "array", ",", "nrows", ")", ":", "count", "=", "num_windows_of_length_M_on_buffers_of_length_N", "(", "nrows", ",", "len", "(", "array", ")", ")", "for", "i", "in", "range", "(", "count", ")", ":", "(", "yield", "array", "[", ...
simple moving window generator over a 2d numpy array .
train
false
41,441
def add_line_increment(lines, lineModified, diference, atLineStart=False):
    """Shift line numbers in *lines* by *diference* where appropriate.

    Lines at or before *lineModified* stay untouched unless *atLineStart*
    is set; lines whose pre-shift origin equals *lineModified* are also
    left as-is.
    """
    def _shift(line):
        untouched = (not atLineStart) and line <= lineModified
        if untouched or lineModified == line + diference:
            return line
        return line + diference
    return [_shift(line) for line in lines]
[ "def", "add_line_increment", "(", "lines", ",", "lineModified", ",", "diference", ",", "atLineStart", "=", "False", ")", ":", "def", "_inner_increment", "(", "line", ")", ":", "if", "(", "(", "(", "not", "atLineStart", ")", "and", "(", "line", "<=", "lin...
increment the line number of the list content when needed .
train
false
41,442
def register_parallel_backend(name, factory, make_default=False):
    """Register *factory* under *name* in the backend registry.

    When *make_default* is true, also make it the module-wide default.
    """
    BACKENDS[name] = factory
    if not make_default:
        return
    global DEFAULT_BACKEND
    DEFAULT_BACKEND = name
[ "def", "register_parallel_backend", "(", "name", ",", "factory", ",", "make_default", "=", "False", ")", ":", "BACKENDS", "[", "name", "]", "=", "factory", "if", "make_default", ":", "global", "DEFAULT_BACKEND", "DEFAULT_BACKEND", "=", "name" ]
register a new parallel backend factory .
train
false
41,446
def casoratian(seqs, n, zero=True):
    """Return the Casoratian determinant of sequences *seqs* in *n*.

    With zero=True (default) the k sequences are evaluated at
    n = 0 .. k-1; otherwise at the symbolic shifts n .. n+k-1.
    """
    from .dense import Matrix
    seqs = [sympify(seq) for seq in seqs]
    if zero:
        entry = lambda i, j: seqs[j].subs(n, i)
    else:
        entry = lambda i, j: seqs[j].subs(n, (n + i))
    k = len(seqs)
    return Matrix(k, k, entry).det()
[ "def", "casoratian", "(", "seqs", ",", "n", ",", "zero", "=", "True", ")", ":", "from", ".", "dense", "import", "Matrix", "seqs", "=", "list", "(", "map", "(", "sympify", ",", "seqs", ")", ")", "if", "(", "not", "zero", ")", ":", "f", "=", "(",...
given linear difference operator l of order k and homogeneous equation ly = 0 we want to compute kernel of l .
train
false
41,447
def eqhash(o):
    """Hash helper: prefer an object's __eqhash__() over builtin hash().

    Falls back to hash(o) when __eqhash__ is missing (or its call raises
    AttributeError, matching the original EAFP behaviour).
    """
    try:
        result = o.__eqhash__()
    except AttributeError:
        result = hash(o)
    return result
[ "def", "eqhash", "(", "o", ")", ":", "try", ":", "return", "o", ".", "__eqhash__", "(", ")", "except", "AttributeError", ":", "return", "hash", "(", "o", ")" ]
call obj .
train
false
41,448
def _array_to_file(arr, outfile):
    """Write a numpy array to a real file or a file-like object.

    Works around platform write-size limits: macOS writes that are both
    over the OS limit and a multiple of 4096 bytes, and the Windows
    single-write cap, are handled by writing the array in chunks.
    """
    if isfile(outfile):
        write = (lambda a, f: a.tofile(f))
    else:
        write = _array_to_file_like
    if ((sys.platform == 'darwin') and (arr.nbytes >= (_OSX_WRITE_LIMIT + 1)) and ((arr.nbytes % 4096) == 0)):
        chunksize = (_OSX_WRITE_LIMIT // arr.itemsize)
    elif sys.platform.startswith('win'):
        chunksize = (_WIN_WRITE_LIMIT // arr.itemsize)
    else:
        # No platform limit applies: write the whole array in one go.
        return write(arr, outfile)
    # Chunked path: flatten to a plain ndarray view and emit slices.
    # NOTE(review): idx counts elements (chunksize is in elements) but is
    # compared against nbytes (bytes); the extra iterations only produce
    # empty slices, which appears harmless -- confirm intent.
    idx = 0
    arr = arr.view(np.ndarray).flatten()
    while (idx < arr.nbytes):
        write(arr[idx:(idx + chunksize)], outfile)
        idx += chunksize
[ "def", "_array_to_file", "(", "arr", ",", "outfile", ")", ":", "if", "isfile", "(", "outfile", ")", ":", "write", "=", "(", "lambda", "a", ",", "f", ":", "a", ".", "tofile", "(", "f", ")", ")", "else", ":", "write", "=", "_array_to_file_like", "if"...
write a numpy array to a file or a file-like object .
train
false
41,449
def _warning_for_deprecated_user_based_rules(rules):
    """Log a warning for policy rules relying on user_id matching.

    Rules whose target names a user-based resource are exempt; for any
    other rule whose check string references user_id, warn that user_id
    based enforcement will be removed.
    """
    for rule in rules:
        # rule[0] is the rule name/target, rule[1] its check string.
        if [resource for resource in USER_BASED_RESOURCES if (resource in rule[0])]:
            continue
        if ('user_id' in KEY_EXPR.findall(rule[1])):
            LOG.warning(_LW("The user_id attribute isn't supported in the rule '%s'. All the user_id based policy enforcement will be removed in the future."), rule[0])
[ "def", "_warning_for_deprecated_user_based_rules", "(", "rules", ")", ":", "for", "rule", "in", "rules", ":", "if", "[", "resource", "for", "resource", "in", "USER_BASED_RESOURCES", "if", "(", "resource", "in", "rule", "[", "0", "]", ")", "]", ":", "continue...
warn that user-based policy enforcement is used in the rule but the rule doesn't support it .
train
true
41,450
def test_zookeeper():
    """Open the ZooKeeper app from the menu and wait for its view to load."""
    client = logged_in_client()
    client.click(id='ccs-zookeeper-menu')
    client.waits.forElement(classname='CCS-zookeeper', timeout='2000')
[ "def", "test_zookeeper", "(", ")", ":", "client", "=", "logged_in_client", "(", ")", "client", ".", "click", "(", "id", "=", "'ccs-zookeeper-menu'", ")", "client", ".", "waits", ".", "forElement", "(", "classname", "=", "'CCS-zookeeper'", ",", "timeout", "="...
launches the default view for zookeeper .
train
false
41,451
def vector_add(v, w):
    """Return the componentwise sum of vectors *v* and *w*.

    Extra trailing elements of the longer vector are ignored (zip
    truncates to the shorter input).
    """
    result = []
    for left, right in zip(v, w):
        result.append(left + right)
    return result
[ "def", "vector_add", "(", "v", ",", "w", ")", ":", "return", "[", "(", "v_i", "+", "w_i", ")", "for", "(", "v_i", ",", "w_i", ")", "in", "zip", "(", "v", ",", "w", ")", "]" ]
adds two vectors componentwise .
train
false
41,452
def user_verify_password(user_id=None, name=None, password=None, profile=None, **connection_args):
    """Verify a Keystone user's password by attempting authentication.

    Accepts either *user_id* (resolved to a name via the user list) or
    *name* directly. Returns True when a token can be obtained with the
    given credentials, False on auth failure, or an error dict when the
    user name cannot be resolved.
    """
    kstone = auth(profile, **connection_args)
    # Pick the auth endpoint: explicit override first, then the
    # API-version-appropriate configured default.
    if ('connection_endpoint' in connection_args):
        auth_url = connection_args.get('connection_endpoint')
    elif (_OS_IDENTITY_API_VERSION > 2):
        auth_url = __salt__['config.option']('keystone.endpoint', 'http://127.0.0.1:35357/v3')
    else:
        auth_url = __salt__['config.option']('keystone.endpoint', 'http://127.0.0.1:35357/v2.0')
    if user_id:
        for user in kstone.users.list():
            if (user.id == user_id):
                name = user.name
                break
    if (not name):
        return {'Error': 'Unable to resolve user name'}
    kwargs = {'username': name, 'password': password, 'auth_url': auth_url}
    try:
        # A successful client construction means the credentials are valid.
        if (_OS_IDENTITY_API_VERSION > 2):
            client3.Client(**kwargs)
        else:
            client.Client(**kwargs)
    except (keystoneclient.exceptions.Unauthorized, keystoneclient.exceptions.AuthorizationFailure):
        return False
    return True
[ "def", "user_verify_password", "(", "user_id", "=", "None", ",", "name", "=", "None", ",", "password", "=", "None", ",", "profile", "=", "None", ",", "**", "connection_args", ")", ":", "kstone", "=", "auth", "(", "profile", ",", "**", "connection_args", ...
verify a user's password . cli examples: .
train
true
41,453
def gauss_chebyshev_u(n, n_digits):
    """Gauss-Chebyshev (second kind) quadrature points and weights.

    Returns (xi, w): the n nodes cos(i*pi/(n+1)) and weights
    (pi/(n+1)) * sin(i*pi/(n+1))**2 for i = 1..n, each evaluated
    numerically to *n_digits* digits.
    """
    xi = []
    w = []
    for i in range(1, (n + 1)):
        xi.append(cos(((i / (n + S.One)) * S.Pi)).n(n_digits))
        w.append(((S.Pi / (n + S.One)) * (sin(((i * S.Pi) / (n + S.One))) ** 2)).n(n_digits))
    return (xi, w)
[ "def", "gauss_chebyshev_u", "(", "n", ",", "n_digits", ")", ":", "xi", "=", "[", "]", "w", "=", "[", "]", "for", "i", "in", "range", "(", "1", ",", "(", "n", "+", "1", ")", ")", ":", "xi", ".", "append", "(", "cos", "(", "(", "(", "i", "/...
computes the gauss-chebyshev quadrature [1]_ points and weights of the second kind .
train
false
41,454
def generate_nonce():
    """Return an unpredictable nonce in [1000000000, 2000000000).

    Fix: nonces are security-relevant, and the random module's Mersenne
    Twister output is predictable; secrets draws from the OS CSPRNG.
    The returned range and type are unchanged.
    """
    import secrets
    return 1000000000 + secrets.randbelow(1000000000)
[ "def", "generate_nonce", "(", ")", ":", "return", "random", ".", "randrange", "(", "1000000000", ",", "2000000000", ")" ]
generate pseudorandom number .
train
false
41,455
@fixture(scope='session')
def pytestconfig(request):
    """Session-scoped fixture exposing the pytest config object.

    Gives tests access to command line options and ini values.
    """
    return request.config
[ "@", "fixture", "(", "scope", "=", "'session'", ")", "def", "pytestconfig", "(", "request", ")", ":", "return", "request", ".", "config" ]
the pytest config object with access to command line opts .
train
false
41,457
def getBoolean(value):
    """Coerce *value* to a bool via its truthiness."""
    return True if value else False
[ "def", "getBoolean", "(", "value", ")", ":", "return", "bool", "(", "value", ")" ]
get the boolean .
train
false