id_within_dataset
int64
1
55.5k
snippet
stringlengths
19
14.2k
tokens
listlengths
6
1.63k
nl
stringlengths
6
352
split_within_dataset
stringclasses
1 value
is_duplicated
bool
2 classes
15,176
@pytest.fixture(params=['language0_project0', 'templates_project0', 'en_terminology']) def import_tps(request): from pootle_translationproject.models import TranslationProject (language_code, project_code) = request.param.split('_') return TranslationProject.objects.get(language__code=language_code, project__code=project_code)
[ "@", "pytest", ".", "fixture", "(", "params", "=", "[", "'language0_project0'", ",", "'templates_project0'", ",", "'en_terminology'", "]", ")", "def", "import_tps", "(", "request", ")", ":", "from", "pootle_translationproject", ".", "models", "import", "Translatio...
list of required translation projects for import tests .
train
false
15,177
@error.context_aware def lv_reactivate(vg_name, lv_name, timeout=10): try: utils.run(('lvchange -an /dev/%s/%s' % (vg_name, lv_name))) time.sleep(timeout) utils.run(('lvchange -ay /dev/%s/%s' % (vg_name, lv_name))) time.sleep(timeout) except error.CmdError: logging.error(('Failed to reactivate %s - please, ' + 'nuke the process that uses it first.'), lv_name) raise error.TestError(('The logical volume %s is still active' % lv_name))
[ "@", "error", ".", "context_aware", "def", "lv_reactivate", "(", "vg_name", ",", "lv_name", ",", "timeout", "=", "10", ")", ":", "try", ":", "utils", ".", "run", "(", "(", "'lvchange -an /dev/%s/%s'", "%", "(", "vg_name", ",", "lv_name", ")", ")", ")", ...
in case of unclean shutdowns some of the lvs is still active and merging is postponed .
train
false
15,178
def fast_inplace_check(inputs): fgraph = inputs[0].fgraph Supervisor = theano.compile.function_module.Supervisor protected_inputs = [f.protected for f in fgraph._features if isinstance(f, Supervisor)] protected_inputs = sum(protected_inputs, []) protected_inputs.extend(fgraph.outputs) inputs = [i for i in inputs if ((not isinstance(i, graph.Constant)) and (not fgraph.destroyers(i)) and (i not in protected_inputs))] return inputs
[ "def", "fast_inplace_check", "(", "inputs", ")", ":", "fgraph", "=", "inputs", "[", "0", "]", ".", "fgraph", "Supervisor", "=", "theano", ".", "compile", ".", "function_module", ".", "Supervisor", "protected_inputs", "=", "[", "f", ".", "protected", "for", ...
return the variables in inputs that are posible candidate for as inputs of inplace operation .
train
false
15,179
def _EscapeCppDefineForMSBuild(s): s = _EscapeEnvironmentVariableExpansion(s) s = _EscapeCommandLineArgumentForMSBuild(s) s = _EscapeMSBuildSpecialCharacters(s) s = s.replace('#', ('\\%03o' % ord('#'))) return s
[ "def", "_EscapeCppDefineForMSBuild", "(", "s", ")", ":", "s", "=", "_EscapeEnvironmentVariableExpansion", "(", "s", ")", "s", "=", "_EscapeCommandLineArgumentForMSBuild", "(", "s", ")", "s", "=", "_EscapeMSBuildSpecialCharacters", "(", "s", ")", "s", "=", "s", "...
escapes a cpp define so that it will reach the compiler unaltered .
train
false
15,180
def _rerun_course(request, org, number, run, fields): source_course_key = CourseKey.from_string(request.json.get('source_course_key')) if (not has_studio_write_access(request.user, source_course_key)): raise PermissionDenied() store = modulestore() with store.default_store('split'): destination_course_key = store.make_course_key(org, number, run) if store.has_course(destination_course_key, ignore_case=True): raise DuplicateCourseError(source_course_key, destination_course_key) add_instructor(destination_course_key, request.user, request.user) CourseRerunState.objects.initiated(source_course_key, destination_course_key, request.user, fields['display_name']) fields['advertised_start'] = None json_fields = json.dumps(fields, cls=EdxJSONEncoder) rerun_course.delay(unicode(source_course_key), unicode(destination_course_key), request.user.id, json_fields) return JsonResponse({'url': reverse_url('course_handler'), 'destination_course_key': unicode(destination_course_key)})
[ "def", "_rerun_course", "(", "request", ",", "org", ",", "number", ",", "run", ",", "fields", ")", ":", "source_course_key", "=", "CourseKey", ".", "from_string", "(", "request", ".", "json", ".", "get", "(", "'source_course_key'", ")", ")", "if", "(", "...
reruns an existing course .
train
false
15,181
def new_doc(doctype, parent_doc=None, parentfield=None, as_dict=False): from frappe.model.create_new import get_new_doc return get_new_doc(doctype, parent_doc, parentfield, as_dict=as_dict)
[ "def", "new_doc", "(", "doctype", ",", "parent_doc", "=", "None", ",", "parentfield", "=", "None", ",", "as_dict", "=", "False", ")", ":", "from", "frappe", ".", "model", ".", "create_new", "import", "get_new_doc", "return", "get_new_doc", "(", "doctype", ...
returns a new document of the given doctype with defaults set .
train
false
15,183
def getDefaultSPImp(): return 'cpp'
[ "def", "getDefaultSPImp", "(", ")", ":", "return", "'cpp'" ]
return the default spatial pooler implementation for this region .
train
false
15,185
def _combine(n, rs): try: (r, rs) = peek(rs) except StopIteration: (yield n) return if overlap(n, r): (yield merge(n, r)) next(rs) for r in rs: (yield r) else: (yield n) for r in rs: (yield r)
[ "def", "_combine", "(", "n", ",", "rs", ")", ":", "try", ":", "(", "r", ",", "rs", ")", "=", "peek", "(", "rs", ")", "except", "StopIteration", ":", "(", "yield", "n", ")", "return", "if", "overlap", "(", "n", ",", "r", ")", ":", "(", "yield"...
helper for _group_ranges .
train
true
15,186
def parse_modeline(code): seek = MODELINE_RE.search(code) if seek: return dict((v.split('=') for v in seek.group(1).split(':'))) return dict()
[ "def", "parse_modeline", "(", "code", ")", ":", "seek", "=", "MODELINE_RE", ".", "search", "(", "code", ")", "if", "seek", ":", "return", "dict", "(", "(", "v", ".", "split", "(", "'='", ")", "for", "v", "in", "seek", ".", "group", "(", "1", ")",...
parse params from files modeline .
train
true
15,187
@testing.requires_testing_data def test_read_write_fine_cal(): temp_dir = _TempDir() temp_fname = op.join(temp_dir, 'fine_cal_temp.dat') for fname in [fine_cal_fname, fine_cal_fname_3d]: fine_cal_dict = read_fine_calibration(fname) write_fine_calibration(temp_fname, fine_cal_dict) fine_cal_dict_reload = read_fine_calibration(temp_fname) assert_equal(object_hash(fine_cal_dict), object_hash(fine_cal_dict_reload))
[ "@", "testing", ".", "requires_testing_data", "def", "test_read_write_fine_cal", "(", ")", ":", "temp_dir", "=", "_TempDir", "(", ")", "temp_fname", "=", "op", ".", "join", "(", "temp_dir", ",", "'fine_cal_temp.dat'", ")", "for", "fname", "in", "[", "fine_cal_...
test round trip reading/writing of fine calibration .
train
false
15,189
@app.errorhandler(500) def error_500_handler(error): new_issue = 'https://github.com/andresriancho/w3af/issues/new' try: (exc_type, exc_value, exc_traceback) = sys.exc_info() filepath = traceback.extract_tb(exc_traceback)[(-1)][0] filename = basename(filepath) (lineno, function_name) = get_last_call_info(exc_traceback) response = jsonify({'code': 500, 'message': str(error), 'filename': filename, 'line_number': lineno, 'function_name': function_name, 'exception_type': error.__class__.__name__, 'please': new_issue}) except Exception as e: response = jsonify({'code': 500, 'exception': str(error), 'handler_exception': str(e), 'please': new_issue, 'message': 'REST API error'}) response.status_code = 500 return response
[ "@", "app", ".", "errorhandler", "(", "500", ")", "def", "error_500_handler", "(", "error", ")", ":", "new_issue", "=", "'https://github.com/andresriancho/w3af/issues/new'", "try", ":", "(", "exc_type", ",", "exc_value", ",", "exc_traceback", ")", "=", "sys", "....
this error handler will catch all unhandled exceptions in the w3af rest api and return something useful to the user for debugging .
train
false
15,190
def make_quartiles(q1, q3): return graph_objs.Scatter(x=[0, 0], y=[q1, q3], text=[('lower-quartile: ' + '{:0.2f}'.format(q1)), ('upper-quartile: ' + '{:0.2f}'.format(q3))], mode='lines', line=graph_objs.Line(width=4, color='rgb(0,0,0)'), hoverinfo='text')
[ "def", "make_quartiles", "(", "q1", ",", "q3", ")", ":", "return", "graph_objs", ".", "Scatter", "(", "x", "=", "[", "0", ",", "0", "]", ",", "y", "=", "[", "q1", ",", "q3", "]", ",", "text", "=", "[", "(", "'lower-quartile: '", "+", "'{:0.2f}'",...
makes the upper and lower quartiles for a violin plot .
train
false
15,192
def SortShareEpDicts(ep_dicts): ep_dicts.sort(key=(lambda episode: episode['new_episode_id']), reverse=True) for episode in ep_dicts: episode['photo_ids'].sort()
[ "def", "SortShareEpDicts", "(", "ep_dicts", ")", ":", "ep_dicts", ".", "sort", "(", "key", "=", "(", "lambda", "episode", ":", "episode", "[", "'new_episode_id'", "]", ")", ",", "reverse", "=", "True", ")", "for", "episode", "in", "ep_dicts", ":", "episo...
cover_photo selection depends on episode/photo order in share_new and share_existing .
train
false
15,193
@register_jitable def _median_inner(temp_arry, n): low = 0 high = (n - 1) half = (n >> 1) if ((n & 1) == 0): (a, b) = _select_two(temp_arry, (half - 1), low, high) return ((a + b) / 2) else: return _select(temp_arry, half, low, high)
[ "@", "register_jitable", "def", "_median_inner", "(", "temp_arry", ",", "n", ")", ":", "low", "=", "0", "high", "=", "(", "n", "-", "1", ")", "half", "=", "(", "n", ">>", "1", ")", "if", "(", "(", "n", "&", "1", ")", "==", "0", ")", ":", "(...
the main logic of the median() call .
train
false
15,194
def add_role(var, role): roles = getattr(var.tag, 'roles', []) roles = [old_role for old_role in roles if (not isinstance(role, old_role.__class__))] if (not any((isinstance(old_role, role.__class__) for old_role in roles))): roles += [role] var.tag.roles = roles
[ "def", "add_role", "(", "var", ",", "role", ")", ":", "roles", "=", "getattr", "(", "var", ".", "tag", ",", "'roles'", ",", "[", "]", ")", "roles", "=", "[", "old_role", "for", "old_role", "in", "roles", "if", "(", "not", "isinstance", "(", "role",...
add a role to a given theano variable .
train
false
15,195
def is_locust(tup): (name, item) = tup return bool((inspect.isclass(item) and issubclass(item, Locust) and hasattr(item, 'task_set') and getattr(item, 'task_set') and (not name.startswith('_'))))
[ "def", "is_locust", "(", "tup", ")", ":", "(", "name", ",", "item", ")", "=", "tup", "return", "bool", "(", "(", "inspect", ".", "isclass", "(", "item", ")", "and", "issubclass", "(", "item", ",", "Locust", ")", "and", "hasattr", "(", "item", ",", ...
takes tuple .
train
false
15,196
@requires_application() def test_event_order(): x = list() class MyCanvas(Canvas, ): def on_initialize(self, event): x.append('init') def on_draw(self, event): sz = (True if (self.size is not None) else False) x.append(('draw size=%s show=%s' % (sz, show))) def on_close(self, event): x.append('close') for show in (False, True): while x: x.pop() with MyCanvas(show=show) as c: c.update() c.app.process_events() print x assert_true((len(x) >= 3)) assert_equal(x[0], 'init') assert_in('draw size=True', x[1]) assert_in('draw size=True', x[(-2)]) assert_equal(x[(-1)], 'close')
[ "@", "requires_application", "(", ")", "def", "test_event_order", "(", ")", ":", "x", "=", "list", "(", ")", "class", "MyCanvas", "(", "Canvas", ",", ")", ":", "def", "on_initialize", "(", "self", ",", "event", ")", ":", "x", ".", "append", "(", "'in...
test event order .
train
false
15,199
def FastaIterator(handle, alphabet=single_letter_alphabet, title2ids=None): if title2ids: for (title, sequence) in SimpleFastaParser(handle): (id, name, descr) = title2ids(title) (yield SeqRecord(Seq(sequence, alphabet), id=id, name=name, description=descr)) else: for (title, sequence) in SimpleFastaParser(handle): try: first_word = title.split(None, 1)[0] except IndexError: assert (not title), repr(title) first_word = '' (yield SeqRecord(Seq(sequence, alphabet), id=first_word, name=first_word, description=title))
[ "def", "FastaIterator", "(", "handle", ",", "alphabet", "=", "single_letter_alphabet", ",", "title2ids", "=", "None", ")", ":", "if", "title2ids", ":", "for", "(", "title", ",", "sequence", ")", "in", "SimpleFastaParser", "(", "handle", ")", ":", "(", "id"...
generator function to iterate over fasta records .
train
false
15,200
def whenReady(d): d2 = defer.Deferred() d.addCallbacks(_pubReady, d2.errback, callbackArgs=(d2,)) return d2
[ "def", "whenReady", "(", "d", ")", ":", "d2", "=", "defer", ".", "Deferred", "(", ")", "d", ".", "addCallbacks", "(", "_pubReady", ",", "d2", ".", "errback", ",", "callbackArgs", "=", "(", "d2", ",", ")", ")", "return", "d2" ]
wrap a deferred returned from a pb method in another deferred that expects a remotepublished as a result .
train
false
15,201
def sortedURIs(service_element): return [uri_element.text for uri_element in prioSort(service_element.findall(uri_tag))]
[ "def", "sortedURIs", "(", "service_element", ")", ":", "return", "[", "uri_element", ".", "text", "for", "uri_element", "in", "prioSort", "(", "service_element", ".", "findall", "(", "uri_tag", ")", ")", "]" ]
given a service element .
train
false
15,202
def getAndroidID(): try: pythonActivity = autoclass('org.renpy.android.PythonService') settingsSecure = autoclass('android.provider.Settings$Secure') androidId = settingsSecure.getString(pythonActivity.mService.getContentResolver(), settingsSecure.ANDROID_ID) return androidId except Exception as e: return None
[ "def", "getAndroidID", "(", ")", ":", "try", ":", "pythonActivity", "=", "autoclass", "(", "'org.renpy.android.PythonService'", ")", "settingsSecure", "=", "autoclass", "(", "'android.provider.Settings$Secure'", ")", "androidId", "=", "settingsSecure", ".", "getString",...
returns none if an error .
train
false
15,203
def describe_event_source_mapping(UUID=None, EventSourceArn=None, FunctionName=None, region=None, key=None, keyid=None, profile=None): ids = _get_ids(UUID, EventSourceArn=EventSourceArn, FunctionName=FunctionName) if (len(ids) < 1): return {'event_source_mapping': None} UUID = ids[0] try: conn = _get_conn(region=region, key=key, keyid=keyid, profile=profile) desc = conn.get_event_source_mapping(UUID=UUID) if desc: keys = ('UUID', 'BatchSize', 'EventSourceArn', 'FunctionArn', 'LastModified', 'LastProcessingResult', 'State', 'StateTransitionReason') return {'event_source_mapping': dict([(k, desc.get(k)) for k in keys])} else: return {'event_source_mapping': None} except ClientError as e: return {'error': salt.utils.boto3.get_error(e)}
[ "def", "describe_event_source_mapping", "(", "UUID", "=", "None", ",", "EventSourceArn", "=", "None", ",", "FunctionName", "=", "None", ",", "region", "=", "None", ",", "key", "=", "None", ",", "keyid", "=", "None", ",", "profile", "=", "None", ")", ":",...
given an event source mapping id or an event source arn and functionname .
train
false
15,204
def str_find(arr, sub, start=0, end=None, side='left'): if (not isinstance(sub, compat.string_types)): msg = 'expected a string object, not {0}' raise TypeError(msg.format(type(sub).__name__)) if (side == 'left'): method = 'find' elif (side == 'right'): method = 'rfind' else: raise ValueError('Invalid side') if (end is None): f = (lambda x: getattr(x, method)(sub, start)) else: f = (lambda x: getattr(x, method)(sub, start, end)) return _na_map(f, arr, dtype=int)
[ "def", "str_find", "(", "arr", ",", "sub", ",", "start", "=", "0", ",", "end", "=", "None", ",", "side", "=", "'left'", ")", ":", "if", "(", "not", "isinstance", "(", "sub", ",", "compat", ".", "string_types", ")", ")", ":", "msg", "=", "'expecte...
return indexes in each strings in the series/index where the substring is fully contained between [start:end] .
train
true
15,205
def set_fnclex(context, c_helpers): ptr_set_fnclex = c_helpers['set_fnclex'] fn = ctypes.CFUNCTYPE(None, ctypes.c_void_p)(ptr_set_fnclex) library = compile_fnclex(context) fnclex_ptr = library.get_pointer_to_function('fnclex') fn(fnclex_ptr) return library
[ "def", "set_fnclex", "(", "context", ",", "c_helpers", ")", ":", "ptr_set_fnclex", "=", "c_helpers", "[", "'set_fnclex'", "]", "fn", "=", "ctypes", ".", "CFUNCTYPE", "(", "None", ",", "ctypes", ".", "c_void_p", ")", "(", "ptr_set_fnclex", ")", "library", "...
install fnclex before fmod calls .
train
false
15,207
def get_testenv(): env = os.environ.copy() env['PYTHONPATH'] = get_pythonpath() return env
[ "def", "get_testenv", "(", ")", ":", "env", "=", "os", ".", "environ", ".", "copy", "(", ")", "env", "[", "'PYTHONPATH'", "]", "=", "get_pythonpath", "(", ")", "return", "env" ]
return a os environment dict suitable to fork processes that need to import this installation of scrapy .
train
false
15,208
def cpu_percent(interval=None, percpu=False): global _last_cpu_times global _last_per_cpu_times blocking = ((interval is not None) and (interval > 0.0)) if ((interval is not None) and (interval < 0)): raise ValueError(('interval is not positive (got %r)' % interval)) def calculate(t1, t2): t1_all = _cpu_tot_time(t1) t1_busy = _cpu_busy_time(t1) t2_all = _cpu_tot_time(t2) t2_busy = _cpu_busy_time(t2) if (t2_busy <= t1_busy): return 0.0 busy_delta = (t2_busy - t1_busy) all_delta = (t2_all - t1_all) busy_perc = ((busy_delta / all_delta) * 100) return round(busy_perc, 1) if (not percpu): if blocking: t1 = cpu_times() time.sleep(interval) else: t1 = _last_cpu_times if (t1 is None): t1 = cpu_times() _last_cpu_times = cpu_times() return calculate(t1, _last_cpu_times) else: ret = [] if blocking: tot1 = cpu_times(percpu=True) time.sleep(interval) else: tot1 = _last_per_cpu_times if (tot1 is None): tot1 = cpu_times(percpu=True) _last_per_cpu_times = cpu_times(percpu=True) for (t1, t2) in zip(tot1, _last_per_cpu_times): ret.append(calculate(t1, t2)) return ret
[ "def", "cpu_percent", "(", "interval", "=", "None", ",", "percpu", "=", "False", ")", ":", "global", "_last_cpu_times", "global", "_last_per_cpu_times", "blocking", "=", "(", "(", "interval", "is", "not", "None", ")", "and", "(", "interval", ">", "0.0", ")...
return the percent of time the cpu is busy .
train
true
15,209
def checked_call(to_call, kwargs): try: return to_call(**kwargs) except TypeError: check_call_arguments(to_call, kwargs) raise
[ "def", "checked_call", "(", "to_call", ",", "kwargs", ")", ":", "try", ":", "return", "to_call", "(", "**", "kwargs", ")", "except", "TypeError", ":", "check_call_arguments", "(", "to_call", ",", "kwargs", ")", "raise" ]
attempt calling a function or instantiating a class with a given set of arguments .
train
false
15,213
def _rehash(): shell = __salt__['environ.get']('SHELL') if (shell.split('/')[(-1)] in ('csh', 'tcsh')): __salt__['cmd.run']('rehash', output_loglevel='trace')
[ "def", "_rehash", "(", ")", ":", "shell", "=", "__salt__", "[", "'environ.get'", "]", "(", "'SHELL'", ")", "if", "(", "shell", ".", "split", "(", "'/'", ")", "[", "(", "-", "1", ")", "]", "in", "(", "'csh'", ",", "'tcsh'", ")", ")", ":", "__sal...
recomputes internal hash table for the path variable .
train
false
15,214
def start_service(service_name): logging.info(('Starting ' + service_name)) watch_name = '' if (service_name == datastore_upgrade.CASSANDRA_WATCH_NAME): cassandra_cmd = ((CASSANDRA_EXECUTABLE + ' -p ') + PID_FILE) start_cmd = 'su -c "{0}" cassandra'.format(cassandra_cmd) stop_cmd = (('/usr/bin/python2 ' + APPSCALE_HOME) + '/scripts/stop_service.py java cassandra') watch_name = datastore_upgrade.CASSANDRA_WATCH_NAME ports = [CASSANDRA_PORT] match_cmd = cassandra_interface.CASSANDRA_INSTALL_DIR if (service_name == datastore_upgrade.ZK_WATCH_NAME): zk_server = 'zookeeper-server' command = 'service --status-all|grep zookeeper$' if (subprocess.call(command, shell=True) == 0): zk_server = 'zookeeper' start_cmd = (('/usr/sbin/service ' + zk_server) + ' start') stop_cmd = (('/usr/sbin/service ' + zk_server) + ' stop') watch_name = datastore_upgrade.ZK_WATCH_NAME match_cmd = 'org.apache.zookeeper.server.quorum.QuorumPeerMain' ports = [zk.DEFAULT_PORT] monit_app_configuration.create_config_file(watch_name, start_cmd, stop_cmd, ports, upgrade_flag=True, match_cmd=match_cmd) if (not monit_interface.start(watch_name)): logging.error(('Monit was unable to start ' + service_name)) return 1 else: logging.info('Monit configured for {}'.format(service_name)) return 0
[ "def", "start_service", "(", "service_name", ")", ":", "logging", ".", "info", "(", "(", "'Starting '", "+", "service_name", ")", ")", "watch_name", "=", "''", "if", "(", "service_name", "==", "datastore_upgrade", ".", "CASSANDRA_WATCH_NAME", ")", ":", "cassan...
start openstack service immediately cli example: .
train
false
15,215
def get_issue_comments(issue_number, repo_name=None, profile='github', since=None, output='min'): org_name = _get_config_value(profile, 'org_name') if (repo_name is None): repo_name = _get_config_value(profile, 'repo_name') action = '/'.join(['repos', org_name, repo_name]) command = '/'.join(['issues', str(issue_number), 'comments']) args = {} if since: args['since'] = since comments = _query(profile, action=action, command=command, args=args) ret = {} for comment in comments: comment_id = comment.get('id') if (output == 'full'): ret[comment_id] = comment else: ret[comment_id] = {'id': comment.get('id'), 'created_at': comment.get('created_at'), 'updated_at': comment.get('updated_at'), 'user_login': comment.get('user').get('login')} return ret
[ "def", "get_issue_comments", "(", "issue_number", ",", "repo_name", "=", "None", ",", "profile", "=", "'github'", ",", "since", "=", "None", ",", "output", "=", "'min'", ")", ":", "org_name", "=", "_get_config_value", "(", "profile", ",", "'org_name'", ")", ...
return information about the comments for a given issue in a named repository .
train
true
15,216
def bulk_activate(workers, lbn, profile='default'): ret = {} if isinstance(workers, str): workers = workers.split(',') for worker in workers: try: ret[worker] = worker_activate(worker, lbn, profile) except Exception: ret[worker] = False return ret
[ "def", "bulk_activate", "(", "workers", ",", "lbn", ",", "profile", "=", "'default'", ")", ":", "ret", "=", "{", "}", "if", "isinstance", "(", "workers", ",", "str", ")", ":", "workers", "=", "workers", ".", "split", "(", "','", ")", "for", "worker",...
activate all the given workers in the specific load balancer cli examples: .
train
false
15,217
@handle_response_format @treeio_login_required def sla_index(request, response_format='html'): if request.GET: query = _get_filter_query(request.GET, ServiceLevelAgreement) slas = Object.filter_by_request(request, ServiceLevelAgreement.objects.filter(query)) else: slas = Object.filter_by_request(request, ServiceLevelAgreement.objects) filters = SLAFilterForm(request.user.profile, '', request.GET) context = _get_default_context(request) context.update({'slas': slas, 'filters': filters}) return render_to_response('services/sla_index', context, context_instance=RequestContext(request), response_format=response_format)
[ "@", "handle_response_format", "@", "treeio_login_required", "def", "sla_index", "(", "request", ",", "response_format", "=", "'html'", ")", ":", "if", "request", ".", "GET", ":", "query", "=", "_get_filter_query", "(", "request", ".", "GET", ",", "ServiceLevelA...
all available service level agreements .
train
false
15,220
def ROCR(ds, count, timeperiod=(- (2 ** 31))): return call_talib_with_ds(ds, count, talib.ROCR, timeperiod)
[ "def", "ROCR", "(", "ds", ",", "count", ",", "timeperiod", "=", "(", "-", "(", "2", "**", "31", ")", ")", ")", ":", "return", "call_talib_with_ds", "(", "ds", ",", "count", ",", "talib", ".", "ROCR", ",", "timeperiod", ")" ]
rate of change ratio: .
train
false
15,223
def task_ready(request, remove_request=requests.pop, discard_active_request=active_requests.discard, discard_reserved_request=reserved_requests.discard): remove_request(request.id, None) discard_active_request(request) discard_reserved_request(request)
[ "def", "task_ready", "(", "request", ",", "remove_request", "=", "requests", ".", "pop", ",", "discard_active_request", "=", "active_requests", ".", "discard", ",", "discard_reserved_request", "=", "reserved_requests", ".", "discard", ")", ":", "remove_request", "("...
updates global state when a task is ready .
train
false
15,224
def xml_safe(value): return CONTROL_CHARACTERS.sub('?', value)
[ "def", "xml_safe", "(", "value", ")", ":", "return", "CONTROL_CHARACTERS", ".", "sub", "(", "'?'", ",", "value", ")" ]
replaces invalid xml characters with ? .
train
false
15,225
def is_json(content): try: json.loads(content) is_json = True except: is_json = False return is_json
[ "def", "is_json", "(", "content", ")", ":", "try", ":", "json", ".", "loads", "(", "content", ")", "is_json", "=", "True", "except", ":", "is_json", "=", "False", "return", "is_json" ]
unfortunately facebook returns 500s which mean they are down or 500s with a nice error message because you use open graph wrong so we have to figure out which is which :) .
train
false
15,226
def jpxDecode(stream): decodedStream = '' return ((-1), 'JpxDecode not supported yet')
[ "def", "jpxDecode", "(", "stream", ")", ":", "decodedStream", "=", "''", "return", "(", "(", "-", "1", ")", ",", "'JpxDecode not supported yet'", ")" ]
method to decode streams using the jpeg2000 standard .
train
false
15,227
def objectLoadHook(aDict): if ('__class_uuid__' in aDict): return uuidToLoader[UUID(aDict['__class_uuid__'])](aDict) return aDict
[ "def", "objectLoadHook", "(", "aDict", ")", ":", "if", "(", "'__class_uuid__'", "in", "aDict", ")", ":", "return", "uuidToLoader", "[", "UUID", "(", "aDict", "[", "'__class_uuid__'", "]", ")", "]", "(", "aDict", ")", "return", "aDict" ]
dictionary-to-object-translation hook for certain value types used within the logging system .
train
false
15,228
def top_python_dirs(dirname): top_dirs = [] dir_init = os.path.join(dirname, '__init__.py') if os.path.exists(dir_init): top_dirs.append(dirname) for directory in ['djangoapps', 'lib']: subdir = os.path.join(dirname, directory) subdir_init = os.path.join(subdir, '__init__.py') if (os.path.exists(subdir) and (not os.path.exists(subdir_init))): dirs = os.listdir(subdir) top_dirs.extend((d for d in dirs if os.path.isdir(os.path.join(subdir, d)))) return top_dirs
[ "def", "top_python_dirs", "(", "dirname", ")", ":", "top_dirs", "=", "[", "]", "dir_init", "=", "os", ".", "path", ".", "join", "(", "dirname", ",", "'__init__.py'", ")", "if", "os", ".", "path", ".", "exists", "(", "dir_init", ")", ":", "top_dirs", ...
find the directories to start from in order to find all the python files in dirname .
train
false
15,229
@utils.arg('--host', dest='host', metavar='<host>', default=None, help=_('Filter by host.')) @deprecated_network def do_floating_ip_bulk_list(cs, args): utils.print_list(cs.floating_ips_bulk.list(args.host), ['project_id', 'address', 'instance_uuid', 'pool', 'interface'])
[ "@", "utils", ".", "arg", "(", "'--host'", ",", "dest", "=", "'host'", ",", "metavar", "=", "'<host>'", ",", "default", "=", "None", ",", "help", "=", "_", "(", "'Filter by host.'", ")", ")", "@", "deprecated_network", "def", "do_floating_ip_bulk_list", "(...
list all floating ips .
train
false
15,230
def expr_match(line, expr): if (line == expr): return True if fnmatch.fnmatch(line, expr): return True try: if re.match('\\A{0}\\Z'.format(expr), line): return True except re.error: pass return False
[ "def", "expr_match", "(", "line", ",", "expr", ")", ":", "if", "(", "line", "==", "expr", ")", ":", "return", "True", "if", "fnmatch", ".", "fnmatch", "(", "line", ",", "expr", ")", ":", "return", "True", "try", ":", "if", "re", ".", "match", "("...
evaluate a line of text against an expression .
train
true
15,233
def build_finished(app, exception): filename = join(app.outdir, 'sitemap.txt') links_iter = app.status_iterator(sorted(app.sitemap_links), 'adding links to sitemap... ', console.brown, len(app.sitemap_links)) try: with open(filename, 'w') as f: for link in links_iter: f.write(('%s\n' % link)) except OSError as e: raise SphinxError(('cannot write sitemap.txt, reason: %s' % e))
[ "def", "build_finished", "(", "app", ",", "exception", ")", ":", "filename", "=", "join", "(", "app", ".", "outdir", ",", "'sitemap.txt'", ")", "links_iter", "=", "app", ".", "status_iterator", "(", "sorted", "(", "app", ".", "sitemap_links", ")", ",", "...
generate a sitemap .
train
true
15,234
def milestone(): if ('project_id' in get_vars): field = s3db.project_milestone.project_id field.default = get_vars.project_id field.writable = False field.comment = None return s3_rest_controller()
[ "def", "milestone", "(", ")", ":", "if", "(", "'project_id'", "in", "get_vars", ")", ":", "field", "=", "s3db", ".", "project_milestone", ".", "project_id", "field", ".", "default", "=", "get_vars", ".", "project_id", "field", ".", "writable", "=", "False"...
restful crud controller .
train
false
15,236
def security_group_rule_get(context, security_group_rule_id): return IMPL.security_group_rule_get(context, security_group_rule_id)
[ "def", "security_group_rule_get", "(", "context", ",", "security_group_rule_id", ")", ":", "return", "IMPL", ".", "security_group_rule_get", "(", "context", ",", "security_group_rule_id", ")" ]
gets a security group rule .
train
false
15,237
def _other_params(args): default_params = [config.QUERY_WHERE, config.QUERY_SORT, config.QUERY_PAGE, config.QUERY_MAX_RESULTS, config.QUERY_EMBEDDED, config.QUERY_PROJECTION] return MultiDict(((key, value) for (key, values) in args.lists() for value in values if (key not in default_params)))
[ "def", "_other_params", "(", "args", ")", ":", "default_params", "=", "[", "config", ".", "QUERY_WHERE", ",", "config", ".", "QUERY_SORT", ",", "config", ".", "QUERY_PAGE", ",", "config", ".", "QUERY_MAX_RESULTS", ",", "config", ".", "QUERY_EMBEDDED", ",", "...
returns a multidict of params that are not used internally by eve .
train
false
15,238
@pytest.fixture(params=_generate_cmdline_tests(), ids=(lambda e: e.cmd)) def cmdline_test(request): import qutebrowser.app return request.param
[ "@", "pytest", ".", "fixture", "(", "params", "=", "_generate_cmdline_tests", "(", ")", ",", "ids", "=", "(", "lambda", "e", ":", "e", ".", "cmd", ")", ")", "def", "cmdline_test", "(", "request", ")", ":", "import", "qutebrowser", ".", "app", "return",...
fixture which generates tests for things validating commandlines .
train
false
15,239
def timenow(): return time.strftime('%d/%m/%Y %H:%M:%S', time.localtime(time.time()))
[ "def", "timenow", "(", ")", ":", "return", "time", ".", "strftime", "(", "'%d/%m/%Y %H:%M:%S'", ",", "time", ".", "localtime", "(", "time", ".", "time", "(", ")", ")", ")" ]
return current time as a string .
train
false
15,241
def parse_decimal_string(s): if (isinstance(s, six.integer_types) or isinstance(s, Decimal)): return Decimal(s) if isinstance(s, float): return Decimal(str(s)) s = s.strip().replace(' ', '') if (not s): return Decimal(0) if (',' in s): if ('.' in s): s = s.replace(',', '') else: s = s.replace(',', '.') return Decimal(strip_non_float_chars(s.strip()))
[ "def", "parse_decimal_string", "(", "s", ")", ":", "if", "(", "isinstance", "(", "s", ",", "six", ".", "integer_types", ")", "or", "isinstance", "(", "s", ",", "Decimal", ")", ")", ":", "return", "Decimal", "(", "s", ")", "if", "isinstance", "(", "s"...
parse decimals with "best effort" .
train
false
15,242
@app.route('/col/<sid>/<fn>') @login_required def download_single_submission(sid, fn): if (('..' in fn) or fn.startswith('/')): abort(404) try: Submission.query.filter((Submission.filename == fn)).one().downloaded = True db_session.commit() except NoResultFound as e: app.logger.error((('Could not mark ' + fn) + (' as downloaded: %s' % (e,)))) return send_file(store.path(sid, fn), mimetype='application/pgp-encrypted')
[ "@", "app", ".", "route", "(", "'/col/<sid>/<fn>'", ")", "@", "login_required", "def", "download_single_submission", "(", "sid", ",", "fn", ")", ":", "if", "(", "(", "'..'", "in", "fn", ")", "or", "fn", ".", "startswith", "(", "'/'", ")", ")", ":", "...
sends a client the contents of a single submission .
train
false
15,243
def _writable_location(typ): with qtutils.unset_organization(): path = QStandardPaths.writableLocation(typ) typ_str = debug.qenum_key(QStandardPaths, typ) log.misc.debug('writable location for {}: {}'.format(typ_str, path)) if (not path): raise EmptyValueError('QStandardPaths returned an empty value!') path = path.replace('/', os.sep) return path
[ "def", "_writable_location", "(", "typ", ")", ":", "with", "qtutils", ".", "unset_organization", "(", ")", ":", "path", "=", "QStandardPaths", ".", "writableLocation", "(", "typ", ")", "typ_str", "=", "debug", ".", "qenum_key", "(", "QStandardPaths", ",", "t...
wrapper around qstandardpaths .
train
false
15,244
def get_all_tags(project): result = set() result.update(_get_project_tags(project)) result.update(_get_issues_tags(project)) result.update(_get_stories_tags(project)) result.update(_get_tasks_tags(project)) return sorted(result)
[ "def", "get_all_tags", "(", "project", ")", ":", "result", "=", "set", "(", ")", "result", ".", "update", "(", "_get_project_tags", "(", "project", ")", ")", "result", ".", "update", "(", "_get_issues_tags", "(", "project", ")", ")", "result", ".", "upda...
given a project .
train
false
15,245
@pytest.fixture(scope='session', autouse=True) def httpbin(qapp): httpbin = WebserverProcess('webserver_sub') httpbin.start() (yield httpbin) httpbin.cleanup()
[ "@", "pytest", ".", "fixture", "(", "scope", "=", "'session'", ",", "autouse", "=", "True", ")", "def", "httpbin", "(", "qapp", ")", ":", "httpbin", "=", "WebserverProcess", "(", "'webserver_sub'", ")", "httpbin", ".", "start", "(", ")", "(", "yield", ...
returns url for httpbin resource .
train
false
15,246
def test_hsp_elp(): raw_txt = read_raw_kit(sqd_path, mrk_path, elp_txt_path, hsp_txt_path) raw_elp = read_raw_kit(sqd_path, mrk_path, elp_path, hsp_path) pts_txt = np.array([dig_point['r'] for dig_point in raw_txt.info['dig']]) pts_elp = np.array([dig_point['r'] for dig_point in raw_elp.info['dig']]) assert_array_almost_equal(pts_elp, pts_txt, decimal=5) trans_txt = raw_txt.info['dev_head_t']['trans'] trans_elp = raw_elp.info['dev_head_t']['trans'] assert_array_almost_equal(trans_elp, trans_txt, decimal=5) pts_txt_in_dev = apply_trans(linalg.inv(trans_txt), pts_txt) pts_elp_in_dev = apply_trans(linalg.inv(trans_elp), pts_elp) assert_array_almost_equal(pts_elp_in_dev, pts_txt_in_dev, decimal=5)
[ "def", "test_hsp_elp", "(", ")", ":", "raw_txt", "=", "read_raw_kit", "(", "sqd_path", ",", "mrk_path", ",", "elp_txt_path", ",", "hsp_txt_path", ")", "raw_elp", "=", "read_raw_kit", "(", "sqd_path", ",", "mrk_path", ",", "elp_path", ",", "hsp_path", ")", "p...
test kit usage of * .
train
false
15,247
@project_signals.after_create_registration.connect def after_register(src, dst, user): from website.archiver import tasks archiver_utils.before_archive(dst, user) if (dst.root != dst): return archive_tasks = [tasks.archive(job_pk=t.archive_job._id) for t in dst.node_and_primary_descendants()] handlers.enqueue_task(celery.chain(archive_tasks))
[ "@", "project_signals", ".", "after_create_registration", ".", "connect", "def", "after_register", "(", "src", ",", "dst", ",", "user", ")", ":", "from", "website", ".", "archiver", "import", "tasks", "archiver_utils", ".", "before_archive", "(", "dst", ",", "...
blinker listener for registration initiations .
train
false
15,250
def rehashconf(): return __salt__['ps.pkill']('znc', signal=signal.SIGHUP)
[ "def", "rehashconf", "(", ")", ":", "return", "__salt__", "[", "'ps.pkill'", "]", "(", "'znc'", ",", "signal", "=", "signal", ".", "SIGHUP", ")" ]
rehash the active configuration state from config file cli example: .
train
false
15,252
@cronjobs.register def update_addons_collections_downloads(): raise_if_reindex_in_progress('amo') d = AddonCollectionCount.objects.values('addon', 'collection').annotate(sum=Sum('count')) ts = [tasks.update_addons_collections_downloads.subtask(args=[chunk]) for chunk in chunked(d, 100)] TaskSet(ts).apply_async()
[ "@", "cronjobs", ".", "register", "def", "update_addons_collections_downloads", "(", ")", ":", "raise_if_reindex_in_progress", "(", "'amo'", ")", "d", "=", "AddonCollectionCount", ".", "objects", ".", "values", "(", "'addon'", ",", "'collection'", ")", ".", "annot...
update addons+collections download totals .
train
false
15,253
def test_aligned_mem_complex(): a = zeros(1608, dtype=np.uint8) z = np.frombuffer(a.data, offset=8, count=100, dtype=complex) z.shape = (10, 10) eig(z, overwrite_a=True) eig(z.T, overwrite_a=True)
[ "def", "test_aligned_mem_complex", "(", ")", ":", "a", "=", "zeros", "(", "1608", ",", "dtype", "=", "np", ".", "uint8", ")", "z", "=", "np", ".", "frombuffer", "(", "a", ".", "data", ",", "offset", "=", "8", ",", "count", "=", "100", ",", "dtype...
check that complex objects dont need to be completely aligned .
train
false
15,255
def k_random_intersection_graph(n, m, k): G = nx.empty_graph((n + m)) mset = range(n, (n + m)) for v in range(n): targets = random.sample(mset, k) G.add_edges_from(zip(([v] * len(targets)), targets)) return nx.projected_graph(G, range(n))
[ "def", "k_random_intersection_graph", "(", "n", ",", "m", ",", "k", ")", ":", "G", "=", "nx", ".", "empty_graph", "(", "(", "n", "+", "m", ")", ")", "mset", "=", "range", "(", "n", ",", "(", "n", "+", "m", ")", ")", "for", "v", "in", "range",...
return a intersection graph with randomly chosen attribute sets for each node that are of equal size (k) .
train
false
15,256
def urljoin(base_uri, uri_reference): return urlunsplit(urljoin_parts(urlsplit(base_uri), urlsplit(uri_reference)))
[ "def", "urljoin", "(", "base_uri", ",", "uri_reference", ")", ":", "return", "urlunsplit", "(", "urljoin_parts", "(", "urlsplit", "(", "base_uri", ")", ",", "urlsplit", "(", "uri_reference", ")", ")", ")" ]
return the given path *atoms .
train
false
15,257
def _concat_index_asobject(to_concat, name=None): klasses = (ABCDatetimeIndex, ABCTimedeltaIndex, ABCPeriodIndex) to_concat = [(x.asobject if isinstance(x, klasses) else x) for x in to_concat] from pandas import Index self = to_concat[0] attribs = self._get_attributes_dict() attribs['name'] = name to_concat = [(x._values if isinstance(x, Index) else x) for x in to_concat] return self._shallow_copy_with_infer(np.concatenate(to_concat), **attribs)
[ "def", "_concat_index_asobject", "(", "to_concat", ",", "name", "=", "None", ")", ":", "klasses", "=", "(", "ABCDatetimeIndex", ",", "ABCTimedeltaIndex", ",", "ABCPeriodIndex", ")", "to_concat", "=", "[", "(", "x", ".", "asobject", "if", "isinstance", "(", "...
concat all inputs as object .
train
false
15,259
def supported_ufunc_loop(ufunc, loop): from .targets import ufunc_db loop_sig = loop.ufunc_sig try: supported_loop = (loop_sig in ufunc_db.get_ufunc_info(ufunc)) except KeyError: loop_types = [x.char for x in (loop.numpy_inputs + loop.numpy_outputs)] supported_types = '?bBhHiIlLqQfd' supported_loop = all(((t in supported_types) for t in loop_types)) return supported_loop
[ "def", "supported_ufunc_loop", "(", "ufunc", ",", "loop", ")", ":", "from", ".", "targets", "import", "ufunc_db", "loop_sig", "=", "loop", ".", "ufunc_sig", "try", ":", "supported_loop", "=", "(", "loop_sig", "in", "ufunc_db", ".", "get_ufunc_info", "(", "uf...
return whether the *loop* for the *ufunc* is supported -in nopython- .
train
false
15,260
def _add_tag_url(question_id): return reverse('questions.add_tag', kwargs={'question_id': question_id})
[ "def", "_add_tag_url", "(", "question_id", ")", ":", "return", "reverse", "(", "'questions.add_tag'", ",", "kwargs", "=", "{", "'question_id'", ":", "question_id", "}", ")" ]
return the url to add_tag for question 1 .
train
false
15,261
def trim_cue_entry(string): if ((string[0] == '"') and (string[(-1)] == '"')): string = string[1:(-1)] return string
[ "def", "trim_cue_entry", "(", "string", ")", ":", "if", "(", "(", "string", "[", "0", "]", "==", "'\"'", ")", "and", "(", "string", "[", "(", "-", "1", ")", "]", "==", "'\"'", ")", ")", ":", "string", "=", "string", "[", "1", ":", "(", "-", ...
removes leading and trailing "s .
train
false
15,263
def server_delete(s_name, **connection_args): ret = True server = _server_get(s_name, **connection_args) if (server is None): return False nitro = _connect(**connection_args) if (nitro is None): return False try: NSServer.delete(nitro, server) except NSNitroError as error: log.debug('netscaler module error - NSServer.delete() failed: {0}'.format(error)) ret = False _disconnect(nitro) return ret
[ "def", "server_delete", "(", "s_name", ",", "**", "connection_args", ")", ":", "ret", "=", "True", "server", "=", "_server_get", "(", "s_name", ",", "**", "connection_args", ")", "if", "(", "server", "is", "None", ")", ":", "return", "False", "nitro", "=...
delete a server cli example: .
train
true
15,264
def lb_edit(lbn, settings, profile='default'): settings['cmd'] = 'update' settings['mime'] = 'prop' settings['w'] = lbn return (_do_http(settings, profile)['worker.result.type'] == 'OK')
[ "def", "lb_edit", "(", "lbn", ",", "settings", ",", "profile", "=", "'default'", ")", ":", "settings", "[", "'cmd'", "]", "=", "'update'", "settings", "[", "'mime'", "]", "=", "'prop'", "settings", "[", "'w'", "]", "=", "lbn", "return", "(", "_do_http"...
edit the loadbalancer settings note: URL data parameters for the standard update action cli examples: .
train
true
15,265
def ColorfulPyPrint_current_verbose_level(): global O_VERBOSE_LEVEL return O_VERBOSE_LEVEL
[ "def", "ColorfulPyPrint_current_verbose_level", "(", ")", ":", "global", "O_VERBOSE_LEVEL", "return", "O_VERBOSE_LEVEL" ]
show current verbose level :rtype: int .
train
false
15,266
def get_transport_cls(transport=None): if (transport not in _transport_cache): _transport_cache[transport] = resolve_transport(transport) return _transport_cache[transport]
[ "def", "get_transport_cls", "(", "transport", "=", "None", ")", ":", "if", "(", "transport", "not", "in", "_transport_cache", ")", ":", "_transport_cache", "[", "transport", "]", "=", "resolve_transport", "(", "transport", ")", "return", "_transport_cache", "[",...
get transport class by name .
train
false
15,267
def replace_insensitive(string, target, replacement): no_case = string.lower() index = no_case.rfind(target.lower()) if (index >= 0): return ((string[:index] + replacement) + string[(index + len(target)):]) else: return string
[ "def", "replace_insensitive", "(", "string", ",", "target", ",", "replacement", ")", ":", "no_case", "=", "string", ".", "lower", "(", ")", "index", "=", "no_case", ".", "rfind", "(", "target", ".", "lower", "(", ")", ")", "if", "(", "index", ">=", "...
similar to string .
train
true
15,268
def wipe_yum_cache(repository): return run_from_args(['yum', '--disablerepo=*', ('--enablerepo=' + repository), 'clean', 'expire-cache'])
[ "def", "wipe_yum_cache", "(", "repository", ")", ":", "return", "run_from_args", "(", "[", "'yum'", ",", "'--disablerepo=*'", ",", "(", "'--enablerepo='", "+", "repository", ")", ",", "'clean'", ",", "'expire-cache'", "]", ")" ]
force yum to update the metadata for a particular repository .
train
false
15,269
def workers_init(): global DB_SUPPORTS_SUBSECOND_RESOLUTION session = get_session() query = session.query(models.Worker).filter_by(resource_type='SENTINEL') worker = query.first() DB_SUPPORTS_SUBSECOND_RESOLUTION = bool(worker.updated_at.microsecond)
[ "def", "workers_init", "(", ")", ":", "global", "DB_SUPPORTS_SUBSECOND_RESOLUTION", "session", "=", "get_session", "(", ")", "query", "=", "session", ".", "query", "(", "models", ".", "Worker", ")", ".", "filter_by", "(", "resource_type", "=", "'SENTINEL'", ")...
check if db supports subsecond resolution and set global flag .
train
false
15,270
def key_to_english(key): english = '' for index in range(0, len(key), 8): subkey = key[index:(index + 8)] skbin = _key2bin(subkey) p = 0 for i in range(0, 64, 2): p = (p + _extract(skbin, i, 2)) skbin = _key2bin((subkey + bchr(((p << 6) & 255)))) for i in range(0, 64, 11): english = ((english + wordlist[_extract(skbin, i, 11)]) + ' ') return english[:(-1)]
[ "def", "key_to_english", "(", "key", ")", ":", "english", "=", "''", "for", "index", "in", "range", "(", "0", ",", "len", "(", "key", ")", ",", "8", ")", ":", "subkey", "=", "key", "[", "index", ":", "(", "index", "+", "8", ")", "]", "skbin", ...
key_to_english(key:string/bytes) : string transform an arbitrary key into a string containing english words .
train
false
15,271
def wait_for_backup_status(client, backup_id, status): body = client.show_backup(backup_id)['backup'] backup_status = body['status'] start = int(time.time()) while (backup_status != status): time.sleep(client.build_interval) body = client.show_backup(backup_id)['backup'] backup_status = body['status'] if ((backup_status == 'error') and (backup_status != status)): raise lib_exc.VolumeBackupException(backup_id=backup_id) if ((int(time.time()) - start) >= client.build_timeout): message = ('Volume backup %s failed to reach %s status (current %s) within the required time (%s s).' % (backup_id, status, backup_status, client.build_timeout)) raise lib_exc.TimeoutException(message)
[ "def", "wait_for_backup_status", "(", "client", ",", "backup_id", ",", "status", ")", ":", "body", "=", "client", ".", "show_backup", "(", "backup_id", ")", "[", "'backup'", "]", "backup_status", "=", "body", "[", "'status'", "]", "start", "=", "int", "(",...
waits for a backup to reach a given status .
train
false
15,272
def getFromCreationEvaluatorPlugins(namePathDictionary, xmlElement): if getEvaluatedBooleanDefault(False, '_fromCreationEvaluator', xmlElement): return getMatchingPlugins(namePathDictionary, xmlElement) return []
[ "def", "getFromCreationEvaluatorPlugins", "(", "namePathDictionary", ",", "xmlElement", ")", ":", "if", "getEvaluatedBooleanDefault", "(", "False", ",", "'_fromCreationEvaluator'", ",", "xmlElement", ")", ":", "return", "getMatchingPlugins", "(", "namePathDictionary", ","...
get the creation evaluator plugins if the xmlelement is from the creation evaluator .
train
false
15,273
def detach_network_interface(name=None, network_interface_id=None, attachment_id=None, force=False, region=None, key=None, keyid=None, profile=None): if (not (name or network_interface_id or attachment_id)): raise SaltInvocationError('Either name or network_interface_id or attachment_id must be provided.') conn = _get_conn(region=region, key=key, keyid=keyid, profile=profile) r = {} if (not attachment_id): result = _get_network_interface(conn, name, network_interface_id) if ('error' in result): return result eni = result['result'] info = _describe_network_interface(eni) try: attachment_id = info['attachment']['id'] except KeyError: r['error'] = {'message': 'Attachment id not found for this ENI.'} return r try: r['result'] = conn.detach_network_interface(attachment_id, force) except boto.exception.EC2ResponseError as e: r['error'] = __utils__['boto.get_error'](e) return r
[ "def", "detach_network_interface", "(", "name", "=", "None", ",", "network_interface_id", "=", "None", ",", "attachment_id", "=", "None", ",", "force", "=", "False", ",", "region", "=", "None", ",", "key", "=", "None", ",", "keyid", "=", "None", ",", "pr...
detach an elastic network interface .
train
true
15,276
def get_if_raw_addr6(iff): r = filter((lambda x: ((x[2] == iff) and (x[1] == IPV6_ADDR_GLOBAL))), in6_getifaddr()) if (len(r) == 0): return None else: r = r[0][0] return inet_pton(socket.AF_INET6, r)
[ "def", "get_if_raw_addr6", "(", "iff", ")", ":", "r", "=", "filter", "(", "(", "lambda", "x", ":", "(", "(", "x", "[", "2", "]", "==", "iff", ")", "and", "(", "x", "[", "1", "]", "==", "IPV6_ADDR_GLOBAL", ")", ")", ")", ",", "in6_getifaddr", "(...
returns the main global unicast address associated with provided interface .
train
true
15,277
@contextmanager def initialize_repo(worktree, gitdir=None): @contextmanager def use_gitdir(): if gitdir: (yield gitdir) else: with temporary_dir() as d: (yield d) with use_gitdir() as git_dir: with environment_as(GIT_DIR=git_dir, GIT_WORK_TREE=worktree): subprocess.check_call([u'git', u'init']) subprocess.check_call([u'git', u'config', u'user.email', u'you@example.com']) subprocess.check_call([u'git', u'config', u'user.name', u'Your Name']) subprocess.check_call([u'git', u'add', u'.']) subprocess.check_call([u'git', u'commit', u'-am', u'Add project files.']) (yield Git(gitdir=git_dir, worktree=worktree))
[ "@", "contextmanager", "def", "initialize_repo", "(", "worktree", ",", "gitdir", "=", "None", ")", ":", "@", "contextmanager", "def", "use_gitdir", "(", ")", ":", "if", "gitdir", ":", "(", "yield", "gitdir", ")", "else", ":", "with", "temporary_dir", "(", ...
initialize a git repository for the given worktree .
train
false
15,280
def _profile(prof_id, func): import pstats try: import cProfile as profile except ImportError: import profile PROF_DAT = ('/tmp/desktop-profile-%s.dat' % (prof_id,)) prof = profile.Profile() try: prof.runcall(func) finally: if os.path.exists(PROF_DAT): os.remove(PROF_DAT) prof.dump_stats(PROF_DAT) pstats.Stats(PROF_DAT).sort_stats('time').print_stats(50) print >>sys.stderr, ('Complete profile data in %s' % (PROF_DAT,))
[ "def", "_profile", "(", "prof_id", ",", "func", ")", ":", "import", "pstats", "try", ":", "import", "cProfile", "as", "profile", "except", "ImportError", ":", "import", "profile", "PROF_DAT", "=", "(", "'/tmp/desktop-profile-%s.dat'", "%", "(", "prof_id", ",",...
wrap a call with a profiler .
train
false
15,281
def retrieve_csp_report_uri(response): uri_set = set() non_report_only_policies = retrieve_csp_policies(response) report_only_policies = retrieve_csp_policies(response, True) policies_all = merge_policies_dict(non_report_only_policies, report_only_policies) if (len(policies_all) > 0): for directive_name in policies_all: if (directive_name.lower() != CSP_DIRECTIVE_REPORT_URI): continue for directive_value in policies_all[directive_name]: uri = directive_value.strip().lower() uri_set.add(uri) return uri_set
[ "def", "retrieve_csp_report_uri", "(", "response", ")", ":", "uri_set", "=", "set", "(", ")", "non_report_only_policies", "=", "retrieve_csp_policies", "(", "response", ")", "report_only_policies", "=", "retrieve_csp_policies", "(", "response", ",", "True", ")", "po...
method to retrieve all report uri from csp policies specified into a http response through csp headers .
train
false
15,282
@pytest.mark.network def test_install_noneditable_git(script, tmpdir): result = script.pip('install', 'git+https://github.com/pypa/pip-test-package.git@0.1.1#egg=pip-test-package') egg_info_folder = ((script.site_packages / 'pip_test_package-0.1.1-py%s.egg-info') % pyversion) result.assert_installed('piptestpackage', without_egg_link=True, editable=False) assert (egg_info_folder in result.files_created), str(result)
[ "@", "pytest", ".", "mark", ".", "network", "def", "test_install_noneditable_git", "(", "script", ",", "tmpdir", ")", ":", "result", "=", "script", ".", "pip", "(", "'install'", ",", "'git+https://github.com/pypa/pip-test-package.git@0.1.1#egg=pip-test-package'", ")", ...
test installing from a non-editable git url with a given tag .
train
false
15,283
def add_limit_to_query(query, args): if args.get(u'limit_page_length'): query += u'\n DCTB DCTB DCTB limit %(limit_start)s, %(limit_page_length)s' import frappe.utils args[u'limit_start'] = frappe.utils.cint(args.get(u'limit_start')) args[u'limit_page_length'] = frappe.utils.cint(args.get(u'limit_page_length')) return (query, args)
[ "def", "add_limit_to_query", "(", "query", ",", "args", ")", ":", "if", "args", ".", "get", "(", "u'limit_page_length'", ")", ":", "query", "+=", "u'\\n DCTB DCTB DCTB limit %(limit_start)s, %(limit_page_length)s'", "import", "frappe", ".", "utils", "args", "[", "...
add limit condition to query can be used by methods called in listing to add limit condition .
train
false
15,284
def parse_args(arguments, apply_config=False): parser = create_parser() args = parser.parse_args(arguments) if ((not args.files) and (not args.list_fixes)): parser.error(u'incorrect number of arguments') args.files = [decode_filename(name) for name in args.files] if apply_config: parser = read_config(args, parser) args = parser.parse_args(arguments) args.files = [decode_filename(name) for name in args.files] if (u'-' in args.files): if (len(args.files) > 1): parser.error(u'cannot mix stdin and regular files') if args.diff: parser.error(u'--diff cannot be used with standard input') if args.in_place: parser.error(u'--in-place cannot be used with standard input') if args.recursive: parser.error(u'--recursive cannot be used with standard input') if ((len(args.files) > 1) and (not (args.in_place or args.diff))): parser.error(u'autopep8 only takes one filename as argument unless the "--in-place" or "--diff" args are used') if (args.recursive and (not (args.in_place or args.diff))): parser.error(u'--recursive must be used with --in-place or --diff') if (args.in_place and args.diff): parser.error(u'--in-place and --diff are mutually exclusive') if (args.max_line_length <= 0): parser.error(u'--max-line-length must be greater than 0') if args.select: args.select = _split_comma_separated(args.select) if args.ignore: args.ignore = _split_comma_separated(args.ignore) elif (not args.select): if args.aggressive: args.select = [u'E', u'W'] else: args.ignore = _split_comma_separated(DEFAULT_IGNORE) if args.exclude: args.exclude = _split_comma_separated(args.exclude) else: args.exclude = [] if (args.jobs < 1): import multiprocessing args.jobs = multiprocessing.cpu_count() if ((args.jobs > 1) and (not args.in_place)): parser.error(u'parallel jobs requires --in-place') if args.line_range: if (args.line_range[0] <= 0): parser.error(u'--range must be positive numbers') if (args.line_range[0] > args.line_range[1]): parser.error(u'First value of --range should be less than or equal to the 
second') return args
[ "def", "parse_args", "(", "arguments", ",", "apply_config", "=", "False", ")", ":", "parser", "=", "create_parser", "(", ")", "args", "=", "parser", ".", "parse_args", "(", "arguments", ")", "if", "(", "(", "not", "args", ".", "files", ")", "and", "(",...
parse arguments from command-line to set options .
train
false
15,285
def get_admin_static_url(): return getattr(settings, 'ADMIN_MEDIA_PREFIX', (get_static_url() + 'admin/'))
[ "def", "get_admin_static_url", "(", ")", ":", "return", "getattr", "(", "settings", ",", "'ADMIN_MEDIA_PREFIX'", ",", "(", "get_static_url", "(", ")", "+", "'admin/'", ")", ")" ]
return the admin_media_prefix if it is in the settings .
train
false
15,286
def _email_sequence(emails): if isinstance(emails, basestring): return (emails,) return emails
[ "def", "_email_sequence", "(", "emails", ")", ":", "if", "isinstance", "(", "emails", ",", "basestring", ")", ":", "return", "(", "emails", ",", ")", "return", "emails" ]
forces email to be sequenceable type .
train
false
15,287
def grp_from_name(name): global _gid_to_grp_cache, _name_to_grp_cache (entry, cached) = _cache_key_value(grp.getgrnam, name, _name_to_grp_cache) if (entry and (not cached)): _gid_to_grp_cache[entry.gr_gid] = entry return entry
[ "def", "grp_from_name", "(", "name", ")", ":", "global", "_gid_to_grp_cache", ",", "_name_to_grp_cache", "(", "entry", ",", "cached", ")", "=", "_cache_key_value", "(", "grp", ".", "getgrnam", ",", "name", ",", "_name_to_grp_cache", ")", "if", "(", "entry", ...
return password database entry for name .
train
false
15,289
def read_cz_lsm_floatpairs(fh): size = struct.unpack('<i', fh.read(4))[0] return fh.read_array('<2f8', count=size)
[ "def", "read_cz_lsm_floatpairs", "(", "fh", ")", ":", "size", "=", "struct", ".", "unpack", "(", "'<i'", ",", "fh", ".", "read", "(", "4", ")", ")", "[", "0", "]", "return", "fh", ".", "read_array", "(", "'<2f8'", ",", "count", "=", "size", ")" ]
read lsm sequence of float pairs from file and return as list .
train
false
15,294
def getNonIntersectingGridPointLine(gridPointInsetX, isJunctionWide, paths, pixelTable, yIntersectionPath, width): pointIndexPlusOne = yIntersectionPath.getPointIndexPlusOne() path = yIntersectionPath.getPath(paths) begin = path[yIntersectionPath.pointIndex] end = path[pointIndexPlusOne] plusMinusSign = getPlusMinusSign((end.real - begin.real)) if isJunctionWide: gridPointXFirst = complex((yIntersectionPath.gridPoint.real - (plusMinusSign * gridPointInsetX)), yIntersectionPath.gridPoint.imag) gridPointXSecond = complex((yIntersectionPath.gridPoint.real + (plusMinusSign * gridPointInsetX)), yIntersectionPath.gridPoint.imag) if isAddedPointOnPathFree(path, pixelTable, gridPointXSecond, pointIndexPlusOne, width): if isAddedPointOnPathFree(path, pixelTable, gridPointXFirst, pointIndexPlusOne, width): return [gridPointXSecond, gridPointXFirst] if isAddedPointOnPathFree(path, pixelTable, yIntersectionPath.gridPoint, pointIndexPlusOne, width): return [gridPointXSecond, yIntersectionPath.gridPoint] return [gridPointXSecond] if isAddedPointOnPathFree(path, pixelTable, yIntersectionPath.gridPoint, pointIndexPlusOne, width): return [yIntersectionPath.gridPoint] return []
[ "def", "getNonIntersectingGridPointLine", "(", "gridPointInsetX", ",", "isJunctionWide", ",", "paths", ",", "pixelTable", ",", "yIntersectionPath", ",", "width", ")", ":", "pointIndexPlusOne", "=", "yIntersectionPath", ".", "getPointIndexPlusOne", "(", ")", "path", "=...
get the points around the grid point that is junction wide that do not intersect .
train
false
15,295
def get_tab_by_tab_id_locator(tab_list, tab_id_locator): if ('tab_id' in tab_id_locator): tab = CourseTabList.get_tab_by_id(tab_list, tab_id_locator['tab_id']) elif ('tab_locator' in tab_id_locator): tab = get_tab_by_locator(tab_list, tab_id_locator['tab_locator']) return tab
[ "def", "get_tab_by_tab_id_locator", "(", "tab_list", ",", "tab_id_locator", ")", ":", "if", "(", "'tab_id'", "in", "tab_id_locator", ")", ":", "tab", "=", "CourseTabList", ".", "get_tab_by_id", "(", "tab_list", ",", "tab_id_locator", "[", "'tab_id'", "]", ")", ...
look for a tab with the specified tab_id or locator .
train
false
15,296
def serialize_args(fn): def wrapper(obj, *args, **kwargs): args = [(utils.strtime(arg) if isinstance(arg, datetime.datetime) else arg) for arg in args] for (k, v) in kwargs.items(): if ((k == 'exc_val') and v): kwargs[k] = six.text_type(v) elif ((k == 'exc_tb') and v and (not isinstance(v, six.string_types))): kwargs[k] = ''.join(traceback.format_tb(v)) elif isinstance(v, datetime.datetime): kwargs[k] = utils.strtime(v) if hasattr(fn, '__call__'): return fn(obj, *args, **kwargs) return fn.__get__(None, obj)(*args, **kwargs) wrapper.remotable = getattr(fn, 'remotable', False) wrapper.original_fn = fn return (functools.wraps(fn)(wrapper) if hasattr(fn, '__call__') else classmethod(wrapper))
[ "def", "serialize_args", "(", "fn", ")", ":", "def", "wrapper", "(", "obj", ",", "*", "args", ",", "**", "kwargs", ")", ":", "args", "=", "[", "(", "utils", ".", "strtime", "(", "arg", ")", "if", "isinstance", "(", "arg", ",", "datetime", ".", "d...
workaround for float string conversion issues in python 2 .
train
false
15,297
def checkLRC(data, check): return (computeLRC(data) == check)
[ "def", "checkLRC", "(", "data", ",", "check", ")", ":", "return", "(", "computeLRC", "(", "data", ")", "==", "check", ")" ]
checks if the passed in data matches the lrc .
train
false
15,298
def find_repository_host(job_path): site_repo_info = site_find_repository_host(job_path) if (site_repo_info is not None): return site_repo_info results_repos = [RESULTS_HOST] for drone in DRONES.split(','): drone = drone.strip() if (drone not in results_repos): results_repos.append(drone) if (ARCHIVE_HOST and (ARCHIVE_HOST not in results_repos)): results_repos.append(ARCHIVE_HOST) for drone in results_repos: if (drone == 'localhost'): continue http_path = ('http://%s%s' % (drone, job_path)) try: logging.info('Attempting to access the selected results URL: "%s"', http_path) utils.urlopen(http_path) return ('http', utils.normalize_hostname(drone), job_path) except urllib2.URLError: logging.error('Failed to access the selected results URL. Reverting to usual results location') pass
[ "def", "find_repository_host", "(", "job_path", ")", ":", "site_repo_info", "=", "site_find_repository_host", "(", "job_path", ")", "if", "(", "site_repo_info", "is", "not", "None", ")", ":", "return", "site_repo_info", "results_repos", "=", "[", "RESULTS_HOST", "...
find the machine holding the given logs and return a url to the logs .
train
false
15,300
def ortho_weight(ndim): W = numpy.random.randn(ndim, ndim) (u, _, _) = numpy.linalg.svd(W) return u.astype('float32')
[ "def", "ortho_weight", "(", "ndim", ")", ":", "W", "=", "numpy", ".", "random", ".", "randn", "(", "ndim", ",", "ndim", ")", "(", "u", ",", "_", ",", "_", ")", "=", "numpy", ".", "linalg", ".", "svd", "(", "W", ")", "return", "u", ".", "astyp...
orthogonal weight init .
train
false
15,301
def listenWS(factory, contextFactory=None, backlog=50, interface=''): if hasattr(factory, 'reactor'): reactor = factory.reactor else: from twisted.internet import reactor if factory.isSecure: if (contextFactory is None): raise Exception('Secure WebSocket listen requested, but no SSL context factory given') listener = reactor.listenSSL(factory.port, factory, contextFactory, backlog, interface) else: listener = reactor.listenTCP(factory.port, factory, backlog, interface) return listener
[ "def", "listenWS", "(", "factory", ",", "contextFactory", "=", "None", ",", "backlog", "=", "50", ",", "interface", "=", "''", ")", ":", "if", "hasattr", "(", "factory", ",", "'reactor'", ")", ":", "reactor", "=", "factory", ".", "reactor", "else", ":"...
listen for incoming websocket connections from clients .
train
false
15,302
def check_is_spam(content, content_object, request, backends=SPAM_CHECKER_BACKENDS): for backend_path in backends: spam_checker = get_spam_checker(backend_path) if (spam_checker is not None): is_spam = spam_checker(content, content_object, request) if is_spam: return True return False
[ "def", "check_is_spam", "(", "content", ",", "content_object", ",", "request", ",", "backends", "=", "SPAM_CHECKER_BACKENDS", ")", ":", "for", "backend_path", "in", "backends", ":", "spam_checker", "=", "get_spam_checker", "(", "backend_path", ")", "if", "(", "s...
return true if the content is a spam .
train
true
15,305
def log_stats(fun): def newfun(env, res): 'Log the execution time of the passed function' timer = (lambda t: (t.time(), t.clock())) (t0, c0) = timer(time) executed_function = fun(env, res) (t1, c1) = timer(time) log_info = '**** Request: %.2fms/%.2fms (real time/cpu time)' log_info = (log_info % (((t1 - t0) * 1000), ((c1 - c0) * 1000))) logging.info(log_info) return executed_function return newfun
[ "def", "log_stats", "(", "fun", ")", ":", "def", "newfun", "(", "env", ",", "res", ")", ":", "timer", "=", "(", "lambda", "t", ":", "(", "t", ".", "time", "(", ")", ",", "t", ".", "clock", "(", ")", ")", ")", "(", "t0", ",", "c0", ")", "=...
function that will act as a decorator to make logging .
train
false
15,306
@declared
def unknown(obj, output):
    """Set a service in unknown state.

    Delegates to ``set_value`` with state code 3 and no perf-data value.
    NOTE(review): code 3 presumably maps to the UNKNOWN state in the
    ``set_value`` helper (Nagios-style convention) — confirm against its
    definition.
    """
    set_value(obj, output, None, 3)
[ "@", "declared", "def", "unknown", "(", "obj", ",", "output", ")", ":", "set_value", "(", "obj", ",", "output", ",", "None", ",", "3", ")" ]
set a service in unknown state .
train
false
15,308
def cmpCompanies(p1, p2):
    """Compare two companies for sorting; return -1, 0 or 1.

    Ordering key is 'long imdb name' (falling back to plain 'name' when
    neither company has the long form), with 'country' as tie-breaker.
    Missing values sort last via the ``_last`` sentinel.
    """
    key1 = p1.get('long imdb name', _last)
    key2 = p2.get('long imdb name', _last)
    if key1 is _last and key2 is _last:
        # Neither has the long form: fall back to the short name.
        key1 = p1.get('name', _last)
        key2 = p2.get('name', _last)
    if key1 > key2:
        return 1
    if key1 < key2:
        return -1
    # Names tie: break the tie on country.
    country1 = p1.get('country', _last)
    country2 = p2.get('country', _last)
    if country1 > country2:
        return 1
    if country1 < country2:
        return -1
    return 0
[ "def", "cmpCompanies", "(", "p1", ",", "p2", ")", ":", "p1n", "=", "p1", ".", "get", "(", "'long imdb name'", ",", "_last", ")", "p2n", "=", "p2", ".", "get", "(", "'long imdb name'", ",", "_last", ")", "if", "(", "(", "p1n", "is", "_last", ")", ...
compare two companies .
train
false
15,309
@contextfunction
def attachments_block(context, object=None):
    """Render the attachments block for an object or update record.

    Update records use a dedicated queryset filter and template; all other
    objects use the generic attachment lookup.  Returns rendered markup.
    """
    request = context['request']
    response_format = 'html'
    if 'response_format' in context:
        response_format = context['response_format']

    if isinstance(object, UpdateRecord):
        attachments = Attachment.objects.filter(attached_record=object)
        template = 'core/tags/attachments_record_block'
    else:
        attachments = Attachment.objects.filter(attached_object=object)
        template = 'core/tags/attachments_block'

    rendered = render_to_string(template,
                                {'object': object, 'attachments': attachments},
                                context_instance=RequestContext(request),
                                response_format=response_format)
    return Markup(rendered)
[ "@", "contextfunction", "def", "attachments_block", "(", "context", ",", "object", "=", "None", ")", ":", "request", "=", "context", "[", "'request'", "]", "response_format", "=", "'html'", "if", "(", "'response_format'", "in", "context", ")", ":", "response_f...
attachments for an object or update record .
train
false
15,310
def validateEncoder(encoder, subsampling):
    """Check that every sampled pair of encoder indices has acceptable overlap.

    For each index ``i`` in the encoder's range, tests pairs ``(i, j)`` with
    ``j`` stepped by ``subsampling``; returns False on the first pair that
    fails ``encoder._overlapOK``, True if all pairs pass.
    """
    lo = encoder.minIndex
    hi = encoder.maxIndex
    for i in range(lo, hi + 1):
        for j in range(i + 1, hi + 1, subsampling):
            if not encoder._overlapOK(i, j):
                return False
    return True
[ "def", "validateEncoder", "(", "encoder", ",", "subsampling", ")", ":", "for", "i", "in", "range", "(", "encoder", ".", "minIndex", ",", "(", "encoder", ".", "maxIndex", "+", "1", ")", ",", "1", ")", ":", "for", "j", "in", "range", "(", "(", "i", ...
given an encoder .
train
false
15,311
def needs_host(func):
    """Decorator that prompts for ``env.host_string`` before running ``func``.

    Keeps asking the user for a (single) host connection string until one is
    set in the fabric environment, then invokes the wrapped task.  The
    undecorated callable is exposed as ``.undecorated``.
    """
    from fabric.state import env

    @wraps(func)
    def host_prompting_wrapper(*args, **kwargs):
        while not env.get('host_string', False):
            # Abort cleanly when prompting is disallowed (e.g. --abort-on-prompts).
            handle_prompt_abort('the target host connection string')
            entered = raw_input('No hosts found. Please specify (single) host string for connection: ')
            env.update(to_dict(entered))
        return func(*args, **kwargs)

    host_prompting_wrapper.undecorated = func
    return host_prompting_wrapper
[ "def", "needs_host", "(", "func", ")", ":", "from", "fabric", ".", "state", "import", "env", "@", "wraps", "(", "func", ")", "def", "host_prompting_wrapper", "(", "*", "args", ",", "**", "kwargs", ")", ":", "while", "(", "not", "env", ".", "get", "("...
prompt user for value of env .
train
false
15,312
def expand_cube_metadata(metadata):
    """Expand cube metadata into its most complete canonical form.

    Normalizes dimension links, attaches per-dimension hierarchies, applies
    the cube-level ``nonadditive`` default to measures and expands measure
    attribute metadata.  The input mapping is not mutated; a new dict is
    returned.

    :raises ModelError: if the cube has no name, or if hierarchies are
        declared for dimensions that are not linked.
    """
    metadata = dict(metadata)
    if 'name' not in metadata:
        raise ModelError('Cube has no name')

    links = metadata.get('dimensions', [])
    if links:
        links = expand_dimension_links(metadata['dimensions'])

    if 'hierarchies' in metadata:
        # Move each dimension's hierarchies onto its link; anything left
        # over refers to a dimension that was never linked.
        remaining = dict(metadata['hierarchies'])
        for link in links:
            try:
                link['hierarchies'] = remaining.pop(link['name'])
            except KeyError:
                continue
        if remaining:
            raise ModelError('There are hierarchies specified for non-linked dimensions: %s.' % remaining.keys())

    # Cube-level nonadditive acts as a default for measures that don't set it.
    nonadditive = metadata.pop('nonadditive', None)
    if 'measures' in metadata:
        expanded = []
        for raw in metadata['measures']:
            measure = expand_attribute_metadata(raw)
            if nonadditive:
                measure['nonadditive'] = measure.get('nonadditive', nonadditive)
            expanded.append(measure)
        metadata['measures'] = expanded

    if links:
        metadata['dimensions'] = links

    return metadata
[ "def", "expand_cube_metadata", "(", "metadata", ")", ":", "metadata", "=", "dict", "(", "metadata", ")", "if", "(", "not", "(", "'name'", "in", "metadata", ")", ")", ":", "raise", "ModelError", "(", "'Cube has no name'", ")", "links", "=", "metadata", ".",...
expands metadata to be as complete as possible cube metadata .
train
false