id_within_dataset
int64
1
55.5k
snippet
stringlengths
19
14.2k
tokens
listlengths
6
1.63k
nl
stringlengths
6
352
split_within_dataset
stringclasses
1 value
is_duplicated
bool
2 classes
42,942
def _exists_in_path(cmd, environ): if (u'PATH' in environ): input_environ = environ.get(u'PATH') else: input_environ = os.environ.get(u'PATH', u'') extensions = os.environ.get(u'PATHEXT', u'').split(os.pathsep) for directory in input_environ.split(os.pathsep): base = os.path.join(directory, cmd) options = ([base] + [(base + ext) for ext in extensions]) for filename in options: if os.path.exists(filename): return (True, filename) return (False, None)
[ "def", "_exists_in_path", "(", "cmd", ",", "environ", ")", ":", "if", "(", "u'PATH'", "in", "environ", ")", ":", "input_environ", "=", "environ", ".", "get", "(", "u'PATH'", ")", "else", ":", "input_environ", "=", "os", ".", "environ", ".", "get", "(",...
based on a code snippet from URL .
train
false
42,943
def getEachWordCapitalized(name):
    """Turn an underscore-separated name into space-separated capitalized words."""
    # lower + '_'->' ' first, then capitalize each space-delimited chunk.
    return ' '.join(chunk.capitalize() for chunk in name.lower().replace('_', ' ').split(' '))
[ "def", "getEachWordCapitalized", "(", "name", ")", ":", "withSpaces", "=", "name", ".", "lower", "(", ")", ".", "replace", "(", "'_'", ",", "' '", ")", "words", "=", "withSpaces", ".", "split", "(", "' '", ")", "capitalizedStrings", "=", "[", "]", "for...
get the capitalized name .
train
false
42,944
def generate_pipeline_code(pipeline_tree):
    """Render the sklearn ``make_pipeline(...)`` source for a pipeline tree."""
    step_lines = ',\n'.join(process_operator(pipeline_tree))
    return 'make_pipeline(\n{STEPS}\n)'.format(STEPS=_indent(step_lines, 4))
[ "def", "generate_pipeline_code", "(", "pipeline_tree", ")", ":", "steps", "=", "process_operator", "(", "pipeline_tree", ")", "pipeline_text", "=", "'make_pipeline(\\n{STEPS}\\n)'", ".", "format", "(", "STEPS", "=", "_indent", "(", "',\\n'", ".", "join", "(", "ste...
generate code specific to the construction of the sklearn pipeline parameters pipeline_tree: list list of operators in the current optimized pipeline returns source code for the sklearn pipeline .
train
false
42,945
@content_type('application/json')
def pretty_json(content, **kwargs):
    """Serialize ``content`` as indented, human-readable JSON."""
    pretty_separators = (',', ': ')
    return json(content, indent=4, separators=pretty_separators, **kwargs)
[ "@", "content_type", "(", "'application/json'", ")", "def", "pretty_json", "(", "content", ",", "**", "kwargs", ")", ":", "return", "json", "(", "content", ",", "indent", "=", "4", ",", "separators", "=", "(", "','", ",", "': '", ")", ",", "**", "kwarg...
json pretty printed and indented .
train
false
42,946
def handle_delete(request):
    """Delete the ES index named in ``POST['delete_index']`` and redirect back.

    Raises DeleteError when the name lacks the configured prefix, does not
    exist, or is the current default read index.
    """
    index_to_delete = request.POST.get('delete_index')
    known_indexes = [name for (name, count) in get_indexes()]
    if (not index_to_delete.startswith(settings.ES_INDEX_PREFIX)):
        raise DeleteError(('"%s" is not a valid index name.' % index_to_delete))
    if (index_to_delete not in known_indexes):
        raise DeleteError(('"%s" does not exist.' % index_to_delete))
    if (index_to_delete == read_index('default')):
        raise DeleteError(('"%s" is the default read index.' % index_to_delete))
    delete_index(index_to_delete)
    return HttpResponseRedirect(request.path)
[ "def", "handle_delete", "(", "request", ")", ":", "index_to_delete", "=", "request", ".", "POST", ".", "get", "(", "'delete_index'", ")", "es_indexes", "=", "[", "name", "for", "(", "name", ",", "count", ")", "in", "get_indexes", "(", ")", "]", "if", "...
deletes an index .
train
false
42,947
def _weight_mean_color(graph, src, dst, n): diff = (graph.node[dst]['mean color'] - graph.node[n]['mean color']) diff = np.linalg.norm(diff) return {'weight': diff}
[ "def", "_weight_mean_color", "(", "graph", ",", "src", ",", "dst", ",", "n", ")", ":", "diff", "=", "(", "graph", ".", "node", "[", "dst", "]", "[", "'mean color'", "]", "-", "graph", ".", "node", "[", "n", "]", "[", "'mean color'", "]", ")", "di...
callback to handle merging nodes by recomputing mean color .
train
false
42,948
def verify_user(user):
    """Mark ``user``'s email address as verified without sending a confirmation.

    Raises ValidationError when the user has no email or a non-unique email,
    and ValueError when the account is already verified.
    """
    # An account with no email cannot be verified at all.
    if (not user.email):
        raise ValidationError(("You cannot verify an account with no email set. You can set this user's email with 'pootle update_user_email %s EMAIL'" % user.username))
    try:
        validate_email_unique(user.email, user)
    except ValidationError:
        raise ValidationError("This user's email is not unique. You can find duplicate emails with 'pootle find_duplicate_emails'")
    # If allauth already tracks a primary address, just flip its flag
    # (or bail out when it is already verified).
    existing_primary = EmailAddress.objects.filter(user=user, primary=True)
    if existing_primary.exists():
        existing_primary = existing_primary.first()
        if (not existing_primary.verified):
            existing_primary.verified = True
            existing_primary.save()
            return
        else:
            raise ValueError(("User '%s' is already verified" % user.username))
    # No primary address yet: sync addresses from the user record and
    # promote the one matching the user's email.
    sync_user_email_addresses(user)
    email_address = EmailAddress.objects.filter(user=user, email__iexact=user.email).order_by('primary').first()
    email_address.verified = True
    email_address.primary = True
    email_address.save()
[ "def", "verify_user", "(", "user", ")", ":", "if", "(", "not", "user", ".", "email", ")", ":", "raise", "ValidationError", "(", "(", "\"You cannot verify an account with no email set. You can set this user's email with 'pootle update_user_email %s EMAIL'\"", "%", "user", "....
verify a user account without email confirmation if the user has an existing primary allauth .
train
false
42,951
def compress_kml(kml):
    """Zip the given KML string into KMZ bytes and return them."""
    buf = cStringIO.StringIO()
    archive = zipfile.ZipFile(buf, 'a', zipfile.ZIP_DEFLATED)
    archive.writestr('doc.kml', kml.encode(settings.DEFAULT_CHARSET))
    archive.close()
    # Rewind so the whole archive is read back out.
    buf.seek(0)
    return buf.read()
[ "def", "compress_kml", "(", "kml", ")", ":", "kmz", "=", "cStringIO", ".", "StringIO", "(", ")", "zf", "=", "zipfile", ".", "ZipFile", "(", "kmz", ",", "'a'", ",", "zipfile", ".", "ZIP_DEFLATED", ")", "zf", ".", "writestr", "(", "'doc.kml'", ",", "km...
returns compressed kmz from the given kml string .
train
false
42,952
def verify_task_queue_add_request(app_id, request, now):
    """Validate a TaskQueueAddRequest and return a TaskQueueServiceError code.

    Returns SKIPPED when the request is acceptable; otherwise INVALID_ETA,
    PERMISSION_DENIED or TASK_TOO_LARGE.
    """
    if (request.eta_usec() < 0):
        return taskqueue_service_pb.TaskQueueServiceError.INVALID_ETA
    eta = datetime.datetime.utcfromtimestamp(_usec_to_sec(request.eta_usec()))
    # The ETA may not lie further in the future than now + MAX_ETA.
    max_eta = (now + MAX_ETA)
    if (eta > max_eta):
        return taskqueue_service_pb.TaskQueueServiceError.INVALID_ETA
    # A cron timetable is only allowed when the request is bound to an app.
    if (request.has_crontimetable() and (app_id is None)):
        return taskqueue_service_pb.TaskQueueServiceError.PERMISSION_DENIED
    # Pull and push queues have different payload size ceilings.
    if (request.mode() == taskqueue_service_pb.TaskQueueMode.PULL):
        max_task_size_bytes = MAX_PULL_TASK_SIZE_BYTES
    else:
        max_task_size_bytes = MAX_PUSH_TASK_SIZE_BYTES
    if (request.ByteSize() > max_task_size_bytes):
        return taskqueue_service_pb.TaskQueueServiceError.TASK_TOO_LARGE
    return taskqueue_service_pb.TaskQueueServiceError.SKIPPED
[ "def", "verify_task_queue_add_request", "(", "app_id", ",", "request", ",", "now", ")", ":", "if", "(", "request", ".", "eta_usec", "(", ")", "<", "0", ")", ":", "return", "taskqueue_service_pb", ".", "TaskQueueServiceError", ".", "INVALID_ETA", "eta", "=", ...
checks that a taskqueueaddrequest is valid .
train
false
42,954
def get_properties(obj):
    """Return the property names of ``obj``: mapping keys when it is dict-like,
    otherwise its instance-dict keys, otherwise an empty list."""
    if hasattr(obj, 'keys'):
        return obj.keys()
    if hasattr(obj, '__dict__'):
        return obj.__dict__.keys()
    return []
[ "def", "get_properties", "(", "obj", ")", ":", "if", "hasattr", "(", "obj", ",", "'keys'", ")", ":", "return", "obj", ".", "keys", "(", ")", "elif", "hasattr", "(", "obj", ",", "'__dict__'", ")", ":", "return", "obj", ".", "__dict__", ".", "keys", ...
returns a list of properties for l{obj} @since: 0 .
train
true
42,955
def validate_quotes(value):
    """Validate that double quotes in a filter value are balanced and comma-delimited.

    Raises exception.InvalidParameterValue when a quote is unbalanced or is
    not adjacent to a comma.  Backslash-escaped quotes are ignored.
    """
    # True means the next unescaped quote would be an *opening* quote.
    open_quotes = True
    for i in range(len(value)):
        if (value[i] == '"'):
            # Skip quotes escaped with a backslash.
            if (i and (value[(i - 1)] == '\\')):
                continue
            if open_quotes:
                # An opening quote must follow a comma (or start the string).
                if (i and (value[(i - 1)] != ',')):
                    msg = (_('Invalid filter value %s. There is no comma before opening quotation mark.') % value)
                    raise exception.InvalidParameterValue(message=msg)
            # A closing quote must precede a comma (or end the string).
            elif (((i + 1) != len(value)) and (value[(i + 1)] != ',')):
                msg = (_('Invalid filter value %s. There is no comma after closing quotation mark.') % value)
                raise exception.InvalidParameterValue(message=msg)
            open_quotes = (not open_quotes)
    if (not open_quotes):
        msg = (_('Invalid filter value %s. The quote is not closed.') % value)
        raise exception.InvalidParameterValue(message=msg)
[ "def", "validate_quotes", "(", "value", ")", ":", "open_quotes", "=", "True", "for", "i", "in", "range", "(", "len", "(", "value", ")", ")", ":", "if", "(", "value", "[", "i", "]", "==", "'\"'", ")", ":", "if", "(", "i", "and", "(", "value", "[...
validate filter values validation opening/closing quotes in the expression .
train
false
42,956
def _is_mostly_subdomain_compatible(bucket_name):
    """Return True when ``bucket_name`` is safe for subdomain-style S3 calls.

    Checks lowercase-ness, length 3..63, forbidden substrings, edge
    characters, and that the name is not IPv4-like.
    """
    if bucket_name.lower() != bucket_name:
        return False
    if not (3 <= len(bucket_name) <= 63):
        return False
    for forbidden in ('_', '..', '-.', '.-'):
        if forbidden in bucket_name:
            return False
    if bucket_name.startswith(('-', '.')) or bucket_name.endswith(('-', '.')):
        return False
    return (not _is_ipv4_like(bucket_name))
[ "def", "_is_mostly_subdomain_compatible", "(", "bucket_name", ")", ":", "return", "(", "(", "bucket_name", ".", "lower", "(", ")", "==", "bucket_name", ")", "and", "(", "len", "(", "bucket_name", ")", ">=", "3", ")", "and", "(", "len", "(", "bucket_name", ...
returns true if subdomaincallingformat can be used .
train
true
42,957
def has_leading_dir(paths):
    """Return True when every path shares one identical non-empty leading directory."""
    common_prefix = None
    for path in paths:
        (prefix, _rest) = split_leading_dir(path)
        if (not prefix):
            return False
        if (common_prefix is None):
            # First path seen fixes the expected prefix.
            common_prefix = prefix
        elif (prefix != common_prefix):
            return False
    return True
[ "def", "has_leading_dir", "(", "paths", ")", ":", "common_prefix", "=", "None", "for", "path", "in", "paths", ":", "(", "prefix", ",", "rest", ")", "=", "split_leading_dir", "(", "path", ")", "if", "(", "not", "prefix", ")", ":", "return", "False", "el...
returns true if all the paths have the same leading path name .
train
true
42,959
def group_snapshot_get_all(context, filters=None):
    """Fetch every group snapshot, optionally narrowed by ``filters``.

    Thin delegation to the active database backend (IMPL).
    """
    return IMPL.group_snapshot_get_all(context, filters)
[ "def", "group_snapshot_get_all", "(", "context", ",", "filters", "=", "None", ")", ":", "return", "IMPL", ".", "group_snapshot_get_all", "(", "context", ",", "filters", ")" ]
get all group snapshots .
train
false
42,960
def extract_tb(tb, limit=None):
    """Return up to ``limit`` ``(filename, lineno, name, line)`` tuples from ``tb``.

    When ``limit`` is None, ``sys.tracebacklimit`` is honoured if set.  The
    source line is looked up via ``linecache`` and stripped; it is None when
    unavailable.
    """
    if limit is None:
        # Fall back to the interpreter-wide limit when the caller gave none.
        if hasattr(sys, 'tracebacklimit'):
            limit = sys.tracebacklimit
    entries = []  # renamed from 'list' to stop shadowing the builtin
    n = 0
    while tb is not None and (limit is None or n < limit):
        frame = tb.tb_frame
        lineno = tb.tb_lineno
        code = frame.f_code
        filename = code.co_filename
        name = code.co_name
        # checkcache keeps linecache honest if the file changed on disk.
        linecache.checkcache(filename)
        line = linecache.getline(filename, lineno, frame.f_globals)
        line = line.strip() if line else None
        entries.append((filename, lineno, name, line))
        tb = tb.tb_next
        n += 1
    return entries
[ "def", "extract_tb", "(", "tb", ",", "limit", "=", "None", ")", ":", "if", "(", "limit", "is", "None", ")", ":", "if", "hasattr", "(", "sys", ",", "'tracebacklimit'", ")", ":", "limit", "=", "sys", ".", "tracebacklimit", "list", "=", "[", "]", "n",...
return list of up to limit pre-processed entries from traceback .
train
true
42,961
def _credit_course_requirements(course_key, student):
    """Summarize the student's credit-requirement progress for a course.

    Returns None when credit eligibility is disabled, the course is not a
    credit course, or the enrollment mode does not display requirements.
    Otherwise returns a dict with 'eligibility_status' ('eligible',
    'not_eligible' or 'partial_eligible') and the raw 'requirements' statuses.
    """
    # Feature flag and course type gate the whole computation.
    if (not (settings.FEATURES.get('ENABLE_CREDIT_ELIGIBILITY', False) and is_credit_course(course_key))):
        return None
    enrollment = CourseEnrollment.get_enrollment(student, course_key)
    if (enrollment and (enrollment.mode not in REQUIREMENTS_DISPLAY_MODES)):
        return None
    non_eligible_statuses = ['failed', 'declined']
    requirement_statuses = get_credit_requirement_status(course_key, student.username)
    if is_user_eligible_for_credit(student.username, course_key):
        eligibility_status = 'eligible'
    elif any(((requirement['status'] in non_eligible_statuses) for requirement in requirement_statuses)):
        # A single failed/declined requirement rules out eligibility entirely.
        eligibility_status = 'not_eligible'
    else:
        eligibility_status = 'partial_eligible'
    return {'eligibility_status': eligibility_status, 'requirements': requirement_statuses}
[ "def", "_credit_course_requirements", "(", "course_key", ",", "student", ")", ":", "if", "(", "not", "(", "settings", ".", "FEATURES", ".", "get", "(", "'ENABLE_CREDIT_ELIGIBILITY'", ",", "False", ")", "and", "is_credit_course", "(", "course_key", ")", ")", ")...
return information about which credit requirements a user has satisfied .
train
false
42,962
def get_valid_os_versions_for_breed(breed):
    """Return the OS versions known for ``breed``; empty list for unknown breeds."""
    if (breed not in get_valid_breeds()):
        return []
    return SIGNATURE_CACHE['breeds'][breed].keys()
[ "def", "get_valid_os_versions_for_breed", "(", "breed", ")", ":", "os_versions", "=", "[", "]", "if", "(", "breed", "in", "get_valid_breeds", "(", ")", ")", ":", "os_versions", "=", "SIGNATURE_CACHE", "[", "'breeds'", "]", "[", "breed", "]", ".", "keys", "...
return a list of valid os-versions for the given breed .
train
false
42,963
def test_not_implemented_elemwise_grad():
    """Regression test: Elemwise must propagate grad_not_implemented correctly.

    Taking the gradient with respect to the implemented input must work,
    while taking it with respect to the not-implemented one must raise
    NullTypeGradError.
    """
    class TestOp(scalar.ScalarOp, ):

        def __init__(self):
            self.output_types_preference = scalar.upgrade_to_float

        def impl(self, n, x):
            # Simple op: x * n; only the gradient w.r.t. x is implemented.
            return (x * n)

        def grad(self, inputs, gout):
            (n, x) = inputs
            (gz,) = gout
            dy_dx = n
            # First slot (w.r.t. n) is explicitly not implemented.
            return [theano.gradient.grad_not_implemented(self, 0, n), (gz * dy_dx)]
    test_op = tensor.Elemwise(TestOp())
    x = tensor.scalar()
    # Gradient w.r.t. x is implemented and must succeed.
    tensor.grad(test_op(2, x), x)
    try:
        # Gradient w.r.t. the first input must fail with NullTypeGradError.
        tensor.grad(test_op(x, 2), x)
        assert False
    except theano.gradient.NullTypeGradError:
        pass
[ "def", "test_not_implemented_elemwise_grad", "(", ")", ":", "class", "TestOp", "(", "scalar", ".", "ScalarOp", ",", ")", ":", "def", "__init__", "(", "self", ")", ":", "self", ".", "output_types_preference", "=", "scalar", ".", "upgrade_to_float", "def", "impl...
regression test for unimplemented gradient in an elemwise op .
train
false
42,964
def overridden_settings(settings):
    """Yield ``(name, value)`` pairs for settings that differ from their defaults.

    Settings whose default value is a dict are never reported.
    """
    for (name, default) in iter_default_settings():
        current = settings[name]
        if (not isinstance(default, dict)) and (current != default):
            (yield (name, current))
[ "def", "overridden_settings", "(", "settings", ")", ":", "for", "(", "name", ",", "defvalue", ")", "in", "iter_default_settings", "(", ")", ":", "value", "=", "settings", "[", "name", "]", "if", "(", "(", "not", "isinstance", "(", "defvalue", ",", "dict"...
return a dict of the settings that have been overridden .
train
false
42,965
def _splitPrefix(name): ntok = name.split(':', 1) if (len(ntok) == 2): return ntok else: return (None, ntok[0])
[ "def", "_splitPrefix", "(", "name", ")", ":", "ntok", "=", "name", ".", "split", "(", "':'", ",", "1", ")", "if", "(", "len", "(", "ntok", ")", "==", "2", ")", ":", "return", "ntok", "else", ":", "return", "(", "None", ",", "ntok", "[", "0", ...
internal method for splitting a prefixed element name into its respective parts .
train
false
42,966
def parse_token_response(body, scope=None):
    """Parse an OAuth2 token response body into a validated OAuth2Token.

    Accepts JSON bodies and, as a fallback, form-encoded bodies.  Normalizes
    'expires'/'expires_in' to ints, computes an absolute 'expires_at', and
    splits the scope string into a list.
    """
    try:
        params = json.loads(body)
    except ValueError:
        # Some providers return form-encoded bodies instead of JSON.
        params = dict(urlparse.parse_qsl(body))
    for key in (u'expires_in', u'expires'):
        if (key in params):
            params[key] = int(params[key])
    if (u'scope' in params):
        params[u'scope'] = scope_to_list(params[u'scope'])
    if (u'expires' in params):
        # Normalize the non-standard 'expires' key to 'expires_in'.
        params[u'expires_in'] = params.pop(u'expires')
    if (u'expires_in' in params):
        # Absolute expiry timestamp derived from the relative lifetime.
        params[u'expires_at'] = (time.time() + int(params[u'expires_in']))
    params = OAuth2Token(params, old_scope=scope)
    validate_token_parameters(params)
    return params
[ "def", "parse_token_response", "(", "body", ",", "scope", "=", "None", ")", ":", "try", ":", "params", "=", "json", ".", "loads", "(", "body", ")", "except", "ValueError", ":", "params", "=", "dict", "(", "urlparse", ".", "parse_qsl", "(", "body", ")",...
parse the json token response body into a dict .
train
false
42,967
def hpsModelAnal(x, fs, w, N, H, t, nH, minf0, maxf0, f0et, harmDevSlope, minSineDur, Ns, stocf):
    """Harmonic-plus-stochastic analysis of a sound.

    Extracts harmonic tracks, subtracts them from the input, and models the
    residual with a stochastic envelope.  Returns
    ``(hfreq, hmag, hphase, stocEnv)``.
    """
    (hfreq, hmag, hphase) = HM.harmonicModelAnal(x, fs, w, N, H, t, nH, minf0, maxf0, f0et, harmDevSlope, minSineDur)
    residual = UF.sineSubtraction(x, Ns, H, hfreq, hmag, hphase, fs)
    stocEnv = STM.stochasticModelAnal(residual, H, (H * 2), stocf)
    return (hfreq, hmag, hphase, stocEnv)
[ "def", "hpsModelAnal", "(", "x", ",", "fs", ",", "w", ",", "N", ",", "H", ",", "t", ",", "nH", ",", "minf0", ",", "maxf0", ",", "f0et", ",", "harmDevSlope", ",", "minSineDur", ",", "Ns", ",", "stocf", ")", ":", "(", "hfreq", ",", "hmag", ",", ...
analysis of a sound using the harmonic plus stochastic model x: input sound .
train
false
42,968
def country_timezones(iso3166_code):
    """Return the list of timezone names used in the given ISO 3166 country.

    The zone.tab resource is parsed once and memoized in the module-level
    _country_timezones_cache; raises KeyError for unknown country codes.
    """
    iso3166_code = iso3166_code.upper()
    # Populate the cache lazily from zone.tab on first use.
    if (not _country_timezones_cache):
        zone_tab = open_resource('zone.tab')
        for line in zone_tab:
            if line.startswith('#'):
                continue
            # Fields: country code, coordinates, zone name (optional comment).
            (code, coordinates, zone) = line.split(None, 4)[:3]
            try:
                _country_timezones_cache[code].append(zone)
            except KeyError:
                _country_timezones_cache[code] = [zone]
    return _country_timezones_cache[iso3166_code]
[ "def", "country_timezones", "(", "iso3166_code", ")", ":", "iso3166_code", "=", "iso3166_code", ".", "upper", "(", ")", "if", "(", "not", "_country_timezones_cache", ")", ":", "zone_tab", "=", "open_resource", "(", "'zone.tab'", ")", "for", "line", "in", "zone...
return a list of timezones used in a particular country .
train
false
42,970
def _generate_cache_key(request, headerlist, key_prefix): ctx = md5.new() for header in headerlist: value = request.META.get(header, None) if (value is not None): ctx.update(value) return ('views.decorators.cache.cache_page.%s.%s.%s' % (key_prefix, request.path, ctx.hexdigest()))
[ "def", "_generate_cache_key", "(", "request", ",", "headerlist", ",", "key_prefix", ")", ":", "ctx", "=", "md5", ".", "new", "(", ")", "for", "header", "in", "headerlist", ":", "value", "=", "request", ".", "META", ".", "get", "(", "header", ",", "None...
returns a cache key from the headers given in the header list .
train
false
42,973
def greedy_partition(counts, n):
    """Greedily spread weighted keys over ``n`` buckets, largest weight first.

    Each key is placed into the currently lightest bucket.  Returns
    ``(buckets, fill_levels)``.
    """
    buckets = [[] for _ in range(n)]
    fill_levels = [0] * n
    for key in sorted(counts, key=lambda c: counts[c], reverse=True):
        target = fill_levels.index(min(fill_levels))
        buckets[target].append(key)
        fill_levels[target] += counts[key]
    return (buckets, fill_levels)
[ "def", "greedy_partition", "(", "counts", ",", "n", ")", ":", "buckets", "=", "[", "[", "]", "for", "i", "in", "range", "(", "n", ")", "]", "fill_levels", "=", "[", "0", "for", "i", "in", "range", "(", "n", ")", "]", "for", "key", "in", "sorted...
distribute k counts evenly across n buckets .
train
false
42,974
def x_server_test(f, cond, msg):
    """Mark a test class or function as skipped unless ``cond`` holds.

    Classes (including Python 2 old-style classes via types.ClassType) get
    unittest's skip attributes set directly; plain callables are wrapped
    with ``unittest.skipIf``.
    """
    if isinstance(f, (type, types.ClassType)):
        if (not cond):
            f.__unittest_skip__ = True
            f.__unittest_skip_why__ = msg
        return f

    @unittest.skipIf((not cond), msg)
    def wrapped_fn(*args, **kwargs):
        return f(*args, **kwargs)
    return wrapped_fn
[ "def", "x_server_test", "(", "f", ",", "cond", ",", "msg", ")", ":", "if", "isinstance", "(", "f", ",", "(", "type", ",", "types", ".", "ClassType", ")", ")", ":", "if", "(", "not", "cond", ")", ":", "f", ".", "__unittest_skip__", "=", "True", "f...
decorator to label test classes or instance methods as x_only .
train
false
42,975
def dmp_add_mul(f, g, h, u, K):
    """Return ``f + g*h`` for dense multivariate polynomials over ``K``."""
    product = dmp_mul(g, h, u, K)
    return dmp_add(f, product, u, K)
[ "def", "dmp_add_mul", "(", "f", ",", "g", ",", "h", ",", "u", ",", "K", ")", ":", "return", "dmp_add", "(", "f", ",", "dmp_mul", "(", "g", ",", "h", ",", "u", ",", "K", ")", ",", "u", ",", "K", ")" ]
returns f + g*h where f .
train
false
42,976
def _init_tab_completion():
    """Create the tab-completion model and register it in ``_instances``."""
    log.completion.debug('Initializing tab completion.')
    with debug.log_time(log.completion, 'tab completion init'):
        _instances[usertypes.Completion.tab] = miscmodels.TabCompletionModel()
[ "def", "_init_tab_completion", "(", ")", ":", "log", ".", "completion", ".", "debug", "(", "'Initializing tab completion.'", ")", "with", "debug", ".", "log_time", "(", "log", ".", "completion", ",", "'tab completion init'", ")", ":", "model", "=", "miscmodels",...
initialize the tab completion model .
train
false
42,978
def _formatRouteBody(data, schema_store):
    """Yield the documentation lines describing one Klein route.

    Emits the route description, the optional request/response JSON schema
    sections, each example (with placeholder substitutions applied), and
    finally the raw input/output schema listings.
    """
    # Stable placeholder values substituted into every example.
    baseSubstitutions = {u'DOMAIN': u'example.com', u'NODE_0': u'cf0f0346-17b2-4812-beca-1434997d6c3f', u'NODE_1': u'7ec3c4eb-6b1c-43da-8015-a163f7d15244'}
    for line in data['description']:
        (yield line)
    if ('input' in data):
        for line in _formatActualSchema(data['input_schema'], '+ Request JSON Schema', schema_store):
            (yield line)
    if ('output' in data):
        for line in _formatActualSchema(data['output_schema'], '+ Response JSON Schema', schema_store):
            (yield line)
    for example in data['examples']:
        # Each example gets its own copy so substitutions do not leak.
        substitutions = baseSubstitutions.copy()
        for line in _formatExample(example, substitutions):
            (yield line)
    if ('input' in data):
        for line in _formatSchema(data['input'], True):
            (yield line)
    if ('output' in data):
        for line in _formatSchema(data['output'], False):
            (yield line)
[ "def", "_formatRouteBody", "(", "data", ",", "schema_store", ")", ":", "baseSubstitutions", "=", "{", "u'DOMAIN'", ":", "u'example.com'", ",", "u'NODE_0'", ":", "u'cf0f0346-17b2-4812-beca-1434997d6c3f'", ",", "u'NODE_1'", ":", "u'7ec3c4eb-6b1c-43da-8015-a163f7d15244'", "}...
generate the description of a l{klein} route .
train
false
42,980
def test_gmail_many_folders_one_role(monkeypatch, constants):
    """Accounts with duplicate system-like folders must map each role to exactly one folder."""
    folders = constants['gmail_folders']
    # Add look-alike Trash/Sent folders to provoke duplicate-role assignment.
    duplicates = [('\\HasNoChildren', '/', u'[Imap]/Trash'), ('\\HasNoChildren', '/', u'[Imap]/Sent')]
    folders += duplicates
    client = patch_gmail_client(monkeypatch, folders)
    raw_folders = client.folders()
    folder_names = client.folder_names()
    for role in ['inbox', 'all', 'trash', 'drafts', 'important', 'sent', 'spam', 'starred']:
        assert (role in folder_names)
        # Exactly one raw folder may carry each role.
        # NOTE(review): len() on filter/map requires Python 2 — wrap in list() on Python 3.
        test_set = filter((lambda x: (x == role)), map((lambda y: y.role), raw_folders))
        assert (len(test_set) == 1), 'assigned wrong number of {}'.format(role)
        names = folder_names[role]
        assert isinstance(names, list)
        assert (len(names) == 1), ('assign same role to %s folders' % len(names))
[ "def", "test_gmail_many_folders_one_role", "(", "monkeypatch", ",", "constants", ")", ":", "folders", "=", "constants", "[", "'gmail_folders'", "]", "duplicates", "=", "[", "(", "'\\\\HasNoChildren'", ",", "'/'", ",", "u'[Imap]/Trash'", ")", ",", "(", "'\\\\HasNoC...
tests that accounts with many folders with similar system folders have only one role .
train
false
42,981
def custom_check(cmd, ignore_retcode=False):
    """Run ``cmd`` via custom_popen and return its stdout.

    Raises RarExecError on a non-zero exit status unless ``ignore_retcode``.
    """
    proc = custom_popen(cmd)
    (out, _) = proc.communicate()
    if (proc.returncode and (not ignore_retcode)):
        raise RarExecError('Check-run failed')
    return out
[ "def", "custom_check", "(", "cmd", ",", "ignore_retcode", "=", "False", ")", ":", "p", "=", "custom_popen", "(", "cmd", ")", "(", "out", ",", "_", ")", "=", "p", ".", "communicate", "(", ")", "if", "(", "p", ".", "returncode", "and", "(", "not", ...
run command .
train
true
42,982
def text_dialog(text, title):
    """Show ``text`` read-only in a modal dialog titled ``title``; return the dialog."""
    parent = qtutils.active_window()
    message = QtWidgets.QLabel(parent)
    message.setFont(qtutils.diff_font())
    message.setText(text)
    message.setTextInteractionFlags(Qt.NoTextInteraction)
    dialog = QtWidgets.QDialog(parent)
    dialog.setWindowModality(Qt.WindowModal)
    dialog.setWindowTitle(title)
    dialog.setLayout(qtutils.hbox(defs.margin, defs.spacing, message))
    # Several convenient keys all close the dialog.
    qtutils.add_action(dialog, N_(u'Close'), dialog.accept, Qt.Key_Question, Qt.Key_Enter, Qt.Key_Return)
    dialog.show()
    return dialog
[ "def", "text_dialog", "(", "text", ",", "title", ")", ":", "parent", "=", "qtutils", ".", "active_window", "(", ")", "label", "=", "QtWidgets", ".", "QLabel", "(", "parent", ")", "label", ".", "setFont", "(", "qtutils", ".", "diff_font", "(", ")", ")",...
show a wall of text in a dialog .
train
false
42,983
def test_file_system_enumerations():
    """Sanity check that .NET Directory.EnumerateFileSystemEntries matches os.listdir.

    IronPython-only: requires the CLR ``System`` namespace and ``AreEqual``.
    """
    import os
    os_dir = os.listdir('.')
    os_dir.sort()
    # [2:] presumably strips a leading './' from each entry — confirm on IronPython.
    enum_dir = [x[2:] for x in System.IO.Directory.EnumerateFileSystemEntries('.')]
    enum_dir.sort()
    AreEqual(os_dir, enum_dir)
[ "def", "test_file_system_enumerations", "(", ")", ":", "import", "os", "os_dir", "=", "os", ".", "listdir", "(", "'.'", ")", "os_dir", ".", "sort", "(", ")", "enum_dir", "=", "[", "x", "[", "2", ":", "]", "for", "x", "in", "System", ".", "IO", ".",...
URL only minimal sanity tests should be needed .
train
false
42,984
def task_id_eq(task_id, body, message):
    """Predicate: does the message body's ``id`` match ``task_id``?

    ``message`` is accepted for callback-signature compatibility and unused.
    """
    return (task_id == body[u'id'])
[ "def", "task_id_eq", "(", "task_id", ",", "body", ",", "message", ")", ":", "return", "(", "body", "[", "u'id'", "]", "==", "task_id", ")" ]
return true if task id equals task_id .
train
false
42,986
def make_camera(cam_type, *args, **kwargs):
    """Instantiate a camera by its lowercase string name (None for the base camera).

    Raises KeyError listing the valid options for an unknown name.
    """
    cam_types = {None: BaseCamera}
    for cls in (BaseCamera, PanZoomCamera, PerspectiveCamera, TurntableCamera, FlyCamera, ArcballCamera):
        # 'PanZoomCamera' -> 'panzoom': drop the 'Camera' suffix, lowercase.
        cam_types[cls.__name__[:(-6)].lower()] = cls
    try:
        return cam_types[cam_type](*args, **kwargs)
    except KeyError:
        raise KeyError(('Unknown camera type "%s". Options are: %s' % (cam_type, cam_types.keys())))
[ "def", "make_camera", "(", "cam_type", ",", "*", "args", ",", "**", "kwargs", ")", ":", "cam_types", "=", "{", "None", ":", "BaseCamera", "}", "for", "camType", "in", "(", "BaseCamera", ",", "PanZoomCamera", ",", "PerspectiveCamera", ",", "TurntableCamera", ...
factory function for creating new cameras using a string name .
train
true
42,987
def gemset_create(ruby, gemset, runas=None):
    """Create ``gemset`` under the given ``ruby`` via rvm, optionally as ``runas``."""
    command = ['rvm', 'gemset', 'create', gemset]
    return _rvm_do(ruby, command, runas=runas)
[ "def", "gemset_create", "(", "ruby", ",", "gemset", ",", "runas", "=", "None", ")", ":", "return", "_rvm_do", "(", "ruby", ",", "[", "'rvm'", ",", "'gemset'", ",", "'create'", ",", "gemset", "]", ",", "runas", "=", "runas", ")" ]
creates a gemset .
train
false
42,988
def has_override_value(name):
    """True when the active site configuration (or, failing that, the microsite)
    defines an override for ``name``."""
    if is_site_configuration_enabled():
        return has_configuration_override(name)
    return microsite.has_override_value(name)
[ "def", "has_override_value", "(", "name", ")", ":", "if", "is_site_configuration_enabled", "(", ")", ":", "return", "has_configuration_override", "(", "name", ")", "else", ":", "return", "microsite", ".", "has_override_value", "(", "name", ")" ]
returns true/false whether a microsite has a definition for the specified named value .
train
false
42,989
def load_version():
    """Execute ``imblearn/version.py`` and return its globals (e.g. ``__version__``).

    Uses the function form of ``exec``, which works on both Python 2 and 3;
    the original ``exec ... in ...`` statement is Python-2-only syntax.
    """
    globals_dict = {}
    version_path = os.path.join('imblearn', 'version.py')
    # utf-8-sig transparently drops a BOM if one is present.
    with codecs.open(version_path, encoding='utf-8-sig') as fp:
        exec(fp.read(), globals_dict)
    return globals_dict
[ "def", "load_version", "(", ")", ":", "globals_dict", "=", "{", "}", "with", "codecs", ".", "open", "(", "os", ".", "path", ".", "join", "(", "'imblearn'", ",", "'version.py'", ")", ",", "encoding", "=", "'utf-8-sig'", ")", "as", "fp", ":", "exec", "...
executes imblearn/version .
train
false
42,990
def makescript(filename, compiler):
    """Write the SCRIPT wrapper for ``compiler`` to ``filename`` and make it executable.

    Creates the parent directory when it is not accessible.  493 == 0o755.
    """
    dirname = os.path.split(filename)[0]
    if (not os.access(dirname, os.X_OK)):
        os.mkdir(dirname, 493)
    fp = open(filename, 'w')
    fp.write((SCRIPT % compiler))
    fp.close()
    os.chmod(filename, 493)
    # Single-argument print() behaves identically on Python 2 and 3,
    # unlike the original Python-2-only print statement.
    print('fixapplepython23: Created %s' % filename)
[ "def", "makescript", "(", "filename", ",", "compiler", ")", ":", "dirname", "=", "os", ".", "path", ".", "split", "(", "filename", ")", "[", "0", "]", "if", "(", "not", "os", ".", "access", "(", "dirname", ",", "os", ".", "X_OK", ")", ")", ":", ...
create a wrapper script for a compiler .
train
false
42,994
def formatRARVersion(field):
    """Decode a one-byte RAR version value as a 'major.minor' string."""
    (major, minor) = divmod(field.value, 10)
    return ('%u.%u' % (major, minor))
[ "def", "formatRARVersion", "(", "field", ")", ":", "return", "(", "'%u.%u'", "%", "divmod", "(", "field", ".", "value", ",", "10", ")", ")" ]
decodes the rar version stored on 1 byte .
train
false
42,996
def FindRegisterPythonExe(exeAlias, searchPaths, actualFileNames=None):
    """Locate a Python executable by alias and register it when not already registered.

    Returns the executable's filename.
    """
    import regutil  # 'string' was imported here but never used; dropped.
    (fname, ok) = FindPythonExe(exeAlias, actualFileNames, searchPaths)
    # Only register when the lookup reports it is not yet registered.
    if (not ok):
        regutil.RegisterPythonExe(fname, exeAlias)
    return fname
[ "def", "FindRegisterPythonExe", "(", "exeAlias", ",", "searchPaths", ",", "actualFileNames", "=", "None", ")", ":", "import", "regutil", ",", "string", "(", "fname", ",", "ok", ")", "=", "FindPythonExe", "(", "exeAlias", ",", "actualFileNames", ",", "searchPat...
find and register a python exe assumes the core registry setup correctly .
train
false
42,997
def copy_source_sse_md5(params, **kwargs):
    """Fill in the base64 MD5 for the CopySource SSE-C request parameters.

    Delegates to ``_sse_md5`` with the 'CopySourceSSECustomer' prefix.
    """
    _sse_md5(params, 'CopySourceSSECustomer')
[ "def", "copy_source_sse_md5", "(", "params", ",", "**", "kwargs", ")", ":", "_sse_md5", "(", "params", ",", "'CopySourceSSECustomer'", ")" ]
s3 server-side encryption requires the encryption key to be sent to the server base64 encoded .
train
false
42,998
def db_init(db, conn_name):
    """Ensure the seen_user table exists, once per connection name.

    Records ``conn_name`` in the module-level ``db_ready`` list so repeated
    calls for the same connection are no-ops.
    """
    global db_ready
    # Membership test replaces the O(n)-then-compare ``.count(...) < 1`` idiom.
    if conn_name not in db_ready:
        db.execute('create table if not exists seen_user(name, time, quote, chan, host, primary key(name, chan))')
        db.commit()
        db_ready.append(conn_name)
[ "def", "db_init", "(", "db", ",", "conn_name", ")", ":", "global", "db_ready", "if", "(", "db_ready", ".", "count", "(", "conn_name", ")", "<", "1", ")", ":", "db", ".", "execute", "(", "'create table if not exists seen_user(name, time, quote, chan, host, primary ...
check to see that our db has the the seen table :type db: sqlalchemy .
train
false
42,999
def render_label(content, label_for=None, label_class=None, label_title=u''):
    """Render an HTML <label> tag around ``content`` with optional
    for/class/title attributes (empty values are omitted)."""
    candidates = ((u'for', label_for), (u'class', label_class), (u'title', label_title))
    attrs = {key: val for (key, val) in candidates if val}
    return render_tag(u'label', attrs=attrs, content=content)
[ "def", "render_label", "(", "content", ",", "label_for", "=", "None", ",", "label_class", "=", "None", ",", "label_title", "=", "u''", ")", ":", "attrs", "=", "{", "}", "if", "label_for", ":", "attrs", "[", "u'for'", "]", "=", "label_for", "if", "label...
render a label with content .
train
false
43,000
def save_callback(operation, graphdef):
    """Handle a save-thread callback: 'test' previews the first valid
    expression, 'save' persists the graph definition."""
    if (operation == 'save'):
        save_graph(graphdef, mestate)
        return
    if (operation != 'test'):
        return
    for candidate in graphdef.expressions:
        if expression_ok(candidate):
            graphdef.expression = candidate
            display_graph(graphdef)
            return
    mestate.console.writeln('Invalid graph expressions', fg='red')
[ "def", "save_callback", "(", "operation", ",", "graphdef", ")", ":", "if", "(", "operation", "==", "'test'", ")", ":", "for", "e", "in", "graphdef", ".", "expressions", ":", "if", "expression_ok", "(", "e", ")", ":", "graphdef", ".", "expression", "=", ...
callback from save thread .
train
true
43,001
def s3_set_extension(url, extension=None):
    """Replace the file extension on a URL's path.

    When ``extension`` is None it falls back to ``s3_get_extension()``.
    Anything after the first '.' in each path segment is stripped before
    the new extension is appended to the last segment.
    """
    if extension is None:  # identity test for None, not '== None'
        extension = s3_get_extension()
    u = urlparse.urlparse(url)
    path = u.path
    if path:
        if ('.' in path):
            # Drop existing dotted extensions from every segment.
            elements = [p.split('.')[0] for p in path.split('/')]
        else:
            elements = path.split('/')
        if (extension and elements[(-1)]):
            elements[(-1)] += ('.%s' % extension)
        path = '/'.join(elements)
    return urlparse.urlunparse((u.scheme, u.netloc, path, u.params, u.query, u.fragment))
[ "def", "s3_set_extension", "(", "url", ",", "extension", "=", "None", ")", ":", "if", "(", "extension", "==", "None", ")", ":", "extension", "=", "s3_get_extension", "(", ")", "u", "=", "urlparse", ".", "urlparse", "(", "url", ")", "path", "=", "u", ...
add a file extension to the path of a url .
train
false
43,003
def test_debug_logging():
    """Exercise use_log_level's level filtering, message matching and recording."""
    # Level + match string: only debug records containing 'Selected' are kept.
    with use_log_level('debug', 'Selected', True, False) as l:
        logger.debug('Selected foo')
    assert_equal(len(l), 1)
    assert_in('test_logging', l[0])
    # Record everything at debug level.
    with use_log_level('debug', record=True, print_msg=False) as l:
        logger.debug('foo')
    assert_equal(len(l), 1)
    assert_in('test_logging', l[0])
    # A non-matching message is filtered out.
    with use_log_level('debug', 'foo', True, False) as l:
        logger.debug('bar')
    assert_equal(len(l), 0)
    # At 'info' level the debug message is dropped, the info one kept.
    with use_log_level('info', record=True, print_msg=False) as l:
        logger.debug('foo')
        logger.info('bar')
    assert_equal(len(l), 1)
    assert_not_in('unknown', l[0])
[ "def", "test_debug_logging", "(", ")", ":", "with", "use_log_level", "(", "'debug'", ",", "'Selected'", ",", "True", ",", "False", ")", "as", "l", ":", "logger", ".", "debug", "(", "'Selected foo'", ")", "assert_equal", "(", "len", "(", "l", ")", ",", ...
test advanced debugging logging .
train
false
43,004
def makeCategoricalIndex(k=10, n=3, name=None):
    """Build a length-``k`` CategoricalIndex drawn from ``n`` random 4-char categories."""
    categories = rands_array(nchars=4, size=n)
    chosen = np.random.choice(categories, k)
    return CategoricalIndex(chosen, name=name)
[ "def", "makeCategoricalIndex", "(", "k", "=", "10", ",", "n", "=", "3", ",", "name", "=", "None", ")", ":", "x", "=", "rands_array", "(", "nchars", "=", "4", ",", "size", "=", "n", ")", "return", "CategoricalIndex", "(", "np", ".", "random", ".", ...
make a length k index or n categories .
train
false
43,005
def as_json(*args, **kwargs): arg = (args or kwargs) return jsonutils.dumps(arg)
[ "def", "as_json", "(", "*", "args", ",", "**", "kwargs", ")", ":", "arg", "=", "(", "args", "or", "kwargs", ")", "return", "jsonutils", ".", "dumps", "(", "arg", ")" ]
helper function for simulating xenapi plugin responses for those that are returning json .
train
false
43,006
@inspect_command(alias=u'dump_conf', signature=u'[include_defaults=False]', args=[(u'with_defaults', strtobool)]) def conf(state, with_defaults=False, **kwargs): return jsonify(state.app.conf.table(with_defaults=with_defaults), keyfilter=_wanted_config_key, unknown_type_filter=safe_repr)
[ "@", "inspect_command", "(", "alias", "=", "u'dump_conf'", ",", "signature", "=", "u'[include_defaults=False]'", ",", "args", "=", "[", "(", "u'with_defaults'", ",", "strtobool", ")", "]", ")", "def", "conf", "(", "state", ",", "with_defaults", "=", "False", ...
list configuration .
train
false
43,007
@register.simple_tag(takes_context=True) def expand_fragment_header_link(context, header): lines_of_context = context[u'lines_of_context'] offset = (context[u'first_line'] - header[u'line']) return render_to_string(u'reviews/expand_link.html', {u'tooltip': _(u'Expand to header'), u'text': format_html(u'<code>{0}</code>', header[u'text']), u'comment_id': context[u'comment'].id, u'expand_pos': ((lines_of_context[0] + offset), lines_of_context[1]), u'image_class': u'rb-icon-diff-expand-header'})
[ "@", "register", ".", "simple_tag", "(", "takes_context", "=", "True", ")", "def", "expand_fragment_header_link", "(", "context", ",", "header", ")", ":", "lines_of_context", "=", "context", "[", "u'lines_of_context'", "]", "offset", "=", "(", "context", "[", ...
render a diff comment fragment header expansion link .
train
false
43,008
def assert_no_overwrite(call, shapes, dtypes=None): if (dtypes is None): dtypes = [np.float32, np.float64, np.complex64, np.complex128] for dtype in dtypes: for order in ['C', 'F']: for faker in [_id, _FakeMatrix, _FakeMatrix2]: orig_inputs = [_get_array(s, dtype) for s in shapes] inputs = [faker(x.copy(order)) for x in orig_inputs] call(*inputs) msg = ('call modified inputs [%r, %r]' % (dtype, faker)) for (a, b) in zip(inputs, orig_inputs): np.testing.assert_equal(a, b, err_msg=msg)
[ "def", "assert_no_overwrite", "(", "call", ",", "shapes", ",", "dtypes", "=", "None", ")", ":", "if", "(", "dtypes", "is", "None", ")", ":", "dtypes", "=", "[", "np", ".", "float32", ",", "np", ".", "float64", ",", "np", ".", "complex64", ",", "np"...
test that a call does not overwrite its input arguments .
train
false
43,010
def p_shift_expression_3(t): pass
[ "def", "p_shift_expression_3", "(", "t", ")", ":", "pass" ]
shift_expression : shift_expression rshift additive_expression .
train
false
43,011
def mean_variance_axis(X, axis): _raise_error_wrong_axis(axis) if isinstance(X, sp.csr_matrix): if (axis == 0): return _csr_mean_var_axis0(X) else: return _csc_mean_var_axis0(X.T) elif isinstance(X, sp.csc_matrix): if (axis == 0): return _csc_mean_var_axis0(X) else: return _csr_mean_var_axis0(X.T) else: _raise_typeerror(X)
[ "def", "mean_variance_axis", "(", "X", ",", "axis", ")", ":", "_raise_error_wrong_axis", "(", "axis", ")", "if", "isinstance", "(", "X", ",", "sp", ".", "csr_matrix", ")", ":", "if", "(", "axis", "==", "0", ")", ":", "return", "_csr_mean_var_axis0", "(",...
compute mean and variance along an axix on a csr or csc matrix parameters x : csr or csc sparse matrix .
train
false
43,012
def isLoopIntersectingLoops(loop, otherLoops): for pointIndex in xrange(len(loop)): pointBegin = loop[pointIndex] pointEnd = loop[((pointIndex + 1) % len(loop))] if isLineIntersectingLoops(otherLoops, pointBegin, pointEnd): return True return False
[ "def", "isLoopIntersectingLoops", "(", "loop", ",", "otherLoops", ")", ":", "for", "pointIndex", "in", "xrange", "(", "len", "(", "loop", ")", ")", ":", "pointBegin", "=", "loop", "[", "pointIndex", "]", "pointEnd", "=", "loop", "[", "(", "(", "pointInde...
determine if the loop is intersecting other loops .
train
false
43,013
@cache_permission def can_edit_flags(user, project): return check_permission(user, project, 'trans.edit_flags')
[ "@", "cache_permission", "def", "can_edit_flags", "(", "user", ",", "project", ")", ":", "return", "check_permission", "(", "user", ",", "project", ",", "'trans.edit_flags'", ")" ]
checks whether user can edit translation flags .
train
false
43,015
def get_pool_name_filter_regex(configuration): pool_patterns = (configuration.netapp_pool_name_search_pattern or '(.+)') pool_patterns = '|'.join([(('^' + pool_pattern.strip('^$ DCTB ')) + '$') for pool_pattern in pool_patterns.split(',')]) try: return re.compile(pool_patterns) except re.error: raise exception.InvalidConfigurationValue(option='netapp_pool_name_search_pattern', value=configuration.netapp_pool_name_search_pattern)
[ "def", "get_pool_name_filter_regex", "(", "configuration", ")", ":", "pool_patterns", "=", "(", "configuration", ".", "netapp_pool_name_search_pattern", "or", "'(.+)'", ")", "pool_patterns", "=", "'|'", ".", "join", "(", "[", "(", "(", "'^'", "+", "pool_pattern", ...
build the regex for filtering pools by name .
train
false
43,016
def truncate_month(dt, measure): months = ((dt.year * 12) + dt.month) months = ((months // measure) * measure) return date(((months - 1) // 12), (((months - 1) % 12) + 1), 1)
[ "def", "truncate_month", "(", "dt", ",", "measure", ")", ":", "months", "=", "(", "(", "dt", ".", "year", "*", "12", ")", "+", "dt", ".", "month", ")", "months", "=", "(", "(", "months", "//", "measure", ")", "*", "measure", ")", "return", "date"...
truncate by months .
train
false
43,017
def generate_static_name(name, base=None): if base: path = os.path.join(base, name) else: path = name sha = hashlib.sha1(open(path).read()).digest() shorthash = base64.urlsafe_b64encode(sha[0:8]).rstrip('=') (name, ext) = os.path.splitext(name) return (((name + '.') + shorthash) + ext)
[ "def", "generate_static_name", "(", "name", ",", "base", "=", "None", ")", ":", "if", "base", ":", "path", "=", "os", ".", "path", ".", "join", "(", "base", ",", "name", ")", "else", ":", "path", "=", "name", "sha", "=", "hashlib", ".", "sha1", "...
generate a unique filename .
train
false
43,018
def _possibly_convert_platform(values): if isinstance(values, (list, tuple)): values = lib.list_to_object_array(list(values)) if (getattr(values, 'dtype', None) == np.object_): if hasattr(values, '_values'): values = values._values values = lib.maybe_convert_objects(values) return values
[ "def", "_possibly_convert_platform", "(", "values", ")", ":", "if", "isinstance", "(", "values", ",", "(", "list", ",", "tuple", ")", ")", ":", "values", "=", "lib", ".", "list_to_object_array", "(", "list", "(", "values", ")", ")", "if", "(", "getattr",...
try to do platform conversion .
train
false
43,019
def train_op_fun(total_loss, global_step): nb_ex_per_train_epoch = int((60000 / FLAGS.nb_teachers)) num_batches_per_epoch = (nb_ex_per_train_epoch / FLAGS.batch_size) decay_steps = int((num_batches_per_epoch * FLAGS.epochs_per_decay)) initial_learning_rate = (float(FLAGS.learning_rate) / 100.0) lr = tf.train.exponential_decay(initial_learning_rate, global_step, decay_steps, LEARNING_RATE_DECAY_FACTOR, staircase=True) tf.scalar_summary('learning_rate', lr) loss_averages_op = moving_av(total_loss) with tf.control_dependencies([loss_averages_op]): opt = tf.train.GradientDescentOptimizer(lr) grads = opt.compute_gradients(total_loss) apply_gradient_op = opt.apply_gradients(grads, global_step=global_step) for var in tf.trainable_variables(): tf.histogram_summary(var.op.name, var) variable_averages = tf.train.ExponentialMovingAverage(MOVING_AVERAGE_DECAY, global_step) variables_averages_op = variable_averages.apply(tf.trainable_variables()) with tf.control_dependencies([apply_gradient_op, variables_averages_op]): train_op = tf.no_op(name='train') return train_op
[ "def", "train_op_fun", "(", "total_loss", ",", "global_step", ")", ":", "nb_ex_per_train_epoch", "=", "int", "(", "(", "60000", "/", "FLAGS", ".", "nb_teachers", ")", ")", "num_batches_per_epoch", "=", "(", "nb_ex_per_train_epoch", "/", "FLAGS", ".", "batch_size...
train model .
train
false
43,021
@conf.commands.register def arping(net, timeout=2, cache=0, verbose=None, **kargs): if (verbose is None): verbose = conf.verb (ans, unans) = srp((Ether(dst='ff:ff:ff:ff:ff:ff') / ARP(pdst=net)), verbose=verbose, filter='arp and arp[7] = 2', timeout=timeout, iface_hint=net, **kargs) ans = ARPingResult(ans.res) if (cache and (ans is not None)): for pair in ans: arp_cache[pair[1].psrc] = (pair[1].hwsrc, time.time()) if verbose: ans.show() return (ans, unans)
[ "@", "conf", ".", "commands", ".", "register", "def", "arping", "(", "net", ",", "timeout", "=", "2", ",", "cache", "=", "0", ",", "verbose", "=", "None", ",", "**", "kargs", ")", ":", "if", "(", "verbose", "is", "None", ")", ":", "verbose", "=",...
send arp who-has requests to determine which hosts are up arping -> none set cache=true if you want arping to modify internal arp-cache .
train
true
43,022
def quote_from_bytes(bs, safe='/'): if (not isinstance(bs, (bytes, bytearray))): raise TypeError('quote_from_bytes() expected bytes') if (not bs): return '' if isinstance(safe, str): safe = safe.encode('ascii', 'ignore') else: safe = bytes([c for c in safe if (c < 128)]) if (not bs.rstrip((_ALWAYS_SAFE_BYTES + safe))): return bs.decode() try: quoter = _safe_quoters[safe] except KeyError: _safe_quoters[safe] = quoter = Quoter(safe).__getitem__ return ''.join([quoter(char) for char in bs])
[ "def", "quote_from_bytes", "(", "bs", ",", "safe", "=", "'/'", ")", ":", "if", "(", "not", "isinstance", "(", "bs", ",", "(", "bytes", ",", "bytearray", ")", ")", ")", ":", "raise", "TypeError", "(", "'quote_from_bytes() expected bytes'", ")", "if", "(",...
like quote() .
train
true
43,023
def p_command_print_empty(p): p[0] = ('PRINT', [], None)
[ "def", "p_command_print_empty", "(", "p", ")", ":", "p", "[", "0", "]", "=", "(", "'PRINT'", ",", "[", "]", ",", "None", ")" ]
command : print .
train
false
43,024
def transformVector3ByMatrix(tetragrid, vector3): if getIsIdentityTetragridOrNone(tetragrid): return transformVector3Blindly(tetragrid, vector3)
[ "def", "transformVector3ByMatrix", "(", "tetragrid", ",", "vector3", ")", ":", "if", "getIsIdentityTetragridOrNone", "(", "tetragrid", ")", ":", "return", "transformVector3Blindly", "(", "tetragrid", ",", "vector3", ")" ]
transform the vector3 by a matrix .
train
false
43,026
def IsDebug(): import imp for suffix_item in imp.get_suffixes(): if (suffix_item[0] == '_d.pyd'): return '_d' return ''
[ "def", "IsDebug", "(", ")", ":", "import", "imp", "for", "suffix_item", "in", "imp", ".", "get_suffixes", "(", ")", ":", "if", "(", "suffix_item", "[", "0", "]", "==", "'_d.pyd'", ")", ":", "return", "'_d'", "return", "''" ]
return "_d" if were running a debug version .
train
false
43,027
def make_pool(max_concurrency, max_members): return worker.TarUploadPool(FakeUploader(), max_concurrency, max_members)
[ "def", "make_pool", "(", "max_concurrency", ",", "max_members", ")", ":", "return", "worker", ".", "TarUploadPool", "(", "FakeUploader", "(", ")", ",", "max_concurrency", ",", "max_members", ")" ]
set up a pool with a fakeuploader .
train
false
43,028
def is_open_quote(sql): parsed = sqlparse.parse(sql) return any((_parsed_is_open_quote(p) for p in parsed))
[ "def", "is_open_quote", "(", "sql", ")", ":", "parsed", "=", "sqlparse", ".", "parse", "(", "sql", ")", "return", "any", "(", "(", "_parsed_is_open_quote", "(", "p", ")", "for", "p", "in", "parsed", ")", ")" ]
returns true if the query contains an unclosed quote .
train
false
43,030
def set_selected_item(widget, idx): if (type(widget) is QtWidgets.QTreeWidget): item = widget.topLevelItem(idx) if item: item.setSelected(True) widget.setCurrentItem(item)
[ "def", "set_selected_item", "(", "widget", ",", "idx", ")", ":", "if", "(", "type", "(", "widget", ")", "is", "QtWidgets", ".", "QTreeWidget", ")", ":", "item", "=", "widget", ".", "topLevelItem", "(", "idx", ")", "if", "item", ":", "item", ".", "set...
sets a the currently selected item to the item at index idx .
train
false
43,031
def binary_blobs(length=512, blob_size_fraction=0.1, n_dim=2, volume_fraction=0.5, seed=None): rs = np.random.RandomState(seed) shape = tuple(([length] * n_dim)) mask = np.zeros(shape) n_pts = max((int((1.0 / blob_size_fraction)) ** n_dim), 1) points = (length * rs.rand(n_dim, n_pts)).astype(np.int) mask[[indices for indices in points]] = 1 mask = gaussian(mask, sigma=((0.25 * length) * blob_size_fraction)) threshold = np.percentile(mask, (100 * (1 - volume_fraction))) return np.logical_not((mask < threshold))
[ "def", "binary_blobs", "(", "length", "=", "512", ",", "blob_size_fraction", "=", "0.1", ",", "n_dim", "=", "2", ",", "volume_fraction", "=", "0.5", ",", "seed", "=", "None", ")", ":", "rs", "=", "np", ".", "random", ".", "RandomState", "(", "seed", ...
generate synthetic binary image with several rounded blob-like objects .
train
false
43,032
def _meijerint_definite_3(f, x): res = _meijerint_definite_4(f, x) if (res and (res[1] != False)): return res if f.is_Add: _debug('Expanding and evaluating all terms.') ress = [_meijerint_definite_4(g, x) for g in f.args] if all(((r is not None) for r in ress)): conds = [] res = S(0) for (r, c) in ress: res += r conds += [c] c = And(*conds) if (c != False): return (res, c)
[ "def", "_meijerint_definite_3", "(", "f", ",", "x", ")", ":", "res", "=", "_meijerint_definite_4", "(", "f", ",", "x", ")", "if", "(", "res", "and", "(", "res", "[", "1", "]", "!=", "False", ")", ")", ":", "return", "res", "if", "f", ".", "is_Add...
try to integrate f dx from zero to infinity .
train
false
43,035
def get_tensor_parents(tensor): parents_list = [] parents_list.append(tensor) if tensor.op: for t in tensor.op.inputs: if (not ('read:0' in t.name)): parents_list += get_tensor_parents(t) return parents_list
[ "def", "get_tensor_parents", "(", "tensor", ")", ":", "parents_list", "=", "[", "]", "parents_list", ".", "append", "(", "tensor", ")", "if", "tensor", ".", "op", ":", "for", "t", "in", "tensor", ".", "op", ".", "inputs", ":", "if", "(", "not", "(", ...
get all calculation and data parent tensors .
train
false
43,036
def pixel_wise_softmax(output, name='pixel_wise_softmax'): with tf.name_scope(name) as scope: return tf.nn.softmax(output)
[ "def", "pixel_wise_softmax", "(", "output", ",", "name", "=", "'pixel_wise_softmax'", ")", ":", "with", "tf", ".", "name_scope", "(", "name", ")", "as", "scope", ":", "return", "tf", ".", "nn", ".", "softmax", "(", "output", ")" ]
return the softmax outputs of images .
train
false
43,038
def test_ast_non_decoratable(): cant_compile(u'(with-decorator (foo) (* x x))')
[ "def", "test_ast_non_decoratable", "(", ")", ":", "cant_compile", "(", "u'(with-decorator (foo) (* x x))'", ")" ]
ensure decorating garbage breaks .
train
false
43,040
def cut_from_dict(desc, cube=None): cut_type = desc['type'].lower() dim = desc.get('dimension') if (dim and cube): dim = cube.dimension(dim) if (cut_type == 'point'): return PointCut(dim, desc.get('path'), desc.get('hierarchy'), desc.get('invert', False)) elif (cut_type == 'set'): return SetCut(dim, desc.get('paths'), desc.get('hierarchy'), desc.get('invert', False)) elif (cut_type == 'range'): return RangeCut(dim, desc.get('from'), desc.get('to'), desc.get('hierarchy'), desc.get('invert', False)) else: raise ArgumentError(('Unknown cut type %s' % cut_type))
[ "def", "cut_from_dict", "(", "desc", ",", "cube", "=", "None", ")", ":", "cut_type", "=", "desc", "[", "'type'", "]", ".", "lower", "(", ")", "dim", "=", "desc", ".", "get", "(", "'dimension'", ")", "if", "(", "dim", "and", "cube", ")", ":", "dim...
returns a cut from desc dictionary .
train
false
43,041
def os_upgrade(): print green(('%s: Updating OS' % env.host)) run('apt-get update', pty=True) run('apt-get upgrade -y', pty=True)
[ "def", "os_upgrade", "(", ")", ":", "print", "green", "(", "(", "'%s: Updating OS'", "%", "env", ".", "host", ")", ")", "run", "(", "'apt-get update'", ",", "pty", "=", "True", ")", "run", "(", "'apt-get upgrade -y'", ",", "pty", "=", "True", ")" ]
update os .
train
false
43,042
def RenderNormalCdf(mu, sigma, low, high, n=101): xs = np.linspace(low, high, n) ps = stats.norm.cdf(xs, mu, sigma) return (xs, ps)
[ "def", "RenderNormalCdf", "(", "mu", ",", "sigma", ",", "low", ",", "high", ",", "n", "=", "101", ")", ":", "xs", "=", "np", ".", "linspace", "(", "low", ",", "high", ",", "n", ")", "ps", "=", "stats", ".", "norm", ".", "cdf", "(", "xs", ",",...
generates sequences of xs and ps for a normal cdf .
train
false
43,045
def almostEqualF(a, b, rel_err=2e-15, abs_err=5e-323): if math.isnan(a): return math.isnan(b) if math.isinf(a): return (a == b) if ((not a) and (not b)): return (math.copysign(1.0, a) == math.copysign(1.0, b)) try: absolute_error = abs((b - a)) except OverflowError: return False else: return (absolute_error <= max(abs_err, (rel_err * abs(a))))
[ "def", "almostEqualF", "(", "a", ",", "b", ",", "rel_err", "=", "2e-15", ",", "abs_err", "=", "5e-323", ")", ":", "if", "math", ".", "isnan", "(", "a", ")", ":", "return", "math", ".", "isnan", "(", "b", ")", "if", "math", ".", "isinf", "(", "a...
determine whether floating-point values a and b are equal to within a rounding error .
train
false
43,047
def _get_arg_list(name, fobj): trunc = 20 argspec = inspect.getargspec(fobj) arg_list = [] if argspec.args: for arg in argspec.args: arg_list.append(str(arg)) arg_list.reverse() if argspec.defaults: for i in range(len(argspec.defaults)): arg_list[i] = ((str(arg_list[i]) + '=') + str(argspec.defaults[(- i)])) arg_list.reverse() if argspec.varargs: arg_list.append(argspec.varargs) if argspec.keywords: arg_list.append(argspec.keywords) arg_list = [x[:trunc] for x in arg_list] str_param = ('%s(%s)' % (name, ', '.join(arg_list))) return str_param
[ "def", "_get_arg_list", "(", "name", ",", "fobj", ")", ":", "trunc", "=", "20", "argspec", "=", "inspect", ".", "getargspec", "(", "fobj", ")", "arg_list", "=", "[", "]", "if", "argspec", ".", "args", ":", "for", "arg", "in", "argspec", ".", "args", ...
given a function object .
train
false
43,049
def send_prowl(title, msg, gtype, force=False, test=None): if test: apikey = test.get('prowl_apikey') else: apikey = sabnzbd.cfg.prowl_apikey() if (not apikey): return T('Cannot send, missing required data') title = Tx(NOTIFICATION.get(gtype, 'other')) title = urllib2.quote(title.encode('utf8')) msg = urllib2.quote(msg.encode('utf8')) prio = get_prio(gtype, 'prowl') if force: prio = 0 if (prio > (-3)): url = ('https://api.prowlapp.com/publicapi/add?apikey=%s&application=SABnzbd&event=%s&description=%s&priority=%d' % (apikey, title, msg, prio)) try: urllib2.urlopen(url) return '' except: logging.warning(T('Failed to send Prowl message')) logging.info('Traceback: ', exc_info=True) return T('Failed to send Prowl message') return ''
[ "def", "send_prowl", "(", "title", ",", "msg", ",", "gtype", ",", "force", "=", "False", ",", "test", "=", "None", ")", ":", "if", "test", ":", "apikey", "=", "test", ".", "get", "(", "'prowl_apikey'", ")", "else", ":", "apikey", "=", "sabnzbd", "....
send message to prowl .
train
false
43,050
def _codeStatusSplit(line): parts = line.split(' ', 1) if (len(parts) == 1): return (parts[0], '') return parts
[ "def", "_codeStatusSplit", "(", "line", ")", ":", "parts", "=", "line", ".", "split", "(", "' '", ",", "1", ")", "if", "(", "len", "(", "parts", ")", "==", "1", ")", ":", "return", "(", "parts", "[", "0", "]", ",", "''", ")", "return", "parts" ...
parse the first line of a multi-line server response .
train
false
43,052
def add_neigh_entry(ip_address, mac_address, device, namespace=None, **kwargs): ip_version = get_ip_version(ip_address) privileged.add_neigh_entry(ip_version, ip_address, mac_address, device, namespace, **kwargs)
[ "def", "add_neigh_entry", "(", "ip_address", ",", "mac_address", ",", "device", ",", "namespace", "=", "None", ",", "**", "kwargs", ")", ":", "ip_version", "=", "get_ip_version", "(", "ip_address", ")", "privileged", ".", "add_neigh_entry", "(", "ip_version", ...
add a neighbour entry .
train
false
43,053
def samples_from_table(table, start=0, stop=(-1), rate=44100): samples = np.array(table.getTable()) if ((start, stop) != (0, (-1))): if (stop > start): samples = samples[(start * rate):(stop * rate)] elif start: samples = samples[(start * rate):] return samples
[ "def", "samples_from_table", "(", "table", ",", "start", "=", "0", ",", "stop", "=", "(", "-", "1", ")", ",", "rate", "=", "44100", ")", ":", "samples", "=", "np", ".", "array", "(", "table", ".", "getTable", "(", ")", ")", "if", "(", "(", "sta...
return samples as a np .
train
false
43,054
def _get_xywh(bb): w = ((bb[:, BB_XMAX_IDX] - bb[:, BB_XMIN_IDX]) + FRCN_EPS) h = ((bb[:, BB_YMAX_IDX] - bb[:, BB_YMIN_IDX]) + FRCN_EPS) x = (bb[:, BB_XMIN_IDX] + (0.5 * w)) y = (bb[:, BB_YMIN_IDX] + (0.5 * h)) return (x, y, w, h)
[ "def", "_get_xywh", "(", "bb", ")", ":", "w", "=", "(", "(", "bb", "[", ":", ",", "BB_XMAX_IDX", "]", "-", "bb", "[", ":", ",", "BB_XMIN_IDX", "]", ")", "+", "FRCN_EPS", ")", "h", "=", "(", "(", "bb", "[", ":", ",", "BB_YMAX_IDX", "]", "-", ...
given bounding boxes with coordinates .
train
false
43,055
def configure_cli_command(cmdline): args = shlex.split(cmdline) cli = CommandLineInterface() cmdinst = cli._configure_command(cmdname=args[0], argv=args[1:]) return cmdinst
[ "def", "configure_cli_command", "(", "cmdline", ")", ":", "args", "=", "shlex", ".", "split", "(", "cmdline", ")", "cli", "=", "CommandLineInterface", "(", ")", "cmdinst", "=", "cli", ".", "_configure_command", "(", "cmdname", "=", "args", "[", "0", "]", ...
helper to configure a command class .
train
false
43,056
@contextfilter def do_rejectattr(*args, **kwargs): return select_or_reject(args, kwargs, (lambda x: (not x)), True)
[ "@", "contextfilter", "def", "do_rejectattr", "(", "*", "args", ",", "**", "kwargs", ")", ":", "return", "select_or_reject", "(", "args", ",", "kwargs", ",", "(", "lambda", "x", ":", "(", "not", "x", ")", ")", ",", "True", ")" ]
filters a sequence of objects by applying a test to the specified attribute of each object .
train
false
43,060
def dsplit(ary, indices_or_sections): if (ary.ndim <= 2): raise ValueError('Cannot dsplit an array with less than 3 dimensions') return split(ary, indices_or_sections, 2)
[ "def", "dsplit", "(", "ary", ",", "indices_or_sections", ")", ":", "if", "(", "ary", ".", "ndim", "<=", "2", ")", ":", "raise", "ValueError", "(", "'Cannot dsplit an array with less than 3 dimensions'", ")", "return", "split", "(", "ary", ",", "indices_or_sectio...
splits an array into multiple sub arrays along the third axis .
train
false
43,061
def switch(): try: target = g['stuff'].split()[0] args = parse_arguments() try: if (g['stuff'].split()[(-1)] == '-f'): guide = 'To ignore an option, just hit Enter key.' printNicely(light_magenta(guide)) only = raw_input('Only nicks [Ex: @xxx,@yy]: ') ignore = raw_input('Ignore nicks [Ex: @xxx,@yy]: ') args.filter = list(filter(None, only.split(','))) args.ignore = list(filter(None, ignore.split(','))) except: printNicely(red('Sorry, wrong format.')) return g['stream_stop'] = True try: stuff = g['stuff'].split()[1] except: stuff = None spawn_dict = {'public': spawn_public_stream, 'list': spawn_list_stream, 'mine': spawn_personal_stream} spawn_dict.get(target)(args, stuff) except: debug_option() printNicely(red("Sorry I can't understand."))
[ "def", "switch", "(", ")", ":", "try", ":", "target", "=", "g", "[", "'stuff'", "]", ".", "split", "(", ")", "[", "0", "]", "args", "=", "parse_arguments", "(", ")", "try", ":", "if", "(", "g", "[", "'stuff'", "]", ".", "split", "(", ")", "["...
switch stream .
train
false
43,062
def continue_training(path): with change_recursion_limit(config.recursion_limit): with open(path, 'rb') as f: main_loop = load(f) main_loop.run()
[ "def", "continue_training", "(", "path", ")", ":", "with", "change_recursion_limit", "(", "config", ".", "recursion_limit", ")", ":", "with", "open", "(", "path", ",", "'rb'", ")", "as", "f", ":", "main_loop", "=", "load", "(", "f", ")", "main_loop", "."...
continues training using checkpoint .
train
false
43,063
@pytest.fixture(scope='session') def pootle_content_type(): from django.contrib.contenttypes.models import ContentType args = {'app_label': 'pootle_app', 'model': 'directory'} return ContentType.objects.get(**args)
[ "@", "pytest", ".", "fixture", "(", "scope", "=", "'session'", ")", "def", "pootle_content_type", "(", ")", ":", "from", "django", ".", "contrib", ".", "contenttypes", ".", "models", "import", "ContentType", "args", "=", "{", "'app_label'", ":", "'pootle_app...
require the pootle contenttype .
train
false
43,064
def testsearchandreplace(): (document, docbody, relationships) = simpledoc() docbody = document.xpath('/w:document/w:body', namespaces=nsprefixes)[0] assert search(docbody, 'ing 1') assert search(docbody, 'ing 2') assert search(docbody, 'graph 3') assert search(docbody, 'ist Item') assert search(docbody, 'A1') if search(docbody, 'Paragraph 2'): docbody = replace(docbody, 'Paragraph 2', 'Whacko 55') assert search(docbody, 'Whacko 55')
[ "def", "testsearchandreplace", "(", ")", ":", "(", "document", ",", "docbody", ",", "relationships", ")", "=", "simpledoc", "(", ")", "docbody", "=", "document", ".", "xpath", "(", "'/w:document/w:body'", ",", "namespaces", "=", "nsprefixes", ")", "[", "0", ...
ensure search and replace functions work .
train
false
43,065
def set_vif_host_backend_ovs_config(conf, brname, interfaceid, tapname=None): conf.net_type = 'bridge' conf.source_dev = brname conf.vporttype = 'openvswitch' conf.add_vport_param('interfaceid', interfaceid) if tapname: conf.target_dev = tapname conf.script = ''
[ "def", "set_vif_host_backend_ovs_config", "(", "conf", ",", "brname", ",", "interfaceid", ",", "tapname", "=", "None", ")", ":", "conf", ".", "net_type", "=", "'bridge'", "conf", ".", "source_dev", "=", "brname", "conf", ".", "vporttype", "=", "'openvswitch'",...
populate a libvirtconfigguestinterface instance with host backend details for an openvswitch bridge .
train
false
43,066
def center_matrix(M, dim=0): M = np.asarray(M, float) if dim: M = ((M - M.mean(axis=0)) / M.std(axis=0)) else: M = (M - M.mean(axis=1)[:, np.newaxis]) M = (M / M.std(axis=1)[:, np.newaxis]) return M
[ "def", "center_matrix", "(", "M", ",", "dim", "=", "0", ")", ":", "M", "=", "np", ".", "asarray", "(", "M", ",", "float", ")", "if", "dim", ":", "M", "=", "(", "(", "M", "-", "M", ".", "mean", "(", "axis", "=", "0", ")", ")", "/", "M", ...
return the matrix *m* with each row having zero mean and unit std .
train
false
43,067
@pytest.fixture(scope='session', autouse=True) def delete_pattern(): from django.core.cache.backends.locmem import LocMemCache LocMemCache.delete_pattern = (lambda x, y: 0)
[ "@", "pytest", ".", "fixture", "(", "scope", "=", "'session'", ",", "autouse", "=", "True", ")", "def", "delete_pattern", "(", ")", ":", "from", "django", ".", "core", ".", "cache", ".", "backends", ".", "locmem", "import", "LocMemCache", "LocMemCache", ...
adds the no-op delete_pattern() method to locmemcache .
train
false
43,069
def splitfactor_sqf(p, DE, coefficientD=False, z=None, basic=False): kkinv = ([(1 / x) for x in DE.T[:DE.level]] + DE.T[:DE.level]) if z: kkinv = [z] S = [] N = [] p_sqf = p.sqf_list_include() if p.is_zero: return (((p, 1),), ()) for (pi, i) in p_sqf: Si = pi.as_poly(*kkinv).gcd(derivation(pi, DE, coefficientD=coefficientD, basic=basic).as_poly(*kkinv)).as_poly(DE.t) pi = Poly(pi, DE.t) Si = Poly(Si, DE.t) Ni = pi.exquo(Si) if (not Si.is_one): S.append((Si, i)) if (not Ni.is_one): N.append((Ni, i)) return (tuple(N), tuple(S))
[ "def", "splitfactor_sqf", "(", "p", ",", "DE", ",", "coefficientD", "=", "False", ",", "z", "=", "None", ",", "basic", "=", "False", ")", ":", "kkinv", "=", "(", "[", "(", "1", "/", "x", ")", "for", "x", "in", "DE", ".", "T", "[", ":", "DE", ...
splitting square-free factorization given a derivation d on k[t] and p in k[t] .
train
false
43,070
def get_user_obj_perms_model(obj): from guardian.models import UserObjectPermissionBase from guardian.models import UserObjectPermission return get_obj_perms_model(obj, UserObjectPermissionBase, UserObjectPermission)
[ "def", "get_user_obj_perms_model", "(", "obj", ")", ":", "from", "guardian", ".", "models", "import", "UserObjectPermissionBase", "from", "guardian", ".", "models", "import", "UserObjectPermission", "return", "get_obj_perms_model", "(", "obj", ",", "UserObjectPermission...
returns model class that connects given obj and user class .
train
false
43,072
def clean_extra_output_destination(): global extra_print_dests extra_print_dests = []
[ "def", "clean_extra_output_destination", "(", ")", ":", "global", "extra_print_dests", "extra_print_dests", "=", "[", "]" ]
clean extra print destination(s) :rtype: none .
train
false
43,073
def bson_ts_to_long(timestamp): return ((timestamp.time << 32) + timestamp.inc)
[ "def", "bson_ts_to_long", "(", "timestamp", ")", ":", "return", "(", "(", "timestamp", ".", "time", "<<", "32", ")", "+", "timestamp", ".", "inc", ")" ]
convert bson timestamp into integer .
train
false
43,074
def volume_data_get_for_host(context, host, count_only=False): return IMPL.volume_data_get_for_host(context, host, count_only)
[ "def", "volume_data_get_for_host", "(", "context", ",", "host", ",", "count_only", "=", "False", ")", ":", "return", "IMPL", ".", "volume_data_get_for_host", "(", "context", ",", "host", ",", "count_only", ")" ]
get for project .
train
false
43,076
def _parse_single_node(text): return lxml.html.html5parser.fragment_fromstring(text, parser=PARSER)
[ "def", "_parse_single_node", "(", "text", ")", ":", "return", "lxml", ".", "html", ".", "html5parser", ".", "fragment_fromstring", "(", "text", ",", "parser", "=", "PARSER", ")" ]
parse a single html node from a string into a tree .
train
false