id_within_dataset
int64
1
55.5k
snippet
stringlengths
19
14.2k
tokens
listlengths
6
1.63k
nl
stringlengths
6
352
split_within_dataset
stringclasses
1 value
is_duplicated
bool
2 classes
16,160
def register_review_request_fieldset(fieldset): fieldset_registry.register(fieldset)
[ "def", "register_review_request_fieldset", "(", "fieldset", ")", ":", "fieldset_registry", ".", "register", "(", "fieldset", ")" ]
register a custom review request fieldset .
train
false
16,161
def download_best_subtitles(videos, languages, min_score=0, hearing_impaired=False, only_one=False, compute_score=None, pool_class=ProviderPool, **kwargs): downloaded_subtitles = defaultdict(list) checked_videos = [] for video in videos: if (not check_video(video, languages=languages, undefined=only_one)): logger.info('Skipping video %r', video) continue checked_videos.append(video) if (not checked_videos): return downloaded_subtitles with pool_class(**kwargs) as pool: for video in checked_videos: logger.info('Downloading best subtitles for %r', video) subtitles = pool.download_best_subtitles(pool.list_subtitles(video, (languages - video.subtitle_languages)), video, languages, min_score=min_score, hearing_impaired=hearing_impaired, only_one=only_one, compute_score=compute_score) logger.info('Downloaded %d subtitle(s)', len(subtitles)) downloaded_subtitles[video].extend(subtitles) return downloaded_subtitles
[ "def", "download_best_subtitles", "(", "videos", ",", "languages", ",", "min_score", "=", "0", ",", "hearing_impaired", "=", "False", ",", "only_one", "=", "False", ",", "compute_score", "=", "None", ",", "pool_class", "=", "ProviderPool", ",", "**", "kwargs",...
list and download the best matching subtitles .
train
true
16,162
@register.filter def as_stars(value): num_stars_to_class = {0: '', 1: 'One', 2: 'Two', 3: 'Three', 4: 'Four', 5: 'Five'} num_stars = int(round((value or 0.0))) return num_stars_to_class.get(num_stars, '')
[ "@", "register", ".", "filter", "def", "as_stars", "(", "value", ")", ":", "num_stars_to_class", "=", "{", "0", ":", "''", ",", "1", ":", "'One'", ",", "2", ":", "'Two'", ",", "3", ":", "'Three'", ",", "4", ":", "'Four'", ",", "5", ":", "'Five'",...
convert a float rating between 0 and 5 to a css class the css class name is the number of stars to be displayed .
train
false
16,163
def _should_send_own_updates(self): try: return self.get_profile().should_send_own_updates except Profile.DoesNotExist: return True
[ "def", "_should_send_own_updates", "(", "self", ")", ":", "try", ":", "return", "self", ".", "get_profile", "(", ")", ".", "should_send_own_updates", "except", "Profile", ".", "DoesNotExist", ":", "return", "True" ]
get whether a user wants to receive emails about their activity .
train
false
16,164
def object_header(num_type, length): return (((object_class(num_type).type_name + ' ') + str(length).encode('ascii')) + '\x00')
[ "def", "object_header", "(", "num_type", ",", "length", ")", ":", "return", "(", "(", "(", "object_class", "(", "num_type", ")", ".", "type_name", "+", "' '", ")", "+", "str", "(", "length", ")", ".", "encode", "(", "'ascii'", ")", ")", "+", "'\\x00'...
return an object header for the given numeric type and text length .
train
false
16,166
def delete_principal(name): ret = {} cmd = __execute_kadmin('delprinc -force {0}'.format(name)) if ((cmd['retcode'] != 0) or cmd['stderr']): ret['comment'] = cmd['stderr'].splitlines()[(-1)] ret['result'] = False return ret return True
[ "def", "delete_principal", "(", "name", ")", ":", "ret", "=", "{", "}", "cmd", "=", "__execute_kadmin", "(", "'delprinc -force {0}'", ".", "format", "(", "name", ")", ")", "if", "(", "(", "cmd", "[", "'retcode'", "]", "!=", "0", ")", "or", "cmd", "["...
delete principal cli example: .
train
true
16,168
def sample_iter(expr, condition=None, numsamples=S.Infinity, **kwargs): try: return sample_iter_lambdify(expr, condition, numsamples, **kwargs) except TypeError: return sample_iter_subs(expr, condition, numsamples, **kwargs)
[ "def", "sample_iter", "(", "expr", ",", "condition", "=", "None", ",", "numsamples", "=", "S", ".", "Infinity", ",", "**", "kwargs", ")", ":", "try", ":", "return", "sample_iter_lambdify", "(", "expr", ",", "condition", ",", "numsamples", ",", "**", "kwa...
returns an iterator of realizations from the expression given a condition expr: random expression to be realized condition: a conditional expression numsamples: length of the iterator examples .
train
false
16,169
def ParseLogs(logs): return [ParseLogEntry(line) for line in logs.split('\n') if line]
[ "def", "ParseLogs", "(", "logs", ")", ":", "return", "[", "ParseLogEntry", "(", "line", ")", "for", "line", "in", "logs", ".", "split", "(", "'\\n'", ")", "if", "line", "]" ]
parses a str containing newline separated log entries .
train
false
16,174
def get_cosmetic_display_price(course, registration_price): currency_symbol = settings.PAID_COURSE_REGISTRATION_CURRENCY[1] price = course.cosmetic_display_price if (registration_price > 0): price = registration_price if price: return _('{currency_symbol}{price}').format(currency_symbol=currency_symbol, price=price) else: return _('Free')
[ "def", "get_cosmetic_display_price", "(", "course", ",", "registration_price", ")", ":", "currency_symbol", "=", "settings", ".", "PAID_COURSE_REGISTRATION_CURRENCY", "[", "1", "]", "price", "=", "course", ".", "cosmetic_display_price", "if", "(", "registration_price", ...
return course price as a string preceded by correct currency .
train
false
16,176
def addPositives(derivation, paths, positives): portionDirections = getSpacedPortionDirections(derivation.interpolationDictionary) for path in paths: loopLists = getLoopListsByPath(derivation, None, path, portionDirections) geometryOutput = triangle_mesh.getPillarsOutput(loopLists) positives.append(geometryOutput)
[ "def", "addPositives", "(", "derivation", ",", "paths", ",", "positives", ")", ":", "portionDirections", "=", "getSpacedPortionDirections", "(", "derivation", ".", "interpolationDictionary", ")", "for", "path", "in", "paths", ":", "loopLists", "=", "getLoopListsByPa...
add pillars output to positives .
train
false
16,177
@ensure_csrf_cookie def update_session_language(request): if (request.method == 'PATCH'): data = json.loads(request.body) language = data.get(LANGUAGE_KEY, settings.LANGUAGE_CODE) if (request.session.get(LANGUAGE_SESSION_KEY, None) != language): request.session[LANGUAGE_SESSION_KEY] = unicode(language) return HttpResponse(200)
[ "@", "ensure_csrf_cookie", "def", "update_session_language", "(", "request", ")", ":", "if", "(", "request", ".", "method", "==", "'PATCH'", ")", ":", "data", "=", "json", ".", "loads", "(", "request", ".", "body", ")", "language", "=", "data", ".", "get...
update the language session key .
train
false
16,178
@service.jsonrpc def list_apps(): regex = re.compile('^\\w+$') apps = [f for f in os.listdir(apath(r=request)) if regex.match(f)] return apps
[ "@", "service", ".", "jsonrpc", "def", "list_apps", "(", ")", ":", "regex", "=", "re", ".", "compile", "(", "'^\\\\w+$'", ")", "apps", "=", "[", "f", "for", "f", "in", "os", ".", "listdir", "(", "apath", "(", "r", "=", "request", ")", ")", "if", ...
list apps in site .
train
false
16,179
def context_cache(func): def context_cache_wrap(*args, **kwargs): func_context = func.__globals__['__context__'] func_opts = func.__globals__['__opts__'] func_name = func.__globals__['__name__'] context_cache = ContextCache(func_opts, func_name) if ((not func_context) and os.path.isfile(context_cache.cache_path)): salt.utils.dictupdate.update(func_context, context_cache.get_cache_context()) else: context_cache.cache_context(func_context) return func(*args, **kwargs) return context_cache_wrap
[ "def", "context_cache", "(", "func", ")", ":", "def", "context_cache_wrap", "(", "*", "args", ",", "**", "kwargs", ")", ":", "func_context", "=", "func", ".", "__globals__", "[", "'__context__'", "]", "func_opts", "=", "func", ".", "__globals__", "[", "'__...
a decorator to be used module functions which need to cache their context .
train
true
16,180
def call_getiter(context, builder, iterable_type, val): getiter_sig = typing.signature(iterable_type.iterator_type, iterable_type) getiter_impl = context.get_function('getiter', getiter_sig) return getiter_impl(builder, (val,))
[ "def", "call_getiter", "(", "context", ",", "builder", ",", "iterable_type", ",", "val", ")", ":", "getiter_sig", "=", "typing", ".", "signature", "(", "iterable_type", ".", "iterator_type", ",", "iterable_type", ")", "getiter_impl", "=", "context", ".", "get_...
call the getiter() implementation for the given *iterable_type* of value *val* .
train
false
16,181
def load_filters(filters_path): filterlist = [] for filterdir in filters_path: if (not os.path.isdir(filterdir)): continue for filterfile in os.listdir(filterdir): filterconfig = ConfigParser.RawConfigParser() filterconfig.read(os.path.join(filterdir, filterfile)) for (name, value) in filterconfig.items('Filters'): filterdefinition = [string.strip(s) for s in value.split(',')] newfilter = build_filter(*filterdefinition) if (newfilter is None): continue newfilter.name = name filterlist.append(newfilter) return filterlist
[ "def", "load_filters", "(", "filters_path", ")", ":", "filterlist", "=", "[", "]", "for", "filterdir", "in", "filters_path", ":", "if", "(", "not", "os", ".", "path", ".", "isdir", "(", "filterdir", ")", ")", ":", "continue", "for", "filterfile", "in", ...
load filters from a list of directories .
train
false
16,183
def rs_min_pow(expr, series_rs, a): series = 0 n = 2 while (series == 0): series = _rs_series(expr, series_rs, a, n) n *= 2 R = series.ring a = R(a) i = R.gens.index(a) return min(series, key=(lambda t: t[i]))[i]
[ "def", "rs_min_pow", "(", "expr", ",", "series_rs", ",", "a", ")", ":", "series", "=", "0", "n", "=", "2", "while", "(", "series", "==", "0", ")", ":", "series", "=", "_rs_series", "(", "expr", ",", "series_rs", ",", "a", ",", "n", ")", "n", "*...
find the minimum power of a in the series expansion of expr .
train
false
16,184
def get_win_launcher(type): launcher_fn = ('%s.exe' % type) if (platform.machine().lower() == 'arm'): launcher_fn = launcher_fn.replace('.', '-arm.') if is_64bit(): launcher_fn = launcher_fn.replace('.', '-64.') else: launcher_fn = launcher_fn.replace('.', '-32.') return resource_string('setuptools', launcher_fn)
[ "def", "get_win_launcher", "(", "type", ")", ":", "launcher_fn", "=", "(", "'%s.exe'", "%", "type", ")", "if", "(", "platform", ".", "machine", "(", ")", ".", "lower", "(", ")", "==", "'arm'", ")", ":", "launcher_fn", "=", "launcher_fn", ".", "replace"...
load the windows launcher suitable for launching a script .
train
true
16,186
@task() @timeit def escalate_question(question_id): from kitsune.questions.models import Question question = Question.objects.get(id=question_id) url = 'https://{domain}{url}'.format(domain=Site.objects.get_current().domain, url=question.get_absolute_url()) try: submit_ticket(email='support@mozilla.com', category='Escalated', subject=u'[Escalated] {title}'.format(title=question.title), body=u'{url}\n\n{content}'.format(url=url, content=question.content), tags=[t.slug for t in question.tags.all()]) except ZendeskError: raise PickleableZendeskError()
[ "@", "task", "(", ")", "@", "timeit", "def", "escalate_question", "(", "question_id", ")", ":", "from", "kitsune", ".", "questions", ".", "models", "import", "Question", "question", "=", "Question", ".", "objects", ".", "get", "(", "id", "=", "question_id"...
escalate a question to zendesk by submitting a ticket .
train
false
16,188
def parse_fasta_file(fasta_lines): fasta_seqs = {} seq_order = [] for (label, seq) in parse_fasta(fasta_lines): fasta_seqs[label.split()[0].strip()] = seq seq_order.append(label) return (fasta_seqs, seq_order)
[ "def", "parse_fasta_file", "(", "fasta_lines", ")", ":", "fasta_seqs", "=", "{", "}", "seq_order", "=", "[", "]", "for", "(", "label", ",", "seq", ")", "in", "parse_fasta", "(", "fasta_lines", ")", ":", "fasta_seqs", "[", "label", ".", "split", "(", ")...
parses fasta file .
train
false
16,189
def count_words(training_set): counts = defaultdict((lambda : [0, 0])) for (message, is_spam) in training_set: for word in tokenize(message): counts[word][(0 if is_spam else 1)] += 1 return counts
[ "def", "count_words", "(", "training_set", ")", ":", "counts", "=", "defaultdict", "(", "(", "lambda", ":", "[", "0", ",", "0", "]", ")", ")", "for", "(", "message", ",", "is_spam", ")", "in", "training_set", ":", "for", "word", "in", "tokenize", "("...
training set consists of pairs .
train
false
16,191
def synsets(word, pos=NOUN): (word, pos) = (normalize(word), pos.lower()) try: if pos.startswith(NOUN.lower()): w = wn.N[word] elif pos.startswith(VERB.lower()): w = wn.V[word] elif pos.startswith(ADJECTIVE.lower()): w = wn.ADJ[word] elif pos.startswith(ADVERB.lower()): w = wn.ADV[word] else: raise TypeError(('part of speech must be NOUN, VERB, ADJECTIVE or ADVERB, not %s' % repr(pos))) return [Synset(s.synset) for (i, s) in enumerate(w)] except KeyError: return [] return []
[ "def", "synsets", "(", "word", ",", "pos", "=", "NOUN", ")", ":", "(", "word", ",", "pos", ")", "=", "(", "normalize", "(", "word", ")", ",", "pos", ".", "lower", "(", ")", ")", "try", ":", "if", "pos", ".", "startswith", "(", "NOUN", ".", "l...
returns a list of synset objects .
train
false
16,192
@library.global_function def tag_vocab(): return json.dumps(dict(((t[0], t[1]) for t in Tag.objects.values_list('name', 'slug'))))
[ "@", "library", ".", "global_function", "def", "tag_vocab", "(", ")", ":", "return", "json", ".", "dumps", "(", "dict", "(", "(", "(", "t", "[", "0", "]", ",", "t", "[", "1", "]", ")", "for", "t", "in", "Tag", ".", "objects", ".", "values_list", ...
returns the tag vocabulary as a json object .
train
false
16,193
def _error_msg_routes(iface, option, expected): msg = 'Invalid option -- Route interface: {0}, Option: {1}, Expected: [{2}]' return msg.format(iface, option, expected)
[ "def", "_error_msg_routes", "(", "iface", ",", "option", ",", "expected", ")", ":", "msg", "=", "'Invalid option -- Route interface: {0}, Option: {1}, Expected: [{2}]'", "return", "msg", ".", "format", "(", "iface", ",", "option", ",", "expected", ")" ]
build an appropriate error message from a given option and a list of expected values .
train
true
16,194
def get_grouped_by_powers(bases, powers): positive = [] negative = [] for (base, power) in zip(bases, powers): if (power < 0): negative.append((base, (- power))) elif (power > 0): positive.append((base, power)) else: raise ValueError(u'Unit with 0 power') return (positive, negative)
[ "def", "get_grouped_by_powers", "(", "bases", ",", "powers", ")", ":", "positive", "=", "[", "]", "negative", "=", "[", "]", "for", "(", "base", ",", "power", ")", "in", "zip", "(", "bases", ",", "powers", ")", ":", "if", "(", "power", "<", "0", ...
groups the powers and bases in the given ~astropy .
train
false
16,195
def _guess_iface_name(netif): with os.popen(('%s -l' % conf.prog.ifconfig)) as fdesc: ifaces = fdesc.readline().strip().split(' ') matches = [iface for iface in ifaces if iface.startswith(netif)] if (len(matches) == 1): return matches[0] return None
[ "def", "_guess_iface_name", "(", "netif", ")", ":", "with", "os", ".", "popen", "(", "(", "'%s -l'", "%", "conf", ".", "prog", ".", "ifconfig", ")", ")", "as", "fdesc", ":", "ifaces", "=", "fdesc", ".", "readline", "(", ")", ".", "strip", "(", ")",...
we attempt to guess the name of interfaces that are truncated from the output of ifconfig -l .
train
true
16,196
def serial_escape(value): return value.replace('\\', '\\\\').replace(' ', '\\ ')
[ "def", "serial_escape", "(", "value", ")", ":", "return", "value", ".", "replace", "(", "'\\\\'", ",", "'\\\\\\\\'", ")", ".", "replace", "(", "' '", ",", "'\\\\ '", ")" ]
escape string values that are elements of a list .
train
false
16,198
def test_subfmts_regex(): class TimeLongYear(TimeString, ): name = 'longyear' subfmts = (('date', '(?P<year>[+-]\\d{5})-%m-%d', '{year:+06d}-{mon:02d}-{day:02d}'),) t = Time('+02000-02-03', format='longyear') assert (t.value == '+02000-02-03') assert (t.jd == Time('2000-02-03').jd)
[ "def", "test_subfmts_regex", "(", ")", ":", "class", "TimeLongYear", "(", "TimeString", ",", ")", ":", "name", "=", "'longyear'", "subfmts", "=", "(", "(", "'date'", ",", "'(?P<year>[+-]\\\\d{5})-%m-%d'", ",", "'{year:+06d}-{mon:02d}-{day:02d}'", ")", ",", ")", ...
test having a custom subfmts with a regular expression .
train
false
16,200
def add32(a, b): lo = ((a & 65535) + (b & 65535)) hi = (((a >> 16) + (b >> 16)) + (lo >> 16)) return ((((- (hi & 32768)) | (hi & 32767)) << 16) | (lo & 65535))
[ "def", "add32", "(", "a", ",", "b", ")", ":", "lo", "=", "(", "(", "a", "&", "65535", ")", "+", "(", "b", "&", "65535", ")", ")", "hi", "=", "(", "(", "(", "a", ">>", "16", ")", "+", "(", "b", ">>", "16", ")", ")", "+", "(", "lo", "...
add two 32-bit words discarding carry above 32nd bit .
train
false
16,201
def _get_dependencies(documents, deps_mode=True): doc_set = set() for doc in documents: stack = [doc] while stack: curr_doc = stack.pop() if ((curr_doc not in doc_set) and (not curr_doc.is_history)): doc_set.add(curr_doc) if deps_mode: deps_set = set(curr_doc.dependencies.all()) else: deps_set = set(curr_doc.children.all()) stack.extend((deps_set - doc_set)) return doc_set
[ "def", "_get_dependencies", "(", "documents", ",", "deps_mode", "=", "True", ")", ":", "doc_set", "=", "set", "(", ")", "for", "doc", "in", "documents", ":", "stack", "=", "[", "doc", "]", "while", "stack", ":", "curr_doc", "=", "stack", ".", "pop", ...
given a list of document2 objects .
train
false
16,202
def compute_prefix(word): word_length = len(word) prefix = ([0] * word_length) k = 0 for q in range(1, word_length): while ((k > 0) and (word[k] != word[q])): k = prefix[(k - 1)] if (word[(k + 1)] == word[q]): k = (k + 1) prefix[q] = k return prefix
[ "def", "compute_prefix", "(", "word", ")", ":", "word_length", "=", "len", "(", "word", ")", "prefix", "=", "(", "[", "0", "]", "*", "word_length", ")", "k", "=", "0", "for", "q", "in", "range", "(", "1", ",", "word_length", ")", ":", "while", "(...
returns the prefix of the word .
train
false
16,205
def _ConvertToUnicodeList(arg): return [_ConvertToUnicode(value) for value in _ConvertToList(arg)]
[ "def", "_ConvertToUnicodeList", "(", "arg", ")", ":", "return", "[", "_ConvertToUnicode", "(", "value", ")", "for", "value", "in", "_ConvertToList", "(", "arg", ")", "]" ]
converts arg to a list of unicode objects .
train
false
16,207
def lu(a): import scipy.linalg if (a.ndim != 2): raise ValueError('Dimension must be 2 to perform lu decomposition') (xdim, ydim) = a.shape if (xdim != ydim): raise ValueError('Input must be a square matrix to perform lu decomposition') if (not (len(set((a.chunks[0] + a.chunks[1]))) == 1)): msg = 'All chunks must be a square matrix to perform lu decomposition. Use .rechunk method to change the size of chunks.' raise ValueError(msg) vdim = len(a.chunks[0]) hdim = len(a.chunks[1]) token = tokenize(a) name_lu = ('lu-lu-' + token) name_p = ('lu-p-' + token) name_l = ('lu-l-' + token) name_u = ('lu-u-' + token) name_p_inv = ('lu-p-inv-' + token) name_l_permuted = ('lu-l-permute-' + token) name_u_transposed = ('lu-u-transpose-' + token) name_plu_dot = ('lu-plu-dot-' + token) name_lu_dot = ('lu-lu-dot-' + token) dsk = {} for i in range(min(vdim, hdim)): target = (a.name, i, i) if (i > 0): prevs = [] for p in range(i): prev = (name_plu_dot, i, p, p, i) dsk[prev] = (np.dot, (name_l_permuted, i, p), (name_u, p, i)) prevs.append(prev) target = (operator.sub, target, (sum, prevs)) dsk[(name_lu, i, i)] = (scipy.linalg.lu, target) for j in range((i + 1), hdim): target = (np.dot, (name_p_inv, i, i), (a.name, i, j)) if (i > 0): prevs = [] for p in range(i): prev = (name_lu_dot, i, p, p, j) dsk[prev] = (np.dot, (name_l, i, p), (name_u, p, j)) prevs.append(prev) target = (operator.sub, target, (sum, prevs)) dsk[(name_lu, i, j)] = (_solve_triangular_lower, (name_l, i, i), target) for k in range((i + 1), vdim): target = (a.name, k, i) if (i > 0): prevs = [] for p in range(i): prev = (name_plu_dot, k, p, p, i) dsk[prev] = (np.dot, (name_l_permuted, k, p), (name_u, p, i)) prevs.append(prev) target = (operator.sub, target, (sum, prevs)) dsk[(name_lu, k, i)] = (np.transpose, (_solve_triangular_lower, (name_u_transposed, i, i), (np.transpose, target))) for i in range(min(vdim, hdim)): for j in range(min(vdim, hdim)): if (i == j): dsk[(name_p, i, j)] = (operator.getitem, (name_lu, i, j), 0) dsk[(name_l, i, j)] = (operator.getitem, (name_lu, i, j), 1) dsk[(name_u, i, j)] = (operator.getitem, (name_lu, i, j), 2) dsk[(name_l_permuted, i, j)] = (np.dot, (name_p, i, j), (name_l, i, j)) dsk[(name_u_transposed, i, j)] = (np.transpose, (name_u, i, j)) dsk[(name_p_inv, i, j)] = (np.transpose, (name_p, i, j)) elif (i > j): dsk[(name_p, i, j)] = (np.zeros, (a.chunks[0][i], a.chunks[1][j])) dsk[(name_l, i, j)] = (np.dot, (name_p_inv, i, i), (name_lu, i, j)) dsk[(name_u, i, j)] = (np.zeros, (a.chunks[0][i], a.chunks[1][j])) dsk[(name_l_permuted, i, j)] = (name_lu, i, j) else: dsk[(name_p, i, j)] = (np.zeros, (a.chunks[0][i], a.chunks[1][j])) dsk[(name_l, i, j)] = (np.zeros, (a.chunks[0][i], a.chunks[1][j])) dsk[(name_u, i, j)] = (name_lu, i, j) dsk.update(a.dask) (pp, ll, uu) = scipy.linalg.lu(np.ones(shape=(1, 1), dtype=a.dtype)) p = Array(dsk, name_p, shape=a.shape, chunks=a.chunks, dtype=pp.dtype) l = Array(dsk, name_l, shape=a.shape, chunks=a.chunks, dtype=ll.dtype) u = Array(dsk, name_u, shape=a.shape, chunks=a.chunks, dtype=uu.dtype) return (p, l, u)
[ "def", "lu", "(", "a", ")", ":", "import", "scipy", ".", "linalg", "if", "(", "a", ".", "ndim", "!=", "2", ")", ":", "raise", "ValueError", "(", "'Dimension must be 2 to perform lu decomposition'", ")", "(", "xdim", ",", "ydim", ")", "=", "a", ".", "sh...
compute pivoted lu decomposition of a matrix .
train
false
16,208
def set_required_content(course_key, gated_content_key, prereq_content_key, min_score): milestone = None for gating_milestone in find_gating_milestones(course_key, gated_content_key, 'requires'): if ((not prereq_content_key) or (prereq_content_key not in gating_milestone.get('namespace'))): milestones_api.remove_course_content_milestone(course_key, gated_content_key, gating_milestone) else: milestone = gating_milestone if prereq_content_key: _validate_min_score(min_score) requirements = {'min_score': min_score} if (not milestone): milestone = _get_prerequisite_milestone(prereq_content_key) milestones_api.add_course_content_milestone(course_key, gated_content_key, 'requires', milestone, requirements)
[ "def", "set_required_content", "(", "course_key", ",", "gated_content_key", ",", "prereq_content_key", ",", "min_score", ")", ":", "milestone", "=", "None", "for", "gating_milestone", "in", "find_gating_milestones", "(", "course_key", ",", "gated_content_key", ",", "'...
adds a requires milestone relationship for the given gated_content_key if a prerequisite prereq_content_key is provided .
train
false
16,209
def is_valid_ip_prefix(prefix, bits): try: prefix = int(prefix) except ValueError: return False return (0 <= prefix <= bits)
[ "def", "is_valid_ip_prefix", "(", "prefix", ",", "bits", ")", ":", "try", ":", "prefix", "=", "int", "(", "prefix", ")", "except", "ValueError", ":", "return", "False", "return", "(", "0", "<=", "prefix", "<=", "bits", ")" ]
returns true if *prefix* is a valid ipv4 or ipv6 address prefix .
train
true
16,210
def apply_settings(django_settings): django_settings.FIELDS_STORED_IN_SESSION = _FIELDS_STORED_IN_SESSION django_settings.MIDDLEWARE_CLASSES += _MIDDLEWARE_CLASSES django_settings.SOCIAL_AUTH_LOGIN_ERROR_URL = '/' django_settings.SOCIAL_AUTH_LOGIN_REDIRECT_URL = _SOCIAL_AUTH_LOGIN_REDIRECT_URL django_settings.SOCIAL_AUTH_PIPELINE = ['third_party_auth.pipeline.parse_query_params', 'social.pipeline.social_auth.social_details', 'social.pipeline.social_auth.social_uid', 'social.pipeline.social_auth.auth_allowed', 'social.pipeline.social_auth.social_user', 'third_party_auth.pipeline.associate_by_email_if_login_api', 'social.pipeline.user.get_username', 'third_party_auth.pipeline.set_pipeline_timeout', 'third_party_auth.pipeline.ensure_user_information', 'social.pipeline.user.create_user', 'social.pipeline.social_auth.associate_user', 'social.pipeline.social_auth.load_extra_data', 'social.pipeline.user.user_details', 'third_party_auth.pipeline.set_logged_in_cookies', 'third_party_auth.pipeline.login_analytics'] insert_enterprise_pipeline_elements(django_settings.SOCIAL_AUTH_PIPELINE) django_settings.SOCIAL_AUTH_STRATEGY = 'third_party_auth.strategy.ConfigurationModelStrategy' django_settings.SOCIAL_AUTH_PROTECTED_USER_FIELDS = ['email'] django_settings.SOCIAL_AUTH_RAISE_EXCEPTIONS = False django_settings.SOCIAL_AUTH_INACTIVE_USER_LOGIN = True django_settings.SOCIAL_AUTH_INACTIVE_USER_URL = '/auth/inactive' django_settings.SOCIAL_AUTH_UUID_LENGTH = 4 django_settings.DEFAULT_TEMPLATE_ENGINE['OPTIONS']['context_processors'] += ('social.apps.django_app.context_processors.backends', 'social.apps.django_app.context_processors.login_redirect')
[ "def", "apply_settings", "(", "django_settings", ")", ":", "django_settings", ".", "FIELDS_STORED_IN_SESSION", "=", "_FIELDS_STORED_IN_SESSION", "django_settings", ".", "MIDDLEWARE_CLASSES", "+=", "_MIDDLEWARE_CLASSES", "django_settings", ".", "SOCIAL_AUTH_LOGIN_ERROR_URL", "="...
set provider-independent settings .
train
false
16,212
def mktime_tz(data): if (data[9] is None): return time.mktime((data[:8] + ((-1),))) else: t = calendar.timegm(data) return (t - data[9])
[ "def", "mktime_tz", "(", "data", ")", ":", "if", "(", "data", "[", "9", "]", "is", "None", ")", ":", "return", "time", ".", "mktime", "(", "(", "data", "[", ":", "8", "]", "+", "(", "(", "-", "1", ")", ",", ")", ")", ")", "else", ":", "t"...
turn a 10-tuple as returned by parsedate_tz() into a utc timestamp .
train
true
16,213
def removeZip(): zipName = 'reprap_python_beanshell' zipNameExtension = (zipName + '.zip') if (zipNameExtension in os.listdir(os.getcwd())): os.remove(zipNameExtension) shellCommand = ('zip -r %s * -x \\*.pyc \\*~' % zipName) commandResult = os.system(shellCommand) if (commandResult != 0): print 'Failed to execute the following command in removeZip in prepare.' print shellCommand
[ "def", "removeZip", "(", ")", ":", "zipName", "=", "'reprap_python_beanshell'", "zipNameExtension", "=", "(", "zipName", "+", "'.zip'", ")", "if", "(", "zipNameExtension", "in", "os", ".", "listdir", "(", "os", ".", "getcwd", "(", ")", ")", ")", ":", "os...
remove the zip file .
train
false
16,214
def getSimplePatterns(numOnes, numPatterns, patternOverlap=0): assert (patternOverlap < numOnes) numNewBitsInEachPattern = (numOnes - patternOverlap) numCols = ((numNewBitsInEachPattern * numPatterns) + patternOverlap) p = [] for i in xrange(numPatterns): x = numpy.zeros(numCols, dtype='float32') startBit = (i * numNewBitsInEachPattern) nextStartBit = (startBit + numOnes) x[startBit:nextStartBit] = 1 p.append(x) return p
[ "def", "getSimplePatterns", "(", "numOnes", ",", "numPatterns", ",", "patternOverlap", "=", "0", ")", ":", "assert", "(", "patternOverlap", "<", "numOnes", ")", "numNewBitsInEachPattern", "=", "(", "numOnes", "-", "patternOverlap", ")", "numCols", "=", "(", "(...
very simple patterns .
train
true
16,215
def add_alias(alias, canonical): ALIASES[alias] = canonical
[ "def", "add_alias", "(", "alias", ",", "canonical", ")", ":", "ALIASES", "[", "alias", "]", "=", "canonical" ]
add a character set alias .
train
false
16,217
@pytest.mark.network def test_uninstall_easy_installed_console_scripts(script): args = ['easy_install'] args.append('discover') result = script.run(*args, **{'expect_stderr': True}) assert (((script.bin / 'discover') + script.exe) in result.files_created), sorted(result.files_created.keys()) result2 = script.pip('uninstall', 'discover', '-y') assert_all_changes(result, result2, [(script.venv / 'build'), 'cache', (script.site_packages / 'easy-install.pth')])
[ "@", "pytest", ".", "mark", ".", "network", "def", "test_uninstall_easy_installed_console_scripts", "(", "script", ")", ":", "args", "=", "[", "'easy_install'", "]", "args", ".", "append", "(", "'discover'", ")", "result", "=", "script", ".", "run", "(", "*"...
test uninstalling package with console_scripts that is easy_installed .
train
false
16,218
def is_probable_prime(n, k=7): if (n < 6): return [False, False, True, True, False, True][n] if ((n & 1) == 0): return False else: (s, d) = (0, (n - 1)) while ((d & 1) == 0): (s, d) = ((s + 1), (d >> 1)) for a in random.sample(xrange(2, min((n - 2), sys.maxint)), min((n - 4), k)): x = pow(a, d, n) if ((x != 1) and ((x + 1) != n)): for r in xrange(1, s): x = pow(x, 2, n) if (x == 1): return False elif (x == (n - 1)): a = 0 break if a: return False return True
[ "def", "is_probable_prime", "(", "n", ",", "k", "=", "7", ")", ":", "if", "(", "n", "<", "6", ")", ":", "return", "[", "False", ",", "False", ",", "True", ",", "True", ",", "False", ",", "True", "]", "[", "n", "]", "if", "(", "(", "n", "&",...
use rabin-miller algorithm to return true or false the parameter k defines the accuracy of the test .
train
false
16,219
def subscribe(hass, callback): def zigbee_frame_subscriber(event): 'Decode and unpickle the frame from the event bus, and call back.' frame = pickle.loads(b64decode(event.data[ATTR_FRAME])) callback(frame) hass.bus.listen(EVENT_ZIGBEE_FRAME_RECEIVED, zigbee_frame_subscriber)
[ "def", "subscribe", "(", "hass", ",", "callback", ")", ":", "def", "zigbee_frame_subscriber", "(", "event", ")", ":", "frame", "=", "pickle", ".", "loads", "(", "b64decode", "(", "event", ".", "data", "[", "ATTR_FRAME", "]", ")", ")", "callback", "(", ...
toggle watching a document for edits .
train
false
16,220
def each_channel(image_filter, image, *args, **kwargs): c_new = [image_filter(c, *args, **kwargs) for c in image.T] return np.array(c_new).T
[ "def", "each_channel", "(", "image_filter", ",", "image", ",", "*", "args", ",", "**", "kwargs", ")", ":", "c_new", "=", "[", "image_filter", "(", "c", ",", "*", "args", ",", "**", "kwargs", ")", "for", "c", "in", "image", ".", "T", "]", "return", ...
return color image by applying image_filter on channels of image .
train
false
16,222
def build_uri(orig_uriparts, kwargs): uriparts = [] for uripart in orig_uriparts: if uripart.startswith(u'_'): part = str(kwargs.pop(uripart, uripart)) else: part = uripart uriparts.append(part) uri = u'/'.join(uriparts) id = kwargs.pop(u'id', None) if id: uri += (u'/%s' % id) return uri
[ "def", "build_uri", "(", "orig_uriparts", ",", "kwargs", ")", ":", "uriparts", "=", "[", "]", "for", "uripart", "in", "orig_uriparts", ":", "if", "uripart", ".", "startswith", "(", "u'_'", ")", ":", "part", "=", "str", "(", "kwargs", ".", "pop", "(", ...
build the uri from the original uriparts and kwargs .
train
false
16,223
def find_data_files(base, globs): rv_dirs = [root for (root, dirs, files) in os.walk(base)] rv = [] for rv_dir in rv_dirs: files = [] for pat in globs: files += glob.glob(os.path.join(rv_dir, pat)) if (not files): continue target = os.path.join('lib', 'mypy', rv_dir) rv.append((target, files)) return rv
[ "def", "find_data_files", "(", "base", ",", "globs", ")", ":", "rv_dirs", "=", "[", "root", "for", "(", "root", ",", "dirs", ",", "files", ")", "in", "os", ".", "walk", "(", "base", ")", "]", "rv", "=", "[", "]", "for", "rv_dir", "in", "rv_dirs",...
find ipythons data_files .
train
false
16,224
def extract_tarball_to_dir(tarball, dir): if os.path.exists(dir): if os.path.isdir(dir): shutil.rmtree(dir) else: os.remove(dir) pwd = os.getcwd() os.chdir(os.path.dirname(os.path.abspath(dir))) newdir = extract_tarball(tarball) os.rename(newdir, dir) os.chdir(pwd)
[ "def", "extract_tarball_to_dir", "(", "tarball", ",", "dir", ")", ":", "if", "os", ".", "path", ".", "exists", "(", "dir", ")", ":", "if", "os", ".", "path", ".", "isdir", "(", "dir", ")", ":", "shutil", ".", "rmtree", "(", "dir", ")", "else", ":...
extract a tarball to a specified directory name instead of whatever the top level of a tarball is - useful for versioned directory names .
train
false
16,225
def get_exploitable_vulns(exploit): try: vulns = exploit.get_exploitable_vulns() except BaseFrameworkException: print ('WARNING: The %r exploit has no get_exploitable_vulns method!' % exploit) vulns = [] return vulns
[ "def", "get_exploitable_vulns", "(", "exploit", ")", ":", "try", ":", "vulns", "=", "exploit", ".", "get_exploitable_vulns", "(", ")", "except", "BaseFrameworkException", ":", "print", "(", "'WARNING: The %r exploit has no get_exploitable_vulns method!'", "%", "exploit", ...
returns the exploitable vulnerabilities .
train
false
16,228
def get_content_view(viewmode, hdrItems, content, limit, logfunc): if (not content): return ('No content', '') msg = [] hdrs = flow.ODictCaseless([list(i) for i in hdrItems]) enc = hdrs.get_first('content-encoding') if (enc and (enc != 'identity')): decoded = encoding.decode(enc, content) if decoded: content = decoded msg.append(('[decoded %s]' % enc)) try: ret = viewmode(hdrs, content, limit) except Exception as e: s = traceback.format_exc() s = ('Content viewer failed: \n' + s) logfunc(s) ret = None if (not ret): ret = get('Raw')(hdrs, content, limit) msg.append("Couldn't parse: falling back to Raw") else: msg.append(ret[0]) return (' '.join(msg), ret[1])
[ "def", "get_content_view", "(", "viewmode", ",", "hdrItems", ",", "content", ",", "limit", ",", "logfunc", ")", ":", "if", "(", "not", "content", ")", ":", "return", "(", "'No content'", ",", "''", ")", "msg", "=", "[", "]", "hdrs", "=", "flow", ".",...
returns a tuple .
train
false
16,230
def upload(): os.system('cd build/html; rsync -avz . pandas@pandas.pydata.org:/usr/share/nginx/pandas/pandas-docs/vbench/ -essh')
[ "def", "upload", "(", ")", ":", "os", ".", "system", "(", "'cd build/html; rsync -avz . pandas@pandas.pydata.org:/usr/share/nginx/pandas/pandas-docs/vbench/ -essh'", ")" ]
upload diffs to the code review server uploads the current modifications for a given change to the server .
train
false
16,231
def skip_bom(f): if (f.read(1) != u'\ufeff'): f.seek(0)
[ "def", "skip_bom", "(", "f", ")", ":", "if", "(", "f", ".", "read", "(", "1", ")", "!=", "u'\\ufeff'", ")", ":", "f", ".", "seek", "(", "0", ")" ]
read past a bom at the beginning of a source file .
train
false
16,232
def sudoer(username, hosts='ALL', operators='ALL', passwd=False, commands='ALL'): tags = ('PASSWD:' if passwd else 'NOPASSWD:') spec = ('%(username)s %(hosts)s=(%(operators)s) %(tags)s %(commands)s' % locals()) filename = ('/etc/sudoers.d/fabtools-%s' % username.strip()) if is_file(filename): run_as_root(('chmod 0640 %(filename)s && rm -f %(filename)s' % locals())) run_as_root(('echo "%(spec)s" >%(filename)s && chmod 0440 %(filename)s' % locals()), shell=True)
[ "def", "sudoer", "(", "username", ",", "hosts", "=", "'ALL'", ",", "operators", "=", "'ALL'", ",", "passwd", "=", "False", ",", "commands", "=", "'ALL'", ")", ":", "tags", "=", "(", "'PASSWD:'", "if", "passwd", "else", "'NOPASSWD:'", ")", "spec", "=", ...
require sudo permissions for a given user .
train
false
16,233
def unsubscribe_mailchimp(list_name, user_id, username=None, send_goodbye=True): user = User.load(user_id) m = get_mailchimp_api() list_id = get_list_id_from_name(list_name=list_name) m.lists.unsubscribe(id=list_id, email={'email': (username or user.username)}, send_goodbye=send_goodbye) if (user.mailchimp_mailing_lists is None): user.mailchimp_mailing_lists = {} user.save() user.mailchimp_mailing_lists[list_name] = False user.save()
[ "def", "unsubscribe_mailchimp", "(", "list_name", ",", "user_id", ",", "username", "=", "None", ",", "send_goodbye", "=", "True", ")", ":", "user", "=", "User", ".", "load", "(", "user_id", ")", "m", "=", "get_mailchimp_api", "(", ")", "list_id", "=", "g...
unsubscribe a user from a mailchimp mailing list given its name .
train
false
16,235
def Sequence(token): return OneOrMore((token + maybeComma))
[ "def", "Sequence", "(", "token", ")", ":", "return", "OneOrMore", "(", "(", "token", "+", "maybeComma", ")", ")" ]
a sequence of the token .
train
false
16,236
def work_rheader(r, tabs=[]): if (r.representation != 'html'): return None tablename = r.tablename record = r.record rheader = None rheader_fields = [] if record: T = current.T if (tablename == 'work_job'): if (not tabs): tabs = [(T('Basic Details'), ''), (T('Assignments'), 'assignment')] rheader_fields = [['name'], ['site_id'], ['start_date', 'duration'], ['status']] rheader = S3ResourceHeader(rheader_fields, tabs)(r) return rheader
[ "def", "work_rheader", "(", "r", ",", "tabs", "=", "[", "]", ")", ":", "if", "(", "r", ".", "representation", "!=", "'html'", ")", ":", "return", "None", "tablename", "=", "r", ".", "tablename", "record", "=", "r", ".", "record", "rheader", "=", "N...
work module resource headers .
train
false
16,238
@deprecated(Version('Twisted', 15, 5, 0)) def objectType(obj): keyDataMapping = {('n', 'e', 'd', 'p', 'q'): 'ssh-rsa', ('n', 'e', 'd', 'p', 'q', 'u'): 'ssh-rsa', ('y', 'g', 'p', 'q', 'x'): 'ssh-dss'} try: return keyDataMapping[tuple(obj.keydata)] except (KeyError, AttributeError): raise BadKeyError('invalid key object', obj)
[ "@", "deprecated", "(", "Version", "(", "'Twisted'", ",", "15", ",", "5", ",", "0", ")", ")", "def", "objectType", "(", "obj", ")", ":", "keyDataMapping", "=", "{", "(", "'n'", ",", "'e'", ",", "'d'", ",", "'p'", ",", "'q'", ")", ":", "'ssh-rsa'"...
return the ssh key type corresponding to a c{crypto .
train
false
16,239
def validate_type(magic_kind): if (magic_kind not in magic_spec): raise ValueError(('magic_kind must be one of %s, %s given' % magic_kinds), magic_kind)
[ "def", "validate_type", "(", "magic_kind", ")", ":", "if", "(", "magic_kind", "not", "in", "magic_spec", ")", ":", "raise", "ValueError", "(", "(", "'magic_kind must be one of %s, %s given'", "%", "magic_kinds", ")", ",", "magic_kind", ")" ]
ensure that the given magic_kind is valid .
train
false
16,240
def getCraftPreferences(pluginName): return settings.getReadRepository(getCraftModule(pluginName).getNewRepository()).preferences
[ "def", "getCraftPreferences", "(", "pluginName", ")", ":", "return", "settings", ".", "getReadRepository", "(", "getCraftModule", "(", "pluginName", ")", ".", "getNewRepository", "(", ")", ")", ".", "preferences" ]
get craft preferences .
train
false
16,241
def s3_yes_no_represent(value): if (value is True): return current.T('Yes') elif (value is False): return current.T('No') else: return current.messages['NONE']
[ "def", "s3_yes_no_represent", "(", "value", ")", ":", "if", "(", "value", "is", "True", ")", ":", "return", "current", ".", "T", "(", "'Yes'", ")", "elif", "(", "value", "is", "False", ")", ":", "return", "current", ".", "T", "(", "'No'", ")", "els...
represent a boolean field as yes/no instead of true/false .
train
false
16,242
def iddp_rid(eps, m, n, matvect): proj = np.empty(((m + 1) + ((2 * n) * (min(m, n) + 1))), order='F') (k, idx, proj, ier) = _id.iddp_rid(eps, m, n, matvect, proj) if (ier != 0): raise _RETCODE_ERROR proj = proj[:(k * (n - k))].reshape((k, (n - k)), order='F') return (k, idx, proj)
[ "def", "iddp_rid", "(", "eps", ",", "m", ",", "n", ",", "matvect", ")", ":", "proj", "=", "np", ".", "empty", "(", "(", "(", "m", "+", "1", ")", "+", "(", "(", "2", "*", "n", ")", "*", "(", "min", "(", "m", ",", "n", ")", "+", "1", ")...
compute id of a real matrix to a specified relative precision using random matrix-vector multiplication .
train
false
16,243
def _pick_channels_inverse_operator(ch_names, inv): sel = list() for name in inv['noise_cov'].ch_names: try: sel.append(ch_names.index(name)) except ValueError: raise ValueError(('The inverse operator was computed with channel %s which is not present in the data. You should compute a new inverse operator restricted to the good data channels.' % name)) return sel
[ "def", "_pick_channels_inverse_operator", "(", "ch_names", ",", "inv", ")", ":", "sel", "=", "list", "(", ")", "for", "name", "in", "inv", "[", "'noise_cov'", "]", ".", "ch_names", ":", "try", ":", "sel", ".", "append", "(", "ch_names", ".", "index", "...
data channel indices to be used knowing an inverse operator .
train
false
16,244
def ladder_graph(n, create_using=None): if ((create_using is not None) and create_using.is_directed()): raise nx.NetworkXError('Directed Graph not supported') G = empty_graph((2 * n), create_using) G.name = ('ladder_graph_(%d)' % n) G.add_edges_from(pairwise(range(n))) G.add_edges_from(pairwise(range(n, (2 * n)))) G.add_edges_from(((v, (v + n)) for v in range(n))) return G
[ "def", "ladder_graph", "(", "n", ",", "create_using", "=", "None", ")", ":", "if", "(", "(", "create_using", "is", "not", "None", ")", "and", "create_using", ".", "is_directed", "(", ")", ")", ":", "raise", "nx", ".", "NetworkXError", "(", "'Directed Gra...
return the ladder graph of length n .
train
false
16,245
def get_gravatar_url(email): return ('https://www.gravatar.com/avatar/%s?d=identicon&s=%s' % (hashlib.md5(email).hexdigest(), GRAVATAR_SIZE_PX))
[ "def", "get_gravatar_url", "(", "email", ")", ":", "return", "(", "'https://www.gravatar.com/avatar/%s?d=identicon&s=%s'", "%", "(", "hashlib", ".", "md5", "(", "email", ")", ".", "hexdigest", "(", ")", ",", "GRAVATAR_SIZE_PX", ")", ")" ]
returns the gravatar url for the specified email .
train
false
16,247
def gen_equivalent_factors(): (yield SomeFactor()) (yield SomeFactor(inputs=NotSpecified)) (yield SomeFactor(SomeFactor.inputs)) (yield SomeFactor(inputs=SomeFactor.inputs)) (yield SomeFactor([SomeDataSet.foo, SomeDataSet.bar])) (yield SomeFactor(window_length=SomeFactor.window_length)) (yield SomeFactor(window_length=NotSpecified)) (yield SomeFactor([SomeDataSet.foo, SomeDataSet.bar], window_length=NotSpecified)) (yield SomeFactor([SomeDataSet.foo, SomeDataSet.bar], window_length=SomeFactor.window_length)) (yield SomeFactorAlias())
[ "def", "gen_equivalent_factors", "(", ")", ":", "(", "yield", "SomeFactor", "(", ")", ")", "(", "yield", "SomeFactor", "(", "inputs", "=", "NotSpecified", ")", ")", "(", "yield", "SomeFactor", "(", "SomeFactor", ".", "inputs", ")", ")", "(", "yield", "So...
return an iterator of somefactor instances that should all be the same object .
train
false
16,248
def valid_fileproto(uri): try: return bool(re.match('^(?:salt|https?|ftp)://', uri)) except Exception: return False
[ "def", "valid_fileproto", "(", "uri", ")", ":", "try", ":", "return", "bool", "(", "re", ".", "match", "(", "'^(?:salt|https?|ftp)://'", ",", "uri", ")", ")", "except", "Exception", ":", "return", "False" ]
returns a boolean value based on whether or not the uri passed has a valid remote file protocol designation cli example: .
train
false
16,250
def _yaml_configuration_path_option(option_name, option_value): yaml_path = _existing_file_path_option(option_name, option_value) try: configuration = yaml.safe_load(yaml_path.open()) except ParserError as e: raise UsageError(u'Problem with --{}. Unable to parse YAML from {}. Error message: {}.'.format(option_name, yaml_path.path, unicode(e))) return configuration
[ "def", "_yaml_configuration_path_option", "(", "option_name", ",", "option_value", ")", ":", "yaml_path", "=", "_existing_file_path_option", "(", "option_name", ",", "option_value", ")", "try", ":", "configuration", "=", "yaml", ".", "safe_load", "(", "yaml_path", "...
validate a command line option containing a filepath to a yaml file .
train
false
16,251
def get_group_host(name, region=None, key=None, keyid=None, profile=None): conn = _get_conn(region=region, key=key, keyid=keyid, profile=profile) if (not conn): return None try: cc = conn.describe_replication_groups(name) except boto.exception.BotoServerError as e: msg = 'Failed to get config for cache cluster {0}.'.format(name) log.error(msg) log.debug(e) return {} cc = cc['DescribeReplicationGroupsResponse']['DescribeReplicationGroupsResult'] cc = cc['ReplicationGroups'][0]['NodeGroups'][0]['PrimaryEndpoint'] host = cc['Address'] return host
[ "def", "get_group_host", "(", "name", ",", "region", "=", "None", ",", "key", "=", "None", ",", "keyid", "=", "None", ",", "profile", "=", "None", ")", ":", "conn", "=", "_get_conn", "(", "region", "=", "region", ",", "key", "=", "key", ",", "keyid...
get hostname from replication cache group cli example:: salt myminion boto_elasticache .
train
true
16,253
def _GuessOrders(filters, orders): orders = orders[:] if (not orders): for filter_pb in filters: if (filter_pb.op() != datastore_pb.Query_Filter.EQUAL): order = datastore_pb.Query_Order() order.set_property(filter_pb.property(0).name()) orders.append(order) break exists_props = (filter_pb.property(0).name() for filter_pb in filters if (filter_pb.op() == datastore_pb.Query_Filter.EXISTS)) for prop in sorted(exists_props): order = datastore_pb.Query_Order() order.set_property(prop) orders.append(order) if ((not orders) or (orders[(-1)].property() != '__key__')): order = datastore_pb.Query_Order() order.set_property('__key__') orders.append(order) return orders
[ "def", "_GuessOrders", "(", "filters", ",", "orders", ")", ":", "orders", "=", "orders", "[", ":", "]", "if", "(", "not", "orders", ")", ":", "for", "filter_pb", "in", "filters", ":", "if", "(", "filter_pb", ".", "op", "(", ")", "!=", "datastore_pb",...
guess any implicit ordering .
train
false
16,254
def register_function(f): register_variable(f.__name__, f)
[ "def", "register_function", "(", "f", ")", ":", "register_variable", "(", "f", ".", "__name__", ",", "f", ")" ]
associate a callable with a particular func .
train
false
16,255
def suggestions(matches, command): for m in matches: if (('name' in m) and ('suggestions' in m)): before = command[:m['start']] after = command[m['end']:] newsuggestions = [] for othermp in sorted(m['suggestions'], key=(lambda mp: mp.section)): mid = ('%s.%s' % (othermp.name, othermp.section)) newsuggestions.append({'cmd': ''.join([before, mid, after]), 'text': othermp.namesection}) m['suggestions'] = newsuggestions
[ "def", "suggestions", "(", "matches", ",", "command", ")", ":", "for", "m", "in", "matches", ":", "if", "(", "(", "'name'", "in", "m", ")", "and", "(", "'suggestions'", "in", "m", ")", ")", ":", "before", "=", "command", "[", ":", "m", "[", "'sta...
enrich command matches with links to other man pages with the same name .
train
false
16,256
@contextlib.contextmanager def forbid_codegen(): from numba.targets import codegen patchpoints = ['CodeLibrary._finalize_final_module'] old = {} def fail(*args, **kwargs): raise RuntimeError('codegen forbidden by test case') try: for name in patchpoints: parts = name.split('.') obj = codegen for attrname in parts[:(-1)]: obj = getattr(obj, attrname) attrname = parts[(-1)] value = getattr(obj, attrname) assert callable(value), ('%r should be callable' % name) old[(obj, attrname)] = value setattr(obj, attrname, fail) (yield) finally: for ((obj, attrname), value) in old.items(): setattr(obj, attrname, value)
[ "@", "contextlib", ".", "contextmanager", "def", "forbid_codegen", "(", ")", ":", "from", "numba", ".", "targets", "import", "codegen", "patchpoints", "=", "[", "'CodeLibrary._finalize_final_module'", "]", "old", "=", "{", "}", "def", "fail", "(", "*", "args",...
forbid llvm code generation during the execution of the context managers enclosed block .
train
false
16,257
@bdd.then(bdd.parsers.parse('the following tabs should be open:\n{tabs}')) def check_open_tabs(quteproc, request, tabs): session = quteproc.get_session() active_suffix = ' (active)' tabs = tabs.splitlines() assert (len(session['windows']) == 1) assert (len(session['windows'][0]['tabs']) == len(tabs)) for (i, line) in enumerate(tabs): line = line.strip() assert line.startswith('- ') line = line[2:] if line.endswith(active_suffix): path = line[:(- len(active_suffix))] active = True else: path = line active = False session_tab = session['windows'][0]['tabs'][i] assert (session_tab['history'][(-1)]['url'] == quteproc.path_to_url(path)) if active: assert session_tab['active'] else: assert ('active' not in session_tab)
[ "@", "bdd", ".", "then", "(", "bdd", ".", "parsers", ".", "parse", "(", "'the following tabs should be open:\\n{tabs}'", ")", ")", "def", "check_open_tabs", "(", "quteproc", ",", "request", ",", "tabs", ")", ":", "session", "=", "quteproc", ".", "get_session",...
check the list of open tabs in the session .
train
false
16,260
def _plot_unit_kde(ax, x, data, color, **kwargs): _ts_kde(ax, x, data, color, **kwargs)
[ "def", "_plot_unit_kde", "(", "ax", ",", "x", ",", "data", ",", "color", ",", "**", "kwargs", ")", ":", "_ts_kde", "(", "ax", ",", "x", ",", "data", ",", "color", ",", "**", "kwargs", ")" ]
plot the kernal density estimate over the sample .
train
false
16,261
def getMissval(inped=''): commonmissvals = {'N': 'N', '0': '0', 'n': 'n', '9': '9', '-': '-', '.': '.'} try: f = open(inped, 'r') except: return None missval = None while (missval is None): try: l = f.readline() except: break ll = l.split()[6:] for c in ll: if commonmissvals.get(c, None): missval = c f.close() return missval if (not missval): missval = 'N' f.close() return missval
[ "def", "getMissval", "(", "inped", "=", "''", ")", ":", "commonmissvals", "=", "{", "'N'", ":", "'N'", ",", "'0'", ":", "'0'", ",", "'n'", ":", "'n'", ",", "'9'", ":", "'9'", ",", "'-'", ":", "'-'", ",", "'.'", ":", "'.'", "}", "try", ":", "f...
read some lines .
train
false
16,262
@requires_application() @requires_pyopengl() def test_basics_pypengl(): _test_basics('pyopengl2')
[ "@", "requires_application", "(", ")", "@", "requires_pyopengl", "(", ")", "def", "test_basics_pypengl", "(", ")", ":", "_test_basics", "(", "'pyopengl2'", ")" ]
test pyopengl gl backend for basic functionality .
train
false
16,263
def getExtendedLineSegment(extensionDistance, lineSegment, loopXIntersections): pointBegin = lineSegment[0].point pointEnd = lineSegment[1].point segment = (pointEnd - pointBegin) segmentLength = abs(segment) if (segmentLength <= 0.0): print 'This should never happen in getExtendedLineSegment in raft, the segment should have a length greater than zero.' print lineSegment return None segmentExtend = ((segment * extensionDistance) / segmentLength) lineSegment[0].point -= segmentExtend lineSegment[1].point += segmentExtend for loopXIntersection in loopXIntersections: setExtendedPoint(lineSegment[0], pointBegin, loopXIntersection) setExtendedPoint(lineSegment[1], pointEnd, loopXIntersection) return lineSegment
[ "def", "getExtendedLineSegment", "(", "extensionDistance", ",", "lineSegment", ",", "loopXIntersections", ")", ":", "pointBegin", "=", "lineSegment", "[", "0", "]", ".", "point", "pointEnd", "=", "lineSegment", "[", "1", "]", ".", "point", "segment", "=", "(",...
get extended line segment .
train
false
16,264
def _api_config_set_pause(output, kwargs): value = kwargs.get('value') scheduler.plan_resume(int_conv(value)) return report(output)
[ "def", "_api_config_set_pause", "(", "output", ",", "kwargs", ")", ":", "value", "=", "kwargs", ".", "get", "(", "'value'", ")", "scheduler", ".", "plan_resume", "(", "int_conv", "(", "value", ")", ")", "return", "report", "(", "output", ")" ]
api: accepts output .
train
false
16,265
def _fill_shape(x, n): if ((not isinstance(n, numbers.Integral)) or (n < 1)): raise TypeError('n must be a positive integer') if isinstance(x, numbers.Integral): return ((x,) * n) elif (isinstance(x, collections.Iterable) and (len(x) == n) and all((isinstance(v, numbers.Integral) for v in x))): return tuple(x) else: raise TypeError('x is {}, must be either an integer or an iterable of integers of size {}'.format(x, n))
[ "def", "_fill_shape", "(", "x", ",", "n", ")", ":", "if", "(", "(", "not", "isinstance", "(", "n", ",", "numbers", ".", "Integral", ")", ")", "or", "(", "n", "<", "1", ")", ")", ":", "raise", "TypeError", "(", "'n must be a positive integer'", ")", ...
idempotentally converts an integer to a tuple of integers of a given size .
train
false
16,266
def build_controller_args(facts): cloud_cfg_path = os.path.join(facts['common']['config_base'], 'cloudprovider') if ('master' in facts): controller_args = {} if ('cloudprovider' in facts): if ('kind' in facts['cloudprovider']): if (facts['cloudprovider']['kind'] == 'aws'): controller_args['cloud-provider'] = ['aws'] controller_args['cloud-config'] = [(cloud_cfg_path + '/aws.conf')] if (facts['cloudprovider']['kind'] == 'openstack'): controller_args['cloud-provider'] = ['openstack'] controller_args['cloud-config'] = [(cloud_cfg_path + '/openstack.conf')] if (facts['cloudprovider']['kind'] == 'gce'): controller_args['cloud-provider'] = ['gce'] controller_args['cloud-config'] = [(cloud_cfg_path + '/gce.conf')] if (controller_args != {}): facts = merge_facts({'master': {'controller_args': controller_args}}, facts, [], []) return facts
[ "def", "build_controller_args", "(", "facts", ")", ":", "cloud_cfg_path", "=", "os", ".", "path", ".", "join", "(", "facts", "[", "'common'", "]", "[", "'config_base'", "]", ",", "'cloudprovider'", ")", "if", "(", "'master'", "in", "facts", ")", ":", "co...
build master controller_args .
train
false
16,267
def user_autocreate_handler(sender, instance, created, **kwargs): if created: try: profile = instance.profile except: profile = User(user=instance) profile.save()
[ "def", "user_autocreate_handler", "(", "sender", ",", "instance", ",", "created", ",", "**", "kwargs", ")", ":", "if", "created", ":", "try", ":", "profile", "=", "instance", ".", "profile", "except", ":", "profile", "=", "User", "(", "user", "=", "insta...
when a django user is created .
train
false
16,268
def test_genelatex_wrap_with_breqn(): def mock_kpsewhich(filename): nt.assert_equal(filename, 'breqn.sty') return 'path/to/breqn.sty' with patch.object(latextools, 'kpsewhich', mock_kpsewhich): nt.assert_equal('\n'.join(latextools.genelatex('x^2', True)), '\\documentclass{article}\n\\usepackage{amsmath}\n\\usepackage{amsthm}\n\\usepackage{amssymb}\n\\usepackage{bm}\n\\usepackage{breqn}\n\\pagestyle{empty}\n\\begin{document}\n\\begin{dmath*}\nx^2\n\\end{dmath*}\n\\end{document}')
[ "def", "test_genelatex_wrap_with_breqn", "(", ")", ":", "def", "mock_kpsewhich", "(", "filename", ")", ":", "nt", ".", "assert_equal", "(", "filename", ",", "'breqn.sty'", ")", "return", "'path/to/breqn.sty'", "with", "patch", ".", "object", "(", "latextools", "...
test genelatex with wrap=true for the case breqn .
train
false
16,270
def submitData(): action = request.post_vars.action if (action == 'vulnerability'): return import_vul_ui() elif (action == 'vulnerability_part1'): return import_vul_csv_part1() elif (action == 'vulnerability_part2'): return import_vul_csv_part2() elif (action in ('map', 'image', 'other', 'vca')): return import_document(action) elif (action == 'demographics'): return import_demo_ui() elif (action == 'demographics_part1'): return import_demo_csv_part1() elif (action == 'demographics_part2'): return import_demo_csv_part2()
[ "def", "submitData", "(", ")", ":", "action", "=", "request", ".", "post_vars", ".", "action", "if", "(", "action", "==", "'vulnerability'", ")", ":", "return", "import_vul_ui", "(", ")", "elif", "(", "action", "==", "'vulnerability_part1'", ")", ":", "ret...
controller to manage the ajax import of vulnerability data .
train
false
16,271
def already_listening_socket(port, renewer=False): try: testsocket = socket.socket(socket.AF_INET, socket.SOCK_STREAM, 0) testsocket.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, 1) try: testsocket.bind(('', port)) except socket.error: display = zope.component.getUtility(interfaces.IDisplay) extra = '' if renewer: extra = RENEWER_EXTRA_MSG display.notification('Port {0} is already in use by another process. This will prevent us from binding to that port. Please stop the process that is populating the port in question and try again. {1}'.format(port, extra), force_interactive=True) return True finally: testsocket.close() except socket.error: pass return False
[ "def", "already_listening_socket", "(", "port", ",", "renewer", "=", "False", ")", ":", "try", ":", "testsocket", "=", "socket", ".", "socket", "(", "socket", ".", "AF_INET", ",", "socket", ".", "SOCK_STREAM", ",", "0", ")", "testsocket", ".", "setsockopt"...
simple socket based check to find out if port is already in use .
train
false
16,272
def getNewRepository(): return ExportRepository()
[ "def", "getNewRepository", "(", ")", ":", "return", "ExportRepository", "(", ")" ]
get the repository constructor .
train
false
16,274
def delete_acl_group(id): models.AclGroup.smart_get(id).delete()
[ "def", "delete_acl_group", "(", "id", ")", ":", "models", ".", "AclGroup", ".", "smart_get", "(", "id", ")", ".", "delete", "(", ")" ]
delete acl group .
train
false
16,275
def _get_xml(xml_str): try: xml_data = etree.XML(xml_str) except etree.XMLSyntaxError as err: raise SaltCloudSystemExit('opennebula returned: {0}'.format(xml_str)) return xml_data
[ "def", "_get_xml", "(", "xml_str", ")", ":", "try", ":", "xml_data", "=", "etree", ".", "XML", "(", "xml_str", ")", "except", "etree", ".", "XMLSyntaxError", "as", "err", ":", "raise", "SaltCloudSystemExit", "(", "'opennebula returned: {0}'", ".", "format", ...
intrepret the data coming from opennebula and raise if its not xml .
train
true
16,276
def latex_visit_inheritance_diagram(self, node): graph = node['graph'] graph_hash = get_graph_hash(node) name = ('inheritance%s' % graph_hash) dotcode = graph.generate_dot(name, env=self.builder.env, graph_attrs={'size': '"6.0,6.0"'}) render_dot_latex(self, node, dotcode, {}, 'inheritance') raise nodes.SkipNode
[ "def", "latex_visit_inheritance_diagram", "(", "self", ",", "node", ")", ":", "graph", "=", "node", "[", "'graph'", "]", "graph_hash", "=", "get_graph_hash", "(", "node", ")", "name", "=", "(", "'inheritance%s'", "%", "graph_hash", ")", "dotcode", "=", "grap...
output the graph for latex .
train
true
16,277
def rms(data): if (data.dtype == np.int16): md2 = (data.astype(np.float) ** 2) else: md2 = (data ** 2) return np.sqrt(np.mean(md2))
[ "def", "rms", "(", "data", ")", ":", "if", "(", "data", ".", "dtype", "==", "np", ".", "int16", ")", ":", "md2", "=", "(", "data", ".", "astype", "(", "np", ".", "float", ")", "**", "2", ")", "else", ":", "md2", "=", "(", "data", "**", "2",...
basic audio-power measure: root-mean-square of data .
train
false
16,278
def applyLookupTable(data, lut): if (data.dtype.kind not in ('i', 'u')): data = data.astype(int) return np.take(lut, data, axis=0, mode='clip')
[ "def", "applyLookupTable", "(", "data", ",", "lut", ")", ":", "if", "(", "data", ".", "dtype", ".", "kind", "not", "in", "(", "'i'", ",", "'u'", ")", ")", ":", "data", "=", "data", ".", "astype", "(", "int", ")", "return", "np", ".", "take", "(...
uses values in *data* as indexes to select values from *lut* .
train
false
16,279
def teletex_search_function(name): if (name != u'teletex'): return None return codecs.CodecInfo(name=u'teletex', encode=TeletexCodec().encode, decode=TeletexCodec().decode, incrementalencoder=TeletexIncrementalEncoder, incrementaldecoder=TeletexIncrementalDecoder, streamreader=TeletexStreamReader, streamwriter=TeletexStreamWriter)
[ "def", "teletex_search_function", "(", "name", ")", ":", "if", "(", "name", "!=", "u'teletex'", ")", ":", "return", "None", "return", "codecs", ".", "CodecInfo", "(", "name", "=", "u'teletex'", ",", "encode", "=", "TeletexCodec", "(", ")", ".", "encode", ...
search function for teletex codec that is passed to codecs .
train
true
16,280
def setup_pidlockfile_fixtures(testcase, scenario_name=None): setup_pidfile_fixtures(testcase) scaffold.mock('pidlockfile.write_pid_to_pidfile', tracker=testcase.mock_tracker) scaffold.mock('pidlockfile.remove_existing_pidfile', tracker=testcase.mock_tracker) if (scenario_name is not None): set_pidlockfile_scenario(testcase, scenario_name, clear_tracker=False)
[ "def", "setup_pidlockfile_fixtures", "(", "testcase", ",", "scenario_name", "=", "None", ")", ":", "setup_pidfile_fixtures", "(", "testcase", ")", "scaffold", ".", "mock", "(", "'pidlockfile.write_pid_to_pidfile'", ",", "tracker", "=", "testcase", ".", "mock_tracker",...
set up common fixtures for pidlockfile test cases .
train
false
16,281
@doctest_depends_on(exe=('f2py', 'gfortran'), modules=('numpy',)) def binary_function(symfunc, expr, **kwargs): binary = autowrap(expr, **kwargs) return implemented_function(symfunc, binary)
[ "@", "doctest_depends_on", "(", "exe", "=", "(", "'f2py'", ",", "'gfortran'", ")", ",", "modules", "=", "(", "'numpy'", ",", ")", ")", "def", "binary_function", "(", "symfunc", ",", "expr", ",", "**", "kwargs", ")", ":", "binary", "=", "autowrap", "(",...
returns a sympy function with expr as binary implementation this is a convenience function that automates the steps needed to autowrap the sympy expression and attaching it to a function object with implemented_function() .
train
false
16,282
def test_service_schema(): options = ({}, None, {'service': 'homeassistant.turn_on', 'service_template': 'homeassistant.turn_on'}, {'data': {'entity_id': 'light.kitchen'}}, {'service': 'homeassistant.turn_on', 'data': None}, {'service': 'homeassistant.turn_on', 'data_template': {'brightness': '{{ no_end'}}) for value in options: with pytest.raises(vol.MultipleInvalid): cv.SERVICE_SCHEMA(value) options = ({'service': 'homeassistant.turn_on'}, {'service': 'homeassistant.turn_on', 'entity_id': 'light.kitchen'}, {'service': 'homeassistant.turn_on', 'entity_id': ['light.kitchen', 'light.ceiling']}) for value in options: cv.SERVICE_SCHEMA(value)
[ "def", "test_service_schema", "(", ")", ":", "options", "=", "(", "{", "}", ",", "None", ",", "{", "'service'", ":", "'homeassistant.turn_on'", ",", "'service_template'", ":", "'homeassistant.turn_on'", "}", ",", "{", "'data'", ":", "{", "'entity_id'", ":", ...
test service_schema validation .
train
false
16,283
def reprovision(vm, image, key='uuid'): ret = {} vmadm = _check_vmadm() if (key not in ['uuid', 'alias', 'hostname']): ret['Error'] = 'Key must be either uuid, alias or hostname' return ret vm = lookup('{0}={1}'.format(key, vm), one=True) if ('Error' in vm): return vm if (image not in __salt__['imgadm.list']()): ret['Error'] = 'Image ({0}) is not present on this host'.format(image) return ret cmd = 'echo {image} | {vmadm} reprovision {uuid}'.format(vmadm=vmadm, uuid=vm, image=_quote_args(json.dumps({'image_uuid': image}))) res = __salt__['cmd.run_all'](cmd, python_shell=True) retcode = res['retcode'] if (retcode != 0): ret['Error'] = (res['stderr'] if ('stderr' in res) else _exit_status(retcode)) return ret return True
[ "def", "reprovision", "(", "vm", ",", "image", ",", "key", "=", "'uuid'", ")", ":", "ret", "=", "{", "}", "vmadm", "=", "_check_vmadm", "(", ")", "if", "(", "key", "not", "in", "[", "'uuid'", ",", "'alias'", ",", "'hostname'", "]", ")", ":", "ret...
reprovision a vm vm : string vm to be reprovisioned image : string uuid of new image key : string [uuid|alias|hostname] value type of vm parameter cli example: .
train
false
16,284
def test_shifted_sparktext(): chart = Line() chart.add('_', list(map((lambda x: (x + 10000)), range(8)))) assert (chart.render_sparktext() == u('\xe2\x96\x81\xe2\x96\x82\xe2\x96\x83\xe2\x96\x84\xe2\x96\x85\xe2\x96\x86\xe2\x96\x87\xe2\x96\x88')) assert (chart.render_sparktext(relative_to=0) == u('\xe2\x96\x87\xe2\x96\x87\xe2\x96\x87\xe2\x96\x87\xe2\x96\x87\xe2\x96\x87\xe2\x96\x87\xe2\x96\x88'))
[ "def", "test_shifted_sparktext", "(", ")", ":", "chart", "=", "Line", "(", ")", "chart", ".", "add", "(", "'_'", ",", "list", "(", "map", "(", "(", "lambda", "x", ":", "(", "x", "+", "10000", ")", ")", ",", "range", "(", "8", ")", ")", ")", "...
test relative_to option in sparktext .
train
false
16,285
def gt(a, b): return (a > b)
[ "def", "gt", "(", "a", ",", "b", ")", ":", "return", "(", "a", ">", "b", ")" ]
returns a boolean of whether the value is greater than the argument .
train
false
16,286
def p_declarator_2(t): pass
[ "def", "p_declarator_2", "(", "t", ")", ":", "pass" ]
declarator : direct_declarator .
train
false
16,288
def plot_scatter(fig, x, y, x2, y2, binnum): fig.canvas.set_window_title(u'\u4ea4\u6613\u9e1f\u77b0\u56fe') (left, width) = (0.1, 0.65) (bottom, height) = (0.1, 0.65) bottom_h = left_h = ((left + width) + 0.02) rect_scatter = [left, bottom, width, height] rect_histx = [left, bottom_h, width, 0.2] rect_histy = [left_h, bottom, 0.2, height] axScatter = plt.axes(rect_scatter) axHistx = plt.axes(rect_histx) axHisty = plt.axes(rect_histy) cursor = Cursor(axScatter, useblit=True, color='red', linewidth=1) axScatter.plot(x, y, 'o', color='red') axScatter.plot(x2, y2, 'o', color='blue') xmax = np.max((x + x2)) xmin = np.min((x + x2)) binwidth = (xmax / binnum) lim = ((int((xmax / binwidth)) + 1) * binwidth) bins = np.arange((- lim), (lim + binwidth), binwidth) axHistx.hist((x + x2), bins=bins) ymax = np.max((y + y2)) ymin = np.min((y + y2)) binwidth = (ymax / binnum) lim = ((int((ymax / binwidth)) + 1) * binwidth) bins = np.arange((- lim), (lim + binwidth), binwidth) axHisty.hist(y, bins=bins, orientation='horizontal', color='red') axHisty.hist(y2, bins=bins, orientation='horizontal', color='blue') xymax = np.max([np.max(np.fabs((x + x2))), np.max(np.fabs((y + y2)))]) lim = ((int((xymax / binwidth)) + 1) * binwidth) axScatter.axhline(color='black') axHistx.set_xlim(axScatter.get_xlim()) axHisty.set_ylim(axScatter.get_ylim()) axHisty.set_xlabel(u'\u76c8\u4e8f\u5206\u5e03', fontproperties=font_big) axHistx.set_ylabel(u'\u5468\u671f\u5206\u5e03', fontproperties=font_big) axScatter.set_xlabel(u'\u76c8\u4e8f\u548c\u5468\u671f\u5206\u5e03', fontproperties=font_big) axScatter.grid(True) axHistx.grid(True) axHisty.grid(True) c = Cursor(axScatter, useblit=True, color='red', linewidth=1, vertOn=True, horizOn=True) return ([axScatter, axHistx, axHisty], [c])
[ "def", "plot_scatter", "(", "fig", ",", "x", ",", "y", ",", "x2", ",", "y2", ",", "binnum", ")", ":", "fig", ".", "canvas", ".", "set_window_title", "(", "u'\\u4ea4\\u6613\\u9e1f\\u77b0\\u56fe'", ")", "(", "left", ",", "width", ")", "=", "(", "0.1", ",...
scatter plot .
train
false
16,289
def conditionally_calculate_md5(params, context, request_signer, **kwargs): if MD5_AVAILABLE: calculate_md5(params, **kwargs)
[ "def", "conditionally_calculate_md5", "(", "params", ",", "context", ",", "request_signer", ",", "**", "kwargs", ")", ":", "if", "MD5_AVAILABLE", ":", "calculate_md5", "(", "params", ",", "**", "kwargs", ")" ]
only add a content-md5 if the system supports it .
train
false
16,290
def tree_width(N, to_binary=False): if (N < 32): group_size = 2 else: group_size = int(math.log(N)) num_groups = (N // group_size) if (to_binary or (num_groups < 16)): return (2 ** int(math.log((N / group_size), 2))) else: return num_groups
[ "def", "tree_width", "(", "N", ",", "to_binary", "=", "False", ")", ":", "if", "(", "N", "<", "32", ")", ":", "group_size", "=", "2", "else", ":", "group_size", "=", "int", "(", "math", ".", "log", "(", "N", ")", ")", "num_groups", "=", "(", "N...
generate tree width suitable for merge_sorted given n inputs the larger n is .
train
false
16,297
def p_topic_given_document(topic, d, alpha=0.1): return ((document_topic_counts[d][topic] + alpha) / (document_lengths[d] + (K * alpha)))
[ "def", "p_topic_given_document", "(", "topic", ",", "d", ",", "alpha", "=", "0.1", ")", ":", "return", "(", "(", "document_topic_counts", "[", "d", "]", "[", "topic", "]", "+", "alpha", ")", "/", "(", "document_lengths", "[", "d", "]", "+", "(", "K",...
the fraction of words in document _d_ that are assigned to _topic_ .
train
false